import pytest
from sivtools.data_structures import DotDict
def test_dotdict_get_item_by_key():
sample_dict = {}
sample_dict["item"] = "value"
my_dict = DotDict(sample_dict)
assert my_dict.item == "value"
def test_dotdict_nested():
inside_dict = {}
inside_dict["inner_item"] = "inner value"
sample_dict = {}
sample_dict["item"] = inside_dict
my_dict = DotDict(sample_dict)
assert my_dict.item.inner_item == "inner value"
def test_create_dotdict_with_non_mapping():
"""
Creating a DotDict using a non-mapping type results in an error
"""
with pytest.raises(TypeError, match="Requires mapping type"):
DotDict([1, 2, 3])
def test_accessing_keyword_element():
"""
Unallowed attribute is identified and stored appropriately
"""
sample_dict = {}
sample_dict["1str"] = "value"
my_dict = DotDict(sample_dict)
assert "1str" in my_dict.unallowed_attributes
|
# Copyright 2017 Alexandru Catrina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JavaScript events list:
- getter used to obtain data
- setter used to reproduce data
"""
class Events(object):
EVENTS = {
"click": {
"getter": r"function(m){var s=function(n){var y=n.localName;y+=n.id!=''?'#'+n.id:'';y+=n.className!=''?'.'+n.className.replace(/\s/g,'.'):'';return y;};return s(m.parentElement)+' '+s(m);}(e.target)",
"setter": r"(function(d,i,z,b){var n=d.getElementById(i),e;if(n){n.style.display=z;e=d.elementFromPoint($clientX,$clientY);n.style.display=b;}if(!e)e=d.querySelector('$data');if(e)e.click();}(document,'mouse__pointer','none','block'))"
},
"mousemove": {
"getter": r"null",
"setter": r"(function(d,e,i){var x=$clientX,y=$clientY,n=d.getElementById(i);if(n==null){n=d.createElement(e);d.body.appendChild(n);n.id=i;n.style.position='fixed';n.style.width='16px';n.style.height='16px';n.style.marginTop='-8px';n.style.marginLeft='-8px';n.style.background='#fff';n.style.borderRadius='50px';n.style.border='3px solid #384c8c';n.style.zIndex=999999999;}n.style.top=y+'px';n.style.left=x+'px';dispatchEvent(new MouseEvent('mousemove',{clientX:x,clientY:y}));}(document,'div','mouse__pointer'))"
},
"scroll": {
"getter": r"window.scrollY",
"setter": r"window.scrollTo(0, $data)"
}
}
@classmethod
def builder(cls, schema, events=None):
if events is None:
events = cls.EVENTS
for name, methods in events.items():  # .items() works on both Python 2 and 3
clazz = "{}Event".format(name.capitalize())
event = type(clazz, (schema, object), {
"_getter": methods.get("getter"),
"_setter": methods.get("setter"),
"_name": name
})
yield name, event
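# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# `Events.builder` yields one dynamically created *Event class per entry in EVENTS,
# mixing in whatever `schema` base class the caller supplies. `BaseSchema` below is a
# hypothetical placeholder, purely to show the shape of the generated classes.
if __name__ == "__main__":
    class BaseSchema(object):
        pass

    for event_name, event_cls in Events.builder(BaseSchema):
        # e.g. "click" -> ClickEvent, carrying _name/_getter/_setter class attributes
        print(event_name, event_cls.__name__, event_cls._name)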
|
import numpy as np
class ANN:
def __init__(self, type= None, optimizer = None, initial_W = 0):
self.layers = []
self.type = type
self.optimizer = optimizer
self.depth = 0
self.initial_W = initial_W
def add_layer(self, m, n, Activation):
self.layers.append(Layer(M = m, N = n, activation = Activation, level = self.depth))
self.layers[self.depth].W = np.full((self.layers[self.depth].M, self.layers[self.depth].N), self.initial_W)
self.depth += 1
def check_ANN(self):
if self.type == None:
if self.optimizer == None:
if self.depth == 0:
print("Fault ANN: type is:", self.type, "optimizer is:", self.optimizer, "depth must be greater than", self.depth, "\n\n")
return False
elif self.optimizer == None:
if self.depth == 0:
print("Fault ANN: optimizer is:", self.optimizer, "depth must be greater than",
self.depth, "\n\n")
return False
elif self.type == None:
if self.depth == 0:
print("Fault ANN: type is:", self.type, "depth must be greater than", self.depth, "\n\n")
return False
elif self.depth == 0:
print("Fault ANN: depth must be greater than", self.depth, "\n\n")
return False
else:
return True
def check_Layers(self):
return True
def compile(self):
self.check_ANN()
self.check_Layers()
class Layer(object):
def __init__(self, M = None, N = None, bias = False, set_bias = 1, activation = None, W = None, level = None):
self.M = M
self.N = N
self.W = W
self.bias = bias
self.set_bias = set_bias
self.activation = activation
self.level = level
model = ANN(type = "forward footing", optimizer = "SGD")
model.add_layer(m = 13, n =12, Activation = 'relu')
|
# -*- coding: UTF-8 -*-
# Copyright 2014 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
def populate(p):
# Rapla (county)
p.county("Rapla", "")
p.municipality("Kehtna", "")
p.smallborough("Lelle", "")
p.smallborough("Eidapere", "")
p.smallborough("Kaerepere", "")
p.village("Saksa", "")
p.village("Ohekatku", "")
p.village("Rõue", "")
p.smallborough("Keava", "")
p.village("Käbiküla", "")
p.village("Metsaääre", "")
p.village("Kehtna-Nurme", "79051")
p.village("Laeste", "79064")
p.village("Koogimäe", "79052")
p.village("Palasi", "79018")
p.village("Kärpla", "79063")
p.village("Kastna", "79016")
p.village("Vastja", "79071")
p.village("Lau", "79008")
p.village("Linnaaluste", "79050")
p.village("Reonda", "79015")
p.village("Kõrbja", "79023")
p.village("Selja", "79110")
p.village("Ellamaa", "79011")
p.village("Sooaluste", "79070")
p.village("Ahekõnnu", "79111")
p.village("Kalbu", "79061")
p.village("Mukri", "79013")
p.village("Saarepõllu", "79014")
p.village("Lokuta", "")
p.village("Põrsaku", "79524")
p.village("Hertu", "79521")
p.village("Pae", "79009")
p.village("Nadalama", "79068")
p.village("Lellapere-Nurme", "79066")
p.village("Lalli", "79017")
p.village("Nõlva", "79112")
p.village("Hiie", "79060")
p.village("Kenni", "79012")
p.village("Haakla", "79021")
p.village("Paluküla", "79019")
p.village("Saunaküla", "79526")
p.village("Koogiste", "79022")
p.village("Lellapere", "79065")
p.village("Valtu-Nurme", "79527")
p.village("Kumma", "79523")
p.village("Ingliste", "79004")
p.village("Põllu", "79020")
p.town("Rapla", "")
p.village("Uusküla", "")
p.smallborough("Hagudi", "")
p.smallborough("Alu", "")
p.village("Sulupere", "")
p.village("Mahlamäe", "")
p.village("Tuti", "")
p.smallborough("Kuusiku", "")
p.village("Kalevi", "")
p.village("Kuku", "")
p.village("Purila", "")
p.village("Valtu", "")
p.village("Kodila", "")
p.village("Sikeldi", "")
p.village("Ülejõe", "")
p.village("Oola", "79643")
p.village("Kõrgu", "79615")
p.village("Alu-Metsküla", "79605")
p.village("Seli", "79604")
p.village("Tõrma", "79622")
p.village("Seli-Nurme", "79634")
p.village("Röa", "79614")
p.village("Aranküla", "79640")
p.village("Oela", "79617")
p.village("Ohulepa", "79618")
p.village("Kodila-Metsküla", "79613")
p.village("Äherdi", "79644")
p.village("Juula", "79540")
p.village("Mõisaaseme", "79632")
p.village("Tapupere", "79621")
p.village("Ridaküla", "79528")
p.village("Palamulla", "79619")
p.village("Väljataguse", "79543")
p.village("Lipstu", "79519")
p.village("Kelba", "79611")
p.village("Koigi", "79630")
p.village("Kuusiku-Nõmme", "79518")
p.village("Mällu", "79616")
p.village("Nõmme", "79642")
p.village("Raka", "79620")
p.village("Iira", "79517")
p.municipality("Märjamaa", "")
p.village("Sõtke", "")
p.village("Maidla", "")
p.village("Sipa", "")
p.village("Moka", "78236")
p.village("Kasti", "78221")
p.village("Vana-Nurtu", "78258")
p.village("Kangru", "78220")
p.village("Sõmeru", "78255")
p.village("Purga", "78249")
p.village("Velise-Nõlva", "78260")
p.village("Laukna", "")
p.village("Napanurga", "78240")
p.village("Inda", "78216")
p.village("Pajaka", "78248")
p.village("Risu-Suurküla", "78252")
p.village("Veski", "78261")
p.village("Urevere", "78120")
p.village("Mõraste", "78116")
p.village("Loodna", "78113")
p.village("Kiilaspere", "78223")
p.village("Paisumaa", "78247")
p.village("Aravere", "78213")
p.village("Valgu", "78204")
p.village("Vanamõisa", "")
p.village("Konuvere", "78227")
p.village("Altküla", "78212")
p.village("Nõmmeotsa", "78242")
p.village("Rassiotsa", "78251")
p.village("Varbola", "78203")
p.village("Nurme", "78241")
p.village("Ohukotsu", "78244")
p.village("Mäliste", "78238")
p.village("Kohtru", "78226")
p.village("Russalu", "78317")
p.village("Leevre", "78112")
p.village("Keskküla", "78222")
p.village("Kaguvere", "78219")
p.village("Alaküla", "78211")
p.village("Metsküla", "")
p.village("Velise", "78201")
p.village("Orgita", "78313")
p.village("Naistevalla", "78239")
p.village("Paaduotsa", "78246")
p.village("Nurtu-Nõlva", "78207")
p.village("Velisemõisa", "78259")
p.village("Kilgi", "78224")
p.village("Paeküla", "78206")
p.village("Sulu", "78253")
p.village("Vaimõisa", "78205")
p.village("Kunsu", "78228")
p.village("Kirna", "78225")
p.village("Aruküla", "78214")
p.village("Vilta", "78121")
p.village("Võeva", "78262")
p.village("Lestima", "78232")
p.village("Käriselja", "78231")
p.village("Päädeva", "78314")
p.village("Ojaäärse", "78245")
p.village("Teenuse", "78103")
p.village("Nääri", "78243")
p.village("Põlli", "78250")
p.village("Sooniste", "78118")
p.village("Kohatu", "78111")
p.village("Jõeääre", "78218")
p.village("Haimre", "78202")
p.village("Luiste", "78114")
p.village("Männiku", "78117")
p.village("Suurküla", "78254")
p.village("Soosalu", "78119")
p.village("Rangu", "78315")
p.village("Ringuta", "78316")
p.village("Koluta", "78311")
p.village("Hiietse", "78215")
p.village("Kõrtsuotsa", "78229")
p.village("Jaaniveski", "78217")
p.village("Tolli", "78256")
p.village("Mõisamaa", "78237")
p.municipality("Järvakandi", "")
p.municipality("Juuru", "")
p.smallborough("Juuru", "")
p.village("Järlepa", "")
p.village("Pirgu", "")
p.village("Orguse", "")
p.village("Hõreda", "79010")
p.village("Härgla", "79404")
p.village("Atla", "79403")
p.village("Mahtra", "79407")
p.village("Vankse", "79406")
p.village("Helda", "79417")
p.village("Kalda", "79418")
p.village("Sadala", "79419")
p.village("Jaluse", "79410")
p.village("Lõiuse", "79405")
p.municipality("Kaiu", "")
p.village("Kuimetsa", "")
p.village("Oblu", "79312")
p.village("Karitsa", "79320")
p.village("Kasvandu", "79321")
p.village("Tolla", "79322")
p.village("Põlliku", "79325")
p.village("Vahastu", "79303")
p.village("Tamsi", "79313")
p.village("Vaopere", "79314")
p.village("Suurekivi", "79333")
p.village("Toomja", "79323")
p.village("Vana-Kaiu", "79324")
p.municipality("Käru", "")
p.village("Sonni", "")
p.village("Lauri", "79215")
p.village("Kõdu", "79212")
p.village("Kädva", "79213")
p.village("Lungu", "79216")
p.village("Kändliku", "79214")
p.village("Kullimaa", "79211")
p.village("Jõeküla", "79218")
p.municipality("Kohila", "")
p.smallborough("Prillimäe", "")
p.village("Lohu", "")
p.village("Urge", "")
p.smallborough("Hageri", "")
p.village("Salutaguse", "")
p.village("Vilivere", "")
p.village("Sutlema", "")
p.smallborough("Aespa", "")
p.village("Masti", "")
p.village("Pukamäe", "")
p.village("Angerja", "79741")
p.village("Lümandu", "")
p.village("Pihali", "79703")
p.village("Adila", "79704")
p.village("Loone", "79831")
p.village("Põikma", "79709")
p.village("Rootsi", "79815")
p.village("Kadaka", "79811")
p.village("Rabivere", "79610")
p.village("Aandu", "79810")
p.village("Mälivere", "79814")
p.village("Pahkla", "79742")
p.village("Vana-Aespa", "79748")
p.municipality("Vigala", "")
p.village("Vana-Vigala", "78003")
p.village("Kivi-Vigala", "78001")
p.village("Kojastu", "78016")
p.village("Palase", "78025")
p.village("Tiduvere", "78031")
p.village("Paljasmaa", "78026")
p.village("Rääski", "78029")
p.village("Naravere", "78022")
p.village("Oese", "78023")
p.village("Araste", "78011")
p.village("Kurevere", "78018")
p.village("Manni", "78021")
p.village("Vaguja", "78032")
p.village("Tõnumaa", "78004")
p.village("Ojapere", "78024")
p.village("Kausi", "78014")
p.village("Vängla", "78034")
p.village("Kesu", "78015")
p.village("Pallika", "78027")
p.village("Jädivere", "78013")
p.village("Sääla", "78030")
p.village("Läti", "78020")
p.village("Leibre", "78019")
p.village("Konnapere", "78017")
p.village("Päärdu", "78028")
p.village("Avaste", "78012")
p.municipality("Raikküla", "")
p.village("Tamme", "")
p.village("Loe", "")
p.village("Nõmmküla", "78408")
p.village("Jalase", "78415")
p.village("Purku", "78401")
p.village("Pühatu", "78421")
p.village("Keo", "78404")
p.village("Vahakõnnu", "78411")
p.village("Põlma", "78410")
p.village("Lipa", "78405")
p.village("Lipametsa", "78406")
p.village("Raela", "78409")
p.village("Kõrvetaguse", "78417")
p.village("Riidaku", "78422")
p.village("Koikse", "78416")
p.village("Valli", "78413")
p.village("Nõmmemetsa", "78420")
p.village("Kaigepere", "78412")
p.village("Ummaru", "78423")
p.village("Lõpemetsa", "78419")
|
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from bulletin_board_bot.dependencies import DIContainer
class DIContainerMiddleware(BaseMiddleware):
def __init__(self, container: DIContainer):
super().__init__()
self._container = container
async def on_process_message(self, message: types.Message, data: dict):
data["container"] = self._container
async def on_process_callback_query(self, call: types.CallbackQuery, data: dict):
data["container"] = self._container
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 19:02:42 2019
@author: zoescrewvala
"""
import os
import cartopy
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import xarray as xr
#%% X-Y PLOT
#ds = Dataset(os.getcwd() + '/Output/simulations/CanESM2/R1_CanESM2_rcp26_c1_ba1_1sets_2000_2100.nc')
#alphabetical order
#sim_list = ['CanESM2', 'CCSM4', 'CSIRO-Mk3-6-0', 'CNRM-CM5', 'GFDL-CM3', 'GFDL-ESM2M', 'GISS-E2-R', 'IPSL-CM5A-LR', 'MPI-ESM-LR', 'NorESM1-M']
#mass bal order
sim_list = ['CSIRO-Mk3-6-0', 'CNRM-CM5', 'GISS-E2-R', 'GFDL-ESM2M', 'CCSM4', 'MPI-ESM-LR', 'NorESM1-M', 'CanESM2', 'GFDL-CM3', 'IPSL-CM5A-LR']
rcp_list = ['26', '45']
RCP_list = ['RCP 2.6', 'RCP 4.5', 'RCP 8.5']
vol = []
vol_regional = []
vol_regional_all = np.zeros((len(sim_list),122), dtype=float)
# set up plot
fig, ax = plt.subplots(1, 2, squeeze=False, sharex=False, sharey=False, gridspec_kw = {'wspace':0.2, 'hspace':0.05})
for j in range(len(rcp_list)):
for i in range(len(sim_list)):
# specific GCM
ds = xr.open_dataset(os.getcwd() + '/../Output/simulations/' + sim_list[i] + '/R1_' + sim_list[i] + '_rcp' + rcp_list[j] + '_c1_ba1_1sets_1980_2100.nc')
time = ds.variables['year_plus1'].values[:]
vol = ds.variables['volume_glac_annual'].values[:,:,0]
vol_regional = np.sum(vol, axis=0)
vol_regional_norm = vol_regional/vol_regional[0]
ax[0,j].plot(time[37:], vol_regional_norm[37:], linewidth=1, zorder=2, label=sim_list[i])
# GCM averages background
vol_regional_all[i] = vol_regional_norm
vol_regional_average = np.average(vol_regional_all, axis=0)
std = np.std(vol_regional_all, axis=0)
x_values=time[37:121]
y_values=vol_regional_average[37:121]
error = std[37:121]
ax[0,j].plot(x_values, y_values, color='k', linewidth=1, zorder=3, label='Average +/- st. dev.')
ax[0,j].fill_between(x_values, y_values-error, y_values+error, color='k', alpha=0.2, linewidth=0.4)
ds.close()
#%%
#dens_ice = 917 # in kg/m^3
#mb = ds.loc[:,'mb_mwea']
#area = ds.loc[:,'area']
#mb_uncertainty = ds.loc[:,'mb_mwea_sigma']
# variables for vol over time plot
# variables
#time = ds.variables['year_plus1'].values[:]
#vol = ds.variables['volume_glac_annual'].values[:,:,0]
#vol_regional = np.sum(vol, axis=0)
#vol_init = np.sum(vol[:,:,0][:,-1])
#vol_norm = vol_norm/vol_init
# X,Y values
#x_values = time
#y_values = vol_regional/vol_regional[0]
#y2_values = ds.loc[...]
# Set up your plot (and/or subplots)
#fig, ax = plt.subplots(1, 1, squeeze=False, sharex=False, sharey=False, gridspec_kw = {'wspace':0.4, 'hspace':0.15})
# Plot
# zorder controls the order of the plots (higher zorder plots on top)
# label used to automatically generate legends (legends can be done manually for more control)
#ax[0,0].plot(x_values, y_values, color='k', linewidth=1, zorder=2, label='plot1')
#ax[0,0].scatter(x_values, y_values, color='k', zorder=2, s=2)
#ax[0,0].scatter(x_values, y_values[7,:], color='m', zorder=2, s=2)
#ax[0,0].plot(x_values, y2_values, color='b', linewidth=1, zorder=2, label='plot2')
# Fill between
# fill between is useful for putting colors between plots (e.g., error bounds)
#ax[0,0].fill_between(x, y_low, y_high, facecolor='k', alpha=0.2, zorder=1)
# Text
# text can be used to manually add labels or to comment on plot
# transform=ax.transAxes means the x and y are between 0-1
ax[0,j].text(0.5, 1.02, RCP_list[j], size=20, fontweight='extra bold', horizontalalignment='center', verticalalignment='baseline',
transform=ax[0,j].transAxes)
# X-label
# ax[0,0].set_xlabel('Year', size=12)
#ax[0,0].set_xlim(0,1.1)
#ax[0,0].xaxis.set_tick_params(labelsize=12)
ax[0,j].xaxis.set_major_locator(plt.MultipleLocator(40))
ax[0,j].xaxis.set_minor_locator(plt.MultipleLocator(5))
#ax[0,0].set_xticklabels(['2020','2060','2100'])
# Y-label
ax[0,j].set_ylim(0,1.1)
ax[0,j].yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax[0,j].yaxis.set_minor_locator(plt.MultipleLocator(0.05))
ax[0,j].grid(c='k', alpha=0.2, which='major', lw=0.25)
# Tick parameters
# controls the plotting of the ticks
#ax[0,0].yaxis.set_ticks_position('both')
ax[0,j].tick_params(axis='both', which='major', labelsize=20, direction='inout')
#ax[0,0].tick_params(axis='both', which='minor', labelsize=12, direction='inout')
if rcp_list[j] == '26':
# Example Legend
# Option 1: automatic based on labels
ax[0,0].legend(loc=(0.05, 0.05), fontsize=16, labelspacing=0.25, handlelength=1, handletextpad=0.25, borderpad=0,
frameon=False)
# Option 2: manually define legend
#leg_lines = []
#labels = ['plot1', 'plot2']
#label_colors = ['k', 'b']
#for nlabel, label in enumerate(labels):
# line = Line2D([0,1],[0,1], color=label_colors[nlabel], linewidth=1)
# leg_lines.append(line)
#ax[0,0].legend(leg_lines, labels, loc=(0.05,0.05), fontsize=10, labelspacing=0.25, handlelength=1,
# handletextpad=0.25, borderpad=0, frameon=False)
ax[0,0].set_ylabel('Mass [normalized]', size=20, fontweight='bold')
# Save figure
# figures can be saved in any format (.jpg, .png, .pdf, etc.)
fig.set_size_inches(18, 6)
figure_fp = os.getcwd() + '/../Output/plots/vol_norm/'
if os.path.exists(figure_fp) == False:
os.makedirs(figure_fp)
figure_fn = 'mass_norm_allrcps.png'
fig.savefig(figure_fp + figure_fn, bbox_inches='tight', dpi=300)
|
# Generated by Django 3.0.6 on 2020-05-29 18:29
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('qualiCar_API', '0009_auto_20200529_1802'),
]
operations = [
migrations.AlterField(
model_name='incident',
name='create_on',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 29, 18, 29, 59, 40665, tzinfo=utc)),
),
migrations.AlterField(
model_name='incident',
name='last_change_on',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 29, 18, 29, 59, 40690, tzinfo=utc)),
),
]
|
from django.shortcuts import render
from django.views import generic
# Create your views here or die
class MentorSettingsView(generic.TemplateView):
template_name = 'mentor/settings.html'
|
#!/usr/bin/env python
#
# NopSCADlib Copyright Chris Palmer 2018
# nop.head@gmail.com
# hydraraptor.blogspot.com
#
# This file is part of NopSCADlib.
#
# NopSCADlib is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# NopSCADlib is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with NopSCADlib.
# If not, see <https://www.gnu.org/licenses/>.
#
#
#! Makes this document and doc/usage.md.
#
from __future__ import print_function
import os
from tests import do_cmd
import argparse
dir = 'scripts'
def doc_scripts():
doc_name = dir + '/readme.md'
with open(doc_name, 'wt') as doc_file:
print(
'''
# Python scripts
These are located in the `scripts` subdirectory, which needs to be added to the program search path.
They should work with both Python 2 and Python 3.
| Script | Function |
|:---|:---|''', file = doc_file)
for file in os.listdir('scripts'):
if file.endswith('.py'):
blurb = ''
with open(dir + '/' + file, 'rt') as f:
lines = f.readlines()
for line in lines:
if line == "if __name__ == '__main__':\n":
break
else:
continue
for line in lines[1:]:
if line.startswith('#! '):
line = line.replace('~\n', ' \n')
blurb = blurb + line[3 : -1]
if line.startswith("def "):
break
if not blurb:
print("Missing description for", file)
else:
print("| `%s` | %s |" % (file, blurb), file = doc_file)
with open(dir + "/readme.html", "wt") as html_file:
do_cmd(("python -m markdown -x tables " + doc_name).split(), html_file)
with open("docs/usage.html", "wt") as html_file:
do_cmd(("python -m markdown -x tables docs/usage.md").split(), html_file)
#
# Spell check
#
do_cmd(('codespell -L od ' + doc_name).split())
do_cmd(('codespell -L od docs/usage.md').split())
if __name__ == '__main__':
argparse.ArgumentParser(description='Generate scripts/readme.md and make html versions of that and doc/usage.md').parse_args()
doc_scripts()
|
import torch
import torch.nn as nn
from utils import transform_forward, transform_backward
def sqdist(X, Y):
assert X.size()[1] == Y.size()[1], 'dimensions do not match'
return ((X.reshape(X.size()[0], 1, X.size()[1])
- Y.reshape(1, Y.size()[0], Y.size()[1]))**2).sum(2)
class Constant(nn.Module):
def __init__(self, variance=1.0):
super(Constant, self).__init__()
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if X2 is None:
shape = [X.size()[0], X.size()[0]]
else:
shape = [X.size()[0], X2.size()[0]]
return transform_forward(self.variance) * torch.ones(shape[0], shape[1])
class RBF(nn.Module):
def __init__(self, dim, variance=1.0, lengthscale=None):
super(RBF, self).__init__()
self.dim = torch.tensor([dim], requires_grad=False)
if lengthscale is None:
self.lengthscale \
= torch.nn.Parameter(transform_backward(torch.ones(1, dim)))
else:
self.lengthscale = torch.nn.Parameter(transform_backward(torch.tensor(lengthscale)))
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if X2 is None:
X2 = X
l = transform_forward(self.lengthscale)
return transform_forward(self.variance)*(-0.5*sqdist(X/l, X2/l)).exp()
class Linear(nn.Module):
def __init__(self, dim, variance=1.0, lengthscale=None):
super(Linear, self).__init__()
self.dim = torch.tensor([dim], requires_grad=False)
if lengthscale is None:
self.lengthscale = torch.nn.Parameter(transform_backward(torch.ones(1, dim)))
else:
self.lengthscale = torch.nn.Parameter(transform_backward(torch.tensor(lengthscale)))
self.variance = torch.nn.Parameter(transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if X2 is None:
X2 = X
l = transform_forward(self.lengthscale)
return transform_forward(self.variance) * torch.mm(X / l, (X2 / l).t())
class White(nn.Module):
# when X != X2, K(X, X2) = 0
def __init__(self, dim, variance=1.0):
super(White, self).__init__()
self.dim = torch.tensor([dim], requires_grad=False)
self.variance = torch.nn.Parameter(
transform_backward(torch.tensor([variance])))
def forward(self, X, X2=None):
if X2 is None:
return torch.eye(X.size()[0])*transform_forward(self.variance)
else:
return 0.
class Add(nn.Module):
def __init__(self, k1, k2):
super(Add, self).__init__()
self.k1 = k1
self.k2 = k2
@property
def variance(self):
return transform_backward(transform_forward(self.k1.variance)
+ transform_forward(self.k2.variance))
def forward(self, X, X2=None):
return self.k1(X, X2) + self.k2(X, X2)
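# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Builds a composite RBF + White kernel and evaluates its covariance matrix on random
# inputs. It assumes `transform_forward`/`transform_backward` from `utils` behave as a
# positivity transform and its inverse, as the classes above imply.
if __name__ == "__main__":
    X = torch.randn(5, 3)
    kernel = Add(RBF(dim=3, variance=1.0), White(dim=3, variance=0.1))
    K = kernel(X)  # 5 x 5 covariance matrix; White only adds jitter on the diagonal
    print(K.shape, K.diag())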
|
import numpy as np
import matplotlib.pyplot as plt
import msgpack
import os
from pathlib import Path
import argparse
plt.switch_backend('agg')
def file_load(indir, outdir, savefigbool, filename):
file_count = 0
current_path_name = Path().resolve()
Path('{}/output-figures'.format(current_path_name)).mkdir(parents=True, exist_ok=True)
max_file_count = 5 # Number of digits in the filename.
for name in Path(indir).iterdir():
file_count += 1
chkpt = msgpack.load(open(name, 'rb'))
mesh = chkpt['mesh']
prim = np.zeros([mesh['ni'], mesh['nj'], 3])
for patch in chkpt['primitive_patches']:
i0 = patch['rect'][0]['start']
j0 = patch['rect'][1]['start']
i1 = patch['rect'][0]['end']
j1 = patch['rect'][1]['end']
local_prim = np.array(np.frombuffer(patch['data'])).reshape([i1 - i0, j1 - j0, 3])
prim[i0:i1, j0:j1] = local_prim
plt.imshow(prim[:,:,0].T, origin='lower')
plt.title(r"{} $\Sigma^{{1/4}}$".format(name))
file_count_str = str(file_count)
if len(file_count_str) < max_file_count:
file_count_str = ('0' * (max_file_count - len(file_count_str))) + file_count_str
fname = '{}/output-figures/movie-{}.png'.format(current_path_name, file_count_str)
print(fname)
plt.savefig(fname, dpi=600)
make_movie(current_path_name, outdir, filename, max_file_count)
if savefigbool is False:
os.system("rm -rf {}/{}".format(current_path_name, 'output-figures'))
def make_movie(current_path, outdir, filename, max_count):
Path('{}/{}'.format(current_path, outdir)).mkdir(parents=True, exist_ok=True)
command = "ffmpeg -start_number 1 -i {}/output-figures/movie-%0{}d.png -c:v libx264 -vb 20M -r 30 -pix_fmt yuv420p -filter:v 'setpts=2*PTS' -y {}/movie-{}.mp4".format(current_path, max_count, outdir, filename)
os.system(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--indir', default='', help='Checkpoint file directory.', required=True)
parser.add_argument('--outdir', default='movie', help='Output movie directory.')
parser.add_argument('--filename', default='movie', help='Output movie name.')
parser.add_argument('--savefigs', action='store_true', help='Whether the program saves the figures used to make the movie.')
args = parser.parse_args()
file_load(args.indir, args.outdir, args.savefigs, args.filename)
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Official solution:
# https://leetcode.com/problems/sort-list/solution/
# Details on merge sort is inside
# Merge Sort
class Solution:
def sortList(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
fast, slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
start = slow.next
slow.next = None
l, r = self.sortList(head), self.sortList(start)
return self.merge(l, r)
def merge(self, l, r):
if not l or not r:
return l or r
dummy = p = ListNode(0)
while l and r:
if l.val < r.val:
p.next = l
l = l.next
else:
p.next = r
r = r.next
p = p.next
p.next = l or r
return dummy.next
# Time: O(N log N), where N is the number of nodes in the linked list. The algorithm splits into 2 phases, Split and Merge.
# Space: O(log N), where N is the number of nodes in the linked list. Since the solution is recursive, we need additional
# space to store the recursion call stack. The maximum depth of the recursion tree is log N.
# quick sort
# Quicksort is also one of the efficient algorithms with the average time complexity of
# O(nlogn). But the worst-case time complexity is O(n^2). Also, variations of the quick sort
# like randomized quicksort are not efficient for the linked list because unlike arrays,
# random access in the linked list is not possible in O(1) time. If we sort the linked list
# using quicksort, we would end up using the head as a pivot element which may not be efficient in all scenarios.
class Solution(object):
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
def partition(start, end):
node = start.next.next
pivotPrev = start.next
pivotPrev.next = end
pivotPost = pivotPrev
while node != end:
temp = node.next
if node.val > pivotPrev.val:
node.next = pivotPost.next
pivotPost.next = node
elif node.val < pivotPrev.val:
node.next = start.next
start.next = node
else:
node.next = pivotPost.next
pivotPost.next = node
pivotPost = pivotPost.next
node = temp
return [pivotPrev, pivotPost]
def quicksort(start, end):
if start.next != end:
prev, post = partition(start, end)
quicksort(start, prev)
quicksort(post, end)
newHead = ListNode(0)
newHead.next = head
quicksort(newHead, None)
return newHead.next
# Time: best/average is O(N log N), worst is O(N^2)
# Space: O(log N) on average for the recursion stack, O(N) in the worst case
|
####################################
# File name: SiteScan_Image_Formatter_Source.py
# About: Embeds Drone Flight CSV GPS Info into Image Metadata/EXIF
# Version for Executable compilation
# Author: Geoff Taylor | Imagery & Remote Sensing Team | Esri
# Date created: 12/12/2019
# Date last modified: 12/13/2019
# Python Version: 3.7
####################################
"""
Dependencies: piexif & pillow
if ArcGIS Pro install access conda via command by entering:
"%PROGRAMFILES%\ArcGIS\Pro\bin\Python\Scripts\proenv"
install piexif by entering:
conda install -c conda-forge piexif
install pillow by entering:
conda install -c anaconda pillow
install Auto Py To EXE:
pip install auto-py-to-exe
To Compile run:
auto-py-to-exe
"""
from piexif import dump, insert, GPSIFD
#import csv
from csv import reader, DictReader
#from PIL import Image
from fractions import Fraction
from os import path, remove
def to_deg(value, loc):
"""convert decimal coordinates into degrees, munutes and seconds tuple
Keyword arguments: value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]
return: tuple like (25, 13, 48.343 ,'N')
"""
if value < 0:
loc_value = loc[0]
elif value > 0:
loc_value = loc[1]
else:
loc_value = ""
abs_value = abs(value)
deg = int(abs_value)
t1 = (abs_value-deg)*60
min = int(t1)
sec = round((t1 - min) * 60, 5)
return deg, min, sec, loc_value
def change_to_rational(number):
"""convert a number to rantional
Keyword arguments: number
return: tuple like (1, 2), (numerator, denominator)
"""
f = Fraction(str(number))
return f.numerator, f.denominator
def set_gps_location(file_name, lat, lng, altitude):
"""Adds GPS position as EXIF metadata
Keyword arguments:
file_name -- image file
lat -- latitude (as float)
lng -- longitude (as float)
altitude -- altitude (as float)
"""
lat_deg = to_deg(lat, ["S", "N"])
lng_deg = to_deg(lng, ["W", "E"])
exiv_lat = (change_to_rational(lat_deg[0]), change_to_rational(lat_deg[1]), change_to_rational(lat_deg[2]))
exiv_lng = (change_to_rational(lng_deg[0]), change_to_rational(lng_deg[1]), change_to_rational(lng_deg[2]))
gps_ifd = {
GPSIFD.GPSVersionID: (2, 0, 0, 0),
GPSIFD.GPSAltitudeRef: 1,  # per the EXIF spec: 0 = above sea level, 1 = below sea level
GPSIFD.GPSAltitude: change_to_rational(round(altitude)),
GPSIFD.GPSLatitudeRef: lat_deg[3],
GPSIFD.GPSLatitude: exiv_lat,
GPSIFD.GPSLongitudeRef: lng_deg[3],
GPSIFD.GPSLongitude: exiv_lng,
}
exif_dict = {"GPS": gps_ifd}
exif_bytes = dump(exif_dict)
insert(exif_bytes, file_name)
'''
def getGPSInfo(inFile):
""" Prints the GPS Information for a given Image
Keyword
arguments:
inFile -- image File with fill path
"""
img = Image.open(inFile)
exif_dict = piexif.load(img.info['exif'])
latitude = exif_dict['GPS'][piexif.GPSIFD.GPSLatitude]
longitude = exif_dict['GPS'][piexif.GPSIFD.GPSLongitude]
altitude = exif_dict['GPS'][piexif.GPSIFD.GPSAltitude]
print(latitude)
print(longitude)
print(altitude)
'''
def write_list_to_file(inList, csvFile):
"""Write a list to a csv file.
Keyword
arguments:
inList -- input Python structured list
csvFile -- csv file to write output data to
"""
if path.exists(csvFile):
remove(csvFile)
with open(csvFile, "w") as outfile:
for entries in inList:
outfile.write(entries)
outfile.write("\n")
def printHeaderInfo(gpscsv):
headerColumnsString = ""
with open(gpscsv, 'r') as infile:
reader = DictReader(infile)
fieldnames = reader.fieldnames
count = 0
for i in fieldnames:
upd = "{0} = {1} | ".format(i.strip(), count)
headerColumnsString = headerColumnsString + upd
count += 1
return headerColumnsString
def main(sourceImageFolder,
gpscsv,
errorLogCSV,
img_Name_Column,
Lat_Y_Column,
Long_X_Column,
Alt_Z_Column):
""" Embeds Drone Flight CSV GPS Info into Image Metadata/EXIF for all images in a given folder
arguments:
sourceImageFolder -- input folder containing images for embedding GPS Info.
gpscsv -- input CSV containing Image Name, Lat, Lon, Alt attributes
img_Name_Column -- Column in CSV containing the image file name ex: flight_07.jpg
Lat_Y_Column -- Column in CSV containing the latitude (Y) value
Long_X_Column -- Column in CSV containing the longitude (X) value
Alt_Z_Column -- Column in CSV containing the altitude (Z) value
errorLogCSV -- CSV file where names of images missing from the source folder are logged
"""
failedFiles = []
with open(gpscsv) as csvfile:
readCSV = reader(csvfile, delimiter=',')
next(readCSV, None) # Skip the header
for row in readCSV:
imgFile = path.join(sourceImageFolder, row[img_Name_Column])
if not path.exists(imgFile):
print("Skipped {0} as file does not exist in Source Image Folder Location".format(row[img_Name_Column]))
failedFiles.append(row[img_Name_Column])
else:
print("{0} Processed Successfully".format(imgFile))
set_gps_location(imgFile, float(row[Lat_Y_Column]), float(row[Long_X_Column]), float(row[Alt_Z_Column]))
if len(failedFiles) != 0:
print("could not locate {0} files at path specified".format(len(failedFiles)))
print("appending names of failed files to CSV to errorLog {0}".format(errorLogCSV))
write_list_to_file(failedFiles, errorLogCSV)
print("see {0} for list of failed files... locate the files and reprocess".format(errorLogCSV))
del failedFiles
if __name__ == "__main__":
#####################
# User Input Values
#####################
print("Tool for Embedding Drone Flight CSV GPS Info into Image Metadata/EXIF")
print(" Optimizing Flight Imagery for Esri SiteScan")
print("#####################################################################")
print("")
sourceImageFolder = input("Input Flight Source Image Folder | example (C:/Images) : ")
gpscsv = input("Input Flight GPS CSV | example (C:/flightgps.csv) : ")
errorLogCSV = input("Choose where to write Error Log CSV | example (C:/errorlog.csv) : ")
print("Align the CSV columns by entering the appropriate info below: {0}".format(printHeaderInfo(gpscsv)))
img_Name_Column = int(input("Input Image Name Column : "))
Lat_Y_Column = int(input("Input Y Column : "))
Long_X_Column = int(input("Input X Column : "))
Alt_Z_Column = int(input("Input Z Column : "))
print(" ")
print("Processing Data:")
print(" ")
################
# Begin Script
################
main(sourceImageFolder, gpscsv, errorLogCSV, img_Name_Column, Lat_Y_Column, Long_X_Column, Alt_Z_Column)
|
import pygame
import random
import math
from userSession import userSession
"""
Pygame Pursuit-Evader Simulation
"""
"""
Stage 1: One human, one robot. Human is the pursuer and robot is the evader who aims to get a target from two possibilities.
Experiment set up: Record the EEG signals of the human pursuer under different conditions. The conditions include (rank by priority):
1. Different types of trajectories, e.g., staying ambiguous, zig-zagging, etc.
2. Different distances between the real target and the misleading target.
3. Different initial locations of the evader.
4. Different initial locations of the pursuer.
5. How important is the target. For example, the human is told a score related to each target before they start the chasing.
"""
# GLOBAL VARIABLES FOR RGB COLORS
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GREY = (128, 128, 128)
class pursuitEvasionSimulation():
def __init__(self):
# initializes pygame
pygame.init()
# title on top of game window
pygame.display.set_caption("Pursuit-Evasion Simulation")
# window size
self.window = pygame.display.set_mode((1000,1000))
# create user session class for pursuer and set up
self.pursuer = userSession()
# initial conditions
self.pursuePos = [random.randint(75,125), random.randint(875,925)]
self.evadePos = [random.randint(175,225), random.randint(775,825)]
self.realTarget = [random.randint(700,900), 125]
self.falseTarget = [random.randint(200,400), 125]
self.slope = (self.realTarget[1]-self.evadePos[1])/(self.realTarget[0]-self.evadePos[0])
self.midpoint = [((self.realTarget[0]+self.falseTarget[0])//2), 125]
self.verticalSpeed = 30
self.targetRadius = 20
self.width = 16
self.height = 24
self.velocity = 10
self.run = True
# simulates pursuit-evasion game
def simulation(self):
# initialize pursuer session and mark
self.pursuer.openSession()
# begin recording
self.pursuer.beginRecord()
# pick random strategy for evader
evadeStrat = random.randint(0,2)
while self.run:
pygame.time.delay(50)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.run = False
self.updateDisplay()
self.pursuerEEGController()
self.chooseEvadeStrat(evadeStrat)
if self.verticalSpeed != 0:
self.verticalSpeed -= 1
self.quit()
# sets up graphical interface
def updateDisplay(self):
self.window.fill(WHITE)
# draw pursuer
pygame.draw.rect(self.window, RED, (self.pursuePos[0], self.pursuePos[1], self.width, self.height))
# draw evader
pygame.draw.rect(self.window, BLUE, (self.evadePos[0], self.evadePos[1], self.width, self.height))
# draw targets
pygame.draw.circle(self.window, GREY, self.realTarget, self.targetRadius)
pygame.draw.circle(self.window, GREY, self.falseTarget, self.targetRadius)
# update display
pygame.display.update()
def quit(self):
pygame.quit()
# close pursuer session
self.pursuer.close()
# choose evade strategy
def chooseEvadeStrat(self, strat):
if strat == 0:
self.exaggeratingPath()
elif strat == 1:
self.switchingPath()
else:
self.ambiguousPath()
# exaggerating evasion technique
def exaggeratingPath(self):
if (self.evadePos[0] < self.realTarget[0]) and (self.evadePos[1] > self.realTarget[1]):
self.evadePos[0] += 5 # x = x + 5
self.evadePos[1] += (2.5*self.slope - self.verticalSpeed) # y += 2.5*m - verticalSpeed (verticalSpeed decays each frame)
# go directly towards target once target x-pos or y-pos is reached
else:
if self.evadePos[0] < self.realTarget[0]:
self.evadePos[0] += self.velocity
if self.evadePos[1] > self.realTarget[1]:
self.evadePos[1] -= self.velocity
# switching evasion technique
def switchingPath(self):
timeElapsed = pygame.time.get_ticks() // 1000
# travel in sinusoidal trajectory towards target
if (self.evadePos[0] < self.realTarget[0]) and (self.evadePos[1] > self.realTarget[1]):
if timeElapsed == 0:
self.evadePos[0] += self.velocity
self.evadePos[1] += self.velocity*self.slope
else:
self.evadePos[0] += (25*math.sin(timeElapsed*self.velocity) + 2.5) # x += 25*sin(velocity*t) + 2.5
self.evadePos[1] += (self.slope - timeElapsed) # y = m - t
# go directly towards target once target x-pos or y-pos is reached
else:
if self.evadePos[0] < self.realTarget[0]:
self.evadePos[0] += self.velocity
if self.evadePos[1] > self.realTarget[1]:
self.evadePos[1] -= self.velocity
# ambiguous evasion technique - needs to be edited
def ambiguousPath(self):
if self.evadePos[0] < self.midpoint[0]:
self.evadePos[0] += self.velocity
self.evadePos[1] += self.slope
elif self.evadePos[1] > self.realTarget[1]:
self.evadePos[1] -= self.velocity
# go directly towards target once target x-pos or y-pos is reached
else:
if self.evadePos[0] < self.realTarget[0]:
self.evadePos[0] += self.velocity
if self.evadePos[1] > self.realTarget[1]:
self.evadePos[1] -= self.velocity
# controls given to pursuer
def pursuerEEGController(self):
action = self.pursuer.streamLineData()
# left
if action == "left" and self.pursuePos[0] > self.velocity:
self.pursuePos[0] -= self.velocity
# right
if action == "right" and self.pursuePos[0] < 1000 - self.width - self.velocity:
self.pursuePos[0] += self.velocity
# up
if action == "lift" and self.pursuePos[1] > self.velocity:
self.pursuePos[1] -= self.velocity
# down
if action == "drop" and self.pursuePos[1] < 1000 - self.height - self.velocity:
self.pursuePos[1] += self.velocity
# manual controls to pursue - implemented for testing purposes
def pursuerManualController(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT] and self.pursuePos[0] > self.velocity:
self.pursuePos[0] -= self.velocity
if keys[pygame.K_RIGHT] and self.pursuePos[0] < 1000 - self.width - self.velocity:
self.pursuePos[0] += self.velocity
if keys[pygame.K_UP] and self.pursuePos[1] > self.velocity:
self.pursuePos[1] -= self.velocity
if keys[pygame.K_DOWN] and self.pursuePos[1] < 1000 - self.height - self.velocity:
self.pursuePos[1] += self.velocity
if __name__ == "__main__":
sim = pursuitEvasionSimulation()
sim.simulation()
|
# ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: Yusuke Yasuda (yasuda@nii.ac.jp)
# All rights reserved.
# ==============================================================================
""" """
import tensorflow as tf
from tensorflow.contrib.seq2seq import BahdanauAttention
class TeacherForcingForwardAttention(BahdanauAttention):
def __init__(self,
num_units,
memory,
memory_sequence_length,
teacher_alignments,
name="ForwardAttention"):
super(TeacherForcingForwardAttention, self).__init__(
num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
probability_fn=None,
name=name)
self.teacher_alignments = teacher_alignments
def __call__(self, query, state):
previous_alignments, prev_index = state
index = prev_index + 1
alignments = self.teacher_alignments[:, index]
next_state = (alignments, index)
return alignments, next_state
@property
def state_size(self):
return self._alignments_size, 1
def initial_state(self, batch_size, dtype):
initial_alignments = self.initial_alignments(batch_size, dtype)
initial_index = tf.to_int64(-1)
return initial_alignments, initial_index
class TeacherForcingAdditiveAttention(BahdanauAttention):
def __init__(self,
num_units,
memory,
memory_sequence_length,
teacher_alignments,
name="BahdanauAttention"):
super(TeacherForcingAdditiveAttention, self).__init__(
num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
probability_fn=None,
name=name)
self.teacher_alignments = teacher_alignments
def __call__(self, query, state):
previous_alignments, prev_index = state
index = prev_index + 1
alignments = self.teacher_alignments[:, index]
next_state = (alignments, index)
return alignments, next_state
@property
def state_size(self):
return self._alignments_size, 1
def initial_state(self, batch_size, dtype):
initial_alignments = self.initial_alignments(batch_size, dtype)
initial_index = tf.to_int64(-1)
return initial_alignments, initial_index
|
# Kivy Tutorial 002: Improving the appearance, customizing widgets using
# Kivy Properties
# http://inclem.net/2019/12/18/kivy/kivy_tutorial_002_improving_appearance/
from kivy.app import App
from kivy.uix.label import Label
class YourApp(App):
def build(self):
# root_widget = Label()
# root_widget.text = 'Hello world!'
# or
root_widget = Label(font_size=100, bold=True, markup=True)
root_widget.text = '[color=#0000CD]Hello[/color] [color=#00FF7F]world!\
[/color]'
# to color text you must use markup=True, set the text property
# separately, and follow the pattern above
# https://www.homehost.com.br/blog/tutoriais/tabela-de-cores-html/
return root_widget
if __name__ == '__main__':
YourApp().run()
|
#!/usr/bin/env python
# coding: utf-8
'''
Description:
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
Tags: Math, Binary Search
Analysis:
Multiplication, division and modulo are not allowed, leaving only addition, subtraction and bit operations.
(In bit operations, a left shift by 1 is equivalent to "x2".)
The straightforward approach: repeatedly subtract the divisor from the dividend; double the subtrahend each round to speed things up. (Watch out for overflow.)
'''
class Solution(object):
# O(logn) runtime, O(1) space
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
MAX_INT, MIN_INT = 2**31 - 1, -(2**31)
sign = 1 if (dividend > 0 and divisor > 0) or (dividend < 0 and divisor < 0) else -1
dividend, divisor = abs(dividend), abs(divisor)
result = 0
while dividend >= divisor:
inc = divisor
i = 0
while dividend >= inc:
dividend -= inc
result += 1 << i
inc <<= 1
i += 1
result = result*sign
return result if MIN_INT <= result <= MAX_INT else MAX_INT
if __name__ == '__main__':
print(Solution().divide(123, 12))
print(Solution().divide(123, -12))
print(Solution().divide(-123, 12))
print(Solution().divide(-123, -12))
|
import os
from collections import namedtuple
from subprocess import check_output
__all__ = ["Runtime", "get_runtimes"]
Runtime = namedtuple("Runtime", "name version path")
def get_runtimes(): # -> List[Runtime]
runtimes_l = check_output(["dotnet", "--list-runtimes"])
runtimes_l = runtimes_l.decode("utf8").splitlines()
runtimes = []
for line in runtimes_l:
name, version, path = line.split(" ", 2)
path = os.path.join(path[1:-1], version)
runtimes.append(Runtime(name=name, version=version, path=path))
return runtimes
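# Hedged usage sketch (added for illustration): requires the `dotnet` CLI on PATH;
# the printed fields mirror the Runtime namedtuple defined above.
if __name__ == "__main__":
    for rt in get_runtimes():
        print("{} {} -> {}".format(rt.name, rt.version, rt.path))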
|
from threescale_api.utils import request2curl
def test_request2curl():
URL = "http://example.invalid"
HEADERS = {"X-Header": "this"}
HEADERS_STR = "-H 'X-Header: this'"
DATA = {'key': 'value'}
BODY_STR = "-d key=value"
request = _Request("GET", URL, None, None)
assert request2curl(request) == f"curl -X GET {URL}"
request = _Request("GET", URL, HEADERS, None)
assert request2curl(request) == f"curl -X GET {HEADERS_STR} {URL}"
request = _Request("POST", URL, HEADERS, DATA)
assert request2curl(request) == f"curl -X POST {HEADERS_STR} {BODY_STR} {URL}"
request = _Request("PUT", URL, None, DATA, True)
assert request2curl(request) == f"curl -X PUT {BODY_STR} {URL}"
class _Request:
def __init__(self, method, url, headers, data, encode=False):
self.method = method
self.url = url
self.headers = headers
self.body = None
if data:
self.body = "&".join([f"{key}={value}" for key, value in data.items()])
if encode and self.body:
self.body = self.body.encode("utf-8")
|
# -*- coding: utf-8 -*-
import numpy as np
__all__ = ["get_latitude_lines", "get_longitude_lines"]
def get_latitude_lines(dlat=np.pi / 6, npts=1000, niter=100):
res = []
latlines = np.arange(-np.pi / 2, np.pi / 2, dlat)[1:]
for lat in latlines:
theta = lat
for n in range(niter):
theta -= (2 * theta + np.sin(2 * theta) - np.pi * np.sin(lat)) / (
2 + 2 * np.cos(2 * theta)
)
x = np.linspace(-2, 2, npts)
y = np.ones(npts) * np.sin(theta)
a = 1
b = 2
y[(y / a) ** 2 + (x / b) ** 2 > 1] = np.nan
res.append((x, y))
return res
def get_longitude_lines(dlon=np.pi / 6, npts=1000, niter=100):
res = []
lonlines = np.arange(-np.pi, np.pi, dlon)[1:]
for lon in lonlines:
lat = np.linspace(-np.pi / 2, np.pi / 2, npts)
theta = np.array(lat)
for n in range(niter):
theta -= (2 * theta + np.sin(2 * theta) - np.pi * np.sin(lat)) / (
2 + 2 * np.cos(2 * theta)
)
x = 2 / np.pi * lon * np.cos(theta)
y = np.sin(theta)
res.append((x, y))
return res
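# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Draws the Mollweide-style graticule returned by the two helpers; matplotlib is
# assumed to be available even though the module itself only needs numpy.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(8, 4))
    for x, y in get_latitude_lines() + get_longitude_lines():
        ax.plot(x, y, lw=0.5, color="k")
    ax.set_aspect("equal")
    fig.savefig("graticule.png", dpi=150)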
|
import time
import idiokit
from abusehelper.core import bot, utils
COLUMNS = ("first seen", "threat", "malware", "host", "url", "status", "registrar", "ip", "asn", "cc")
def _value_split(values):
results = set()
for value in values:
results = results | set([x for x in value.split("|") if x])
return tuple(results)
@idiokit.stream
def _parse():
while True:
event = yield idiokit.next()
for key in event.keys():
event.pop(key, filter=lambda value: not value.strip())
for key in ("ip", "asn", "cc"):
event.update(key, _value_split(event.pop(key)))
for timestamp in event.pop("first seen"):
try:
timestamp = time.strftime(
"%Y-%m-%d %H:%M:%SZ",
time.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
)
except ValueError:
pass
else:
event.add("first seen", timestamp)
yield idiokit.send(event)
class RansomwareTrackerBot(bot.PollingBot):
feed_url = bot.Param(default="https://ransomwaretracker.abuse.ch/feeds/csv/")
@idiokit.stream
def poll(self):
self.log.info("Downloading {0}".format(self.feed_url))
try:
info, fileobj = yield utils.fetch_url(self.feed_url)
except utils.FetchUrlFailed as fuf:
raise bot.PollSkipped("Download failed: {0}".format(fuf))
lines = []
for line in fileobj:
line = line.strip()
if line and not line.startswith("#"):
lines.append(line)
yield idiokit.pipe(
utils.csv_to_events(tuple(lines),
columns=COLUMNS,
charset=info.get_param("charset", None)),
_parse()
)
if __name__ == "__main__":
RansomwareTrackerBot.from_command_line().execute()
|
import re
import datetime
from helpers.save_select_from_postgresql import save_pressure_to_postgresql
from helpers.analytics import analysis_result
from buttons import start_markup
from states import States
def pressure(update, context):
"""
take arm and pressure
prepare pressure data like ['180', '90']
take current datetime
prepare and write new data to postgreSQL:
username, systolic, diastolic, timestamp, date, arm
Return calendar
"""
arm = context.user_data.get('arm')
username = context.user_data.get('user_name')
user_input_pressure = update.message.text
timestamp = datetime.datetime.now()
list_pressure = re.split(r'[\^\,\.:;\\/\s]', user_input_pressure)
try:
systolic = list_pressure[0]
diastolic = list_pressure[1]
pulse = list_pressure[2]
save_pressure_to_postgresql(
username, systolic, diastolic, timestamp, arm, pulse
)
except (ValueError, IndexError):
systolic, diastolic = list_pressure[0], list_pressure[1]
save_pressure_to_postgresql(
username, systolic, diastolic, timestamp, arm
)
analytics = analysis_result(systolic, diastolic)
text = (
'''
New pressure data added. \n
%s
''' % analytics
)
context.bot.send_message(
chat_id=update.message.chat_id,
text=text,
reply_markup=start_markup
)
return States.START_BUTTON
|
"""
Created on Mon Jul 26 17:23:16 2021
@author: Andile Jaden Mbele
"""
"""
1. first nested loop takes len(L1)*len(L2) steps
2. second loop takes at most len(L1) steps
3. The latter term is overwhelmed by the former term
4. O(len(L1)*len(L2))
"""
def intersect(L1, L2):
tmp = []
for e1 in L1:
for e2 in L2:
if e1 == e2:
tmp.append(e1)
res = []
for e in tmp:
if not(e in res):
res.append(e)
return res
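# Small illustrative check (added, not in the original file): duplicates in the inputs
# are collapsed because the second loop keeps only the first occurrence of each element.
if __name__ == "__main__":
    print(intersect([1, 2, 2, 3], [2, 3, 4]))  # -> [2, 3]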
|
import time
from datetime import date
from apscheduler.schedulers.background import BackgroundScheduler
import conf_keys
from job_conf_parser import job_conf_parser
from singleton import singleton
from loggingex import LOG_WARNING
@singleton
class job_center():
def __init__(self, job_conf_path=""):
self._sched = None
self._job_conf_path = job_conf_path
self._job_id_handle = {}
self._static_job_id_handle = {}
def start(self):
self._sched = BackgroundScheduler()
self._sched.start()
def add_jobs(self, jobs_info, is_static = False):
if None == self._sched:
LOG_WARNING("job center must call start() first")
return
for (job_name,job_info) in jobs_info.items():
if is_static and job_name in self._static_job_id_handle.keys():
continue
job_type = job_info["type"]
class_name = job_info["class"]
job_handle = self._get_obj(class_name)
if is_static:
self._static_job_id_handle[job_name] = job_handle
else:
self._job_id_handle[job_name] = job_handle
cmd = "self._sched.add_job(job_handle.run, job_type, id = job_name"
params = self._join_params(job_info)
if 0 != len(params):
cmd += " , "
cmd += params
cmd += ")"
#print cmd
eval(cmd)
def remove_jobs(self, jobs_info):
if None == self._sched:
LOG_WARNING("job center must call start() first")
return
for job_name in jobs_info.keys():
self._sched.remove_job(job_name)
self._job_id_handle.pop(job_name)
def _join_params(self, job_info):
params = ""
param = ""
job_type = job_info["type"]
for key in job_info.keys():
if key in conf_keys.job_conf_info_dict[job_type]:
if 0 != len(params):
params += ' , '
value = job_info[key]
if value.isdigit():
param = key + " = " + value
else:
param = key + " = '" + value + "'"
if 0 != len(param):
params += param
return params
def _get_obj(self, _cls_name):
_packet_name = _cls_name
_module_home = __import__(_packet_name,globals(),locals(),[_cls_name])
obj = getattr(_module_home,_cls_name)
class_obj = obj()
return class_obj
if __name__ == "__main__":
a = job_center("job_sample.conf")
a.start()
time.sleep(1000)
|
from probability_tree import BranchNode, LeafNode, parse_dict
def test_it_parses_leaf_node():
tree = parse_dict({"name": "leaf", "probability": 1.0, "conclusion": 1.0})
assert tree == LeafNode(name="leaf", probability=1.0, conclusion=1.0)
def test_it_parses_simple_tree():
tree = parse_dict(
{
"name": "root",
"probability": 1.0,
"children": [
{"name": "leaf1", "probability": 0.5, "conclusion": 1.0},
{"name": "leaf2", "probability": 0.5, "conclusion": 0.0},
],
}
)
assert tree == BranchNode(
name="root",
probability=1.0,
children=[
LeafNode(name="leaf1", probability=0.5, conclusion=1.0),
LeafNode(name="leaf2", probability=0.5, conclusion=0.0),
],
)
def test_it_parses_complex_tree():
tree = parse_dict(
{
"name": "root",
"probability": 1.0,
"children": [
{
"name": "branch1",
"probability": 0.5,
"children": [
{"name": "leaf1", "probability": 0.5, "conclusion": 0.5},
{"name": "leaf2", "probability": 0.5, "conclusion": 0.0},
],
},
{
"name": "branch2",
"probability": 0.5,
"children": [
{"name": "leaf1", "probability": 0.5, "conclusion": 1.0},
{"name": "leaf2", "probability": 0.5, "conclusion": 0.5},
],
},
],
}
)
assert tree == BranchNode(
name="root",
probability=1.0,
children=[
BranchNode(
name="branch1",
probability=0.5,
children=[
LeafNode(name="leaf1", probability=0.5, conclusion=0.5),
LeafNode(name="leaf2", probability=0.5, conclusion=0.0),
],
),
BranchNode(
name="branch2",
probability=0.5,
children=[
LeafNode(name="leaf1", probability=0.5, conclusion=1.0),
LeafNode(name="leaf2", probability=0.5, conclusion=0.5),
],
),
],
)
|
"""
To implement component value saving,
when a component is created, the saved value should be loaded via `load_slicer_value`
and used as the initial value for the component's attr (e.g. `value`).
Then in each dashboard where this component is used, a callback should be added
via `callback_slicer_state_saving` to save updates of the component's attr
"""
from typing import List, Optional, Union
from dash.dependencies import Input, Output
from flask import session
def _gen_slicer_key(slicer_key: str, value_type) -> str:
return f"{slicer_key}|{value_type.__name__}"
def save_slicer_value(slicer_key: str, value):
if "slicers" not in session:
session["slicers"] = {}
if value is None:
return
value_type = type(value)
key = _gen_slicer_key(slicer_key, value_type)
session["slicers"][key] = value
session.modified = True
def load_slicer_value(
slicer_key: str, value_type, available_options: Optional[List] = None, default=None
):
"""Loads saved slicer's value from `slicer_key` + `value_type`
:param slicer_key: slicer's key
:param value_type: type of saved value.
It's useful in case attr can have different types
:param available_options: (optional) to check if saved value is valid
:param default: returned if no saved value or it's not in `available_options`
"""
if "slicers" not in session:
session["slicers"] = {}
slicer_full_key = _gen_slicer_key(slicer_key, value_type)
if slicer_full_key not in session["slicers"]:
return default
saved_value = session["slicers"][slicer_full_key]
if available_options is not None:
if isinstance(saved_value, list):
new_options = [i for i in saved_value if i in available_options]
if new_options:
return new_options
return default
else:
if saved_value not in available_options:
return default
return saved_value
def callback_slicer_state_saving(
app, slicer_key: str, input_id: str, input_attr: Union[str, List[str]] = "value"
):
"""Add callback for saving slicer's attrs state"""
multiple_inputs = isinstance(input_attr, (tuple, list))
if multiple_inputs:
inputs = [Input(input_id, attr) for attr in input_attr]
else:
inputs = [Input(input_id, input_attr)]
@app.callback(Output(input_id, "id"), inputs)
def _save_slicer_state_to_session(*args):
save_slicer_value(slicer_key, list(args) if multiple_inputs else args[0])
# callbacks require output, so we use `id` as dummy output
return input_id
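# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Wires a hypothetical Dash dropdown to the helpers above; `app`, the component id
# "country-dropdown", the slicer key "country" and the option values are all
# placeholders, not taken from a real dashboard.
#
#     import dash
#     from dash import dcc
#
#     app = dash.Dash(__name__)
#     options = ["DE", "EE", "US"]
#     dropdown = dcc.Dropdown(
#         id="country-dropdown",
#         options=options,
#         value=load_slicer_value("country", str, available_options=options, default="DE"),
#     )
#     callback_slicer_state_saving(app, "country", "country-dropdown", "value")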
|
# Generated by Django 2.2.12 on 2021-03-28 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0002_auto_20210328_1538'),
]
operations = [
migrations.AddField(
model_name='siswa',
name='nama',
field=models.CharField(max_length=225, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=30, verbose_name='first name'),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
|
print('=' * 30)
print('\033[35m Simplex \033[m')
print('=' * 30)
totalCompra = produtosMais1000 = 0
nomeProdutoBarato = ''
precoProdutoBarato = 0
while True:
nomeProduto = str(input('\nNome do Produto: '))
preco = float(input('Preço: R$ '))
totalCompra += preco
if preco > 1000:
produtosMais1000 += 1
if nomeProdutoBarato == '':
nomeProdutoBarato = nomeProduto
precoProdutoBarato = preco
if preco < precoProdutoBarato:
nomeProdutoBarato = nomeProduto
precoProdutoBarato = preco
pergunta = str(input('\nQuer continuar? [S/N] ')).upper()
while pergunta not in 'SN':
pergunta = str(input('Quer continuar? [S/N] ')).upper()
if pergunta == 'N':
break
print('\n======== Estatísticas ========\n')
print(f'''O total da compra foi R${totalCompra:.2f}
Temos {produtosMais1000} produtos custando mais de R$1000,00
O produto mais barato foi {nomeProdutoBarato} que custa R${precoProdutoBarato:.2f}''')
|
# -*- coding: utf-8 -*-
from __future__ import division
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from django.shortcuts import ( # noqa
render, get_object_or_404)
from django import http
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from course.views import (
get_role_and_participation
)
from course.content import (
get_course_repo, get_course_desc, get_flow_desc,
parse_date_spec, get_course_commit_sha)
from course.constants import (
participation_role,
flow_permission, flow_rule_kind)
from course.models import (
Course,
FlowRuleException,
InstantFlowRequest,
FlowSession)
# {{{ flow permissions
class FlowSessionRuleBase(object):
def __init__(self, **attrs):
for name in self.__slots__:
setattr(self, name, attrs.get(name))
class FlowSessionStartRule(FlowSessionRuleBase):
__slots__ = [
"tag_session",
"may_start_new_session",
"may_list_existing_sessions",
]
class FlowSessionAccessRule(FlowSessionRuleBase):
__slots__ = [
"permissions",
"message",
]
def human_readable_permissions(self):
from course.models import FLOW_PERMISSION_CHOICES
permission_dict = dict(FLOW_PERMISSION_CHOICES)
return [permission_dict[p] for p in self.permissions]
class FlowSessionGradingRule(FlowSessionRuleBase):
__slots__ = [
"grade_identifier",
"grade_aggregation_strategy",
"due",
"generates_grade",
"description",
"credit_percent",
"use_last_activity_as_completion_time",
]
def _eval_generic_conditions(rule, course, role, now_datetime):
if hasattr(rule, "if_before"):
ds = parse_date_spec(course, rule.if_before)
if not (now_datetime <= ds):
return False
if hasattr(rule, "if_after"):
ds = parse_date_spec(course, rule.if_after)
if not (now_datetime >= ds):
return False
if hasattr(rule, "if_has_role"):
if role not in rule.if_has_role:
return False
return True
def get_flow_rules(flow_desc, kind, participation, flow_id, now_datetime,
consider_exceptions=True, default_rules_desc=[]):
if (not hasattr(flow_desc, "rules")
or not hasattr(flow_desc.rules, kind)):
rules = default_rules_desc[:]
else:
rules = getattr(flow_desc.rules, kind)[:]
if consider_exceptions:
for exc in (
FlowRuleException.objects
.filter(
participation=participation,
active=True,
kind=kind,
flow_id=flow_id)
# rules created first will get inserted first, and show up last
.order_by("creation_time")):
if exc.expiration is not None and now_datetime > exc.expiration:
continue
from relate.utils import dict_to_struct
rules.insert(0, dict_to_struct(exc.rule))
return rules
def get_session_start_rule(course, participation, role, flow_id, flow_desc,
now_datetime, facilities=None, for_rollover=False):
"""Return a :class:`FlowSessionStartRule` if a new session is
permitted or *None* if no new session is allowed.
"""
if facilities is None:
facilities = frozenset()
from relate.utils import dict_to_struct
rules = get_flow_rules(flow_desc, flow_rule_kind.start,
participation, flow_id, now_datetime,
default_rules_desc=[
dict_to_struct(dict(
may_start_new_session=True,
may_list_existing_sessions=False))])
for rule in rules:
if not _eval_generic_conditions(rule, course, role, now_datetime):
continue
if not for_rollover and hasattr(rule, "if_in_facility"):
if rule.if_in_facility not in facilities:
continue
if not for_rollover and hasattr(rule, "if_has_in_progress_session"):
session_count = FlowSession.objects.filter(
participation=participation,
course=course,
flow_id=flow_id,
in_progress=True).count()
if bool(session_count) != rule.if_has_in_progress_session:
continue
if not for_rollover and hasattr(rule, "if_has_session_tagged"):
tagged_session_count = FlowSession.objects.filter(
participation=participation,
course=course,
access_rules_tag=rule.if_has_session_tagged,
flow_id=flow_id).count()
if not tagged_session_count:
continue
if not for_rollover and hasattr(rule, "if_has_fewer_sessions_than"):
session_count = FlowSession.objects.filter(
participation=participation,
course=course,
flow_id=flow_id).count()
if session_count >= rule.if_has_fewer_sessions_than:
continue
if not for_rollover and hasattr(rule, "if_has_fewer_tagged_sessions_than"):
tagged_session_count = FlowSession.objects.filter(
participation=participation,
course=course,
access_rules_tag__isnull=False,
flow_id=flow_id).count()
if tagged_session_count >= rule.if_has_fewer_tagged_sessions_than:
continue
return FlowSessionStartRule(
tag_session=getattr(rule, "tag_session", None),
may_start_new_session=getattr(
rule, "may_start_new_session", True),
may_list_existing_sessions=getattr(
rule, "may_list_existing_sessions", True),
)
return FlowSessionStartRule(
may_list_existing_sessions=False,
may_start_new_session=False)
def get_session_access_rule(session, role, flow_desc, now_datetime,
facilities=None):
"""Return a :class:`ExistingFlowSessionRule`` to describe
how a flow may be accessed.
"""
if facilities is None:
facilities = frozenset()
from relate.utils import dict_to_struct
rules = get_flow_rules(flow_desc, flow_rule_kind.access,
session.participation, session.flow_id, now_datetime,
default_rules_desc=[
dict_to_struct(dict(
permissions=[flow_permission.view],
))])
for rule in rules:
if not _eval_generic_conditions(rule, session.course, role, now_datetime):
continue
if hasattr(rule, "if_in_facility"):
if rule.if_in_facility not in facilities:
continue
if hasattr(rule, "if_has_tag"):
if session.access_rules_tag != rule.if_has_tag:
continue
if hasattr(rule, "if_in_progress"):
if session.in_progress != rule.if_in_progress:
continue
if hasattr(rule, "if_expiration_mode"):
if session.expiration_mode != rule.if_expiration_mode:
continue
if hasattr(rule, "if_session_duration_shorter_than_minutes"):
duration_min = (now_datetime - session.start_time).total_seconds() / 60
if session.participation is not None:
duration_min /= float(session.participation.time_factor)
if duration_min > rule.if_session_duration_shorter_than_minutes:
continue
permissions = set(rule.permissions)
# {{{ deal with deprecated permissions
if "modify" in permissions:
permissions.remove("modify")
permissions.update([
flow_permission.submit_answer,
flow_permission.end_session,
])
if "see_answer" in permissions:
permissions.remove("see_answer")
permissions.add(flow_permission.see_answer_after_submission)
# }}}
# Remove 'modify' permission from not-in-progress sessions
if not session.in_progress:
for perm in [
flow_permission.submit_answer,
flow_permission.end_session,
]:
if perm in permissions:
permissions.remove(perm)
return FlowSessionAccessRule(
permissions=frozenset(permissions),
message=getattr(rule, "message", None)
)
return FlowSessionAccessRule(permissions=frozenset())
def get_session_grading_rule(session, role, flow_desc, now_datetime):
flow_desc_rules = getattr(flow_desc, "rules", None)
from relate.utils import dict_to_struct
rules = get_flow_rules(flow_desc, flow_rule_kind.grading,
session.participation, session.flow_id, now_datetime,
default_rules_desc=[
dict_to_struct(dict(
generates_grade=False,
))])
for rule in rules:
if hasattr(rule, "if_has_role"):
if role not in rule.if_has_role:
continue
if hasattr(rule, "if_has_tag"):
if session.access_rules_tag != rule.if_has_tag:
continue
if hasattr(rule, "if_completed_before"):
ds = parse_date_spec(session.course, rule.if_completed_before)
if session.in_progress and now_datetime > ds:
continue
if not session.in_progress and session.completion_time > ds:
continue
due = parse_date_spec(session.course, getattr(rule, "due", None))
if due is not None:
assert due.tzinfo is not None
generates_grade = getattr(rule, "generates_grade", True)
grade_identifier = None
grade_aggregation_strategy = None
if flow_desc_rules is not None:
grade_identifier = flow_desc_rules.grade_identifier
grade_aggregation_strategy = getattr(
flow_desc_rules, "grade_aggregation_strategy", None)
return FlowSessionGradingRule(
grade_identifier=grade_identifier,
grade_aggregation_strategy=grade_aggregation_strategy,
due=due,
generates_grade=generates_grade,
description=getattr(rule, "description", None),
credit_percent=getattr(rule, "credit_percent", 100),
use_last_activity_as_completion_time=getattr(
rule, "use_last_activity_as_completion_time", False),
)
raise RuntimeError(_("grading rule determination was unable to find "
"a grading rule"))
# }}}
# {{{ contexts
class CoursePageContext(object):
def __init__(self, request, course_identifier):
self.request = request
self.course_identifier = course_identifier
self.course = get_object_or_404(Course, identifier=course_identifier)
self.role, self.participation = get_role_and_participation(
request, self.course)
from course.views import check_course_state
check_course_state(self.course, self.role)
self.course_commit_sha = get_course_commit_sha(
self.course, self.participation)
self.repo = get_course_repo(self.course)
self.course_desc = get_course_desc(self.repo, self.course,
self.course_commit_sha)
class FlowContext(object):
def __init__(self, repo, course, flow_id,
participation=None, flow_session=None):
"""*participation* and *flow_session* are not stored and only used
to figure out versioning of the flow content.
"""
self.repo = repo
self.course = course
self.flow_id = flow_id
from django.core.exceptions import ObjectDoesNotExist
self.course_commit_sha = get_course_commit_sha(
self.course, participation)
try:
self.flow_desc = get_flow_desc(self.repo, self.course,
flow_id, self.course_commit_sha)
except ObjectDoesNotExist:
raise http.Http404()
class PageOrdinalOutOfRange(http.Http404):
pass
class FlowPageContext(FlowContext):
"""This object acts as a container for all the information that a flow page
may need to render itself or respond to a POST.
Note that this is different from :class:`course.page.PageContext`,
    which is used in the page API.
"""
def __init__(self, repo, course, flow_id, ordinal,
participation, flow_session, request=None):
FlowContext.__init__(self, repo, course, flow_id,
participation, flow_session=flow_session)
from course.content import adjust_flow_session_page_data
adjust_flow_session_page_data(repo, flow_session,
course.identifier, self.flow_desc)
if ordinal >= flow_session.page_count:
raise PageOrdinalOutOfRange()
from course.models import FlowPageData
page_data = self.page_data = get_object_or_404(
FlowPageData, flow_session=flow_session, ordinal=ordinal)
from course.content import get_flow_page_desc
try:
self.page_desc = get_flow_page_desc(
flow_session, self.flow_desc, page_data.group_id,
page_data.page_id)
except ObjectDoesNotExist:
self.page_desc = None
self.page = None
self.page_context = None
else:
self.page = instantiate_flow_page_with_ctx(self, page_data)
page_uri = None
if request is not None:
from django.core.urlresolvers import reverse
page_uri = request.build_absolute_uri(
reverse("relate-view_flow_page",
args=(course.identifier, flow_session.id, ordinal)))
from course.page import PageContext
self.page_context = PageContext(
course=self.course, repo=self.repo,
commit_sha=self.course_commit_sha,
flow_session=flow_session,
page_uri=page_uri)
self._prev_answer_visit = False
@property
def prev_answer_visit(self):
if self._prev_answer_visit is False:
from course.flow import get_prev_answer_visit
self._prev_answer_visit = get_prev_answer_visit(self.page_data)
return self._prev_answer_visit
@property
def ordinal(self):
return self.page_data.ordinal
def instantiate_flow_page_with_ctx(fctx, page_data):
from course.content import get_flow_page_desc
page_desc = get_flow_page_desc(
fctx.flow_id, fctx.flow_desc,
page_data.group_id, page_data.page_id)
from course.content import instantiate_flow_page
return instantiate_flow_page(
"course '%s', flow '%s', page '%s/%s'"
% (fctx.course.identifier, fctx.flow_id,
page_data.group_id, page_data.page_id),
fctx.repo, page_desc, fctx.course_commit_sha)
# }}}
def course_view(f):
def wrapper(request, course_identifier, *args, **kwargs):
pctx = CoursePageContext(request, course_identifier)
response = f(pctx, *args, **kwargs)
pctx.repo.close()
return response
from functools import update_wrapper
update_wrapper(wrapper, f)
return wrapper
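# Illustrative sketch (assumption, not part of RELATE itself): a view wrapped
# with `course_view` receives a CoursePageContext as its first argument instead
# of the raw request, and can pass it on to `render_course_page` (defined
# below). The view name and template path are hypothetical.
#
#     @course_view
#     def view_course_news(pctx):
#         return render_course_page(pctx, "course/news.html", {})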
def render_course_page(pctx, template_name, args,
allow_instant_flow_requests=True):
args = args.copy()
from course.views import get_now_or_fake_time
now_datetime = get_now_or_fake_time(pctx.request)
if allow_instant_flow_requests:
instant_flow_requests = list((InstantFlowRequest.objects
.filter(
course=pctx.course,
start_time__lte=now_datetime,
end_time__gte=now_datetime,
cancelled=False)
.order_by("start_time")))
else:
instant_flow_requests = []
args.update({
"course": pctx.course,
"course_desc": pctx.course_desc,
"participation": pctx.participation,
"role": pctx.role,
"participation_role": participation_role,
"num_instant_flow_requests": len(instant_flow_requests),
"instant_flow_requests":
[(i+1, r) for i, r in enumerate(instant_flow_requests)],
})
return render(pctx.request, template_name, args)
# {{{ page cache
class PageInstanceCache(object):
"""Caches instances of :class:`course.page.Page`."""
def __init__(self, repo, course, flow_id):
self.repo = repo
self.course = course
self.flow_id = flow_id
self.flow_desc_cache = {}
self.page_cache = {}
def get_flow_desc_from_cache(self, commit_sha):
try:
return self.flow_desc_cache[commit_sha]
except KeyError:
flow_desc = get_flow_desc(self.repo, self.course,
self.flow_id, commit_sha)
self.flow_desc_cache[commit_sha] = flow_desc
return flow_desc
def get_page(self, group_id, page_id, commit_sha):
key = (group_id, page_id, commit_sha)
try:
return self.page_cache[key]
except KeyError:
from course.content import get_flow_page_desc, instantiate_flow_page
page_desc = get_flow_page_desc(
self.flow_id,
self.get_flow_desc_from_cache(commit_sha),
group_id, page_id)
page = instantiate_flow_page(
location="flow '%s', group, '%s', page '%s'"
% (self.flow_id, group_id, page_id),
repo=self.repo, page_desc=page_desc,
commit_sha=commit_sha)
self.page_cache[key] = page
return page
# }}}
# {{{ codemirror config
def get_codemirror_widget(language_mode, interaction_mode,
config=None, addon_css=(), addon_js=(), dependencies=(),
read_only=False):
theme = "default"
if read_only:
theme += " relate-readonly"
from codemirror import CodeMirrorTextarea, CodeMirrorJavascript
from django.core.urlresolvers import reverse
help_text = (_("Press F9 to toggle full-screen mode. ")
+ _("Set editor mode in <a href='%s'>user profile</a>.")
% reverse("relate-user_profile"))
actual_addon_css = (
"dialog/dialog",
"display/fullscreen",
) + addon_css
actual_addon_js = (
"search/searchcursor",
"dialog/dialog",
"search/search",
"comment/comment",
"edit/matchbrackets",
"display/fullscreen",
"selection/active-line",
"edit/trailingspace",
) + addon_js
if language_mode == "python":
indent_unit = 4
else:
indent_unit = 2
actual_config = {
"fixedGutter": True,
#"autofocus": True,
"matchBrackets": True,
"styleActiveLine": True,
"showTrailingSpace": True,
"indentUnit": indent_unit,
"readOnly": read_only,
"extraKeys": CodeMirrorJavascript("""
{
"Ctrl-/": "toggleComment",
"Tab": function(cm)
{
var spaces = \
Array(cm.getOption("indentUnit") + 1).join(" ");
cm.replaceSelection(spaces);
},
"F9": function(cm) {
cm.setOption("fullScreen",
!cm.getOption("fullScreen"));
}
}
""")
}
if interaction_mode == "vim":
actual_config["vimMode"] = True
actual_addon_js += ('../keymap/vim',)
elif interaction_mode == "emacs":
actual_config["keyMap"] = "emacs"
actual_addon_js += ('../keymap/emacs',)
elif interaction_mode == "sublime":
actual_config["keyMap"] = "sublime"
actual_addon_js += ('../keymap/sublime',)
# every other interaction mode goes to default
if config is not None:
actual_config.update(config)
return CodeMirrorTextarea(
mode=language_mode,
dependencies=dependencies,
theme=theme,
addon_css=actual_addon_css,
addon_js=actual_addon_js,
config=actual_config), help_text
# }}}
# {{{ facility processing
class FacilityFindingMiddleware(object):
def process_request(self, request):
pretend_facilities = request.session.get("relate_pretend_facilities")
if pretend_facilities is not None:
facilities = pretend_facilities
else:
import ipaddress
remote_address = ipaddress.ip_address(
six.text_type(request.META['REMOTE_ADDR']))
facilities = set()
from django.conf import settings
for name, props in six.iteritems(settings.RELATE_FACILITIES):
ip_ranges = props.get("ip_ranges", [])
for ir in ip_ranges:
if remote_address in ipaddress.ip_network(six.text_type(ir)):
facilities.add(name)
request.relate_facilities = frozenset(facilities)
# }}}
# vim: foldmethod=marker
|
from peewee import *
database = Proxy()
class UnknownField(object):
def __init__(self, *_, **__): pass
class BaseModel(Model):
class Meta:
database = database
class Example(BaseModel):
author_id = IntegerField(null=True)
message = TextField(null=True)
class Meta:
table_name = 'example'
class User(BaseModel):
has_admin = BooleanField()
has_banned = BooleanField()
has_vip = BooleanField()
prefix = CharField()
user_id = IntegerField()
class Meta:
table_name = 'user'
class Chats(BaseModel):
chat_id = IntegerField()
class Meta:
table_name = "chats"
class Users(BaseModel):
banned = BooleanField(constraints=[SQL("DEFAULT false")], null=True)
has_admin = BooleanField(constraints=[SQL("DEFAULT false")])
has_vip = BooleanField(constraints=[SQL("DEFAULT false")], null=True)
prefix = CharField(null=True)
user_id = IntegerField()
class Meta:
table_name = 'users'
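# Illustrative sketch (assumption, not part of the models above): `database`
# is a peewee Proxy, so the application is expected to bind a real backend at
# startup before the models are used. The database path is hypothetical.
#
#     from peewee import SqliteDatabase
#
#     database.initialize(SqliteDatabase("bot.db"))
#     database.create_tables([Example, User, Chats, Users], safe=True)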
|
""" Build tool that finds dependencies automatically for any language.
fabricate is a build tool that finds dependencies automatically for any
language. It's small and just works. No hidden stuff behind your back. It was
inspired by Bill McCloskey's make replacement, memoize, but fabricate works on
Windows as well as Linux.
Read more about how to use it and how it works on the project page:
http://code.google.com/p/fabricate/
Like memoize, fabricate is released under a "New BSD license". fabricate is
copyright (c) 2009 Brush Technology. Full text of the license is here:
http://code.google.com/p/fabricate/wiki/License
"""
# so you can do "from fabricate import *" to simplify your build script
__all__ = ['ExecutionError', 'shell', 'md5_hasher', 'mtime_hasher', 'Builder',
'setup', 'run', 'autoclean', 'memoize', 'outofdate', 'main']
# fabricate version number
__version__ = '1.05'
# if version of .deps file has changed, we know to not use it
deps_version = 1
import atexit
import optparse
import os
import platform
import re
import stat
import subprocess
import sys
import tempfile
import time
# So we can use md5func in old and new versions of Python without warnings
try:
import hashlib
md5func = hashlib.md5
except ImportError:
import md5
md5func = md5.new
# Use json, or pickle on older Python versions if simplejson not installed
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
import cPickle
# needed to ignore the indent= argument for pickle's dump()
class PickleJson:
def load(self, f):
return cPickle.load(f)
def dump(self, obj, f, indent=None, sort_keys=None):
return cPickle.dump(obj, f)
json = PickleJson()
def printerr(message):
""" Print given message to stderr with a line feed. """
print >>sys.stderr, message
class ExecutionError(Exception):
pass
def shell(command, input=None, silent=True):
""" Run given shell command and return its output as a string.
- input='string' to pass standard input into the process.
- input=None (default) to use parent's stdin (keyboard)
- silent=False to use parent's stdout (i.e. print output
as-it-comes instead of returning it)
"""
if input:
stdin = subprocess.PIPE
else:
stdin = None
if silent:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(command, shell=True, stdin=stdin, stdout=stdout,
stderr=subprocess.STDOUT)
if input:
proc.stdin.write(input)
output = ''
if silent:
output = proc.stdout.read()
status = proc.wait()
if status:
raise ExecutionError('Command %r terminated with exit status %d'
% (command.split(' ')[0], status), output, status)
if silent:
return output
def access_file(filename):
""" Access (read a byte from) file to try to update its access time. """
f = open(filename)
data = f.read(1)
f.close()
def file_has_atimes(filename):
""" Return True if the given filesystem supports access time updates for
this file. The atime resolution must be at least one day (as it is on
FAT filesystems). """
resolution = 24*60*60 # in seconds (worst-case resolution)
stat = os.stat(filename)
os.utime(filename, (stat.st_atime-resolution, stat.st_mtime))
previous = os.stat(filename).st_atime
access_file(filename)
return os.stat(filename).st_atime > previous
def has_atimes(paths):
""" Return True if a file created in each path supports fast atimes.
Note: for speed, this only tests files created at the top directory
of each path. A safe assumption in most build environments.
In the unusual case that any sub-directories are mounted
on alternate file systems that don't support atimes, the build may
fail to identify a dependency """
for path in paths:
handle, filename = tempfile.mkstemp(dir=path)
try:
try:
f = os.fdopen(handle, 'wb')
except:
os.close(handle)
raise
try:
f.write('x') # need a byte in the file for access test
finally:
f.close()
if not file_has_atimes(filename):
return False
finally:
os.remove(filename)
return True
def has_strace():
""" Return True if this system has strace. """
if platform.system() == 'Windows':
# even if windows has strace, it's probably a dodgy cygwin one
return False
try:
subprocess.Popen('strace', stderr=subprocess.PIPE)
return True
except OSError:
return False
def _file_times(path, depth, ignoreprefix='.'):
""" Helper function for file_times().
Return a dict of file times, recursing directories that don't
start with ignoreprefix """
names = os.listdir(path)
times = {}
for name in names:
if ignoreprefix and name.startswith(ignoreprefix):
continue
fullname = os.path.join(path, name)
st = os.stat(fullname)
if stat.S_ISDIR(st.st_mode):
if depth > 1:
times.update(_file_times(fullname, depth-1, ignoreprefix))
elif stat.S_ISREG(st.st_mode):
times[fullname] = st.st_atime, st.st_mtime
return times
def file_times(paths, depth=100, ignoreprefix='.'):
""" Return a dict of "filepath: (atime, mtime)" entries for each file in
given paths list. "filepath" is the absolute path, "atime" is the
access time, "mtime" the modification time.
Recurse directories that don't start with ignoreprefix """
times = {}
for path in paths:
times.update(_file_times(os.path.abspath(path), depth, ignoreprefix))
return times
def md5_hasher(filename):
""" Return MD5 hash of given filename, or None if file doesn't exist. """
try:
f = open(filename, 'rb')
try:
return md5func(f.read()).hexdigest()
finally:
f.close()
except IOError:
return None
def mtime_hasher(filename):
""" Return modification time of file, or None if file doesn't exist. """
try:
st = os.stat(filename)
return repr(st.st_mtime)
except (IOError, OSError):
return None
def shrink_path(filename):
""" Try to shrink a filename for display (remove the leading path if the
file is relative to the current working directory). """
cwd = os.getcwd()
prefix = os.path.commonprefix([cwd, filename])
if prefix:
filename = filename[len(prefix)+1:]
return filename
class Builder(object):
""" The Builder.
You can subclass this and override the "runner" function to do what you
want. For an example, see:
http://code.google.com/p/fabricate/wiki/HowtoSubclassBuilder
"runner" is the function used to run commands and generate
dependencies. It must take a command line string as its argument, and
return a tuple of (deps, outputs), where deps is a list of abspath'd
dependency files and outputs a list of abspath'd output files. It
defaults to a function that just calls smart_runner, which uses
strace_runner or atimes_runner as it can, automatically.
"""
def __init__(self, dirs=None, dirdepth=100, ignoreprefix='.',
hasher=md5_hasher, depsname='.deps', quiet=False):
""" Initialise a Builder with the given options.
"dirs" is a list of paths to look for dependencies (or outputs) in
if using the strace or atimes runners.
"dirdepth" is the depth to recurse into the paths in "dirs" (default
essentially means infinitely). Set to 1 to just look at the
immediate paths in "dirs" and not recurse at all. This can be
useful to speed up the atimes_runner if you're building in a large
tree and you don't care about all of the subdirectories.
"ignoreprefix" prevents recursion into directories that start with
prefix. It defaults to '.' to ignore svn directories.
Change it to '_svn' if you use _svn hidden directories.
"hasher" is a function which returns a string which changes when
the contents of its filename argument changes, or None on error.
Default is md5_hasher, but can also be mtime_hasher.
"depsname" is the name of the JSON dependency file to load/save.
"quiet" set to True tells the builder to not display the commands being
executed (or other non-error output).
"""
if dirs is None:
dirs = ['.']
self.dirs = [os.path.abspath(path) for path in dirs]
self.dirdepth = dirdepth
self.ignoreprefix = ignoreprefix
self.depsname = depsname
self.hasher = hasher
self.quiet = quiet
self.checking = False
def echo(self, message):
""" Print message, but only if builder is not in quiet mode. """
if not self.quiet:
print message
def echo_command(self, command):
""" Show a command being executed. """
self.echo(command)
def echo_delete(self, filename, error=None):
""" Show a file being deleted. For subclassing Builder and overriding
this function, the exception is passed in if an OSError occurs
while deleting a file. """
if error is None:
self.echo('deleting %s' % shrink_path(filename))
def run(self, command, runner=None):
""" Run given shell command, but only if its dependencies or outputs
have changed or don't exist. Override default runner if given. """
if not self.outofdate(command):
return
# if just checking up-to-date-ness, set flag and do nothing more
self.outofdate_flag = True
if self.checking:
return
# use runner to run command and collect dependencies
self.echo_command(command)
if runner is None:
runner = self.runner
deps, outputs = runner(command)
if deps is not None or outputs is not None:
deps_dict = {}
# hash the dependency inputs and outputs
for dep in deps:
hash = self.hasher(dep)
if hash is not None:
deps_dict[dep] = "input-" + hash
for output in outputs:
hash = self.hasher(output)
if hash is not None:
deps_dict[output] = "output-" + hash
self.deps[command] = deps_dict
def memoize(self, command):
""" Run given shell command as per run(), but return the status code
instead of raising an exception if there's an error. """
try:
self.run(command)
return 0
except ExecutionError, exc:
message, data, status = exc
return status
def outofdate(self, command):
""" Return True if given command is out of date. Command can either be
a callable build function or a command line string. """
if callable(command):
# command is a build function
self.checking = True
self.outofdate_flag = False
command()
self.checking = False
return self.outofdate_flag
else:
# command is a command line string
if command in self.deps:
for dep, oldhash in self.deps[command].items():
assert oldhash.startswith('input-') or \
oldhash.startswith('output-'), \
"%s file corrupt, do a clean!" % self.depsname
oldhash = oldhash.split('-', 1)[1]
# make sure this dependency or output hasn't changed
newhash = self.hasher(dep)
if newhash is None or newhash != oldhash:
break
else:
# all dependencies are unchanged
return False
# command has never been run, or one of the dependencies didn't
# exist or had changed
return True
def autoclean(self):
""" Automatically delete all outputs of this build as well as the .deps
file. """
# first build a list of all the outputs from the .deps file
outputs = []
for command, deps in self.deps.items():
outputs.extend(dep for dep, hash in deps.items()
if hash.startswith('output-'))
outputs.append(os.path.abspath(self.depsname))
self._deps = None
for output in outputs:
try:
os.remove(output)
except OSError, e:
self.echo_delete(output, e)
else:
self.echo_delete(output)
@property
def deps(self):
""" Lazy load .deps file so that instantiating a Builder is "safe". """
if not hasattr(self, '_deps') or self._deps is None:
self.read_deps()
atexit.register(self.write_deps)
return self._deps
def read_deps(self):
""" Read dependency JSON file into deps object. """
try:
f = open(self.depsname)
try:
self._deps = json.load(f)
# make sure the version is correct
if self._deps.get('.deps_version', 0) != deps_version:
printerr('Bad %s dependency file version! Rebuilding.'
% self.depsname)
self._deps = {}
self._deps.pop('.deps_version', None)
finally:
f.close()
except IOError:
self._deps = {}
def write_deps(self):
""" Write out deps object into JSON dependency file. """
if self._deps is None:
return # we've cleaned so nothing to save
self.deps['.deps_version'] = deps_version
f = open(self.depsname, 'w')
try:
json.dump(self.deps, f, indent=4, sort_keys=True)
finally:
f.close()
self._deps.pop('.deps_version', None)
def runner(self, command):
""" The default command runner. Override this in a subclass if you want
to write your own auto-dependency runner."""
return self.smart_runner(command)
def smart_runner(self, command):
""" Smart command runner that uses strace if it can, otherwise
access times if available, otherwise always builds. """
if not hasattr(self, '_smart_runner'):
if has_strace():
self._smart_runner = self.strace_runner
elif has_atimes(self.dirs):
self._smart_runner = self.atimes_runner
else:
self._smart_runner = self.always_runner
return self._smart_runner(command)
def _utime(self, filename, atime, mtime):
""" Call os.utime but ignore permission errors """
try:
st = os.utime(filename, (atime, mtime))
except OSError, e:
# ignore permission errors -- we can't build with files
# that we can't access anyway
if e.errno != 1:
raise
def _age_atimes(self, filetimes, age):
""" Age files' atimes to be at least age old. Only adjust if the given
filetimes dict says it isn't that old, and return a new dict of
filetimes with the ages adjusted. """
adjusted = {}
now = time.time()
for filename, entry in filetimes.iteritems():
if now - entry[0] < age:
entry = entry[0] - age, entry[1]
st = self._utime(filename, entry[0], entry[1])
adjusted[filename] = entry
return adjusted
# *** Note: tree walking time can be halved by caching afters for the next
# command's befores.
# We can also save lots of utime-ing by not restoring original atimes until
# after the final build step (because currently we're restoring atimes just
# to age them again for the next command.)
def atimes_runner(self, command):
""" Run command and return its dependencies and outputs, using before
and after access times to determine dependencies. """
originals = file_times(self.dirs, self.dirdepth, self.ignoreprefix)
befores = self._age_atimes(originals, 24*60*60)
shell(command, silent=False)
afters = file_times(self.dirs, self.dirdepth, self.ignoreprefix)
deps = []
outputs = []
for name in afters:
if name in befores:
# file in both befores+afters, add to outputs if mtime changed
if afters[name][1] > befores[name][1]:
outputs.append(name)
elif afters[name][0] > befores[name][0]:
# otherwise add to deps if atime changed
deps.append(name)
else:
# file created (in afters but not befores), add as output
outputs.append(name)
# Restore atimes of files we didn't access: not for any functional
# reason -- it's just to preserve the access time for the user's info
for name in deps:
originals.pop(name)
for name in originals:
original = originals[name]
if original != afters.get(name, None):
self._utime(name, original[0], original[1])
return deps, outputs
def _is_relevant(self, fullname):
""" Return True if file is in the dependency search directories. """
for path in self.dirs:
if fullname.startswith(path):
rest = fullname[len(path):]
# files in dirs starting with ignoreprefix are not relevant
if os.sep+self.ignoreprefix in os.sep+os.path.dirname(rest):
continue
# files deeper than dirdepth are not relevant
if rest.count(os.sep) > self.dirdepth:
continue
return True
return False
def _do_strace(self, ecmd, outfile, outname):
""" Run strace on given (escaped) command, sending output to file.
Return (status code, list of dependencies, list of outputs). """
calls = 'open,stat64,execve,exit_group,chdir,mkdir,rename'
shell('strace -f -o %s -e trace=%s /bin/sh -c "%s"' %
(outname, calls, ecmd), silent=False)
cwd = os.getcwd()
status = 0
deps = set()
outputs = set()
for line in outfile:
is_output = False
open_match = re.match(r'.*open\("([^"]*)", ([^,)]*)', line)
stat64_match = re.match(r'.*stat64\("([^"]*)", .*', line)
execve_match = re.match(r'.*execve\("([^"]*)", .*', line)
mkdir_match = re.match(r'.*mkdir\("([^"]*)", .*', line)
rename_match = re.match(r'.*rename\("[^"]*", "([^"]*)"\)', line)
kill_match = re.match(r'.*killed by.*', line)
if kill_match:
return None, None, None
match = None
if open_match:
match = open_match
mode = match.group(2)
if 'O_WRONLY' in mode or 'O_RDWR' in mode:
# it's an output file if opened for writing
is_output = True
elif stat64_match:
match = stat64_match
elif execve_match:
match = execve_match
elif mkdir_match:
match = mkdir_match
elif rename_match:
match = rename_match
# the destination of a rename is an output file
is_output = True
if match:
name = os.path.normpath(os.path.join(cwd, match.group(1)))
if self._is_relevant(name) and (os.path.isfile(name) or
os.path.isdir(name) or not os.path.lexists(name)):
if is_output:
outputs.add(name)
else:
deps.add(name)
match = re.match(r'.*chdir\("([^"]*)"\)', line)
if match:
cwd = os.path.normpath(os.path.join(cwd, match.group(1)))
match = re.match(r'.*exit_group\((.*)\).*', line)
if match:
status = int(match.group(1))
return status, list(deps), list(outputs)
def strace_runner(self, command):
""" Run command and return its dependencies and outputs, using strace
to determine dependencies (by looking at what files are opened or
modified). """
ecmd = command
ecmd = ecmd.replace('\\', '\\\\')
ecmd = ecmd.replace('"', '\\"')
exename = command.split()[0]
handle, outname = tempfile.mkstemp()
try:
try:
outfile = os.fdopen(handle, 'r')
except:
os.close(handle)
raise
try:
status, deps, outputs = self._do_strace(ecmd, outfile, outname)
if status is None:
raise ExecutionError(
'strace of %r was killed unexpectedly' % exename)
finally:
outfile.close()
finally:
os.remove(outname)
if status:
raise ExecutionError(
'strace of %r terminated with exit status %d'
% (exename, status), '', status)
return list(deps), list(outputs)
def always_runner(self, command):
""" Runner that always runs given command, used as a backup in case
a system doesn't have strace or atimes. """
shell(command, silent=False)
return None, None
# default Builder instance, used by helper run() and main() helper functions
default_builder = Builder()
default_command = 'build'
def setup(builder=None, default=None, runner=None, **kwargs):
""" Setup the default Builder (or an instance of given builder if "builder"
is not None) with the same keyword arguments as for Builder().
"default" is the name of the default function to run when the build
script is run with no command line arguments. """
global default_builder, default_command
if builder is not None:
default_builder = builder()
if default is not None:
default_command = default
default_builder.__init__(**kwargs)
if runner is not None:
default_builder.runner = getattr(default_builder, runner)
def run(command):
""" Run the given command using the default Builder (but only if its
dependencies have changed). """
default_builder.run(command)
def autoclean():
""" Automatically delete all outputs of the default build. """
default_builder.autoclean()
def memoize(command):
""" A memoize function compatible with memoize.py. Basically the same as
run(), but returns the status code instead of raising an exception
if there's an error. """
return default_builder.memoize(command)
def outofdate(command):
""" Return True if given command is out of date and needs to be run. """
return default_builder.outofdate(command)
def parse_options(usage):
""" Parse command line options and return parser and args. """
parser = optparse.OptionParser(usage='Usage: %prog '+usage,
version='%prog '+__version__)
parser.disable_interspersed_args()
parser.add_option('-t', '--time', action='store_true',
help='use file modification times instead of MD5 sums')
parser.add_option('-d', '--dir', action='append',
help='add DIR to list of relevant directories')
parser.add_option('-c', '--clean', action='store_true',
help='autoclean build outputs before running')
parser.add_option('-q', '--quiet', action='store_true',
help="don't echo commands, only print errors")
options, args = parser.parse_args()
default_builder.quiet = options.quiet
if options.time:
default_builder.hasher = mtime_hasher
if options.dir:
default_builder.dirs.extend(os.path.abspath(d) for d in options.dir)
if options.clean:
default_builder.autoclean()
return parser, options, args
def main(globals_dict=None):
""" Run the default function or the function(s) named in the command line
arguments. Call this at the end of your build script. If one of the
functions returns nonzero, main will exit with the last nonzero return
value as its status code. """
if globals_dict is None:
try:
globals_dict = sys._getframe(1).f_globals
except:
printerr("Your Python version doesn't support sys._getframe(1),")
printerr("call main(globals()) explicitly")
sys.exit(1)
usage = '[options] build script functions to run'
parser, options, actions = parse_options(usage)
if not actions:
actions = [default_command]
status = 0
try:
for action in actions:
if '(' not in action:
action = action.strip() + '()'
name = action.split('(')[0].split('.')[0]
if name in globals_dict:
this_status = eval(action, globals_dict)
if this_status:
status = int(this_status)
else:
printerr('%r command not defined!' % action)
sys.exit(1)
except ExecutionError, exc:
message, data, status = exc
printerr(message)
sys.exit(status)
if __name__ == '__main__':
# if called as a script, emulate memoize.py -- run() command line
parser, options, args = parse_options('[options] command line to run')
status = 0
if args:
status = memoize(' '.join(args))
elif not options.clean:
parser.print_help()
status = 1
# autoclean may have been used
sys.exit(status)
|
from collections import defaultdict

# For each test case, count how many distinct integers <= n are perfect
# squares or perfect cubes. Numbers that are both (sixth powers) share the
# same dict key, so they are only counted once.
for _ in range(int(input())):
    n = int(input())
    ans = defaultdict(int)
    j = 1
    while j ** 2 <= n:
        ans[j ** 2] += 1
        j += 1
    i = 1
    while i ** 3 <= n:
        ans[i ** 3] += 1
        i += 1
    print(len(ans))
|
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from mlbframe import db, models
import datetime
import os.path
db.drop_all()
db.create_all()
epoch = datetime.date(2017, 4, 1)
meta = models.Meta()
meta.last_updated = epoch
db.session.add(meta)
db.session.commit()
"""
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
"""
|
# Not consistent with test passing
import numpy as np
import path_plan
from path_plan import compute_probability
from path_plan import model_polyfit
from numpy import interp
import sys
def main():
    # Indian Roads Congress (IRC)
V_lane_width = [2.0, 23.5]
# https://nptel.ac.in/content/storage2/courses/105101008/downloads/cete_24.pdf
# Break point of speed
BP_lane_width = [0.0, 7.3]
speed = [0.0, 10.0]
lane_width = interp(V_lane_width, BP_lane_width, speed)
half_lane = np.array([0. , 0., 0., lane_width // 2.])
print(lane_width, half_lane)
left_path_weight = 1.
right_path_weight = 1.
l_probability = 0.006
r_probability = 0.123
left_polyfit = 0.1
right_polyfit = 0.22
ss = compute_probability(speed, left_polyfit, right_polyfit, l_probability, r_probability)
print(ss)
if __name__ == '__main__':
main()
|
"""Common definitions for GAN metrics."""
import hashlib
import os
import time
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from training import dataset, misc
# ----------------------------------------------------------------------------
# Base class for metrics.
class MetricBase:
def __init__(self, name):
self.name = name
self._dataset_obj = None
self._progress_lo = None
self._progress_hi = None
self._progress_max = None
self._progress_sec = None
self._progress_time = None
self._reset()
def close(self):
self._reset()
def _reset(
self,
network_pkl=None,
run_dir=None,
data_dir=None,
dataset_args=None,
mirror_augment=None,
):
if self._dataset_obj is not None:
self._dataset_obj.close()
self._network_pkl = network_pkl
self._data_dir = data_dir
self._dataset_args = dataset_args
self._dataset_obj = None
self._mirror_augment = mirror_augment
self._eval_time = 0
self._results = []
if (dataset_args is None or mirror_augment is None) and run_dir is not None:
run_config = misc.parse_config_for_previous_run(run_dir)
self._dataset_args = dict(run_config["dataset"])
self._dataset_args["shuffle_mb"] = 0
self._mirror_augment = run_config["train"].get("mirror_augment", False)
def configure_progress_reports(self, plo, phi, pmax, psec=15):
self._progress_lo = plo
self._progress_hi = phi
self._progress_max = pmax
self._progress_sec = psec
def run(
self,
network_pkl,
run_dir=None,
data_dir=None,
dataset_args=None,
mirror_augment=None,
num_gpus=1,
tf_config=None,
log_results=True,
Gs_kwargs=dict(is_validation=True),
):
self._reset(
network_pkl=network_pkl,
run_dir=run_dir,
data_dir=data_dir,
dataset_args=dataset_args,
mirror_augment=mirror_augment,
)
time_begin = time.time()
with tf.Graph().as_default(), tflib.create_session(
tf_config
).as_default(): # pylint: disable=not-context-manager
self._report_progress(0, 1)
_G, _D, Gs = misc.load_pkl(self._network_pkl)
self._evaluate(Gs, Gs_kwargs=Gs_kwargs, num_gpus=num_gpus)
self._report_progress(1, 1)
self._eval_time = (
time.time() - time_begin
) # pylint: disable=attribute-defined-outside-init
if log_results:
if run_dir is not None:
log_file = os.path.join(run_dir, "metric-%s.txt" % self.name)
with dnnlib.util.Logger(log_file, "a"):
print(self.get_result_str().strip())
else:
print(self.get_result_str().strip())
def get_result_str(self):
network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
if len(network_name) > 29:
network_name = "..." + network_name[-26:]
result_str = "%-30s" % network_name
result_str += " time %-12s" % dnnlib.util.format_time(self._eval_time)
for res in self._results:
result_str += " " + self.name + res.suffix + " "
result_str += res.fmt % res.value
return result_str
def update_autosummaries(self):
for res in self._results:
tflib.autosummary.autosummary(
"Metrics/" + self.name + res.suffix, res.value
)
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
raise NotImplementedError # to be overridden by subclasses
def _report_result(self, value, suffix="", fmt="%-10.4f"):
self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
def _report_progress(self, pcur, pmax, status_str=""):
if (
self._progress_lo is None
or self._progress_hi is None
or self._progress_max is None
):
return
t = time.time()
if (
self._progress_sec is not None
and self._progress_time is not None
and t < self._progress_time + self._progress_sec
):
return
self._progress_time = t
val = self._progress_lo + (pcur / pmax) * (
self._progress_hi - self._progress_lo
)
dnnlib.RunContext.get().update(status_str, int(val), self._progress_max)
def _get_cache_file_for_reals(self, extension="pkl", **kwargs):
all_args = dnnlib.EasyDict(
metric_name=self.name, mirror_augment=self._mirror_augment
)
all_args.update(self._dataset_args)
all_args.update(kwargs)
md5 = hashlib.md5(repr(sorted(all_args.items())).encode("utf-8"))
dataset_name = self._dataset_args.get(
"tfrecord_dir", None
) or self._dataset_args.get("h5_file", None)
dataset_name = os.path.splitext(os.path.basename(dataset_name))[0]
return os.path.join(
".stylegan2-cache",
"%s-%s-%s.%s" % (md5.hexdigest(), self.name, dataset_name, extension),
)
def _get_dataset_obj(self):
if self._dataset_obj is None:
self._dataset_obj = dataset.load_dataset(
data_dir=self._data_dir, **self._dataset_args
)
return self._dataset_obj
def _iterate_reals(self, minibatch_size):
dataset_obj = self._get_dataset_obj()
while True:
images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
if self._mirror_augment:
images = misc.apply_mirror_augment(images)
yield images
def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
while True:
latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(
latents,
None,
output_transform=fmt,
is_validation=True,
num_gpus=num_gpus,
assume_frozen=True,
)
yield images
def _get_random_labels_tf(self, minibatch_size):
return self._get_dataset_obj().get_random_labels_tf(minibatch_size)
# ----------------------------------------------------------------------------
# Group of multiple metrics.
class MetricGroup:
def __init__(self, metric_kwarg_list):
self.metrics = [
dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list
]
def run(self, *args, **kwargs):
for metric in self.metrics:
metric.run(*args, **kwargs)
def get_result_str(self):
return " ".join(metric.get_result_str() for metric in self.metrics)
def update_autosummaries(self):
for metric in self.metrics:
metric.update_autosummaries()
# ----------------------------------------------------------------------------
# Dummy metric for debugging purposes.
class DummyMetric(MetricBase):
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
_ = Gs, Gs_kwargs, num_gpus
self._report_result(0.0)
# ----------------------------------------------------------------------------
|
import json
from ansiblemetrics.import_metrics import general_metrics, playbook_metrics, tasks_metrics
from flask import abort, jsonify
from api.defect_prediction import DefectPredictor
def list_all():
"""
This function responds to a request for /api/metrics/all (GET)
    :return: a list of metric names.
"""
metrics = dict(list(general_metrics.items()) + list(playbook_metrics.items()) + list(tasks_metrics.items()))
    names = list(metrics)
    return json.dumps(names), 200
def run_all(script):
"""
This function responds to a request for /api/metrics/all (POST)
:return: a json object with metrics values.
"""
dp = DefectPredictor(script)
if not dp.isValid:
abort(400, 'Not a valid yaml file.')
# Check empty file (note: empty files are valid yaml files)
if dp.isEmpty:
abort(400, 'Empty file.')
return dp.extract_metrics(), 200
|
import pickle
import json
import argparse
import cv2
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
import matplotlib.lines as lines
from tqdm import tqdm
import _init_paths
from datasets_rel.pytorch_misc import intersect_2d, argsort_desc
from functools import reduce
from utils.boxes import bbox_overlaps
from utils_rel.boxes_rel import boxes_union
from graphviz import Digraph
def _compute_pred_matches(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh=0.5, phrdet=False):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
:param gt_triplets:
:param pred_triplets:
:param gt_boxes:
:param pred_boxes:
:param iou_thresh:
:return:
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
gt_box = gt_box.astype(dtype=np.float32, copy=False)
boxes = boxes.astype(dtype=np.float32, copy=False)
rel_iou = bbox_overlaps(gt_box[None, :], boxes)[0]
inds = rel_iou >= iou_thresh
else:
gt_box = gt_box.astype(dtype=np.float32, copy=False)
boxes = boxes.astype(dtype=np.float32, copy=False)
sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Visualization')
parser.add_argument(
'--output_dir',
help='output directory to save the testing results. If not provided, '
'defaults to [args.load_ckpt|args.load_detectron]/../test.')
parser.add_argument(
'--st',
help='Visualization Start',
default=0, type=int)
parser.add_argument(
'--num',
help='Visualization Number',
default=10, type=int)
parser.add_argument(
'--no_do_vis',
help='do not visualize',
action='store_true')
parser.add_argument(
'--rel_class_recall', help='rel class recall.',
action='store_true')
parser.add_argument(
'--phrdet', help='use phrdet.',
action='store_true')
parser.add_argument(
'--dataset',
        help='Dataset name (e.g. ag or vidvrd)',
default='ag', type=str)
parser.add_argument(
'--filename',
help='Visualization file',
default='rel_detections_topk', type=str)
parser.add_argument(
'--cnt_lim',
        help='Maximum number of frames to visualize',
default=10, type=int)
parser.add_argument(
'--lim',
        help='Only visualize frames with more than this many GT relations',
default=0, type=int)
args = parser.parse_args()
if not os.path.exists(os.path.join(args.output_dir, 'vis')):
os.mkdir(os.path.join(args.output_dir, 'vis'))
saved_path = os.path.join(args.output_dir, 'vis')
topk_dets_f = os.path.join(args.output_dir, args.filename+'.pkl')
with open(topk_dets_f, 'rb') as f:
res = pickle.load(f)
f.close()
with open(os.path.join('data', args.dataset,'annotations/objects.json'), 'r') as f:
obj_list = json.load(f)
f.close()
with open(os.path.join('data', args.dataset, 'annotations/predicates.json'), 'r') as f:
rel_list = json.load(f)
f.close()
print('Loading test_videos_list.json')
if args.dataset.find('ag') >= 0:
val_map_list_path = os.path.join('data', 'ag', 'annotations/test_videos_list.json')
with open(val_map_list_path, 'r') as f:
val_map_list = json.load(f)
f.close()
elif args.dataset.find('vidvrd_train') >= 0:
val_map_list_path = os.path.join('data', 'vidvrd', 'annotations/train_fname_list.json')
with open(val_map_list_path, 'r') as f:
val_map_list = json.load(f)
f.close()
val_map_list_ = set()
for i, v in enumerate(val_map_list):
ll = v.split('/')
if len(ll) >= 2:
val_map_list_.add(ll[-2].split('.')[-2])
val_map_list = list(val_map_list_)
elif args.dataset.find('vidvrd') >= 0:
val_map_list_path = os.path.join('data', 'vidvrd', 'annotations/val_fname_list.json')
with open(val_map_list_path, 'r') as f:
val_map_list = json.load(f)
f.close()
val_map_list_ = set()
for i, v in enumerate(val_map_list):
ll = v.split('/')
if len(ll) >= 2:
val_map_list_.add(ll[-2].split('.')[-2])
val_map_list = list(val_map_list_)
else:
raise Exception
print('test_videos_list.json loaded.')
all_gt_cnt = 0
video_gt_cnt = [{} for r in range(len(rel_list))]
video_recalls = [{} for r in range(len(rel_list))]
video_len = len(val_map_list)
for j, i in enumerate(val_map_list):
file_real_name = i + '.mp4'
for r in range(len(rel_list)):
video_gt_cnt[r][file_real_name] = 0.
video_recalls[r][file_real_name] = {10:0., 20: 0., 50: 0., 100: 0.}
edge_width = 3
font_size = 18
rel_class_recalls = [{10:0, 20: 0, 50: 0, 100: 0} for i in range(len(rel_list))]
tot_recalls = [{10:0, 20: 0, 50: 0, 100: 0} for i in range(len(rel_list))]
rel_class_gt_num = [0 for i in range(len(rel_list))]
recalls = [10, 20, 50, 100]
print('total {} images. '.format(len(res)))
args.num = min(args.num, len(res))
print('Number is {}. '.format(args.num))
cnt = 0
for res_i in res[args.st:args.num]:
r_ans = {10:0, 20: 0, 50: 0, 100: 0}
r_score = {10:0, 20: 0, 50: 0, 100: 0}
f_name = res_i['image']
det_boxes_s_top = res_i['det_boxes_s_top']
det_boxes_o_top = res_i['det_boxes_o_top']
det_labels_s_top = res_i['det_labels_s_top']
det_labels_p_top = res_i['det_labels_p_top']
det_labels_o_top = res_i['det_labels_o_top']
det_scores_top = res_i['det_scores_top']
gt_boxes_sbj = res_i['gt_boxes_sbj']
gt_boxes_obj = res_i['gt_boxes_obj']
gt_labels_sbj = res_i['gt_labels_sbj']
gt_labels_obj = res_i['gt_labels_obj']
gt_labels_prd = res_i['gt_labels_prd']
mm = res_i['image'].split('/')
file_real_name = mm[-2]
cur_frame_id = int(mm[-1].split('.')[0])
gt_boxes_so = np.hstack((gt_boxes_sbj, gt_boxes_obj))
gt_labels_spo = np.vstack((gt_labels_sbj, gt_labels_prd, gt_labels_obj)).transpose()
det_labels_spo_top = np.vstack((det_labels_s_top, det_labels_p_top, det_labels_o_top)).transpose()
if args.rel_class_recall:
for i in gt_labels_prd:
rel_class_gt_num[i] += 1
video_gt_cnt[i][file_real_name] += 1.
det_boxes_so_top = np.hstack((det_boxes_s_top, det_boxes_o_top))
pred_to_gt = _compute_pred_matches(
gt_labels_spo, det_labels_spo_top,
gt_boxes_so, det_boxes_so_top,
phrdet=args.phrdet)
gt_obj_set = set()
gt_tri_info_set = set()
for i in range(len(gt_boxes_sbj)):
tri_info = []
tri_info += list(gt_boxes_sbj[i, :].astype(np.int))
tri_info += [gt_labels_sbj[i].astype(np.int), ]
tri_info += list(gt_boxes_obj[i, :].astype(np.int))
tri_info += [gt_labels_obj[i].astype(np.int), ]
tri_info += [gt_labels_prd[i].astype(np.int), ]
gt_tri_info_set.add(tuple(tri_info))
if tuple(list(gt_boxes_sbj[i, :].astype(np.int)) + [gt_labels_sbj[i].astype(np.int), ]) not in gt_obj_set:
gt_obj_set.add(tuple(list(gt_boxes_sbj[i, :].astype(np.int)) + [gt_labels_sbj[i].astype(np.int), ]))
if tuple(list(gt_boxes_obj[i, :].astype(np.int)) + [gt_labels_obj[i].astype(np.int), ]) not in gt_obj_set:
gt_obj_set.add(tuple(list(gt_boxes_obj[i, :].astype(np.int)) + [gt_labels_obj[i].astype(np.int), ]))
for k in recalls:
gt_score = [0 for i in range(len(gt_boxes_sbj))]
if len(pred_to_gt):
match = reduce(np.union1d, pred_to_gt[:k])
match = np.array(match, dtype=np.int)
if args.rel_class_recall:
for gt_i in match:
rel_class_recalls[gt_labels_prd[gt_i]][k] += 1
video_recalls[gt_labels_prd[gt_i]][file_real_name][k] += 1.
for p_id, pred_i in enumerate(pred_to_gt[:k]):
for gt_id in pred_i:
gt_score[gt_id] = max(gt_score[gt_id], det_scores_top[p_id])
else:
match = []
gt_score = []
r_ans[k] = match
r_score[k] = gt_score
if len(gt_labels_prd) > args.lim and cnt <= args.cnt_lim:
if not args.no_do_vis:
saved_name = f_name.split('/')[-2:]
saved_name = saved_name[0] + '/' + saved_name[1]
img = mpimg.imread(f_name)
cnt += 1
for k in recalls:
if k < 20: continue
if k > 50: continue
preserve_set_obj = set()
preserve_set_rel = set()
rec_pos = {}
fig = plt.figure(figsize=(18, 12))
ax = plt.gca()
plt.imshow(img)
plt.axis('off')
det_title = plt.title('det')
plt.setp(det_title, color='b')
for gt_id in r_ans[k]:
x, y, x1, y1 = gt_boxes_sbj[gt_id].astype(np.int)
s_name = obj_list[gt_labels_sbj[gt_id]]
s_cx, s_cy = (x+x1)//2, (y+y1)//2
srect = plt.Rectangle((x,y),x1-x,y1-y, fill=False, edgecolor='b', linewidth=3)
ax.add_patch(srect)
#ax.text(s_cx, s_cy,
ax.text(x, y,
s_name,
fontsize=font_size,
color='white',
bbox=dict(facecolor='orange', alpha=0.5, pad=0, edgecolor='none'))
tri_info = [x, y, x1, y1, gt_labels_sbj[gt_id].astype(np.int)]
if tuple([x, y, x1, y1, gt_labels_sbj[gt_id].astype(np.int)]) not in preserve_set_obj:
preserve_set_obj.add(tuple([x, y, x1, y1, gt_labels_sbj[gt_id].astype(np.int)]))
x, y, x1, y1 = gt_boxes_obj[gt_id].astype(np.int)
o_name = obj_list[gt_labels_obj[gt_id]]
o_cx, o_cy = (x+x1)//2, (y+y1)//2
orect = plt.Rectangle((x,y),x1-x,y1-y, fill=False, edgecolor='b', linewidth=3)
ax.add_patch(orect)
ax.text(x, y,
o_name,
fontsize=font_size,
color='white',
bbox=dict(facecolor='blue', alpha=0.5, pad=0, edgecolor='none'))
p_name = rel_list[gt_labels_prd[gt_id].astype(np.int)]+ ' ' + str(r_score[k][gt_id])
tri_info += [x, y, x1, y1, gt_labels_obj[gt_id].astype(np.int)]
if tuple([x, y, x1, y1, gt_labels_obj[gt_id].astype(np.int)]) not in preserve_set_obj:
preserve_set_obj.add(tuple([x, y, x1, y1, gt_labels_obj[gt_id].astype(np.int)]))
tri_info += [gt_labels_prd[gt_id].astype(np.int), ]
preserve_set_rel.add(tuple(tri_info))
rel_l = lines.Line2D([s_cx, o_cx], [s_cy, o_cy], color='purple', linewidth=3)
ax.add_line(rel_l)
lx, ly = s_cx + 8*(o_cx - s_cx) / 9, s_cy + 8*(o_cy - s_cy) / 9
if (lx, ly) in rec_pos:
rec_pos[(lx, ly)] += 10
else:
rec_pos[(lx, ly)] = 0
d = rec_pos[(lx, ly)]
ax.text(lx, ly + d,
p_name,
fontsize=font_size,
color='white',
bbox=dict(facecolor='purple', alpha=0.5, pad=0, edgecolor='none'))
dot = Digraph(filename=(saved_name + '_' +str(k)).replace('/', '_'))
dot.body.append('size="16,16"')
dot.body.append('rankdir="LR"')
#dot.node_attr.update(style='filled')
map_obj_node = dict()
pn = 0
for gt_obj in gt_obj_set:
ol = obj_list[gt_obj[-1].astype(np.int)]
if gt_obj in preserve_set_obj:
dot.node(str(gt_obj), str(ol), color='green', shape='box')
else:
dot.node(str(gt_obj), str(ol), color='red', shape='box')
map_obj_node[gt_obj] = pn
pn += 1
for gt_tri_info in gt_tri_info_set:
sn, on, pn = gt_tri_info[4], gt_tri_info[9], gt_tri_info[10]
sx, sy, sx1, sy1 = gt_tri_info[0],gt_tri_info[1],gt_tri_info[2],gt_tri_info[3]
ox, oy, ox1, oy1 = gt_tri_info[5],gt_tri_info[6],gt_tri_info[7],gt_tri_info[8]
st, ed = str(tuple([sx, sy, sx1, sy1, sn])), str(tuple([ox, oy, ox1, oy1, on]))
rl = rel_list[gt_tri_info[-1].astype(np.int)].replace('_', ' ')
#if gt_tri_info in preserve_set_rel:
# dot.node(str(gt_tri_info), rl, color='lightblue2')
#else:
# dot.node(str(gt_tri_info), rl, color='red')
#dot.edge(st, str(gt_tri_info))
#dot.edge(str(gt_tri_info), ed)
if gt_tri_info in preserve_set_rel:
dot.edge(st, ed, rl, color='green')
else:
dot.edge(st, ed, rl, fontcolor='red', color='red')
dot.render(os.path.join(
saved_path,
(saved_name + '_' +str(k)).replace('/', '_')
), cleanup=True)
saved_file_name = (saved_name + '_' +str(k)+'.png').replace('/', '_')
plt.savefig(os.path.join(saved_path, saved_file_name), bbox_inches='tight')
plt.close(fig)
if args.rel_class_recall:
for r in range(len(rel_list)):
for file_real_name, v in video_recalls[r].items():
for k, vals in v.items():
video_recalls[r][file_real_name][k] = float(vals) / (float(video_gt_cnt[r][file_real_name]) + 1e-12)
tot_recalls[r][k] += video_recalls[r][file_real_name][k]
for k in tot_recalls[r]:
tot_recalls[r][k] = float(tot_recalls[r][k]) / (float(video_len) + 1e-12)
print('=========== ' + 'Image_ver_rel_recalls' + ' ===========')
mr_list = []
for k in recalls:
print('=========== {} ==========='.format(k))
mrr = float(0.)
for i, gt_rel_num in enumerate(rel_class_gt_num):
rel_class_recalls[i][k] = float(rel_class_recalls[i][k]) / (float(gt_rel_num) + 1e-12)
print('%s: %.2f' % (rel_list[i], 100 * rel_class_recalls[i][k]))
mrr += rel_class_recalls[i][k]
mr_list.append((k, 100*mrr/len(rel_class_gt_num)))
for i in mr_list:
print('mR@{}: {}'.format(i[0], i[1]))
print('=========== ' + 'Video_ver_rel_recalls' + ' ===========')
mr_v_list = []
for k in recalls:
mrr = float(0.)
for i, gt_rel_num in enumerate(rel_class_gt_num):
#print('%s: %.2f' % (rel_list[i], 100 * tot_recalls[i][k]))
mrr += tot_recalls[i][k]
mr_v_list.append((k, 100*mrr/len(rel_class_gt_num)))
for i in mr_v_list:
print('mR@{}: {}'.format(i[0], i[1]))
|
from flask import Flask, render_template, request
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
app = Flask(__name__)
def get_spreadsheet_data():
json_key = json.load(open('spreadsheet_credentials.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gc = gspread.authorize(credentials)
sh = gc.open("Simple Sheet")
worksheet = sh.sheet1
data = worksheet.get_all_values()
return data
@app.route("/")
def index():
data = get_spreadsheet_data()
heroes_list = []
    print(data)
for row in data:
hero = row[0]
heroes_list.append(hero)
    print(heroes_list)
return render_template("index.html", heroes_list=heroes_list)
@app.route("/submit", methods=['POST'])
def submit():
data = get_spreadsheet_data()
    print(request.form)
term = request.form.getlist('hero')
find_hero = term[0]
    print(find_hero)
message = ""
for row in data:
if find_hero == row[0]:
hero = row[0]
dessert = row[1]
message = "{} loves {}.".format(hero, dessert)
if message == "":
message = "Sorry, hero not found."
return render_template("submit.html", message=message )
if __name__=='__main__':
app.run(debug=True)
|
from typing import Optional
from pydantic import BaseModel
class CodeBase(BaseModel):
    key: str
    code: Optional[str] = None
class CodeCreate(CodeBase):
pass
class Code(CodeBase):
id: int
class Config:
orm_mode = True
class TransmitResponse(BaseModel):
success: bool
label: int = -1
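# Illustrative only (assumption: not part of the original schemas). Shows direct
# construction; with orm_mode enabled, Code.from_orm(row) also works for an ORM
# row exposing id, key and code attributes.
def _demo_schemas() -> TransmitResponse:
    created = CodeCreate(key="abc123", code="print('hello')")
    stored = Code(id=1, **created.dict())
    return TransmitResponse(success=True, label=stored.id)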
|
import os
from behave import when, then, given
from helpers import aws_helper, emr_step_generator
@when("An emrfs '{step_type}' step is started on the ingest-hbase EMR cluster")
def step_impl(context, step_type):
s3_prefix = (
context.ingest_hbase_emrfs_prefix_override
if context.ingest_hbase_emrfs_prefix_override
else context.ingest_hbase_emr_cluster_root_s3_root_directory
)
context.ingest_hbase_emr_job_step_id = emr_step_generator.generate_emrfs_step(
context.ingest_hbase_emr_cluster_id,
context.ingest_hbase_emr_cluster_root_s3_bucket_id,
s3_prefix,
step_type,
context.ingest_hbase_emrfs_arguments,
)
@when("A script '{step_type}' step is started on the ingest-hbase EMR cluster")
def step_impl(context, step_type):
script_name = None
arguments = None
if step_type == "major compaction":
script_name = "/var/ci/major_compaction_script.sh"
elif step_type == "download scripts":
script_name = "/var/ci/download_scripts.sh"
elif step_type == "generate snapshots":
script_name = "/var/ci/snapshot_tables_script.sh"
arguments = context.ingest_hbase_snapshot_tables_override
elif step_type == "hbck":
script_name = "/var/ci/hbck_details_script.sh"
arguments = context.ingest_hbase_hbck_arguments
if script_name:
context.ingest_hbase_emr_job_step_id = emr_step_generator.generate_script_step(
context.ingest_hbase_emr_cluster_id,
script_name,
step_type,
arguments,
)
@given("A bash '{step_type}' step is started on the ingest-hbase EMR cluster")
@when("A bash '{step_type}' step is started on the ingest-hbase EMR cluster")
def step_impl(context, step_type):
bash_script = None
if step_type == "drop all tables":
bash_script = "echo -e \"drop_all '.*'\\ny\" | hbase shell"
elif step_type == "disable all tables":
bash_script = "hbase shell <<< list | egrep '^[a-z]' | grep -v '^list' | while read; do echo -e \"disable '$REPLY'\"; done | hbase shell"
elif step_type == "download cdl script":
bash_script = f"aws s3 cp {context.cdl_run_script_s3_url} /opt/emr/run_cdl.sh && chmod +x /opt/emr/run_cdl.sh"
elif step_type == "download cdl input split script":
bash_script = f"aws s3 cp {context.cdl_split_inputs_s3_url} /opt/emr/split_inputs.pl && chmod +x /opt/emr/split_inputs.pl"
elif step_type == "download hdl script":
bash_script = f"aws s3 cp {context.hdl_run_script_s3_url} /opt/emr/run_hdl.sh && chmod +x /opt/emr/run_hdl.sh"
elif step_type == "download create tables script":
bash_script = f"aws s3 cp {context.create_hbase_tables_script_url} /opt/emr/create_hbase_tables.sh && chmod +x /opt/emr/create_hbase_tables.sh"
elif step_type == "disable cleaner chore":
bash_script = (
"echo $'cleaner_chore_enabled; cleaner_chore_switch false' | hbase shell"
)
elif step_type == "enable cleaner chore":
bash_script = (
"echo $'cleaner_chore_enabled; cleaner_chore_switch true' | hbase shell"
)
elif step_type == "disable balancer":
bash_script = "echo $'balance_switch false' | hbase shell"
elif step_type == "enable balancer":
bash_script = "echo $'balance_switch true' | hbase shell"
if bash_script:
context.ingest_hbase_emr_job_step_id = emr_step_generator.generate_bash_step(
context.ingest_hbase_emr_cluster_id,
bash_script,
step_type,
)
@given("The '{step_type}' step is executed successfully")
@when("The '{step_type}' step is executed successfully")
def step_impl(context, step_type):
execution_state = aws_helper.poll_emr_cluster_step_status(
context.ingest_hbase_emr_job_step_id, context.ingest_hbase_emr_cluster_id
)
if execution_state != "COMPLETED":
raise AssertionError(
f"'{step_type}' step failed with final status of '{execution_state}'"
)
|
"""
This script will go through the commit logs for projects we don't have trace
links for [Moreno et al] and do our best to guess at them.
"""
import dulwich.repo
import re
import csv
from src.main import load_projects, load_repos, load_goldsets
import os.path
from src.utils import clone
projects = load_projects()
for project in projects:
dest_fn = os.path.join(project.full_path, 'issue2git.csv')
if os.path.exists(dest_fn):
continue
if project.name == 'eclipse':
continue
repos = load_repos(project)
golds = load_goldsets(project)
ids = set(i for i,g in golds)
i2g = dict.fromkeys(ids)
for k in i2g:
i2g[k] = set()
for repo in repos:
        #b = re.compile(r'BOOKKEEPER-([\d]+):')
        #b = re.compile(r'ZOOKEEPER-([\d]+)')
        b = re.compile(r'%s-([\d]+)' % project.name.upper())
for entry in repo.get_walker():
a = entry.commit
for issue in b.findall(a.message):
if issue in i2g:
i2g[issue].add(a.id)
with open(dest_fn, 'w') as f:
w = csv.writer(f)
for issue, gits in i2g.items():
if gits:
w.writerow([issue] + list(gits))
|
from flask import json, jsonify
from datetime import datetime
laiks = datetime.now()
LOGFAILS = "chats.txt"
def lasi():
chata_rindas = []
with open(LOGFAILS, "r", encoding="utf-8") as f:
for rinda in f:
chata_rindas.append(json.loads(rinda))
return jsonify({"chats": chata_rindas})
def pieraksti_zinju(dati):
with open(LOGFAILS, "a", newline="", encoding="utf-8") as f:
f.write(json.dumps(dati["chats"]) + "\n")
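# Illustrative round-trip sketch (assumption: not part of the original module).
# pieraksti_zinju appends one JSON line per message; lasi needs a Flask
# application context because it returns jsonify(...).
def _demo_round_trip():
    from flask import Flask
    demo_app = Flask(__name__)
    pieraksti_zinju({"chats": {"user": "anna", "text": "sveiki"}})
    with demo_app.app_context():
        return lasi()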
|
import allure
import pytest
from app import logger
from app.cli import pytestOption
from app.core.appium import AppiumService
from app.loader import Loader
procs = []
def pytest_addoption(parser):
op = pytestOption(parser)
    # config file
    op.add_config_option()
    # target device: device name; 'ios'/'android' selects the default ios/android device, otherwise the 'default' device is used
    op.add_device_option()
    # cases (modules) to run: ios/android/bunny/...
    op.add_case_option()
    # log settings
# op.add_log_option()
# output
op.add_output_option()
# appium
op.add_appium_option()
op.add_attachment_option()
def pytest_sessionstart(session):
from app import Logger
Logger.init_logging(
log_path=session.config.getoption('--log-file')
)
def pytest_sessionfinish(session, exitstatus):
normal = False
for p in procs:
normal = p.stop()
if procs and not normal:
exitstatus = 1
# -----------------------
# ----- config file -----
# -----------------------
@pytest.fixture(scope='session', autouse=True)
def global_config(request):
return request.config.getoption('--global-config')
@pytest.fixture(scope='session', autouse=True)
def device_config(request):
return request.config.getoption('--device-config')
# ----------------------
# ----- start up -----
# ----------------------
@pytest.fixture(scope='session', autouse=True)
def device(request, device_config, output_dir):
devname = request.config.getoption('--device')
devconfig = Loader.load(device_config)
devplatform = request.config.getoption('--platform')
device_info = devconfig[devname]
default_caps = Loader().get_default_caps().get(devplatform) or {}
default_caps.update(device_info['caps'])
device_info['caps'] = default_caps
device_info['port'] = request.config.getoption('--port')
device_info['bp'] = request.config.getoption('--bp')
device_info['host'] = request.config.getoption('--service-address')
device_info['output_dir'] = output_dir
if device_info['port']:
ap = AppiumService(
device=device_info['caps'],
ports=[device_info['port'], device_info['bp']]
)
ap.start()
procs.append(ap)
device_info['caps'] = ap.device
device_info['host'] = 'http://127.0.0.1:{}/wd/hub'.format(ap.port)
else:
if device_info['host'] and \
not device_info['host'].startswith('http') and \
not device_info['host'].endswith('/wd/hub'):
device_info['host'] = 'http://{}/wd/hub'.format(device_info['host'])
if devplatform == 'Android' and not device_info['caps'].get('systemPort'):
device_info['caps']['systemPort'] = request.config.getoption('--system-port')
elif devplatform == 'iOS' and not device_info['caps'].get('wdaLocalPort'):
device_info['caps']['wdaLocalPort'] = device_info['bp']
logger.debug('{} caps: {}'.format(devname, device_info['caps']))
return device_info
@pytest.fixture(scope='session', autouse=True)
def output_dir(request):
return request.config.getoption('--output-dir')
@pytest.fixture(scope='session', autouse=True)
def options(request):
return request.config.option
# ----------------------
# ----- attachment -----
# ----------------------
@pytest.fixture(scope='session', autouse=True)
def disable_screenshot(request):
return request.config.getoption('--disable-screenshot')
def pytest_exception_interact(node, call, report):
try:
if not node.funcargs.get('disable_screenshot'):
if getattr(node.instance, 'driver'):
driver = node.instance.driver
allure.attach(driver.get_screenshot_as_png(), 'Fail screenshot', allure.attachment_type.PNG)
except AttributeError:
pass
|
from pytest import mark
from leetcode.uncommon_words_from_two_sentences import Solution
from . import read_csv
@mark.parametrize('a, b, expect', read_csv(__file__))
def test_uncommon_from_sentences(a, b, expect):
result = Solution().uncommonFromSentences(a, b)
assert set(result) == eval(expect)
|
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import print_function
from __future__ import division
import mock
import os
from tests.compat import unittest
from haxor_news.hacker_news import HackerNews
from haxor_news.settings import freelancer_post_id, who_is_hiring_post_id
from tests.mock_hacker_news_api import MockHackerNewsApi
class ConfigTest(unittest.TestCase):
def setUp(self):
self.hn = HackerNews()
self.hn.hacker_news_api = MockHackerNewsApi()
self.limit = len(self.hn.hacker_news_api.items)
self.valid_id = 0
self.invalid_id = 9000
self.query = 'foo'
def test_config(self):
expected = os.path.join(os.path.abspath(os.environ.get('HOME', '')),
self.hn.config.CONFIG)
assert self.hn.config.get_config_path(self.hn.config.CONFIG) == expected
@mock.patch('haxor_news.config.Config.save_cache')
def test_clear_item_cache(self, mock_save_cache):
item_ids = self.hn.config.item_ids
self.hn.config.clear_item_cache()
assert self.hn.config.item_ids == item_ids
assert self.hn.config.item_cache == []
mock_save_cache.assert_called_with()
def test_load_hiring_and_freelance_ids(self):
self.hn.config.load_hiring_and_freelance_ids()
assert self.hn.config.hiring_id != who_is_hiring_post_id
assert self.hn.config.freelance_id != freelancer_post_id
def test_load_hiring_and_freelance_ids_invalid_url(self):
self.hn.config.load_hiring_and_freelance_ids(url='https://example.com')
assert self.hn.config.hiring_id == who_is_hiring_post_id
assert self.hn.config.freelance_id == freelancer_post_id
os.remove('./downloaded_settings.py')
def test_load_hiring_and_freelance_ids_from_cache_or_defaults(self):
self.hn.config.load_hiring_and_freelance_ids_from_cache_or_defaults()
assert self.hn.config.hiring_id == who_is_hiring_post_id
assert self.hn.config.freelance_id == freelancer_post_id
def test_save_and_load_item_ids(self):
self.hn.config.item_ids = [0, 1, 2]
self.hn.config.item_cache = [3, 4, 5]
self.hn.config.save_cache()
item_ids = self.hn.config.item_ids
assert item_ids == [0, 1, 2]
item_cache = self.hn.config.item_cache
assert item_cache == [3, 4, 5]
@mock.patch('haxor_news.hacker_news.HackerNews.view')
@mock.patch('haxor_news.config.Config.clear_item_cache')
def test_view_comment_clear_cache(self, mock_clear_item_cache, mock_view):
index = 0
comments = False
comments_recent = False
comments_unseen = True
comments_hide_non_matching = False
comments_clear_cache = True
browser = False
self.hn.view_setup(
index, self.query, comments, comments_recent,
comments_unseen, comments_hide_non_matching,
comments_clear_cache, browser)
comments_expected = True
mock_clear_item_cache.assert_called_with()
mock_view.assert_called_with(
index, self.hn.QUERY_UNSEEN, comments_expected,
comments_hide_non_matching, browser)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from federatedml.model_selection.stepwise.hetero_stepwise import HeteroStepwise
from federatedml.util import LOGGER
from federatedml.util import consts
def _get_stepwise_param(model):
model.model_param.stepwise_param.role = model.role
model.model_param.stepwise_param.mode = model.mode
return model.model_param.stepwise_param
def run(model, train_data, validate_data=None):
if not model.need_run:
return train_data
if model.mode == consts.HETERO:
step_obj = HeteroStepwise()
else:
raise ValueError("stepwise currently only support Hetero mode.")
stepwise_param = _get_stepwise_param(model)
step_obj.run(stepwise_param, train_data, validate_data, model)
pred_result = HeteroStepwise.predict(train_data, model)
LOGGER.info("Finish running Stepwise")
return pred_result
|
# Copyright (c) 2020 HAW Hamburg
#
# This file is subject to the terms and conditions of the MIT License. See the
# file LICENSE in the top level directory for more details.
# SPDX-License-Identifier: MIT
"""Simple helpers that can be useful with mm_pal."""
import serial.tools.list_ports
def serial_connect_wizard(if_obj, **kwargs):
"""Console based wizard to help connect to a serial port.
Args:
if_obj (obj): Interface class to instantiate.
        **kwargs: Keyword args to pass to the instantiation of the if_obj.
``port`` keyword is overwritten with selected serial port.
Return:
(obj): Instantiated if_obj.
Raises:
ConnectionError: No connections available.
"""
serial_devices = sorted(serial.tools.list_ports.comports())
if len(serial_devices) == 0:
raise ConnectionError("Could not find any available devices")
if len(serial_devices) == 1:
print(f'Connected to {serial_devices[0][0]}')
kwargs['port'] = serial_devices[0][0]
return if_obj(**kwargs)
print('Select a serial port:')
max_num = 0
for i, s_dev in enumerate(serial_devices):
print(f"{i}: {s_dev}")
max_num = i
s_num = -1
while s_num < 0 or max_num < s_num:
try:
s_num = int(input("Selection(number): "))
except ValueError:
print("Invalid selection!")
kwargs['port'] = serial_devices[int(s_num)][0]
return if_obj(**kwargs)
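# Illustrative usage sketch (assumption: not part of mm_pal). _DemoIface is a
# hypothetical interface class; any class that accepts a ``port`` keyword argument works.
def _demo_serial_connect_wizard():
    import serial
    class _DemoIface:
        def __init__(self, port, baudrate=115200, timeout=1):
            # Open the selected port with pyserial.
            self.serial = serial.Serial(port, baudrate=baudrate, timeout=timeout)
    # The wizard fills in kwargs['port'] with the selected device.
    return serial_connect_wizard(_DemoIface, baudrate=115200)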
|
# Example:
# >>> nest(range(12),[2,2,3])
# [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]
import itertools
def nest(flat,levels):
    '''Turn a flat list into a nested list, with a specified number of lists per nesting level.
    Excess elements are silently ignored.'''
    return next(_nest(flat,levels))
def _nest(flat,levels):
if levels:
it = _nest(flat,levels[1:])
while 1:
yield list(itertools.islice(it,levels[0]))
else:
for d in flat:
yield d
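# Quick self-check of the example given in the comment at the top (illustrative only).
if __name__ == "__main__":
    assert nest(range(12), [2, 2, 3]) == [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]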
|
#Written by Owain using the proper OSRS quest dialogues
#13/08/18
from game.content.quest import Quest
from game.content.quest import QuestHandler
from game.content.quest import QuestReward
from game.item import ItemAssistant
from game.content.dialogueold import DialogueHandler
from game.content.skilling import Skilling
from game.content.miscellaneous import PlayerMiscContent
from core import GameType
stake = 1549
garlic = 1550
hammer = 2347
beer = 1917
def configure_quest_7():
quest_name = 'Vampire Slayer'
quest_stages = 5
Quest.addQuest(quest_name, quest_stages)
def quest_button_7(player):
quest_name = 'Vampire Slayer'
quest_stage = player.getQuest(7).getStage()
if quest_stage == 0:
QuestHandler.startInfo(player, quest_name, "I can start this quest by speaking to @dre@Morgan@dbl@ who is in", "@dre@Draynor Village@dbl@.", "Must be able to kill a level 34 @dre@Vampire@dbl@", "This quest takes roughly @dre@10@dbl@ minutes to complete.")
elif quest_stage == 1 or quest_stage == 2 or quest_stage == 3:
QuestHandler.startInfo(player, quest_name, "I should travel to the @dre@Blue Moon Inn@dbl@ in @dre@Varrock@dbl@ and", "speak to @dre@Dr Harlow@dbl@.", "", "")
elif quest_stage == 4:
QuestHandler.startInfo(player, quest_name, "I need to go to the basement in @dre@Draynor Manor@dbl@ and " , "kill the @dre@Vampire@dbl@!", "", "")
elif quest_stage == 5:
QuestHandler.startInfo(player, quest_name, "I have completed @dre@Vampire Slayer@dbl@.", "", "", "")
#Morgan
def first_click_npc_3479(player):
if GameType.isOsrsPvp() == False:
quest_stage = player.getQuest(7).getStage()
if quest_stage == 0:
player.getDH().sendDialogues(3500)
elif quest_stage == 5:
player.getDH().sendDialogues(3033)
else:
player.getDH().sendDialogues(3030)
#Dr Harlow
def first_click_npc_3480(player):
quest_stage = player.getQuest(7).getStage()
if quest_stage == 1 or quest_stage == 2:
player.getDH().sendDialogues(3008)
elif quest_stage == 3:
player.getDH().sendDialogues(3019)
else:
player.getDH().sendPlayerChat("He looks rather drunk, I'd probably be best to leave" , "him alone.", 610)
#Draynor mansion stairs
def first_click_object_2616(player):
player.getPA().movePlayer(3077, 9771, 0);
def first_click_object_2617(player):
player.getPA().movePlayer(3115, 3356, 0);
#Morgan's house stairs
def first_click_object_15645(player):
player.getPA().movePlayer(3102, 3267, 1);
def first_click_object_15648(player):
player.getPA().movePlayer(3098, 3267, 0);
#Taking garlic in Morgan's house
def first_click_object_2612(player):
PlayerMiscContent.takeGarlic(player)
#Summoning the vampire
def first_click_object_2614(player):
quest_stage = player.getQuest(7).getStage()
if quest_stage == 4:
if ItemAssistant.hasItemInInventory(player, garlic) and ItemAssistant.hasItemInInventory(player, stake) and ItemAssistant.hasItemInInventory(player, hammer):
NpcHandler.spawnNpc(player, 3481, 3078, 9774, 0, True, True);
else:
player.getPA().sendMessage("You should get a garlic clove, a stake and a hammer before doing this.")
else:
player.getPA().sendMessage("Nothing interesting happens.")
def kill_npc_3481(player):
player.getQuest(7).setStage(5)
QuestHandler.updateAllQuestTab(player);
amount = 4825 * Skilling.getBaseExperience(player, ServerConstants.ATTACK)
reward = QuestReward("3 Quest Points", ""+ str(amount) +" Attack XP")
player.completeQuest("Vampire Slayer", reward, stake)
Skilling.addSkillExperience(player, 4825, ServerConstants.ATTACK, False)
#Start of main dialogue
def chat_3500(player):
player.getDH().sendNpcChat("Please please help us, bold adventurer!", 596)
player.nextDialogue = 3001;
def chat_3001(player):
player.getDH().sendPlayerChat("What's the problem?", 591)
player.nextDialogue = 3002;
def chat_3002(player):
player.getDH().sendNpcChat("Our little village has been dreadfully ravaged by an evil" , "vampire! He lives in the basement of the manor to the" , "north, we need someone to get rid of him once and for" , "all!", 610)
player.nextDialogue = 3003;
def chat_3003(player):
player.getDH().sendOptionDialogue("No, vampires are scary!", "Ok, I'm up for an adventure.", 3004)
def option_one_3004(player):
player.getDH().sendPlayerChat("No, vampires are scary!", 591)
def option_two_3004(player):
player.getDH().sendPlayerChat("Ok, I'm up for an adventure.", 591)
player.nextDialogue = 3005;
def chat_3005(player):
player.getDH().sendNpcChat("I think first you should seek help. I have a friend who", "is a retired vampire hunter, his name is Dr. Harlow. He " , "may be able to give you some tips. He can normally be" , "found in the Blue Moon Inn in Varrock, he's a bit of", 591)
player.nextDialogue = 3006;
def chat_3006(player):
player.getDH().sendNpcChat("an old soak these days. Mention his old friend Morgan, " , "I'm sure he wouldn't want me killed by a vampire.", 591)
player.nextDialogue = 3007;
def chat_3007(player):
player.getDH().sendPlayerChat("I'll look him up then.", 591)
player.getQuest(7).setStage(1)
QuestHandler.updateAllQuestTab(player);
#Dr Harlow dialogues
def chat_3008(player):
player.getDH().sendNpcChat("Buy me a drrink pleassh...", 591)
quest_stage = player.getQuest(7).getStage()
if quest_stage == 2 and ItemAssistant.hasItemInInventory(player, 1917):
player.nextDialogue = 3016;
else:
player.nextDialogue = 3009;
def chat_3009(player):
player.getDH().sendOptionDialogue("No, you've had enough.", "Morgan needs your help!", 3010)
def option_one_3010(player):
player.getDH().sendPlayerChat("No, you've had enough.", 591)
def option_two_3010(player):
player.getDH().sendPlayerChat("Morgan needs your help!", 591)
player.nextDialogue = 3011;
def chat_3011(player):
player.getDH().sendNpcChat("Morgan you shhay..?", 591)
player.nextDialogue = 3012;
def chat_3012(player):
player.getDH().sendPlayerChat("His village is being terrorised by a vampire! He told me" , "to ask you about how I can stop it.", 597)
player.nextDialogue = 3013;
def chat_3013(player):
player.getDH().sendNpcChat("Buy me a beer... then I'll teash you what you need to" , "know...", 591)
player.nextDialogue = 3014;
def chat_3014(player):
player.getDH().sendPlayerChat("But this is your friend Morgan we're talking about!", 591)
player.nextDialogue = 3015;
def chat_3015(player):
player.getDH().sendNpcChat("Buy ush a drink anyway...", 591)
player.getQuest(7).setStage(2)
QuestHandler.updateAllQuestTab(player);
#If a player has a beer
def chat_3016(player):
player.getDH().sendPlayerChat("Here you go.", 591)
player.nextDialogue = 3017;
def chat_3017(player):
player.getDH().sendItemChat("", "You give a beer to Dr Harlow.", 1917, 200, 14, 0)
ItemAssistant.deleteItemFromInventory(player, beer, 1)
player.getQuest(7).setStage(3)
QuestHandler.updateAllQuestTab(player);
player.nextDialogue = 3018;
def chat_3018(player):
player.getDH().sendNpcChat("Cheersh matey...", 591)
player.nextDialogue = 3019;
def chat_3019(player):
player.getDH().sendPlayerChat("So tell me how to kill vampires then.", 591)
player.nextDialogue = 3020;
def chat_3020(player):
player.getDH().sendNpcChat("Yesh yesh vampires, I was very good at" , "killing em once...", 591)
player.nextDialogue = 3021;
def chat_3021(player):
player.getDH().sendStatement("Dr Harlow appears to sober up slightly.")
player.nextDialogue = 3022;
def chat_3022(player):
player.getDH().sendNpcChat("Well you're gonna need a stake, otherwise he'll just" , "regenerate. Yes, you must have a stake to finish it off..." , "I just happen to have one with me.", 591)
player.nextDialogue = 3023;
def chat_3023(player):
player.getDH().sendItemChat("", "Dr Harlow hands you a stake.", stake, 200, 14, 0)
ItemAssistant.addItemToInventoryOrDrop(player, stake, 1)
player.nextDialogue = 3024;
def chat_3024(player):
player.getDH().sendNpcChat("You'll need a hammer as well, to drive it in properly," , "your everyday general store hammer will do. One last" , "thing... It's wise to carry garlic with you, vampires are" , "somewhat weakened if they can smell garlic. Morgan", 591)
player.nextDialogue = 3025;
def chat_3025(player):
player.getDH().sendNpcChat("always liked garlic, you should try his house. But" , "remember, a vampire is still a dangerous foe!", 591)
player.nextDialogue = 3026;
def chat_3026(player):
player.getDH().sendPlayerChat("Thank you very much!", 591)
player.getQuest(7).setStage(4)
QuestHandler.updateAllQuestTab(player);
#Talking to Morgan again
#Start of main dialogue
def chat_3030(player):
player.getDH().sendNpcChat("Have you managed to speak to Dr Harlow yet?", 591)
quest_stage = player.getQuest(7).getStage()
if quest_stage == 1 or quest_stage == 2:
player.nextDialogue = 3037;
else:
player.nextDialogue = 3031;
def chat_3031(player):
player.getDH().sendPlayerChat("Yes, I have. He's given me some advice on how" , "to take down the vampire!", 591)
player.nextDialogue = 3032;
def chat_3032(player):
player.getDH().sendNpcChat("Wonderful! I'd better let you get on with the business" , "then. Best of luck!", 591)
#If quest is done
def chat_3033(player):
player.getDH().sendNpcChat("So, is it dead?", 591)
player.nextDialogue = 3034;
def chat_3034(player):
player.getDH().sendPlayerChat("Yup. It was a piece of cake.", 591)
player.nextDialogue = 3035;
def chat_3035(player):
player.getDH().sendNpcChat("Cake? I asked about the vampire!", 591)
player.nextDialogue = 3036;
def chat_3036(player):
player.getDH().sendPlayerChat("Never mind...", 591)
def chat_3037(player):
player.getDH().sendPlayerChat("Not yet.", 591)
player.nextDialogue = 3038;
def chat_3038(player):
player.getDH().sendNpcChat("Please hurry!", 591)
|
default_app_config = 'wshop.apps.dashboard.orders.config.OrdersDashboardConfig'
|
description = 'Tensile machine'
group = 'optional'
excludes = ['tensile']
tango_base = 'tango://dhcp02.ictrl.frm2.tum.de:10000/test/doli/'
devices = dict(
teload = device('nicos.devices.entangle.Actuator',
description = 'load value of the tensile machine',
tangodevice = tango_base + 'force',
precision = 2,
fmtstr = '%.1f',
),
tepos = device('nicos.devices.entangle.Sensor',
description = 'position value of the tensile machine',
tangodevice = tango_base + 'position',
fmtstr = '%.4f',
),
)
display_order = 40
|
import cv2
import os
import numpy as np
# create a CLAHE with L channel(Contrast Limited Adaptive Histogram Equalization).
def pre_proc_CEH2(img):
img_lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    lab_planes = list(cv2.split(img_lab))  # cv2.split may return a tuple; make it mutable
clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
lab_planes[0] = clahe.apply(lab_planes[0])
lab = cv2.merge(lab_planes)
cl1 = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return cl1
# create a CLAHE (Contrast Limited Adaptive Histogram Equalization).
def pre_proc_CEH(img):
img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize )
cl1 = clahe.apply(img_bw)
return cl1
# create Equalization Histogram
def pre_proc_EH(img):
img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
equ = cv2.equalizeHist(img_bw)
eh1 = np.hstack((img_bw,equ))
return eh1
# reduce the black background
def cut_img(img):
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_ , thresholded = cv2.threshold(grayscale, 0, 255,cv2.THRESH_OTSU)
bbox = cv2.boundingRect(thresholded)
x, y, w, h = bbox
img_cut = img[y:y+h, x:x+w]
return img_cut
# reduce the black background
def cut_and_resize_to_original_img(img):
shp = img.shape[0:2]
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_ , thresholded = cv2.threshold(grayscale, 0, 255,cv2.THRESH_OTSU)
bbox = cv2.boundingRect(thresholded)
x, y, w, h = bbox
img_cut = img[y:y+h, x:x+w]
bgr_final = cv2.cvtColor(img_cut, cv2.COLOR_LAB2BGR)
    img_cut_resized = cv2.resize(bgr_final, (shp[1], shp[0]), interpolation=cv2.INTER_AREA)  # cv2.resize expects (width, height)
return img_cut_resized
def CEH_cut_pipeline(img):
img_uint = img.astype(np.uint8)
img1 = cut_and_resize_to_original_img(img_uint)
img2 = pre_proc_CEH2(img1)
return img2
# Read image per image
def load_images_from_folder(path_folder):
PROC_FOLDER = path_folder + "_procEH/"
if os.path.isdir(os.path.dirname(PROC_FOLDER)) is False:
os.makedirs(os.path.dirname(PROC_FOLDER))
for filename in os.listdir(path_folder):
img = cv2.imread(os.path.join(path_folder,filename))
if img is not None:
img_proc = cut_img(img)
            img_proc = pre_proc_EH(img_proc)  # equalize the cropped image; swap in pre_proc_CEH here if desired
path = os.path.join(PROC_FOLDER, filename)
cv2.imwrite(path, img_proc)
# CHANGE THE DIRECTORY OF IMAGES
#load_images_from_folder("test2")
|
__Author__ = "noduez"
'''Label widget demo'''
import tkinter as tk
top = tk.Tk()
label = tk.Label(top, text='Hello World')
label.pack()
tk.mainloop()
|
# Don't turn on the display; it slows down the processing.
import os
if 'DISPLAY' in os.environ:
del os.environ['DISPLAY']
import ROOT
import math
from tools.EventClassification import EventClassifier
from tools.KinematicsCalculator import KinematicsCalculator
from tools.SystematicsUniverse import GetAllSystematicsUniverses
def MakeHist(chain,e,cate=None):
hist_correlation = ROOT.TH2D("h1{}".format(e),";prev_dEdX;this dEdX;",50,0,10,50,0,10)
hist_count = ROOT.TH2D("h2{}".format(e),";N-Plane DS(mm);N-Clusters;NEvents",40,0,40,20,0,10)
hist_perplane = ROOT.TH3D("h3{}".format(e),";N-Plane DS; Energy(GeV);dE/dX(MeV/cm)",40,0,40,10,0,10,100,0,20)
hist_LLR = ROOT.TH2D("h4{}".format(e),";Energy(Gev);LLR score; NEvents",10,0,10,40,-20,20)
#hist_modelLLR = PlotUtils.MnvH2D("h4m{}".format(e),"",30,0,30,100,-10,10)
hist_mindedx = ROOT.TH1D("h5{}".format(e),";dEdX(MeV/cm);N-hits",50,0,20)
hist_spread = ROOT.TH2D("h8{}".format(e),"",40,0,80,100,0,1000)
hist_Exuv = ROOT.TH2D("h6{}".format(e),"",100,-0.5,0.5,40,0,10000)
hist_Euv = ROOT.TH2D("h7{}".format(e),"",100,-0.5,0.5,40,0,10000)
#hist_nplane = ROOT.TH2D("h8{}".format(e),"",20,0,20,50,0,100)
#hist_mindedx_proxy = ROOT.TH1D("h8{}".format(e),"",50,0,5)
hist_strange0 = ROOT.TH1D("h9{}".format(e),"",50,0,10)
hist_strange1 = ROOT.TH1D("h10{}".format(e),"",50,0,10)
hist_strange2 = ROOT.TH1D("h11{}".format(e),"",50,0,10)
count = 0
for e in chain:
if e.n_prongs == 0:
continue
# Exuv = (e.prong_XViewE[0]-e.prong_UViewE[0]-e.prong_VViewE[0])/(e.prong_XViewE[0]+e.prong_UViewE[0]+e.prong_VViewE[0])
# Euv = (e.prong_UViewE[0]-e.prong_VViewE[0])/(e.prong_UViewE[0]+e.prong_VViewE[0])
# Esum = e.prong_XViewE[0]+e.prong_UViewE[0]+e.prong_VViewE[0]
# hist_Exuv.Fill(Exuv,e.prong_XViewE[0]+e.prong_UViewE[0]+e.prong_VViewE[0])
# hist_Euv.Fill(Euv,e.prong_XViewE[0]+e.prong_UViewE[0]+e.prong_VViewE[0])
if cate is not None:
#chain.kin_cal.CalculateKinematics(e)
eventClassifier.Classify(e)
if not eventClassifier.is_reco_signal:
continue
if e.prong_part_E[0][3]<1500:
continue
if e.recoile_passive_idclus > 1500:
continue
if cate == "signal":
#if e.mc_intType!=7:
if abs(e.mc_incoming) != 12 or e.mc_current!=1 :
continue
if cate == "NC":
if e.mc_current!=2:# or e.mc_intType!=4:
# or 211 in map(abs, e.mc_FSPartPDG) or 2212 in map(abs,e.mc_FSPartPDG):
continue
# if abs(Exuv)>0.2 or abs(Euv)>0.3:
# continue
#if e.recoile_passive >1200:
# continue
energy = e.prong_part_E[0][3]/1e3
#print energy
dedx = e.prong_dEdXs[0]
dedx_dz = e.prong_dEdXs_dz[0]
dedx_dx = e.prong_dEdXs_projection[0]
dEdX_sum = {}
vertex_plane = Get_z_plane(e.mc_vtx[2])
#print dedx.size()
#print [e.prong_binned_energy_bin_contents[0][i] for i in range(e.prong_binned_energy_bin_contents[0].size())]
for i in range(min(dedx.size(),dedx_dz.size())):
try:
dEdX_sum[dedx_dz[i]] += dedx[i]
except KeyError:
dEdX_sum[dedx_dz[i]] = dedx[i]
# print "count:",count
print(e.mc_run, e.mc_subrun, e.mc_nthEvtInFile+1)
print(e.prong_dEdXMeanFrontTracker[0])
print(e.prong_TruePID[0], e.prong_part_E[0][3]-e.prong_TrueEnergy[0])
print(e.prong_axis_vertex[0][2]-e.vtx[2] if e.vtx[2]>0 else -1)
for key in sorted(dEdX_sum):
print("("+ repr(key)+ ", dEdX:" + repr(dEdX_sum[key]) + ")")
input("continue?")
top_hit_counts = len(dEdX_sum)/3
hit_count = 0
LLR = [0,0,0]
min_dedx = 99999
model_LLR = 0
strange_count = [0,0,0]
#print len(dedx_sum)
energy_index = min(11,int(math.ceil(energy)))
for i in range(-10,84*2+1):
if len(dedx_dz) == 0 or i < dedx_dz[0]:
continue
if hit_count>top_hit_counts :
break
hit_count += 1
dplane = i-dedx_dz[0]
dedx_step = dEdX_sum.setdefault(i,0)
if dedx_step<0.5 and dedx_step>=0:
strange_count[0] +=1
if dedx_step<12.5 and dedx_step>=12:
strange_count[1] +=1
if dedx_step<9 and dedx_step>=8.5:
strange_count[2] +=1
hist_mindedx.Fill(dedx_step,1)
# if (e.ev_run==10069 and e.ev_subrun==44 and e.ev_gate==455):
# print "count:",count
# print e.prong_part_E[0][3]
# for key in sorted(dEdX_sum):
# print "("+ repr(key)+ ", dEdX:" + repr(dEdX_sum[key]) + ")"
# raw_input("continue?")
#if dedx_sum[j] == 0:
# print "warning: zero dedx"
#LLR *= ROOT.TMath.Landau(dedx_sum[j],3.75,1.46,True)/ROOT.TMath.Landau(dedx_sum[j],1.7,0.506,True)
#dplane = plane - dedx_dz_trun[0]
# prob_e = f.Get("elikelihood{}_{}".format(dplane,energy_index))
# prob_pi = f.Get("glikelihood{}_{}".format(dplane,energy_index))
# if not prob_e or not prob_pi:
# print dplane,energy_index
# pe = max(prob_e.GetBinContent(prob_e.FindBin(dedx_sum[j])),0.0001)
# ppi = max(prob_pi.GetBinContent(prob_pi.FindBin(dedx_sum[j])),0.0001)
# k = 2 if dedx_dz_trun[j] % 2 == 1 else (dedx_dz_trun[j] % 4)/2
# LLR[k]+=-math.log(pe)+math.log(ppi)
# model_LLR += -math.log(ROOT.TMath.Landau(dedx_sum[j],1.7,0.506,True))+math.log(ROOT.TMath.Landau(dedx_sum[j],3.75,1.46,True))
# hist_modelLLR.Fill(j,max(-10,min(10,model_LLR)),1)
# if (j>=3):
# ave_dedx = sum(dedx_sum[j-3:j+1])/4
# min_dedx = min(ave_dedx,min_dedx)
# hist_mindedx.Fill(j-3,min_dedx,1)
#hist_spread.Fill(j,min(999.9,dedx_spread[j]),1)
hist_perplane.Fill(dplane,energy,dedx_step)
# if j==15:
# if abs(dedx_sum[j] -2) < 0.5:
# print e.mc_run, e.mc_subrun, e.mc_nthEvtInFile+1
# print e.prong_axis_vertex[0][2]
# print [i for i in dedx_sum]
# print [e.prong_binned_energy_bin_contents[0][i] for i in range(e.prong_binned_energy_bin_contents[0].size())]
# print Exuv, Euv
# raw_input("prese ENTER to continue")
# j+=1
# if len(dedx_dz_trun) == 0:
# print e.mc_run, e.mc_subrun, e.mc_nthEvtInFile+1
# print e.prong_axis_vertex[0][2]
# print [i for i in dedx]
# print [i for i in dedx_dz]
# continue
# if (dplane<15 and dedx_dz_trun[-1]< 84*2+1):
# for j in range(dplane+1, min(15, 84*2+1-dedx_dz_trun[0])):
# dplane = j
# hist_perplane.Fill(dplane,energy,0)
# prob_e = f.Get("elikelihood{}_{}".format(dplane,energy_index))
# prob_pi = f.Get("glikelihood{}_{}".format(dplane,energy_index))
# pe = max(prob_e.GetBinContent(1),0.001)
# ppi = max(prob_pi.GetBinContent(1),0.001)
# k = 2 if (dedx_dz_trun[0]+j) % 2 == 1 else ((dedx_dz_trun[0]+j) % 4)/2
# LLR[k]+=-math.log(pe)+math.log(ppi)
# if len(dedx_dz_trun)>34:
# # that we can do something fancy:
# for k in range(0,16):
# prob_pi = f.Get("glikelihood{}_{}".format(k,energy_index))
# for l in range(len(startPilike)):
# ppi = max(prob_pi.GetBinContent(prob_pi.FindBin(dedx_sum[l+k])),0.001)
# startPilike[l]+=-math.log(ppi)
# # print "hey"
# # print dedx_sum
# # print startPilike
# min_value = min(startPilike)
# #print min_value
# hist_minPilike.Fill(min_value)
# hist_nplane.Fill(startPilike.index(min_value),min_value)
#print sum(LLR)
#score = sum(LLR)
#hist_LLR.Fill(energy_index,max(-20,min(19.9,score)),1)
#hist_KNN.Fill(min_LLR,max(min(dedx_spread[0::2]),min(dedx_spread[1::2])))
#dtheta = e.prong_axis_vector[0][0]-e.prong_TrueTheta[0]
#hist_dtheta.Fill(dtheta)
#hist_KNN.Fill(e.prong_TrueTrajFracInProng[0],min(abs(e.prong_TruePID[0]),120))
hist_strange0.Fill(min(strange_count[0],9))
hist_strange1.Fill(min(strange_count[1],9))
hist_strange2.Fill(min(strange_count[2],9))
# print e.mc_run, e.mc_subrun, e.mc_nthEvtInFile+1
# print dEdX_sum
# print [e.prong_binned_energy_bin_contents[0][i] for i in range(e.prong_binned_energy_bin_contents[0].size())]
# #print Exuv, Euv
# print e.recoile_passive
# raw_input("prese ENTER to continue")
count+=1
# print e.mc_run, e.mc_subrun, e.mc_nthEvtInFile+1
# print [i for i in dedx]
# print [i for i in dedx_dz]
# raw_input("press enter to continue")
#break
print(count)
#hist.Divide(hist,hist_count)
hist_count.Scale(1.0/count)
#hist_perplane.Scale(1.0/count)
return hist_LLR,hist_mindedx,hist_perplane,hist_strange0,hist_strange1,hist_strange2
def MakeChain(path):
chain = ROOT.TChain("NuECCQE");
chain.Add(path)
return chain
def calMCC(hist_true,hist_false,cut):
TP = hist_true.Integral(1,cut)
FN = hist_true.Integral(cut,101)
FP = hist_false.Integral(1,cut)
TN = hist_false.Integral(cut,101)
try:
MCC = (TP*TN-FP*FN) / (math.sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)))
EFF = TP/(TP+FN)
PUR = TP/(TP+FP)
except ZeroDivisionError:
print(TP,FN,FP,TN)
return None,None,None
print("MCC is {}".format( MCC))
#print "efficiency is {}".format(EFF)
#print "purity is {}".format(PUR)
return MCC,EFF,PUR
def Get_z_plane(z):
with open(os.environ["PLOTUTILSROOT"]+"/data/Minerva_Planes.txt") as f:
for line in f.readlines():
this_plane = line.split()
if (abs(z -float(this_plane[5]))<25 or z<float(this_plane[5])):
return int(this_plane[3])*2+int(this_plane[4])-1
return None
if __name__ == "__main__":
path_e = "/pnfs/minerva/persistent/users/hsu/electronPC-test/grid/central_value/minerva/ana/v21r1p1/00/00/00/01/*.root"
path_g = "/pnfs/minerva/persistent/users/hsu/photonPCAna-oe2/grid/central_value/minerva/ana/v21r1p1/00/00/00/01/*.root"
nue_IA=True
if nue_IA:
f = ROOT.TFile.Open("hist_IA_tmp.root","RECREATE")
chain = ROOT.TChain("NuECCQE");
#for i in range(1):
print(chain.Add("/pnfs/minerva/persistent/users/hsu/NuECCQE-v3-mc/me1B_merged/NuECCQE_mc_AnaTuple_*.root"))
#print chain.Add("/pnfs/minerva/persistent/users/hsu/short_test/grid/central_value/minerva/ana/v21r1p1/00/11/10/00/*.root")
#print chain.Add("/minerva/data/users/hsu/test/nogrid/central_value/minerva/ana/v21r1p1/00/11/10/01/SIM_minerva_00111001_Subruns_0122_NuECCQE_Ana_Tuple_v21r1p1.root")
kin_cal = KinematicsCalculator(correct_beam_angle=True, correct_MC_energy_scale=False, calc_true = True)
eventClassifier = EventClassifier(classifiers=["Reco"], use_kin_cuts=False,use_sideband=[])
chain.kin_cal = kin_cal
chain.ShortName = lambda : "cv"
hist_LLR_e,hist_dedx_e,hist_perplane_e,hist_count_e,hist_Exuv_e,hist_Euv_e = MakeHist(chain,"e","signal")
hist_LLR_g,hist_dedx_g,hist_perplane_g,hist_count_g,hist_Exuv_g,hist_Euv_g= MakeHist(chain,"g","NC")
else :
input("will overwrite hist.root, are you sure?")
f = ROOT.TFile.Open("hist_PC.root","RECREATE")
chain_e = MakeChain(path_e)
chain_pi = MakeChain(path_g)
hist_LLR_e,hist_dedx_e,hist_perplane_e, hist_count_e,hist_Exuv_e,hist_Euv_e= MakeHist(chain_e,"e")
#hist_LLR_g,hist_dedx_g,hist_perplane_g, hist_count_g, hist_Exuv_g,hist_Euv_g = MakeHist(chain_pi,"g")
# f2 = ROOT.TFile.Open("plot_bal.root","RECREATE")
hist_LLR_e.Write()
hist_dedx_e.Write()
hist_perplane_e.Write()
hist_count_e.Write()
hist_Exuv_e.Write()
hist_Euv_e.Write()
hist_LLR_g.Write()
hist_dedx_g.Write()
hist_perplane_g.Write()
hist_count_g.Write()
hist_Exuv_g.Write()
hist_Euv_g.Write()
# hist_perplane_e.Write()
# hist_perplane_g.Write()
# p1 = ROOT.TGraphErrors()
# p2 = ROOT.TGraphErrors()
# print "min dedx mcc"
# for i in range(1,51):
# mcc,eff,pur = calMCC(hist_dedx_e,hist_dedx_g,i)
# if mcc is not None:
# p1.SetPoint(i,eff,pur)
# print "LLR"
# for i in range (1,41):
# mcc,eff,pur = calMCC(hist_LLR_e.ProjectionY("1"),hist_LLR_g.ProjectionY("2"),i)
# if mcc is not None:
# p2.SetPoint(i,eff,pur)
# p1.Write("tg1")
# p2.Write("tg2")
for i in range(1,80):
for j in range(1,12):
hist_temp = hist_perplane_e.ProjectionZ("elikelihood{}_{}".format(i-1,j),i,i,j,j)
if hist_temp.Integral(0,101)!= 0:
hist_temp.Scale(1.0/hist_temp.Integral(0,101))
hist_temp.Write()
hist_temp = hist_perplane_g.ProjectionZ("glikelihood{}_{}".format(i-1,j),i,i,j,j)
if hist_temp.Integral(0,101)!= 0:
hist_temp.Scale(1.0/hist_temp.Integral(0,101))
hist_temp.Write()
for i in range(0,52):
hist_temp = hist_LLR_e.ProjectionY("ecorrelation{}".format(i),i,i)
if hist_temp.Integral(0,51)!= 0:
hist_temp.Scale(1.0/hist_temp.Integral(0,51))
hist_temp.Write()
hist_temp = hist_LLR_g.ProjectionY("gcorrelation{}".format(i),i,i)
if hist_temp.Integral(0,51)!= 0:
hist_temp.Scale(1.0/hist_temp.Integral(0,51))
hist_temp.Write()
c= ROOT.TCanvas("c","c",1024,768)
#for i in range(1,12):
#hist_perplane_e.GetYaxis().SetRange(i,i);
hist_perplane_e.Project3D("zx").Draw("COLZ")
c.Print("e_profile{}.png".format(0))
#hist_perplane_g.GetYaxis().SetRange(i,i)
hist_perplane_g.Project3D("zx").Draw("COLZ")
c.Print("g_profile{}.png".format(0))
#calMCC(hist_LLR_e.ProjectionY("",i,i),hist_LLR_g.ProjectionY("",i,i),5)
#print "total"
#print "baseline"
#calMCC(hist_dedx_e,hist_dedx_g,24)
#calMCC(hist_dedx_e.ProjectionY("",20,20),hist_dedx_g.ProjectionY("",20,20),24)
# for i in range(1,12):
hist_LLR_e.SetLineColor(ROOT.kRed)
hist_LLR_e.ProjectionY("").Draw("")
hist_LLR_g.ProjectionY("").Draw("SAME")
legend = ROOT.TLegend(0.3,0.7,0.7,0.9)
legend.AddEntry(hist_LLR_e,"CCNuE")
legend.AddEntry(hist_LLR_g,"NC")
legend.Draw()
c.Print("hist_LLR.png")
#hist_perplane_e.Project3D("zx").Draw("COLZ")
#c.Print("hist_nue_signal1.png")
#hist_LLR_g.Draw("COLZ")
#c.Print("hist_g.png")
#hist_perplane_g.Project3D("zx").Draw("COLZ")
#c.Print("hist_nue_NC1.png")
#hist_LLR_g.Draw("COLZ")
#c.Print("NCshowerwidth.png")
#hist_perplane_g.Draw("COLZ")
#c.Print("NCcount.png")
#hist_dedx_e.Draw("COLZ")
# c.Print("10deg_spread.png")
f.Close()
|
#
# Normalize YAML reports
#
from datetime import datetime
from itertools import groupby
import functools
import hashlib
import logging
import re
import string
import uuid
import yaml
log = logging.getLogger("normalize")
class UnsupportedTestError(Exception):
pass
test_name_mappings = {
"http_host": "http_host",
"HTTP Host": "http_host",
"http_requests_test": "http_requests",
"http_requests": "http_requests",
"HTTP Requests Test": "http_requests",
"bridge_reachability": "bridge_reachability",
"bridgereachability": "bridge_reachability",
"TCP Connect": "tcp_connect",
"tcp_connect": "tcp_connect",
"DNS tamper": "dns_consistency",
"dnstamper": "dns_consistency",
"dns_consistency": "dns_consistency",
"HTTP Invalid Request Line": "http_invalid_request_line",
"http_invalid_request_line": "http_invalid_request_line",
"http_header_field_manipulation": "http_header_field_manipulation",
"HTTP Header Field Manipulation": "http_header_field_manipulation",
"Multi Protocol Traceroute Test": "multi_protocol_traceroute",
"multi_protocol_traceroute_test": "multi_protocol_traceroute",
"multi_protocol_traceroute": "multi_protocol_traceroute",
"traceroute": "multi_protocol_traceroute",
"parasitic_traceroute_test": "parasitic_traceroute",
"parasitic_tcp_traceroute_test": "parasitic_traceroute",
"tls-handshake": "tls_handshake",
"tls_handshake": "tls_handshake",
"dns_injection": "dns_injection",
"captivep": "captive_portal",
"captiveportal": "captive_portal",
"HTTPFilteringBypass": "http_filtering_bypass",
"httpfilteringbypass": "http_filtering_bypass",
"HTTPTrix": "http_trix",
"httptrix": "http_trix",
"http_test": "http_test",
"http_url_list": "http_url_list",
"dns_spoof": "dns_spoof",
"netalyzrwrapper": "netalyzr_wrapper",
"meek_fronted_requests_test": "meek_fronted_requests_test",
"lantern_circumvention_tool_test": "lantern_circumvention_tool_test",
"psiphon_test": "psiphon_test",
"this_test_is_nameless": "this_test_is_nameless",
"test_get_random_capitalization": "http_header_field_manipulation",
"test_put_random_capitalization": "http_header_field_manipulation",
"test_post_random_capitalization": "http_header_field_manipulation",
"test_random_big_request_method": "http_invalid_request_line",
"test_random_invalid_field_count": "http_invalid_request_line",
"summary": "invalid",
"test_get": "invalid",
"test_post": "invalid",
"test_put": "invalid",
"test_send_host_header": "invalid",
}
schema = [
"id",
"input",
"input_hashes",
"report_id",
"report_filename",
"options",
"probe_cc",
"probe_asn",
"probe_ip",
"probe_city",
"backend_version",
"data_format_version",
"test_name",
"test_version",
"test_start_time",
"measurement_start_time",
"test_runtime",
"test_helpers",
"software_name",
"software_version",
"bucket_date",
"test_keys",
]
# Some values overlap across categories e.g. captive_portal
test_categories = {
"dnst": {"dns_consistency", "dns_injection", "captive_portal"},
"process": {"lantern", "psiphon"},
"httpt": {
"http_requests",
"meek_fronted_requests",
"domclass_collector",
"http_keyword_filtering",
"http_uk_mobile_networks",
"http_header_field_manipulation",
"http_url_list",
"http_host",
"squid",
"captive_portal",
"psiphon",
"tor_http_requests_test",
},
"scapyt": {
"chinatrigger",
"keyword_filtering",
"parasitic_traceroute",
"multi_protocol_traceroute",
"dns_spoof",
},
"tcpt": {"http_filtering_bypass", "http_invalid_request_line", "http_trix"},
}
regexps = dict(
ipv4=r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})",
hostname=r"([a-zA-Z0-9](?:(?:[a-zA-Z0-9-]*|(?<!-)\.(?![-.]))*[a-zA-Z0-9]+)?)",
)
## Simhash generation
simhash_re = re.compile(r"[\w\u4e00-\u9fcc]+")
def gen_simhash(s):
content = s.lower()
content = "".join(re.findall(simhash_re, content))
mx = max(len(content) - 4 + 1, 1)
features = [content[i : i + 4] for i in range(mx)]
features = ((k, sum(1 for _ in g)) for k, g in groupby(sorted(features)))
v = [0] * 64
masks = [1 << i for i in range(64)]
for h, w in features:
h = h.encode("utf-8")
h = int(hashlib.md5(h).hexdigest(), 16)
for i in range(64):
v[i] += w if h & masks[i] else -w
ans = 0
for i in range(64):
if v[i] > 0:
ans |= masks[i]
return ans
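# Illustrative helper (assumption: not used by the normalization pipeline).
# Simhashes of near-duplicate texts differ in only a few of the 64 bits, so a
# small Hamming distance indicates similar bodies.
def _simhash_hamming_distance(a, b):
    return bin(a ^ b).count("1")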
### Normalize entries across format versions ###
def nest_test_keys(entry):
if entry["test_keys"] is None:
entry["test_keys"] = {}
for test_key in set(entry.keys()) - set(schema):
entry["test_keys"][test_key] = entry.pop(test_key)
return entry
def normalize_str(body):
if body is None:
return None
assert len(body) < 10000
if isinstance(body, bytes):
return body.decode("UTF-8", "backslashreplace")
return body.replace("\0", "")
def normalize_body(body):
if body is None:
return None
if isinstance(body, bytes):
return body.decode("UTF-8", "backslashreplace")
return body.replace("\0", "")
def match(pattern, source):
found = re.search(pattern, source)
if found:
return found.group(1)
return ""
def normalize_httpt(entry):
def normalize_headers(headers):
# XXX: data loss -- ordering, formatting, duplicate headers, whitespace
normalized_headers = {}
for name, values in headers:
value = values[0]
if isinstance(value, list):
value = value[0]
normalized_headers[name] = normalize_str(value)
return normalized_headers
experiment_requests = []
control_requests = []
url_option_idx = None
url_option_names = ["--url", "-u"]
for url_option in url_option_names:
try:
url_option_idx = entry.get("options").index(url_option) + 1
except (ValueError, AttributeError):
continue
if url_option_idx is not None and entry["input"] is None:
entry["input"] = entry["options"][url_option_idx]
# This is needed to fix the requests and responses in the
# tor_http_requests test.
if entry["test_keys"].get("request", None) and entry["test_keys"].get(
"response", None
):
entry["test_keys"]["requests"] = entry["test_keys"].get("requests", [])
entry["test_keys"]["requests"].append(
{
"response": entry["test_keys"].pop("response"),
"request": entry["test_keys"].pop("request"),
}
)
for session in entry["test_keys"].get("requests", []):
if isinstance(session.get("response"), dict):
session["response"]["body"] = normalize_body(session["response"]["body"])
session["response"]["headers"] = normalize_headers(
session["response"]["headers"]
)
else:
session["response"] = {"body": None, "headers": {}}
if isinstance(session.get("request"), dict):
session["request"]["body"] = normalize_body(session["request"]["body"])
session["request"]["headers"] = normalize_headers(
session["request"]["headers"]
)
else:
session["request"] = {"body": None, "headers": {}}
is_tor = False
exit_ip = None
exit_name = None
if session["request"]["url"].startswith("shttp"):
session["request"]["url"] = session["request"]["url"].replace(
"shttp://", "http://"
)
is_tor = True
elif session["request"].get("tor") is True:
is_tor = True
elif session["request"].get("tor") in [False, None, {"is_tor": False}]:
is_tor = False
elif session["request"].get("tor", {}).get("is_tor") is True:
is_tor = True
exit_ip = session["request"].get("tor", {}).get("exit_ip", None)
exit_name = session["request"].get("tor", {}).get("exit_name", None)
else:
log.error("Could not detect tor or not tor status")
log.debug(session)
session["request"]["tor"] = {
"is_tor": is_tor,
"exit_ip": exit_ip,
"exit_name": exit_name,
}
session["response_length"] = None
for k, v in session["response"]["headers"].items():
# sort of re-normalisation from body back to binary
if k.lower() == "content-length":
session["response_length"] = v
if is_tor is True:
control_requests.append(session)
else:
experiment_requests.append(session)
entry["test_keys"]["requests"] = []
try:
entry["test_keys"]["requests"].append(experiment_requests.pop(0))
except IndexError:
pass
try:
entry["test_keys"]["requests"].append(control_requests.pop(0))
except IndexError:
pass
entry["test_keys"]["requests"] += experiment_requests
entry["test_keys"]["requests"] += control_requests
if entry["test_keys"].get("headers_diff", None) is not None:
entry["test_keys"]["headers_diff"] = list(entry["test_keys"]["headers_diff"])
return entry
def _normalize_answer(ans):
try:
ttl = match(r"ttl=(\d+)", ans[0])
except Exception:
log.error("Failed to parse ttl in %s" % ans[0])
ttl = None
answer_type = match("type=([A-Z]+)", ans[0])
na = dict(ttl=ttl, answer_type=answer_type)
if answer_type == "A":
na["ipv4"] = match("address=" + regexps["ipv4"], ans[1])
elif answer_type == "MX":
na["hostname"] = match("address=" + regexps["ipv4"], ans[1])
na["preference"] = match(r"preference=(\d+)", ans[1])
elif answer_type in ["PTR", "CNAME"]:
na["hostname"] = match("name=" + regexps["hostname"], ans[1])
elif answer_type == "SOA":
na["responsible_name"] = match("rname=" + regexps["hostname"], ans[1])
na["hostname"] = match("mname=" + regexps["hostname"], ans[1])
na["serial_number"] = match(r"serial=(\d+)", ans[1])
na["refresh_interval"] = match(r"refresh=(\d+)", ans[1])
na["retry_interval"] = match(r"retry=(\d+)", ans[1])
na["minimum_ttl"] = match(r"minimum=(\d+)", ans[1])
na["expiration_limit"] = match(r"expire=(\d+)", ans[1])
return na
def normalize_dnst(entry):
entry["test_keys"].pop("test_resolvers", None)
errors = entry["test_keys"].pop("tampering", None)
if errors:
entry["test_keys"]["errors"] = errors
entry["test_keys"]["successful"] = [e[0] for e in errors if e[1] is False]
entry["test_keys"]["failed"] = [e[0] for e in errors if e[1] is not True]
entry["test_keys"]["inconsistent"] = [e[0] for e in errors if e[1] is True]
elif entry["test_name"] == "dns_consistency":
entry["test_keys"]["errors"] = {}
entry["test_keys"]["successful"] = []
entry["test_keys"]["failed"] = []
entry["test_keys"]["inconsistent"] = []
queries = []
for query in entry["test_keys"].pop("queries", []):
try:
query["hostname"] = match(r"\[Query\('(.+)'", query.pop("query"))
except:
query["hostname"] = None
try:
query["resolver_hostname"], query["resolver_port"] = query.pop("resolver")
except:
query["resolver_hostname"], query["resolver_port"] = [None, None]
query.pop("addrs", None)
answers = []
for answer in query.pop("answers", []):
normalized_answer = _normalize_answer(answer)
answers.append(normalized_answer)
query["answers"] = answers
failure = query.get("failure", None)
if not failure and len(answers) == 0:
failure = "no_answer"
query["failure"] = failure
queries.append(query)
entry["test_keys"]["queries"] = queries
return entry
def normalize_tcpt(entry):
return entry
def normalize_process(entry):
return entry
def normalize_entry(entry, bucket_date, perma_fname, esha):
"""Autoclaving"""
hashuuid = esha[:16] # sha1 is 20 bytes
if isinstance(entry.get("report"), dict):
entry.update(entry.pop("report"))
test_name = entry.get("test_name", "invalid")
test_name = test_name_mappings.get(test_name, test_name.lower())
entry["test_name"] = test_name
entry["bucket_date"] = bucket_date
if not entry.get("id"):
entry["id"] = str(uuid.UUID(bytes=hashuuid))
entry["report_filename"] = perma_fname
# Ensure all the keys in the schema are present
for key in schema:
entry[key] = entry.get(key, None)
if entry.get("data_format_version", "") == "0.2.0":
if entry["test_keys"] is None:
entry = nest_test_keys(entry)
return entry
ts = entry.pop("start_time", 0)
test_start_time = datetime.utcfromtimestamp(ts)
try:
tst = entry.pop("test_start_time")
# This is the old test_start_time key that now is called
# "measurement_start_time"
if isinstance(tst, float):
measurement_start_time = datetime.utcfromtimestamp(tst)
elif tst is None:
measurement_start_time = test_start_time
else:
test_start_time = datetime.strptime(tst, "%Y-%m-%d %H:%M:%S")
measurement_start_time = datetime.strptime(
entry.get("measurement_start_time"), "%Y-%m-%d %H:%M:%S"
)
except KeyError:
# Failback to using the start_time
measurement_start_time = test_start_time
entry["measurement_start_time"] = measurement_start_time.strftime(
"%Y-%m-%d %H:%M:%S"
)
entry["test_start_time"] = test_start_time.strftime("%Y-%m-%d %H:%M:%S")
entry["data_format_version"] = "0.2.0"
if isinstance(entry.get("options", []), dict):
entry["options"] = entry["options"].get("subargs", [])
entry = nest_test_keys(entry)
assert test_name not in test_categories["process"]
# Some values overlap across categories so multiple ifs are needed
if test_name in test_categories["httpt"]:
entry = normalize_httpt(entry)
if test_name in test_categories["dnst"]:
entry = normalize_dnst(entry)
# Ignore old, rare tests
if test_name in test_categories["scapyt"]:
raise UnsupportedTestError
if test_name in ("captive_portal", "tls_handshake"):
raise UnsupportedTestError
    # TODO: test these in
# test_normalize_yaml_sanitise_tcp_connect_bridge_reach
if entry["test_name"] == "tcp_connect":
# On 2019-10-08 the last bridge_reachability entry from YAML
# in the metadb was from 2016-10-12
raise UnsupportedTestError
elif entry["test_name"] == "bridge_reachability":
# On 2019-10-08 the last bridge_reachability entry from YAML
# in the metadb was from 2016-10-12
raise UnsupportedTestError
return entry
### Stream entries from YAML.lz4 files ####
class BlobSlicerError(RuntimeError):
pass
class BrokenFrameError(BlobSlicerError):
pass
class TruncatedReportError(BlobSlicerError):
pass
def stream_yaml_blobs(fd):
"""Detects YAML objects from a stream.
Returns an iterator of (offset, blob)
"""
head = b""
    for blob in iter(functools.partial(fd.read, 1048576), b""):
if len(blob) == 0:
break
bloboff = fd.tell() - len(blob)
head, blob = b"", head + blob
start = 0
while head == b"":
prefix = blob[start : start + 4]
if prefix == b"---\n": # ordinary preamble
end = blob.find(b"\n...\n", start)
if end != -1:
yield bloboff + start, blob[start : end + 5]
start = end + 5
else:
head = blob[start:]
elif not prefix:
break
elif prefix == b"...\n": # duplicate trailer
# e.g. 2013-05-05/20130505T065614Z-VN-AS24173-dns_consistency-no_report_id-0.1.0-probe.yaml
start += 4
elif len(prefix) < 4: # need next blob
head = blob[start:]
elif chr(prefix[0]) == "#": # comment
# e.g. 2013-09-12/20130912T144929Z-MD-AS1547-dns_consistency-no_report_id-0.1.0-probe.yaml
end = blob.find(b"\n", start)
if end != -1:
start = end + 1
else:
head = blob[start:]
else:
raise BrokenFrameError(bloboff + start, prefix)
if head:
raise TruncatedReportError(fd.tell() - len(head), head[:100])
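# Minimal usage sketch (assumption: `fd` is a file-like object opened in binary
# mode, e.g. an lz4-decompressed report; the file name below is illustrative):
#   with open("report.yaml", "rb") as fd:
#       for offset, blob in stream_yaml_blobs(fd):
#           doc = yaml.safe_load(blob)  # each blob is one "---\n ... \n...\n" framed document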
def generate_report_id(header):
# TODO: test
start_time = datetime.fromtimestamp(header.get("start_time", 0))
report_id = start_time.strftime("%Y%m%dT%H%M%SZ_")
value_to_hash = header.get("probe_cc", "ZZ").encode("utf-8")
value_to_hash += header.get("probe_asn", "AS0").encode("utf-8")
value_to_hash += header.get("test_name", "invalid").encode("utf-8")
value_to_hash += header.get("software_version", "0.0.0").encode("utf-8")
probe_city = header.get("probe_city", "None")
# probe_city = probe_city.encode("utf-8") # u'Reykjav\xedk' in bucket 2014-02-20
# probe_city = (
# probe_city.encode("utf-8")
# if isinstance(probe_city, unicode)
# else str(probe_city)
# ) # u'Reykjav\xedk' in bucket 2014-02-20
# probe_city = probe_city.decode() #encode("utf-8")
if probe_city is not None:
value_to_hash += probe_city.encode("utf-8")
report_id += "".join(
string.ascii_letters[b % len(string.ascii_letters)]
for b in hashlib.sha512(value_to_hash).digest()
)[:50]
return report_id
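# Rough shape of the output (hypothetical header values; the date part depends on
# the local timezone, since datetime.fromtimestamp() is naive here):
#   generate_report_id({"start_time": 0, "probe_cc": "IT", "probe_asn": "AS1"})
#   -> e.g. "19700101T000000Z_" followed by 50 letters derived from a sha512 digest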
## Entry points
def iter_yaml_msmt_normalized(data, bucket_tstamp: str, report_fn: str):
"""Yields normalized measurements from a YAML bytestream"""
assert bucket_tstamp.startswith("20")
assert len(bucket_tstamp) == 10
assert len(report_fn.split("/")) == 2, report_fn
# Taken from autoclaving.py stream_yaml_reports
blobgen = stream_yaml_blobs(data)
off, header = next(blobgen)
headsha = hashlib.sha1(header)
# XXX: bad header kills whole bucket
header = yaml.safe_load(header)
# Generates report_id if needed
if not header.get("report_id"):
header["report_id"] = generate_report_id(header)
for off, entry in blobgen:
esha = headsha.copy()
esha.update(entry)
esha = esha.digest()
entry = yaml.safe_load(entry)
if not entry: # e.g. '---\nnull\n...\n'
continue
if "test_start_time" in entry and "test_start_time" in header:
header.pop("test_start_time")
entry.update(header)
try:
yield normalize_entry(entry, bucket_tstamp, report_fn, esha)
except Exception as e:
log.error(str(e), exc_info=1)
continue
# try:
# if not entry: # e.g. '---\nnull\n...\n'
# continue
# if "test_start_time" in entry and "test_start_time" in header:
# header.pop("test_start_time")
# entry.update(header)
# out = normalize_entry(entry, bucket_tstamp, report_fn, esha)
# yield out
# except Exception as exc:
# out = normalize_entry(entry, bucket_tstamp, report_fn, esha)
# yield out
|
import collections
import random
import flowws
from flowws import Argument as Arg
import freud
import numpy as np
from .internal import ScaledMSE, ScaledMAE
CoarseSystem = collections.namedtuple('CoarseSystem',
['box', 'nlist', 'positions', 'types', 'type_names',
'child_positions', 'child_types', 'child_type_names'])
def coarse_grain(record, num_neighbors=4, x_scale=1.):
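    # Collapse each residue to its center of mass (coordinates divided by x_scale),
    # then build a nearest-neighbor list over the coarse-grained sites using
    # freud's AABB query.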
positions = []
types = []
child_positions = []
child_types = []
index_groups = np.split(np.arange(len(record.residue_ids)),
np.unique(record.residue_ids, True)[1])[1:]
for group in index_groups:
group_child_positions = record.positions[group]/x_scale
group_child_types = record.types[group]
center_of_mass = np.mean(group_child_positions, axis=0)
positions.append(center_of_mass)
types.append(record.residue_types[group[0]])
child_positions.append(group_child_positions)
child_types.append(group_child_types)
positions = np.array(positions, dtype=np.float32)
types = np.array(types, dtype=np.uint32)
box = np.array(record.box, dtype=np.float32)/x_scale
aabb = freud.locality.AABBQuery(box, positions)
query = aabb.query(positions, dict(num_neighbors=num_neighbors, exclude_ii=True, mode='nearest'))
nlist = query.toNeighborList()
return CoarseSystem(
box, nlist, positions, types, record.residue_type_names,
child_positions, child_types, record.type_names)
def loop_neighborhood_environments(
rec, neighborhood_size, seed=13, fraction_range=(0, 2.),
randomly_rotate=False):
rand = np.random.default_rng(seed)
index_i = rec.nlist.query_point_indices
index_j = rec.nlist.point_indices
shuffle_indices = np.arange(len(rec.positions))
fraction_assignments = np.linspace(0, 1, len(rec.positions), endpoint=False)
rand.shuffle(fraction_assignments)
filt = np.logical_and(fraction_range[0] <= fraction_assignments,
fraction_assignments < fraction_range[1])
if not np.sum(filt):
raise ValueError(
'No particles found for fraction_range: {}'.format(fraction_range))
shuffle_indices = shuffle_indices[filt]
while True:
rand.shuffle(shuffle_indices)
for i in shuffle_indices:
bond_start = rec.nlist.find_first_index(i)
bond_stop = rec.nlist.find_first_index(i + 1)
bonds = slice(bond_start, bond_stop)
r0 = rec.positions[index_i[bond_start]]
rij = (rec.positions[index_j[bonds]] - rec.positions[index_i[bonds]])
types_j = rec.types[index_j[bonds]]
types_i = rec.types[index_i[bonds]]
rchildren = rec.child_positions[i] - r0
tchildren = rec.child_types[i]
if randomly_rotate:
import rowan
q = rowan.random.rand(1)[None]
rij = rowan.rotate(q, rij)
rchildren = rowan.rotate(q, rchildren)
yield rij, types_i, types_j, rchildren, tchildren
def randomly_loop_iter(xs, seed):
rand = random.Random(seed)
xs = list(xs)
while True:
rand.shuffle(xs)
yield from xs
def make_batches(cg_records, batch_size, neighborhood_size,
max_atoms, max_types, global_type_remaps, y_scale=1.,
fraction_range=(0, 2.), seed=13, randomly_rotate=False):
rand = random.Random(seed)
name_iter = randomly_loop_iter(sorted(cg_records), rand.randint(0, 2**32))
iterators = {}
for (name, rec) in sorted(cg_records.items()):
iterators[name] = loop_neighborhood_environments(
rec, neighborhood_size, seed + 1, fraction_range,
randomly_rotate=randomly_rotate)
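    # Each yielded batch pairs the inputs (neighbor displacement vectors cg_rij,
    # symmetric/antisymmetric one-hot type combinations cg_tij, child atom types
    # fg_tchild) with the regression target fg_rchild (child coordinates / y_scale).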
while True:
cg_rij = np.zeros((batch_size, neighborhood_size, 3), dtype=np.float32)
cg_tij = np.zeros((batch_size, neighborhood_size, 2*max_types), dtype=np.float32)
fg_tchild = np.zeros((batch_size, max_atoms), dtype=np.uint32)
fg_rchild = np.zeros((batch_size, max_atoms, 3), dtype=np.float32)
for batch_i in range(batch_size):
name = next(name_iter)
(res_type_remap, atom_type_remap) = global_type_remaps[name]
(rij, types_i, types_j, rchildren, tchildren) = next(iterators[name])
types_i, types_j = res_type_remap[types_i], res_type_remap[types_j]
types_i = np.eye(max_types)[types_i]
types_j = np.eye(max_types)[types_j]
cg_rij[batch_i, :len(rij)] = rij
cg_tij[batch_i, :len(rij), :max_types] = types_j + types_i
cg_tij[batch_i, :len(rij), max_types:] = types_j - types_i
fg_tchild[batch_i, :len(rchildren)] = atom_type_remap[tchildren]
fg_rchild[batch_i, :len(rchildren)] = rchildren/y_scale
yield (cg_rij, cg_tij, fg_tchild), fg_rchild
@flowws.add_stage_arguments
class PDBCoarseGrained(flowws.Stage):
"""Generate data for backmapping coarse-graining of structures from the PDB.
This module calculates coarse-grained versions of PDB records,
loaded using `PDBCache`. Generators are produced for model
training.
"""
ARGS = [
Arg('neighborhood_size', '-n', int,
help='Neighborhood size (number of input amino acid coordinates) to use'),
Arg('batch_size', '-b', int, 32,
help='Batch size to use'),
Arg('seed', '-s', int, 14,
help='Random seed to use'),
Arg('validation_fraction', '-v', float, .3,
help='Fraction of record names to be assigned to validation set'),
Arg('test_fraction', '-t', float,
            help='Fraction of record names to be assigned to the test set'),
Arg('x_scale', '-x', float, 64.,
help='Factor by which to decrease residue length scales'),
Arg('y_scale', '-y', float, 8.,
help='Factor by which to decrease atomic length scales'),
Arg('randomly_rotate', '-r', bool, False,
help='If True, randomly rotate environments'),
]
def run(self, scope, storage):
all_records = scope['pdb_records']
coarse_records = {}
skipped_records = []
for (name, rec) in all_records.items():
coarse = coarse_grain(
rec, self.arguments['neighborhood_size'], self.arguments['x_scale'])
if any(np.max(np.bincount(ts)) > 1 for ts in coarse.child_types):
skipped_records.append((name, 'duplicate child types'))
continue
if len(coarse.positions) <= self.arguments['neighborhood_size']:
skipped_records.append((name, 'too few positions'))
continue
coarse_records[name] = coarse
scope['coarse_records'] = coarse_records
scope['skipped_records'] = skipped_records
print('{} final records'.format(len(coarse_records)))
print('{} skipped records'.format(len(skipped_records)))
max_atoms = 0
all_residue_types, all_atom_types = set(), set()
for rec in coarse_records.values():
all_residue_types.update(rec.type_names)
all_atom_types.update(rec.child_type_names)
max_atoms = max(max_atoms, max(len(v) for v in rec.child_positions))
all_residue_types = ['NORES'] + list(sorted(all_residue_types))
residue_type_map = {name: i for (i, name) in enumerate(all_residue_types)}
all_atom_types = ['NOATM'] + list(sorted(all_atom_types))
atom_type_map = {name: i for (i, name) in enumerate(all_atom_types)}
global_type_remaps = {}
for (name, rec) in coarse_records.items():
res_type_remap = [residue_type_map[name] for name in rec.type_names]
res_type_remap = np.array(res_type_remap, dtype=np.uint32)
atom_type_remap = [atom_type_map[name] for name in rec.child_type_names]
atom_type_remap = np.array(atom_type_remap, dtype=np.uint32)
global_type_remaps[name] = (res_type_remap, atom_type_remap)
print('Max number of atoms in a residue:', max_atoms)
scaled_mse = ScaledMSE(self.arguments['y_scale'])
scaled_mae = ScaledMAE(self.arguments['y_scale'])
y_scale = self.arguments['y_scale']/self.arguments['x_scale']
ranges, labels = [0], []
if self.arguments['validation_fraction']:
ranges.append(self.arguments['validation_fraction'])
labels.append('validation')
if 'test_fraction' in self.arguments:
ranges.append(self.arguments['test_fraction'])
labels.append('test')
ranges.append(2.)
labels.append('train')
cumulative_ranges = np.cumsum(ranges)
label_ranges = {name: (start, stop) for (name, start, stop)
in zip(labels, cumulative_ranges[:-1], cumulative_ranges[1:])}
for (name, fraction_range) in label_ranges.items():
scope['{}_generator'.format(name)] = make_batches(
coarse_records, self.arguments['batch_size'], self.arguments['neighborhood_size'],
max_atoms, len(all_residue_types), global_type_remaps, y_scale,
fraction_range, self.arguments['seed'],
randomly_rotate=self.arguments['randomly_rotate'])
if 'validation_generator' not in scope:
scope['validation_generator'] = scope['train_generator']
scope['x_scale'] = self.arguments['x_scale']
scope['y_scale'] = self.arguments['y_scale']
scope['type_names'] = all_residue_types
scope['type_name_map'] = residue_type_map
scope['child_type_names'] = all_atom_types
scope['child_type_name_map'] = atom_type_map
scope.setdefault('metrics', []).extend([scaled_mse, scaled_mae])
|
import concurrent.futures
import argparse
import json
import sys
import os
from adb import adb_commands
from adb import sign_cryptography
from slugify import slugify
from functools import partial
def get_dirname_for_addr(addr):
return slugify(addr)
def get_device_info(args, signer, addr):
dirname = get_dirname_for_addr(addr)
try:
device = adb_commands.AdbCommands()
device.ConnectDevice(
port_path=None,
serial=addr,
default_timeout_ms=args.timeout,
rsa_keys=[signer]
)
version = device.Shell('cat /proc/version', timeout_ms=args.timeout)
if args.screenshot or args.getprop:
outpath = '{}/{}'.format(args.output, dirname)
os.mkdir(outpath)
if args.screenshot:
try:
device.Shell(
'screencap -p /data/local/tmp/screenshot.png',
timeout_ms=args.timeout
)
device.Pull(
'/data/local/tmp/screenshot.png',
dest_file='{}/screenshot.png'.format(outpath),
timeout_ms=120000
)
device.Shell(
'rm -rf /data/local/tmp/screenshot.png',
timeout_ms=args.timeout
)
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print(e)
if args.getprop:
getprop = device.Shell('getprop', timeout_ms=args.timeout)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
return None
if args.getprop:
with open('{}/getprop.txt'.format(outpath), 'w+') as f:
f.write(getprop)
return {
'addr': addr,
'dirname': dirname,
'version': version
}
def parse_args():
p = argparse.ArgumentParser(description='')
p.add_argument('-t', '--threads',
help='maximum number of threads', type=int, default=10
)
p.add_argument('-k', '--adbkey',
help='path to adb key file', type=str, default='~/.android/adbkey'
)
p.add_argument('-o', '--output',
help='output directory name', type=str, default='output'
)
p.add_argument('-P', '--getprop',
help='retrieve and store device information via getprop',
default=False,
action='store_true'
)
p.add_argument('-S', '--screenshot',
help='retrieves a screenshot of the device', default=False,
action='store_true'
)
p.add_argument('--timeout',
        help='adb timeout in milliseconds', type=int, default=10000
)
return p.parse_args()
def main():
args = parse_args()
addrs = []
for input_line in sys.stdin:
addrs.append(input_line.rstrip())
os.mkdir(args.output)
signer = sign_cryptography.CryptographySigner(
os.path.expanduser(args.adbkey)
)
x_get_device_info = partial(get_device_info, args, signer)
i = 0
results = []
with concurrent.futures.ThreadPoolExecutor(max_workers=args.threads) as x:
for device_info in x.map(x_get_device_info, addrs):
i += 1
print('Progress: {} addresses tried (of {})'.format(i, len(addrs)))
            if device_info is not None:
results.append(device_info)
print()
print('Saving results ...')
with open('{}/results.json'.format(args.output), 'w+') as f:
f.write(json.dumps(results))
print('Done.')
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
exit(1)
except Exception as e:
raise
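# Usage sketch (assumed invocation; the script reads newline-separated ADB TCP
# addresses such as "192.0.2.10:5555" from stdin; file and script names below
# are illustrative only):
#   cat addrs.txt | python adb_survey.py -S -P -o output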
|
# Space: O(n)
# Time: O(n)
import collections
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
length1 = len(s1)
length2 = len(s2)
if length1 > length2: return False
if length2 == 0: return False
slow, fast = 0, 0
data = collections.Counter(s1)
match = len(data)
while fast < length2:
if s2[fast] in data:
data[s2[fast]] -= 1
if data[s2[fast]] == 0:
match -= 1
while match == 0 and slow <= fast:
if len(s2[slow:fast + 1]) == length1:
return True
if s2[slow] in data:
data[s2[slow]] += 1
if data[s2[slow]] > 0:
match += 1
slow += 1
fast += 1
return False
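# Example usage of the sliding-window check (standard permutation-in-string cases):
#   Solution().checkInclusion("ab", "eidbaooo")  # True, "ba" appears as a window of s2
#   Solution().checkInclusion("ab", "eidboaoo")  # False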
|
# -*- coding: utf-8
# Core
import pytest
# Models
from custom_auth_user.models import User
# Store
from custom_auth_user.user.store import UserStore
# Commands
from custom_auth_user.user.commands.register_command import register
@pytest.mark.django_db
class TestRegisterCommand():
@pytest.fixture
def user_store(self):
return UserStore()
def test_register_command(self, user_store):
user = register(
user_store=user_store,
email='email@email.com',
username='username',
first_name='first name',
last_name='last name',
password='password')
assert isinstance(user, User), \
'Should return user data when register command succeeds'
|
# -*- coding: utf-8 -*-
import scrapy
from kuan2.items import Kuan2Item
import re
import logging  # set this up first
logging.basicConfig(filename='kuan.log', filemode='w', level=logging.WARNING,
format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %I:%M:%S %p')
# https://juejin.im/post/5aee70105188256712786b7f
logging.warning("warn message")
logging.error("error message")
class KuspiderSpider(scrapy.Spider):
name = 'kuspider'
allowed_domains = ['www.coolapk.com']
start_urls = ['https://www.coolapk.com/apk/']
    # Set a delay between requests
# https://stackoverflow.com/questions/30404364/scrapy-delay-request
custom_settings = {
        # "DOWNLOAD_DELAY": 2,  # 2 second delay
        # "CONCURRENT_REQUESTS_PER_DOMAIN": 8  # defaults to 8 concurrent requests per second; can be lowered if needed
}
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
}
    # Crawling approach 1: build the full list of page URLs up front
def start_requests(self):
pages = []
        for page in range(1, 2):  # there are 610 pages in total
url = 'https://www.coolapk.com/apk/?page=%s' % page
page = scrapy.Request(
url, callback=self.parse, headers=self.headers)
pages.append(page)
return pages
def parse(self, response):
# print(response.text)
contents = response.css('.app_left_list>a')
for content in contents:
url = content.css('::attr("href")').extract_first()
url = response.urljoin(url)
yield scrapy.Request(url, callback=self.parse_url)
        # # Crawling approach 2: get the pagination node and follow the next page in a loop
# next_page = response.css('.pagination li:nth-child(8) a::attr("href")').extract_first()
# url = response.urljoin(next_page)
# # print(url) # test ok
# yield scrapy.Request(url,callback=self.parse )
def parse_url(self, response):
item = Kuan2Item()
item['name'] = response.css('.detail_app_title::text').extract_first()
results = self.get_comment(response)
item['volume'] = results[0]
item['download'] = results[1]
item['follow'] = results[2]
item['comment'] = results[3]
item['tags'] = self.get_tags(response)
item['score'] = response.css('.rank_num::text').extract_first()
num_score = response.css('.apk_rank_p1::text').extract_first()
item['num_score'] = re.search('共(.*?)个评分', num_score).group(1)
yield item
def get_comment(self, response):
messages = response.css('.apk_topba_message::text').extract_first()
result = re.findall(
r'\s+(.*?)\s+/\s+(.*?)下载\s+/\s+(.*?)人关注\s+/\s+(.*?)个评论.*?', messages)
        if result:  # not empty
results = list(result[0])
return results
def get_tags(self, response):
data = response.css('.apk_left_span2')
tags = [item.css('::text').extract_first() for item in data]
return tags
|
# DEPENDENCIES
import main
import functions
# LIBRARIES
import time
import winsound
import cv2 as cv
import numpy as np
from urllib.request import urlopen
import keyboard
from pynput.mouse import Button, Controller
import PySimpleGUI as sg
# VARIABLES
mouse = Controller()
delay = main.delay
image = functions.getImage(main.imagePath)
imageGray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
imageThresholded = []
contours = []
# CONTOURS
if not main.usesAdaptiveThreshold:
# SIMPLE
contours, imageThresholded = functions.generateSimpleContours(imageGray, main.simpleThreshold, main.simpleThresholdMaxValue,
main.simpleThresholdType, main.simpleThresholdContourApproximationMethod)
else:
# ADAPTIVE
contours, imageThresholded = functions.generateAdaptiveContours(imageGray, main.adaptiveThresholdMaxValue, main.adaptiveThresholdMethod,
main.adaptiveThresholdType, main.blockSize, main.c, main.adaptiveThresholdContourApproximationMethod)
# DRAWS ON SCREEN
if not main.preview:
# STARTUP
time.sleep(main.startupTime)
main.window.minimize()
# INITX AND INITY WILL BE THE TOP LEFT CORNER OF THE IMAGE
initX = mouse.position[0]
initY = mouse.position[1]
isDrawing = True
# DRAWS ALL POINTS
for contour in contours:
if not isDrawing:
break
mouse.release(Button.left)
time.sleep(delay)
for index, point in enumerate(contour):
# BREAKS EXECUTION
if keyboard.is_pressed("esc"):
mouse.release(Button.left)
isDrawing = False
break
# MOVES THE MOUSE TO THE NEXT POINT
mouse.position = (initX + point[0][0], initY + point[0][1])
time.sleep(delay)
# STARTS DRAWING ON A NEW CONTOUR
if(index == 1):
mouse.press(Button.left)
time.sleep(delay)
# DONE
mouse.release(Button.left)
winsound.Beep(440, 1000)
else:
# PREVIEWS
if main.previewType == "Image":
# SHOWS IMAGE + CONTOURS
cv.drawContours(image, contours, -1, (0,255,0), 2)
cv.imshow("Image Preview", image)
elif main.previewType == "Threshold":
# SHOWS THRESHOLDED IMAGE
cv.drawContours(imageThresholded, contours, -1, (0,255,0), 2)
cv.imshow("Threshold Preview", imageThresholded)
elif main.previewType == "Contours":
# SHOWS ONLY CONTOURS
blackimg = np.zeros(image.shape)
cv.drawContours(blackimg, contours, -1, (0,255,0), 2)
cv.imshow("Contours Preview", blackimg)
cv.waitKey(0)
|
import torch
from torch.nn import functional as F
def nll_loss(y_hat, y, reduce=True):
y_hat = y_hat.permute(0,2,1)
y = y.squeeze(-1)
loss = F.nll_loss(y_hat, y)
return loss
def test_loss():
yhat = torch.rand(16, 100, 54)
y = torch.rand(16, 100, 1)
loss = nll_loss(yhat, y.squeeze(-1))
|
# Generated by Django 3.2.7 on 2021-09-16 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homepage', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='favorites',
name='name',
field=models.CharField(max_length=50, null=True),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-26 14:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webdike', '0011_auto_20171124_0641'),
]
operations = [
migrations.RemoveField(
model_name='step',
name='active',
),
migrations.AddField(
model_name='step',
name='population',
field=models.FloatField(default=1),
),
migrations.AlterField(
model_name='step',
name='stage',
field=models.IntegerField(choices=[(0, 'Imported'), (1, 'Splitted'), (2, 'Polished'), (3, 'Connected'), (4, 'Explained')]),
),
]
|
import io
import json
from defang import defang
class MachinaeOutput:
@staticmethod
def get_formatter(format):
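        # Single-letter format codes map to the output classes below:
        #   N -> NormalOutput, J -> JsonOutput, D -> DotEscapedOutput, S -> ShortOutput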
if format.upper() == "N":
return NormalOutput()
elif format.upper() == "J":
return JsonOutput()
elif format.upper() == "D":
return DotEscapedOutput()
elif format.upper() == "S":
return ShortOutput()
@staticmethod
def escape(text):
return str(text)
def init_buffer(self):
self._buffer = io.StringIO()
def print(self, line, lf=True):
self._buffer.write(line)
if lf:
self._buffer.write("\n")
class NormalOutput(MachinaeOutput):
def output_header(self, target, otype, otype_detected):
self.print("*" * 80)
self.print("* Information for {0}".format(self.escape(target)))
self.print("* Observable type: {0} (Auto-detected: {1})".format(otype, otype_detected))
self.print("*" * 80)
#This needs to be refactored so the site from args is available here. No time currently, will do though
self.print("Not seeing what you expect? Likely not a valid site. Try running with --list-sites")
def run(self, result_sets: object):
self.init_buffer()
for row in result_sets:
(target, otype, otype_detected) = row.target_info
self.output_header(target, otype, otype_detected)
self.print("")
for item in row.results:
site = item.site_info
if hasattr(item, "error_info"):
self.print("[!] Error from {0}: {1}".format(site["name"], item.error_info))
continue
if len(item.resultset) == 0:
self.print("[-] No {0} Results".format(site["name"]))
else:
self.print("[+] {0} Results".format(site["name"]))
for result in item.resultset:
labels = getattr(result[0], "labels", None)
if len(result[0].values()) > 1 or labels is not None:
values = map(repr, result[0].values())
values = map(self.escape, values)
if labels is not None:
values = zip(labels, values)
values = ["{0}: {1}".format(label, value) for (label, value) in values]
output = ", ".join(values)
if result[1] is not None:
output = "({0})".format(", ".join(values))
output = defang(output)
else:
output = self.escape(list(result[0].values())[0])
output = defang(output)
if result[1] is not None:
output = "{1}: {0}".format(output, result[1])
output = defang(output)
self.print(" [-] {0}".format(output))
return self._buffer.getvalue()
class DotEscapedOutput(NormalOutput):
escapes = {
# ".": "\u2024",
# ".": "<dot>",
# ".": " DOT ",
".": "[.]",
"@": " AT ",
"http://": "hxxp://",
"https://": "hxxps://",
}
def output_header(self, target, otype, otype_detected):
super().output_header(target, otype, otype_detected)
self.print("* These characters are escaped in the output below:")
for (find, replace) in self.escapes.items():
self.print("* '{0}' replaced with '{1}'".format(find, replace))
self.print("* Do not click any links you find below")
self.print("*" * 80)
@classmethod
def escape(cls, text):
text = super(DotEscapedOutput, cls).escape(text)
for (find, replace) in cls.escapes.items():
text = text.replace(find, replace)
return text
class JsonGenerator(MachinaeOutput):
def run(self, result_sets):
records = list()
for row in result_sets:
(target, otype, otype_detected) = row.target_info
for item in row.results:
output = dict()
output["site"] = item.site_info["name"]
output["results"] = dict()
if hasattr(item, "error_info"):
output["results"] = {"error_info": str(item.error_info)}
elif len(item.resultset) > 0:
for result in item.resultset:
if result.pretty_name not in output["results"]:
output["results"][result.pretty_name] = list()
values = list(result.value.values())
if len(values) == 1:
output["results"][result.pretty_name].append(values[0])
elif len(values) > 1:
output["results"][result.pretty_name].append(values)
for (k, v) in output["results"].items():
if len(v) == 1:
output["results"][k] = v[0]
records.append(output)
return records
class JsonOutput(JsonGenerator):
def run(self, result_sets):
self.init_buffer()
for record in super().run(result_sets):
self.print(json.dumps(record))
return self._buffer.getvalue()
class ShortOutput(MachinaeOutput):
def run(self, result_sets):
self.init_buffer()
for row in result_sets:
(target, otype, otype_detected) = row.target_info
self.print("[+] {0}".format(target))
for item in row.results:
site = item.site_info
if hasattr(item, "error_info"):
self.print(" {0}: Error".format(site["name"]))
elif len(item.resultset) == 0:
self.print(" {0}: No".format(site["name"]))
else:
self.print(" {0}: Yes".format(site["name"]))
return self._buffer.getvalue()
|
#!/usr/bin/python
import pyrfa
p = pyrfa.Pyrfa()
p.createConfigDb("./pyrfa.cfg")
p.setDebugMode(True)
p.acquireSession("Session1")
p.createOMMConsumer()
p.login()
print(p.directoryRequest())
|
x = input()
a= []
while x != '0':
a.append(x)
x = input()
print(a)
|
# -*- coding: utf-8 -*-
# ======================================================================================================================
# Imports
# ======================================================================================================================
from tempest_zigzag import cli
from click.testing import CliRunner
from lxml import etree
def test_cli_happy_path(file_test_xml_all_pass, file_test_list, tempest_config_file):
"""Tests that the CLI will exit 0 and print out parsable xml when there is nothing to do"""
runner = CliRunner()
cli_arguments = [file_test_xml_all_pass, file_test_list, tempest_config_file]
result = runner.invoke(cli.main, args=cli_arguments)
assert 0 == result.exit_code
assert etree.XML(result.output) is not None
def test_cli_mix_up_args(file_test_xml_all_pass, file_test_list):
"""Tests that tempest-zigzag will exit non-zero if args are transposed"""
runner = CliRunner()
cli_arguments = [file_test_list, file_test_xml_all_pass]
result = runner.invoke(cli.main, args=cli_arguments)
assert result.exit_code != 0
|
from argparse import ArgumentParser
from path_helpers import path
from ._version import get_versions
#: .. versionadded:: 2.17
__version__ = get_versions()['version']
del get_versions
#: .. versionadded:: 2.13
MICRODROP_PARSER = ArgumentParser(description='MicroDrop: graphical user '
'interface for the DropBot Digital '
'Microfluidics control system.',
add_help=False)
MICRODROP_PARSER.add_argument('-c', '--config', type=path, default=None)
def base_path():
return path(__file__).abspath().parent
def glade_path():
'''
Return path to `.glade` files used by `gtk` to construct views.
'''
return base_path().joinpath('gui', 'glade')
|
#!/usr/bin/python -tt
# Project: Dropbox (Indigo Wire Networks)
# Filename: nornir_config_create
# claudia
# PyCharm
from __future__ import absolute_import, division, print_function
__author__ = "Claudia de Luna (claudia@indigowire.net)"
__version__ = ": 1.0 $"
__date__ = "7/30/18"
__copyright__ = "Copyright (c) 2018 Claudia"
__license__ = "Python"
import argparse
import nornir_discovery
from nornir import InitNornir
from nornir.plugins.functions.text import print_result
from nornir.plugins.tasks.text import template_file
def config_to_file(task, arg={}):
"""
    Render a configuration update snippet for a given switch
:param task:
:param arg:
:return:
"""
# Define the Jinja2 template file we will use to build our custom commands for each device
j2template = 'vlan_updates.j2'
# Generate a unique text file of commands for each device in our inventory
filename = "cfg-{}.txt".format(task.host)
task.host["rendered_cfg"] = task.run(task=template_file, template=j2template, path='', info=arg)
with open(filename,"w") as cfg_file:
cfg_file.write(str(task.host['rendered_cfg'][0]))
    print("\nCreated Configuration file {} for device {} in local directory...".format(filename, task.host))
def main():
    # Get our show vlan output from each device in our inventory
send_commands=['show vlan']
output_dict = nornir_discovery.send_napalm_commands(send_commands, show_output=True, debug=False)
# Set the TextFSM template we will be using to parse the show vlan output so we get it back in a way we can use
template_filename = 'cisco_ios_show_vlan.template'
# Initialize the vlan dictionary we will send to our Jinja2 template
j2_data_dict = {}
# ======= Define the Nornir Environment ========
nornir_instance = InitNornir()
# For each device lets build out the list of vlans which must be removed
for dev, output in output_dict.items():
parsed_results = nornir_discovery.parse_with_texfsm(output, template_filename, debug=False)
remove_vlan_list = []
# For each Vlan we found configured on the device
for vlan_data in parsed_results:
# We are only interested in vlans between 1 and 999
# vlan_data[0] is the vlan number
if 1 < int(vlan_data[0]) < 1000:
ints_in_vlan = len(vlan_data[3])
# If the vlan has no associated interfaces, then add it to the remove_vlan_list list
if ints_in_vlan == 0:
remove_vlan_list.append(vlan_data[0])
# Build a dictionary where the key is the device or host and the value the list of vlans to remove
# This will be passed along when we build our configs
j2_data_dict.update({dev: remove_vlan_list})
# ====== Generate Configs
# Execute a task "run" in the Nornir environment using our config_file Task function and pass it the customized data
# which is required to build out a custom config for each device removing any unused vlans and adding the standard
# vlans
print(f"Generating configurations:")
r = nornir_instance.run(task=config_to_file, arg=j2_data_dict)
# Debug print statements
# print("\n")
# print(r)
# Prints abbreviated output
# print_result(r, vars=['stdout'])
# # Prints full output -- good for troubleshooting
# print_result(r)
# Standard call to the main() function.
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Script Description",
epilog="Usage: ' python nornir_config_create' ")
arguments = parser.parse_args()
main()
|
import unittest
from logrec.dataprep.preprocessors.java import process_comments_and_str_literals
from logrec.dataprep.model.chars import OneLineCommentStart, NewLine, Quote, MultilineCommentStart, MultilineCommentEnd
# TODO write explanations with normal strings
from logrec.dataprep.model.containers import SplitContainer, StringLiteral, OneLineComment, MultilineComment
from logrec.dataprep.model.word import Word, Underscore
class JavaTest(unittest.TestCase):
def test_process_comments_and_str_literals(self):
'''
Positive scenario
<start>"//test_MyClass"
//*/
"/*!"
/*
/*
<end>
'''
tokens = [Quote(),
OneLineCommentStart(),
SplitContainer([Word.from_("test"),
Underscore(),
Word.from_("my"),
Word.from_("Class")]),
Quote(),
NewLine(),
OneLineCommentStart(),
MultilineCommentEnd(),
NewLine(),
Quote(),
MultilineCommentStart(),
SplitContainer.from_single_token("!"),
Quote(),
NewLine(),
MultilineCommentStart(),
NewLine(),
MultilineCommentEnd(),
NewLine(),
]
actual = process_comments_and_str_literals(tokens, {})
expected = [StringLiteral([OneLineCommentStart(), SplitContainer([
Word.from_("test"),
Underscore(),
Word.from_("my"),
Word.from_("Class")],
)]),
NewLine(),
OneLineComment([MultilineCommentEnd()]),
NewLine(),
StringLiteral([MultilineCommentStart(),
SplitContainer.from_single_token("!")]),
NewLine(),
MultilineComment([NewLine()]),
NewLine()
]
self.assertEqual(expected, actual)
def test_process_comments_and_str_literals_no_multiline_comment_start(self):
tokens = [MultilineCommentEnd(), Word.from_("a")]
actual = process_comments_and_str_literals(tokens, {})
expected = [MultilineCommentEnd(), Word.from_("a")]
self.assertEqual(expected, actual)
def test_process_comments_and_str_literals_newline_after_open_quote(self):
tokens = [Quote(), NewLine()]
actual = process_comments_and_str_literals(tokens, {})
expected = [Quote(), NewLine()]
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
from django.urls import path
from . import views
urlpatterns = [
path("fileresponse/", views.file_response),
]
|
#!/usr/bin/env python3
"""
The goal of this module is to take the explicit qualified type information of
a set of functions as well as a call tree and to expand the type information
to include implicit type information. The following rules apply:
- A function with direct type X also has indirect type X
- A function that calls a function with indirect type X also has indirect type X
"""
import sys
from scrapers import AnnotationKind
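# Illustrative example (hypothetical functions and qualifier names): if f() calls
# g() and g() carries a direct annotation with qualifier "locks", then augmenting
# the types gives f() the entry (AnnotationKind.INDIRECT, "locks") as well.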
def determine_indirect_type( function, call_tree,
funptr_types, function_types ):
"""
For the given function, determine its indirect type by recursively
walking the call tree and colleting function types that are indirectly
called by this function
"""
visited = set( [ function ] )
qualifiers = set()
for child in call_tree.calls( function ):
qualifiers |= _rec_determine_indirect_type(
child, call_tree, funptr_types, function_types, visited )
return qualifiers
def _rec_determine_indirect_type( function, call_tree, funptr_types,
function_types, visited ):
"""
Recurse the call tree starting at |function| and collecting the types
of everything visited. Do not recurse on |visited|.
"""
types = []
types += funptr_types.get( function, [] )
types += function_types.get( function, [] )
for child in call_tree.calls( function ):
if child not in visited:
visited.add( child )
types += _rec_determine_indirect_type(
child, call_tree, funptr_types, function_types,
visited )
return set( [ ( AnnotationKind.INDIRECT, qual ) for ( _, qual ) in types ] )
def augment_types( call_tree, funptr_types, function_types ):
"""
Given a call tree, the types of all function pointers, and the types of all
functions (augmented with overrides), determine the indirect types of
all functions and return a mapping of function to its complete type
"""
types = {}
for function in function_types.keys():
indirect_types = determine_indirect_type(
function, call_tree, funptr_types, function_types )
direct_types = function_types[ function ]
types[ function ] = indirect_types | direct_types
return types
if __name__ == '__main__':
from pprint import pprint
import scrapers
import call_tree
from ast_helpers import get_translation_unit
target = get_translation_unit( sys.argv[ 1 ] )
call_tree = call_tree.build_call_tree( target )
overrides = scrapers.Overrides.scrape( target )
func_types = scrapers.FunctionQualifiers.scrape( target )
funcptr_types = scrapers.FunctionPointers.scrape( target )
call_tree.augment_with_overrides( overrides )
pprint( augment_types( call_tree, funcptr_types, func_types ) )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 14:36:02 2021
@author: endocv2021@generalizationChallenge
"""
# import network
import os
import os.path as osp
import argparse
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
import skimage
from skimage import io
from skimage.transform import resize as rsz_sk
from tifffile import imsave
from models.get_model import get_arch
def create_predFolder(task_type):
directoryName = 'EndoCV2021'
if not os.path.exists(directoryName):
os.mkdir(directoryName)
if not os.path.exists(os.path.join(directoryName, task_type)):
os.mkdir(os.path.join(directoryName, task_type))
return os.path.join(directoryName, task_type)
def detect_imgs(infolder, ext='.tif'):
import os
items = os.listdir(infolder)
flist = []
for names in items:
if names.endswith(ext) or names.endswith(ext.upper()):
flist.append(os.path.join(infolder, names))
return np.sort(flist)
def get_argparser():
parser = argparse.ArgumentParser()
    parser.add_argument("--n_classes", type=int, default=1, help="num classes (default: 1)")
# Deeplab Options
parser.add_argument("--model_name", type=str, default='fpnet_mobilenet_W', help='model name')
parser.add_argument("--ckpt_path", type=str, default='/home/aggcmab/code/checkpoints/F1/fpnet_mobilenet_W/', help='checkpoint path')
parser.add_argument("--im_size", help='delimited list input, could be 512, or 480,600', type=str, default='512,640')
parser.add_argument("--gpu_id", type=str, default='1', help="GPU ID")
parser.add_argument("--random_seed", type=int, default=1, help="random seed (default: 1)")
return parser
def mymodel():
'''
Returns
-------
model : TYPE
DESCRIPTION.
device : TYPE
DESCRIPTION.
'''
opts = get_argparser().parse_args()
im_size = tuple([int(item) for item in opts.im_size.split(',')])
if isinstance(im_size, tuple) and len(im_size)==1:
tg_size = (im_size[0], im_size[0])
elif isinstance(im_size, tuple) and len(im_size)==2:
tg_size = (im_size[0], im_size[1])
os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Device: %s" % device)
print(opts.model_name)
model, mean, std = get_arch(opts.model_name, n_classes=opts.n_classes)
checkpoint = torch.load(osp.join(opts.ckpt_path, 'model_checkpoint.pth'), map_location='cpu')
model.load_state_dict(checkpoint['model_state_dict'])
# model = nn.DataParallel(model)
model.to(device)
model.mode = 'eval'
model.eval()
return model, mean, std, tg_size, device
if __name__ == '__main__':
'''
    You are not allowed to print the images or visualize the test data according to the rules.
We expect all the users to abide by this rule and help us have a fair challenge "EndoCV2021-Generalizability challenge"
FAQs:
1) Most of my predictions do not have polyp.
    --> This can be the case as this is a generalisation challenge. The dataset is very different and can produce such results. In general, not all samples
    contain polyps.
2) What format should I save the predictions.
--> you can save it in the tif or jpg format.
3) Can I visualize the data or copy them in my local computer to see?
--> No, you are not allowed to do this. This is against challenge rules. No test data can be copied or visualised to get insight. Please treat this as unseen image.!!!
4) Can I use my own test code?
    --> Yes, but please make sure that you follow the rules. Any visualisation or copying of test data is against the challenge rules. We make sure that the
    competition is fair and results are reproducible.
'''
model, mean, std, tg_size, device = mymodel()
task_type = 'segmentation'
# set image folder here!
directoryName = create_predFolder(task_type)
# ----> three test folders [https://github.com/sharibox/EndoCV2021-polyp_det_seg_gen/wiki/EndoCV2021-Leaderboard-guide]
subDirs = ['EndoCV_DATA1', 'EndoCV_DATA2', 'EndoCV_DATA3']
print(subDirs)
for j in range(0, len(subDirs)):
        # ---> Folder for test data location!!! (Warning!!! do not copy/visualise!!!)
imgfolder='/project/def-sponsor00/endocv2021-test-noCopyAllowed-v1/' + subDirs[j]
# set folder to save your checkpoints here!
saveDir = os.path.join(directoryName , subDirs[j]+'_pred')
if not os.path.exists(saveDir):
os.mkdir(saveDir)
imgfiles = detect_imgs(imgfolder, ext='.jpg')
from torchvision import transforms
data_transforms = transforms.Compose([
transforms.Resize(tg_size),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
file = open(saveDir + '/'+"timeElaspsed" + subDirs[j] +'.txt', mode='w')
timeappend = []
for imagePath in imgfiles[:]:
"""plt.imshow(img1[:,:,(2,1,0)])
Grab the name of the file.
"""
filename = (imagePath.split('/')[-1]).split('.jpg')[0]
print('filename is printing::=====>>', filename)
img1 = Image.open(imagePath).convert('RGB').resize((256,256), resample=0)
image = data_transforms(img1)
# perform inference here:
images = image.to(device, dtype=torch.float32)
#
img = skimage.io.imread(imagePath)
size=img.shape
start.record()
#
outputs = model(images.unsqueeze(0))
#
end.record()
torch.cuda.synchronize()
print(start.elapsed_time(end))
timeappend.append(start.elapsed_time(end))
#
probs = outputs.squeeze().sigmoid().detach().cpu()
preds = (probs > 0.5).numpy()
probs = probs.numpy()
pred = (preds * 255.0).astype(np.uint8)
prob = (probs * 255.0).astype(np.uint8)
img_mask = rsz_sk(pred, (size[0], size[1]), anti_aliasing=True)
img_prob = rsz_sk(prob, (size[0], size[1]), anti_aliasing=True)
io.imsave(saveDir + '/' + filename + '_mask.jpg', (img_mask * 255.0).astype('uint8'))
io.imsave(saveDir + '/' + filename + '_prob.jpg', (img_prob * 255.0).astype('uint8'))
file.write('%s -----> %s \n' %
(filename, start.elapsed_time(end)))
# TODO: write time in a text file
file.write('%s -----> %s \n' %
('average_t', np.mean(timeappend)))
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/result')
def result():
dict={'phy':50,'che':60,'maths':70,'OSAMA':6644}
return render_template('result.html', result=dict)
if __name__ == '__main__':
app.run(debug=True)
|
import re
discinfo = re.compile(r'Disc #(\d+) has (\d+) positions; at time=0, it is at position (\d+).', re.ASCII)
def parse(s):
discs, npos, offset = zip(*[re.search(discinfo, line).groups() for line in s.split('\n')])
return [(n, d + o) for d, n, o in zip(map(int, discs), map(int, npos), map(int, offset))]
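# crt() finds the first time t at which every disc described by (positions, offset)
# satisfies (t + offset) % positions == 0; it sieves, advancing t only in multiples
# of the moduli already satisfied (a Chinese-remainder-style search).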
def crt(info):
t = 0
inc = 1
for n, offset in sorted(info):
while (t + offset) % n:
t += inc
inc *= n
return t
if __name__ == '__main__':
from aocd.models import Puzzle
s = '''Disc #1 has 5 positions; at time=0, it is at position 4.
Disc #2 has 2 positions; at time=0, it is at position 1.'''
i = parse(s)
assert crt(i) == 5
puz = Puzzle(2016, 15)
info = parse(puz.input_data)
puz.answer_a = crt(info)
print('Part 1:', puz.answer_a)
puz.answer_b = crt(info + [(11, 7)])
print('Part 2:', puz.answer_b)
|
import re
with open('England.txt') as f:
data = f.read()
pat = re.compile(r"\{\{基礎情報 (.*?)\n\}\}", re.S)
baseInfo = '\n'.join(pat.findall(data))
print(baseInfo)
pat = re.compile(r"\|(.*?) = (.*)")
Info = pat.findall(baseInfo)
dic = {key: cont for key, cont in Info}
# print(dic)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import JsonObject
class TokenResponse(JsonObject):
def __init__(self, data):
super().__init__(data)
def __str__(self):
return f"TokenResponse(token={self.token}, renewToken={self.renewToken})"
|
from strategy.strategy import StrategyAbstract
from indicator.oscillator import Atr
class ESuperTrend(StrategyAbstract):
def apply_strategy(self) -> None:
self.data = self.data.copy()
atr = Atr(self.data, 'close')
self.data['e_atr'], _ = atr.compute(span=14, avg_type='ewm')
self.data['mean'] = (self.data['low'] + self.data['high']) / 2
self.data['upper_band'] = self.data['mean'] + 3 * self.data['e_atr']
self.data['lower_band'] = self.data['mean'] - 3 * self.data['e_atr']
self.data.dropna(axis=0, inplace=True)
self.data.reset_index(drop=True, inplace=True)
prev_row = None
final_upper_band = list()
final_lower_band = list()
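        # The final bands only tighten toward price: the upper band may only move
        # down (and the lower band only up), unless the previous close broke
        # through the band, in which case it resets to the freshly computed value.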
for row in self.data.itertuples(index=True):
if prev_row is None:
prev_row = row
final_upper_band.append(row.upper_band)
final_lower_band.append(row.lower_band)
continue
if row.upper_band < final_upper_band[-1] or prev_row.close > final_upper_band[-1]:
final_upper_band.append(row.upper_band)
else:
final_upper_band.append(final_upper_band[-1])
if row.lower_band > final_lower_band[-1] or prev_row.close < final_lower_band[-1]:
final_lower_band.append(row.lower_band)
else:
final_lower_band.append(final_lower_band[-1])
prev_row = row
self.data['final_upper_band'] = final_upper_band
self.data['final_lower_band'] = final_lower_band
prev_row = None
super_trend = list()
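        # SuperTrend tracks the final upper band while closes stay below it
        # (downtrend) and flips to the final lower band once a close breaks above
        # it; the symmetric rule applies for switching back.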
for row in self.data.itertuples(index=True):
if prev_row is None:
prev_row = row
super_trend.append(row.final_upper_band)
continue
if super_trend[-1] == prev_row.final_upper_band and row.close <= row.final_upper_band:
super_trend.append(row.final_upper_band)
elif super_trend[-1] == prev_row.final_upper_band and row.close > row.final_upper_band:
super_trend.append(row.final_lower_band)
elif super_trend[-1] == prev_row.final_lower_band and row.close >= row.final_lower_band:
super_trend.append(row.final_lower_band)
elif super_trend[-1] == prev_row.final_lower_band and row.close < row.final_lower_band:
super_trend.append(row.final_upper_band)
prev_row = row
self.data['super_trend'] = super_trend
self._reinit_data()
self.stop_loss.data = self.data
nb_prev = 3
self.prev_rows = nb_prev * [None]
stop_loss, take_profit = 0, 0
for row in self.data.itertuples(index=True):
if row.Index < max(self.stop_loss.min_rows + 1, nb_prev):
self._do_nothing(row, 0, 0)
self._do_common_processes(row, nb_prev, first_rows=True)
continue
if all([x.close < x.super_trend for x in self.prev_rows]) and row.close > row.super_trend:
self.buy_signal = True
else:
self.buy_signal = False
if all([x.close > x.super_trend for x in self.prev_rows]) and row.close < row.super_trend:
self.sell_signal = True
else:
self.sell_signal = False
stop_loss, take_profit = self.make_decision(row, stop_loss, take_profit)
self._do_common_processes(row, nb_prev, first_rows=False)
self._save_strategy_result()
|
from flask import Blueprint
main = Blueprint('main', __name__)
import json
from engine import SentimentAnalysis
from flask import Flask, request
@main.route("/", methods = ['GET'])
def hello():
return "Hello World!"
@main.route('/predict/', methods = ['POST'])
def get_predict():
if request.method == 'POST':
if 0 < len(request.data) < 50000:
            text = request.data.decode('utf-8')
rating = sa.get_predict_ratings(text)[0]
r = [int(round(i, 2)*100) for i in rating]
response = {"message": "success", "value": {"x1": r[0], "x2": r[1], "x3": r[2], "x4": r[3], "x5": r[4]}}
return json.dumps(response)
else:
response = {"message": "error", "value": "something's wrong with the input text"}
return json.dumps(response)
def create_app():
global sa
sa = SentimentAnalysis()
app = Flask(__name__)
app.register_blueprint(main)
return app
if __name__ == "__main__":
app = create_app()
app.run(debug=False, host='127.0.0.1', port=8686)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pymysql
"""
conn = MySQLdb.connect(
host = '127.0.0.1',
port = 3306,
user = 'root',
passwd='',
db = 'test',
)
cursor = conn.cursor()
sql = "create table user(id int,name varchar(30),password varchar(30))"
cursor.execute(sql)
sql = "insert into user(id,name,password) values('1','xiaoming','123456')"
cursor.execute(sql)
conn.commit()
cursor.execute('show tables')
cursor.execute('select * from user')
cursor.fetchall()
conn.close()
"""
class Mysql(object):
def __init__(self):
try:
self.conn = pymysql.connect(
host = '127.0.0.1',
port = 3306,
user = 'root',
passwd='xxx',
db = 'test',
)
except Exception as e:
print(e)
else:
            print('Connected successfully')
self.cur = self.conn.cursor()
    # def create_table(self):  # create a table
# sql =''
# res = self.cur.execute(sql)
# print(res)
    def add(self):  # insert data
        sql = ''
res = self.cur.execute(sql)
if res:
self.conn.commit()
else:
self.conn.rollback()
print(res)
    def rem(self):  # delete data
sql = ''
res = self.cur.execute(sql)
if res:
self.conn.commit()
else:
self.conn.rollback()
print(res)
    def mod(self):  # update data
sql = ''
res = self.cur.execute(sql)
if res:
self.conn.commit()
else:
self.conn.rollback()
print(res)
def show(self):
sql = ''
self.cur.execute(sql)
res = self.cur.fetchall()
for i in res:
print(i)
    def close(self):  # close the cursor and connection
self.cur.close()
self.conn.close()
if __name__=="__main__":
mysql = Mysql()
# mysql.create_table()
# mysql.add()
# mysql.mod()
# mysql.rem()
# mysql.show()
# mysql.close()
|
import asyncio
import logging
async def safe_wrapper(c):
try:
return await c
except asyncio.CancelledError:
raise
except Exception as e:
logging.getLogger(__name__).error(f"Unhandled error in background task: {str(e)}", exc_info=True)
def safe_ensure_future(coro, *args, **kwargs):
return asyncio.ensure_future(safe_wrapper(coro), *args, **kwargs)
async def safe_gather(*args, **kwargs):
try:
return await asyncio.gather(*args, **kwargs)
except Exception as e:
logging.getLogger(__name__).debug(f"Unhandled error in background task: {str(e)}", exc_info=True)
raise
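# Usage sketch (the coroutine names below are hypothetical, for illustration only):
#   safe_ensure_future(poll_order_book())  # schedule; unhandled errors get logged
#   results = await safe_gather(fetch_a(), fetch_b(), return_exceptions=True)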
|
# Generated by Django 2.0.7 on 2018-07-17 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_classified', '0002_auto_20180713_2158'),
]
operations = [
migrations.AlterField(
model_name='item',
name='is_active',
field=models.BooleanField(db_index=True, default=True, verbose_name='active'),
),
migrations.AlterField(
model_name='profile',
name='phone',
field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Contact phone'),
),
]
|
import sys
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
n,k,s=map(int,input().split())
ans=[10**9 if s!=10**9 else 10**9-1 for i in range(n)]
ans[:k]=[s for i in range(k)]
print(*ans)
|
# Merge Sort
# Like QuickSort, Merge Sort is a Divide and Conquer algorithm.
# It divides the input array into two halves, calls itself for the two halves,
# and then merges the two sorted halves. The merge() function is used for merging two halves.
# The merge(arr, l, m, r) is a key process that assumes that arr[l..m] and arr[m+1..r]
# are sorted and merges the two sorted sub-arrays into one. See the following C implementation
# for details.
# MergeSort(arr[], l, r)
# If r > l
# 1. Find the middle point to divide the array into two halves:
# middle m = l+ (r-l)/2
# 2. Call mergeSort for first half:
# Call mergeSort(arr, l, m)
# 3. Call mergeSort for second half:
# Call mergeSort(arr, m+1, r)
# 4. Merge the two halves sorted in step 2 and 3:
# Call merge(arr, l, m, r)
# The following diagram from wikipedia shows the complete merge sort process
# for an example array {38, 27, 43, 3, 9, 82, 10}. If we take a closer look at the diagram,
# we can see that the array is recursively divided into two halves till the size becomes 1.
# Once the size becomes 1, the merge processes come into action and start merging arrays back while sorting.
def mergeSort(arr):
    if len(arr) > 1:
        mid = len(arr)//2
        L, R = arr[:mid], arr[mid:]
        # Sort each half recursively before merging
        mergeSort(L)
        mergeSort(R)
        # Merge the two sorted halves back into arr
        i = j = k = 0
        while i < len(L) and j < len(R):
            if L[i] < R[j]:
                arr[k] = L[i]
                i += 1
            else:
                arr[k] = R[j]
                j += 1
            k += 1
        # Checking if any element was left
        while i < len(L):
            arr[k] = L[i]
            i += 1
            k += 1
        while j < len(R):
            arr[k] = R[j]
            j += 1
            k += 1
    return arr
# arr = [12, 11, 13, 5, 6, 7, 32, 4, 20, 17, 8]
print(mergeSort([12, 11, 13, 5, 6, 7]))
print(mergeSort([12, 11, 13, 5, 6, 7, 32, 4, 20, 17, 8]))
|
from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from .models import Person, Documento
from .forms import PersonForm
from django.contrib.auth.decorators import login_required
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView, View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils import timezone
from django.urls import reverse_lazy
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.auth.mixins import LoginRequiredMixin
@login_required()
def person_list(request):
person = Person.objects.all()
return render(request, 'person.html', {'person': person})
@login_required()
def person_new(request):
if not request.user.has_perm('clientes.person_add'):
return HttpResponse("Falha na autenticação")
    form = PersonForm(request.POST or None, request.FILES or None)
if form.is_valid():
form.save()
return redirect("person_list")
return render(request, 'person_form.html', {'form': form})
@login_required()
def person_update(request, id):
person = get_object_or_404(Person, pk=id)
form = PersonForm(request.POST or None, request.FILES or None, instance=person)
if form.is_valid():
form.save()
return redirect('person_list')
return render(request, 'person_form.html', {'form': form})
@login_required()
def person_delete(request, id):
person = get_object_or_404(Person, pk=id)
if request.method == 'POST':
person.delete()
return redirect('person_list')
return render(request, 'person_del.html', {'person': person})
class PersonList(LoginRequiredMixin, ListView):
model = Person
class PersonDetail(LoginRequiredMixin ,PermissionRequiredMixin, DetailView):
permission_required = ('permissão2',)
model = Person
def get_object(self, queryset=None):
pk = self.kwargs.get(self.pk_url_kwarg)
return Person.objects.select_related('doc').get(id=pk)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
return context
class PersonCreate(LoginRequiredMixin ,CreateView):
model = Person
fields = ['first_name', 'last_name', 'age', 'salary', 'bio', 'img']
success_url = '/clientes/list2/'
class PersonUpdate(LoginRequiredMixin, UpdateView):
model = Person
fields = ['first_name', 'last_name', 'age', 'salary', 'bio', 'img']
success_url = reverse_lazy('PersonList')
class PersonDelete(LoginRequiredMixin, DeleteView):
model = Person
success_url = reverse_lazy('PersonList')
class BulkView(LoginRequiredMixin, View):
def get(self, request):
documentos = ['123456', '147852', '963258', '987456', '91128739']
lis_doc = []
for documento in documentos:
d = Documento(num_doc=documento)
lis_doc.append(d)
Documento.objects.bulk_create(lis_doc)
return HttpResponse('funcionou')
|
# * Copyright (c) 2020-2021. Authors: see NOTICE file.
# *
# * Licensed under the GNU Lesser General Public License, Version 2.1 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * https://www.gnu.org/licenses/lgpl-2.1.txt
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
from functools import lru_cache
from pydantic import BaseSettings
class Settings(BaseSettings):
bioformats_host = "localhost"
bioformats_port = 4321
bioformats_metadata_timeout = 15
bioformats_conversion_timeout = 200 * 60
class Config:
env_file = "pims-config.env"
env_file_encoding = 'utf-8'
@lru_cache()
def get_settings():
env_file = os.getenv('CONFIG_FILE', 'pims-config.env')
return Settings(_env_file=env_file)
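# Settings come from pims-config.env by default; pointing the CONFIG_FILE
# environment variable at another env file overrides that, and lru_cache ensures
# the file is parsed only once per process.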
|
# -*- test-case-name: wokkel.test.test_delay -*-
#
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Delayed Delivery.
Support for communicating Delayed Delivery information as specified by
U{XEP-0203<http://xmpp.org/extensions/xep-0203.html>} and its predecessor
U{XEP-0091<http://xmpp.org/extensions/xep-0091.html>}.
"""
from dateutil.parser import parse
from dateutil.tz import tzutc
from twisted.words.protocols.jabber.jid import InvalidFormat, JID
from twisted.words.xish import domish
NS_DELAY = 'urn:xmpp:delay'
NS_JABBER_DELAY = 'jabber:x:delay'
class Delay(object):
"""
Delayed Delivery information.
Instances of this class represent delayed delivery information that can be
parsed from and rendered into both XEP-0203 and legacy XEP-0091 formats.
@ivar stamp: The timestamp the stanza was originally sent.
@type stamp: L{datetime.datetime}
@ivar sender: The optional entity that originally sent the stanza or
delayed its delivery.
@type sender: L{JID}
"""
def __init__(self, stamp, sender=None):
self.stamp = stamp
self.sender = sender
def toElement(self, legacy=False):
"""
Render this instance into a domish Element.
@param legacy: If C{True}, use the legacy XEP-0091 format.
@type legacy: C{bool}
"""
if not self.stamp:
raise ValueError("stamp is required")
if self.stamp.tzinfo is None:
raise ValueError("stamp is not offset-aware")
if legacy:
element = domish.Element((NS_JABBER_DELAY, 'x'))
stampFormat = '%Y%m%dT%H:%M:%S'
else:
element = domish.Element((NS_DELAY, 'delay'))
stampFormat = '%Y-%m-%dT%H:%M:%SZ'
stamp = self.stamp.astimezone(tzutc())
element['stamp'] = stamp.strftime(stampFormat)
if self.sender:
element['from'] = self.sender.full()
return element
@staticmethod
def fromElement(element):
"""
Create an instance from a domish Element.
"""
try:
stamp = parse(element[u'stamp'])
# Assume UTC if no timezone was given
if stamp.tzinfo is None:
stamp = stamp.replace(tzinfo=tzutc())
except (KeyError, ValueError):
stamp = None
try:
sender = JID(element[u'from'])
except (KeyError, InvalidFormat):
sender = None
delay = Delay(stamp, sender)
return delay
class DelayMixin(object):
"""
Mixin for parsing delayed delivery information from stanzas.
This can be used as a mixin for subclasses of L{wokkel.generic.Stanza}
for parsing delayed delivery information. If both XEP-0203 and XEP-0091
formats are present, the former takes precedence.
"""
delay = None
childParsers = {
(NS_DELAY, 'delay'): '_childParser_delay',
(NS_JABBER_DELAY, 'x'): '_childParser_legacyDelay',
}
def _childParser_delay(self, element):
self.delay = Delay.fromElement(element)
def _childParser_legacyDelay(self, element):
if not self.delay:
self.delay = Delay.fromElement(element)
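# Illustrative round-trip (not part of the module): toElement() requires an
# offset-aware datetime, as enforced above.
#
#   from datetime import datetime
#   delay = Delay(stamp=datetime(2020, 1, 1, 12, 0, tzinfo=tzutc()),
#                 sender=JID('user@example.org'))
#   element = delay.toElement()            # <delay xmlns='urn:xmpp:delay' .../>
#   parsed = Delay.fromElement(element)    # stamp parsed back as aware datetime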
|
# Generated by Django 2.1 on 2018-09-20 13:00
# Generated by Django 2.1 on 2018-09-20 12:41
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('body', models.TextField()),
('description', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('published_at', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(editable=False, max_length=140, unique=True)),
('rating_average', models.DecimalField(blank=True, decimal_places=2, max_digits=3, null=True)),
('image', models.ImageField(default='static/images/no-img.jpg', upload_to='static/images')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('favourited', models.ManyToManyField(blank=True, related_name='favourited', to=settings.AUTH_USER_MODEL)),
('userDisLikes', models.ManyToManyField(blank=True, related_name='_article_userDisLikes_+', to=settings.AUTH_USER_MODEL)),
('userLikes', models.ManyToManyField(blank=True, related_name='_article_userLikes_+', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-published_at'],
},
),
migrations.CreateModel(
name='ArticleRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)])),
],
),
migrations.CreateModel(
name='ArticleTags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=30, unique=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_body', models.CharField(max_length=500)),
('created_at', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article', to_field='slug')),
('commented_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, to_field='username')),
('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='articles.Comment')),
],
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('like', models.BooleanField()),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.Article')),
],
),
]
|
import datetime
import pandas as pd
def getData():
    dataset = pd.read_csv('FUTURES MINUTE.txt', header=None)
    dataset.columns = ['Date', 'time', "1. open", "2. high", '3. low', '4. close', '5. volume']
    dataset['date'] = dataset['Date'] + " " + dataset['time']
    dataset.drop('Date', axis=1, inplace=True)
    dataset.drop('time', axis=1, inplace=True)
    dataset['date'] = dataset['date'].apply(lambda x: pd.to_datetime(x, errors='ignore'))
    dataset['date'] = dataset['date'].apply(lambda x: datetime.datetime.strftime(x, '%Y-%m-%d %H:%M:%S'))
    # set_index() returns a new frame unless inplace=True, so index by 'date' in place
    dataset.set_index('date', inplace=True)
return dataset
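# Example call (illustrative; expects a comma-separated 'FUTURES MINUTE.txt'
# with date, time and OHLCV columns in the working directory):
#
#   df = getData()
#   df.columns.tolist()   # ['1. open', '2. high', '3. low', '4. close', '5. volume']
#   df.index[0]           # e.g. '2005-09-06 16:00:00' (string-formatted timestamps)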
|
"""
TODO
# Needs to connect to the DB to read
UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5"
# Needs to connect to the DB, and to get the table name
UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table "$2" "$3" "$4" "$5" "$6" "$7"
"""
"""
<key>proc.num[postgres,,,wal receiver]</key>
<key>proc.num[postgres,,,wal sender]</key>
<key>pgsql.get.pg.sr.status[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF}]</key>
<key>sr.db.list.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]</key>
<key>psql.confl_bufferpin[{#DBNAME}]</key>
<key>psql.confl_deadlock[{#DBNAME}]</key>
<key>psql.confl_lock[{#DBNAME}]</key>
<key>psql.confl_snapshot[{#DBNAME}]</key>
<key>psql.confl_tablespace[{#DBNAME}]</key>
<key>sr.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]</key>
<key>pgsql.get.pg.stat_replication[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF},{#MODE}]</key>
<key>sr.status.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]</key>
"""
|
import threading
import time
from src.robot import Robot
import brickpi
#Initialize the interface
interface=brickpi.Interface()
interface.initialize()
NUMBER_OF_PARTICLES = None
OFFSET = 100
DISTANCE_TO_PIXEL = 15
#Draw the square
print "drawLine:" + str((OFFSET,OFFSET,40*DISTANCE_TO_PIXEL+OFFSET,OFFSET))
print "drawLine:" + str((40*DISTANCE_TO_PIXEL+OFFSET,OFFSET,40*DISTANCE_TO_PIXEL+OFFSET,OFFSET+40*DISTANCE_TO_PIXEL))
print "drawLine:" + str((40*DISTANCE_TO_PIXEL+OFFSET,40*DISTANCE_TO_PIXEL+OFFSET,OFFSET,40*DISTANCE_TO_PIXEL+OFFSET))
print "drawLine:" + str((OFFSET,40*DISTANCE_TO_PIXEL+OFFSET,OFFSET,OFFSET))
robot = Robot(interface, pid_config_file="carpet_config.json")
for i in range(4):
    for j in range(4):
        robot.travel_straight(10,update_particles=True)
        NUMBER_OF_PARTICLES = robot.get_state()
        NUMBER_OF_PARTICLES = [(OFFSET+point[0][0]*DISTANCE_TO_PIXEL,OFFSET+point[0][1]*DISTANCE_TO_PIXEL,point[0][2]) for point in NUMBER_OF_PARTICLES]
        print "drawParticles:" + str(NUMBER_OF_PARTICLES)
        time.sleep(5)
    robot.rotate_right(90,update_particles=True)
    NUMBER_OF_PARTICLES = robot.get_state()
NUMBER_OF_PARTICLES = [(OFFSET+point[0][0]*DISTANCE_TO_PIXEL,OFFSET+point[0][1]*DISTANCE_TO_PIXEL,point[0][2]) for point in NUMBER_OF_PARTICLES]
print "drawParticles:" + str(NUMBER_OF_PARTICLES)
time.sleep(5)
interface.terminate()
|
"""
GMail! Woo!
"""
__title__ = 'gmail'
__version__ = '0.1'
__author__ = 'Charlie Guo'
__build__ = 0x0001
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Charlie Guo'
from .gmail import Gmail
from .mailbox import Mailbox
from .message import Message
from .utils import login, authenticate
|
#!/usr/bin/env python3
import socket
import threading
import queue
import random
import time
# Config
HOST = "example.com"
PORT = 6667
NICK = "bot"
CHANS = ["#test"]
OPS = ["me"]
DEBUG = True
with open("fortunes.txt", "r") as fortunes_file:
    FORTUNES = fortunes_file.read().strip().split("\n")
DELAY = 0.1 # reading & sending new messages
# Handlers
def handle_PING(bot):
bot.send("PONG %s" % bot.params)
def handle_376(bot):
for chan in CHANS:
bot.send("JOIN %s" % chan)
def handle_JOIN(bot):
nick = bot.src[1:bot.src.index("!")]
if nick in OPS:
bot.send("MODE %s +o %s" % (bot.params, nick))
else:
if nick != NICK:
bot.send_msg(bot.params, "Cześć %s!" % nick)
def handle_PRIVMSG(bot):
chan, msg = bot.params.split(" ", 1)
msg = msg[1:]
if msg in msg_handles:
bot.send_msg(chan, msg_handles[msg]())
def handle_KICK(bot):
chan, nick, params = bot.params.split(" ", 2)
if nick == NICK and chan in CHANS:
time.sleep(5)
bot.send("JOIN %s" % chan)
handles = {
"PING": handle_PING,
"376": handle_376,
"JOIN": handle_JOIN,
"PRIVMSG": handle_PRIVMSG,
"KICK": handle_KICK
}
def msg_handle_fortune():
return random.choice(FORTUNES)
msg_handles = {
"!fortune": msg_handle_fortune
}
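# New "!" commands are plain functions registered in msg_handles; handle_PRIVMSG
# dispatches on the exact message text. A hypothetical extra command:
#
#   def msg_handle_time():
#       return time.strftime("%H:%M:%S")
#   msg_handles["!time"] = msg_handle_time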
# Magic. It works because of magic. Don't touch it.
class IrcSocket(socket.socket):
def __init__(self, *attr):
super().__init__(*attr)
def recvuntil(self, txt):
result = b""
while result.find(txt) == -1:
try:
char = self.recv(1)
if len(char) == 0:
return False
except socket.error as msg:
print(msg)
return False
result += char
return result
class ReceiverThread(threading.Thread):
def __init__(self, socket, queue):
super().__init__()
self.socket = socket
self.queue = queue
def run(self):
while True:
msg = str(self.socket.recvuntil(b"\n").strip(), "utf-8")
self.queue.put(msg)
class IrcBot:
def __init__(self):
self.socket = IrcSocket(socket.AF_INET, socket.SOCK_STREAM)
self.queue = queue.Queue()
        self.receiver = ReceiverThread(self.socket, self.queue)
        self.receiver.daemon = True
def start(self):
self.socket.connect((HOST, PORT))
        self.receiver.start()
self.send("NICK %s" % NICK)
self.send("USER %s %s %s :%s" % (NICK, NICK, NICK, NICK))
while True:
try:
msg = self.queue.get(timeout=DELAY)
msg_splited = msg.split(" ", 2)
if len(msg_splited) == 2:
msg_splited.insert(0, "")
self.src, self.cmd, self.params = msg_splited
if DEBUG:
print("RECIVED: %s %s %s" % (self.src, self.cmd, self.params))
self.runHandle()
except queue.Empty:
pass
def runHandle(self):
if self.cmd in handles:
handles[self.cmd](self)
def send(self, msg):
msg_utf = bytes(msg + "\r\n", "utf-8")
if DEBUG:
print("SEND: %s" % msg)
self.socket.sendall(msg_utf)
def send_msg(self, chan, msg):
self.send("PRIVMSG %s :%s" % (chan, msg))
if __name__ == "__main__":
bot = IrcBot()
bot.start()
|
from common.tests.core import SimpleTestCase
from search.factories import SuggestionLogFactory
from search.models.suggestion import SuggestionLog
class QueryDataTestCase(SimpleTestCase):
def setUp(self):
self.login_user()
def tearDown(self):
SuggestionLog.objects.all().delete()
    def call_query_data_api(self, params=None):
        response = self.client.get('/api/dashboard/query-data/', params or {})
data = self.json(response)
return response, data
def test_success_call_with_pagination(self):
SuggestionLog.objects.all().delete()
self.logs = [SuggestionLogFactory() for i in range(16)]
response, data = self.call_query_data_api()
data.should.contain('data')
isinstance(data['data'], list).should.be.true
len(data['data']).should.equal(15)
response, data = self.call_query_data_api({'page': 1})
len(data['data']).should.equal(1)
def test_api_call_with_num_suggestions_filter(self):
SuggestionLogFactory(num_suggestions=0)
SuggestionLogFactory(num_suggestions=1)
response, data = self.call_query_data_api({'fail': 1})
len(data['data']).should.equal(1)
data['data'][0]['num_suggestions'].should.equal(0)
def test_search_by_query_call(self):
SuggestionLogFactory(search_query='query')
SuggestionLogFactory(search_query='other')
q = 'qu'
response, data = self.call_query_data_api({'q': q})
len(data['data']).should.equal(1)
data['data'][0]['search_query'].should.contain(q)
def test_bad_request_with_invalid_sort_order(self):
response, data = self.call_query_data_api({'order_by': 'abcxyz'})
response.status_code.should.equal(400)
data.should.contain('error')
def test_sort_order(self):
SuggestionLogFactory.create_batch(search_query='query1', size=2)
SuggestionLogFactory.create_batch(search_query='query2', size=4)
SuggestionLogFactory.create_batch(search_query='query3', size=3)
response, data = self.call_query_data_api({'order_by': '-num_usage'})
data = data['data']
data[0]['search_query'].should.equal('query2')
data[1]['search_query'].should.equal('query3')
data[2]['search_query'].should.equal('query1')
def test_does_not_return_less_than_2_chars_query(self):
SuggestionLogFactory(search_query='long_long_query')
SuggestionLogFactory(search_query='12')
response, data = self.call_query_data_api()
len(data['data']).should.equal(1)
|
from decouple import config
from flask import Flask, render_template
from .models import DB, User, Tweet # imports our DB from models.py
# Make our "app factory" (app-creator) function:
def create_app():
"""
Create and configure an instance of the Flask application.
"""
app = Flask(__name__)
# Add config. for our DB, using the URL we defined for the DB
# as a global variable for this project (in our .env file):
app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
    # Stop tracking modifications on sqlalchemy config, as per the warning
    # we are getting: 'SQLALCHEMY_TRACK_MODIFICATIONS adds significant
    # overhead and will be disabled by default in the future.':
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Tell our DB about the app (initialize our DB with our app):
DB.init_app(app)
@app.route('/')
def root():
# [Deployed model goes here]
# Return in the HTML+CSS template we're using, by rendering
# the result within the template, and returning that:
users = User.query.all()
return render_template("base.html",
title="Welcome!",
header="Welcome to TwitOff!",
text="Coming soon...",
users=users
)
# Route at /resetdb that clears and resets our database:
@app.route('/resetdb')
def resetdb():
DB.drop_all()
DB.create_all()
return render_template("base.html",
title="Reset Database",
users=[])
return app
# While debugging:
# if __name__ == "__main__":
# app.run(debug=True, port=8080)
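# One way to serve the factory during development (the module path below is a
# placeholder, not something defined in this file):
#   FLASK_APP="<your_package>:create_app" flask run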
|
import random
class Instances(object):
"""docstring for Instances"""
def __init__(self):
super(Instances, self).__init__()
self.clases = []
self.columnas = []
self.columnaTipo = {}
self.instances = []
def setClases(self, clasesPar):
self.clases = list(clasesPar)
def addClase(self, clase):
self.clases.append(clase)
def getClases(self):
return self.clases
def getNumeroColumnas(self):
return len(self.columnas)
def getNumeroInstances(self):
return len(self.instances)
def addInstance(self, instance):
self.instances.append(instance)
instance.setInstances(self)
def getListInstances(self):
return self.instances
def addColumna(self, nombreColumna, tipoColumna):
self.columnas.append(nombreColumna)
self.columnaTipo[nombreColumna] = tipoColumna
def setColumnas(self, columnaList, columnaTipo):
self.columnas = columnaList
self.columnaTipo = columnaTipo
def getColumnasTipo(self):
return self.columnaTipo
def getColumnasList(self):
return self.columnas
def getTipoColumnaByIndex(self, index):
return self.columnaTipo[self.columnas[index]]
def getTipoColumnaByNombre(self, nombre):
return self.columnaTipo[nombre]
def shuffle(self):
random.shuffle(self.instances)
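# Minimal usage sketch (objects passed to addInstance() are assumed to expose a
# setInstances() method, as called above):
#
#   datos = Instances()
#   datos.setClases(['yes', 'no'])
#   datos.addColumna('edad', 'numeric')
#   datos.addColumna('clase', 'nominal')
#   datos.getTipoColumnaByIndex(0)   # -> 'numeric'
#   datos.shuffle()                  # shuffles the loaded instances in place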
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.micheline import michelson_to_micheline
from pytezos.michelson.formatter import micheline_to_michelson
class MichelsonCodingTestKT1G39(TestCase):
def setUp(self):
self.maxDiff = None
def test_michelson_parse_code_KT1G39(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_code_KT1G39(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_code_KT1G39(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/code_KT1G39.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_storage_KT1G39(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_storage_KT1G39(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_storage_KT1G39(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/storage_KT1G39.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ong4Gv(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ong4Gv(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ong4Gv(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ong4Gv.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooqEHd(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooqEHd(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooqEHd(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooqEHd.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onynir(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onynir(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onynir(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onynir.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_onn4pk(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_onn4pk(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_onn4pk(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_onn4pk.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooYJ85(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooYJ85(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooYJ85(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooYJ85.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_ooDRnz(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_ooDRnz(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_ooDRnz(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_ooDRnz.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
def test_michelson_parse_parameter_oophVz(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.json')
actual = michelson_to_micheline(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.tz'))
self.assertEqual(expected, actual)
def test_michelson_format_parameter_oophVz(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.tz')
actual = micheline_to_michelson(get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.json'),
inline=True)
self.assertEqual(expected, actual)
def test_michelson_inverse_parameter_oophVz(self):
expected = get_data(
path='contracts/KT1G393LjojNshvMdf68XQD24Hwjn7xarzNe/parameter_oophVz.json')
actual = michelson_to_micheline(micheline_to_michelson(expected))
self.assertEqual(expected, actual)
|
from django.conf import settings
from django.core.files.storage import Storage
from meiduo_mall.settings.dev import FDFS_BASE_URL
class FastDFSStorage(Storage):
    '''Custom file storage class'''
    def __init__(self, fdfs_base_url=None):
        # if not fdfs_base_url:
        #     self.fdfs_base_url = settings.FDFS_BASE_URL
        # self.fdfs_base_url = fdfs_base_url
        self.fdfs_base_url = fdfs_base_url or settings.FDFS_BASE_URL
    def _open(self, name, mode='rb'):
        '''
        Called when a file is opened; the Storage docs require this override.
        :param name: file path
        :param mode: file open mode
        :return: None
        '''
        # We never open files from here, so this required override is a no-op.
        pass
    def _save(self, name, content):
        '''
        NOTE: the admin backend will later upload files to the FastDFS server in this method.
        Called when a file is saved; the Storage docs require this override.
        :param name: file path
        :param content: binary file content
        :return: None
        '''
        # Saving is not implemented yet, so this required override is a no-op.
        pass
    # def url(self, name):
    #     '''
    #     Return the absolute URL of a file.
    #     :param name: relative file path
    #     :return: the file's absolute URL
    #     '''
    #     return settings.FDFS_BASE_URL + name
    #     # return FDFS_BASE_URL + name
    #     pass
    def url(self, name):
        '''
        Return the absolute URL of a file.
        :param name: relative file path
        :return: the file's absolute URL
        '''
        return self.fdfs_base_url + name
#return FDFS_BASE_URL + name
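# Usage sketch: Django calls url() to build absolute file URLs, so the class is
# typically wired in per field or via DEFAULT_FILE_STORAGE (the dotted path and
# base URL below are assumptions, not values defined in this file):
#
#   # settings.py
#   DEFAULT_FILE_STORAGE = 'meiduo_mall.utils.fastdfs.fdfs_storage.FastDFSStorage'
#   FDFS_BASE_URL = 'http://image.example.com:8888/'
#
#   FastDFSStorage().url('group1/M00/00/00/abc.jpg')
#   # -> 'http://image.example.com:8888/group1/M00/00/00/abc.jpg'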
|
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API client library."""
from __future__ import unicode_literals
import bs4
def error_message(response, message=None, error=RuntimeError):
"""Raise an error using error message extracted from response."""
if not message:
message = 'Unknown error, with error: '
soup = bs4.BeautifulSoup(response.text, features='html.parser')
text = ''
if soup.p:
text = soup.p.string
raise error('{0:s}, with error [{1:d}] {2!s} {3:s}'.format(
message, response.status_code, response.reason, text))
class Error(Exception):
"""Base error class."""
class UnableToRunAnalyzer(Error):
"""Raised when unable to run an analyzer."""
|