code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
from lib import base
class NewRequestModal(base.Modal):
    """Page object for the LHN "New Request" modal dialog.

    Placeholder: no modal-specific behavior yet; everything is inherited
    from base.Modal.
    """
    pass
| jmakov/ggrc-core | test/selenium/src/lib/page/lhn_modal/new_request.py | Python | apache-2.0 | 309 |
from .demo import *
# Site configuration for the doctest run of the "mathieu" demo project.
SITE = Site(
    globals(),
    use_java=True,  # enable the Java-based components (e.g. printing support)
    # is_local_project_dir=True,
    hidden_languages='nl',  # hide Dutch from the language selector
    remote_user_header='REMOTE_USER')  # trust HTTP auth header for login
DEBUG = True
# SITE.appy_params.update(raiseOnError=True)
# SITE.appy_params.update(pythonWithUnoPath='/usr/bin/python3')
SITE.default_build_method = "appyodt"  # build printable documents via appy/ODT
SITE.webdav_url = '/'
| lino-framework/welfare | lino_welfare/projects/mathieu/settings/doctests.py | Python | agpl-3.0 | 347 |
import sys, os, csv, pprint, math
from collections import OrderedDict
import numpy as np
import random
import shutil
import math
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
import itertools
from matplotlib.colors import ListedColormap, NoNorm
from matplotlib import mlab
from itertools import cycle # for automatic markers
import json
from operator import itemgetter
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
from SimParams import SimParams
NUM_WORKFLOWS = range(8, 19, 1)
#NUM_WORKFLOWS = [9]
#NUM_NODES = [2,4,8,16,32]
NUM_NODES = 9
NOC_XY = [(2,1), (2,2), (2,4), (2,8), (2,16)]
IBUFF_TASKS_LATENESS_RATIO_RANGE = [0.3, 0.7]
TQ_TASKS_LATENESS_RATIO_RANGE = [0.3, 0.7]
SEED = 26358
EXP_DATA_DIR = "../experiment_data/hrt_video/scenario_4"
RANDOM_SEED_LIST = \
[81665, 33749, 43894, 26358, 80505, \
83660, 22817, 70263, 29917, 26044, \
5558, 76891, 22250, 42198, 18065, \
74076, 98652, 21149, 50399, 64217, \
44117, 57824, 42267, 83200, 99108, \
95928, 53864, 44289, 77379, 80521, \
88117, 23327, 73337, 94064, 31982, \
6878, 66093, 69541]
def _get_payload_from_flowbl(flw_bl, bytes_sf=1.0):
    """Invert the flow basic-latency model to recover the payload size.

    Solves ``flw_bl = 70*p^2 + payload*p/16`` for ``payload``, where ``p``
    is ``SimParams.NOC_PERIOD``, then scales the result by ``bytes_sf``.
    """
    period = SimParams.NOC_PERIOD
    fixed_cost = 70.0 * period * period
    payload = 16.0 * (flw_bl - fixed_cost) / period
    return float(payload) / float(bytes_sf)
###################################################################################################
# Computation cost vs. Communication cost
###################################################################################################
def plot_CompCost_vs_CommCost(fname=None, m=None,p=None, wf=None, seed=None, all_seeds=False):
    # Box-plot per-frame computation costs vs. NoC flow communication costs
    # for one experiment run.  Input can come from:
    #   * an explicit file pair: fname = [obuff_file, flwcompleted_file],
    #   * a single (m, p, wf, seed) combination, or
    #   * all seeds in RANDOM_SEED_LIST aggregated (all_seeds=True).
    if (m==None) and (p==None) and (wf==None) and (seed==None):
        sys.exit("plot_CompCost_vs_CommCost:: error")
    flwscompleted_file_data = []
    obuff_file_data = []
    # plot single seed ?
    if (all_seeds == False):
        if(fname == None):
            # get execution costs
            FNAME_PREFIX = "Exp_m"+str(m) + "_p"+str(p) + "_"
            fname = EXP_DATA_DIR + "/seed_" + str(seed) + "/" + FNAME_PREFIX + 'wf'+str(wf)+'_cores'+str(NUM_NODES) + "_obuff.js"
            json_data=open(fname)
            obuff_file_data = json.load(json_data)
            # get communication costs
            FNAME_PREFIX = "Exp_m"+str(m) + "_p"+str(p) + "_"
            fname = EXP_DATA_DIR + "/seed_" + str(seed) + "/" + FNAME_PREFIX + 'wf'+str(wf)+'_cores'+str(NUM_NODES) + "_flwcompleted.js"
            json_data=open(fname)
            flwscompleted_file_data = json.load(json_data)
        else:
            # fname supplied
            json_data=open(fname[0])
            obuff_file_data = json.load(json_data)
            json_data=open(fname[1])
            flwscompleted_file_data = json.load(json_data)
    else:
        # aggregate the distributions over every seed in RANDOM_SEED_LIST
        file_data = []
        for ix, each_seed in enumerate(RANDOM_SEED_LIST):
            # get execution costs
            FNAME_PREFIX = "Exp_m"+str(m) + "_p"+str(p) + "_"
            fname = EXP_DATA_DIR + "/seed_" + str(each_seed) + "/" + FNAME_PREFIX + 'wf'+str(wf)+'_cores'+str(NUM_NODES) + "_obuff.js"
            json_data=open(fname)
            temp_file_data = json.load(json_data)
            obuff_file_data.extend(temp_file_data)
            # get communication costs
            FNAME_PREFIX = "Exp_m"+str(m) + "_p"+str(p) + "_"
            fname = EXP_DATA_DIR + "/seed_" + str(each_seed) + "/" + FNAME_PREFIX + 'wf'+str(wf)+'_cores'+str(NUM_NODES) + "_flwcompleted.js"
            json_data=open(fname)
            temp_file_data = json.load(json_data)
            flwscompleted_file_data.extend(temp_file_data)
    # plot box plots
    f, axarr = plt.subplots(1, sharex=True)
    f.canvas.set_window_title('plot_CompCost_vs_CommCost - ')
    all_distributions = []
    # Per-frame-type computation cost ('cc') and response-time distributions.
    # Assumes each obuff entry has keys 'cc', 'type' (I/P/B), 'et', 'dt',
    # 'dct' -- TODO confirm the obuff JSON schema.
    iframe_execution_distribution = [t['cc'] for t in obuff_file_data if t['type'] == "I"]
    pframe_execution_distribution = [t['cc'] for t in obuff_file_data if t['type'] == "P"]
    bframe_execution_distribution = [t['cc'] for t in obuff_file_data if t['type'] == "B"]
    # NOTE(review): I-frames use 'dt' but P/B use 'dct' as the start time
    # reference -- confirm this asymmetry is intentional.
    iframe_resptime_distribution = [t['et']-t['dt'] for t in obuff_file_data if t['type'] == "I"]
    pframe_resptime_distribution = [t['et']-t['dct'] for t in obuff_file_data if t['type'] == "P"]
    bframe_resptime_distribution = [t['et']-t['dct'] for t in obuff_file_data if t['type'] == "B"]
    # Flow records are positional triples: f[0]=basic latency, f[1]=latency
    # variation, f[2]=flow type.  Types {1, 15} are data flows, 8 is memory
    # read, 9 is memory write -- presumably; verify against the simulator.
    #flow_commcost_distribution = [f['bl'] for f in flwscompleted_file_data if f['tp'] in [1,15]]
    data_flow_commcost_distribution = [f[0] for f in flwscompleted_file_data['flows_completed'] if f[2] in [1,15]]
    #flow_totaltime_distribution = [f['et']-f['st'] for f in flwscompleted_file_data if f['type'] in [1,15]]
    #flow_totaltime_distribution = [f['l_var']+f['bl'] for f in flwscompleted_file_data if f['tp'] in [1,15]]
    data_flow_totaltime_distribution = [f[0]+f[1] for f in flwscompleted_file_data['flows_completed'] if f[2] in [1,15]]
    all_mem_flow_commcost_distribution = [f[0] for f in flwscompleted_file_data['flows_completed'] if f[2] not in [1,15]]
    all_mem_flow_totaltime_distribution = [f[0]+f[1] for f in flwscompleted_file_data['flows_completed'] if f[2] not in [1,15]]
    memrd_flow_commcost_distribution = [f[0] for f in flwscompleted_file_data['flows_completed'] if f[2] in [8]]
    memrd_flow_totaltime_distribution = [f[0]+f[1] for f in flwscompleted_file_data['flows_completed'] if f[2] in [8]]
    memwr_flow_commcost_distribution = [f[0] for f in flwscompleted_file_data['flows_completed'] if f[2] in [9]]
    memwr_flow_totaltime_distribution = [f[0]+f[1] for f in flwscompleted_file_data['flows_completed'] if f[2] in [9]]
    #pprint.pprint(flow_totaltime_distribution)
    #communication_data_payload = [_get_payload_from_flowbl(bl,bytes_sf=10e7) for bl in data_flow_commcost_distribution]
    #communication_mem_payload = [_get_payload_from_flowbl(bl,bytes_sf=10e7) for bl in all_mem_flow_commcost_distribution]
    #print "np.sum(communication_data_payload) : " , np.sum(communication_data_payload)
    #print "np.sum(communication_mem_payload) : ", np.sum(communication_mem_payload)
    # Order here must match x_labels below.
    all_types_data = [iframe_execution_distribution,
                      pframe_execution_distribution,
                      bframe_execution_distribution,
                      iframe_resptime_distribution,
                      pframe_resptime_distribution,
                      bframe_resptime_distribution,
                      data_flow_commcost_distribution,
                      data_flow_totaltime_distribution,
                      # data traffic (all)
                      #communication_data_payload,
                      #communication_mem_payload,
                      # mem traffic (all)
                      #all_mem_flow_commcost_distribution,
                      #all_mem_flow_totaltime_distribution,
                      # mem traffic (rd)
                      memrd_flow_commcost_distribution,
                      memrd_flow_totaltime_distribution,
                      # mem traffic (wr)
                      memwr_flow_commcost_distribution,
                      memwr_flow_totaltime_distribution
                      ]
    pprint.pprint([len(x) for x in all_types_data])
    boxpos=np.arange(len(all_types_data))
    axarr.boxplot(all_types_data, 0, 'x', whis=1, positions=boxpos, widths=0.8)
    x_labels = [
                "i_cc",
                "p_cc",
                "b_cc",
                "i_tt",
                "p_tt",
                "b_tt",
                # data flow timings
                "df_bl",
                "df_r",
                # data traffic (all)
                #communication_data_payload,
                #communication_mem_payload,
                # mem traffic (all)
                #all_mem_flow_commcost_distribution,
                #all_mem_flow_totaltime_distribution,
                # mem traffic (rd)
                "mrd_cc",
                "mrd_r",
                # mem traffic (wr)
                "mwr_cc",
                "mwr_r",
                ]
    axarr.set_xticks(boxpos)
    axarr.set_xticklabels(x_labels)
    axarr.grid(True)
def _set_bp(ax, bp, col):
    """Colour every artist group of a matplotlib boxplot *bp* with *col*.

    The *ax* argument is accepted for call-site symmetry but not used.
    """
    for part in ('boxes', 'caps', 'whiskers', 'fliers', 'medians'):
        if part == 'boxes':
            plt.setp(bp[part], color=col, linewidth=1, alpha=0.7)
        else:
            plt.setp(bp[part], color=col)
###################################
# HELPERS
###################################
def _getEntry(file_data):
entry = {
"num_vids_accepted_success": file_data['num_vids_accepted_success'],
"num_dropped_tasks": file_data['num_dropped_tasks'],
"num_vids_accepted_late": file_data['num_vids_accepted_late'],
"num_vids_rejected": file_data['num_vids_rejected']
}
return entry
def _getUtilData(fname):
file = open(fname, 'r')
data = file.readlines()
data = [(float(x.strip())) for x in data]
return data
def _movingaverage (values, window):
weights = np.repeat(1.0, window)/window
sma = np.convolve(values, weights, 'same')
return sma
def _get_resfactor_specific_info(data_wfressumm):
    # Group per-video-stream results by resolution factor (res_w * res_h).
    # Returns {res_factor: {'h', 'w', 'gops_late_but_fully_complete',
    # 'res', 'lateness'}}.
    # NOTE(review): Python 2 only (iteritems / print statement); any
    # missing key aborts the whole program via sys.exit.
    vs_specific_data = {}
    for each_wf_id, each_wf in data_wfressumm.iteritems():
        for each_vs_id, each_vs in each_wf.iteritems():
            res_factor = each_vs["res_w"] * each_vs["res_h"]
            print "wf=" + str(each_wf_id) + ",vs=" + str(each_vs_id) + ", res="+str(res_factor)
            try:
                if(each_vs['result'] == False):
                    # Stream missed deadlines: accumulate its late-GoP list
                    # under its resolution factor.
                    if res_factor not in vs_specific_data:
                        vs_specific_data[res_factor] = {
                            'h' : each_vs["res_h"],
                            'w' : each_vs["res_w"],
                            'gops_late_but_fully_complete' : each_vs["gops_late_but_fully_complete"],
                            'res' : res_factor,
                            'lateness' : []
                        }
                    else:
                        vs_specific_data[res_factor]['gops_late_but_fully_complete'].extend(each_vs["gops_late_but_fully_complete"])
                else:
                    # Stream succeeded: record sentinel values ([-1] marker,
                    # 0.0 lateness) only if the resolution is not seen yet.
                    if res_factor not in vs_specific_data:
                        vs_specific_data[res_factor] = {
                            'h' : each_vs["res_h"],
                            'w' : each_vs["res_w"],
                            'gops_late_but_fully_complete' : [-1],
                            'res' : res_factor,
                            'lateness' : [0.0]
                        }
            except:
                # Dump the offending record before bailing out.
                pprint.pprint(each_vs)
                sys.exit("error")
    return vs_specific_data
###################################
# MAIN
###################################
if __name__ == '__main__':
    # Entry point: plot comp-vs-comm cost box-plots for one hard-coded
    # experiment (explicit obuff + flwcompleted JSON file pair).  The
    # m/p/wf/seed arguments are dummies (-1) because fname is supplied.
    # NOTE(review): Python 2 only (print statement); paths are
    # machine-specific (Z:/ network drive).
    plot_CompCost_vs_CommCost(fname=["Z:/MCASim/experiment_data/hevc_mapping_highccr_test/WL2/ac0mp0pr0cmb914mmp0/seed_26358/HEVCTileSplitTest__ac0mp0pr0cmb914mmp0_8_8__obuff.js",
                                     "Z:/MCASim/experiment_data/hevc_mapping_highccr_test/WL2/ac0mp0pr0cmb914mmp0/seed_26358/HEVCTileSplitTest__ac0mp0pr0cmb914mmp0_8_8__flwcompletedshort.js"],
                              m=-1,p=-1, wf=-1, seed=-1, all_seeds=False)
    # Older experiment invocations kept for reference:
    # plot_CompCost_vs_CommCost(fname=["../experiment_data/hevc_tile_mapping_wMemPSel/WL1/ac0mp0pr0cmb903mmp1/seed_80505/HEVCTileSplitTest__ac0mp0pr0cmb903mmp1_8_8__obuff.js",
    #                                  "../experiment_data/hevc_tile_mapping_wMemPSel/WL1/ac0mp0pr0cmb903mmp1/seed_80505/HEVCTileSplitTest__ac0mp0pr0cmb903mmp1_8_8__flwcompletedshort.js"],
    #                           m=-1,p=-1, wf=-1, seed=-1, all_seeds=False)
    # plot_CompCost_vs_CommCost(fname=["C:/Users/Rosh/Documents/EngD/Work/AbstractSimulator/Multicore_Abstract_Sim/src/experiment_data/hevc_tile_mapping_wMemPSel/WL1/ac0mp0pr0cmb911mmp0/seed_1234/HEVCTileSplitTest__ac0mp0pr0cmb911mmp0_6_6__obuff.js",
    #                                  "C:/Users/Rosh/Documents/EngD/Work/AbstractSimulator/Multicore_Abstract_Sim/src/experiment_data/hevc_tile_mapping_wMemPSel/WL1/ac0mp0pr0cmb911mmp0/seed_1234/HEVCTileSplitTest__ac0mp0pr0cmb911mmp0_6_6__flwcompletedshort.js"],
    #                           m=-1,p=-1, wf=-1, seed=-1, all_seeds=False)
    # plot_CompCost_vs_CommCost(fname=["Z:/MCASim/experiment_data/hevc_tiles_mapping/WL2/ac0mp0pr0cmb905/seed_80505/HEVCTileSplitTest__ac0mp0pr0cmb905_6_6__obuff.js",
    #                                  "Z:/MCASim/experiment_data/hevc_tiles_mapping/WL2/ac0mp0pr0cmb905/seed_80505/HEVCTileSplitTest__ac0mp0pr0cmb905_6_6__flwcompletedshort.js"],
    #                           m=-1,p=-1, wf=-1, seed=-1, all_seeds=False)
    # plot_CompCost_vs_CommCost(fname=["../experiment_data/hevc_tilesplit_test/ac0mp0pr0cmb905/seed_1234/HEVCTileSplitTest__ac0mp0pr0cmb905_6_6__obuff.js",
    #                                  "../experiment_data/hevc_tilesplit_test/ac0mp0pr0cmb905/seed_1234/HEVCTileSplitTest__ac0mp0pr0cmb905_6_6__flwcompleted.js"],
    #                           m=-1,p=-1, wf=-1, seed=-1, all_seeds=False)
    #plot_Pri_vs_BlockingTime_vs_Lateness_vs_InputBuffWaitTime(p=4,m=8,wf=12,seed=-1, all_seeds=True)
    print "finished"
    plt.show()
| roshantha9/AbstractManycoreSim | src/analyse_results/AnalyseResults_ExecutionCosts.py | Python | gpl-3.0 | 14,740 |
from scrapy.item import Item, Field
class SapItem(Item):
    """Scrapy item for one scraped SCN forum post (question or answer)."""
    uid = Field()        # unique identifier for each post (original comment
                         # said "user id" -- __str__ uses it as a post id;
                         # confirm which it is)
    type = Field()       # question, answer
    author = Field()
    title = Field()
    text = Field()
    date_time = Field()
    tags = Field()
    views = Field()
    answers = Field()    # number of answers
    resolve = Field()
    upvotes = Field()    # likes
    url = Field()

    def __str__(self):
        # Compact representation used in scrapy log output.
        return "Item(" + str(self['type']) + ") #" + str(self['uid'])
| collab-uniba/qa-scrapers | scn/scnscraper/items.py | Python | mit | 484 |
#!/usr/bin/env python3
import kxg, vecrec, networkx, itertools
import messages, helpers, gui, arguments
class World (kxg.World):
    """Authoritative game state: map, players, families and win/lose status."""

    def __init__ (self):
        kxg.World.__init__(self)
        self.map = None
        self.players = []
        self.winner = None
        self.losers = []
        self.families = []
        self.miracles = []
        self.game_started = False
        self.game_ended = False

    def update(self, time):
        # Only families carry per-tick simulation state.
        for family in self.families:
            family.update(time)

    def start_game(self, map):
        """Register the map token and flag the game as started."""
        self.game_started = True
        self.map = self.add_token(map)

    def game_over(self, winner):
        """Record the winner and flag the game as finished."""
        self.game_ended = True
        self.winner = winner

    @kxg.read_only
    def has_game_started (self):
        return self.game_started

    @kxg.read_only
    def has_game_ended (self):
        return self.game_ended

    def create_player(self, player):
        self.add_token(player, self.players)

    def create_family(self, family):
        # Families are tracked on their owning player, not on the world.
        self.add_token(family, family.player.families)

    def defeat_player(self, player):
        """Move *player* from the active list to the losers and tear it down.

        Note: this is the only place Player.dead is assigned.
        """
        player.dead = True
        self.losers.append(player)
        self.players.remove(player)
        player.teardown()
class Referee (kxg.Referee):
    """Server-side referee: broadcasts StartGame once world setup completes."""

    def __init__(self):
        kxg.Referee.__init__(self)
        self.world = None

    def get_name(self):
        # Actor name used by the kxg framework.
        return 'referee'

    def setup(self, world):
        kxg.Referee.setup(self, world)
        # Kick off the game as soon as the referee is wired to the world.
        message = messages.StartGame()
        self.send_message(message)

    def update(self, time):
        kxg.Referee.update(self, time)

    def teardown(self):
        pass
class Player (kxg.Token):
    """A game participant identified by name and colour, owning families.

    NOTE(review): ``self.dead`` is never initialised here -- it is first
    assigned in World.defeat_player(), so is_dead() raises AttributeError
    for a player that was never defeated.  Confirm this is intended.
    """

    def __init__(self, name, color):
        kxg.Token.__init__(self)
        self.name = name
        self.color = color
        self.world = None
        self.families = []

    def __extend__(self):
        # No GUI extension is registered for players.
        return {}

    def __str__(self):
        return '<Player name=%s>' % self.name

    def setup(self, world):
        pass

    def update(self, time):
        pass

    @kxg.read_only
    def report(self, messenger):
        # Ask the referee to formally defeat this player once it qualifies.
        if self.was_defeated() and not self.is_dead():
            message = messages.DefeatPlayer(self)
            messenger.send_message(message)

    def teardown(self):
        pass

    @kxg.before_setup
    def set_actor(self, id):
        self.actor = id

    @kxg.read_only
    def is_dead(self):
        return self.dead

    @kxg.read_only
    def was_defeated(self):
        # Placeholder defeat condition: players are currently never defeated.
        return False
class Map (kxg.Token):
    """Game map: a grid of Tile objects plus land/sea path-finding graphs.

    Tiles are loaded from an image file; each pixel colour maps to a
    climate via the ``climates`` table.
    """

    # Pixel colour (R, G, B) -> climate name used when reading the map image.
    climates = { # (fold)
        ( 0, 0, 255): 'water',
        (100, 50, 0): 'dirt',
        ( 0, 255, 0): 'grass',
        (255, 255, 0): 'desert',
        (255, 255, 255): 'tundra',
    }

    def __init__(self, path):
        kxg.Token.__init__(self)
        self.path = path        # image file the map is loaded from
        self.tiles = {}         # (row, col) -> Tile
        self.graphs = {}        # 'land'/'sea' -> networkx.Graph of tiles
        self.rows, self.columns = 0, 0

    def __extend__(self):
        return {gui.Gui: gui.MapExtension}

    def __str__(self):
        return '<Map w={} h={}>'.format(self.columns, self.rows)

    def __getitem__(self, index):
        # index is (row, col)
        # y, x ~ row, col
        return self.tiles[index]

    def setup(self, world):
        self.setup_tiles()
        self.setup_graphs()

    def setup_tiles(self):
        """Build one Tile per pixel of the map image."""
        from PIL import Image
        image = Image.open(self.path)
        self.columns, self.rows = image.size
        for row, col in self.yield_indices():
            # PIL addresses pixels as (x, y) == (col, row).
            pixel = image.getpixel((col, row))
            climate = Map.climates[pixel]
            self.tiles[row, col] = Tile(row, col, climate)

    def setup_graphs(self):
        # Connect the tiles into graph structures that can easily be searched
        # (for shortest paths and so forth). Two graphs are currently created:
        # one with all the land tiles and another with all the sea tiles. This
        # makes it possible to find paths for both land and sea units. More
        # graphs may be necessary as we add units that move in different ways.
        self.graphs = {
            'land': networkx.Graph(),
            'sea': networkx.Graph()
        }
        # Fill each graphs with the appropriate tiles.
        for tile in self.yield_tiles():
            if tile.is_land: self.graphs['land'].add_node(tile)
            if tile.is_sea: self.graphs['sea'].add_node(tile)
        # Create edges for all the graphs. Diagonal edges are included.
        # Only "forward" neighbors are listed: iterating every tile with
        # these four offsets covers each undirected edge exactly once.
        half_neighbors = lambda row, col: [
            (row + 1, col + 0),
            (row + 1, col + 1),
            (row + 0, col + 1),
            (row - 1, col + 1) ]
        for row_1, col_1 in self.yield_indices():
            index_1 = row_1, col_1
            tile_1 = self.tiles[index_1]
            for row_2, col_2 in half_neighbors(row_1, col_1):
                index_2 = row_2, col_2
                tile_2 = self.tiles.get(index_2)
                if tile_2 is None:
                    continue
                # Edge weight is the Euclidean distance between indices
                # (1 for orthogonal, sqrt(2) for diagonal neighbors).
                weight = vecrec.get_distance(index_1, index_2)
                if tile_1.is_land and tile_2.is_land:
                    self.graphs['land'].add_edge(tile_1, tile_2, weight=weight)
                if tile_1.is_sea and tile_2.is_sea:
                    self.graphs['sea'].add_edge(tile_1, tile_2, weight=weight)

    def find_path(self, source, target, graph='land'):
        """Return the list of tiles on an A* path from source to target."""
        from networkx.algorithms.shortest_paths import astar_path
        return astar_path(
            self.graphs[graph], source, target, self.a_star_heuristic)

    def find_path_distance(self, source, target, graph='land'):
        """Return only the length of the A* path from source to target."""
        from networkx.algorithms.shortest_paths import astar_path_length
        return astar_path_length(
            self.graphs[graph], source, target, self.a_star_heuristic)

    @staticmethod
    def a_star_heuristic(a, b):
        # Straight-line distance between tile indices; admissible for the
        # edge weights used above.
        return vecrec.get_distance(a.index, b.index)

    def yield_indices(self):
        """Iterate over every (row, col) pair of the map grid."""
        return itertools.product(range(self.rows), range(self.columns))

    def yield_tiles(self):
        return iter(self.tiles.values())

    def yield_neighbors(self, tile):
        """Yield the (up to four) orthogonal neighbors of *tile*."""
        row, col = tile.index
        neighbors = [
            (row - 1, col),
            (row + 1, col),
            (row, col - 1),
            (row, col + 1),
        ]
        for index in neighbors:
            tile = self.tiles.get(index)
            if tile is not None: yield tile

    @property
    def land_tiles(self):
        return self.graphs['land'].nodes()
class Tile:
    """One map square, identified by its (row, col) index and a climate.

    Equality and hashing are based purely on the index, since no two
    tiles on a map share the same position.
    """

    def __init__(self, row, col, climate):
        self.row = row
        self.col = col
        self.climate = climate
        self.families = {}

    def __str__(self):
        return '<Tile row={0.row} col={0.col} climate={0.climate}>'.format(self)

    def __eq__(self, other):
        return self.index == other.index

    def __hash__(self):
        return hash(self.index)

    @property
    def index(self):
        """The (row, col) position of this tile."""
        return (self.row, self.col)

    @property
    def is_land(self):
        """A tile is land exactly when it is not sea."""
        return not self.is_sea

    @property
    def is_sea(self):
        return self.climate == 'water'
class Family (kxg.Token):
    """A player's population group, distributed over the map's land tiles."""

    def __init__(self, player, name):
        super(Family, self).__init__()
        self.player = player
        self.name = name
        self.map = None

    def __extend__(self):
        # GUI actors render families through gui.FamilyExtension.
        return {gui.Gui: gui.FamilyExtension}

    def setup(self, world):
        self.map = world.map

    def get_tiles_occupied(self):
        # Land tiles whose per-tile family dict mentions this family.
        return [x for x in self.map.land_tiles if self in x.families]

    def get_num_tiles_occupied(self):
        return len(self.get_tiles_occupied())

    def get_mean_position(self):
        """Population-weighted centroid of this family over land tiles.

        NOTE(review): raises ZeroDivisionError when the family has no
        population anywhere (population_sum == 0) -- confirm callers
        never hit that case.
        """
        position_sum = 0
        population_sum = 0
        for tile in self.map.land_tiles:
            population = tile.families.get(self, 0)
            position_sum += population * vecrec.Vector(*tile.index)
            population_sum += population
        return position_sum / population_sum
| kxgames/kingdoms-of-life | tokens.py | Python | gpl-2.0 | 8,118 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Authors: Jussi Nurminen <jnu@iki.fi>
# License: BSD Style.
import tarfile
import os.path as op
import os
from ...utils import _fetch_file, verbose, _check_option
from ..utils import _get_path, logger, _do_path_update
@verbose
def data_path(dataset='evoked', path=None, force_update=False,
              update_path=True, verbose=None):
    u"""Get path to local copy of the high frequency SEF dataset.

    Gets a local copy of the high frequency SEF MEG dataset [1]_.

    Parameters
    ----------
    dataset : 'evoked' | 'raw'
        Whether to get the main dataset (evoked, structural and the rest) or
        the separate dataset containing raw MEG data only.
    path : None | str
        Where to look for the HF-SEF data storing location.
        If None, the environment variable or config parameter
        ``MNE_DATASETS_HF_SEF_PATH`` is used. If it doesn't exist, the
        "~/mne_data" directory is used. If the HF-SEF dataset
        is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_HF_SEF_PATH in mne-python
        config to the given path. If None, the user is prompted.
    %(verbose)s

    Returns
    -------
    path : str
        Local path to the directory where the HF-SEF data is stored.

    References
    ----------
    .. [1] Nurminen, J., Paananen, H., Mäkelä, J. (2017): High frequency
        somatosensory MEG dataset. https://doi.org/10.5281/zenodo.889234
    """
    key = 'MNE_DATASETS_HF_SEF_PATH'
    name = 'HF_SEF'
    path = _get_path(path, key, name)
    destdir = op.join(path, 'HF_SEF')
    # Zenodo download URLs and their expected MD5 checksums.
    urls = {'evoked':
            'https://zenodo.org/record/3523071/files/hf_sef_evoked.tar.gz',
            'raw':
            'https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz'}
    hashes = {'evoked': '13d34cb5db584e00868677d8fb0aab2b',
              'raw': '33934351e558542bafa9b262ac071168'}
    _check_option('dataset', dataset, sorted(urls.keys()))
    url = urls[dataset]
    hash_ = hashes[dataset]
    fn = url.split('/')[-1]  # pick the filename from the url
    archive = op.join(destdir, fn)
    # check for existence of evoked and raw sets
    has = dict()
    subjdir = op.join(destdir, 'subjects')
    megdir_a = op.join(destdir, 'MEG', 'subject_a')
    has['evoked'] = op.isdir(destdir) and op.isdir(subjdir)
    has['raw'] = op.isdir(megdir_a) and any(['raw' in fn_ for fn_ in
                                             os.listdir(megdir_a)])
    if not has[dataset] or force_update:
        if not op.isdir(destdir):
            os.mkdir(destdir)
        # Download (with checksum verification), then unpack the archive.
        _fetch_file(url, archive, hash_=hash_)
        with tarfile.open(archive) as tar:
            logger.info('Decompressing %s' % archive)
            for member in tar.getmembers():
                # strip the leading dirname 'hf_sef/' from the archive paths
                # this should be fixed when making next version of archives
                member.name = member.name[7:]
                try:
                    tar.extract(member, destdir)
                except IOError:
                    # check whether file exists but could not be overwritten
                    fn_full = op.join(destdir, member.name)
                    if op.isfile(fn_full):
                        os.remove(fn_full)
                        tar.extract(member, destdir)
                    else:  # some more sinister cause for IOError
                        raise
        # The archive is no longer needed once extracted.
        os.remove(archive)
    # Optionally persist the chosen path in the mne-python config.
    _do_path_update(path, update_path, key, name)
    return destdir
| olafhauk/mne-python | mne/datasets/hf_sef/hf_sef.py | Python | bsd-3-clause | 3,751 |
#
# GeoCoon - GIS data analysis library based on Pandas and Shapely
#
# Copyright (C) 2014 by Artur Wroblewski <wrobell@pld-linux.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
GeoCoon GeoDataFrame class and GIS series classes.
The module performs mapping between GIS series classes and Shapely
geometries.
"""
from functools import partial
import logging
import pandas
from shapely.geometry import Point, LineString, Polygon
from .meta import META_POINT, META_LINE_STRING, META_POLYGON
logger = logging.getLogger(__name__)
#
# GIS data frame and series definitions
#
class GeoSeries(pandas.Series):
    """
    Base implementation of GIS series based on Pandas' series.

    Indexing and slicing that yield a series re-wrap the result in the
    concrete GIS series subclass, so geometry-aware behaviour survives.
    """
    def __getitem__(self, key):
        """Return a scalar, or a series result re-wrapped in this class."""
        value = super().__getitem__(key)
        if isinstance(value, pandas.Series):
            return self._constructor(value)
        else:
            return value

    def __getslice__(self, slice):
        # Python 2 legacy slicing hook; never invoked on Python 3.
        # Fix: removed a stray debug print of the slice argument.
        # NOTE(review): Python 2 calls __getslice__ with two indices, so
        # this one-argument signature looks wrong -- confirm before any
        # attempt to support Python 2.
        value = super().__getslice__(slice)
        return self._constructor(value)

    @property
    def _constructor(self):
        """Constructor pandas uses internally; preserves the subclass."""
        return self.__class__
class PointSeries(GeoSeries):
    """
    GIS point series.

    Vectorized shapely.Point attributes/methods are attached to this class
    by adapt_series() at import time.
    """
class LineStringSeries(GeoSeries):
    """
    GIS line string series.

    Vectorized shapely.LineString attributes/methods are attached to this
    class by adapt_series() at import time.
    """
class PolygonSeries(GeoSeries):
    """
    GIS polygon series.

    Vectorized shapely.Polygon attributes/methods are attached to this
    class by adapt_series() at import time.
    """
class GeoDataFrame(pandas.DataFrame):
    """
    GIS data frame based on Pandas' data frame.

    The `data` parameter has to be a dictionary.  Columns that are
    GeoSeries instances are remembered in ``_geom_columns`` so that later
    selections can be re-wrapped in the right GIS series class.
    """
    def __init__(self, data, *args, **kw):
        """
        Create GIS data frame.

        Overrides Pandas' data frame constructor to determine, which
        columns are GIS series.

        .. seealso:: `pandas.DataFrame`
        """
        super().__init__(data, *args, **kw)
        self._geom_columns = {}
        if isinstance(data, dict):
            for k, col in data.items():
                if isinstance(col, GeoSeries):
                    self._geom_columns[k] = type(col)
        else:
            # fix: Logger.warn is deprecated; use Logger.warning instead
            logger.warning('Non-dictionary data argument, cannot detect'
                           ' GIS data')

    def __setitem__(self, key, value):
        """
        Overrides Pandas' data frame `__setitem__` method to store
        information about geometry series.
        """
        super().__setitem__(key, value)
        if isinstance(value, GeoSeries):
            self._geom_columns[key] = type(value)

    @property
    def _constructor(self):
        """
        Return such GIS data frame constructor, so geometry columns
        information are preserved.
        """
        def f(*args, **kw):
            obj = self.__class__(*args, **kw)
            # copy the geometry columns information
            obj._geom_columns = self._geom_columns.copy()
            # do we need the below?
            #for col in obj.columns:
            #    if col not in obj.columns:
            #        del obj._geom_columns[col]
            return obj
        return f
#
# GIS data frames and series adaptation functions
#
def wrap_df_method(method):
    """
    Wrap GeoDataFrame method to support GIS data frames and series.

    The wrapper re-wraps plain pandas results: data-frame results keep
    their geometry-column metadata, and geometry columns come back as
    their original GIS series class.
    """
    def wrapper(self, key):
        result = method(self, key)
        if isinstance(result, pandas.DataFrame):
            frame = GeoDataFrame(result, index=result.index)
            frame._geom_columns = self._geom_columns.copy()
            return frame
        if isinstance(key, str) and key in self._geom_columns:
            series_cls = self._geom_columns[key]
            return series_cls(result, index=self.index)
        return result
    return wrapper
def fetch_attr(series, name):
    """
    Create series using attribute value of each object stored in the
    GIS series.

    The function is used to adapt GIS series.

    :param series: GIS series.
    :param name: Attribute name.
    """
    values = [getattr(item, name) for item in series]
    return pandas.Series(values, index=series.index)
def adapt_attr(cls, gis, name):
    """
    Adapt GIS series to return series using attribute value of each object
    stored in the series.

    :param cls: GIS series class.
    :param gis: Shapely geometry class.
    :param name: Attribute name.
    """
    getter = partial(fetch_attr, name=name)
    getter.__doc__ = 'Vectorized version of :py:attr:`{}.{}` property.'.format(
        gis.__qualname__, name
    )
    setattr(cls, name, property(getter))
def create_series_method(cls, gis, method, meta):
    """
    Create GIS series method to return series of values returned by method
    call on each object in the series.

    :param cls: GIS series class.
    :param gis: Shapely geometry class.
    :param method: Method name to be adapted.
    :param meta: Method metadata.

    :seealso:: :py:class:`geocoon.meta.Meta`
    """
    first_is_geom = meta.first_is_geom
    returns_geom = meta.returns_geom
    # determine series class returned by method
    # - pandas.Series if returned value is non-geom object
    # - return the same series if returns_geom is true
    # - otherwise returns_geom is class - the series class
    if returns_geom == True:
        series_cls = cls
    elif returns_geom == False:
        series_cls = pandas.Series
    else:
        series_cls = MAP_GEOM[meta.returns_geom]
    # Shapely geometry method call to be adapted (unbound, so the series
    # element is passed explicitly as the receiver below).
    mcall = getattr(gis, method)
    def f_geom(self, other, *args, **kw):
        # Element-wise call pairing this series with another GIS series.
        data = (mcall(s, o, *args, **kw) for s, o in zip(self, other))
        return series_cls(data, index=self.index)
    def f_non_geom(self, *args, **kw):
        # Element-wise call with plain (non-series) arguments.
        data = (mcall(s, *args, **kw) for s in self)
        return series_cls(data, index=self.index)
    doc = 'Vectorized version of :py:meth:`{}.{}` method.'.format(
        gis.__qualname__, method
    )
    if first_is_geom:
        f = f_geom
        f.__doc__ = doc + '\n\n' \
            + 'The `other` parameter of the method is GIS series object.'
    else:
        f = f_non_geom
        f.__doc__ = doc
    f.__doc__ += '\n\nThe method returns {} object.'.format(
        series_cls.__qualname__
    )
    return f
def adapt_series(cls, gis, gis_meta):
    """
    Adapt GIS series to return data stored in GIS object.

    :param cls: GIS series class.
    :param gis: GIS object class.
    :param gis_meta: GIS object class metadata.
    """
    for name, meta in gis_meta.items():
        # Raises AttributeError early if the geometry class lacks the name.
        getattr(gis, name)
        if meta.is_property:
            adapt_attr(cls, gis, name)
        else:
            setattr(cls, name, create_series_method(cls, gis, name, meta))
#
# perform GIS data frames and series adaptation
#
# adapt GIS data frame methods to return GIS data frames and methods
# Wrap the selection/sorting entry points so results keep GIS metadata.
# NOTE(review): DataFrame.sort was removed in modern pandas (replaced by
# sort_values/sort_index) -- confirm the targeted pandas version.
df_methods = ('__getitem__', 'sort')
for m in df_methods:
    mt = getattr(GeoDataFrame, m)
    setattr(GeoDataFrame, m, wrap_df_method(mt))
# adapt GIS series
# geometry to gis series mapping
MAP_GEOM = {
    Point: PointSeries,
    LineString: LineStringSeries,
    Polygon: PolygonSeries,
}
# Attach vectorized shapely attributes/methods to each GIS series class.
adapt_series(PointSeries, Point, META_POINT)
adapt_series(LineStringSeries, LineString, META_LINE_STRING)
adapt_series(PolygonSeries, Polygon, META_POLYGON)
# vim: sw=4:et:ai
| wrobell/geocoon | geocoon/core.py | Python | gpl-3.0 | 7,707 |
import unittest
from openmdao.main.api import Assembly, set_as_top
from openmdao.test.rosen_suzuki import Simulation, ScalingPreProc, \
ScalingPostProc
from openmdao.lib.drivers.slsqpdriver import SLSQPdriver
class Replace2TestCase(unittest.TestCase):
    """Exercise Assembly.replace() across several component kinds."""

    def test_replace2(self):
        # Replace each sub-component of the Rosen-Suzuki simulation with an
        # alternative implementation, then verify the assembly still sets
        # itself up without errors.
        top = set_as_top(Simulation())
        top.replace('preproc', ScalingPreProc())
        top.replace('postproc', ScalingPostProc())
        top.replace('driver', SLSQPdriver())
        top.replace('comp', Assembly())
        top._setup()

if __name__ == "__main__":
    unittest.main()
| HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_replace2.py | Python | gpl-2.0 | 618 |
#!/usr/bin/env python
import os
from settings import DATABASES
from env_settings import path_to_store_settings
# Execute every file found under path_to_store_settings in this module's
# global namespace, layering extra settings on top of the defaults.
# SECURITY: exec() of arbitrary file contents -- safe only if the settings
# directory is fully trusted (as the original inline comment warns).
for fname in os.listdir(path_to_store_settings):
    full_path = os.path.join(path_to_store_settings, fname)
    f = open(full_path)
    content = f.read()
    f.close()
    exec(content) #watch out for what content is put there
| yejia/osl_notebook | settings_manager.py | Python | mit | 346 |
"""
Parses command line arguments for SonosBar
Define which Sonos player to use for controlling
-p --player Name of the Sonos Player (eg: "Living Room")
-i --ip IP address of the Sonos Player
=> IP addresses can be shortened if on the same subnet.
eg: 192.168.1.15 can be chosen entering 15 when remote is also within
192.168.1.x subnet
-l --playlist Selects a playlist to play (from Sonos playlists)
-r --radio Selects a radiostation to play
eg: x-sonosapi-stream:s25111?sid=254&flags=32
-j --join Join another player/group
use the name of the player to be joined
-k --ipjoin Join another player/group
use the IP address of the player to be joined
-v --vol Change volume (0-100)
FLAGS
-g --group Apply the chosen action to the whole group
If not set, only the selected player performs the action
-u --unjoin Unjoin from current group
-o --verbose Display which action was just taken
-b --bitbar Output system information for BitBar
ACTIONS
- play
- pause
- next
- previous
- shuffle
- normal (disable shuffle)
"""
import argparse
import socket
def parse_ip(ip_string):
    """Expand a possibly shortened IP by borrowing the host's own octets.

    On a 192.168.1.x host, "15" becomes "192.168.1.15" and "1.15" becomes
    "192.168.1.15"; a full four-octet address is returned unchanged.
    """
    local_octets = socket.gethostbyname(socket.gethostname()).split(".")
    given_octets = ip_string.split(".")
    prefix = local_octets[0:4 - len(given_octets)]
    return ".".join(prefix + given_octets)
def parse_cli_arguments():
    """Main function that parses command line arguments.

    Returns the argparse namespace with any shortened IP addresses
    (--ip / --ipjoin) already expanded via parse_ip().
    """
    parser = argparse.ArgumentParser(description='Control your Sonos')
    # Target selection: either a player name or an IP, never both.
    player_args = parser.add_mutually_exclusive_group()
    player_args.add_argument(
        "-p", "--player",
        metavar="SPEAKER_NAME",
        type=str,
        # default="Living Room",
        help="The name of the player/zone")
    player_args.add_argument(
        "-i", "--ip",
        metavar="IP_ADDRESS",
        type=str,
        help="The IP address of the player/zone")
    # At most one control action may be chosen per invocation.
    control_args = parser.add_mutually_exclusive_group()
    control_args.add_argument(
        "-l", "--playlist",
        metavar="PLAYLIST_NAME",
        type=str,
        help="The name of the playlist to play")
    control_args.add_argument(
        "-r", "--radio",
        metavar="RADIO_STATION",
        type=str,
        help="The name of the radio station to play")
    control_args.add_argument(
        "-v", "--vol",
        metavar="VOLUME",
        type=int,
        choices=range(0, 101),
        help="0-100")
    control_args.add_argument(
        "-j", "--join",
        metavar="SPEAKER_NAME",
        type=str,
        help="Name of the speaker to join")
    control_args.add_argument(
        "-k", "--ipjoin",
        metavar="SPEAKER_IP",
        type=str,
        help="IP of the speaker to join")
    control_args.add_argument(
        "-u", "--unjoin",
        action='store_const',
        const=True,
        help="Unjoin the player from all groups")
    control_args.add_argument(
        'action',
        metavar='action',
        nargs="?",
        choices=["play", "pause", "next", "previous", "shuffle", "normal"],
        help="""Action to take if non is set via flags.
        Can be either: play, pause, next, previous, shuffle, normal""")
    parser.add_argument(
        "-g", "--group",
        action='store_const',
        const=True,
        help="Apply the action to the whole group")
    # Output mode: verbose feedback or BitBar-formatted output, not both.
    output = parser.add_mutually_exclusive_group()
    output.add_argument(
        "-o", "--verbose",
        action='store_const',
        const=True,
        help="Display feedback about current actions")
    output.add_argument(
        "-b", "--bitbar",
        action='store_const',
        const=True,
        help="Display bitbar controls")
    args = parser.parse_args()
    # Expand shortened IPs using the host's own subnet.
    if args.ip:
        args.ip = parse_ip(args.ip)
    if args.ipjoin:
        args.ipjoin = parse_ip(args.ipjoin)
    return args
# Parsed once at import time so other modules can use cli_arguments.ARGUMENTS.
# NOTE(review): importing this module therefore reads sys.argv as a side
# effect -- confirm that is intended.
ARGUMENTS = parse_cli_arguments()
| anergictcell/SonosBar | src/cli_arguments.py | Python | mit | 4,037 |
from nose.tools import *
from airtime_analyzer.playability_analyzer import *
def check_default_metadata(metadata):
    """Stub for shared metadata assertions; intentionally empty for now."""
    pass
def test_missing_liquidsoap():
    """Analysis with a bogus liquidsoap executable must not crash the suite.

    The original version restored LIQUIDSOAP_EXECUTABLE only on the success
    path; if analyze() raised, the bogus path leaked into every later test.
    The restore now happens in a finally block.
    """
    old_ls = PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE
    PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = 'foosdaf'
    try:
        PlayabilityAnalyzer.analyze(
            u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
    finally:
        # Always put the real executable back, even when analyze() raises.
        PlayabilityAnalyzer.LIQUIDSOAP_EXECUTABLE = old_ls
@raises(UnplayableFileError)
def test_invalid_filepath():
    # A path that does not exist must be reported as unplayable.
    metadata = PlayabilityAnalyzer.analyze(u'non-existent-file', dict())
# The tests below feed known-good fixture files of various formats and
# channel layouts through the analyzer; analyze() must not raise for any
# of them.
def test_mp3_utf8():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-utf8.mp3', dict())
    check_default_metadata(metadata)
def test_mp3_dualmono():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-dualmono.mp3', dict())
    check_default_metadata(metadata)
def test_mp3_jointstereo():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-jointstereo.mp3', dict())
    check_default_metadata(metadata)
def test_mp3_simplestereo():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-simplestereo.mp3', dict())
    check_default_metadata(metadata)
def test_mp3_stereo():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.mp3', dict())
    check_default_metadata(metadata)
def test_mp3_mono():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-mono.mp3', dict())
    check_default_metadata(metadata)
def test_ogg_stereo():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.ogg', dict())
    check_default_metadata(metadata)
@raises(UnplayableFileError)
def test_invalid_wma():
    # A corrupt WMA file must be reported as unplayable.
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo-invalid.wma', dict())
def test_m4a_stereo():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.m4a', dict())
    check_default_metadata(metadata)
def test_wav_stereo():
    metadata = PlayabilityAnalyzer.analyze(u'tests/test_data/44100Hz-16bit-stereo.wav', dict())
    check_default_metadata(metadata)
@raises(UnplayableFileError)
def test_unknown():
metadata = PlayabilityAnalyzer.analyze(u'http://www.google.com', dict())
check_default_metadata(metadata) | comiconomenclaturist/libretime | python_apps/airtime_analyzer/tests/playability_analyzer_tests.py | Python | agpl-3.0 | 2,400 |
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" The code generation.
No language specifics at all are supposed to be present here. Instead it is
using primitives from the given generator to build code sequences (list of
strings).
As such this is the place that knows how to take a condition and two code
branches and make a code block out of it. But it doesn't contain any target
language syntax.
"""
from nuitka import Options
from nuitka.__past__ import iterItems
from nuitka.PythonVersions import python_version
from . import Contexts, Emission, Helpers
from .AttributeCodes import (
generateAssignmentAttributeCode,
generateAttributeLookupCode,
generateAttributeLookupSpecialCode,
generateBuiltinGetattrCode,
generateBuiltinHasattrCode,
generateBuiltinSetattrCode,
generateDelAttributeCode
)
from .BranchCodes import generateBranchCode
from .BuiltinCodes import (
generateBuiltinAnonymousRefCode,
generateBuiltinBinCode,
generateBuiltinBoolCode,
generateBuiltinBytearrayCode,
generateBuiltinComplexCode,
generateBuiltinFloatCode,
generateBuiltinHexCode,
generateBuiltinOctCode,
generateBuiltinOpenCode,
generateBuiltinRange1Code,
generateBuiltinRange2Code,
generateBuiltinRange3Code,
generateBuiltinRefCode,
generateBuiltinType1Code,
generateBuiltinType3Code,
generateBuiltinXrange1Code,
generateBuiltinXrange2Code,
generateBuiltinXrange3Code
)
from .CallCodes import generateCallCode, getCallsCode, getCallsDecls
from .ClassCodes import (
generateBuiltinIsinstanceCode,
generateBuiltinSuperCode,
generateSelectMetaclassCode
)
from .ComparisonCodes import generateComparisonExpressionCode
from .ConditionalCodes import (
generateConditionalAndOrCode,
generateConditionalCode
)
from .ConstantCodes import (
generateConstantEllipsisReferenceCode,
generateConstantFalseReferenceCode,
generateConstantNoneReferenceCode,
generateConstantReferenceCode,
generateConstantTrueReferenceCode
)
from .CoroutineCodes import (
generateAsyncIterCode,
generateAsyncNextCode,
generateAsyncWaitCode,
generateMakeCoroutineObjectCode,
getCoroutineObjectCode
)
from .DictCodes import (
generateBuiltinDictCode,
generateDictionaryCreationCode,
generateDictOperationGetCode,
generateDictOperationInCode,
generateDictOperationRemoveCode,
generateDictOperationSetCode,
generateDictOperationUpdateCode
)
from .EvalCodes import (
generateBuiltinCompileCode,
generateEvalCode,
generateExecCode,
generateExecfileCode,
generateLocalsDictSyncCode
)
from .ExceptionCodes import (
generateBuiltinMakeExceptionCode,
generateExceptionCaughtTracebackCode,
generateExceptionCaughtTypeCode,
generateExceptionCaughtValueCode,
generateExceptionPublishCode,
generateExceptionRefCode
)
from .ExpressionCodes import (
generateExpressionOnlyCode,
generateSideEffectsCode
)
from .FrameCodes import (
generateFramePreserveExceptionCode,
generateFrameRestoreExceptionCode,
generateStatementsFrameCode
)
from .FunctionCodes import (
generateFunctionCallCode,
generateFunctionCreationCode,
generateFunctionDeclCode,
generateFunctionOutlineCode,
getExportScopeCode,
getFunctionCode,
getFunctionDirectDecl
)
from .GeneratorCodes import (
generateGeneratorEntryCode,
generateMakeGeneratorObjectCode,
getGeneratorObjectCode
)
from .GlobalsLocalsCodes import (
generateBuiltinDir1Code,
generateBuiltinGlobalsCode,
generateBuiltinLocalsCode,
generateBuiltinVarsCode,
generateSetLocalsCode
)
from .Helpers import generateStatementCode
from .IdCodes import generateBuiltinHashCode, generateBuiltinIdCode
from .ImportCodes import (
generateBuiltinImportCode,
generateImportModuleCode,
generateImportModuleHardCode,
generateImportNameCode,
generateImportStarCode
)
from .IntegerCodes import generateBuiltinIntCode, generateBuiltinLongCode
from .IteratorCodes import (
generateBuiltinIter1Code,
generateBuiltinIter2Code,
generateBuiltinLenCode,
generateBuiltinNext1Code,
generateBuiltinNext2Code,
generateSpecialUnpackCode,
generateUnpackCheckCode
)
from .LabelCodes import getStatementTrace
from .ListCodes import (
generateBuiltinListCode,
generateListCreationCode,
generateListOperationAppendCode,
generateListOperationExtendCode,
generateListOperationPopCode
)
from .LoaderCodes import getMetapathLoaderBodyCode
from .LoopCodes import (
generateLoopBreakCode,
generateLoopCode,
generateLoopContinueCode
)
from .ModuleCodes import (
generateModuleFileAttributeCode,
getModuleCode,
getModuleValues
)
from .OperationCodes import (
generateOperationBinaryCode,
generateOperationUnaryCode
)
from .PrintCodes import generatePrintNewlineCode, generatePrintValueCode
from .RaisingCodes import generateRaiseCode
from .ReturnCodes import (
generateGeneratorReturnCode,
generateReturnCode,
generateReturnedValueRefCode
)
from .SetCodes import (
generateBuiltinSetCode,
generateSetCreationCode,
generateSetOperationAddCode,
generateSetOperationUpdateCode
)
from .SliceCodes import (
generateAssignmentSliceCode,
generateBuiltinSliceCode,
generateDelSliceCode,
generateSliceLookupCode
)
from .StringCodes import (
generateBuiltinAsciiCode,
generateBuiltinChrCode,
generateBuiltinFormatCode,
generateBuiltinOrdCode,
generateBuiltinStrCode,
generateBuiltinUnicodeCode,
generateStringContenationCode
)
from .SubscriptCodes import (
generateAssignmentSubscriptCode,
generateDelSubscriptCode,
generateSubscriptLookupCode
)
from .TryCodes import generateTryCode
from .TupleCodes import generateBuiltinTupleCode, generateTupleCreationCode
from .VariableCodes import (
generateAssignmentVariableCode,
generateDelVariableCode,
generateVariableReferenceCode,
generateVariableReleaseCode
)
from .YieldCodes import generateYieldCode, generateYieldFromCode
# Cache of already generated function codes, keyed by the function's code
# name. Maps to the (function_code, function_context) tuple this module's
# generateFunctionBodyCode() returns.
_generated_functions = {}

def generateFunctionBodyCode(function_body, context):
    """ Generate the code of one function/generator/coroutine/class body.

    Returns a (function_code, function_context) tuple.

    Bug fix: the cache above was consulted but never populated, and a
    cache hit would have returned a bare code string instead of the tuple
    that every caller unpacks. The result tuple is now stored before
    returning, so repeated requests for the same code name do not
    regenerate (and re-register) the code.
    """
    function_identifier = function_body.getCodeName()

    if function_identifier in _generated_functions:
        return _generated_functions[function_identifier]

    # Pick the kind of context matching the body kind.
    # TODO: Generate both codes, and base direct/etc. decisions on context.
    if function_body.isExpressionGeneratorObjectBody():
        function_context = Contexts.PythonGeneratorObjectContext(
            parent   = context,
            function = function_body
        )
    elif function_body.isExpressionCoroutineObjectBody():
        function_context = Contexts.PythonCoroutineObjectContext(
            parent   = context,
            function = function_body
        )
    elif function_body.isExpressionClassBody():
        function_context = Contexts.PythonFunctionDirectContext(
            parent   = context,
            function = function_body
        )
    elif function_body.needsCreation():
        function_context = Contexts.PythonFunctionCreatedContext(
            parent   = context,
            function = function_body
        )
    else:
        function_context = Contexts.PythonFunctionDirectContext(
            parent   = context,
            function = function_body
        )

    # Generate the statements of the body into a collector first.
    function_codes = Emission.SourceCodeCollector()

    generateStatementSequenceCode(
        statement_sequence = function_body.getBody(),
        allow_none         = True,
        emit               = function_codes,
        context            = function_context
    )

    needs_exception_exit = function_body.mayRaiseException(BaseException)

    # Wrap the statement codes into the object/function template matching
    # the body kind.
    if function_body.isExpressionGeneratorObjectBody():
        function_code = getGeneratorObjectCode(
            context                = function_context,
            function_identifier    = function_identifier,
            user_variables         = function_body.getUserLocalVariables(),
            temp_variables         = function_body.getTempVariables(),
            function_codes         = function_codes.codes,
            needs_exception_exit   = needs_exception_exit,
            needs_generator_return = function_body.needsGeneratorReturnExit()
        )
    elif function_body.isExpressionCoroutineObjectBody():
        function_code = getCoroutineObjectCode(
            context                = function_context,
            function_identifier    = function_identifier,
            user_variables         = function_body.getUserLocalVariables(),
            temp_variables         = function_body.getTempVariables(),
            function_codes         = function_codes.codes,
            needs_exception_exit   = needs_exception_exit,
            needs_generator_return = function_body.needsGeneratorReturnExit()
        )
    elif function_body.isExpressionClassBody():
        # Class bodies have no parameters and are never cross module used.
        function_code = getFunctionCode(
            context              = function_context,
            function_identifier  = function_identifier,
            parameters           = None,
            closure_variables    = function_body.getClosureVariables(),
            user_variables       = function_body.getUserLocalVariables(),
            temp_variables       = function_body.getTempVariables(),
            function_codes       = function_codes.codes,
            function_doc         = function_body.getDoc(),
            needs_exception_exit = needs_exception_exit,
            file_scope           = getExportScopeCode(
                cross_module = False
            )
        )
    else:
        parameters = function_body.getParameters()

        function_code = getFunctionCode(
            context              = function_context,
            function_identifier  = function_identifier,
            parameters           = parameters,
            closure_variables    = function_body.getClosureVariables(),
            user_variables       = function_body.getUserLocalVariables(),
            temp_variables       = function_body.getTempVariables(),
            function_codes       = function_codes.codes,
            function_doc         = function_body.getDoc(),
            needs_exception_exit = needs_exception_exit,
            file_scope           = getExportScopeCode(
                cross_module = function_body.isCrossModuleUsed()
            )
        )

    # Populate the cache with the full result tuple before returning.
    result = function_code, function_context
    _generated_functions[function_identifier] = result
    return result
def _generateStatementSequenceCode(statement_sequence, emit, context):
    """ Emit code for each statement of a sequence.

    Frame statement sequences are delegated to the frame code generation,
    everything else goes through the generic statement dispatcher. With
    execution tracing enabled, a trace is emitted ahead of each statement.
    """
    if statement_sequence is None:
        return

    tracing = Options.shallTraceExecution()

    for current in statement_sequence.getStatements():
        if tracing:
            node_repr = repr(current)
            location_repr = current.getSourceReference().getAsString()

            # The C level trace output wants bytes on Python3.
            if python_version >= 300:
                node_repr = node_repr.encode("utf8")
                location_repr = location_repr.encode("utf8")

            emit(getStatementTrace(location_repr, node_repr))

        # Frames are themselves statement sequences and handled separately.
        if current.isStatementsFrame():
            generateStatementsFrameCode(
                statement_sequence = current,
                emit               = emit,
                context            = context
            )
        else:
            generateStatementCode(
                statement = current,
                emit      = emit,
                context   = context
            )
def generateStatementSequenceCode(statement_sequence, emit, context,
                                  allow_none = False):
    """ Emit code for a statement sequence in a fresh statement context. """
    if allow_none and statement_sequence is None:
        return None

    assert statement_sequence.kind == "STATEMENTS_SEQUENCE", statement_sequence

    child_context = Contexts.PythonStatementCContext(context)

    _generateStatementSequenceCode(
        statement_sequence = statement_sequence,
        emit               = emit,
        context            = child_context
    )

    # Every temporary of the sequence must have been released by now.
    leftover_temps = child_context.getCleanupTempnames()
    assert not leftover_temps, leftover_temps
def prepareModuleCode(global_context, module, module_name):
    """ Prepare code generation for one compiled module.

    Generates the module body code and the code of all functions the
    module uses, collecting everything into the template values for the
    final module code. Returns a (template_values, context) tuple.
    """
    # As this not only creates all modules, but also functions, it deals
    # also with its functions.
    assert module.isCompiledPythonModule(), module
    context = Contexts.PythonModuleContext(
        module         = module,
        module_name    = module_name,
        code_name      = module.getCodeName(),
        filename       = module.getFilename(),
        global_context = global_context
    )
    # Exceptions escaping the module body jump to this label.
    context.setExceptionEscape("module_exception_exit")
    statement_sequence = module.getBody()
    codes = Emission.SourceCodeCollector()
    generateStatementSequenceCode(
        statement_sequence = statement_sequence,
        emit               = codes,
        allow_none         = True,
        context            = context,
    )
    function_decl_codes = []
    function_body_codes = []
    # Generate body and declaration for every function used by the module.
    for function_body in module.getUsedFunctions():
        function_code, function_context = generateFunctionBodyCode(
            function_body = function_body,
            context       = context
        )
        assert type(function_code) is str, type(function_code)
        function_body_codes.append(function_code)
        function_decl = generateFunctionDeclCode(
            function_body = function_body,
            context       = function_context
        )
        if function_decl is not None:
            function_decl_codes.append(function_decl)
    # These are for functions used from other modules. Due to cyclic
    # dependencies, we cannot rely on those to be already created.
    for function_body in module.getCrossUsedFunctions():
        assert function_body.isCrossModuleUsed()
        function_decl = getFunctionDirectDecl(
            function_identifier = function_body.getCodeName(),
            closure_variables   = function_body.getClosureVariables(),
            file_scope          = getExportScopeCode(
                cross_module = function_body.isCrossModuleUsed()
            ),
            context             = Contexts.PythonFunctionDirectContext(
                parent   = context,
                function = function_body
            )
        )
        function_decl_codes.append(function_decl)
    # Append helper codes and declarations in a deterministic (sorted)
    # order so the generated output is reproducible.
    for _identifier, code in sorted(iterItems(context.getHelperCodes())):
        function_body_codes.append(code)
    for _identifier, code in sorted(iterItems(context.getDeclarations())):
        function_decl_codes.append(code)
    function_body_codes = "\n\n".join(function_body_codes)
    function_decl_codes = "\n\n".join(function_decl_codes)
    template_values = getModuleValues(
        module_name         = module_name,
        module_identifier   = module.getCodeName(),
        codes               = codes.codes,
        function_decl_codes = function_decl_codes,
        function_body_codes = function_body_codes,
        temp_variables      = module.getTempVariables(),
        is_main_module      = module.isMainModule(),
        is_internal_module  = module.isInternalModule(),
        context             = context
    )
    if python_version >= 330:
        # Python3.3+ modules expose "__loader__"; make sure the constant
        # is registered with the global context.
        context.getConstantCode("__loader__")
    return template_values, context
def generateModuleCode(module_context, template_values):
    """ Produce the final source code of a module; thin template wrapper. """
    return getModuleCode(module_context  = module_context,
                         template_values = template_values)
def generateHelpersCode(other_modules):
    """ Build the shared helper codes.

    Returns a (declarations, implementation) pair covering the call
    helpers and the meta path loader for the given modules.
    """
    header_code = getCallsDecls()

    # Keep the original call order: loader body first, calls code second.
    loader_body = getMetapathLoaderBodyCode(other_modules)
    impl_code = getCallsCode() + loader_body

    return header_code, impl_code
def makeGlobalContext():
    # There is exactly one global context for the whole program; it holds
    # state shared across all modules (e.g. the constants).
    return Contexts.PythonGlobalContext()
# Register the dispatch table mapping expression node kinds to their code
# generation functions. Pure configuration data; each entry pairs a node
# "kind" string with the generator imported above.
Helpers.setExpressionDispatchDict(
    {
        "EXPRESSION_ATTRIBUTE_LOOKUP": generateAttributeLookupCode,
        "EXPRESSION_ATTRIBUTE_LOOKUP_SPECIAL": generateAttributeLookupSpecialCode,
        "EXPRESSION_BUILTIN_SLICE": generateBuiltinSliceCode,
        "EXPRESSION_BUILTIN_HASH": generateBuiltinHashCode,
        "EXPRESSION_BUILTIN_ID": generateBuiltinIdCode,
        "EXPRESSION_BUILTIN_COMPILE": generateBuiltinCompileCode,
        "EXPRESSION_BUILTIN_EXECFILE": generateExecfileCode,
        "EXPRESSION_BUILTIN_EVAL": generateEvalCode,
        "EXPRESSION_BUILTIN_EXEC": generateEvalCode,
        "EXPRESSION_BUILTIN_ITER1": generateBuiltinIter1Code,
        "EXPRESSION_BUILTIN_ITER2": generateBuiltinIter2Code,
        "EXPRESSION_BUILTIN_NEXT1": generateBuiltinNext1Code,
        "EXPRESSION_BUILTIN_NEXT2": generateBuiltinNext2Code,
        "EXPRESSION_BUILTIN_TYPE1": generateBuiltinType1Code,
        "EXPRESSION_BUILTIN_TYPE3": generateBuiltinType3Code,
        "EXPRESSION_BUILTIN_IMPORT": generateBuiltinImportCode,
        "EXPRESSION_BUILTIN_BOOL": generateBuiltinBoolCode,
        "EXPRESSION_BUILTIN_BYTEARRAY": generateBuiltinBytearrayCode,
        "EXPRESSION_BUILTIN_INT": generateBuiltinIntCode,
        "EXPRESSION_BUILTIN_LONG": generateBuiltinLongCode,
        "EXPRESSION_BUILTIN_FLOAT": generateBuiltinFloatCode,
        "EXPRESSION_BUILTIN_COMPLEX": generateBuiltinComplexCode,
        "EXPRESSION_BUILTIN_LEN": generateBuiltinLenCode,
        "EXPRESSION_BUILTIN_STR": generateBuiltinStrCode,
        "EXPRESSION_BUILTIN_UNICODE": generateBuiltinUnicodeCode,
        "EXPRESSION_BUILTIN_CHR": generateBuiltinChrCode,
        "EXPRESSION_BUILTIN_ORD": generateBuiltinOrdCode,
        "EXPRESSION_BUILTIN_BIN": generateBuiltinBinCode,
        "EXPRESSION_BUILTIN_OCT": generateBuiltinOctCode,
        "EXPRESSION_BUILTIN_HEX": generateBuiltinHexCode,
        "EXPRESSION_BUILTIN_TUPLE": generateBuiltinTupleCode,
        "EXPRESSION_BUILTIN_LIST": generateBuiltinListCode,
        "EXPRESSION_BUILTIN_SET": generateBuiltinSetCode,
        "EXPRESSION_BUILTIN_DICT": generateBuiltinDictCode,
        "EXPRESSION_BUILTIN_LOCALS": generateBuiltinLocalsCode,
        "EXPRESSION_BUILTIN_GLOBALS": generateBuiltinGlobalsCode,
        "EXPRESSION_BUILTIN_SUPER": generateBuiltinSuperCode,
        "EXPRESSION_BUILTIN_ISINSTANCE": generateBuiltinIsinstanceCode,
        "EXPRESSION_BUILTIN_DIR1": generateBuiltinDir1Code,
        "EXPRESSION_BUILTIN_VARS": generateBuiltinVarsCode,
        "EXPRESSION_BUILTIN_HASATTR": generateBuiltinHasattrCode,
        "EXPRESSION_BUILTIN_GETATTR": generateBuiltinGetattrCode,
        "EXPRESSION_BUILTIN_SETATTR": generateBuiltinSetattrCode,
        "EXPRESSION_BUILTIN_OPEN": generateBuiltinOpenCode,
        "EXPRESSION_BUILTIN_RANGE1": generateBuiltinRange1Code,
        "EXPRESSION_BUILTIN_RANGE2": generateBuiltinRange2Code,
        "EXPRESSION_BUILTIN_RANGE3": generateBuiltinRange3Code,
        "EXPRESSION_BUILTIN_XRANGE1": generateBuiltinXrange1Code,
        "EXPRESSION_BUILTIN_XRANGE2": generateBuiltinXrange2Code,
        "EXPRESSION_BUILTIN_XRANGE3": generateBuiltinXrange3Code,
        "EXPRESSION_BUILTIN_MAKE_EXCEPTION": generateBuiltinMakeExceptionCode,
        "EXPRESSION_BUILTIN_REF": generateBuiltinRefCode,
        "EXPRESSION_BUILTIN_EXCEPTION_REF": generateExceptionRefCode,
        "EXPRESSION_BUILTIN_ANONYMOUS_REF": generateBuiltinAnonymousRefCode,
        "EXPRESSION_CAUGHT_EXCEPTION_TYPE_REF": generateExceptionCaughtTypeCode,
        "EXPRESSION_CAUGHT_EXCEPTION_VALUE_REF": generateExceptionCaughtValueCode,
        "EXPRESSION_CAUGHT_EXCEPTION_TRACEBACK_REF": generateExceptionCaughtTracebackCode,
        "EXPRESSION_CALL_EMPTY": generateCallCode,
        "EXPRESSION_CALL_KEYWORDS_ONLY": generateCallCode,
        "EXPRESSION_CALL_NO_KEYWORDS": generateCallCode,
        "EXPRESSION_CALL": generateCallCode,
        "EXPRESSION_CONSTANT_NONE_REF": generateConstantNoneReferenceCode,
        "EXPRESSION_CONSTANT_TRUE_REF": generateConstantTrueReferenceCode,
        "EXPRESSION_CONSTANT_FALSE_REF": generateConstantFalseReferenceCode,
        "EXPRESSION_CONSTANT_STR_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_UNICODE_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_BYTES_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_INT_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_LONG_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_FLOAT_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_COMPLEX_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_ELLIPSIS_REF": generateConstantEllipsisReferenceCode,
        "EXPRESSION_CONSTANT_DICT_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_DICT_EMPTY_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_TUPLE_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_LIST_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_SET_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_SLICE_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_XRANGE_REF": generateConstantReferenceCode,
        "EXPRESSION_CONSTANT_TYPE_REF": generateConstantReferenceCode,
        "EXPRESSION_CONDITIONAL": generateConditionalCode,
        "EXPRESSION_CONDITIONAL_OR": generateConditionalAndOrCode,
        "EXPRESSION_CONDITIONAL_AND": generateConditionalAndOrCode,
        "EXPRESSION_COMPARISON": generateComparisonExpressionCode,
        "EXPRESSION_COMPARISON_IS": generateComparisonExpressionCode,
        "EXPRESSION_COMPARISON_IS_NOT": generateComparisonExpressionCode,
        "EXPRESSION_COMPARISON_IN": generateComparisonExpressionCode,
        "EXPRESSION_COMPARISON_NOT_IN": generateComparisonExpressionCode,
        "EXPRESSION_COMPARISON_EXCEPTION_MATCH": generateComparisonExpressionCode,
        "EXPRESSION_DICT_OPERATION_GET": generateDictOperationGetCode,
        "EXPRESSION_DICT_OPERATION_IN": generateDictOperationInCode,
        "EXPRESSION_DICT_OPERATION_NOT_IN": generateDictOperationInCode,
        "EXPRESSION_FUNCTION_CREATION": generateFunctionCreationCode,
        "EXPRESSION_FUNCTION_CALL": generateFunctionCallCode,
        "EXPRESSION_IMPORT_MODULE": generateImportModuleCode,
        "EXPRESSION_IMPORT_MODULE_HARD": generateImportModuleHardCode,
        "EXPRESSION_IMPORT_NAME": generateImportNameCode,
        "EXPRESSION_LIST_OPERATION_EXTEND": generateListOperationExtendCode,
        "EXPRESSION_LIST_OPERATION_POP": generateListOperationPopCode,
        "EXPRESSION_MODULE_FILE_ATTRIBUTE_REF": generateModuleFileAttributeCode,
        "EXPRESSION_MAKE_GENERATOR_OBJECT": generateMakeGeneratorObjectCode,
        "EXPRESSION_MAKE_COROUTINE_OBJECT": generateMakeCoroutineObjectCode,
        "EXPRESSION_MAKE_SET": generateSetCreationCode,
        "EXPRESSION_MAKE_TUPLE": generateTupleCreationCode,
        "EXPRESSION_MAKE_LIST": generateListCreationCode,
        "EXPRESSION_MAKE_DICT": generateDictionaryCreationCode,
        "EXPRESSION_OPERATION_BINARY": generateOperationBinaryCode,
        "EXPRESSION_OPERATION_BINARY_ADD": generateOperationBinaryCode,
        "EXPRESSION_OPERATION_BINARY_MULT": generateOperationBinaryCode,
        "EXPRESSION_OPERATION_BINARY_INPLACE": generateOperationBinaryCode,
        "EXPRESSION_OPERATION_UNARY": generateOperationUnaryCode,
        "EXPRESSION_OPERATION_NOT": generateOperationUnaryCode,
        "EXPRESSION_OUTLINE_BODY": generateFunctionOutlineCode,
        "EXPRESSION_RETURNED_VALUE_REF": generateReturnedValueRefCode,
        "EXPRESSION_SUBSCRIPT_LOOKUP": generateSubscriptLookupCode,
        "EXPRESSION_SLICE_LOOKUP": generateSliceLookupCode,
        "EXPRESSION_SET_OPERATION_UPDATE": generateSetOperationUpdateCode,
        "EXPRESSION_SIDE_EFFECTS": generateSideEffectsCode,
        "EXPRESSION_SPECIAL_UNPACK": generateSpecialUnpackCode,
        "EXPRESSION_TEMP_VARIABLE_REF": generateVariableReferenceCode,
        "EXPRESSION_VARIABLE_REF": generateVariableReferenceCode,
        "EXPRESSION_YIELD": generateYieldCode,
        "EXPRESSION_YIELD_FROM": generateYieldFromCode,
        "EXPRESSION_SELECT_METACLASS": generateSelectMetaclassCode,
        "EXPRESSION_ASYNC_WAIT": generateAsyncWaitCode,
        "EXPRESSION_ASYNC_ITER": generateAsyncIterCode,
        "EXPRESSION_ASYNC_NEXT": generateAsyncNextCode,
        "EXPRESSION_STRING_CONCATENATION": generateStringContenationCode,
        "EXPRESSION_BUILTIN_FORMAT": generateBuiltinFormatCode,
        "EXPRESSION_BUILTIN_ASCII": generateBuiltinAsciiCode,
    }
)
# Same registration for statement node kinds.
Helpers.setStatementDispatchDict(
    {
        "STATEMENT_ASSIGNMENT_VARIABLE": generateAssignmentVariableCode,
        "STATEMENT_ASSIGNMENT_ATTRIBUTE": generateAssignmentAttributeCode,
        "STATEMENT_ASSIGNMENT_SUBSCRIPT": generateAssignmentSubscriptCode,
        "STATEMENT_ASSIGNMENT_SLICE": generateAssignmentSliceCode,
        "STATEMENT_DEL_VARIABLE": generateDelVariableCode,
        "STATEMENT_DEL_ATTRIBUTE": generateDelAttributeCode,
        "STATEMENT_DEL_SUBSCRIPT": generateDelSubscriptCode,
        "STATEMENT_DEL_SLICE": generateDelSliceCode,
        "STATEMENT_DICT_OPERATION_REMOVE": generateDictOperationRemoveCode,
        "STATEMENT_DICT_OPERATION_UPDATE": generateDictOperationUpdateCode,
        "STATEMENT_RELEASE_VARIABLE": generateVariableReleaseCode,
        "STATEMENT_EXPRESSION_ONLY": generateExpressionOnlyCode,
        "STATEMENT_RETURN": generateReturnCode,
        "STATEMENT_GENERATOR_RETURN": generateGeneratorReturnCode,
        "STATEMENT_CONDITIONAL": generateBranchCode,
        "STATEMENT_TRY": generateTryCode,
        "STATEMENT_PRINT_VALUE": generatePrintValueCode,
        "STATEMENT_PRINT_NEWLINE": generatePrintNewlineCode,
        "STATEMENT_IMPORT_STAR": generateImportStarCode,
        "STATEMENT_LIST_OPERATION_APPEND": generateListOperationAppendCode,
        "STATEMENT_SET_OPERATION_ADD": generateSetOperationAddCode,
        "STATEMENT_DICT_OPERATION_SET": generateDictOperationSetCode,
        "STATEMENT_LOOP": generateLoopCode,
        "STATEMENT_LOOP_BREAK": generateLoopBreakCode,
        "STATEMENT_LOOP_CONTINUE": generateLoopContinueCode,
        "STATEMENT_RAISE_EXCEPTION": generateRaiseCode,
        "STATEMENT_RAISE_EXCEPTION_IMPLICIT": generateRaiseCode,
        "STATEMENT_SPECIAL_UNPACK_CHECK": generateUnpackCheckCode,
        "STATEMENT_EXEC": generateExecCode,
        "STATEMENT_LOCALS_DICT_SYNC": generateLocalsDictSyncCode,
        "STATEMENT_SET_LOCALS": generateSetLocalsCode,
        "STATEMENT_GENERATOR_ENTRY": generateGeneratorEntryCode,
        "STATEMENT_PRESERVE_FRAME_EXCEPTION": generateFramePreserveExceptionCode,
        "STATEMENT_RESTORE_FRAME_EXCEPTION": generateFrameRestoreExceptionCode,
        "STATEMENT_PUBLISH_EXCEPTION": generateExceptionPublishCode
    }
)
| fluxer/spm | nuitka/nuitka/codegen/CodeGeneration.py | Python | gpl-2.0 | 30,002 |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.cmd import netns_cleanup as util
from neutron.tests import base
class TestNetnsCleanup(base.BaseTestCase):
def setUp(self):
super(TestNetnsCleanup, self).setUp()
conn_patcher = mock.patch(
'neutron.agent.ovsdb.native.connection.Connection.start')
conn_patcher.start()
self.addCleanup(conn_patcher.stop)
def test_kill_dhcp(self, dhcp_active=True):
conf = mock.Mock()
conf.dhcp_driver = 'driver'
method_to_patch = 'oslo_utils.importutils.import_object'
with mock.patch(method_to_patch) as import_object:
driver = mock.Mock()
driver.active = dhcp_active
import_object.return_value = driver
util.kill_dhcp(conf, 'ns')
expected_params = {'conf': conf, 'network': mock.ANY,
'process_monitor': mock.ANY,
'plugin': mock.ANY}
import_object.assert_called_once_with('driver', **expected_params)
if dhcp_active:
driver.assert_has_calls([mock.call.disable()])
else:
self.assertFalse(driver.called)
def test_kill_dhcp_no_active(self):
self.test_kill_dhcp(False)
def test_eligible_for_deletion_ns_not_uuid(self):
conf = mock.Mock()
conf.agent_type = None
ns = 'not_a_uuid'
self.assertFalse(util.eligible_for_deletion(conf, ns))
def _test_eligible_for_deletion_helper(self, prefix, force, is_empty,
expected):
ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
conf.agent_type = None
with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.return_value.namespace_is_empty.return_value = is_empty
self.assertEqual(expected,
util.eligible_for_deletion(conf, ns, force))
expected_calls = [mock.call(namespace=ns)]
if not force:
expected_calls.append(mock.call().namespace_is_empty())
ip_wrap.assert_has_calls(expected_calls)
def test_eligible_for_deletion_empty(self):
self._test_eligible_for_deletion_helper('qrouter-', False, True, True)
def test_eligible_for_deletion_not_empty(self):
self._test_eligible_for_deletion_helper('qdhcp-', False, False, False)
def test_eligible_for_deletion_not_empty_forced(self):
self._test_eligible_for_deletion_helper('qdhcp-', True, False, True)
def test_eligible_for_deletion_fip_namespace(self):
self._test_eligible_for_deletion_helper('fip-', False, True, True)
def test_eligible_for_deletion_lbaas_namespace(self):
self._test_eligible_for_deletion_helper('qlbaas-', False, True, True)
def test_eligible_for_deletion_snat_namespace(self):
self._test_eligible_for_deletion_helper('snat-', False, True, True)
def test_eligible_for_deletion_filtered_by_agent_type(self):
ns_dhcp = 'qdhcp-' + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
ns_l3 = 'qrouter-' + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
conf.agent_type = 'dhcp'
with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.return_value.namespace_is_empty.return_value = True
self.assertTrue(util.eligible_for_deletion(conf, ns_dhcp, False))
self.assertFalse(util.eligible_for_deletion(conf, ns_l3, False))
expected_calls = [mock.call(namespace=ns_dhcp),
mock.call().namespace_is_empty()]
ip_wrap.assert_has_calls(expected_calls)
def test_unplug_device_regular_device(self):
conf = mock.Mock()
device = mock.Mock()
util.unplug_device(conf, device)
device.assert_has_calls([mock.call.link.delete()])
def test_unplug_device_ovs_port(self):
conf = mock.Mock()
conf.ovs_integration_bridge = 'br-int'
device = mock.Mock()
device.name = 'tap1'
device.link.delete.side_effect = RuntimeError
with mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls:
br_patch = mock.patch(
'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface')
with br_patch as mock_get_bridge_for_iface:
mock_get_bridge_for_iface.return_value = 'br-int'
ovs_bridge = mock.Mock()
ovs_br_cls.return_value = ovs_bridge
util.unplug_device(conf, device)
mock_get_bridge_for_iface.assert_called_once_with('tap1')
ovs_br_cls.assert_called_once_with('br-int')
ovs_bridge.assert_has_calls(
[mock.call.delete_port(device.name)])
def test_unplug_device_cannot_determine_bridge_port(self):
conf = mock.Mock()
conf.ovs_integration_bridge = 'br-int'
device = mock.Mock()
device.name = 'tap1'
device.link.delete.side_effect = RuntimeError
with mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge') as ovs_br_cls:
br_patch = mock.patch(
'neutron.agent.common.ovs_lib.BaseOVS.get_bridge_for_iface')
with br_patch as mock_get_bridge_for_iface:
with mock.patch.object(util.LOG, 'debug') as debug:
mock_get_bridge_for_iface.return_value = None
ovs_bridge = mock.Mock()
ovs_br_cls.return_value = ovs_bridge
util.unplug_device(conf, device)
mock_get_bridge_for_iface.assert_called_once_with('tap1')
self.assertEqual([], ovs_br_cls.mock_calls)
self.assertTrue(debug.called)
def _test_destroy_namespace_helper(self, force, num_devices):
ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
lo_device = mock.Mock()
lo_device.name = 'lo'
devices = [lo_device]
while num_devices:
dev = mock.Mock()
dev.name = 'tap%d' % num_devices
devices.append(dev)
num_devices -= 1
with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.return_value.get_devices.return_value = devices
ip_wrap.return_value.netns.exists.return_value = True
with mock.patch.object(util, 'unplug_device') as unplug:
with mock.patch.object(util, 'kill_dhcp') as kill_dhcp:
util.destroy_namespace(conf, ns, force)
expected = [mock.call(namespace=ns)]
if force:
expected.extend([
mock.call().netns.exists(ns),
mock.call().get_devices(exclude_loopback=True)])
self.assertTrue(kill_dhcp.called)
unplug.assert_has_calls(
[mock.call(conf, d) for d in
devices[1:]])
expected.append(mock.call().garbage_collect_namespace())
ip_wrap.assert_has_calls(expected)
def test_destroy_namespace_empty(self):
self._test_destroy_namespace_helper(False, 0)
def test_destroy_namespace_not_empty(self):
self._test_destroy_namespace_helper(False, 1)
def test_destroy_namespace_not_empty_forced(self):
self._test_destroy_namespace_helper(True, 2)
def test_destroy_namespace_exception(self):
ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
conf = mock.Mock()
with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
ip_wrap.side_effect = Exception()
util.destroy_namespace(conf, ns)
    def test_main(self):
        """main() destroys every namespace deemed eligible for deletion."""
        namespaces = ['ns1', 'ns2']
        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces
            with mock.patch('time.sleep') as time_sleep:
                conf = mock.Mock()
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)
                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    # Every namespace is reported eligible, so each one
                    # must be passed to destroy_namespace.
                    mocks['eligible_for_deletion'].return_value = True
                    mocks['setup_conf'].return_value = conf
                    with mock.patch('neutron.common.config.setup_logging'):
                        util.main()
                        mocks['eligible_for_deletion'].assert_has_calls(
                            [mock.call(conf, 'ns1', False),
                             mock.call(conf, 'ns2', False)])
                        mocks['destroy_namespace'].assert_has_calls(
                            [mock.call(conf, 'ns1', False),
                             mock.call(conf, 'ns2', False)])
                        ip_wrap.assert_has_calls(
                            [mock.call.get_namespaces()])
                        # main() waits before destroying candidates.
                        time_sleep.assert_called_once_with(2)
    def test_main_no_candidates(self):
        """With no eligible namespaces, main() neither sleeps nor destroys."""
        namespaces = ['ns1', 'ns2']
        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces
            with mock.patch('time.sleep') as time_sleep:
                conf = mock.Mock()
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)
                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    # Nothing qualifies for deletion this time.
                    mocks['eligible_for_deletion'].return_value = False
                    mocks['setup_conf'].return_value = conf
                    with mock.patch('neutron.common.config.setup_logging'):
                        util.main()
                        ip_wrap.assert_has_calls(
                            [mock.call.get_namespaces()])
                        mocks['eligible_for_deletion'].assert_has_calls(
                            [mock.call(conf, 'ns1', False),
                             mock.call(conf, 'ns2', False)])
                        # No candidates -> no destruction and no grace sleep.
                        self.assertFalse(mocks['destroy_namespace'].called)
                        self.assertFalse(time_sleep.called)
| sebrandon1/neutron | neutron/tests/unit/cmd/test_netns_cleanup.py | Python | apache-2.0 | 11,272 |
print '''
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10,001st prime number?
'''
def problem():
x,limit = 1,0
while limit != 10001:
x += 1
if isprime(x): limit += 1
print x
problem()
| willybh11/python | projectEuler/problems/e7.py | Python | gpl-3.0 | 287 |
#!/usr/bin/env python
#
# Copyright 2006,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
import random
import pmt
class test_unpack(gr_unittest.TestCase):
    """QA for blocks.unpack_k_bits_bb (unpack k bits per input byte)."""

    def setUp(self):
        random.seed(0)
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _unpack(self, k, src_data, tags=None):
        """Run source -> unpack_k_bits_bb(k) -> sink; return the sink."""
        if tags is None:
            source = blocks.vector_source_b(src_data, False)
        else:
            source = blocks.vector_source_b(src_data, False, 1, tags)
        unpacker = blocks.unpack_k_bits_bb(k)
        sink = blocks.vector_sink_b()
        self.tb.connect(source, unpacker, sink)
        self.tb.run()
        return sink

    def test_001(self):
        # k=1 is the identity mapping.
        data = [1, 0, 1, 1, 0, 1, 1, 0]
        sink = self._unpack(1, data)
        self.assertEqual(data, sink.data())

    def test_002(self):
        # Each input byte becomes its k=2 low bits, MSB first.
        sink = self._unpack(2, [2, 3, 0, 1])
        self.assertEqual([1, 0, 1, 1, 0, 0, 0, 1], sink.data())

    def test_003(self):
        # Tags on incoming bytes must be moved to the matching output bit
        # offsets (input offset * k).
        src_data = [2, 3, 0, 1]
        src_tag_offsets = [0, 1, 1, 2, 3]
        expected_data = [1, 0, 1, 1, 0, 0, 0, 1]
        expected_tag_offsets = [0, 2, 2, 4, 6]
        tag_values = list(range(len(src_tag_offsets)))
        tags = [
            gr.tag_utils.python_to_tag(
                (offset, pmt.intern('tag_byte'), pmt.from_long(value), None))
            for offset, value in zip(src_tag_offsets, tag_values)
        ]
        sink = self._unpack(2, src_data, tags)
        # Payload, tag values, and tag offsets all survive the unpacking.
        self.assertEqual(expected_data, sink.data())
        self.assertEqual(tag_values,
                         [pmt.to_python(t.value) for t in sink.tags()])
        self.assertEqual(expected_tag_offsets,
                         [t.offset for t in sink.tags()])
# Run the QA suite under GNU Radio's unittest wrapper when run directly.
if __name__ == '__main__':
    gr_unittest.run(test_unpack)
| dl1ksv/gnuradio | gr-blocks/python/blocks/qa_unpack_k_bits.py | Python | gpl-3.0 | 2,451 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _SparseToDense(sparse_indices,
                   output_size,
                   sparse_values,
                   default_value,
                   validate_indices=True):
  """Evaluate sparse_to_dense with the indices fed through a placeholder."""
  indices_placeholder = array_ops.placeholder(dtypes.int32)
  dense = sparse_ops.sparse_to_dense(
      indices_placeholder,
      output_size,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices)
  return dense.eval(feed_dict={indices_placeholder: sparse_indices})
class SparseToDenseTest(xla_test.XLATestCase):
  """Tests sparse_to_dense results and error reporting under XLA."""

  def testInt(self):
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1, 0)
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testFloat(self):
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0)
      np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
      self.assertAllClose(np_ans, tf_ans)

  def testSetValue(self):
    # Distinct value per sparse index.
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1)
      np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testSetSingleValue(self):
    # One scalar broadcast to every sparse index.
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([1, 3], [5], 1, -1)
      np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def test2d(self):
    # pylint: disable=bad-whitespace
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1)
      np_ans = np.array([[-1, -1, -1, -1],
                         [-1, -1, -1, 1],
                         [ 1, -1, -1, -1]]).astype(np.int32)
      self.assertAllClose(np_ans, tf_ans)

  def testZeroDefault(self):
    # NOTE(review): no test_scope() here, so this presumably runs outside
    # the XLA device -- confirm this is intentional.
    with self.cached_session():
      x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
      self.assertAllEqual(x, [0, 0, 7, 0])

  def test3d(self):
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1)
      np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
      np_ans[1, 3, 0] = 1
      np_ans[2, 0, 1] = 1
      self.assertAllClose(np_ans, tf_ans)

  def testDegenerateIndexMatrix(self):
    # Rank-2 indices with a single column degenerate to 1-D placement.
    with self.cached_session(), self.test_scope():
      tf_ans = _SparseToDense([[2], [3], [4], [5], [6], [7], [8], [9]], [10],
                              [1, 2, 3, 4, 5, 6, 7, 8], -1)
      self.assertAllClose([-1, -1, 1, 2, 3, 4, 5, 6, 7, 8], tf_ans)

  def testBadShape(self):
    with self.cached_session(), self.test_scope():
      with self.assertRaisesWithPredicateMatch(ValueError, "must be rank 1"):
        _SparseToDense([1, 3], [[5], [3]], 1, -1)

  def testBadValue(self):
    with self.cached_session(), self.test_scope():
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[2,1\], "
          r"should be \[\] or \[2\]"):
        _SparseToDense([1, 3], [5], [[5], [3]], -1)

  def testBadNumValues(self):
    with self.cached_session(), self.test_scope():
      with self.assertRaisesOpError(
          r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
        _SparseToDense([1, 3], [5], [1, 2, 3], -1)

  def testBadDefault(self):
    with self.cached_session(), self.test_scope():
      with self.assertRaisesOpError("default_value should be a scalar"):
        _SparseToDense([1, 3], [5], [1, 2], [0])
if __name__ == "__main__":
test.main()
| kevin-coder/tensorflow-fork | tensorflow/compiler/tests/sparse_to_dense_op_test.py | Python | apache-2.0 | 4,606 |
from datetime import date
import uuid
from flask.json import JSONEncoder
from flask._compat import text_type
from itsdangerous import json as _json
from dateutil.tz import tzutc
from stevedore import driver
class OpsyJSONEncoder(JSONEncoder):
    """Flask JSON encoder that also handles dates, UUIDs and markup objects."""

    def default(self, o):  # pylint: disable=method-hidden
        if isinstance(o, date):
            # TODO (testeddoughnut): proper timezone support
            return o.replace(tzinfo=tzutc()).isoformat()
        if isinstance(o, uuid.UUID):
            return str(o)
        if hasattr(o, '__html__'):
            # Markup-aware objects serialize as their HTML representation.
            return text_type(o.__html__())
        return _json.JSONEncoder.default(self, o)
def get_filters_list(filters):
    """Translate (csv_string, column) pairs into SQLAlchemy filter clauses.

    Each include item becomes ``column.in_(...)`` and each ``!``-prefixed
    exclude item becomes ``~column.in_(...)``.
    """
    clauses = []
    for raw_items, column in filters:
        if not raw_items:
            continue
        include, exclude = parse_include_excludes(raw_items)
        if include:
            clauses.append(column.in_(include))
        if exclude:
            clauses.append(~column.in_(exclude))
    return clauses
def parse_include_excludes(items):
    """Split a comma separated filter string into include/exclude lists.

    Items prefixed with ``!`` are excludes (prefix stripped); everything
    else is an include. Duplicates are dropped and each list is sorted so
    the output is deterministic -- the previous ``list(set(...))`` ordering
    varied between interpreter runs due to hash randomization.

    :param items: comma separated string such as ``"a,b,!c"``; may be
        ``None``/empty, in which case both lists are empty.
    :returns: ``(include, exclude)`` tuple of lists of strings.
    """
    if not items:
        return [], []
    item_list = items.split(',')
    include = sorted(set(x for x in item_list if not x.startswith('!')))
    exclude = sorted(set(x[1:] for x in item_list if x.startswith('!')))
    return include, exclude
def load_plugins(app):
    """Yield instantiated plugin drivers listed in the app's opsy config."""
    opsy_config = app.config.get('opsy')
    plugin_names = opsy_config.get('enabled_plugins')
    if not plugin_names:
        return
    for name in plugin_names.split(','):
        # Each plugin is loaded through stevedore and constructed with the
        # Flask app as its sole argument.
        manager = driver.DriverManager(
            namespace='opsy.plugin',
            name=name,
            invoke_args=(app,),
            invoke_on_load=True)
        yield manager.driver
| derekmoyes/opsy | opsy/utils.py | Python | mit | 1,794 |
# Column names for the site/station table handled by this migration
# (sta = station code, ondate/offdate = active period, lat/lon/elev =
# location). NOTE(review): presumably mirrors the source DB schema --
# verify against the queries that consume this list.
fields = ['sta', 'ondate', 'offdate', 'lat', 'lon', 'elev', 'staname', 'statype', 'refsta', 'dnorth', 'deast', 'lddate']
| cloudmesh/sp17-i524 | project/F16-EG-1002/code/migration/mysite.py | Python | apache-2.0 | 124 |
#!/usr/bin/env python3
# Copyright (c) 2015-2022 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import gzip
import sys
import inspect
import os
# Make the Fixie package importable when running this script straight from
# a source checkout: prepend the directory two levels up to sys.path.
_currentFile = os.path.abspath(inspect.getfile(inspect.currentframe()))
_currentDir = os.path.dirname(_currentFile)
_parentDir = os.path.dirname(os.path.dirname(_currentDir))
sys.path.insert(0, _parentDir)

import Fixie

# ANSI escape sequences used to colorize terminal output.
NO_COLOR = '\033[00m'
RED = '\033[0;31m'
GREEN = '\033[0;32m'
YELLOW = '\033[0;33m'
BLUE = '\033[0;34m'
CYAN = '\033[0;36m'
def getPrettyTagValue(tag, value):
    """
    Pretty prints a tag value to a string, appending the enum meaning in
    brackets when the tag is a known enumeration.

    :param tag: int
    :param value: str
    :return: str
    """
    enumValues = Fixie.TAG_ENUM_VALUES.get(tag)
    if enumValues is None:
        return '%s' % value
    return '%s [%s]' % (value, enumValues.get(value, 'ERROR: Unknown enum value'))
def printMessage(indent, messageStr, colorize=None):
    """
    Pretty prints a single (unparsed) FIX message.

    :param indent: int line number shown before the message
    :param messageStr: string raw FIX message
    :param colorize: bool or None; None means "colorize iff stdout is a TTY"
    """
    assert(type(indent) is int)
    assert(type(messageStr) is str)
    #Skip blank lines
    if messageStr == '':
        return
    #Colorize by default on TTY's, not otherwise
    if colorize is None:
        colorize = sys.stdout.isatty()
    #Print the message (truncated to 100 chars, SOH shown as '|')
    color = CYAN
    print('%s%6d: %s%s' % (color if colorize else '', indent,
        messageStr[:100].replace(Fixie.SEPARATOR, '|'), '...' if len(messageStr) > 100 else ''),
        NO_COLOR if colorize else '')
    bodyLengthTag = Fixie.TAG_NAME_TO_TAG['BodyLength']
    checksumTag = Fixie.TAG_NAME_TO_TAG['CheckSum']
    #TODO: error handling
    message = Fixie.FIXMessage(messageStr)
    parsedMessage = message.parsedMessage()
    # One output line per tag, in ascending tag-id order. Color encodes
    # status: yellow = unknown/unparseable, red = integrity failure,
    # green = verified length/checksum.
    for k in sorted(parsedMessage.keys()):
        extra = ''
        color = NO_COLOR if colorize else ''
        value = parsedMessage[k]
        if type(value) is list:
            # Repeated values: short lists on one line, long lists aligned
            # under the value column (39 spaces).
            separator = ', ' if len(value) < 6 else ',\n' + ' ' * 39
            valueString = separator.join(getPrettyTagValue(k, item) for item in value)
        else:
            valueString = getPrettyTagValue(k, value)
        tag = Fixie.TAG_ID_TO_TAG.get(k)
        if tag is None:
            name = ''
            color = YELLOW
        else:
            name = tag.name()
            #Does the value parse correctly?
            try:
                if type(value) is list:
                    parsedValue = [tag.type().parse(item) for item in value]
                else:
                    parsedValue = tag.type().parse(value)
            except Exception as e:
                parsedValue = None
                extra = str(e)
                color = YELLOW
            #Is it part of a repeating group?
            if tag.repeatingHeaderId() is not None:
                headerValue = parsedMessage.get(tag.repeatingHeaderId())
                if headerValue is None:
                    extra = 'No group header found [tag %d]' % tag.repeatingHeaderId()
                    color = RED
                elif parsedValue is not None:
                    try:
                        #Header counts should be parseable, and match the count of items
                        parsedHeaderValue = int(headerValue)
                        valueLength = len(parsedValue) if type(parsedValue) is list else 1
                        if parsedHeaderValue != valueLength:
                            extra = 'Group header [tag %d] disagrees with item count [%d vs %d]' % (
                                tag.repeatingHeaderId(), parsedHeaderValue, valueLength)
                            color = YELLOW
                    except ValueError:
                        color = YELLOW
            #Extra handling for certain tags
            if tag.id() == bodyLengthTag.id():
                # Recompute the body length from the span between the
                # BodyLength field and the CheckSum field.
                bodyLengthStr = '%s%d=%s' % (Fixie.SEPARATOR, bodyLengthTag.id(), value)
                bodyLengthIndex = messageStr.index(bodyLengthStr)
                checksumIndex = messageStr.index('%s%d=' % (Fixie.SEPARATOR, checksumTag.id()))
                calculatedLength = checksumIndex - bodyLengthIndex - len(bodyLengthStr)
                extra = 'Calculated length = %d' % calculatedLength
                color = GREEN if parsedValue == calculatedLength else RED
            elif tag.id() == checksumTag.id():
                calculatedChecksum = message.calculateChecksum()
                extra = 'Calculated checksum = %d' % calculatedChecksum
                color = GREEN if parsedValue == calculatedChecksum else RED
        print('%s%28s [%5d] = %s%s%s' % (color if colorize else '',
            name, k, valueString, ' (%s)' % extra if extra != '' else '', NO_COLOR if colorize else ''))
    print('')
def printFile(file, colorize=False):
    """
    Pretty prints the contents of a file, line by line.

    :param file: file object to print (binary mode under Python 3)
    :param colorize: bool Flag indicating whether to colorize the output.
    """
    for lineNumber, message in enumerate(file):
        #Decode if necessary
        if sys.version_info >= (3,):
            message = message.decode('utf8')
        #Strip a single trailing newline, if present
        if message.endswith('\n'):
            message = message[:-1]
        printMessage(lineNumber, message, colorize=colorize)
def main():
    """
    Entry point: parse arguments and pretty print the given FIX file
    (gzip supported by extension) or stdin.

    :return: int exit code
    """
    parser = argparse.ArgumentParser(description='FIX Viewer')
    parser.add_argument('-c', '--colorize', action='store_true', default=None,
        help='Colorize the output (default True in a TTY, false otherwise).')
    parser.add_argument('file', nargs='?', help='FIX file to view.')
    arguments = parser.parse_args(sys.argv[1:])

    #Read from the file name passed as an argument, or stdin if none is passed
    try:
        if arguments.file is None:
            # printFile() decodes each line, so it needs a byte stream;
            # on Python 3 sys.stdin is text, so use its underlying buffer.
            stdin = getattr(sys.stdin, 'buffer', sys.stdin)
            printFile(stdin, colorize=arguments.colorize)
        else:
            openF = gzip.open if arguments.file.endswith('.gz') else open
            with openF(arguments.file, 'rb') as fixFile:
                printFile(fixFile, colorize=arguments.colorize)
    except BrokenPipeError:
        # Happens when you pipe to less and quit; previously only the
        # file branch was guarded, so piping stdin raised a traceback.
        pass

    return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| AgalmicVentures/Fixie | Fixie/Tools/ViewFix.py | Python | mit | 6,353 |
"""
dsq_postgres.backend
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
| disqus/disqus-postgres | src/dsq_postgres/backend/__init__.py | Python | apache-2.0 | 145 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions call' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Call(base.Command):
  """Call function synchronously for testing."""

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    # NOTE(review): Run() reads args.region, but no --region flag is
    # registered here -- presumably it is supplied by a shared/parent
    # flag set elsewhere in the surface; confirm before relying on it.
    parser.add_argument(
        'name', help='Name of the function to be called.',
        type=util.ValidateFunctionNameOrRaise)
    parser.add_argument(
        '--data', default='',
        help='Data passed to the function (JSON string)')

  @util.CatchHTTPErrorRaiseHTTPException
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Function call results (error or result with execution id)
    """
    project = properties.VALUES.core.project.Get(required=True)
    registry = self.context['registry']
    client = self.context['functions_client']
    messages = self.context['functions_messages']
    # Resolve the positional name against the project and region into a
    # full cloudfunctions resource path.
    function_ref = registry.Parse(
        args.name, params={'projectsId': project, 'locationsId': args.region},
        collection='cloudfunctions.projects.locations.functions')
    return client.projects_locations_functions.Call(
        messages.CloudfunctionsProjectsLocationsFunctionsCallRequest(
            name=function_ref.RelativeName(),
            callFunctionRequest=messages.CallFunctionRequest(data=args.data)))
| KaranToor/MA450 | google-cloud-sdk/lib/surface/functions/call.py | Python | apache-2.0 | 2,127 |
#!/usr/bin/env python
"""compares BSR values between two groups in a BSR matrix
Numpy and BioPython need to be installed. Python version must be at
least 2.7 to use collections"""
from optparse import OptionParser
import subprocess
from ls_bsr.util import prune_matrix
from ls_bsr.util import compare_values
from ls_bsr.util import find_uniques
import sys
import os
def test_file(option, opt_str, value, parser):
    """optparse callback: store *value* only if it is an openable file.

    Exits the program with an error message when the path cannot be
    opened, so downstream code may assume the input files exist.

    :param option: the optparse Option being processed
    :param opt_str: the option string seen on the command line
    :param value: the user supplied file path
    :param parser: the OptionParser (its .values namespace is updated)
    """
    try:
        # Opening (and immediately closing) the file verifies readability.
        with open(value):
            setattr(parser.values, option.dest, value)
    except IOError:
        # Report the offending path; the old code interpolated the Option
        # object itself, producing "-b/--bsr_matrix file cannot be opened".
        print('%s file cannot be opened' % value)
        sys.exit()
def add_headers(infile, outfile, lower, upper):
    """Copy *infile* to *outfile*, prepending a descriptive header row.

    The header labels the per-group mean/threshold columns produced by
    compare_values(), embedding the actual upper/lower BSR cutoffs.

    :param infile: path to the tab delimited comparison results
    :param outfile: path of the headered file to create
    :param lower: lower BSR bound
    :param upper: upper BSR bound
    """
    header = "\t".join([
        "marker",
        "group1_mean", ">=" + str(upper), "total_in_group_1", ">=" + str(lower),
        "group2_mean", ">=" + str(upper), "total_in_group2", ">=" + str(lower),
    ]) + "\n"
    # "with" guarantees the output file is flushed and closed even if
    # reading the input fails part way through (the old code leaked the
    # handle on error).
    with open(outfile, "w") as file_out:
        file_out.write(header)
        with open(infile) as my_file:
            for line in my_file:
                file_out.write(line)
def main(matrix, group1, group2, fasta, upper, lower):
    """Drive the two-group BSR comparison pipeline.

    Writes groups_combined.txt and groups_combined_header.txt (plus the
    outputs of find_uniques) into the current working directory.

    :param matrix: path to the BSR matrix
    :param group1: file of group 1 sample ids
    :param group2: file of group 2 sample ids
    :param fasta: ORF FASTA file
    :param upper: upper BSR bound
    :param lower: lower BSR bound
    """
    prune_matrix(matrix, group1, group2)
    compare_values("group1_pruned.txt", "group2_pruned.txt", upper, lower)
    subprocess.check_call("paste group1_out.txt group2_out.txt > groups_combined.txt", shell=True)
    find_uniques("groups_combined.txt", fasta)
    add_headers("groups_combined.txt", "groups_combined_header.txt", lower, upper)
    # Remove intermediates with os.remove instead of shelling out to "rm":
    # portable and avoids spawning a subprocess. Failures are ignored to
    # match the old best-effort `os.system` behaviour.
    for tmp in ("group1_out.txt", "group2_out.txt"):
        try:
            os.remove(tmp)
        except OSError:
            pass
if __name__ == "__main__":
usage="usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-b", "--bsr_matrix", dest="matrix",
help="/path/to/bsr_matrix [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-f", "--fasta", dest="fasta",
help="/path/to/ORF_fasta_file [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-1", "--group_1_ids", dest="group1",
help="new line separated file with group1 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-2", "--group_2_ids", dest="group2",
help="new line separated file with group2 ids [REQUIRED]",
action="callback", callback=test_file, type="string")
parser.add_option("-u", "--upper_bound", dest="upper",
help="upper bound for BSR comparisons, defaults to 0.8",
default="0.8", type="float")
parser.add_option("-l", "--lower_bound", dest="lower",
help="lower bound for BSR comparisons, defaults to 0.4",
default="0.4", type="float")
options, args = parser.parse_args()
mandatories = ["matrix", "group1", "group2", "fasta"]
for m in mandatories:
if not options.__dict__[m]:
print("\nMust provide %s.\n" %m)
parser.print_help()
exit(-1)
main(options.matrix,options.group1,options.group2,options.fasta,options.upper,options.lower)
| jasonsahl/LS-BSR | tools/compare_BSR.py | Python | gpl-3.0 | 3,059 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: creates the QGISServerLayer model -- a
    # one-to-one extension of layers.Layer that records the on-disk
    # location of the layer's base file.

    dependencies = [
        ('layers', '24_to_26'),
    ]

    operations = [
        migrations.CreateModel(
            name='QGISServerLayer',
            fields=[
                ('layer', models.OneToOneField(primary_key=True, serialize=False, to='layers.Layer')),
                ('base_layer_path', models.CharField(help_text=b'Location of the base layer.', max_length=100, verbose_name=b'Base Layer Path')),
            ],
        ),
    ]
| kartoza/geonode | geonode/qgis_server/migrations/0001_initial.py | Python | gpl-3.0 | 597 |
from werkzeug.middleware.proxy_fix import ProxyFix
class CustomProxyFix(object):
    """WSGI middleware combining werkzeug's ProxyFix with a forced scheme.

    Every request is rewritten to claim ``forwarded_proto`` as its
    X-Forwarded-Proto header, regardless of what the proxy sent.
    """

    def __init__(self, app, forwarded_proto):
        self.app = ProxyFix(app, x_for=1, x_proto=1, x_host=1, x_port=0, x_prefix=0)
        self.forwarded_proto = forwarded_proto

    def __call__(self, environ, start_response):
        environ["HTTP_X_FORWARDED_PROTO"] = self.forwarded_proto
        return self.app(environ, start_response)
def init_app(app):
    """Wrap the app's WSGI callable so the configured protocol is enforced."""
    protocol = app.config.get('HTTP_PROTOCOL', 'http')
    app.wsgi_app = CustomProxyFix(app.wsgi_app, protocol)
| alphagov/notifications-admin | app/proxy_fix.py | Python | mit | 565 |
from __future__ import print_function
import errno
import time
import socket
import struct
import traceback
import zmq.green as zmq
from zope.interface import Interface, implements
from zope.interface.verify import verifyClass
from gevent import sleep, spawn, spawn_later
from gevent.socket import gethostbyname
from gevent.lock import RLock
from spinoff.remoting.hublogic import (
HubLogic, Connect, Disconnect, SigDisconnect, Send, Ping,
RelaySigNew, RelayConnect, RelaySigConnected, RelaySend, RelayForward, RelaySigNodeDown, RelayNvm,
Receive, SendFailed, NodeDown, NextBeat, Bind, IN, OUT, flatten)
from spinoff.util.logging import err
__all__ = ['Hub']
# Every frame starts with a 4-byte big-endian unsigned header.
MSG_HEADER_FORMAT = '!I'
# Header values below MIN_VERSION_VALUE are control signals; anything at
# or above it is a versioned data/ping frame (value - MIN_VERSION_VALUE
# is the protocol version).
_signals = [struct.pack(MSG_HEADER_FORMAT, x) for x in range(9)]
SIG_DISCONNECT, SIG_NEW_RELAY, SIG_RELAY_CONNECT, SIG_RELAY_CONNECTED, SIG_RELAY_SEND, SIG_RELAY_FORWARDED, SIG_RELAY_NODEDOWN, SIG_RELAY_NVM, SIG_VERIFY_IDENTITY = _signals
MIN_VERSION_VALUE = len(_signals)
MIN_VERSION_BITS = struct.pack(MSG_HEADER_FORMAT, MIN_VERSION_VALUE)
class IHub(Interface):
    """zope Interface describing the remoting hub contract (see Hub)."""
    def __init__(nid, is_relay=False, on_node_down=None, on_receive=None):
        pass

    def send_message(nid, msg_h):
        pass

    def watch_node(nid, watch_handle):
        pass

    def unwatch_node(nid, watch_handle):
        pass

    def stop():
        pass

# Sentinel marking attributes already torn down during Hub.stop().
_DELETED = object()
class Hub(object):
    """Handles traffic between actors on different nodes.
    The wire-transport implementation is specified/overridden by the `incoming` and `outgoing` parameters.
    """
    implements(IHub)

    # Node addresses listed here behave as unreachable (test hook).
    FAKE_INACCESSIBLE_NADDRS = set()

    def __init__(self, nid, is_relay=False, on_node_down=lambda ref, nid: ref << ('_node_down', nid),
                 on_receive=lambda sender_nid, msg_h: print("deliver", msg_h, "from", sender_nid),
                 heartbeat_interval=1.0, heartbeat_max_silence=3.0):
        self.nid = nid
        self.is_relay = is_relay
        self._on_node_down = on_node_down
        self._on_receive = on_receive
        self._lock = RLock()
        self._logic = HubLogic(nid, is_relay=is_relay,
                               heartbeat_interval=heartbeat_interval,
                               heartbeat_max_silence=heartbeat_max_silence)
        # Two ROUTER sockets: one bound for inbound peers, one used for
        # outbound connections; both carry this node's id as zmq identity.
        self._ctx = zmq.Context()
        self._ctx.linger = 0
        self._insock = self._ctx.socket(zmq.ROUTER)
        self._outsock = self._ctx.socket(zmq.ROUTER)
        self._insock.identity = self._outsock.identity = nid
        # A listener greenlet per socket; any listener crash stops the hub.
        self._listener_in = spawn(self._listen, self._insock, IN)
        self._listener_in.link_exception(lambda _: self.stop())
        self._listener_out = spawn(self._listen, self._outsock, OUT)
        self._listener_out.link_exception(lambda _: self.stop())
        self._heartbeater = None
        self._watched_nodes = {}
        self._initialized = True
        self._start()

    def _start(self):
        # Runs HubLogic.start, which (among other things) schedules the
        # first heartbeat via a NextBeat action.
        self._execute(self._logic.start)

    def send_message(self, nid, msg_h):
        """Queue/send msg_h to node nid (delegates to HubLogic)."""
        self._execute(self._logic.send_message, nid, msg_h, time.time())

    def watch_node(self, nid, watch_handle):
        """Register watch_handle for down-notifications about node nid."""
        if nid not in self._watched_nodes:
            self._watched_nodes[nid] = set([watch_handle])
            # First watcher triggers an eager connection attempt.
            self._execute(self._logic.ensure_connected, nid, time.time())
        else:
            self._watched_nodes[nid].add(watch_handle)

    def unwatch_node(self, nid, watch_handle):
        """Remove a previously registered watch (no-op if unknown)."""
        try:
            self._watched_nodes[nid].discard(watch_handle)
        except KeyError:
            pass

    def stop(self):
        """Idempotent teardown: neutralize the API, kill greenlets,
        run HubLogic.shutdown and destroy the zmq context."""
        # Replace public methods with no-ops so late callers are harmless.
        self.stop = lambda: None
        self.send_message = lambda nid, msg_h: None
        self.watch_node = lambda nid, watch_handle: None
        self.unwatch_node = lambda nid, watch_handle: None
        if hasattr(self, '_heartbeater'):
            # NOTE(review): _heartbeater may still be None if no NextBeat
            # was ever scheduled; .kill() would then raise -- confirm.
            self._heartbeater.kill()
            self._heartbeater = _DELETED
        if hasattr(self, '_listener_out'):
            self._listener_out.kill()
            self._listener_out = None
        if hasattr(self, '_listener_in'):
            self._listener_in.kill()
            self._listener_in = None
        if hasattr(self, '_initialized'):
            logic, self._logic = self._logic, None
            self._execute(logic.shutdown)
            sleep(.1)  # XXX: needed?
        if hasattr(self, '_ctx'):
            self._insock = self._outsock = None
            self._ctx.destroy(linger=0)
            self._ctx = None

    def __del__(self):
        self.stop()

    def __repr__(self):
        return "Hub(%s)" % (self.nid,)

    def _listen(self, sock, on_sock):
        """Receive loop for one socket: decode each frame's 4-byte header
        and dispatch to the matching HubLogic handler."""
        recv, t, execute, message_received, ping_received, sig_disconnect_received = (
            sock.recv_multipart, time.time, self._execute, self._logic.message_received, self._logic.ping_received, self._logic.sig_disconnect_received)
        while True:
            try:
                data = recv()
            except zmq.ZMQError as e:
                if e.errno != errno.EINTR:  # Sometimes "Interrupted system call" happens on Linux. Nobody knows which signal is interrupting it.
                    raise
                continue
            try:
                sender_nid, msg_bytes = data
            except ValueError:
                continue  # malformed input
            # dbg("recv", repr(msg_bytes), "from", sender_nid)
            msg_header, msg_bytes = msg_bytes[:4], msg_bytes[4:]
            if msg_header == SIG_DISCONNECT:
                assert not msg_bytes
                execute(sig_disconnect_received, sender_nid)
            elif msg_header == SIG_NEW_RELAY:
                assert not msg_bytes
                self._logic.new_relay_received(sender_nid)
            elif msg_header == SIG_RELAY_CONNECT:
                execute(self._logic.relay_connect_received, on_sock, relayer_nid=sender_nid, relayee_nid=msg_bytes)
            elif msg_header == SIG_RELAY_CONNECTED:
                execute(self._logic.relay_connected_received, relayee_nid=msg_bytes)
            elif msg_header == SIG_RELAY_NODEDOWN:
                execute(self._logic.relay_nodedown_received, relay_nid=sender_nid, relayee_nid=msg_bytes)
            elif msg_header == SIG_RELAY_SEND:
                # Payload is "<relayee_nid>\0<relayed bytes>".
                relayee_nid, relayed_bytes = msg_bytes.split('\0', 1)
                execute(self._logic.relay_send_received, sender_nid, relayee_nid, relayed_bytes)
            elif msg_header == SIG_RELAY_FORWARDED:
                relayer_nid, relayed_bytes = msg_bytes.split('\0', 1)
                execute(self._logic.relay_forwarded_received, relayer_nid, relayed_bytes)
            elif msg_header == SIG_RELAY_NVM:
                execute(self._logic.relay_nvm_received, sender_nid, relayee_nid=msg_bytes)
            elif msg_header < MIN_VERSION_BITS:
                continue  # malformed input
            else:
                # Versioned frame: non-empty body is a message, empty is a ping.
                try:
                    unpacked = struct.unpack(MSG_HEADER_FORMAT, msg_header)
                except Exception:
                    continue  # malformed input
                version = unpacked[0] - MIN_VERSION_VALUE
                if msg_bytes:
                    execute(message_received, on_sock, sender_nid, version, msg_bytes, t())
                else:
                    execute(ping_received, on_sock, sender_nid, version, t())

    def _execute(self, fn, *args, **kwargs):
        """Call a HubLogic handler and, under the lock, translate each
        action it yields into the corresponding socket/callback operation."""
        g = fn(*args, **kwargs)
        if g is None:
            return
        insock_send, outsock_send, on_receive = self._insock.send_multipart, self._outsock.send_multipart, self._on_receive
        with self._lock:
            for action in flatten(g):
                cmd = action[0]
                # if cmd not in (NextBeat,):
                #     dbg("%s -> %s: %s" % (fn.__name__.ljust(25), cmd, ", ".join(repr(x) for x in action[1:])))
                if cmd is Send:
                    _, use_sock, nid, version, msg_h = action
                    (outsock_send if use_sock == OUT else insock_send)((nid, struct.pack(MSG_HEADER_FORMAT, MIN_VERSION_VALUE + version) + msg_h.serialize()))
                elif cmd is Receive:
                    _, sender_nid, msg_bytes = action
                    on_receive(sender_nid, msg_bytes)
                elif cmd is RelaySend:
                    _, use_sock, relay_nid, relayee_nid, msg_h = action
                    (outsock_send if use_sock == OUT else insock_send)((relay_nid, SIG_RELAY_SEND + relayee_nid + '\0' + msg_h.serialize()))
                elif cmd is RelayForward:
                    _, use_sock, recipient_nid, relayer_nid, relayed_bytes = action
                    (outsock_send if use_sock == OUT else insock_send)((recipient_nid, SIG_RELAY_FORWARDED + relayer_nid + '\0' + relayed_bytes))
                elif cmd is Ping:
                    # A ping is a versioned header with an empty body.
                    _, use_sock, nid, version = action
                    (outsock_send if use_sock == OUT else insock_send)((nid, struct.pack(MSG_HEADER_FORMAT, MIN_VERSION_VALUE + version)))
                elif cmd is NextBeat:
                    _, time_to_next = action
                    if self._heartbeater is not _DELETED:
                        self._heartbeater = spawn_later(time_to_next, self._heartbeat)
                elif cmd is RelaySigNew:
                    _, use_sock, nid = action
                    (outsock_send if use_sock == OUT else insock_send)((nid, SIG_NEW_RELAY))
                elif cmd is RelayConnect:
                    _, use_sock, relay_nid, relayee_nid = action
                    (outsock_send if use_sock == OUT else insock_send)((relay_nid, SIG_RELAY_CONNECT + relayee_nid))
                elif cmd is RelaySigConnected:
                    _, use_sock, relayer_nid, relayee_nid = action
                    (outsock_send if use_sock == OUT else insock_send)((relayer_nid, SIG_RELAY_CONNECTED + relayee_nid))
                elif cmd is RelaySigNodeDown:
                    _, use_sock, relayer_nid, relayee_nid = action
                    (outsock_send if use_sock == OUT else insock_send)((relayer_nid, SIG_RELAY_NODEDOWN + relayee_nid))
                elif cmd is RelayNvm:
                    _, use_sock, relay_nid, relayee_nid = action
                    (outsock_send if use_sock == OUT else insock_send)((relay_nid, SIG_RELAY_NVM + relayee_nid))
                elif cmd is SendFailed:
                    _, msg_h = action
                    msg_h.send_failed()
                elif cmd is SigDisconnect:
                    _, use_sock, nid = action
                    (outsock_send if use_sock == OUT else insock_send)([nid, SIG_DISCONNECT])
                elif cmd is NodeDown:
                    # Notify (and drop) all watchers of the dead node.
                    _, nid = action
                    for watch_handle in self._watched_nodes.pop(nid, []):
                        self._on_node_down(watch_handle, nid)
                elif cmd is Connect:
                    _, naddr = action
                    if naddr not in self.FAKE_INACCESSIBLE_NADDRS:
                        zmqaddr = naddr_to_zmq_endpoint(naddr)
                        if zmqaddr:
                            self._outsock.connect(zmqaddr)
                            sleep(0.001)
                elif cmd is Disconnect:
                    _, naddr = action
                    if naddr not in self.FAKE_INACCESSIBLE_NADDRS:
                        zmqaddr = naddr_to_zmq_endpoint(naddr)
                        if zmqaddr:
                            try:
                                self._outsock.disconnect(zmqaddr)
                            except zmq.ZMQError:
                                pass
                elif cmd is Bind:
                    _, naddr = action
                    zmqaddr = naddr_to_zmq_endpoint(naddr)
                    if not zmqaddr:
                        raise Exception("Failed to bind to %s" % (naddr,))
                    self._insock.bind(zmqaddr)
                else:
                    assert False, "unknown command: %r" % (cmd,)

    def _heartbeat(self):
        # Periodic callback scheduled by NextBeat actions.
        self._execute(self._logic.heartbeat, time.time())
# Fail fast at import time if Hub drifts from the IHub contract.
verifyClass(IHub, Hub)

EAI_ERRNO_TEMPORARY_FAILURE_IN_NAME_RESOLUTION = -3  # no EAI_... in socket for this errno
def naddr_to_zmq_endpoint(nid):
if '\0' in nid:
return None
try:
host, port = nid.split(':')
except ValueError:
return None
try:
return 'tcp://%s:%s' % (gethostbyname(host), port)
except socket.gaierror as e:
# XXX: perhaps we should retry in a few sec in case of EAI_ERRNO_TEMPORARY_FAILURE_IN_NAME_RESOLUTION?
if e.errno not in (socket.EAI_NONAME, EAI_ERRNO_TEMPORARY_FAILURE_IN_NAME_RESOLUTION):
err("%s\n%s" % (e, traceback.format_exc()))
return None
| eallik/spinoff | spinoff/remoting/hub.py | Python | bsd-2-clause | 12,588 |
"""SCons.Tool.aixcc
Tool-specific initialization for IBM xlc / Visual Age C compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
import SCons.Platform.aix
import cc
# AIX filesets that may provide the xlc compiler.
packages = ['vac.C', 'ibmcxx.cmp']


def get_xlc(env):
    """Look up the xlc toolchain for *env*.

    Returns the (path, cc, shcc, version) tuple produced by
    SCons.Platform.aix.get_xlc, honouring any CC/SHCC overrides in *env*.
    """
    compiler = env.get('CC', 'xlc')
    shared_compiler = env.get('SHCC', 'xlc_r')
    return SCons.Platform.aix.get_xlc(env, compiler, shared_compiler, packages)
def generate(env):
    """Add Builders and construction variables for the xlc / Visual Age
    compiler suite to an Environment."""
    path, compiler, shared_compiler, version = get_xlc(env)
    if path:
        # Prefer absolute compiler locations once the install path is known.
        compiler = os.path.join(path, compiler)
        shared_compiler = os.path.join(path, shared_compiler)
    cc.generate(env)
    env['CC'] = compiler
    env['SHCC'] = shared_compiler
    env['CCVERSION'] = version
def exists(env):
    """Return the full path of the xlc binary when present, else None."""
    path, compiler, _shcc, _version = get_xlc(env)
    if not (path and compiler):
        return None
    candidate = os.path.join(path, compiler)
    return candidate if os.path.exists(candidate) else None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Distrotech/scons | src/engine/SCons/Tool/aixcc.py | Python | mit | 2,288 |
# Generated by Django 2.2.13 on 2021-03-12 10:29
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Cluster.management_security_group: an optional foreign key to the
    # OpenStack security group used for the cluster's management traffic.
    # SET_NULL keeps clusters intact if the referenced security group is
    # deleted later.

    dependencies = [
        ('openstack', '0022_remove_tenant_extra_configuration'),
        ('waldur_rancher', '0035_drop_spl'),
    ]

    operations = [
        migrations.AddField(
            model_name='cluster',
            name='management_security_group',
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to='openstack.SecurityGroup',
            ),
        ),
    ]
| opennode/nodeconductor-assembly-waldur | src/waldur_rancher/migrations/0036_cluster_management_security_group.py | Python | mit | 664 |
# This file is part of Pimlico
# Copyright (C) 2020 Mark Granroth-Wilding
# Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html
"""Extract NP chunks
Performs the full spaCy pipeline including tokenization, sentence
segmentation, POS tagging and parsing and outputs documents containing
only a list of the noun phrase chunks that were found by the parser.
This functionality is provided very conveniently by spaCy's ``Doc.noun_chunks``
after parsing, so this is a light wrapper around spaCy.
The output is presented as a tokenized document. Each sentence in the
document represents a single NP.
"""
from pimlico.core.dependencies.python import spacy_dependency
from pimlico.core.modules.map import DocumentMapModuleInfo
from pimlico.core.modules.options import str_to_bool
from pimlico.datatypes import GroupedCorpus
from pimlico.datatypes.corpora.data_points import RawTextDocumentType
from pimlico.datatypes.corpora.tokenized import TokenizedDocumentType
class ModuleInfo(DocumentMapModuleInfo):
    """
    Pipeline module definition: run the full spaCy pipeline over raw text and
    output only the noun-phrase chunks, presented as a tokenized corpus with
    one NP per "sentence".
    """
    module_type_name = "spacy_extract_nps"
    module_readable_name = "NP chunk extractor"
    # Input: grouped corpus of raw text; output: tokenized docs of NP chunks.
    module_inputs = [("text", GroupedCorpus(RawTextDocumentType()))]
    module_outputs = [("nps", GroupedCorpus(TokenizedDocumentType()))]
    module_options = {
        "model": {
            "help": "spaCy model to use. This may be a name of a standard spaCy model or a path to the "
                    "location of a trained model on disk, if on_disk=T. "
                    "If it's not a path, the spaCy download command will be run before execution",
            "default": "en_core_web_sm",
        },
        "on_disk": {
            "help": "Load the specified model from a location on disk (the model parameter gives the path)",
            "type": str_to_bool,
        }
    }
    module_supports_python2 = True

    def get_software_dependencies(self):
        """Require spaCy in addition to the base document-map dependencies."""
        return super(ModuleInfo, self).get_software_dependencies() + [spacy_dependency]
| markgw/pimlico | src/python/pimlico/modules/spacy/extract_nps/info.py | Python | gpl-3.0 | 1,980 |
# use numpy.distutils to simplify setup.py scripts involving SWIG
from numpy.distutils.core import setup, Extension
import os
# Build configuration for the SWIG-wrapped 'hw' extension module.
name = 'hw' # name of the module
version = 1.0 # the module's version number
# The SWIG interface file plus the C++ sources it wraps.
sources = ['hw.i', '../HelloWorld.cpp', '../HelloWorld2.cpp']

setup(name=name, version=version,
      ext_modules = [Extension('_' + name, # SWIG requires _
                               sources,
                               include_dirs=[os.pardir])
                     ])
| sniemi/SamPy | sandbox/src1/TCSE3-3rd-examples/src/py/mixed/hw/C++/class/swig-hw/setup2.py | Python | bsd-2-clause | 502 |
import multiprocessing
import os
import sys
# True when running on Travis CI, where CPU resources are constrained.
_is_travis = os.environ.get('TRAVIS') == 'true'

# Over-provision workers relative to cores; capped on CI.
workers = multiprocessing.cpu_count() * 3
if _is_travis:
    workers = 2

bind = "0.0.0.0:8080"
keepalive = 120  # seconds an idle keep-alive connection is held open
errorlog = '-'  # '-' sends the error log to stderr
pidfile = 'gunicorn.pid'
pythonpath = 'web2py'  # make the web2py framework importable by workers
worker_class = "meinheld.gmeinheld.MeinheldWorker"
def post_fork(server, worker):
    """Gunicorn post_fork hook: silence meinheld's per-request access log."""
    from meinheld import server as meinheld_server
    meinheld_server.set_access_logger(None)
| actframework/FrameworkBenchmarks | frameworks/Python/web2py/gunicorn_conf.py | Python | bsd-3-clause | 448 |
import abc
import json
import logging
import os
import re
import deepdiff
from configs import EnjoliverConfig
# Import-time singletons shared by every generator in this module:
# the application configuration and the module logger.
ec = EnjoliverConfig(importer=__file__)
logger = logging.getLogger(__name__)
class Generator(object):
    """
    Tie a matchbox group to its profile so the chain stays coherent:
    group -> profile -> ignition.
    """

    def __init__(self,
                 api_uri: str,
                 profile_id: str,
                 name: str,
                 ignition_id: str,
                 matchbox_path: str,
                 selector=None,
                 group_id=None,
                 extra_metadata=None,
                 pxe_redirect=False):
        self.profile = GenerateProfile(api_uri=api_uri,
                                       _id=profile_id,
                                       name=name,
                                       ignition_id=ignition_id,
                                       matchbox_path=matchbox_path,
                                       pxe_redirect=pxe_redirect)
        # The group reuses the profile id when no explicit group id is given.
        self.group = GenerateGroup(api_uri=api_uri,
                                   _id=group_id or profile_id,
                                   name=name,
                                   profile=profile_id,  # TODO
                                   selector=selector,
                                   metadata=extra_metadata,
                                   matchbox_path=matchbox_path)

    def generate_profile(self):
        """Build and return the profile data dict."""
        return self.profile.generate()

    def generate_group(self):
        """Build and return the group data dict."""
        return self.group.generate()

    def dumps(self):
        """Write both JSON files so profile and group stay in sync on disk."""
        self.profile.dump()
        self.group.dump()
class GenerateCommon(object, metaclass=abc.ABCMeta):
    """
    Common set of methods used to generate groups and profiles.

    Subclasses must implement generate(), which fills and returns
    self._target_data — the dict rendered to JSON on disk by dump().
    """
    # BUGFIX: this used to be `__metaclass__ = abc.ABCMeta`, the Python 2
    # spelling, which Python 3 silently ignores (this module uses Python 3
    # annotations), so @abc.abstractmethod was never actually enforced.

    _target_data = None    # cached dict produced by generate()
    _raise_enof = IOError  # exception type raised by ensure_file()

    @abc.abstractmethod
    def generate(self):
        """Build and return the target data dict; must set self._target_data."""
        return

    @property
    def target_data(self):
        """Lazily-generated target data (generate() runs on first access)."""
        if self._target_data is not None:
            return self._target_data
        return self.generate()

    def render(self, indent=2):
        """Regenerate the data and return it as deterministic, sorted JSON."""
        self.generate()
        return json.dumps(self._target_data, indent=indent, sort_keys=True)

    def dump(self):
        """Write the rendered JSON to <target_path>/<id>.json.

        Returns True when the file was (re)written, False when the on-disk
        content is already semantically identical.
        """
        file_path = "%s/%s.json" % (self.target_path, self.target_data["id"])
        try:
            with open(file_path, 'r') as f:
                on_disk = json.loads(f.read())
        except Exception as e:
            # Missing or unparsable file: treat as empty so a full write happens.
            logger.warning("get data of %s raise: %s" % (file_path, e))
            on_disk = dict()
        render = self.render()
        # ignore_order: key ordering on disk is irrelevant, only content counts.
        diff = deepdiff.DeepDiff(self._target_data, on_disk, ignore_order=True)
        if not diff:
            logger.debug("no diff: %s" % file_path)
            return False
        if on_disk:
            logger.info("diff on %s: %s" % (file_path, diff))
        with open(file_path, "w") as fd:
            fd.write(render)
        logger.info("replaced: %s" % file_path)
        return True

    @staticmethod
    def ensure_directory(path):
        """Return *path* if it is an existing directory, else raise IOError."""
        if os.path.isdir(path) is False:
            raise IOError("%s not a valid as directory" % path)
        return path

    def ensure_file(self, path):
        """Return *path* if it is an existing file, else raise _raise_enof."""
        if os.path.isfile(path) is False:
            raise self._raise_enof("%s not a valid as file" % path)
        return path
class GenerateProfile(GenerateCommon):
    """Render a matchbox profile: boot kernel/initrd/cmdline plus ignition id."""

    def __repr__(self):
        return "GenProfile-%s" % self._target_data["id"]

    def __init__(self,
                 api_uri: str,
                 _id: str,
                 name: str,
                 ignition_id: str,
                 matchbox_path: str,
                 pxe_redirect=False):
        """
        :param api_uri: base URI of the enjoliver API, used in the boot cmdline
        :param _id: profile id (also the JSON file name)
        :param name: human readable profile name
        :param ignition_id: file name expected under <matchbox_path>/ignition
        :param matchbox_path: root of the matchbox data directory
        :param pxe_redirect: serve /ignition-pxe instead of /ignition
        """
        self.api_uri = api_uri
        self.pxe_redirect = pxe_redirect
        self.ensure_directory(matchbox_path)
        self.ensure_directory("%s/ignition" % matchbox_path)
        try:
            self.ensure_file("%s/ignition/%s" % (matchbox_path, ignition_id))
        except self._raise_enof:
            # BUGFIX: a missing ignition file is tolerated at generation time
            # (it may be produced later). The previous handler caught Warning,
            # but ensure_file() raises _raise_enof (IOError), so the warning
            # below was dead code and __init__ crashed instead of warning.
            logger.warning("not here %s/ignition/%s\n" % (matchbox_path, ignition_id))
        self.target_path = self.ensure_directory("%s/profiles" % matchbox_path)
        self._target_data = {
            "id": "%s" % _id,
            "name": "%s" % name,
            "boot": {},
            "cloud_id": "",
            "ignition_id": "%s" % ignition_id
        }

    def _boot(self):
        """Fill the "boot" section: kernel, initrd and kernel command line."""
        if ec.assets_server_uri:
            # Boot assets may be served from a dedicated server instead of the API.
            logger.debug("custom assets_server_uri=%s" % ec.assets_server_uri)
            uri = ec.assets_server_uri
        else:
            uri = self.api_uri
        path_for_ignition = "ignition" if self.pxe_redirect is False else "ignition-pxe"
        self._target_data["boot"] = {
            "kernel": "%s%s" % (uri, ec.kernel),
            "initrd": ["%s%s" % (uri, ec.initrd)],
            "cmdline": {
                "coreos.config.url":
                    "%s/%s?uuid=${uuid}&mac=${net0/mac:hexhyp}" % (self.api_uri, path_for_ignition),
                "coreos.first_boot": "",
                "coreos.oem.id": "pxe",
                "console": "ttyS0 console=ttyS1",
            }
        }

    def generate(self):
        """Build the profile dict and return it."""
        self._boot()
        logger.debug("done: %s" % self._target_data["name"])
        return self.target_data
class GenerateGroup(GenerateCommon):
    # Renders a matchbox "group" JSON file: binds machines (via selector) to a
    # profile and carries metadata such as the authorized SSH keys.

    def __repr__(self):
        return "GenGroup[%s]" % self._target_data["id"]

    def __init__(self,
                 api_uri: str,
                 _id: str,
                 name: str,
                 profile: str,
                 matchbox_path: str,
                 selector=None,
                 metadata=None,
                 ):
        """
        :param api_uri: base URI of the enjoliver API, copied into metadata
        :param _id: group id (also the JSON file name)
        :param name: human readable group name
        :param profile: id of the profile this group points at
        :param matchbox_path: root of the matchbox data directory
        :param selector: optional dict used by matchbox to match machines
        :param metadata: optional extra key/value pairs merged into metadata
        """
        self.api_uri = api_uri
        self.ensure_directory(matchbox_path)
        self.target_path = self.ensure_directory("%s/groups" % matchbox_path)
        self.ssh_authorized_keys_dir = "%s/ssh_authorized_keys" % matchbox_path
        # Copy the caller's containers so later mutation here cannot leak back.
        self.extra_selector = None if not selector else dict(selector)
        self.extra_metadata = {} if not metadata else dict(metadata)
        self._target_data = {
            "id": _id,
            "name": name,
            "profile": profile,
            "metadata": {
                "api_uri": "",
            }
        }

    def _get_ssh_authorized_keys(self):
        """Collect public keys from the ssh_authorized_keys directory.

        Files whose content does not look like an SSH public key (fewer than
        two space-separated fields) are skipped.
        """
        keys = []
        if os.path.isdir(self.ssh_authorized_keys_dir) is False:
            return keys
        for k in os.listdir(self.ssh_authorized_keys_dir):
            fp = "%s/%s" % (self.ssh_authorized_keys_dir, k)
            with open(fp, 'r') as key:
                content = key.read()
                if len(content.split(" ")) < 2:
                    logger.debug("%s not valid as ssh_authorized_keys" % fp)
                    continue
                keys.append(content)
        return keys

    def _metadata(self):
        """Fill the metadata section (api_uri, SSH keys, extra entries)."""
        self._target_data["metadata"]["api_uri"] = self.api_uri
        self._target_data["metadata"]["ssh_authorized_keys"] = self._get_ssh_authorized_keys()
        for k, v in self.extra_metadata.items():
            logger.debug("add %s: %s in metadata" % (k, v))
            self._target_data["metadata"][k] = v

    def _selector(self):
        """Validate and attach the machine selector, if one was given.

        A "mac" entry is lowercased in place and must match colon-separated
        MAC notation; other selector keys are passed through untouched.
        """
        if self.extra_selector is None:
            return
        if type(self.extra_selector) is not dict:
            raise TypeError("selector is not a dict")
        try:
            # Lowercase first: matchbox expects lowercase MAC addresses.
            self.extra_selector["mac"] = self.extra_selector["mac"].lower()
            match = re.match(r"^([0-9a-f]{2}[:]){5}([0-9a-f]{2})$",
                             self.extra_selector["mac"])
            if match is None:
                raise TypeError("%s is not a valid MAC address" % self.extra_selector["mac"].lower())
        except KeyError:
            # No "mac" key: a selector without a MAC is acceptable.
            pass
        self._target_data["selector"] = self.extra_selector
        self._target_data["metadata"]["selector"] = self.extra_selector

    def generate(self):
        """Build the group dict (metadata then selector) and return it."""
        self._metadata()
        self._selector()
        logger.debug("done: %s" % self._target_data["id"])
        return self.target_data
| kirek007/enjoliver | app/generator.py | Python | mit | 7,655 |
class GameError(Exception):
    """Domain error for the game, carrying a message and an optional error code."""

    def __init__(self, msg, code=None):
        # BUGFIX: pass msg to Exception so str(e) / logging show the message;
        # previously Exception.__init__ was never called and str(e) was empty.
        super(GameError, self).__init__(msg)
        self.msg = msg
        self.code = code

    def to_dict(self):
        """Serialize for a JSON error response."""
        return {
            "message": self.msg,
            "code": self.code
        }
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from types import MethodType
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _, ungettext
from django.utils.encoding import force_text
from django.utils.html import format_html
from cms.constants import REFRESH_PAGE
class SegmentPluginModelMixin(object):
    # Mixin for CMS plugin models whose rendering may be altered by
    # segmentation overrides supplied via get_context_override().

    def get_context_override(self, request):
        """
        Return a dictionary to override the request context object during evaluation with
        alternative values. Normally this is an empty dict. However, when a staff user overrides
        the segmentation, then update the context with this returned dict.
        """
        return {}

    def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
        # context.update(...) is deliberately paired with context.pop() below so
        # the override values only apply while this plugin renders and sibling
        # plugins see an unmodified context.
        context.update(self.get_context_override(context['request']))
        content = super(SegmentPluginModelMixin, self).render_plugin(context, placeholder, admin, processors)
        context.pop()
        return content
class EmulateUserModelMixin(SegmentPluginModelMixin):
    UserModel = get_user_model()

    def get_context_override(self, request):
        """
        Extend the context overrides: when a staff member has chosen a user to
        emulate (stored in the session), expose that user as ``user`` during
        plugin rendering.
        """
        overrides = super(EmulateUserModelMixin, self).get_context_override(request)
        try:
            if request.user.is_staff:
                emulated = self.UserModel.objects.get(pk=request.session['emulate_user_id'])
                overrides.update(user=emulated)
        except (self.UserModel.DoesNotExist, KeyError):
            # No emulation active, or the emulated user vanished: no override.
            pass
        return overrides
class EmulateUserAdminMixin(object):
    """
    ModelAdmin mixin that lets staff users pick another user to "emulate":
    the chosen user's primary key is stored in the session and picked up by
    EmulateUserModelMixin during plugin rendering.
    """
    UserModel = get_user_model()

    @staticmethod
    def populate_toolbar(segmentation_menu, request):
        """Add the 'Emulate User' / 'Clear emulations' toolbar entries."""
        active = 'emulate_user_id' in request.session
        segmentation_menu.add_sideframe_item(_("Emulate User"), url=reverse('admin:emulate-users'),
                                             active=active)
        segmentation_menu.add_ajax_item(_("Clear emulations"),
                                        action=reverse('admin:clear-emulations'),
                                        on_success=REFRESH_PAGE)

    def get_urls(self):
        # Prepend our views so they take precedence over the default admin urls.
        return [
            url(r'^emulate_users/$', self.admin_site.admin_view(self.emulate_users), name='emulate-users'),
            url(r'^emulate_user/(?P<user_id>\d+)/$', self.admin_site.admin_view(self.emulate_user), name='emulate-user'),
            url(r'^clear_emulations/$', self.admin_site.admin_view(self.clear_emulations), name='clear-emulations'),
        ] + super(EmulateUserAdminMixin, self).get_urls()

    def emulate_user(self, request, user_id):
        """Remember ``user_id`` in the session as the user to emulate."""
        try:
            request.session['emulate_user_id'] = int(user_id)
            return HttpResponse('OK')
        except (TypeError, ValueError) as err:
            # BUGFIX: exception objects have no `.message` attribute on
            # Python 3, so this handler itself used to raise; also catch
            # ValueError so a malformed id yields a 400 rather than a 500.
            return HttpResponseBadRequest(str(err))

    def emulate_users(self, request):
        """
        The list view: a change list over all users where the identifying
        column is replaced by a link that activates emulation of that user.
        """
        def display_as_link(self, obj):
            # Render the user's identifier: bold when already being emulated,
            # otherwise as a link pointing at the emulate-user view.
            try:
                identifier = getattr(user_model_admin, list_display_link)(obj)
            except AttributeError:
                identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2]
            emulate_user_id = request.session.get('emulate_user_id')
            if emulate_user_id == obj.id:
                return format_html('<strong>{}</strong>', identifier)
            fmtargs = {
                'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}),
                'identifier': identifier,
            }
            return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs)

        opts = self.UserModel._meta
        app_label = opts.app_label
        user_model_admin = self.admin_site._registry[self.UserModel]
        request._lookup_model = self.UserModel
        list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display)
        # replace first entry in list_display_links by customized method display_as_link
        list_display_link = list_display_links[0]
        try:
            list_display = list(user_model_admin.segmentation_list_display)
        except AttributeError:
            list_display = list(user_model_admin.list_display)
        list_display.remove(list_display_link)
        list_display.insert(0, 'display_as_link')
        display_as_link.allow_tags = True
        try:
            display_as_link.short_description = user_model_admin.identifier.short_description
        except AttributeError:
            display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel)
        # BUGFIX: the two-argument MethodType form binds the function to `self`
        # and works on both Python 2 and 3; the previous three-argument call
        # (func, instance, class) only exists on Python 2.
        self.display_as_link = MethodType(display_as_link, self)
        ChangeList = self.get_changelist(request)
        cl = ChangeList(request, self.UserModel, list_display,
                        (None,),  # disable list_display_links in ChangeList, instead override that field
                        user_model_admin.list_filter,
                        user_model_admin.date_hierarchy, user_model_admin.search_fields,
                        user_model_admin.list_select_related, user_model_admin.list_per_page,
                        user_model_admin.list_max_show_all,
                        (),  # disable list_editable
                        self)
        cl.formset = None
        selection_note_all = ungettext('%(total_count)s selected',
                                       'All %(total_count)s selected', cl.result_count)
        context = {
            'module_name': force_text(opts.verbose_name_plural),
            'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
            'selection_note_all': selection_note_all % {'total_count': cl.result_count},
            'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name},
            'is_popup': cl.is_popup,
            'cl': cl,
            'media': self.media,
            'has_add_permission': False,
            'opts': cl.opts,
            'app_label': app_label,
            'actions_on_top': self.actions_on_top,
            'actions_on_bottom': self.actions_on_bottom,
            'actions_selection_counter': self.actions_selection_counter,
            'preserved_filters': self.get_preserved_filters(request),
        }
        return TemplateResponse(request, self.change_list_template or [
            'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
            'admin/%s/change_list.html' % app_label,
            'admin/change_list.html'
        ], context, current_app=self.admin_site.name)

    def clear_emulations(self, request):
        """Drop any active emulation from the session."""
        request.session.pop('emulate_user_id', None)
        return HttpResponse('OK')
| jtiki/djangocms-cascade | cmsplugin_cascade/segmentation/mixins.py | Python | mit | 6,940 |
#! /usr/bin/env python
from ppclass import pp
# Dataset toggle: the first assignment is intentionally shadowed by the
# second; only the ref80 file is read. Swap the lines to process ref64.
ff = "ref64_p20b_diagfi.nc"
ff = "ref80_p20b_diagfi.nc"

# Hybrid vertical coordinates and the final-time temperature profile at (x=0, y=0).
ap = pp(file=ff, x=0, y=0, var="ap").getf()
bp = pp(file=ff, x=0, y=0, var="bp").getf()
temp = pp(file=ff, x=0, y=0, t=1e10, var="temp").getf()

# Context managers replace the manual open()/close() pairs so the output
# files are flushed and closed even if a write fails midway.
with open("apbp.txt", "w") as apbp:
    for nn in range(len(ap)):
        apbp.write("%12.5e%12.5e\n" % (ap[nn], bp[nn]))
with open("temp_profile.txt", "w") as prof:
    for nn in range(len(temp)):
        prof.write("%12.5f \n" % (temp[nn]))
| aymeric-spiga/planetoplot | examples/ppclass_additional/printapbp.py | Python | gpl-2.0 | 475 |
import re
import pytest
from spacy.attrs import IS_PUNCT, LOWER, ORTH
from spacy.errors import MatchPatternError
from spacy.lang.en import English
from spacy.lang.lex_attrs import LEX_ATTRS
from spacy.matcher import Matcher
from spacy.tokens import Doc, Span, Token
from spacy.vocab import Vocab
# Operator test matrix: each patternN has an equivalent regular expression
# re_patternN, and longestN is the longest match the greedy matcher is
# expected to find in the fixture text below.
pattern1 = [{"ORTH": "A"}, {"ORTH": "A", "OP": "*"}]
pattern2 = [{"ORTH": "A", "OP": "*"}, {"ORTH": "A"}]
pattern3 = [{"ORTH": "A"}, {"ORTH": "A"}]
pattern4 = [{"ORTH": "B"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
pattern5 = [{"ORTH": "B", "OP": "*"}, {"ORTH": "A", "OP": "*"}, {"ORTH": "B"}]
re_pattern1 = "AA*"
re_pattern2 = "A*A"
re_pattern3 = "AA"
re_pattern4 = "BA*B"
re_pattern5 = "B*A*B"
longest1 = "A A A A A"
longest2 = "A A A A A"
longest3 = "A A"
longest4 = "B A A A A A B"  # "FIRST" would be "B B"
longest5 = "B B A A A A A B"


@pytest.fixture
def text():
    # Raw characters; the doc fixture joins them with spaces so the tokenizer
    # produces one single-character token per character.
    return "(BBAAAAAB)."


@pytest.fixture
def doc(en_tokenizer, text):
    doc = en_tokenizer(" ".join(text))
    return doc
@pytest.mark.issue(118)
@pytest.mark.parametrize(
    "patterns",
    [
        [[{"LOWER": "celtics"}], [{"LOWER": "boston"}, {"LOWER": "celtics"}]],
        [[{"LOWER": "boston"}, {"LOWER": "celtics"}], [{"LOWER": "celtics"}]],
    ],
)
def test_issue118(en_tokenizer, patterns):
    """Test a bug that arose from having overlapping matches"""
    text = (
        "how many points did lebron james score against the boston celtics last night"
    )
    doc = en_tokenizer(text)
    ORG = doc.vocab.strings["ORG"]
    matcher = Matcher(doc.vocab)
    matcher.add("BostonCeltics", patterns)
    assert len(list(doc.ents)) == 0
    matches = [(ORG, start, end) for _, start, end in matcher(doc)]
    assert matches == [(ORG, 9, 11), (ORG, 10, 11)]
    # Only the first (longer) span can become an entity; overlaps are rejected.
    doc.ents = matches[:1]
    ents = list(doc.ents)
    assert len(ents) == 1
    assert ents[0].label == ORG
    assert ents[0].start == 9
    assert ents[0].end == 11


@pytest.mark.issue(118)
@pytest.mark.parametrize(
    "patterns",
    [
        [[{"LOWER": "boston"}], [{"LOWER": "boston"}, {"LOWER": "celtics"}]],
        [[{"LOWER": "boston"}, {"LOWER": "celtics"}], [{"LOWER": "boston"}]],
    ],
)
def test_issue118_prefix_reorder(en_tokenizer, patterns):
    """Test a bug that arose from having overlapping matches"""
    text = (
        "how many points did lebron james score against the boston celtics last night"
    )
    doc = en_tokenizer(text)
    ORG = doc.vocab.strings["ORG"]
    matcher = Matcher(doc.vocab)
    matcher.add("BostonCeltics", patterns)
    assert len(list(doc.ents)) == 0
    matches = [(ORG, start, end) for _, start, end in matcher(doc)]
    # Adding only the longer span must succeed despite the shared prefix.
    doc.ents += tuple(matches)[1:]
    assert matches == [(ORG, 9, 10), (ORG, 9, 11)]
    ents = doc.ents
    assert len(ents) == 1
    assert ents[0].label == ORG
    assert ents[0].start == 9
    assert ents[0].end == 11
@pytest.mark.issue(242)
def test_issue242(en_tokenizer):
    """Test overlapping multi-word phrases."""
    text = "There are different food safety standards in different countries."
    patterns = [
        [{"LOWER": "food"}, {"LOWER": "safety"}],
        [{"LOWER": "safety"}, {"LOWER": "standards"}],
    ]
    doc = en_tokenizer(text)
    matcher = Matcher(doc.vocab)
    matcher.add("FOOD", patterns)
    matches = [(ent_type, start, end) for ent_type, start, end in matcher(doc)]
    match1, match2 = matches
    assert match1[1] == 3
    assert match1[2] == 5
    assert match2[1] == 4
    assert match2[2] == 6
    with pytest.raises(ValueError):
        # One token can only be part of one entity, so test that the matches
        # can't be added as entities
        doc.ents += tuple(matches)


@pytest.mark.issue(587)
def test_issue587(en_tokenizer):
    """Test that Matcher doesn't segfault on particular input"""
    doc = en_tokenizer("a b; c")
    matcher = Matcher(doc.vocab)
    matcher.add("TEST1", [[{ORTH: "a"}, {ORTH: "b"}]])
    matches = matcher(doc)
    assert len(matches) == 1
    matcher.add("TEST2", [[{ORTH: "a"}, {ORTH: "b"}, {IS_PUNCT: True}, {ORTH: "c"}]])
    matches = matcher(doc)
    assert len(matches) == 2
    # TEST3 cannot match ("d" is absent): the match count must stay at 2.
    matcher.add("TEST3", [[{ORTH: "a"}, {ORTH: "b"}, {IS_PUNCT: True}, {ORTH: "d"}]])
    matches = matcher(doc)
    assert len(matches) == 2


@pytest.mark.issue(588)
def test_issue588(en_vocab):
    """Test if empty specs still cause an error when adding patterns"""
    matcher = Matcher(en_vocab)
    with pytest.raises(ValueError):
        matcher.add("TEST", [[]])
@pytest.mark.issue(590)
def test_issue590(en_vocab):
    """Test overlapping matches"""
    doc = Doc(en_vocab, words=["n", "=", "1", ";", "a", ":", "5", "%"])
    matcher = Matcher(en_vocab)
    matcher.add(
        "ab", [[{"IS_ALPHA": True}, {"ORTH": ":"}, {"LIKE_NUM": True}, {"ORTH": "%"}]]
    )
    matcher.add("ab", [[{"IS_ALPHA": True}, {"ORTH": "="}, {"LIKE_NUM": True}]])
    matches = matcher(doc)
    assert len(matches) == 2


@pytest.mark.issue(615)
def test_issue615(en_tokenizer):
    # Regression: merging matched spans must keep entity annotations intact.
    def merge_phrases(matcher, doc, i, matches):
        """Merge a phrase. We have to be careful here because we'll change the
        token indices. To avoid problems, merge all the phrases once we're called
        on the last match."""
        if i != len(matches) - 1:
            return None
        spans = [Span(doc, start, end, label=label) for label, start, end in matches]
        with doc.retokenize() as retokenizer:
            for span in spans:
                tag = "NNP" if span.label_ else span.root.tag_
                attrs = {"tag": tag, "lemma": span.text}
                retokenizer.merge(span, attrs=attrs)
                doc.ents = doc.ents + (span,)

    text = "The golf club is broken"
    pattern = [{"ORTH": "golf"}, {"ORTH": "club"}]
    label = "Sport_Equipment"
    doc = en_tokenizer(text)
    matcher = Matcher(doc.vocab)
    matcher.add(label, [pattern], on_match=merge_phrases)
    matcher(doc)
    entities = list(doc.ents)
    assert entities != []
    assert entities[0].label != 0


@pytest.mark.issue(850)
def test_issue850():
    """The variable-length pattern matches the succeeding token. Check we
    handle the ambiguity correctly."""
    vocab = Vocab(lex_attr_getters={LOWER: lambda string: string.lower()})
    matcher = Matcher(vocab)
    pattern = [{"LOWER": "bob"}, {"OP": "*"}, {"LOWER": "frank"}]
    matcher.add("FarAway", [pattern])
    doc = Doc(matcher.vocab, words=["bob", "and", "and", "frank"])
    match = matcher(doc)
    assert len(match) == 1
    ent_id, start, end = match[0]
    assert start == 0
    assert end == 4


@pytest.mark.issue(850)
def test_issue850_basic():
    """Test Matcher matches with '*' operator and Boolean flag"""
    vocab = Vocab(lex_attr_getters={LOWER: lambda string: string.lower()})
    matcher = Matcher(vocab)
    pattern = [{"LOWER": "bob"}, {"OP": "*", "LOWER": "and"}, {"LOWER": "frank"}]
    matcher.add("FarAway", [pattern])
    doc = Doc(matcher.vocab, words=["bob", "and", "and", "frank"])
    match = matcher(doc)
    assert len(match) == 1
    ent_id, start, end = match[0]
    assert start == 0
    assert end == 4


@pytest.mark.issue(1434)
def test_issue1434():
    """Test matches occur when optional element at end of short doc."""
    pattern = [{"ORTH": "Hello"}, {"IS_ALPHA": True, "OP": "?"}]
    vocab = Vocab(lex_attr_getters=LEX_ATTRS)
    hello_world = Doc(vocab, words=["Hello", "World"])
    hello = Doc(vocab, words=["Hello"])
    matcher = Matcher(vocab)
    matcher.add("MyMatcher", [pattern])
    matches = matcher(hello_world)
    assert matches
    matches = matcher(hello)
    assert matches
@pytest.mark.parametrize(
    "string,start,end",
    [
        ("a", 0, 1),
        ("a b", 0, 2),
        ("a c", 0, 1),
        ("a b c", 0, 2),
        ("a b b c", 0, 3),
        ("a b b", 0, 3),
    ],
)
@pytest.mark.issue(1450)
def test_issue1450(string, start, end):
    """Test matcher works when patterns end with * operator."""
    pattern = [{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]
    matcher = Matcher(Vocab())
    matcher.add("TSTEND", [pattern])
    doc = Doc(Vocab(), words=string.split())
    matches = matcher(doc)
    if start is None or end is None:
        assert matches == []
        # BUGFIX: without this return the no-match branch fell through to
        # matches[-1] below and would die with an IndexError instead of
        # passing cleanly.
        return
    assert matches[-1][1] == start
    assert matches[-1][2] == end
@pytest.mark.issue(1945)
def test_issue1945():
    """Test regression in Matcher introduced in v2.0.6."""
    matcher = Matcher(Vocab())
    matcher.add("MWE", [[{"orth": "a"}, {"orth": "a"}]])
    doc = Doc(matcher.vocab, words=["a", "a", "a"])
    matches = matcher(doc)  # we should see two overlapping matches here
    assert len(matches) == 2
    assert matches[0][1:] == (0, 2)
    assert matches[1][1:] == (1, 3)


@pytest.mark.issue(1971)
def test_issue1971(en_vocab):
    # Possibly related to #2675 and #2671?
    matcher = Matcher(en_vocab)
    pattern = [
        {"ORTH": "Doe"},
        {"ORTH": "!", "OP": "?"},
        {"_": {"optional": True}, "OP": "?"},
        {"ORTH": "!", "OP": "?"},
    ]
    Token.set_extension("optional", default=False)
    matcher.add("TEST", [pattern])
    doc = Doc(en_vocab, words=["Hello", "John", "Doe", "!"])
    # We could also assert length 1 here, but this is more conclusive, because
    # the real problem here is that it returns a duplicate match for a match_id
    # that's not actually in the vocab!
    matches = matcher(doc)
    assert all([match_id in en_vocab.strings for match_id, start, end in matches])


@pytest.mark.issue(1971)
def test_issue_1971_2(en_vocab):
    # Both orderings of the EUR/number pattern pair must produce a match.
    matcher = Matcher(en_vocab)
    pattern1 = [{"ORTH": "EUR", "LOWER": {"IN": ["eur"]}}, {"LIKE_NUM": True}]
    pattern2 = [{"LIKE_NUM": True}, {"ORTH": "EUR"}]  # {"IN": ["EUR"]}}]
    doc = Doc(en_vocab, words=["EUR", "10", "is", "10", "EUR"])
    matcher.add("TEST1", [pattern1, pattern2])
    matches = matcher(doc)
    assert len(matches) == 2


@pytest.mark.issue(1971)
def test_issue_1971_3(en_vocab):
    """Test that pattern matches correctly for multiple extension attributes."""
    Token.set_extension("a", default=1, force=True)
    Token.set_extension("b", default=2, force=True)
    doc = Doc(en_vocab, words=["hello", "world"])
    matcher = Matcher(en_vocab)
    matcher.add("A", [[{"_": {"a": 1}}]])
    matcher.add("B", [[{"_": {"b": 2}}]])
    matches = sorted((en_vocab.strings[m_id], s, e) for m_id, s, e in matcher(doc))
    assert len(matches) == 4
    assert matches == sorted([("A", 0, 1), ("A", 1, 2), ("B", 0, 1), ("B", 1, 2)])


@pytest.mark.issue(1971)
def test_issue_1971_4(en_vocab):
    """Test that pattern matches correctly with multiple extension attribute
    values on a single token.
    """
    Token.set_extension("ext_a", default="str_a", force=True)
    Token.set_extension("ext_b", default="str_b", force=True)
    matcher = Matcher(en_vocab)
    doc = Doc(en_vocab, words=["this", "is", "text"])
    pattern = [{"_": {"ext_a": "str_a", "ext_b": "str_b"}}] * 3
    matcher.add("TEST", [pattern])
    matches = matcher(doc)
    # Uncommenting this caused a segmentation fault
    assert len(matches) == 1
    assert matches[0] == (en_vocab.strings["TEST"], 0, 3)
@pytest.mark.issue(2464)
def test_issue2464(en_vocab):
    """Test problem with successive ?. This is the same bug, so putting it here."""
    matcher = Matcher(en_vocab)
    doc = Doc(en_vocab, words=["a", "b"])
    matcher.add("4", [[{"OP": "?"}, {"OP": "?"}]])
    matches = matcher(doc)
    assert len(matches) == 3


@pytest.mark.issue(2569)
def test_issue2569(en_tokenizer):
    """Test that operator + is greedy."""
    doc = en_tokenizer("It is May 15, 1993.")
    doc.ents = [Span(doc, 2, 6, label=doc.vocab.strings["DATE"])]
    matcher = Matcher(doc.vocab)
    matcher.add("RULE", [[{"ENT_TYPE": "DATE", "OP": "+"}]])
    matched = [doc[start:end] for _, start, end in matcher(doc)]
    matched = sorted(matched, key=len, reverse=True)
    assert len(matched) == 10
    assert len(matched[0]) == 4
    assert matched[0].text == "May 15, 1993"


@pytest.mark.issue(2671)
def test_issue2671():
    """Ensure the correct entity ID is returned for matches with quantifiers.
    See also #2675
    """
    nlp = English()
    matcher = Matcher(nlp.vocab)
    pattern_id = "test_pattern"
    pattern = [
        {"LOWER": "high"},
        {"IS_PUNCT": True, "OP": "?"},
        {"LOWER": "adrenaline"},
    ]
    matcher.add(pattern_id, [pattern])
    doc1 = nlp("This is a high-adrenaline situation.")
    doc2 = nlp("This is a high adrenaline situation.")
    matches1 = matcher(doc1)
    for match_id, start, end in matches1:
        assert nlp.vocab.strings[match_id] == pattern_id
    matches2 = matcher(doc2)
    for match_id, start, end in matches2:
        assert nlp.vocab.strings[match_id] == pattern_id


@pytest.mark.issue(3009)
def test_issue3009(en_vocab):
    """Test problem with matcher quantifiers"""
    patterns = [
        [{"ORTH": "has"}, {"LOWER": "to"}, {"LOWER": "do"}, {"TAG": "IN"}],
        [
            {"ORTH": "has"},
            {"IS_ASCII": True, "IS_PUNCT": False, "OP": "*"},
            {"LOWER": "to"},
            {"LOWER": "do"},
            {"TAG": "IN"},
        ],
        [
            {"ORTH": "has"},
            {"IS_ASCII": True, "IS_PUNCT": False, "OP": "?"},
            {"LOWER": "to"},
            {"LOWER": "do"},
            {"TAG": "IN"},
        ],
    ]
    words = ["also", "has", "to", "do", "with"]
    tags = ["RB", "VBZ", "TO", "VB", "IN"]
    pos = ["ADV", "VERB", "ADP", "VERB", "ADP"]
    doc = Doc(en_vocab, words=words, tags=tags, pos=pos)
    matcher = Matcher(en_vocab)
    for i, pattern in enumerate(patterns):
        matcher.add(str(i), [pattern])
    matches = matcher(doc)
    assert matches


@pytest.mark.issue(3328)
def test_issue3328(en_vocab):
    # The IN set-membership predicate must work across multiple patterns.
    doc = Doc(en_vocab, words=["Hello", ",", "how", "are", "you", "doing", "?"])
    matcher = Matcher(en_vocab)
    patterns = [
        [{"LOWER": {"IN": ["hello", "how"]}}],
        [{"LOWER": {"IN": ["you", "doing"]}}],
    ]
    matcher.add("TEST", patterns)
    matches = matcher(doc)
    assert len(matches) == 4
    matched_texts = [doc[start:end].text for _, start, end in matches]
    assert matched_texts == ["Hello", "how", "you", "doing"]
@pytest.mark.issue(3549)
def test_issue3549(en_vocab):
"""Test that match pattern validation doesn't raise on empty errors."""
matcher = Matcher(en_vocab, validate=True)
pattern = [{"LOWER": "hello"}, {"LOWER": "world"}]
matcher.add("GOOD", [pattern])
with pytest.raises(MatchPatternError):
matcher.add("BAD", [[{"X": "Y"}]])
@pytest.mark.skip("Matching currently only works on strings and integers")
@pytest.mark.issue(3555)
def test_issue3555(en_vocab):
"""Test that custom extensions with default None don't break matcher."""
Token.set_extension("issue3555", default=None)
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "have"}, {"_": {"issue3555": True}}]
matcher.add("TEST", [pattern])
doc = Doc(en_vocab, words=["have", "apple"])
matcher(doc)
@pytest.mark.issue(3839)
def test_issue3839(en_vocab):
    """Test that match IDs returned by the matcher are correct, are in the string"""
    doc = Doc(en_vocab, words=["terrific", "group", "of", "people"])
    match_id = "PATTERN"
    one_optional = [{"LOWER": "terrific"}, {"OP": "?"}, {"LOWER": "group"}]
    two_optional = [{"LOWER": "terrific"}, {"OP": "?"}, {"OP": "?"}, {"LOWER": "group"}]
    for pattern in (one_optional, two_optional):
        matcher = Matcher(en_vocab)
        matcher.add(match_id, [pattern])
        matches = matcher(doc)
        # The returned key must be the StringStore hash of the pattern name.
        assert matches[0][0] == en_vocab.strings[match_id]
@pytest.mark.issue(3879)
def test_issue3879(en_vocab):
    """Regression test for optional leading tokens (issue #3879)."""
    doc = Doc(en_vocab, words=["This", "is", "a", "test", "."])
    assert len(doc) == 5
    matcher = Matcher(en_vocab)
    # Both leading tokens are optional, so "test" alone also matches.
    matcher.add("TEST", [[{"ORTH": "This", "OP": "?"}, {"OP": "?"}, {"ORTH": "test"}]])
    # Historically produced a false-positive match on "is a test".
    assert len(matcher(doc)) == 2
@pytest.mark.issue(3951)
def test_issue3951(en_vocab):
    """Test that combinations of optional rules are matched correctly."""
    matcher = Matcher(en_vocab)
    matcher.add(
        "TEST",
        [
            [
                {"LOWER": "hello"},
                {"LOWER": "this", "OP": "?"},
                {"OP": "?"},
                {"LOWER": "world"},
            ]
        ],
    )
    doc = Doc(en_vocab, words=["Hello", "my", "new", "world"])
    # "this" is absent and only a single wildcard is allowed, so no match.
    assert len(matcher(doc)) == 0
@pytest.mark.issue(4120)
def test_issue4120(en_vocab):
    """Test that matches without a final {OP: ?} token are returned."""
    matcher = Matcher(en_vocab)
    matcher.add("TEST", [[{"ORTH": "a"}, {"OP": "?"}]])
    assert len(matcher(Doc(en_vocab, words=["a"]))) == 1  # works
    assert len(matcher(Doc(en_vocab, words=["a", "b", "c"]))) == 2  # fixed

    matcher = Matcher(en_vocab)
    matcher.add("TEST", [[{"ORTH": "a"}, {"OP": "?"}, {"ORTH": "b"}]])
    assert len(matcher(Doc(en_vocab, words=["a", "b", "b", "c"]))) == 2  # works

    matcher = Matcher(en_vocab)
    matcher.add("TEST", [[{"ORTH": "a"}, {"OP": "?"}, {"ORTH": "b", "OP": "?"}]])
    assert len(matcher(Doc(en_vocab, words=["a", "b", "b", "c"]))) == 3  # fixed
@pytest.mark.parametrize(
    "pattern,re_pattern",
    [
        (pattern1, re_pattern1),
        (pattern2, re_pattern2),
        (pattern3, re_pattern3),
        (pattern4, re_pattern4),
        (pattern5, re_pattern5),
    ],
)
def test_greedy_matching_first(doc, text, pattern, re_pattern):
    """Test that the greedy matching behavior "FIRST" is consistent with
    other re implementations."""
    matcher = Matcher(doc.vocab)
    matcher.add(re_pattern, [pattern], greedy="FIRST")
    expected_spans = [m.span() for m in re.finditer(re_pattern, text)]
    for (_, start, end), (re_start, re_end) in zip(matcher(doc), expected_spans):
        # Compare the matched text, not the exact token positions.
        assert doc[start:end].text == doc[re_start:re_end].text
@pytest.mark.parametrize(
    "pattern,longest",
    [
        (pattern1, longest1),
        (pattern2, longest2),
        (pattern3, longest3),
        (pattern4, longest4),
        (pattern5, longest5),
    ],
)
def test_greedy_matching_longest(doc, text, pattern, longest):
    """Test the "LONGEST" greedy matching behavior"""
    matcher = Matcher(doc.vocab)
    matcher.add("RULE", [pattern], greedy="LONGEST")
    # Every reported match must cover the expected longest text.
    for _, start, end in matcher(doc):
        assert doc[start:end].text == longest
def test_greedy_matching_longest_first(en_tokenizer):
    """Test that "LONGEST" matching prefers the first of two equally long matches"""
    doc = en_tokenizer(" ".join("CCC"))
    matcher = Matcher(doc.vocab)
    matcher.add("RULE", [[{"ORTH": "C"}, {"ORTH": "C"}]], greedy="LONGEST")
    matches = matcher(doc)
    # Candidates are spans 0-2 and 1-3; the earlier one must win.
    assert len(matches) == 1
    assert (matches[0][1], matches[0][2]) == (0, 2)
def test_invalid_greediness(doc, text):
    """An unknown greediness policy must be rejected with a ValueError."""
    matcher = Matcher(doc.vocab)
    with pytest.raises(ValueError):
        matcher.add("RULE", [pattern1], greedy="GREEDY")
@pytest.mark.parametrize(
    "pattern,re_pattern",
    [
        (pattern1, re_pattern1),
        (pattern2, re_pattern2),
        (pattern3, re_pattern3),
        (pattern4, re_pattern4),
        (pattern5, re_pattern5),
    ],
)
def test_match_consuming(doc, text, pattern, re_pattern):
    """Test that matcher.__call__ consumes tokens on a match similar to
    re.findall."""
    matcher = Matcher(doc.vocab)
    matcher.add(re_pattern, [pattern], greedy="FIRST")
    re_spans = [m.span() for m in re.finditer(re_pattern, text)]
    # The matcher must yield exactly as many matches as re.finditer.
    assert len(matcher(doc)) == len(re_spans)
def test_operator_combos(en_vocab):
    """Exhaustive combinations of the "+" operator against short token runs.

    Each case is (token string, whitespace-separated pattern spec, expected
    match?).  A trailing "+" in the spec means OP "+" on that token.
    """
    cases = [
        ("aaab", "a a a b", True),
        ("aaab", "a+ b", True),
        ("aaab", "a+ a+ b", True),
        ("aaab", "a+ a+ a b", True),
        ("aaab", "a+ a+ a+ b", True),
        ("aaab", "a+ a a b", True),
        ("aaab", "a+ a a", True),
        ("aaab", "a+", True),
        ("aaa", "a+ b", False),
        ("aaa", "a+ a+ b", False),
        ("aaa", "a+ a+ a+ b", False),
        ("aaa", "a+ a b", False),
        ("aaa", "a+ a a b", False),
        ("aaab", "a+ a a", True),
        ("aaab", "a+", True),
        ("aaab", "a+ a b", True),
    ]
    for string, pattern_str, result in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        # Translate the compact spec into a Matcher pattern.
        pattern = []
        for part in pattern_str.split():
            if part.endswith("+"):
                pattern.append({"ORTH": part[0], "OP": "+"})
            else:
                pattern.append({"ORTH": part})
        matcher.add("PATTERN", [pattern])
        matches = matcher(doc)
        if result:
            assert matches, (string, pattern_str)
        else:
            assert not matches, (string, pattern_str)
@pytest.mark.issue(1450)
def test_matcher_end_zero_plus(en_vocab):
    """Test matcher works when patterns end with * operator. (issue 1450)"""
    matcher = Matcher(en_vocab)
    matcher.add("TSTEND", [[{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]])
    make_doc = lambda string: Doc(matcher.vocab, words=string.split())
    expected_counts = [
        ("a", 1),
        ("a b", 2),
        ("a c", 1),
        ("a b c", 2),
        ("a b b c", 3),
        ("a b b", 3),
    ]
    for string, n_matches in expected_counts:
        assert len(matcher(make_doc(string))) == n_matches
def test_matcher_sets_return_correct_tokens(en_vocab):
    """Single-token "IN" patterns must each match exactly their own word."""
    matcher = Matcher(en_vocab)
    # One single-token pattern per word we expect to match.
    matcher.add("TEST", [[{"LOWER": {"IN": [word]}}] for word in ("zero", "one", "two")])
    doc = Doc(en_vocab, words="zero one two three".split())
    matched_texts = [
        Span(doc, start, end, label=label).text for label, start, end in matcher(doc)
    ]
    assert matched_texts == ["zero", "one", "two"]
@pytest.mark.filterwarnings("ignore:\\[W036")
def test_matcher_remove():
    """Rules can be removed exactly once; a second removal raises."""
    nlp = English()
    matcher = Matcher(nlp.vocab)
    text = "This is a test case."
    assert len(matcher) == 0
    matcher.add("Rule", [[{"ORTH": "test"}, {"OP": "?"}]])
    assert "Rule" in matcher
    # Two matches while the rule is registered...
    assert len(matcher(nlp(text))) == 2
    # ...none once it has been removed...
    matcher.remove("Rule")
    assert len(matcher(nlp(text))) == 0
    # ...and removing it again is an error.
    with pytest.raises(ValueError):
        matcher.remove("Rule")
def test_matcher_with_alignments_greedy_longest(en_vocab):
    """LONGEST greedy matching must report token->pattern alignments.

    Each case is (token string, pattern spec, expected alignment of the first
    match).  The alignment lists, per matched token, the index of the pattern
    element that consumed it.  A trailing "+", "*" or "?" in the spec sets
    the corresponding OP on that token.
    """
    cases = [
        ("aaab", "a* b", [0, 0, 0, 1]),
        ("baab", "b a* b", [0, 1, 1, 2]),
        ("aaab", "a a a b", [0, 1, 2, 3]),
        ("aaab", "a+ b", [0, 0, 0, 1]),
        ("aaba", "a+ b a+", [0, 0, 1, 2]),
        ("aabaa", "a+ b a+", [0, 0, 1, 2, 2]),
        ("aaba", "a+ b a*", [0, 0, 1, 2]),
        ("aaaa", "a*", [0, 0, 0, 0]),
        ("baab", "b a* b b*", [0, 1, 1, 2]),
        ("aabb", "a* b* a*", [0, 0, 1, 1]),
        ("aaab", "a+ a+ a b", [0, 1, 2, 3]),
        ("aaab", "a+ a+ a+ b", [0, 1, 2, 3]),
        ("aaab", "a+ a a b", [0, 1, 2, 3]),
        ("aaab", "a+ a a", [0, 1, 2]),
        ("aaab", "a+ a a?", [0, 1, 2]),
        ("aaaa", "a a a a a?", [0, 1, 2, 3]),
        ("aaab", "a+ a b", [0, 0, 1, 2]),
        ("aaab", "a+ a+ b", [0, 0, 1, 2]),
    ]
    for string, pattern_str, result in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        # Translate the compact spec into a Matcher pattern.
        pattern = []
        for part in pattern_str.split():
            if part.endswith("+"):
                pattern.append({"ORTH": part[0], "OP": "+"})
            elif part.endswith("*"):
                pattern.append({"ORTH": part[0], "OP": "*"})
            elif part.endswith("?"):
                pattern.append({"ORTH": part[0], "OP": "?"})
            else:
                pattern.append({"ORTH": part})
        matcher.add("PATTERN", [pattern], greedy="LONGEST")
        matches = matcher(doc, with_alignments=True)
        n_matches = len(matches)
        # Only the first (longest, earliest) match is checked.
        _, s, e, expected = matches[0]
        assert expected == result, (string, pattern_str, s, e, n_matches)
def test_matcher_with_alignments_nongreedy(en_vocab):
    """Non-greedy matching must report an alignment for every match.

    Each case is (case id, token string, pattern spec, list of acceptable
    alignments).  Every match's alignment must appear in the acceptable list
    and its length must equal the match's token span.
    """
    cases = [
        (0, "aaab", "a* b", [[0, 1], [0, 0, 1], [0, 0, 0, 1], [1]]),
        (1, "baab", "b a* b", [[0, 1, 1, 2]]),
        (2, "aaab", "a a a b", [[0, 1, 2, 3]]),
        (3, "aaab", "a+ b", [[0, 1], [0, 0, 1], [0, 0, 0, 1]]),
        (4, "aaba", "a+ b a+", [[0, 1, 2], [0, 0, 1, 2]]),
        (
            5,
            "aabaa",
            "a+ b a+",
            [[0, 1, 2], [0, 0, 1, 2], [0, 0, 1, 2, 2], [0, 1, 2, 2]],
        ),
        (6, "aaba", "a+ b a*", [[0, 1], [0, 0, 1], [0, 0, 1, 2], [0, 1, 2]]),
        (7, "aaaa", "a*", [[0], [0, 0], [0, 0, 0], [0, 0, 0, 0]]),
        (8, "baab", "b a* b b*", [[0, 1, 1, 2]]),
        (
            9,
            "aabb",
            "a* b* a*",
            [[1], [2], [2, 2], [0, 1], [0, 0, 1], [0, 0, 1, 1], [0, 1, 1], [1, 1]],
        ),
        (10, "aaab", "a+ a+ a b", [[0, 1, 2, 3]]),
        (11, "aaab", "a+ a+ a+ b", [[0, 1, 2, 3]]),
        (12, "aaab", "a+ a a b", [[0, 1, 2, 3]]),
        (13, "aaab", "a+ a a", [[0, 1, 2]]),
        (14, "aaab", "a+ a a?", [[0, 1], [0, 1, 2]]),
        (15, "aaaa", "a a a a a?", [[0, 1, 2, 3]]),
        (16, "aaab", "a+ a b", [[0, 1, 2], [0, 0, 1, 2]]),
        (17, "aaab", "a+ a+ b", [[0, 1, 2], [0, 0, 1, 2]]),
    ]
    for case_id, string, pattern_str, results in cases:
        matcher = Matcher(en_vocab)
        doc = Doc(matcher.vocab, words=list(string))
        # Translate the compact spec into a Matcher pattern.
        pattern = []
        for part in pattern_str.split():
            if part.endswith("+"):
                pattern.append({"ORTH": part[0], "OP": "+"})
            elif part.endswith("*"):
                pattern.append({"ORTH": part[0], "OP": "*"})
            elif part.endswith("?"):
                pattern.append({"ORTH": part[0], "OP": "?"})
            else:
                pattern.append({"ORTH": part})
        matcher.add("PATTERN", [pattern])
        matches = matcher(doc, with_alignments=True)
        n_matches = len(matches)
        for _, s, e, expected in matches:
            assert expected in results, (case_id, string, pattern_str, s, e, n_matches)
            # The alignment covers exactly the matched span.
            assert len(expected) == e - s
| explosion/spaCy | spacy/tests/matcher/test_matcher_logic.py | Python | mit | 26,593 |
import random, math, sys, time
SIZE = 9                   # board is SIZE x SIZE intersections
GAMES = 200                # Monte-Carlo playouts per computer move
KOMI = 7.5                 # compensation points added to white's score
EMPTY, WHITE, BLACK = 0, 1, 2
SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'}  # characters used by Board.__repr__
PASS = -1                  # sentinel move value meaning "pass"
MAXMOVES = SIZE*SIZE*3     # hard cap on a random playout's length
TIMESTAMP = 0              # global generation counter for chain bookkeeping
MOVES = 0                  # total number of moves played (statistics only)
def to_pos(x, y):
    """Convert board coordinates (x, y) into a flat square index."""
    return x + y * SIZE
def to_xy(pos):
    """Convert a flat square index back into (x, y) board coordinates."""
    row, col = divmod(pos, SIZE)
    return col, row
class Square:
    """One board intersection; doubles as a union-find node for its chain."""
    def __init__(self, board, pos):
        self.board = board
        self.pos = pos
        self.timestamp = TIMESTAMP      # generation marker used by Board.useful()
        self.removestamp = TIMESTAMP    # generation marker used during captures
        # One random Zobrist key per color (EMPTY/WHITE/BLACK) for this square.
        self.zobrist_strings = [random.randrange(sys.maxint) for i in range(3)]
    def set_neighbours(self):
        """Cache the (up to four) orthogonally adjacent squares."""
        x, y = self.pos % SIZE, self.pos / SIZE;
        self.neighbours = []
        for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
            newx, newy = x + dx, y + dy
            if 0 <= newx < SIZE and 0 <= newy < SIZE:
                self.neighbours.append(self.board.squares[to_pos(newx, newy)])
    def move(self, color):
        """Place a stone of `color` here, merging friendly chains and
        capturing enemy chains whose `ledges` counter drops to zero.

        NOTE(review): `ledges` appears to approximate the chain's liberty
        count (shared liberties can be double-counted) -- confirm before
        relying on it as an exact liberty count.
        """
        global TIMESTAMP, MOVES
        TIMESTAMP += 1
        MOVES += 1
        self.board.zobrist.update(self, color)
        self.color = color
        self.reference = self   # new stone starts as root of its own chain
        self.ledges = 0
        self.used = True
        for neighbour in self.neighbours:
            neighcolor = neighbour.color
            if neighcolor == EMPTY:
                self.ledges += 1
            else:
                neighbour_ref = neighbour.find(update=True)
                if neighcolor == color:
                    if neighbour_ref.reference.pos != self.pos:
                        # Merge the friendly chain into this new root.
                        self.ledges += neighbour_ref.ledges
                        neighbour_ref.reference = self
                    self.ledges -= 1
                else:
                    neighbour_ref.ledges -= 1
                    if neighbour_ref.ledges == 0:
                        # Enemy chain lost its last recorded liberty: capture.
                        neighbour.remove(neighbour_ref)
        self.board.zobrist.add()
    def remove(self, reference, update=True):
        """Remove this stone and, recursively, the rest of its chain.

        With update=False only the Zobrist hash and removestamps are touched;
        Board.useful() uses that to probe captures without mutating state.
        """
        self.board.zobrist.update(self, EMPTY)
        self.removestamp = TIMESTAMP
        if update:
            self.color = EMPTY
            self.board.emptyset.add(self.pos)
#            if color == BLACK:
#                self.board.black_dead += 1
#            else:
#                self.board.white_dead += 1
        for neighbour in self.neighbours:
            if neighbour.color != EMPTY and neighbour.removestamp != TIMESTAMP:
                neighbour_ref = neighbour.find(update)
                if neighbour_ref.pos == reference.pos:
                    # Same chain: keep tearing it down.
                    neighbour.remove(reference, update)
                else:
                    if update:
                        # An adjacent foreign chain gains a liberty here.
                        neighbour_ref.ledges += 1
    def find(self, update=False):
        """Union-find root lookup, with optional path compression."""
        reference = self.reference
        if reference.pos != self.pos:
            reference = reference.find(update)
            if update:
                self.reference = reference
        return reference
    def __repr__(self):
        return repr(to_xy(self.pos))
class EmptySet:
    """The set of empty positions, stored as a list with O(1) swap-removal."""
    def __init__(self, board):
        self.board = board
        # NOTE: Python 2 -- range() returns a mutable list here.
        self.empties = range(SIZE*SIZE)      # list of empty positions
        self.empty_pos = range(SIZE*SIZE)    # position -> index into `empties`
    def random_choice(self):
        """Pick a random *useful* empty position, or PASS if none qualifies.

        Rejected candidates are swapped towards the tail so they are not
        drawn again during this call.
        """
        choices = len(self.empties)
        while choices:
            i = int(random.random()*choices)
            pos = self.empties[i]
            if self.board.useful(pos):
                return pos
            choices -= 1
            self.set(i, self.empties[choices])
            self.set(choices, pos)
        return PASS
    def add(self, pos):
        """Mark `pos` as empty again."""
        self.empty_pos[pos] = len(self.empties)
        self.empties.append(pos)
    def remove(self, pos):
        """Remove `pos` by swapping the last element into its slot."""
        self.set(self.empty_pos[pos], self.empties[len(self.empties)-1])
        self.empties.pop()
    def set(self, i, pos):
        """Store `pos` at index `i`, keeping the reverse index in sync."""
        self.empties[i] = pos
        self.empty_pos[pos] = i
class ZobristHash:
    """Incremental Zobrist hash of a board plus the set of seen positions."""
    def __init__(self, board):
        self.board = board
        initial = 0
        # Start from the hash of the all-empty board.
        for square in board.squares:
            initial ^= square.zobrist_strings[EMPTY]
        self.hash = initial
        self.hash_set = {initial}
    def update(self, square, color):
        """Re-key the hash for `square` changing from its color to `color`."""
        keys = square.zobrist_strings
        self.hash ^= keys[square.color] ^ keys[color]
    def add(self):
        """Record the current position as seen."""
        self.hash_set.add(self.hash)
    def dupe(self):
        """Whether the current position was already seen earlier."""
        return self.hash in self.hash_set
class Board:
    """Go board: a flat list of Squares plus per-game state.

    Chains are tracked via the union-find structure stored on the squares,
    `emptyset` mirrors the empty positions, and `zobrist` detects repeated
    positions.
    """
    def __init__(self):
        self.squares = [Square(self, pos) for pos in range(SIZE*SIZE)]
        for square in self.squares:
            square.set_neighbours()
        self.reset()
    def reset(self):
        """Clear all squares and restart game state; BLACK moves first."""
        for square in self.squares:
            square.color = EMPTY
            square.used = False
        self.emptyset = EmptySet(self)
        self.zobrist = ZobristHash(self)
        self.color = BLACK
        self.finished = False
        self.lastmove = -2   # neither a board position nor PASS
        self.history = []
        self.white_dead = 0
        self.black_dead = 0
    def move(self, pos):
        """Play `pos` for the side to move; two consecutive passes finish."""
        if pos != PASS:
            # Only look the square up for real moves; indexing with PASS (-1)
            # would silently fetch the last square.
            square = self.squares[pos]
            square.move(self.color)
            self.emptyset.remove(square.pos)
        elif self.lastmove == PASS:
            self.finished = True
        if self.color == BLACK: self.color = WHITE
        else: self.color = BLACK
        self.lastmove = pos
        self.history.append(pos)
    def random_move(self):
        """A random useful move, or PASS when none exists."""
        return self.emptyset.random_choice()
    def useful_fast(self, square):
        """Cheap usefulness test: a never-used square with an empty neighbour."""
        if not square.used:
            for neighbour in square.neighbours:
                if neighbour.color == EMPTY:
                    return True
        return False
    def useful(self, pos):
        """Whether playing at `pos` is worthwhile for the side to move.

        Simulates the move on the Zobrist hash (restored afterwards) to reject
        repeated positions, and rejects hopeless self-atari-style moves.
        """
        global TIMESTAMP
        TIMESTAMP += 1
        square = self.squares[pos]
        if self.useful_fast(square):
            return True
        old_hash = self.zobrist.hash
        self.zobrist.update(square, self.color)
        empties = opps = weak_opps = neighs = weak_neighs = 0
        for neighbour in square.neighbours:
            neighcolor = neighbour.color
            if neighcolor == EMPTY:
                empties += 1
                continue
            neighbour_ref = neighbour.find()
            if neighbour_ref.timestamp != TIMESTAMP:
                # First time this chain is seen during this probe.
                if neighcolor == self.color:
                    neighs += 1
                else:
                    opps += 1
                neighbour_ref.timestamp = TIMESTAMP
                neighbour_ref.temp_ledges = neighbour_ref.ledges
            neighbour_ref.temp_ledges -= 1
            if neighbour_ref.temp_ledges == 0:
                # This chain would be left without recorded liberties.
                if neighcolor == self.color:
                    weak_neighs += 1
                else:
                    weak_opps += 1
                    neighbour_ref.remove(neighbour_ref, update=False)
        dupe = self.zobrist.dupe()
        self.zobrist.hash = old_hash
        strong_neighs = neighs-weak_neighs
        strong_opps = opps-weak_opps
        return not dupe and \
            (empties or weak_opps or (strong_neighs and (strong_opps or weak_neighs)))
    def useful_moves(self):
        """All currently useful positions."""
        return [pos for pos in self.emptyset.empties if self.useful(pos)]
    def replay(self, history):
        """Replay a list of moves on this (freshly reset) board."""
        for pos in history:
            self.move(pos)
    def score(self, color):
        """Score for `color`: own stones, fully surrounded empty points and
        captured enemy stones; white additionally receives KOMI."""
        if color == WHITE:
            count = KOMI + self.black_dead
        else:
            count = self.white_dead
        for square in self.squares:
            squarecolor = square.color
            if squarecolor == color:
                count += 1
            elif squarecolor == EMPTY:
                surround = 0
                for neighbour in square.neighbours:
                    if neighbour.color == color:
                        surround += 1
                if surround == len(square.neighbours):
                    count += 1
        return count
    def check(self):
        """Expensive self-consistency check of the chain bookkeeping."""
        for square in self.squares:
            if square.color == EMPTY:
                continue
            # Recompute this square's chain by flood fill...
            members1 = set([square])
            changed = True
            while changed:
                changed = False
                for member in members1.copy():
                    for neighbour in member.neighbours:
                        if neighbour.color == square.color and neighbour not in members1:
                            changed = True
                            members1.add(neighbour)
            ledges1 = 0
            for member in members1:
                for neighbour in member.neighbours:
                    if neighbour.color == EMPTY:
                        ledges1 += 1
            # ...and compare with what the union-find structure reports.
            root = square.find()
            members2 = set()
            for square2 in self.squares:
                if square2.color != EMPTY and square2.find() == root:
                    members2.add(square2)
            ledges2 = root.ledges
            assert members1 == members2
            assert ledges1 == ledges2, ('ledges differ at %r: %d %d' % (square, ledges1, ledges2))
        empties1 = set(self.emptyset.empties)
        empties2 = set()
        for square in self.squares:
            if square.color == EMPTY:
                empties2.add(square.pos)
        # BUG FIX: these two sets were computed but never compared, so the
        # empty-set bookkeeping was effectively unchecked.
        assert empties1 == empties2
    def __repr__(self):
        result = []
        for y in range(SIZE):
            start = to_pos(0, y)
            result.append(''.join([SHOW[square.color]+' ' for square in self.squares[start:start+SIZE]]))
        return '\n'.join(result)
class UCTNode:
    """Node of the UCT (Upper Confidence bounds applied to Trees) search."""
    def __init__(self):
        self.bestchild = None
        self.pos = -1            # move that leads to this node
        self.wins = 0
        self.losses = 0
        # One slot per board position for the child reached by that move.
        self.pos_child = [None for x in range(SIZE*SIZE)]
        self.parent = None
    def play(self, board):
        """ uct tree search: descend, expand one leaf, play out, update """
        color = board.color
        node = self
        path = [node]
        while True:
            pos = node.select(board)
            if pos == PASS:
                break
            board.move(pos)
            child = node.pos_child[pos]
            if not child:
                # Expansion: create the child and stop the descent here.
                child = node.pos_child[pos] = UCTNode()
                child.unexplored = board.useful_moves()
                child.pos = pos
                child.parent = node
                path.append(child)
                break
            path.append(child)
            node = child
        self.random_playout(board)
        self.update_path(board, color, path)
    def select(self, board):
        """ select move; unexplored children first, then according to uct value """
        if self.unexplored:
            # Draw a random unexplored move, swap-removing it from the list.
            i = random.randrange(len(self.unexplored))
            pos = self.unexplored[i]
            self.unexplored[i] = self.unexplored[len(self.unexplored)-1]
            self.unexplored.pop()
            return pos
        elif self.bestchild:
            return self.bestchild.pos
        else:
            return PASS
    def random_playout(self, board):
        """ random play until both players pass """
        for x in range(MAXMOVES): # XXX while not self.finished?
            if board.finished:
                break
            board.move(board.random_move())
    def update_path(self, board, color, path):
        """ update win/loss count along path """
        # True when black wins the finished playout (ties go to black).
        wins = board.score(BLACK) >= board.score(WHITE)
        for node in path:
            # Alternate the side to move as we walk down the path.
            if color == BLACK: color = WHITE
            else: color = BLACK
            if wins == (color == BLACK):
                node.wins += 1
            else:
                node.losses += 1
            if node.parent:
                node.parent.bestchild = node.parent.best_child()
    def score(self):
        """UCB-style value: win rate plus an exploration bonus."""
        winrate = self.wins/float(self.wins+self.losses)
        parentvisits = self.parent.wins+self.parent.losses
        if not parentvisits:
            return winrate
        nodevisits = self.wins+self.losses
        return winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits))
    def best_child(self):
        """Child with the highest UCB score."""
        maxscore = -1
        maxchild = None
        for child in self.pos_child:
            if child and child.score() > maxscore:
                maxchild = child
                maxscore = child.score()
        return maxchild
    def best_visited(self):
        """Child with the most visits (the move actually played)."""
        maxvisits = -1
        maxchild = None
        for child in self.pos_child:
#            if child:
#                print to_xy(child.pos), child.wins, child.losses, child.score()
            if child and (child.wins+child.losses) > maxvisits:
                maxvisits, maxchild = (child.wins+child.losses), child
        return maxchild
def user_move(board):
    """Read a move from stdin: "x y" plays, "p" passes, "q" raises EOFError."""
    while True:
        entry = raw_input('?').strip()
        if entry == 'p':
            return PASS
        if entry == 'q':
            raise EOFError
        try:
            x, y = [int(token) for token in entry.split()]
        except ValueError:
            continue   # not two integers; ask again
        if not (0 <= x < SIZE and 0 <= y < SIZE):
            continue   # off the board; ask again
        pos = to_pos(x, y)
        if board.useful(pos):
            return pos
def computer_move(board):
    """Pick a move for the side to move via UCT search over GAMES playouts."""
    # If even a random probe finds nothing useful, pass immediately.
    if board.random_move() == PASS:
        return PASS
    tree = UCTNode()
    tree.unexplored = board.useful_moves()
    scratch_board = Board()
    for _ in range(GAMES):
        scratch_board.reset()
        scratch_board.replay(board.history)
        tree.play(scratch_board)
    return tree.best_visited().pos
def versus_cpu():
    """Run one deterministic (seeded) computer move on a fresh board."""
    random.seed(1)
    fresh_board = Board()
    computer_move(fresh_board)
def main(n):
    """Benchmark entry: time n runs of versus_cpu() after 5 warmup runs."""
    timings = []
    for _ in range(5):
        versus_cpu()  # warmup
    for _ in range(n):
        start = time.time()
        versus_cpu()
        timings.append(time.time() - start)
    return timings
if __name__ == "__main__":
    import util, optparse
    # Standard benchmark-runner entry point; util supplies the common
    # options (e.g. --num_runs) and the timing harness.
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Go benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, main)
| kmod/icbd | icbd/compiler/benchmarks/pypy/go.py | Python | mit | 14,045 |
"""Cleanup script."""
from grr.lib import export_utils
# After you do this the UI complains a little, but creating a new hunt fixes it.
hunts = aff4.FACTORY.Open("aff4:/hunts/")
for hunt in hunts.ListChildren():
aff4.FACTORY.Delete(hunt)
# Delete clients that haven't polled in for 2hours
for fd in aff4.FACTORY.MultiOpen(export_utils.GetAllClients()):
cutoff = rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("2h")
if fd.Get(fd.Schema.PING) < cutoff:
aff4.FACTORY.Delete(fd.urn)
# Delete all flows
for client in export_utils.GetAllClients():
aff4.FACTORY.Delete(client.Add("flows"))
| destijl/grr-workshop-setup | cleanup.py | Python | apache-2.0 | 602 |
"""896. Monotonic Array
https://leetcode.com/problems/monotonic-array/description/
An array is monotonic if it is either monotone increasing or monotone
decreasing.
An array A is monotone increasing if for all i <= j, A[i] <= A[j]. An array
A is monotone decreasing if for all i <= j, A[i] >= A[j].
Return true if and only if the given array A is monotonic.
Example 1:
Input: [1,2,2,3]
Output: true
Example 2:
Input: [6,5,4,4]
Output: true
Example 3:
Input: [1,3,2]
Output: false
Example 4:
Input: [1,2,4,5]
Output: true
Example 5:
Input: [1,1,1]
Output: true
Note:
1 <= A.length <= 50000
-100000 <= A[i] <= 100000
"""
from typing import List
class Solution:
    def is_monotonic(self, a: List[int]) -> bool:
        """Return True if `a` is entirely non-decreasing or non-increasing."""
        adjacent = list(zip(a, a[1:]))
        rises = any(x < y for x, y in adjacent)
        falls = any(x > y for x, y in adjacent)
        # A monotonic array never both rises and falls.
        return not (rises and falls)
| isudox/leetcode-solution | python-algorithm/leetcode/problem_896.py | Python | mit | 1,155 |
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from actstream.models import user_stream
from dnstorm.app import DNSTORM_URL
from dnstorm.app.utils import get_option
from dnstorm.app.models import Problem, Idea
from dnstorm.app.utils import get_option
def base(request):
    """
    Provides basic variables used for all templates.
    """
    context = {}
    context['dnstorm_url'] = DNSTORM_URL
    # Site links and login/logout urls
    if not context.get('site_title', None):
        title = get_option('site_title')
        description = get_option('site_description')
        context['site_title'] = '%s | %s' % (title, description)
    context['site_url'] = get_option('site_url')
    context['login_form'] = AuthenticationForm()
    if 'next' in request.GET:
        context['login_url'] = ''
        context['logout_url'] = ''
    else:
        next_suffix = '?next=' + request.build_absolute_uri()
        context['login_url'] = reverse('login') + next_suffix
        context['logout_url'] = reverse('logout') + next_suffix
    # Flags derived from the resolved url
    context['is_update'] = 'update' in request.resolver_match.url_name
    # Per-user activity stream (only for authenticated users)
    if request.user.is_authenticated():
        context['user_activity'] = user_stream(request.user, with_user_activity=True)
        context['user_activity_counter'] = get_option('user_%d_activity_counter' % request.user.id)
    else:
        context['user_activity'] = None
        context['user_activity_counter'] = None
    return context
| vmassuchetto/dnstorm | dnstorm/app/context_processors.py | Python | gpl-2.0 | 1,367 |
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import re
import unicodedata
# "= Title =" at the very start of the text (level-1 heading).
h1_start = re.compile(r"^\s*=(?P<title>[^=]+)=*[ \t]*")
# Characters allowed inside a heading title (anything but "=").
valid_title = re.compile(r"[^=]+")
# Level 2-6 headings such as "== Title ==", matched line by line.
general_heading = re.compile(r"^\s*(={2,6}(?P<title>" + valid_title.pattern +
                             ")=*)\s*$", flags=re.MULTILINE)
# Any run of characters that is not a word char, dash, underscore or space.
invalid_symbols = re.compile(r"[^\w\-_\s]+")
def strip_accents(s):
    """Return `s` with combining accent marks removed (NFD, drop 'Mn')."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
# Ordinal -> ASCII transliteration table for German umlauts and sharp s,
# for use with unicode.translate() / str.translate().
REPLACEMENTS = {
    ord('ä'): 'ae',
    ord('ö'): 'oe',
    ord('ü'): 'ue',
    ord('ß'): 'ss',
    ord('Ä'): 'Ae',
    ord('Ö'): 'Oe',
    ord('Ü'): 'Ue',
    ord('ẞ'): 'SS'
}
def substitute_umlauts(s):
    """Transliterate German umlauts and sharp s in `s` to ASCII digraphs."""
    return s.translate(REPLACEMENTS)
def remove_unallowed_chars(s):
    """Strip every character that is not a word char, dash, underscore or space."""
    return invalid_symbols.sub('', s)
def remove_and_compress_whitespaces(s):
    """Collapse whitespace runs to single underscores, trimming the edges."""
    joined = '_'.join(s.split())
    return joined.strip('_')
def turn_into_valid_short_title(title, short_title_set=(), max_length=20):
    """Derive a URL-safe short title from `title`.

    Transliterates umlauts, strips accents and forbidden characters,
    compresses whitespace to underscores, drops leading digits/dashes/
    underscores and truncates to `max_length`.  If the result collides with
    an entry in `short_title_set`, a numeric suffix is appended (shortening
    the stem as needed) until the name is unique.
    """
    st = substitute_umlauts(title)
    st = strip_accents(st)
    st = remove_unallowed_chars(st)
    st = remove_and_compress_whitespaces(st)
    st = st.lstrip('1234567890-_')
    st = st[:min(len(st), max_length)]
    if not st:
        st = 'sub'  # fallback stem for titles that reduce to nothing
    if st not in short_title_set:
        return st
    else:
        i = 0
        while True:
            i += 1
            suffix = str(i)
            # Shorten the stem so stem+suffix still fits max_length.
            new_st = st[:min(max_length - len(suffix), len(st))] + suffix
            if new_st not in short_title_set:
                return new_st
def get_heading_matcher(level=0):
    """Return a compiled, MULTILINE regex matching wiki-style headings.

    With ``level`` between 1 and 6 the pattern matches exactly that heading
    level (that many "=" signs); with the default 0 it matches any level
    from 1 to 6.  The regex exposes a ``title`` group and an optional
    ``short_title`` group (text after a "§" separator).

    Raises:
        ValueError: if ``level`` is outside 0..6.
    """
    if 0 < level < 7:
        count = "%d" % level
    elif level == 0:
        # BUG FIX: this was "1, 6" -- with the embedded space, `re` treats
        # "={1, 6}" as a *literal* string rather than a quantifier, so the
        # any-level matcher never matched real headings.
        count = "1,6"
    else:
        raise ValueError(
            "level must be between 1 and 6 or 0, but was %d." % level)
    pattern = r"^\s*={%s}(?P<title>[^=§]+)" \
              r"(?:§\s*(?P<short_title>[^=§\s][^=§]*))?=*\s*$"
    return re.compile(pattern % count, flags=re.MULTILINE)
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TASeries.py
# firstflamingo/treinenaapje
#
# Created by Berend Schotanus on 21-Feb-13.
#
import webapp2, logging, xml.sax, bisect
from google.appengine.ext import db
from google.appengine.api import memcache
from datetime import timedelta, datetime
from ffe import config
from ffe.gae import counter_dict, issue_tasks
from ffe.markup import XMLDocument, XMLElement
from ffe.ffe_time import now_utc, now_cet, mark_utc, minutes_from_string, cet_from_string, minutes_from_time, time_from_minutes
from TABasics import TAModel, TAResourceHandler
from TAScheduledPoint import TAScheduledPoint, Direction
from TAMission import TAMission, MissionStatuses, round_mission_offset
from TSStation import TSStation
from TAStop import TAStop
from TAChart import TAChart
# ====== Series Model ==========================================================================
class TASeries(TAModel):
    """A train series: its ordered scheduled points plus its missions."""
    agent_url = '/TASeries'
    # Stored attributes:
    type = db.StringProperty(indexed=False)
    # Transient attributes (lazily loaded, mirrored in memcache):
    _points = None          # list of TAScheduledPoint, ordered by km
    _points_dict = None     # station id/name -> index into _points
    _missions_list = None   # [down, up] lists of (offset_time, number)
@classmethod
def import_xml(cls, filename):
fp = open(filename, 'r')
xml.sax.parse(fp, SeriesImporter())
def import_schedule(self):
filename = 'series.data/%s.xml' % self.id
logging.info('import %s' % filename)
xml_string = open(filename, 'r').read()
TAScheduledPoint.parse_schedule(xml_string, self)
    @classmethod
    def statistics(cls, now=None):
        """Histogram the status (and, for running missions, the delay) of all
        currently active missions across every series.

        Args:
            now: CET datetime to evaluate at; defaults to the current CET time.

        Returns:
            dict with 'status' and 'delay' histograms plus the global
            'counter' dict.
        """
        if now is None:
            now = now_cet()
        status_hist = {}
        delay_hist = {}
        for seriesID in TASeries.all_ids():
            series = TASeries.get(seriesID)
            for mission_id in series.current_mission_ids(Direction.up, now) + series.current_mission_ids(Direction.down, now):
                mission = TAMission.get(mission_id)
                status, delay = mission.status_at_time(now)
                data = MissionStatuses.s[status]
                status_hist[data] = status_hist.get(data, 0) + 1
                if status == MissionStatuses.running:
                    # Delay bucketed by its rounded value (units as reported
                    # by status_at_time -- presumably minutes; confirm).
                    data = '%.0f' % delay
                    delay_hist[data] = delay_hist.get(data, 0) + 1
        return {'status': status_hist, 'delay': delay_hist, 'counter': counter_dict()}
@property
def name(self):
if self.country == 'eu':
return self.code
else:
return '%d00' % int(self.code)
    def load_points(self):
        """Fetch this series' scheduled points (ordered by km) and rebuild the
        station-id/name -> index lookup, then refresh the memcache entry."""
        query = db.Query(TAScheduledPoint).filter('series_id =', self.id).order('km')
        array = query.fetch(100)
        if not array:
            array = []
        self._points = array
        self._points_dict = {}
        for index in range(len(self._points)):
            point = self._points[index]
            self._points_dict[point.station_id] = index
            name = point.stationName
            if name:
                # Allow lookup by station name as well as by id.
                self._points_dict[name] = index
        self.cache_set()
def reset_points(self):
self._points = None
self._points_dict = None
self.cache_set()
@property
def points(self):
if self._points is None:
self.load_points()
return self._points
@property
def points_dict(self):
if self._points_dict is None:
self.load_points()
return self._points_dict
@property
def first_point(self):
if self.points:
return self.points[0]
@property
def last_point(self):
if self.points:
return self.points[-1]
    @property
    def origin(self):
        """Display name of the first station on the route ('-' if none)."""
        return self.name_for_point(self.first_point)
    @property
    def destination(self):
        """Display name of the last station on the route ('-' if none)."""
        return self.name_for_point(self.last_point)
@staticmethod
def name_for_point(point):
if point is None:
return '-'
else:
station_name = point.stationName
if station_name is None:
station = TAStation.get(point.station_id)
station_name = station.name
point.stationName = station_name
point.put()
return station_name
    @property
    def mission_lists(self):
        """Two lists of (offset_time, number) tuples indexed by direction
        (down first, up second), each sorted by offset; built lazily from the
        datastore."""
        if self._missions_list is None:
            down_array = []
            up_array = []
            query = db.Query(TAMission).filter('series_id =', self.id)
            for mission in query.fetch(200):
                if mission.up:
                    up_array.append((mission.offset_time, mission.number))
                else:
                    down_array.append((mission.offset_time, mission.number))
            up_array.sort()
            down_array.sort()
            # Assigning through the setter also refreshes the cache.
            self.mission_lists = [down_array, up_array]
        return self._missions_list
    @mission_lists.setter
    def mission_lists(self, array):
        """Replace the cached [down, up] mission lists and refresh the cache."""
        self._missions_list = array
        self.cache_set()
@property
def planned_mission_ids(self):
array = []
for direction in (Direction.up, Direction.down):
for offset, number in self.mission_lists[direction]:
if number < 99999:
array.append('%s.%d' % (self.country, number))
return array
@property
def nr_of_missions(self):
return len(self.mission_lists[Direction.down]) + len(self.mission_lists[Direction.up])
def get_missions(self, direction, current=False):
array = []
if current:
id_list = self.current_mission_ids(direction)
else:
id_list = self.all_mission_ids(direction)
for missionID in id_list:
mission = TAMission.get(missionID)
if not mission:
continue
array.append(mission)
return array
@property
def up_missions(self):
return self.get_missions(Direction.up, current=False)
@property
def down_missions(self):
return self.get_missions(Direction.down, current=False)
@property
def current_up_missions(self):
return self.get_missions(Direction.up, current=True)
@property
def current_down_missions(self):
return self.get_missions(Direction.down, current=True)
    @property
    def offset_overview(self):
        """Histogram of offset minutes per mission-number modulo-4 group.

        Returns:
            list of four {minute: count} dicts, one per (number % 4) value.
        """
        overview = [{}, {}, {}, {}]
        for direction in (Direction.up, Direction.down):
            for offset, number in self.mission_lists[direction]:
                histogram = overview[number % 4]
                key = offset.minute
                histogram[key] = histogram.get(key, 0) + 1
        return overview
    @property
    def needed_offset_changes(self):
        """Per-direction offset correction derived from the offset overview.

        For each modulo-4 group the most frequent offset minute is taken; if
        that offset would push the departure at the series endpoint past the
        hour it is reduced by 60.  The smallest such offset per direction is
        returned as [down_delta, up_delta] (None when no data).

        NOTE(review): assumes group % 2 maps onto Direction values -- confirm
        against the Direction enum.
        """
        delta_offsets = [None, None]
        overview = self.offset_overview
        for group in range(4):
            max_frequency = 0
            offset = 0
            # Pick the most frequent offset minute in this group.
            for key, value in overview[group].iteritems():
                if value > max_frequency:
                    max_frequency = value
                    offset = int(key)
            direction = group % 2
            if direction == Direction.up:
                departure = offset + self.first_point.upDeparture
            else:
                departure = offset + self.last_point.downDeparture
            if departure >= 60:
                offset -= 60
            if delta_offsets[direction] is None or offset < delta_offsets[direction]:
                delta_offsets[direction] = offset
        return delta_offsets
@property
def xml(self):
element = XMLElement('series', {'id': self.id, 'type': self.type})
return element
@property
def xml_schedule(self):
element = self.xml
points_tag = XMLElement('routePoints')
up_tag = XMLElement('upSchedule')
down_tag = XMLElement('downSchedule')
for point in self.points:
points_tag.add(point.station_xml)
up_tag.add(point.up_xml)
down_tag.add(point.down_xml)
element.add(points_tag)
element.add(up_tag)
element.add(down_tag)
return element
@property
def xml_missions(self):
element = self.xml
missions_tag = XMLElement('missions')
for missionID in self.planned_mission_ids:
mission = TAMission.get(missionID)
missions_tag.add(mission.xml)
element.add(missions_tag)
return element
    @property
    def xml_document(self):
        """Complete timetable document wrapping this series' schedule."""
        document = TimetableDocument()
        document.root.add(self.xml_schedule)
        return document
    def activate_new_day(self, now):
        """Roll the series over to a new day.

        Registers all missions with this ISO week's chart, expires
        supplementary missions, re-activates the regular ones for today's
        date, and (on Sundays, iso_day == 7) re-verifies all route points.
        All changes are pushed to memcache and the datastore in bulk.
        """
        year, week, iso_day = now.isocalendar()
        # One chart per series per ISO week.
        chart_id = '%s_%04d%02d' % (self.id, year, week)
        chart = self.get_chart_with_id(chart_id)
        new_missions_list = [[], []]
        updated_missions = {}
        expired_mission_ids = []
        expired_missions = []
        updated_points = {}
        updated_objects = [chart]
        for mission in (self.down_missions + self.up_missions):
            chart.add_mission(mission)
            if mission.supplementary:
                # Supplementary missions do not survive the day change.
                expired_mission_ids.append(mission.id)
                expired_missions.append(mission)
            else:
                mission.stops = []
                mission.nominalDate = now.date()
                mission.activate_mission(now)
                # mission.up is used as the direction slot index (0/1).
                new_missions_list[mission.up].append((mission.offset_time, mission.number))
                updated_missions[mission.id] = mission
                updated_objects.append(mission)
        if iso_day == 7:
            # Weekly maintenance: let the chart verify every route point.
            for point in self.points:
                chart.verifyPoint(point)
                if point.needs_datastore_put:
                    updated_points[point.id] = point
                    updated_objects.append(point)
        self.mission_lists = new_missions_list
        self.cache_set()
        chart.cache_set()
        memcache.delete_multi(expired_mission_ids, namespace='TAMission')
        memcache.set_multi(updated_missions, namespace='TAMission')
        memcache.set_multi(updated_points, namespace='TAScheduledPoint')
        db.delete(expired_missions)
        db.put(updated_objects)
@staticmethod
def get_chart_with_id(identifier):
chart = TAChart.get(identifier)
if not chart:
chart = TAChart.new(identifier)
return chart
# Managing RoutePoints:
def point_at_index(self, index):
if index is not None and len(self.points) > index:
return self.points[index]
else:
logging.warning('series %s tries retrieving None index' % self.id)
return None
    def index_for_station(self, name_or_id):
        """Index of the route point for a station name or id.

        Looks up the points dictionary first by the given key, then by the
        station id resolved from the name.  Returns None (and logs) when
        the station is not on this route.
        """
        index = self.points_dict.get(name_or_id, None)
        if index is None:
            identifier = TSStation.id_for_name(name_or_id)
            if identifier:
                index = self.points_dict.get(identifier, None)
                if index is not None:
                    # Remember the name on the point so future lookups by
                    # name succeed directly; invalidate the cached dict.
                    point = self.points[index]
                    point.stationName = name_or_id
                    point.put()
                    self._points_dict = None
        if index is None:
            logging.info('station %s not found in series %s' % (name_or_id, self.id))
        return index
def point_for_station(self, name_or_id):
index = self.index_for_station(name_or_id)
if index is not None:
return self.points[index]
def points_in_range(self, from_station, to_station):
from_index = self.index_for_station(from_station)
to_index = self.index_for_station(to_station)
result = []
if from_index is not None and to_index is not None:
start = min(from_index, to_index)
stop = max(from_index, to_index) + 1
for index in range(start, stop):
result.append(self.points[index])
if from_index > to_index:
result.reverse()
return result
    def delete_point(self, station_id):
        """Remove a route point and announce revoked stops for all missions.

        A revoked-stop task is scheduled for every mission of the series,
        spaced config.INTERVAL_BETWEEN_UPDATE_MSG seconds apart.
        """
        expired_point = self.point_for_station(station_id)
        if expired_point:
            logging.info('Delete point %s' % station_id)
            tasks = []
            issue_time_cet = now_cet()
            for mission_id in self.all_mission_ids(Direction.up) + self.all_mission_ids(Direction.down):
                # Stagger the update messages so they are not issued at once.
                issue_time_cet += timedelta(seconds=config.INTERVAL_BETWEEN_UPDATE_MSG)
                tasks.append(self.stop_task(TAStop.revoked_stop(mission_id, station_id), issue_time_cet))
            issue_tasks(tasks)
            expired_point.delete()
            self.reset_points()
        else:
            logging.warning('Point %s could not be found for deletion' % station_id)
# Managing missions
def add_mission(self, mission):
if mission.up:
array = self.mission_lists[Direction.up]
else:
array = self.mission_lists[Direction.down]
mission_tuple = (mission.offset_time, mission.number)
if not mission_tuple in array:
bisect.insort(array, mission_tuple)
self.cache_set()
def all_mission_ids(self, direction):
array = []
for offset, number in self.mission_lists[direction]:
array.append('%s.%d' % (self.country, number))
return array
    def current_mission_ids(self, direction, now=None):
        """Ids of missions that could be under way around `now`.

        Selects missions whose offset falls between the start and end of a
        window derived from the route's first/last departure and arrival
        times (with a 30-minute margin), clamped to the current service
        day (a 3-hour grace period past midnight is allowed).
        """
        if now is None:
            now = now_cet()
        if direction == Direction.up:
            start_time = now - timedelta(minutes=(self.last_point.upArrival + 30))
            end_time = now - timedelta(minutes=self.first_point.upDeparture)
        else:
            start_time = now - timedelta(minutes=(self.first_point.downArrival + 30))
            end_time = now - timedelta(minutes=self.last_point.downDeparture)
        # Clamp the window to the service day; subtracting 3 hours keeps
        # missions shortly after midnight with the previous day.
        min_time = (now - timedelta(hours=3)).replace(hour=0, minute=0, second=0)
        if start_time < min_time: start_time = min_time
        max_time = min_time.replace(hour=23, minute=59, second=59)
        if end_time > max_time: end_time = max_time
        source = self.mission_lists[direction]
        # mission_lists is sorted on (offset_time, number); 0 and 999999
        # make the search keys compare below/above any real mission number.
        start_index = bisect.bisect_left(source, (start_time.time(), 0))
        end_index = bisect.bisect_right(source, (end_time.time(), 999999), lo=start_index)
        output = []
        for index in range(start_index, end_index):
            offset, number = source[index]
            output.append('%s.%d' % (self.country, number))
        return output
    def relevant_mission_tuples(self, originID, startTime, timeSpan, direction=None, destinationID=None):
        """(departure_time, mission_id) tuples departing from a station.

        Selects missions whose offset falls inside the window starting at
        `startTime` and lasting `timeSpan`, as seen from the origin
        station.  The direction is either given or derived from the
        origin/destination order along the route.  Returns None when a
        required station is not on this route.
        """
        origin_point = self.point_for_station(originID)
        if not origin_point: return None
        if direction is None:
            destination_point = self.point_for_station(destinationID)
            if not destination_point: return None
            # Travelling towards later up-departures means going up.
            if origin_point.upDeparture < destination_point.upDeparture:
                direction = Direction.up
            else:
                direction = Direction.down
        source = self.mission_lists[direction]
        departure = origin_point.departure_in_direction(direction)
        # Convert the search window to mission-offset minutes (0..1439).
        start_minutes = minutes_from_time(startTime) - departure
        end_minutes = start_minutes + (timeSpan.seconds // 60)
        start_search = time_from_minutes(max(0, start_minutes))
        end_search = time_from_minutes(min(1439, end_minutes))
        # 0 / 999999 bracket all possible mission numbers at a given time.
        start_index = bisect.bisect_left(source, (start_search, 0))
        end_index = bisect.bisect_right(source, (end_search, 999999), lo=start_index)
        output = []
        for index in range(start_index, end_index):
            offset, number = source[index]
            base_time = startTime.replace(hour=offset.hour, minute=offset.minute)
            departure_time = base_time + timedelta(minutes=departure)
            mission_id = '%s.%d' % (self.country, number)
            output.append((departure_time, mission_id))
        return output
    def change_offsets(self, deltaOffsets):
        """Shift route-point times and mission offsets by deltaOffsets.

        deltaOffsets is indexed by direction (in minutes).  Point times
        move forward by the delta while mission offset times move backward
        by the same amount, so absolute departure times are preserved.
        All touched objects are written to memcache and the datastore.
        """
        new_list = [[], []]
        processed_objects = []
        processed_missions = {}
        processed_points = {}
        for point in self.points:
            point.upArrival += deltaOffsets[Direction.up]
            point.upDeparture += deltaOffsets[Direction.up]
            point.downArrival += deltaOffsets[Direction.down]
            point.downDeparture += deltaOffsets[Direction.down]
            processed_points[point.id] = point
            processed_objects.append(point)
        for direction in (Direction.up, Direction.down):
            if deltaOffsets[direction]:
                for missionID in self.all_mission_ids(direction):
                    mission = TAMission.get(missionID)
                    # Arbitrary anchor date; only the time-of-day matters.
                    old_offset = datetime(2002, 2, 2).replace(hour=mission.offset_time.hour, minute=mission.offset_time.minute)
                    new_offset = round_mission_offset(old_offset - timedelta(minutes=deltaOffsets[direction]))
                    mission.offset_time = new_offset.time()
                    new_list[direction].append((mission.offset_time, mission.number))
                    processed_missions[missionID] = mission
                    processed_objects.append(mission)
        # NOTE(review): '_missions_list' does not match the 'mission_lists'
        # attribute used elsewhere in this class, and a direction with a
        # zero/None delta ends up with an empty list here -- confirm this
        # assignment updates the intended backing store.
        self._missions_list = new_list
        memcache.set_multi(processed_points, namespace='TAScheduledPoint')
        memcache.set_multi(processed_missions, namespace='TAMission')
        db.put(processed_objects)
        self.cache_set()
# ====== Series Handler ==========================================================================
class TASeriesHandler(TAResourceHandler):
    """Console handler executing maintenance instructions on a TASeries."""
    resourceClass = TASeries

    def perform(self):
        """Dispatch on the 'inst' request parameter.

        Supported instructions: 'fetch' (import schedule XML), 'new_day'
        (roll the series over to a new day), 'delete_point' (remove a
        route point) and 'optimize_odids' (compact mission od dicts).
        """
        instruction = self.request.get('inst')
        if instruction == 'fetch':
            if self.resource:
                # Re-import the schedule of this one series.
                self.resource.import_schedule()
                self.response.out.write('<a href=\"/console/series?id=%s\">terug naar serie</a>' % self.resource.id)
            else:
                # No series given: import the full series definition file.
                TASeries.import_xml('series.data/series.xml')
                self.redirect('/console/series')
            return
        if not self.resource:
            logging.warning('Resource not found.')
            return
        if instruction == 'new_day':
            now_string = self.request.get('now')
            if now_string:
                # Allow the caller to override 'now' (testing/backfill).
                now = cet_from_string(self.request.get('now'))
            else:
                now = now_cet()
            self.resource.activate_new_day(now)
        elif instruction == 'delete_point':
            sender = self.request.get('sender')
            self.resource.delete_point(sender)
            self.response.out.write('<a href=\"/console/series?id=%s\">terug naar serie</a>' % self.resource.id)
        elif instruction == 'optimize_odids':
            changed_missions = []
            for mission in self.resource.up_missions + self.resource.down_missions:
                mission.optimize_odIDs_dictionary()
                if mission.needs_datastore_put:
                    changed_missions.append(mission)
            # Persist only the missions that actually changed.
            memcache.set_multi(TAMission.dictionary_from_list(changed_missions), namespace='TAMission')
            db.put(changed_missions)
            self.response.out.write('<a href=\"/console/missions?kind=pattern&series=%s\">terug naar serie</a>' %
                                    self.resource.id)
# ========== Mission Handler =====================================================================
class TAMissionHandler(TAResourceHandler):
    """Request handler for TAMission resources."""
    resourceClass = TAMission

    def perform(self):
        """Handle the 'check' instruction by re-checking announcements."""
        if self.request.get('inst') == 'check':
            self.resource.check_mission_announcements(now_cet())

    def receive(self, dictionary):
        """Apply a stop update (given as a repr dict) to the mission."""
        incoming_stop = TAStop.fromRepr(dictionary)
        target_mission = TAMission.get(self.resource_id, create=True)
        target_mission.update_stop(incoming_stop)
# ====== XML Parsers ==========================================================================
class SeriesImporter(xml.sax.handler.ContentHandler):
    """SAX handler building TASeries, TAScheduledPoint and TAMission
    objects from a series definition XML file (Python 2 idioms)."""
    # Parsing state: the series / mission currently being imported.
    series = None
    mission = None

    def startElement(self, name, attrs):
        self.data = []
        if name == 'series':
            id = attrs['id']
            logging.info('import series: %s', id)
            self.series = TASeries.new(id)
            self.series.type = attrs.get('type')
            self.routePoints = {}
        if self.series:
            if name == 'station':
                point = TAScheduledPoint.new_with(self.series.id, attrs['id'])
                point.km = float(attrs['km'])
                stationName = attrs.get('name', None)
                if stationName: point.stationName = stationName
                self.routePoints[attrs['id']] = point
            if name == 'up':
                # Up-direction times/platform for an earlier <station>.
                point = self.routePoints[attrs['station']]
                point.upArrival = minutes_from_string(attrs['arr'])
                point.upDeparture = minutes_from_string(attrs['dep'])
                point.set_platform_string(Direction.up, attrs.get('platform', '-'))
            if name == 'down':
                point = self.routePoints[attrs['station']]
                point.downArrival = minutes_from_string(attrs['arr'])
                point.downDeparture = minutes_from_string(attrs['dep'])
                point.set_platform_string(Direction.down, attrs.get('platform', '-'))
            if name == 'mission':
                self.mission = TAMission.new(attrs['id'])
                self.mission.series_id = self.series.id
                self.mission.offset_string = attrs['offset']
        if self.mission:
            if name == 'od':
                # Origin/destination per day; the literal string 'None'
                # maps to the Python value None.
                origin = attrs['from']
                if origin == 'None': origin = None
                destination = attrs['to']
                if destination == 'None': destination = None
                self.mission.odIDs_dictionary[str(attrs['day'])] = [origin, destination]

    def endElement(self, name):
        if name == 'series':
            # Persist the finished series and all of its route points.
            self.series.put()
            for point in self.routePoints.itervalues():
                point.put()
            self.series = None
            self.routePoints = None
        if name == 'mission' and self.mission:
            self.mission.put()

    def characters(self, string):
        # Collect (stripped) character data; not otherwise used here.
        self.data.append(string.strip())
# ====== Timetable Document ==========================================================================
class TimetableDocument(XMLDocument):
    """XML document whose root element is <timetable>."""
    def __init__(self):
        XMLDocument.__init__(self, 'timetable')
# ====== WSGI Application ==========================================================================
# URL routing: series and mission URLs served by one WSGI application.
SERIES_URL_SCHEMA = [('/TASeries.*', TASeriesHandler),
                     ('/TAMission.*', TAMissionHandler)]
app = webapp2.WSGIApplication(SERIES_URL_SCHEMA, debug=True)
| firstflamingo/treinenaapje | app/TASeries.py | Python | apache-2.0 | 23,563 |
#!/usr/bin/python3
# Shared GStreamer clock: every pipeline in the process uses this single
# system clock, and a network time provider publishes it to remote peers.
import logging
from gi.repository import Gst, GstNet
__all__ = ['Clock', 'NetTimeProvider']
# Port on which the clock is published to other hosts.
port = 9998
log = logging.getLogger('Clock')
log.debug("Obtaining System-Clock")
Clock = Gst.SystemClock.obtain()
log.info("Using System-Clock for all Pipelines: %s", Clock)
log.info("Starting NetTimeProvider on Port %u", port)
# '::' selects the wildcard address (listen on all interfaces).
NetTimeProvider = GstNet.NetTimeProvider.new(Clock, '::', port)
| h01ger/voctomix | voctocore/lib/clock.py | Python | mit | 407 |
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import biblepay_hash
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
# Configuration parsed from the key=value config file (see __main__ below);
# defaults are filled in before use.
settings = {}
def hex_switchEndian(s):
    """Switch the endianness of a hex string (in pairs of hex chars)."""
    pairs = [s[i:i + 2].encode() for i in range(0, len(s), 2)]
    return b''.join(reversed(pairs)).decode()
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    return x % 0x100000000
def bytereverse(x):
    """Reverse the byte order of a 32-bit word."""
    byte0 = (x) << 24
    byte1 = ((x) << 8) & 0x00ff0000
    byte2 = ((x) >> 8) & 0x0000ff00
    byte3 = (x) >> 24
    return uint32(byte0 | byte1 | byte2 | byte3)
def bufreverse(in_buf):
    """Reverse the byte order of every 32-bit word in a buffer."""
    swapped = [struct.pack('@I', bytereverse(struct.unpack('@I', in_buf[i:i + 4])[0]))
               for i in range(0, len(in_buf), 4)]
    return b''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in a buffer."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return b''.join(reversed(words))
def calc_hdr_hash(blk_hdr):
    """Proof-of-work hash of an 80-byte block header.

    Biblepay uses its own PoW hash (biblepay_hash extension) rather than
    Bitcoin's double SHA-256, which is why the original sha256 code was
    commented out; the dead code has been removed.
    """
    return biblepay_hash.getPoWHash(blk_hdr)
def calc_hash_str(blk_hdr):
    """Block-header hash rendered as a hex string."""
    raw = wordreverse(bufreverse(calc_hdr_hash(blk_hdr)))
    return hexlify(raw).decode('utf-8')
def get_blk_dt(blk_hdr):
    """Extract the block timestamp from a header.

    Returns (first-of-month datetime, raw unix time); the timestamp is
    the little-endian uint32 at byte offset 68 of the header.
    """
    (nTime,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    stamp = datetime.datetime.fromtimestamp(nTime)
    return (datetime.datetime(stamp.year, stamp.month, 1), nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    """Read block hashes from the configured hashlist file, one per line.

    When settings['rev_hash_bytes'] is 'true', each hash is byte-reversed
    with hex_switchEndian.  The original left the file handle open; a
    'with' block now guarantees it is closed.
    """
    reverse = settings['rev_hash_bytes'] == 'true'
    with open(settings['hashlist'], "r", encoding="utf8") as f:
        blkindex = [hex_switchEndian(line.rstrip()) if reverse else line.rstrip()
                    for line in f]
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    """Map each block hash to its height (its index in the hash list)."""
    return {blkhash: height for height, blkhash in enumerate(blkindex)}
# Block header and extent on disk.
# fn:     number of the input blk*.dat file the block lives in
# offset: byte offset just past the 80-byte header (start of the payload)
# inhdr:  8-byte magic + length header preceding the block
# blkhdr: the 80-byte block header itself
# size:   payload length in bytes (block length minus the 80-byte header)
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    """Copies blocks from the input blk*.dat files to the output in
    linear (height) order, caching out-of-order blocks in memory up to
    a configurable size and falling back to disk seeks beyond that."""
    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex
        self.blkmap = blkmap
        self.inFn = 0
        self.inF = None
        self.outFn = 0
        self.outsz = 0
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0
        self.lastDate = datetime.datetime(2000, 1, 1)
        # Initial high-water timestamp; raised as newer blocks are seen.
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0 # running total size for items in outOfOrderData

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        """Append one block to the output, rotating the output file when
        the size limit or (optionally) a month boundary is crossed."""
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            self.outF.close()
            if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
            self.outF = None
            self.outFname = None
            self.outFn = self.outFn + 1
            self.outsz = 0
        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            # Start a new output file when the block month advances.
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0
        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")
        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS
        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                  (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        """Path of input file number fn inside the configured input dir."""
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else: # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)
        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        """Scan the input files until every indexed block has been
        written to the output in height order."""
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return
            inhdr = self.inF.read(8)
            # NOTE(review): on Python 3, inhdr[0] is an int, so comparing
            # it with "\0" can never be true -- confirm the end-of-file
            # detection behaves as intended.
            if (not inhdr or (inhdr[0] == "\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue
            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80 # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
            self.hash_str = calc_hash_str(blk_hdr)
            # NOTE(review): this reads the module-global 'blkmap' and
            # 'settings' rather than self.blkmap/self.settings used
            # elsewhere in this class -- confirm that is intentional.
            if not self.hash_str in blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if settings['debug_output'] == 'true':
                    print("Skipping unknown block " + self.hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue
            blkHeight = self.blkmap[self.hash_str]
            self.blkCountIn += 1
            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)
                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()
            else: # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else: # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)
        print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)
    # Parse the key=value config file into the global 'settings' dict.
    f = open(sys.argv[1], encoding="utf8")
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
    # Fill in defaults for every setting not given in the config file.
    if 'netmagic' not in settings:
        settings['netmagic'] = 'bf0c6bbd'
    if 'genesis' not in settings:
        settings['genesis'] = '00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000 * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
    if 'debug_output' not in settings:
        settings['debug_output'] = 'false'
    # Normalise types: sizes/timestamps to int, netmagic to raw bytes.
    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    settings['debug_output'] = settings['debug_output'].lower()
    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)
    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)
    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
| biblepay/biblepay | contrib/linearize/linearize-data.py | Python | mit | 11,820 |
def count_units(number):
    """Return the number of 1-bits in the binary representation of number.

    The original body was a syntax error (`sum(int(x) for x bin(number)[2:]`
    is missing the `in` keyword, a closing parenthesis and the `return`).
    Counting '1' characters in bin() is equivalent and simpler.
    """
    return bin(number).count('1')
if __name__ == '__main__':
    # These asserts are used only for self-checking; they are not needed
    # for the automatic testing.
    assert count_units(4) == 1
    assert count_units(15) == 4
    assert count_units(1) == 1
    assert count_units(1022) == 9
| tomkun/empireofcode | sentry_gun/binary_count.py | Python | mit | 304 |
#!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru
from gnuradio import usrp
from gnuradio import msdd
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, waterfallsink2, scopesink2, form, slider
from optparse import OptionParser
import wx
import sys
import numpy
def pick_subdevice(u):
    """Return the default (side, subdevice) specification.

    Used when the user did not specify a subdevice on the command line;
    this MSDD variant always selects side A, subdevice 0.
    """
    default_spec = (0, 0)
    return default_spec
class app_top_block(stdgui2.std_top_block):
    """wxGTK top block showing an FFT / waterfall / oscilloscope display
    of samples from an MSDD receiver (Python 2 / GNU Radio)."""
    def __init__(self, frame, panel, vbox, argv):
        stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
        self.frame = frame
        self.panel = panel
        # Command-line options (many inherited from the USRP variant of
        # this tool; some are unused with the MSDD source).
        parser = OptionParser(option_class=eng_option)
        parser.add_option("-w", "--which", type="int", default=0,
                          help="select which USRP (0, 1, ...) default is %default",
                          metavar="NUM")
        parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,
                          help="select USRP Rx side A or B (default=first one with a daughterboard)")
        parser.add_option("-A", "--antenna", default=None,
                          help="select Rx Antenna (only on RFX-series boards)")
        parser.add_option("-d", "--decim", type="int", default=16,
                          help="set fgpa decimation rate to DECIM [default=%default]")
        parser.add_option("-f", "--freq", type="eng_float", default=None,
                          help="set frequency to FREQ", metavar="FREQ")
        parser.add_option("-g", "--gain", type="eng_float", default=None,
                          help="set gain in dB (default is midpoint)")
        parser.add_option("-W", "--waterfall", action="store_true", default=False,
                          help="Enable waterfall display")
        parser.add_option("-8", "--width-8", action="store_true", default=False,
                          help="Enable 8-bit samples across USB")
        parser.add_option( "--no-hb", action="store_true", default=False,
                          help="don't use halfband filter in usrp")
        parser.add_option("-S", "--oscilloscope", action="store_true", default=False,
                          help="Enable oscilloscope display")
        parser.add_option("", "--avg-alpha", type="eng_float", default=1e-1,
                          help="Set fftsink averaging factor, default=[%default]")
        parser.add_option("", "--ref-scale", type="eng_float", default=13490.0,
                          help="Set dBFS=0dB input value, default=[%default]")
        (options, args) = parser.parse_args()
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)
        self.options = options
        self.show_debug_info = True
        # build the graph
        if options.no_hb or (options.decim<8):
            #Min decimation of this firmware is 4.
            #contains 4 Rx paths without halfbands and 0 tx paths.
            self.fpga_filename="std_4rx_0tx.rbf"
            # self.u = usrp.source_c(which=options.which, decim_rate=options.decim, fpga_filename=self.fpga_filename)
            self.u = msdd.source_simple("192.168.1.200",0);
        else:
            #Min decimation of standard firmware is 8.
            #standard fpga firmware "std_2rxhb_2tx.rbf"
            #contains 2 Rx paths with halfband filters and 2 tx paths (the default)
            #self.u = usrp.source_c(which=options.which, decim_rate=options.decim)
            self.u = msdd.source_simple("192.168.1.200",0);
        input_rate = self.u.adc_freq() / self.u.decim_rate()
        # Pick the display sink matching the requested mode.
        if options.waterfall:
            self.scope = \
                waterfallsink2.waterfall_sink_c (panel, fft_size=1024, sample_rate=input_rate)
        elif options.oscilloscope:
            self.scope = scopesink2.scope_sink_c(panel, sample_rate=input_rate)
        else:
            self.scope = fftsink2.fft_sink_c (panel, fft_size=1024, sample_rate=input_rate,
                                              ref_scale=options.ref_scale, ref_level=0.0, y_divs = 10,
                                              avg_alpha=options.avg_alpha)
        # MSDD delivers interleaved shorts; convert to complex samples.
        self.conv = gr.interleaved_short_to_complex();
        self.connect(self.u, self.conv, self.scope)
        self._build_gui(vbox)
        self._setup_events()
        # set initial values
        if options.gain is None:
            # if no gain was specified, use the mid-point in dB
            #g = self.subdev.gain_range()
            #g = self.u.gain_range()
            g = [0,10]
            options.gain = float(g[0]+g[1])/2
        if options.freq is None:
            # if no freq was specified, use the mid-point
            #r = self.subdev.freq_range()
            #r = self.u.freq_range()
            r = [30e6, 6e9]
            options.freq = float(r[0]+r[1])/2
        self.set_gain(options.gain)
        if options.antenna is not None:
            print "Selecting antenna %s" % (options.antenna,)
            self.subdev.select_rx_antenna(options.antenna)
        if self.show_debug_info:
            self.myform['decim'].set_value(self.u.decim_rate())
            self.myform['fs@usb'].set_value(self.u.adc_freq() / self.u.decim_rate())
            self.myform['dbname'].set_value("no subdevs used")
            self.myform['baseband'].set_value(0)
            self.myform['ddc'].set_value(0)
        if not(self.set_freq(options.freq)):
            self._set_status_msg("Failed to set initial frequency")

    def _set_status_msg(self, msg):
        # Show a message in the frame's status bar.
        self.frame.GetStatusBar().SetStatusText(msg, 0)

    def _build_gui(self, vbox):
        """Build the main controls: display window, frequency field and
        gain slider."""
        def _form_set_freq(kv):
            return self.set_freq(kv['freq'])
        vbox.Add(self.scope.win, 10, wx.EXPAND)
        # add control area at the bottom
        self.myform = myform = form.form()
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0, 0)
        myform['freq'] = form.float_field(
            parent=self.panel, sizer=hbox, label="Center freq", weight=1,
            callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
        hbox.Add((5,0), 0, 0)
        #g = self.subdev.gain_range()
        #g = self.u.gain_range()
        g = [0,10]
        myform['gain'] = form.slider_field(parent=self.panel, sizer=hbox, label="Gain",
                                           weight=3,
                                           min=int(g[0]), max=int(g[1]),
                                           callback=self.set_gain)
        hbox.Add((5,0), 0, 0)
        vbox.Add(hbox, 0, wx.EXPAND)
        self._build_subpanel(vbox)

    def _build_subpanel(self, vbox_arg):
        # build a secondary information panel (sometimes hidden)
        # FIXME figure out how to have this be a subpanel that is always
        # created, but has its visibility controlled by foo.Show(True/False)
        def _form_set_decim(kv):
            return self.set_decim(kv['decim'])
        if not(self.show_debug_info):
            return
        panel = self.panel
        vbox = vbox_arg
        myform = self.myform
        #panel = wx.Panel(self.panel, -1)
        #vbox = wx.BoxSizer(wx.VERTICAL)
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5,0), 0)
        myform['decim'] = form.int_field(
            parent=panel, sizer=hbox, label="Decim",
            callback=myform.check_input_and_call(_form_set_decim, self._set_status_msg))
        hbox.Add((5,0), 1)
        myform['fs@usb'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Fs@gigE")
        hbox.Add((5,0), 1)
        myform['dbname'] = form.static_text_field(
            parent=panel, sizer=hbox)
        hbox.Add((5,0), 1)
        myform['baseband'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Analog BB")
        hbox.Add((5,0), 1)
        myform['ddc'] = form.static_float_field(
            parent=panel, sizer=hbox, label="DDC")
        hbox.Add((5,0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)

    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.
        @param target_freq: frequency in Hz
        @rtype: bool
        Tuning is a two step process.  First we ask the front-end to
        tune as close to the desired frequency as it can.  Then we use
        the result of that operation and our target_frequency to
        determine the value for the digital down converter.
        """
        #r = self.u.tune(0, self.subdev, target_freq)
        r = self.u.set_rx_freq(0, target_freq)
        if r:
            self.myform['freq'].set_value(target_freq)     # update displayed value
            # if self.show_debug_info:
            #     self.myform['baseband'].set_value(r.baseband_freq)
            #     self.myform['ddc'].set_value(r.dxc_freq)
            if not self.options.waterfall and not self.options.oscilloscope:
                self.scope.set_baseband_freq(target_freq)
            return True
        return False

    def set_gain(self, gain):
        # Update the slider display and program the PGA.
        self.myform['gain'].set_value(gain)     # update displayed value
        self.u.set_pga(0,gain)

    def set_decim(self, decim):
        """Change the decimation rate and retune the display's sample rate."""
        ok = self.u.set_decim_rate(decim)
        if not ok:
            print "set_decim failed"
        input_rate = self.u.adc_freq() / self.u.decim_rate()
        self.scope.set_sample_rate(input_rate)
        if self.show_debug_info:    # update displayed values
            self.myform['decim'].set_value(self.u.decim_rate())
            self.myform['fs@usb'].set_value(self.u.adc_freq() / self.u.decim_rate())
        return ok

    def _setup_events(self):
        # Double-click retuning only makes sense on the FFT display.
        if not self.options.waterfall and not self.options.oscilloscope:
            self.scope.win.Bind(wx.EVT_LEFT_DCLICK, self.evt_left_dclick)

    def evt_left_dclick(self, event):
        """Retune on double-click: with Cmd/Ctrl held, jump to the peak;
        otherwise jump to the clicked frequency."""
        (ux, uy) = self.scope.win.GetXY(event)
        if event.CmdDown():
            # Re-center on maximum power
            points = self.scope.win._points
            if self.scope.win.peak_hold:
                if self.scope.win.peak_vals is not None:
                    ind = numpy.argmax(self.scope.win.peak_vals)
                else:
                    ind = int(points.shape()[0]/2)
            else:
                ind = numpy.argmax(points[:,1])
            (freq, pwr) = points[ind]
            target_freq = freq/self.scope.win._scale_factor
            print ind, freq, pwr
            self.set_freq(target_freq)
        else:
            # Re-center on clicked frequency
            target_freq = ux/self.scope.win._scale_factor
            self.set_freq(target_freq)
def main():
    """Create the wx application around app_top_block and run its loop."""
    gui_app = stdgui2.stdapp(app_top_block, "USRP FFT", nstatus=1)
    gui_app.MainLoop()
# Script entry point: start the GUI event loop.
if __name__ == '__main__':
    main ()
| pgoeser/gnuradio | gr-msdd6000/src/python-examples/new_msdd_fft.py | Python | gpl-3.0 | 11,393 |
import os
import nipype
from nipype.interfaces.utility import Function,IdentityInterface
import nipype.pipeline.engine as pe # pypeline engine
from nipype.interfaces.freesurfer import *
from nipype.interfaces.io import DataGrabber
from nipype.interfaces.utility import Merge
def create_ba_maps_wf(name="Brodmann_Area_Maps", th3=True, exvivo=True,
                      entorhinal=True):
    """Build a Nipype workflow producing Brodmann Area (BA) maps.

    For each hemisphere, and once each for the thresholded and the
    unthresholded label sets, the workflow transfers BA labels from a
    source subject onto the subject's surface (Label2Label), merges them
    into an annotation (Label2Annot), and summarises them with
    ParcellationStats.

    :param name: name given to the returned workflow.
    :param th3: forwarded to ParcellationStats' ``th3`` option.
    :param exvivo: use the ``*_exvivo`` label-file naming scheme.
    :param entorhinal: also map the 'entorhinal' label (present in newer
        FreeSurfer label sets; see the perirhinal note below).
    :return: tuple of (workflow, list of outputspec field names).
    """
    # Brodmann Area Maps (BA Maps) and Hinds V1 Atlas
    inputs = ['lh_sphere_reg',
              'rh_sphere_reg',
              'lh_white',
              'rh_white',
              'lh_pial',
              'rh_pial',
              'lh_orig',
              'rh_orig',
              'transform',
              'lh_thickness',
              'rh_thickness',
              'lh_cortex_label',
              'rh_cortex_label',
              'brainmask',
              'aseg',
              'ribbon',
              'wm',
              'src_subject_id',
              'src_subject_dir',
              'color_table']
    inputspec = pe.Node(IdentityInterface(fields=inputs),
                        name="inputspec")

    ba_WF = pe.Workflow(name=name)

    ba_outputs = ['lh_BAMaps_stats',
                  'lh_color',
                  'lh_BAMaps_labels',
                  'lh_BAMaps_annotation',
                  'lh_thresh_BAMaps_stats',
                  'lh_thresh_color',
                  'lh_thresh_BAMaps_labels',
                  'lh_thresh_BAMaps_annotation',
                  'rh_BAMaps_stats',
                  'rh_color',
                  'rh_BAMaps_labels',
                  'rh_BAMaps_annotation',
                  'rh_thresh_BAMaps_stats',
                  'rh_thresh_color',
                  'rh_thresh_BAMaps_labels',
                  'rh_thresh_BAMaps_annotation']

    outputspec = pe.Node(IdentityInterface(fields=ba_outputs),
                         name="outputspec")

    labels = ["BA1", "BA2", "BA3a", "BA3b", "BA4a", "BA4p", "BA6",
              "BA44", "BA45", "V1", "V2", "MT", "perirhinal"]
    if entorhinal:
        # Keep 'perirhinal' last; entorhinal is inserted just before it.
        labels.insert(-1, 'entorhinal')
    for hemisphere in ['lh', 'rh']:
        for threshold in [True, False]:
            # DataGrabber template for the source subject's registration
            # sphere, white surface and one file per label.
            field_template = dict(sphere_reg='surf/{0}.sphere.reg'.format(hemisphere),
                                  white='surf/{0}.white'.format(hemisphere))
            out_files = list()
            source_fields = list()
            if threshold:
                for label in labels:
                    if label == 'perirhinal' and not entorhinal:
                        # versions < 6.0 do not use thresh.perirhinal
                        continue
                    if exvivo:
                        out_file = '{0}.{1}_exvivo.thresh.label'.format(hemisphere, label)
                    else:
                        out_file = '{0}.{1}.thresh.label'.format(hemisphere, label)
                    out_files.append(out_file)
                    field_template[label] = 'label/' + out_file
                    source_fields.append(label)
                node_name = 'BA_Maps_' + hemisphere + '_Thresh'
            else:
                for label in labels:
                    if exvivo:
                        out_file = '{0}.{1}_exvivo.label'.format(hemisphere, label)
                    else:
                        out_file = '{0}.{1}.label'.format(hemisphere, label)
                    out_files.append(out_file)
                    field_template[label] = 'label/' + out_file
                    source_fields.append(label)
                node_name = 'BA_Maps_' + hemisphere

            source_subject = pe.Node(DataGrabber(outfields=source_fields + ['sphere_reg', 'white']),
                                     name=node_name + "_srcsubject")
            source_subject.inputs.template = '*'
            source_subject.inputs.sort_filelist = False
            source_subject.inputs.field_template = field_template
            ba_WF.connect([(inputspec, source_subject, [('src_subject_dir', 'base_directory')])])

            # Collect one grabbed label file per input of the Merge node.
            merge_labels = pe.Node(Merge(len(out_files)),
                                   name=node_name + "_Merge")
            for i,label in enumerate(source_fields):
                ba_WF.connect([(source_subject, merge_labels, [(label, 'in{0}'.format(i+1))])])

            # Map each source label onto the target subject's surface.
            node = pe.MapNode(Label2Label(), name=node_name + '_Label2Label',
                              iterfield=['source_label', 'out_file'])
            node.inputs.hemisphere = hemisphere
            node.inputs.out_file = out_files
            node.inputs.copy_inputs = True
            ba_WF.connect([(merge_labels, node, [('out', 'source_label')]),
                           (source_subject, node, [('sphere_reg', 'source_sphere_reg'),
                                                   ('white', 'source_white')]),
                           (inputspec, node, [('src_subject_id', 'source_subject')])])

            # Combine the mapped labels into a single annotation file.
            label2annot = pe.Node(Label2Annot(), name=node_name + '_2_Annot')
            label2annot.inputs.hemisphere = hemisphere
            label2annot.inputs.verbose_off = True
            label2annot.inputs.keep_max = True
            label2annot.inputs.copy_inputs = True

            # Anatomical statistics per labelled region.
            stats_node = pe.Node(ParcellationStats(), name=node_name + '_Stats')
            stats_node.inputs.hemisphere = hemisphere
            stats_node.inputs.mgz = True
            stats_node.inputs.th3 = th3
            stats_node.inputs.surface = 'white'
            stats_node.inputs.tabular_output = True
            stats_node.inputs.copy_inputs = True

            if threshold:
                label2annot.inputs.out_annot = "BA_exvivo.thresh"
                ba_WF.connect([(stats_node, outputspec, [('out_color',
                                                          '{0}_thresh_color'.format(hemisphere)),
                                                         ('out_table',
                                                          '{0}_thresh_BAMaps_stats'.format(hemisphere))]),
                               (label2annot, outputspec,
                                [('out_file', '{0}_thresh_BAMaps_annotation'.format(hemisphere))]),
                               (node, outputspec,
                                [('out_file', '{0}_thresh_BAMaps_labels'.format(hemisphere))])])
            else:
                label2annot.inputs.out_annot = "BA_exvivo"
                ba_WF.connect([(stats_node, outputspec, [('out_color',
                                                          '{0}_color'.format(hemisphere)),
                                                         ('out_table',
                                                          '{0}_BAMaps_stats'.format(hemisphere))]),
                               (label2annot, outputspec,
                                [('out_file', '{0}_BAMaps_annotation'.format(hemisphere))]),
                               (node, outputspec, [('out_file',
                                                    '{0}_BAMaps_labels'.format(hemisphere))])])

            ba_WF.connect([(inputspec, node, [('{0}_sphere_reg'.format(hemisphere),
                                               'sphere_reg'),
                                              ('{0}_white'.format(hemisphere), 'white'),
                                              ]),
                           (node, label2annot, [('out_file', 'in_labels')]),
                           (inputspec, label2annot, [('{0}_orig'.format(hemisphere), 'orig'),
                                                     ('color_table', 'color_table')]),
                           (label2annot, stats_node,
                            [('out_file', 'in_annotation')]),
                           (inputspec, stats_node, [('{0}_thickness'.format(hemisphere),
                                                     'thickness'),
                                                    ('{0}_cortex_label'.format(hemisphere),
                                                     'cortex_label'),
                                                    ('lh_white', 'lh_white'),
                                                    ('rh_white', 'rh_white'),
                                                    ('lh_pial', 'lh_pial'),
                                                    ('rh_pial', 'rh_pial'),
                                                    ('transform', 'transform'),
                                                    ('brainmask', 'brainmask'),
                                                    ('aseg', 'aseg'),
                                                    ('wm', 'wm'),
                                                    ('ribbon', 'ribbon')])])

    return ba_WF, ba_outputs
| FCP-INDI/nipype | nipype/workflows/smri/freesurfer/ba_maps.py | Python | bsd-3-clause | 8,682 |
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Stacked chart related tests"""
from pygal import StackedLine
def test_stacked_line():
    """A basic stacked line chart cumulates the series values."""
    chart = StackedLine()
    chart.add('one_two', [1, 2])
    chart.add('ten_twelve', [10, 12])
    rendered = chart.render_pyquery()
    found = set(rendered("desc.value").text().split(' '))
    assert found == {'1', '2', '11', '14'}
def test_stacked_line_reverse():
    """With stack_from_top the series are stacked starting from the top."""
    chart = StackedLine(stack_from_top=True)
    chart.add('one_two', [1, 2])
    chart.add('ten_twelve', [10, 12])
    rendered = chart.render_pyquery()
    found = set(rendered("desc.value").text().split(' '))
    assert found == {'11', '14', '10', '12'}
def test_stacked_line_log():
    """A logarithmic scale does not change the stacked values."""
    chart = StackedLine(logarithmic=True)
    chart.add('one_two', [1, 2])
    chart.add('ten_twelve', [10, 12])
    rendered = chart.render_pyquery()
    found = set(rendered("desc.value").text().split(' '))
    assert found == {'1', '2', '11', '14'}
def test_stacked_line_interpolate():
    """Cubic interpolation keeps the original stacked data points."""
    chart = StackedLine(interpolate='cubic')
    chart.add('one_two', [1, 2])
    chart.add('ten_twelve', [10, 12])
    rendered = chart.render_pyquery()
    found = set(rendered("desc.value").text().split(' '))
    assert found == {'1', '2', '11', '14'}
| supracd/pygal | pygal/test/test_stacked.py | Python | lgpl-3.0 | 2,083 |
import bottle
import uuid
import time
import threading
from naoqi import ALProxy
from threading import Thread
from bottle import auth_basic
from NaoService import check_auth
app = bottle.Bottle()
name = 'NaoProfilingModule'
path = '/profiling'
sub = []
processes = {}
# This proxy is used to gain access to all
# sensor values of the Nao through NaoQi
memProxy = ALProxy("ALMemory", "localhost", 9559)
ONE = 0
MULT = 1
@app.route('/')
@app.route('/index.html')
@auth_basic(check_auth)
def index():
    """Landing page; profiling is driven entirely through the REST routes."""
    return "This module has no web interface."
def profile(args):
    """Append one timestamped sensor sample to the shared result list.

    ``args['pointer']`` is the list collecting samples and
    ``args['sensor_values']`` is the ALMemory key to read.
    """
    samples = args['pointer']
    sensor_key = args['sensor_values']
    stamp = "%.6f" % time.time()
    reading = str(memProxy.getData(sensor_key))
    samples.append({'timestamp': stamp, 'value': reading})
@app.route('/start/:interval/:sensor#.+#')
@auth_basic(check_auth)
def start(interval=100, sensor=""):
    '''
    Start a background profiling job for one or more sensors.

    @param interval: sampling interval in ms
    @param sensor: the sensor you want to use for profiling, use '+' to provide multiple sensors
    @return: an unique ID for your profiling process. Use this ID to stop
            the profiling using method 'stop'
    '''
    p_id = str(uuid.uuid4())
    interval = int(interval)
    if sensor.find('+') != -1:
        # One worker per sensor; `mapping` keeps sensor name -> sample list.
        sensors = sensor.split('+')
        mapping = {}
        workerlist = []
        for s in sensors:
            # Renamed from `list`, which shadowed the builtin.
            samples = []
            mapping[s] = samples
            worker = StoppableWorker(profile, {'pointer': samples, 'sensor_values': s}, interval)
            workerlist.append(worker)
            worker.start()
        processes[p_id] = (MULT, workerlist, mapping)
    else:
        samples = []
        worker = StoppableWorker(profile, {'pointer': samples, 'sensor_values': sensor}, interval)
        processes[p_id] = (ONE, worker, samples)
        worker.start()
    return p_id
@app.route('/stop/:id')
@auth_basic(check_auth)
def stop(id=""):
    '''
    Stop a profiling job and return its collected samples.

    @param id: the ID of the profiling job you want to stop
    @return: an JSON object containing the results with time stamps for each result
        It will be empty if the given ID is unknown or if there was not enough time between start and stop to profile anything.
        Format will be: {result:[{timestamp, value}*]} or the name of the sensor instead of 'result' if you have started the profiling
        with multiple sensors, i.e.: {name:[{timestamp, value}*]*}
    '''
    def cleanup(workerlist):
        # Joining each worker can block for up to one interval, so for the
        # multi-sensor case it is done in a background thread below to keep
        # the HTTP response fast.
        for w in workerlist:
            w.join()

    if id in processes.keys():
        if (processes[id])[0] == ONE:
            worker = (processes[id])[1]
            worker.stop()
            worker.join()
            return {'result':(processes[id])[2]}
        else:
            result = {}
            workerlist = (processes[id])[1]
            for w in workerlist:
                w.stop()
            # Fire-and-forget join of all workers.
            Thread(target=cleanup, args=(workerlist,)).start()
            resultmap = (processes[id])[2]
            for r in resultmap.keys():
                result[r] = resultmap[r]
            return result
    else:
        # Unknown job id.
        return {}
class StoppableWorker(threading.Thread):
    '''
    Stoppable thread which allows you to start a function in background which will
    run infinitely until you stop it. You have to define an interval between each run.
    '''

    def __init__(self, work, args, interval):
        '''
        @param work: a function object
        @param args: argument(s) for work
        @param interval: interval in ms between each call to work
        '''
        threading.Thread.__init__(self)
        self.work = work
        self.args = args
        self.interval = interval
        # Renamed from `_stop`: Thread defines a private `_stop` member in
        # some interpreter versions, and clobbering it breaks join().
        self._stop_event = threading.Event()

    def run(self):
        """Call `work(args)` repeatedly until stop() is requested."""
        while not self.is_stopped():
            self.work(self.args)
            # BUG FIX: `interval / 1000` is integer division under Python 2,
            # which turned any interval < 1000 ms into a busy loop.
            time.sleep(self.interval / 1000.0)

    def stop(self):
        """Request termination; the loop exits after the current sleep."""
        self._stop_event.set()

    def is_stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.isSet()
| max-leuthaeuser/naoservice | modules/NaoProfilingModule.py | Python | gpl-3.0 | 3,380 |
#!/usr/bin/env python2
# coding=utf-8
__docformat__ = "restructuredtext"
'''
Created on 16/12/2011
@author: Jesus Becerril Navarrete (jbnkuma)
@email: jesusbn5@gmail.com
Copyright (C) 2013 Jesus Becerril Navarrete <jesusbn5@gmail.com>
capturescreen_puesto.py is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
capturescreen_puesto.py is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import pygtk
pygtk.require("2.0")
import gtk
from paramiko import SSHClient, AutoAddPolicy
from PythonMagick import Image
from os import path, makedirs
from shutil import rmtree
from visor import exe
from IPy import IP
class TomaFoto(object):
    """
    Grab a screenshot of a remote desktop over SSH, store it locally and
    convert it for on-screen display.
    """
    def valida_ip(self, ip):
        "Return True when the given string is a syntactically valid IP."
        try:
            IP(ip)
            return True
        except:
            return False

    def conexion(self, widget, entry):
        """Open the SSH connection and run the commands that capture the
        remote screenshot (hostname lookup, then an xwd root-window dump)."""
        ip = entry.get_text().strip()
        # Ignore the placeholder text and anything that is not an IP.
        if ip != "Ip del puesto" and ip != "" and self.valida_ip(ip):
            imagen_tmp = self.dir_tmp + "img_tmp.xwd"
            ssh_client = SSHClient()
            ssh_client.set_missing_host_key_policy(AutoAddPolicy())
            # NOTE(review): username/password are empty here -- presumably
            # meant to be filled in per deployment; confirm before use.
            ssh_client.connect(ip, username="", password="")
            cmd = "/usr/bin/hostname"
            stdin, stdout, stderr = ssh_client.exec_command(cmd)
            nombre_suc = stdout.read()
            # Second connect for the screenshot command itself.
            ssh_client.connect(ip, username="", password="")
            cmd = "/usr/bin/xwd -display :0.0 -root -silent "
            stdin, stdout, stderr = ssh_client.exec_command(cmd)
            imagen = stdout.read()
            archivo = open(imagen_tmp, "w")
            archivo.write(imagen)
            archivo.close()
            self.manipula_img(imagen_tmp, nombre_suc.strip("\n"))

    def comprueba_nombre(self, nombre_img, nmsuc):
        """Recursively bump the numeric suffix until the file name is free."""
        # NOTE(review): splitting on "_" relies on dir_tmp containing exactly
        # one underscore ("dirs_img") so tmp[2] is "<counter>.jpg" -- fragile.
        tmp = nombre_img.split("_")
        contador = int(tmp[2].split(".")[0])
        contador += 1
        image_final = self.dir_tmp + nmsuc + "_" + str(contador) + ".jpg"
        if path.exists(image_final):
            return self.comprueba_nombre(image_final, nmsuc)
        else:
            return image_final

    def manipula_img(self, img_r, nmsuc):
        """Convert the captured xwd image to jpg and show it in the viewer."""
        image_final = self.dir_tmp + nmsuc + ".jpg"
        contador = 0
        # Avoid clobbering earlier captures of the same host.
        if path.exists(image_final):
            contador += 1
            image_final = self.dir_tmp + nmsuc + "_" + str(contador) + ".jpg"
            if path.exists(image_final):
                image_final = self.comprueba_nombre(image_final, nmsuc)
        img = Image(img_r)
        img.scale("1024x768")
        img.write(image_final)
        exe(image_final, nmsuc)

    def __init__(self):
        # Recreate a clean temporary directory for the captured images.
        self.dir_tmp = "/tmp/dirs_img/"
        if path.exists(self.dir_tmp):
            rmtree(self.dir_tmp)
            makedirs(self.dir_tmp)
        else:
            makedirs(self.dir_tmp)
        # Main window with an IP entry, a capture button and a close button.
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.set_size_request(300, 100)
        window.set_title("Capturar Pantalla Remota")
        window.connect("delete_event", lambda w, e: gtk.main_quit())
        vbox = gtk.VBox(gtk.FALSE, 0)
        window.add(vbox)
        vbox.show()
        entry = gtk.Entry()
        entry.set_max_length(45)
        # Pressing Enter triggers the capture as well.
        entry.connect("activate", self.conexion, entry)
        entry.set_text("Ip del puesto")
        entry.insert_text("", len(entry.get_text()))
        entry.select_region(0, len(entry.get_text()))
        vbox.pack_start(entry, gtk.TRUE, gtk.TRUE, 0)
        entry.show()
        hbox = gtk.HBox(gtk.FALSE, 0)
        vbox.add(hbox)
        hbox.show()
        vbox2 = gtk.HBox(gtk.FALSE, 0)
        vbox.add(vbox2)
        vbox2.show()
        button = gtk.Button("Capturar")
        button.connect("clicked", self.conexion, entry)
        vbox2.pack_start(button, gtk.TRUE, gtk.TRUE, 0)
        button.show()
        button = gtk.Button(stock=gtk.STOCK_CLOSE)
        button.connect("clicked", lambda w: gtk.main_quit())
        vbox2.pack_end(button, gtk.TRUE, gtk.TRUE, 0)
        button.set_flags(gtk.CAN_DEFAULT)
        button.grab_default()
        button.show()
        window.show()
def main():
    """Enter the GTK main loop."""
    gtk.main()
    return 0

if __name__ == "__main__":
    # Build the window, then hand control to GTK.
    TomaFoto()
    main()
| jbnkuma/remote_screenshot | src/capturescreen_puesto.py | Python | gpl-3.0 | 5,034 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.contrib.pipeline.images import ImagesPipeline
from scrapy.http import Request
class OopImagesPipeline(ImagesPipeline):
    """Image pipeline that downloads every URL in ``item['pic_url']`` and
    records the stored file paths on ``item['pic_path']``."""

    def get_media_requests(self, item, info):
        # One download request per picture URL.
        return (Request(image_url) for image_url in item['pic_url'])

    def item_completed(self, results, item, info):
        # results - [(success, image_info_or_failure)] image_info - {url: x, path: x, checksum: x}
        # Successful downloads contribute their stored path; failures keep
        # the original placeholder (an empty list) at the same position.
        item['pic_path'] = [
            image_info_or_failure['path'] if success else []
            for success, image_info_or_failure in results
        ]
        return item
| huangchuchuan/Spider | OopSpider/oop/oop/pipelines.py | Python | apache-2.0 | 880 |
# coding=utf-8
"""Test for GIS utilities functions."""
import unittest
import numpy
from os.path import join
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
from PyQt4.QtCore import QVariant
from os.path import join
from safe.utilities.gis import (
layer_attribute_names,
is_polygon_layer,
buffer_points,
validate_geo_array)
from safe.common.exceptions import RadiiException
from safe.test.utilities import (
TESTDATA,
HAZDATA,
clone_shp_layer,
compare_two_vector_layers,
clone_raster_layer,
standard_data_path,
load_layer,
get_qgis_app)
from safe.utilities.gis import get_optimal_extent
from safe.common.exceptions import BoundingBoxError, InsufficientOverlapError
from safe.storage.core import read_layer
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class TestQGIS(unittest.TestCase):
    """Unit tests for the GIS helper utilities (Python 2 / QGIS API)."""

    def test_get_layer_attribute_names(self):
        """Test we can get the correct attributes back"""
        layer = clone_shp_layer(
            name='district_osm_jakarta',
            include_keywords=True,
            source_directory=standard_data_path('boundaries'))

        # with good attribute name
        attributes, position = layer_attribute_names(
            layer,
            [QVariant.Int, QVariant.String],
            'TEST_STR')
        expected_attributes = ['KAB_NAME', 'TEST_STR', 'TEST_INT']
        expected_position = 1
        message = 'expected_attributes, got %s, expected %s' % (
            attributes, expected_attributes)
        self.assertEqual(attributes, expected_attributes, message)
        message = 'expected_position, got %s, expected %s' % (
            position, expected_position)
        self.assertEqual(position, expected_position, message)

        # with non existing attribute name
        attributes, position = layer_attribute_names(
            layer,
            [QVariant.Int, QVariant.String],
            'MISSING_ATTR')
        expected_attributes = ['KAB_NAME', 'TEST_STR', 'TEST_INT']
        expected_position = None
        message = 'expected_attributes, got %s, expected %s' % (
            attributes, expected_attributes)
        self.assertEqual(attributes, expected_attributes, message)
        message = 'expected_position, got %s, expected %s' % (
            position, expected_position)
        self.assertEqual(position, expected_position, message)

        # with raster layer: attribute lookup is undefined, expect None/None
        layer = clone_raster_layer(
            name='padang_tsunami_mw8',
            extension='.tif',
            include_keywords=True,
            source_directory=standard_data_path('hazard')
        )
        attributes, position = layer_attribute_names(layer, [], '')
        message = 'Should return None, None for raster layer, got %s, %s' % (
            attributes, position)
        assert (attributes is None and position is None), message

    def test_is_polygonal_layer(self):
        """Test we can get the correct attributes back"""
        # Polygon layer
        layer = clone_shp_layer(
            name='district_osm_jakarta',
            include_keywords=True,
            source_directory=standard_data_path('boundaries'))
        message = 'isPolygonLayer, %s layer should be polygonal' % layer
        self.assertTrue(is_polygon_layer(layer), message)

        # Point layer
        layer = clone_shp_layer(
            name='volcano_point',
            include_keywords=True,
            source_directory=standard_data_path('hazard'))
        message = '%s layer should be polygonal' % layer
        self.assertFalse(is_polygon_layer(layer), message)

        # Raster layer is never polygonal
        layer = clone_raster_layer(
            name='padang_tsunami_mw8',
            extension='.tif',
            include_keywords=True,
            source_directory=standard_data_path('hazard')
        )
        message = ('%s raster layer should not be polygonal' % layer)
        self.assertFalse(is_polygon_layer(layer), message)

    def test_validate_geo_array(self):
        """Test validate geographic extent method.

        .. versionadded:: 3.2
        """
        # Normal case
        min_longitude = 20.389938354492188
        min_latitude = -34.10782492987083
        max_longitude = 20.712661743164062
        max_latitude = -34.008273470938335
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertTrue(validate_geo_array(extent))

        # min_latitude >= max_latitude
        min_latitude = 34.10782492987083
        max_latitude = -34.008273470938335
        min_longitude = 20.389938354492188
        max_longitude = 20.712661743164062
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertFalse(validate_geo_array(extent))

        # min_longitude >= max_longitude
        min_latitude = -34.10782492987083
        max_latitude = -34.008273470938335
        min_longitude = 34.10782492987083
        max_longitude = -34.008273470938335
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertFalse(validate_geo_array(extent))

        # min_latitude < -90 or > 90
        min_latitude = -134.10782492987083
        max_latitude = -34.008273470938335
        min_longitude = 20.389938354492188
        max_longitude = 20.712661743164062
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertFalse(validate_geo_array(extent))

        # max_latitude < -90 or > 90
        min_latitude = -9.10782492987083
        max_latitude = 91.10782492987083
        min_longitude = 20.389938354492188
        max_longitude = 20.712661743164062
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertFalse(validate_geo_array(extent))

        # min_longitude < -180 or > 180
        min_latitude = -34.10782492987083
        max_latitude = -34.008273470938335
        min_longitude = -184.10782492987083
        max_longitude = 20.712661743164062
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertFalse(validate_geo_array(extent))

        # max_longitude < -180 or > 180
        min_latitude = -34.10782492987083
        max_latitude = -34.008273470938335
        min_longitude = 20.389938354492188
        max_longitude = 180.712661743164062
        extent = [min_longitude, min_latitude, max_longitude, max_latitude]
        self.assertFalse(validate_geo_array(extent))

    def test_get_optimal_extent(self):
        """Optimal extent is calculated correctly"""
        exposure_path = join(TESTDATA, 'Population_2010.asc')
        hazard_path = join(HAZDATA, 'Lembang_Earthquake_Scenario.asc')

        # Expected data
        haz_metadata = {
            'bounding_box': (
                105.3000035,
                -8.3749994999999995,
                110.2914705,
                -5.5667784999999999),
            'resolution': (
                0.0083330000000000001,
                0.0083330000000000001)}
        exp_metadata = {
            'bounding_box': (
                94.972335000000001,
                -11.009721000000001,
                141.0140016666665,
                6.0736123333332639),
            'resolution': (
                0.0083333333333333003,
                0.0083333333333333003)}

        # Verify relevant metada is ok
        H = read_layer(hazard_path)
        E = read_layer(exposure_path)

        hazard_bbox = H.get_bounding_box()
        assert numpy.allclose(hazard_bbox, haz_metadata['bounding_box'],
                              rtol=1.0e-12, atol=1.0e-12)

        exposure_bbox = E.get_bounding_box()
        assert numpy.allclose(exposure_bbox, exp_metadata['bounding_box'],
                              rtol=1.0e-12, atol=1.0e-12)

        hazard_res = H.get_resolution()
        assert numpy.allclose(hazard_res, haz_metadata['resolution'],
                              rtol=1.0e-12, atol=1.0e-12)

        exposure_res = E.get_resolution()
        assert numpy.allclose(exposure_res, exp_metadata['resolution'],
                              rtol=1.0e-12, atol=1.0e-12)

        # First, do some examples that produce valid results
        ref_box = [105.3000035, -8.3749995, 110.2914705, -5.5667785]
        view_port = [94.972335, -11.009721, 141.014002, 6.073612]

        bbox = get_optimal_extent(hazard_bbox, exposure_bbox, view_port)
        assert numpy.allclose(bbox, ref_box, rtol=1.0e-12, atol=1.0e-12)

        # testing with viewport clipping disabled
        bbox = get_optimal_extent(hazard_bbox, exposure_bbox, None)
        assert numpy.allclose(bbox, ref_box, rtol=1.0e-12, atol=1.0e-12)

        view_port = [105.3000035,
                     -8.3749994999999995,
                     110.2914705,
                     -5.5667784999999999]
        bbox = get_optimal_extent(hazard_bbox, exposure_bbox, view_port)
        assert numpy.allclose(bbox, ref_box,
                              rtol=1.0e-12, atol=1.0e-12)

        # Very small viewport fully inside other layers
        view_port = [106.0, -6.0, 108.0, -5.8]
        bbox = get_optimal_extent(hazard_bbox, exposure_bbox, view_port)
        assert numpy.allclose(bbox, view_port,
                              rtol=1.0e-12, atol=1.0e-12)

        # viewport that intersects hazard layer
        view_port = [107.0, -6.0, 112.0, -3.0]
        ref_box = [107, -6, 110.2914705, -5.5667785]

        bbox = get_optimal_extent(hazard_bbox, exposure_bbox, view_port)
        assert numpy.allclose(bbox, ref_box,
                              rtol=1.0e-12, atol=1.0e-12)

        # Then one where boxes don't overlap
        view_port = [105.3, -4.3, 110.29, -2.5]
        try:
            get_optimal_extent(hazard_bbox, exposure_bbox, view_port)
        except InsufficientOverlapError, e:
            message = 'Did not find expected error message in %s' % str(e)
            assert 'did not overlap' in str(e), message
        else:
            message = ('Non ovelapping bounding boxes should have raised '
                       'an exception')
            raise Exception(message)

        # Try with wrong input data
        try:
            # noinspection PyTypeChecker
            get_optimal_extent(haz_metadata, exp_metadata, view_port)
        except BoundingBoxError:
            # good this was expected
            pass
        except InsufficientOverlapError, e:
            message = 'Did not find expected error message in %s' % str(e)
            assert 'Invalid' in str(e), message
        else:
            message = 'Wrong input data should have raised an exception'
            raise Exception(message)

        try:
            # noinspection PyTypeChecker
            get_optimal_extent(None, None, view_port)
        except BoundingBoxError, e:
            message = 'Did not find expected error message in %s' % str(e)
            assert 'cannot be None' in str(e), message
        else:
            message = 'Wrong input data should have raised an exception'
            raise Exception(message)

        try:
            # noinspection PyTypeChecker
            get_optimal_extent('aoeush', 'oeuuoe', view_port)
        except BoundingBoxError, e:
            message = 'Did not find expected error message in %s' % str(e)
            assert 'Instead i got "aoeush"' in str(e), message
        else:
            message = 'Wrong input data should have raised an exception'
            raise Exception(message)

    def test_buffer_points(self):
        """Test if we can make buffers correctly, whatever the projection."""
        # Original data in 3857.
        data_path = standard_data_path('other', 'buffer_points_3857.shp')
        layer, _ = load_layer(data_path)
        output_crs = qgis.core.QgsCoordinateReferenceSystem('EPSG:4326')

        # Wrong radii order.
        radii = [1, 5, 3]
        self.assertRaises(
            RadiiException, buffer_points, layer, radii, 'test', output_crs)

        # Wrong projection: the 3857 result must differ from the 4326 control.
        radii = [1, 2, 3]
        output_crs = qgis.core.QgsCoordinateReferenceSystem('EPSG:3857')
        result = buffer_points(layer, radii, 'test', output_crs)
        data_path = standard_data_path(
            'other', 'buffer_points_expected_4326.shp')
        control_layer, _ = load_layer(data_path)
        is_equal, msg = compare_two_vector_layers(control_layer, result)
        self.assertFalse(is_equal, msg)

        # Expected result in 4326.
        output_crs = qgis.core.QgsCoordinateReferenceSystem('EPSG:4326')
        result = buffer_points(layer, radii, 'test', output_crs)
        data_path = standard_data_path(
            'other', 'buffer_points_expected_4326.shp')
        control_layer, _ = load_layer(data_path)
        is_equal, msg = compare_two_vector_layers(control_layer, result)
        self.assertTrue(is_equal, msg)

        # Expected result in 3857.
        output_crs = qgis.core.QgsCoordinateReferenceSystem('EPSG:3857')
        result = buffer_points(layer, radii, 'test', output_crs)
        data_path = standard_data_path(
            'other', 'buffer_points_expected_3857.shp')
        control_layer, _ = load_layer(data_path)
        is_equal, msg = compare_two_vector_layers(control_layer, result)
        self.assertTrue(is_equal, msg)
if __name__ == '__main__':
    # Allow running this module directly with the standard unittest runner.
    unittest.main()
| Samweli/inasafe | safe/utilities/test/test_gis.py | Python | gpl-3.0 | 13,312 |
# ------------------------------------------------------------------
import itertools
#
# Bayes Optimal Classifier
#
# In this quiz we will compute the optimal label for a second missing word
# in a row
# based on the possible words that could be in the first blank
#
# Finish the procedure, LaterWords(), below
#
# You may want to import your code from the previous programming exercise!
#
import string
from pprint import pprint
sample_memo = '''
Milt, we're gonna need to go ahead and move you downstairs into storage B. We
have some new people coming in, and we need all the space we can get. So if you
could just go ahead and pack up your stuff and move it down there, that would
be terrific, OK? Oh, and remember: next Friday... is Hawaiian shirt day. So,
you know, if you want to, go ahead and wear a Hawaiian shirt and jeans.
Oh, oh, and I almost forgot. Ahh, I'm also gonna need you to go ahead and come
in on Sunday, too... Hello Peter, whats happening? Ummm, I'm gonna need you to
go ahead and come in tomorrow. So if you could be here around 9 that would be
great, mmmk... oh oh! and I almost forgot ahh, I'm also gonna need you to go
ahead and come in on Sunday too, kay. We ahh lost some people this week and ah,
we sorta need to play catch up. move you to move you to
'''
corrupted_memo = '''
Yeah, I'm gonna xxx you to go ahead xxx xxx complain about this. Oh, and if you
could xxx xxx and sit at the kids' table, that'd be xxx
'''
data_list = [w.lower().replace('\n', '') for w in sample_memo.split(' ')]
words_to_guess = ["ahead", "could"]
def LaterWords(sample, word, distance):
    """
    @param sample: a sample of text to draw from
    @param word: a word occurring before a corrupted sequence
    @param distance: how many words later to estimate (i.e. 1 for the next
    word, 2 for the word after that)
    @returns: a single word which is the most likely possibility
    """
    # Strip punctuation so tokens compare cleanly, then normalise case.
    sample = sample.translate(
        str.maketrans({k: None for k in string.punctuation}))
    # sample = sample.translate(None, string.punctuation)
    words = [w.lower().replace('\n', '') for w in sample.split(' ')]

    # For every occurrence of `word`, collect the `distance` tokens that
    # follow it.  groupby alternates between runs of `word` and runs of
    # other tokens; `if not x` keeps the non-`word` runs and [1:] drops the
    # run preceding the first occurrence.
    # NOTE(review): a final occurrence followed by fewer than `distance`
    # tokens would raise IndexError below -- assumed not to happen for the
    # sample memo used here.
    words_matrix = [tuple(y)[:distance] for x, y in
                    itertools.groupby(words, lambda z: z == word) if not x][1:]
    # Reshape into layers: layer 0 repeats `word`, layer i holds the i-th
    # follower of each occurrence (a simple Markov-chain lattice).
    words_matrix = [len(words_matrix) * [word]] + [[w[i] for w in words_matrix]
                                                   for i in range(distance)]

    def prob_of_a_word(current_layer_num, search_word):
        # Total probability of reaching `search_word` at this layer, summed
        # over every distinct predecessor in the previous layer.
        if current_layer_num == 0:
            return 1
        current_layer = words_matrix[current_layer_num]
        previous_layer = words_matrix[current_layer_num - 1]
        prob_word_in_current = 0
        for unique_word_in_previous_layer in set(previous_layer):
            # Conditional probability P(search_word | predecessor).
            cond_prob_word_in_current = 0
            for word_idx, word_in_current_layer in enumerate(current_layer):
                if word_in_current_layer == search_word and previous_layer[
                    word_idx] == unique_word_in_previous_layer:
                    cond_prob_word_in_current += 1.0
            cond_prob_word_in_current = \
                cond_prob_word_in_current / previous_layer.count(
                    unique_word_in_previous_layer)
            if cond_prob_word_in_current:
                prob_word_in_current += cond_prob_word_in_current * prob_of_a_word(
                    current_layer_num - 1, unique_word_in_previous_layer)
        return prob_word_in_current

    # Return the candidate in the final layer with the highest probability.
    return max([(word_, prob_of_a_word(distance, word_)) for word_ in
                words_matrix[distance]], key=lambda x: x[1])[0]
# Reconstruct the corrupted memo: each 'xxx' token is replaced with the most
# likely word to follow its (possibly just-reconstructed) predecessor.
words = corrupted_memo.split()
res = []
for idx, word in enumerate(words):
    if word == 'xxx':
        word = LaterWords(sample_memo, words[idx - 1], 1)
        # Store the guess so the next 'xxx' conditions on it.
        words[idx] = word
    res.append(word)
pprint(' '.join(res))
| ZhukovGreen/UMLND | bayes_nlp_mini_project/q6.py | Python | gpl-3.0 | 4,264 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RCubist(RPackage):
    """Regression modeling using rules with added instance-based corrections"""
    # Spack package recipe: declarative metadata only; RPackage supplies the
    # actual CRAN fetch/build logic.
    homepage = "https://cran.r-project.org/package=Cubist"
    # url points at the exact release tarball; list_url lets Spack discover
    # older versions in the CRAN archive.
    url = "https://cran.r-project.org/src/contrib/Cubist_0.0.19.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/Cubist"
    # Second argument is the md5 checksum of the tarball above.
    version('0.0.19', 'bf9364f655536ec03717fd2ad6223a47')
    # Runtime R package dependencies, needed both to build and to run.
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-reshape2', type=('build', 'run'))
| krafczyk/spack | var/spack/repos/builtin/packages/r-cubist/package.py | Python | lgpl-2.1 | 1,733 |
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Imports should not be a requirement for building documentation
try:
from bokeh.plotting import figure
from bokeh.palettes import brewer
from bokeh.models import Range1d
from bokeh.embed import components
from jinja2 import Template
except ImportError:
pass
def x_label(epoch_axis):
    """
    Get the x axis label depending on the boolean epoch_axis.

    Arguments:
        epoch_axis (bool): If true, use Epoch, if false use Minibatch

    Returns:
        str: "Epoch" or "Minibatch"
    """
    if epoch_axis:
        return "Epoch"
    return "Minibatch"
def cost_fig(cost_data, plot_height, plot_width, epoch_axis=True):
    """
    Generate a figure with one line per element in cost_data.

    Arguments:
        cost_data (list): (name, x, y) triples to plot
        plot_height (int): Plot height
        plot_width (int): Plot width
        epoch_axis (bool, optional): If true, use Epoch, if false use Minibatch

    Returns:
        bokeh.plotting.figure: cost_data figure
    """
    fig = figure(plot_height=plot_height,
                 plot_width=plot_width,
                 title="Cost",
                 x_axis_label=x_label(epoch_axis),
                 y_axis_label="Cross Entropy Error (%)")
    # Spectral palette supports 3 - 11 distinct colors
    num_lines = len(cost_data)
    assert num_lines <= 11, "Insufficient colors in predefined palette."
    palette = list(brewer["Spectral"][max(3, num_lines)])
    if num_lines < 3:
        # manually adjust pallette for better contrast
        palette[0] = brewer["Spectral"][6][0]
        if num_lines == 2:
            palette[1] = brewer["Spectral"][6][-1]
    # Consume palette colors in order, one per data series.
    for idx, (series_name, x_vals, y_vals) in enumerate(cost_data):
        fig.line(x_vals, y_vals, legend=series_name, color=palette[idx], line_width=2)
    return fig
def hist_fig(hist_data, plot_height, plot_width, x_range=None, epoch_axis=True):
    """
    Generate a figure with an image plot for hist_data, bins on the Y axis and
    time on the X axis.

    Arguments:
        hist_data (tuple): (name, hdata, dh, dw, bins, offset) to plot
        plot_height (int): Plot height
        plot_width (int): Plot width
        x_range (tuple, optional): (start, end) range for x
        epoch_axis (bool, optional): If true, use Epoch, if false use Minibatch

    Returns:
        bokeh.plotting.figure: hist_data figure
    """
    title, hdata, dh, dw, bins, offset = hist_data
    if x_range is None:
        # Default to the full time extent of the histogram image.
        x_range = (0, dw)
    fig = figure(plot_height=plot_height,
                 plot_width=plot_width,
                 title=title,
                 x_axis_label=x_label(epoch_axis),
                 x_range=x_range,
                 y_range=(offset, offset + bins))
    fig.image(image=[hdata], x=[0], y=[offset], dw=[dw], dh=[dh], palette="Spectral11")
    return fig
def image_fig(data, h, w, x_range, y_range, plot_size):
    """
    Build a square, chrome-free figure showing one RGBA image.

    Arguments:
        data (int): data to plot
        h (int): height
        w (int): width
        x_range (tuple, optional): (start, end) range for x
        y_range (tuple, optional): (start, end) range for y
        plot_size (int): plot size

    Returns:
        bokeh.plotting.figure: Generated figure
    """
    img_plot = figure(x_range=x_range,
                      y_range=y_range,
                      plot_width=plot_size,
                      plot_height=plot_size,
                      toolbar_location=None)
    img_plot.image_rgba([data], x=[0], y=[0], dw=[w], dh=[h])
    # Hide axes and border so the image fills the whole tile.
    img_plot.axis.visible = None
    img_plot.min_border = 0
    return img_plot
def deconv_figs(layer_name, layer_data, fm_max=8, plot_size=120):
    """
    Generate deconv visualization figures for one layer, up to fm_max
    feature maps.

    Arguments:
        layer_name (str): Layer name
        layer_data (list): (fm_name, deconv_data, img_data) triples
        fm_max (int): Max feature maps to process
        plot_size (int, optional): Plot size

    Returns:
        tuple of vis_keys, img_keys, fig_dict
    """
    vis_keys, img_keys, fig_dict = {}, {}, {}
    for fm_num, (fm_name, deconv_data, img_data) in enumerate(layer_data):
        if fm_num >= fm_max:
            break
        img_h, img_w = img_data.shape
        xr = Range1d(start=0, end=img_w)
        yr = Range1d(start=0, end=img_h)
        # Original image tile first, then the deconv projection tile.
        img_fig = image_fig(img_data, img_h, img_w, xr, yr, plot_size)
        deconv_fig = image_fig(deconv_data, img_h, img_w, xr, yr, plot_size)
        title = "{}_fmap_{:04d}".format(layer_name, fm_num)
        vis_keys[fm_num] = "vis_" + title
        img_keys[fm_num] = "img_" + title
        fig_dict[vis_keys[fm_num]] = deconv_fig
        fig_dict[img_keys[fm_num]] = img_fig
    return vis_keys, img_keys, fig_dict
def deconv_summary_page(filename, cost_data, deconv_data):
    """
    Generate an HTML page with a Deconv visualization

    Arguments:
        filename: Output filename
        cost_data (list): Cost data to plot
        deconv_data (tuple): deconv data to plot
    """
    fig_dict = dict()
    # Cost plot goes at the top of the page under a fixed key.
    cost_key = "cost_plot"
    fig_dict[cost_key] = cost_fig(cost_data, 300, 533, epoch_axis=True)
    vis_keys = dict()
    img_keys = dict()
    # Collect per-layer figure keys; fig_dict accumulates every figure so a
    # single components() call can emit shared bokeh script + per-figure divs.
    for layer, layer_data in deconv_data:
        lyr_vis_keys, lyr_img_keys, lyr_fig_dict = deconv_figs(layer, layer_data, fm_max=4)
        vis_keys[layer] = lyr_vis_keys
        img_keys[layer] = lyr_img_keys
        fig_dict.update(lyr_fig_dict)
    # components() returns one <script> plus a dict of divs keyed like fig_dict.
    script, div = components(fig_dict)
    template = Template('''
    <!DOCTYPE html>
    <html lang="en">
        <head>
            <meta charset="utf-8">
            <title>{{page_title}}</title>
            <style> div{float: left;} </style>
            <link rel="stylesheet"
            href="http://cdn.pydata.org/bokeh/release/bokeh-0.9.0.min.css"
            type="text/css" />
            <script type="text/javascript"
            src="http://cdn.pydata.org/bokeh/release/bokeh-0.9.0.min.js"></script>
            {{ script }}
        </head>
        <body>
        <div id=cost_plot style="width:100%; padding:10px">
        {{ div[cost_key]}}
        </div>
        {% for layer in sorted_layers %}
           <div id=Outer{{layer}} style="padding:20px">
           <div id={{layer}} style="background-color: #C6FFF1; padding:10px">
           Layer {{layer}}<br>
           {% for fm in vis_keys[layer].keys() %}
               <div id={{fm}} style="padding:10px">
               Feature Map {{fm}}<br>
               {{ div[vis_keys[layer][fm]] }}
               {{ div[img_keys[layer][fm]] }}
               </div>
           {% endfor %}
           </div>
           </div>
           <br><br>
        {% endfor %}
        </body>
    </html>
    ''')
    # sorted(vis_keys) iterates layer names in sorted order for stable layout.
    with open(filename, 'w') as htmlfile:
        htmlfile.write(template.render(page_title="Deconv Visualization", script=script,
                                       div=div, cost_key=cost_key, vis_keys=vis_keys,
                                       img_keys=img_keys,
                                       sorted_layers=sorted(vis_keys)))
| Jokeren/neon | neon/visualizations/figure.py | Python | apache-2.0 | 7,615 |
# Copyright (C) 2007-2008 www.stani.be
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
# Follows PEP8
import glob
import os
import re
import subprocess
import sys
import system
from lib import safe
# Cache file locations; filled in by set_font_cache() at application start-up.
USER_FONTS_CACHE_PATH = None
ROOT_FONTS_CACHE_PATH = None
WRITABLE_FONTS_CACHE_PATH = None
# Lazily populated module-level caches used by font_dictionary()/font_names().
_FONT_DICTIONARY = None
_FONT_NAMES = None
# re_WORD splits camelcase runs ('ArialBlack' -> 'Arial', 'Black');
# re_SPACE splits on underscores and any non-word character run.
re_WORD = re.compile('([A-Z0-9]+[^A-Z0-9]*)', re.UNICODE)
re_SPACE = re.compile('_|\W+', re.UNICODE)
# Shell commands tried in order on unix to locate font files (see collect_fonts).
LOCATE = [
    ['locate', '-i', '.ttf', '.otf'],
    ['find', '/', '-iname', '*.ttf', '-o', '-name', '*.otf'],
]
#collect_fonts (system dependent): one definition is chosen at import time
#depending on the platform.
if sys.platform.startswith('win'):
    #3thrd party module in other
    from other.findsystem import findFonts
    def collect_fonts():
        """Collect a list of all font filenames."""
        return findFonts()
else:
    #better unix alternative for collect_fonts/findFonts
    #presume findutils are present
    if not system.find_exe('locate'):
        # NOTE(review): _ is presumably the gettext alias installed by the
        # application bootstrap -- confirm it exists before module import.
        sys.exit(_('Please install "%s" first.') % 'locate')
    def locate_files(command):
        # Run the external command and return its stdout as a list of lines.
        return subprocess.Popen(command,
            stdout=subprocess.PIPE).stdout.read().splitlines()
    def collect_fonts():
        """Collect a list of all font filenames."""
        #try first with locate otherwise with find
        for command in LOCATE:
            try:
                if system.find_exe(command[0]):
                    output = locate_files(command)
                    files = [line for line in output
                        if line[-4:].lower() in ['.ttf', '.otf']]
                    if files:
                        return files
            except:
                # Deliberate best-effort: if one locator fails, fall through
                # to the next command (and finally to findFonts below).
                pass
        from other.findsystem import findFonts
        return findFonts()
def basename(font_file):
    """Return the file name of ``font_file`` without directory or extension."""
    stem, _ext = os.path.splitext(os.path.basename(font_file))
    return stem
def name(x):
    """\
    Split camelcase filenames and ensure title case.
    >>> name('ArialBlack italic')
    'Arial Black Italic'
    """
    # Same patterns as the module-level re_WORD/re_SPACE constants, bound
    # locally so this helper is self-contained (re caches compilations).
    word_re = re.compile('([A-Z0-9]+[^A-Z0-9]*)', re.UNICODE)
    space_re = re.compile('_|\W+', re.UNICODE)
    camel_split = ' '.join(word_re.split(x))
    words = ' '.join(space_re.split(camel_split))
    # NOTE(review): replace(' ', ' ') is a no-op as written; possibly it was
    # meant to collapse doubled spaces -- confirm against upstream.
    return words.replace(' ', ' ').title().strip()
def _font_name(font_name, base='xxx'):
"""\
Expand an abbreviated font name.
"""
if font_name == 'Ariblk':
return 'Arial', 'Arial Black'
elif font_name == 'Cour':
return 'Cour', 'Courier New'
elif font_name == 'Micross':
return 'Microsoft Sans Serif', 'Microsoft Sans Serif Regular'
elif font_name == 'Lucon':
return 'Lucida', 'Lucida Console'
elif font_name == 'L 10646':
return 'Lucida', 'Lucida Sans Unicode'
elif font_name == 'Pala':
return 'Pala', 'Palatino Linotype'
elif font_name == 'Trebuc':
return 'Trebuc', 'Trebuchet'
elif font_name[:5] == 'Gen A':
font_name = 'Gentium Alt ' + font_name[5:].title()
elif font_name[:4] == 'Gen ':
font_name = 'Gentium ' + font_name[4:]
if font_name[:len(base)] == base:
#base is still valid
rest = font_name[len(base):].strip().split(' ')
p = ' '.join(rest[:-1]).replace('Mo', 'Mono').replace('Se', 'Serif')
prefix = ' '.join([base, p]).strip()
suffix = rest[-1].lower()
if suffix in ['it', 'i']:
font_name = base + ' Italic'
elif suffix in ['bd', 'b']:
font_name = prefix + ' Bold'
elif suffix in ['bi', 'bdit', 'z']:
font_name = prefix + ' Bold Italic'
elif suffix == 'mr':
font_name = prefix + ' Mono Regular'
elif suffix == 'mri':
font_name = prefix + ' Mono Italic'
elif suffix == 'mb':
font_name = prefix + ' Mono Bold'
elif suffix == 'mbi':
font_name = prefix + ' Mono Bold Italic'
elif suffix in ['rr', 'se']:
font_name = prefix + ' Serif'
elif suffix == 'rri':
font_name = prefix + ' Serif Italic'
elif suffix in ['rb', 'sebd']:
font_name = prefix + ' Serif Bold'
elif suffix == 'rbi':
font_name = prefix + ' Serif Bold Italic'
elif suffix in ['sb', 'sansbold']:
font_name = prefix + ' Sans Bold'
elif suffix == 'sbi':
font_name = prefix + ' Sans Bold Italic'
elif suffix == 'sr':
font_name = prefix + ' Sans'
elif suffix == 'sri':
font_name = prefix + ' Sans Italic'
else:
#new base
base = font_name.split(' ')[0]
if len(base) < 4:
base = font_name
if font_name[-3:] == ' It':
font_name += 'alic'
elif font_name[-3:] == ' Bd':
font_name = font_name[-1:] + 'old'
font_name = font_name.replace(' Ms', ' Microsoft ')\
.replace(' Std', ' Standard ')\
.replace('Mg ', 'Magenta ')\
.replace('Tlwg ', 'Thai ')\
.replace('I102', 'Italic')\
.replace('R102', 'Regular')\
.replace('Cour ', 'Courier New ')\
.replace('Trebuc ', 'Trebuchet ')\
.replace('Pala ', 'Palatino Linotype ')
if sys.platform.startswith('win'):
font_name = font_name.replace('Times', 'Times New Roman')
return font_name, base
def _font_dictionary(font_files=None):
    """Build a mapping of expanded font names to font file paths.

    font_files -- list of font file paths; the system is scanned with
                  collect_fonts() when omitted.
    """
    if font_files is None:
        font_files = collect_fonts()
    #step 1: temporary font names derived from file names
    by_raw_name = {}
    for font_file in font_files:
        by_raw_name[name(basename(font_file))] = font_file
    #step 2: fix font names derived from context
    #normally a base comes first, then italic, bold -- so expand in sorted
    #order, threading the family base through successive _font_name() calls.
    # sorted() works on both Python 2 and 3; dict.keys() followed by .sort()
    # fails on Python 3 where keys() is a view.
    expanded = {}
    base = 'xxx' # non existing font name as base
    for raw_name in sorted(by_raw_name):
        font_name, base = _font_name(raw_name, base)
        # Guard against empty names (e.g. a bare '.ttf' file) before
        # inspecting the first character; keep only capitalized names.
        if font_name and font_name[0].upper() == font_name[0]:
            expanded[font_name] = by_raw_name[raw_name]
    return expanded
def font_dictionary(filename=None, force=False):
    """\
    Path specification for the font dictionary, cached

    filename -- explicit cache file to read; defaults to the user cache if
                present, otherwise the root cache
    force    -- when True, ignore any on-disk cache and rescan the system
    Returns the module-level _FONT_DICTIONARY mapping font name -> file path.
    """
    global _FONT_DICTIONARY
    if _FONT_DICTIONARY is None:
        if filename is None:
            # Prefer the per-user cache over the system-wide one.
            if os.path.exists(USER_FONTS_CACHE_PATH):
                filename = USER_FONTS_CACHE_PATH
            else:
                filename = ROOT_FONTS_CACHE_PATH
        if filename and os.path.exists(filename) and not force:
            # Cache file holds a python literal; eval it with the safe
            # evaluator rather than eval(). (Py2: file() builtin.)
            _FONT_DICTIONARY = safe.eval_safe(file(filename, 'rb').read())
        else:
            _FONT_DICTIONARY = {}
        if not _FONT_DICTIONARY:
            # Cache missing or empty: do a full (slow) system scan ...
            _FONT_DICTIONARY = _font_dictionary()
            if not (WRITABLE_FONTS_CACHE_PATH is None):
                # ... and persist the result for next start-up.
                f = file(WRITABLE_FONTS_CACHE_PATH, 'wb')
                f.write(unicode(_FONT_DICTIONARY))
                f.close()
        if not _FONT_DICTIONARY:
            # 'empty' dict for ui
            _FONT_DICTIONARY = {'': ''}
        # Fonts shipped with the application always take precedence.
        _FONT_DICTIONARY.update(SHIPPED_FONTS)
    return _FONT_DICTIONARY
def font_names(filename=None):
    """Return the sorted list of all known font names (cached).

    filename -- optional cache file, forwarded to font_dictionary()
    """
    global _FONT_NAMES
    if _FONT_NAMES is None:
        # sorted() accepts the keys directly and works on both Python 2 and
        # Python 3 (calling .sort() on .keys() breaks on py3 views).
        _FONT_NAMES = sorted(font_dictionary(filename).keys())
    return _FONT_NAMES
def merge(*paths):
    """Build a font dictionary from the ``*.ttf`` files found in ``paths``."""
    ttf_files = [font_file
                 for path in paths
                 for font_file in glob.glob(os.path.join(path, "*.ttf"))]
    return _font_dictionary(ttf_files)
def set_font_cache(user_fonts_path, root_fonts_path,
        user_fonts_cache_path, root_fonts_cache_path):
    """Expose global variables

    Configures the module's cache paths and the SHIPPED_FONTS mapping from
    the fonts bundled with the application.
    """
    # maybe this should generate the cache immediately
    global SHIPPED_FONTS
    global USER_FONTS_CACHE_PATH
    global ROOT_FONTS_CACHE_PATH
    global WRITABLE_FONTS_CACHE_PATH
    SHIPPED_FONTS = merge(root_fonts_path, user_fonts_path)
    # NOTE(review): the next two names are NOT in the global declarations
    # above, so these assignments only create dead locals -- confirm whether
    # they were meant to be module globals.
    USER_FONTS_PATH = user_fonts_path
    ROOT_FONTS_PATH = root_fonts_path
    USER_FONTS_CACHE_PATH = user_fonts_cache_path
    ROOT_FONTS_CACHE_PATH = root_fonts_cache_path
    # Non-root users may not write the system cache; getuid() is absent on
    # Windows, where the user cache is always used.
    if not hasattr(os, 'getuid') or os.getuid():
        WRITABLE_FONTS_CACHE_PATH = USER_FONTS_CACHE_PATH
    else:
        WRITABLE_FONTS_CACHE_PATH = ROOT_FONTS_CACHE_PATH
def example():
    """Print the sorted list of detected font names (demo/debug helper)."""
    names = sorted(font_dictionary())
    sys.stdout.write(unicode(names) + '\n')
if __name__ == '__main__':
    # Ad-hoc smoke test: dump all detected font names when run directly.
    example()
| tibor95/phatch-python2.7 | phatch/lib/fonts.py | Python | gpl-3.0 | 8,731 |
"""
/***************************************************************************
Name : PythonObject
Description : provides abstract methods for creating a python class from a table name
Date : 5/March/2015
copyright : (C) 2015 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : stdm@unhabitat.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from stdm.data.database import Model
def create_dynamic_class(clsname, **attr):
    """Create a python class named ``clsname`` deriving from ``Model``.

    Any keyword arguments become attributes in the class namespace.
    """
    bases = (Model,)
    namespace = dict(**attr)
    return type(clsname, bases, namespace)
def class_from_table(clsname, **attr):
    """Create a python model class from a database table name.

    Backward-compatible generalization: optional keyword attributes are now
    forwarded into the generated class namespace (previously none could be
    supplied); calling with just ``clsname`` behaves exactly as before.
    """
    return create_dynamic_class(clsname, **attr)
| gltn/stdm | stdm/ui/python_object.py | Python | gpl-2.0 | 1,433 |
import ob2.config as config
from os.path import exists, join
def get_inst_account_form_path(login):
    """
    Returns the path to the PDF file that contains the account form for user with login.

    login -- The instructional account login without any prefix (e.g. aa)
    """
    # Guard: account forms must be enabled in the deployment config.
    if not config.inst_account_enabled:
        raise RuntimeError("Tried to get instructional account form path when account forms are "
                           "disabled")
    form_path = join(config.inst_account_forms_path, "%s.pdf" % login)
    if not exists(form_path):
        raise ValueError("No such account form with login: %s" % login)
    return form_path
| octobear2/ob2 | ob2/util/inst_account.py | Python | bsd-2-clause | 640 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, # You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
from mach.decorators import (
CommandProvider,
Command,
)
@CommandProvider
class BuiltinCommands(object):
    """Built-in mach commands for listing and inspecting registered commands."""

    def __init__(self, context):
        self.context = context

    @Command('mach-commands', category='misc',
        description='List all mach commands.')
    def commands(self):
        # One command name per line, in registry order.
        handlers = self.context.commands.command_handlers
        print("\n".join(handlers.keys()))

    @Command('mach-debug-commands', category='misc',
        description='Show info about available mach commands.')
    def debug_commands(self):
        import inspect

        handlers = self.context.commands.command_handlers
        # Iterate handlers sorted by command name (keys are unique, so the
        # tuple comparison never reaches the handler values).
        for command, handler in sorted(handlers.items()):
            cls = handler.cls
            method = getattr(cls, getattr(handler, 'method'))

            print(command)
            print('=' * len(command))
            print('')
            print('File: %s' % inspect.getsourcefile(method))
            print('Class: %s' % cls.__name__)
            print('Method: %s' % handler.method)
            print('')
| michath/ConMonkey | python/mach/mach/commands/commandinfo.py | Python | mpl-2.0 | 1,308 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from typing import Any, Dict, List, Mapping, Optional, Sequence, Text
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib import cache
from zerver.lib.test_helpers import (
queries_captured, tornado_redirected_to_list
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.decorator import (
JsonableError
)
from zerver.lib.response import (
json_error,
json_success,
)
from zerver.lib.test_runner import (
slow
)
from zerver.models import (
get_display_recipient, Message, Realm, Recipient, Stream, Subscription,
UserProfile, get_user_profile_by_id
)
from zerver.lib.actions import (
do_add_default_stream, do_change_is_admin, do_set_realm_waiting_period_threshold,
do_create_realm, do_remove_default_stream, do_set_realm_create_stream_by_admins_only,
gather_subscriptions_helper, bulk_add_subscriptions, bulk_remove_subscriptions,
gather_subscriptions, get_default_streams_for_realm, get_realm, get_stream,
get_user_profile_by_email, set_default_streams, get_subscription,
create_streams_if_needed, active_user_ids
)
from zerver.views.streams import (
compose_views
)
from django.http import HttpResponse
import mock
import random
import ujson
import six
from six.moves import range, urllib, zip
class TestCreateStreams(ZulipTestCase):
    def test_creating_streams(self):
        # type: () -> None
        """First call creates all three invite-only streams; an identical
        second call creates none and reports all three as existing."""
        stream_names = [u'new1', u'new2', u'new3']
        stream_descriptions = [u'des1', u'des2', u'des3']
        realm = get_realm('zulip')
        new_streams, existing_streams = create_streams_if_needed(
            realm,
            [{"name": stream_name,
              "description": stream_description,
              "invite_only": True}
             for (stream_name, stream_description) in zip(stream_names, stream_descriptions)])
        self.assertEqual(len(new_streams), 3)
        self.assertEqual(len(existing_streams), 0)
        actual_stream_names = {stream.name for stream in new_streams}
        self.assertEqual(actual_stream_names, set(stream_names))
        actual_stream_descriptions = {stream.description for stream in new_streams}
        self.assertEqual(actual_stream_descriptions, set(stream_descriptions))
        for stream in new_streams:
            self.assertTrue(stream.invite_only)
        # Same request again: everything should now land in existing_streams.
        new_streams, existing_streams = create_streams_if_needed(
            realm,
            [{"name": stream_name,
              "description": stream_description,
              "invite_only": True}
             for (stream_name, stream_description) in zip(stream_names, stream_descriptions)])
        self.assertEqual(len(new_streams), 0)
        self.assertEqual(len(existing_streams), 3)
        actual_stream_names = {stream.name for stream in existing_streams}
        self.assertEqual(actual_stream_names, set(stream_names))
        actual_stream_descriptions = {stream.description for stream in existing_streams}
        self.assertEqual(actual_stream_descriptions, set(stream_descriptions))
        for stream in existing_streams:
            self.assertTrue(stream.invite_only)
class RecipientTest(ZulipTestCase):
    def test_recipient(self):
        # type: () -> None
        """str() of a stream Recipient renders its name, type_id and type."""
        stream = get_stream('Verona', get_realm('zulip'))
        recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        expected = '<Recipient: Verona (%d, %d)>' % (stream.id, Recipient.STREAM)
        self.assertEqual(str(recipient), expected)
class StreamAdminTest(ZulipTestCase):
    def test_make_stream_public(self):
        # type: () -> None
        """Even an admin must be subscribed to a private stream to make it
        public; once subscribed the same patch succeeds."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        self.make_stream('private_stream', invite_only=True)
        do_change_is_admin(user_profile, True)
        params = {
            'stream_name': ujson.dumps('private_stream'),
            'is_private': ujson.dumps(False)
        }
        stream_id = Stream.objects.get(realm=user_profile.realm, name='private_stream').id
        result = self.client_patch("/json/streams/%d" % (stream_id,), params)
        # Not subscribed yet, so the request must be rejected.
        self.assert_json_error(result, 'You are not invited to this stream.')
        self.subscribe_to_stream(email, 'private_stream')
        do_change_is_admin(user_profile, True)
        params = {
            'stream_name': ujson.dumps('private_stream'),
            'is_private': ujson.dumps(False)
        }
        result = self.client_patch("/json/streams/%d" % (stream_id,), params)
        self.assert_json_success(result)
        realm = user_profile.realm
        stream = Stream.objects.get(name='private_stream', realm=realm)
        self.assertFalse(stream.invite_only)
    def test_make_stream_private(self):
        # type: () -> None
        """A realm admin can flip a public stream to invite-only."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        realm = user_profile.realm
        self.make_stream('public_stream', realm=realm)
        do_change_is_admin(user_profile, True)
        params = {
            'stream_name': ujson.dumps('public_stream'),
            'is_private': ujson.dumps(True)
        }
        stream_id = Stream.objects.get(realm=user_profile.realm, name='public_stream').id
        result = self.client_patch("/json/streams/%d" % (stream_id,), params)
        self.assert_json_success(result)
        # Re-fetch from the DB to confirm the flag was persisted.
        stream = Stream.objects.get(name='public_stream', realm=realm)
        self.assertTrue(stream.invite_only)
    def test_deactivate_stream_backend(self):
        # type: () -> None
        """Deleting a stream removes the admin's active subscription to it."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        stream = self.make_stream('new_stream')
        self.subscribe_to_stream(user_profile.email, stream.name)
        do_change_is_admin(user_profile, True)
        result = self.client_delete('/json/streams/%d' % (stream.id,))
        self.assert_json_success(result)
        # No active subscription rows may remain for the deleted stream.
        subscription_exists = Subscription.objects.filter(
            user_profile=user_profile,
            recipient__type_id=stream.id,
            recipient__type=Recipient.STREAM,
            active=True,
        ).exists()
        self.assertFalse(subscription_exists)
    def test_deactivate_stream_backend_requires_existing_stream(self):
        # type: () -> None
        """Deleting a non-existent stream id returns an error."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        self.make_stream('new_stream')
        do_change_is_admin(user_profile, True)
        # 999999999 is an id that cannot belong to any fixture stream.
        result = self.client_delete('/json/streams/999999999')
        self.assert_json_error(result, u'Invalid stream id')
    def test_deactivate_stream_backend_requires_realm_admin(self):
        # type: () -> None
        """A non-admin subscriber may not delete a stream."""
        email = 'hamlet@zulip.com'
        self.login(email)
        self.subscribe_to_stream(email, 'new_stream')
        stream_id = Stream.objects.get(name='new_stream').id
        result = self.client_delete('/json/streams/%d' % (stream_id,))
        self.assert_json_error(result, 'Must be a realm administrator')
    def test_private_stream_live_updates(self):
        # type: () -> None
        """Updates to a private stream notify only its subscribers."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        do_change_is_admin(user_profile, True)
        self.make_stream('private_stream', invite_only=True)
        self.subscribe_to_stream(email, 'private_stream')
        self.subscribe_to_stream('cordelia@zulip.com', 'private_stream')
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            stream_id = Stream.objects.get(name='private_stream').id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'description': ujson.dumps('Test description')})
        self.assert_json_success(result)
        cordelia = get_user_profile_by_email('cordelia@zulip.com')
        prospero = get_user_profile_by_email('prospero@zulip.com')
        # Subscribers (hamlet, cordelia) are notified; prospero is not.
        notified_user_ids = set(events[-1]['users'])
        self.assertIn(user_profile.id, notified_user_ids)
        self.assertIn(cordelia.id, notified_user_ids)
        self.assertNotIn(prospero.id, notified_user_ids)
        # Same audience restriction applies to a rename.
        events = []
        with tornado_redirected_to_list(events):
            stream_id = Stream.objects.get(name='private_stream').id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'new_name': ujson.dumps('whatever')})
        self.assert_json_success(result)
        notified_user_ids = set(events[-1]['users'])
        self.assertIn(user_profile.id, notified_user_ids)
        self.assertIn(cordelia.id, notified_user_ids)
        self.assertNotIn(prospero.id, notified_user_ids)
    def test_rename_stream(self):
        # type: () -> None
        """Renaming a stream emits an update event to every active user and
        works for ascii, unicode and mixed-language names."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        realm = user_profile.realm
        self.subscribe_to_stream(email, 'stream_name1')
        do_change_is_admin(user_profile, True)
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            stream_id = Stream.objects.get(name='stream_name1').id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'new_name': ujson.dumps('stream_name2')})
        self.assert_json_success(result)
        # events[1] holds the stream-rename event; presumably events[0] is a
        # different event generated by the rename -- confirm against the API.
        event = events[1]['event']
        self.assertEqual(event, dict(
            op='update',
            type='stream',
            property='name',
            value='stream_name2',
            name='stream_name1'
        ))
        notified_user_ids = set(events[1]['users'])
        stream_name1_exists = get_stream('stream_name1', realm)
        self.assertFalse(stream_name1_exists)
        stream_name2_exists = get_stream('stream_name2', realm)
        self.assertTrue(stream_name2_exists)
        # Renames of public streams notify every active user in the realm.
        self.assertEqual(notified_user_ids, set(active_user_ids(realm)))
        self.assertIn(user_profile.id,
                      notified_user_ids)
        self.assertIn(get_user_profile_by_email('prospero@zulip.com').id,
                      notified_user_ids)
        # Test case to handle unicode stream name change
        # *NOTE: Here Encoding is needed when Unicode string is passed as an argument*
        with tornado_redirected_to_list(events):
            stream_id = stream_name2_exists.id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'new_name': ujson.dumps(u'नया नाम'.encode('utf-8'))})
        self.assert_json_success(result)
        # While querying, system can handle unicode strings.
        stream_name_uni_exists = get_stream(u'नया नाम', realm)
        self.assertTrue(stream_name_uni_exists)
        # Test case to handle changing of unicode stream name to newer name
        # NOTE: Unicode string being part of URL is handled cleanly
        # by client_patch call, encoding of URL is not needed.
        with tornado_redirected_to_list(events):
            stream_id = stream_name_uni_exists.id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'new_name': ujson.dumps(u'नाम में क्या रक्खा हे'.encode('utf-8'))})
        self.assert_json_success(result)
        # While querying, system can handle unicode strings.
        stream_name_old_uni_exists = get_stream(u'नया नाम', realm)
        self.assertFalse(stream_name_old_uni_exists)
        stream_name_new_uni_exists = get_stream(u'नाम में क्या रक्खा हे', realm)
        self.assertTrue(stream_name_new_uni_exists)
        # Test case to change name from one language to other.
        with tornado_redirected_to_list(events):
            stream_id = stream_name_new_uni_exists.id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'new_name': ujson.dumps(u'français'.encode('utf-8'))})
        self.assert_json_success(result)
        stream_name_fr_exists = get_stream(u'français', realm)
        self.assertTrue(stream_name_fr_exists)
        # Test case to change name to mixed language name.
        with tornado_redirected_to_list(events):
            stream_id = stream_name_fr_exists.id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'new_name': ujson.dumps(u'français name'.encode('utf-8'))})
        self.assert_json_success(result)
        stream_name_mixed_exists = get_stream(u'français name', realm)
        self.assertTrue(stream_name_mixed_exists)
    def test_rename_stream_requires_realm_admin(self):
        # type: () -> None
        """A non-admin user may not rename a stream."""
        email = 'hamlet@zulip.com'
        self.login(email)
        self.make_stream('stream_name1')
        stream_id = Stream.objects.get(name='stream_name1').id
        result = self.client_patch('/json/streams/%d' % (stream_id,),
                                   {'new_name': ujson.dumps('stream_name2')})
        self.assert_json_error(result, 'Must be a realm administrator')
    def test_change_stream_description(self):
        # type: () -> None
        """An admin can change a stream description; every active realm user
        is notified and the new description is persisted."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        realm = user_profile.realm
        self.subscribe_to_stream(email, 'stream_name1')
        do_change_is_admin(user_profile, True)
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            stream_id = Stream.objects.get(realm=realm, name='stream_name1').id
            result = self.client_patch('/json/streams/%d' % (stream_id,),
                                       {'description': ujson.dumps('Test description')})
        self.assert_json_success(result)
        # The first captured event is the stream-description update itself.
        event = events[0]['event']
        self.assertEqual(event, dict(
            op='update',
            type='stream',
            property='description',
            value='Test description',
            name='stream_name1'
        ))
        notified_user_ids = set(events[0]['users'])
        stream = Stream.objects.get(
            name='stream_name1',
            realm=realm,
        )
        self.assertEqual(notified_user_ids, set(active_user_ids(realm)))
        self.assertIn(user_profile.id,
                      notified_user_ids)
        self.assertIn(get_user_profile_by_email('prospero@zulip.com').id,
                      notified_user_ids)
        self.assertEqual('Test description', stream.description)
    def test_change_stream_description_requires_realm_admin(self):
        # type: () -> None
        """A non-admin subscriber may not change a stream description."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        self.subscribe_to_stream(email, 'stream_name1')
        # Explicitly strip admin rights before attempting the change.
        do_change_is_admin(user_profile, False)
        stream_id = Stream.objects.get(realm=user_profile.realm, name='stream_name1').id
        result = self.client_patch('/json/streams/%d' % (stream_id,),
                                   {'description': ujson.dumps('Test description')})
        self.assert_json_error(result, 'Must be a realm administrator')
    def set_up_stream_for_deletion(self, stream_name, invite_only=False,
                                   subscribed=True):
        # type: (str, bool, bool) -> Stream
        """
        Create a stream for deletion by an administrator.

        stream_name -- name of the stream to create
        invite_only -- make the stream private when True
        subscribed  -- subscribe the (admin) hamlet user when True
        Returns the created Stream; leaves hamlet logged in as a realm admin.
        """
        email = 'hamlet@zulip.com'
        self.login(email)
        user_profile = get_user_profile_by_email(email)
        stream = self.make_stream(stream_name, invite_only=invite_only)
        # For testing deleting streams you aren't on.
        if subscribed:
            self.subscribe_to_stream(email, stream_name)
        do_change_is_admin(user_profile, True)
        return stream
    def delete_stream(self, stream, subscribed=True):
        # type: (Stream, bool) -> None
        """
        Delete the stream and assess the result.

        Verifies the unsubscribe event, the renamed/deactivated DB row, the
        absence from public stream listings, and that the deactivated name
        cannot be re-subscribed to.
        """
        active_name = stream.name
        realm = stream.realm
        stream_id = stream.id
        events = [] # type: List[Dict[str, Any]]
        with tornado_redirected_to_list(events):
            result = self.client_delete('/json/streams/' + str(stream_id))
        self.assert_json_success(result)
        # Only subscription events matter here; ignore anything else captured.
        deletion_events = [e['event'] for e in events if e['event']['type'] == 'subscription']
        if subscribed:
            self.assertEqual(deletion_events[0], dict(
                op='remove',
                type='subscription',
                subscriptions=[{'name': active_name, 'stream_id': stream.id}]
            ))
        else:
            # You could delete the stream, but you weren't on it so you don't
            # receive an unsubscription event.
            self.assertEqual(deletion_events, [])
        with self.assertRaises(Stream.DoesNotExist):
            Stream.objects.get(realm=get_realm("zulip"), name=active_name)
        # A deleted stream's name is changed, is deactivated, is invite-only,
        # and has no subscribers.
        deactivated_stream_name = "!DEACTIVATED:" + active_name
        deactivated_stream = Stream.objects.get(name=deactivated_stream_name)
        self.assertTrue(deactivated_stream.deactivated)
        self.assertTrue(deactivated_stream.invite_only)
        self.assertEqual(deactivated_stream.name, deactivated_stream_name)
        subscribers = self.users_subscribed_to_stream(
            deactivated_stream_name, realm)
        self.assertEqual(subscribers, [])
        # It doesn't show up in the list of public streams anymore.
        result = self.client_get("/json/streams?include_subscribed=false")
        public_streams = [s["name"] for s in ujson.loads(result.content)["streams"]]
        self.assertNotIn(active_name, public_streams)
        self.assertNotIn(deactivated_stream_name, public_streams)
        # Even if you could guess the new name, you can't subscribe to it.
        result = self.client_post(
            "/json/users/me/subscriptions",
            {"subscriptions": ujson.dumps([{"name": deactivated_stream_name}])})
        self.assert_json_error(
            result, "Unable to access stream (%s)." % (deactivated_stream_name,))
def test_delete_public_stream(self):
    # type: () -> None
    """
    Deleting a public stream as an administrator removes the stream
    from every user's view entirely.
    """
    public_stream = self.set_up_stream_for_deletion("newstream")
    self.delete_stream(public_stream)
def test_delete_private_stream(self):
    # type: () -> None
    """
    An administrator who is subscribed to a private stream may delete
    it.
    """
    private_stream = self.set_up_stream_for_deletion("newstream", invite_only=True)
    self.delete_stream(private_stream)
def test_delete_streams_youre_not_on(self):
    # type: () -> None
    """
    Administrators can delete public streams they aren't on, but cannot
    delete private streams they aren't on.
    """
    # Public stream: deletion succeeds even without a subscription
    # (subscribed=False also suppresses the unsubscription event check).
    pub_stream = self.set_up_stream_for_deletion(
        "pubstream", subscribed=False)
    self.delete_stream(pub_stream, subscribed=False)
    # Private stream: deletion is rejected when the admin is not a member.
    priv_stream = self.set_up_stream_for_deletion(
        "privstream", subscribed=False, invite_only=True)
    result = self.client_delete('/json/streams/' + str(priv_stream.id))
    self.assert_json_error(
        result, "Cannot administer invite-only streams this way")
def attempt_unsubscribe_of_principal(self, is_admin=False, is_subbed=True,
                                     invite_only=False, other_user_subbed=True):
    # type: (bool, bool, bool, bool) -> HttpResponse
    """
    Try to unsubscribe Cordelia from a stream on her behalf (acting as
    hamlet) and return the raw HttpResponse so callers can assert
    success or failure.

    The flags control whether hamlet is an admin (`is_admin`), whether
    hamlet (`is_subbed`) and Cordelia (`other_user_subbed`) are
    subscribed, and whether the stream is private (`invite_only`).
    """
    # Set up the main user, who is in most cases an admin.
    email = "hamlet@zulip.com"
    self.login(email)
    user_profile = get_user_profile_by_email(email)
    if is_admin:
        do_change_is_admin(user_profile, True)
    # Set up the stream.
    stream_name = u"hümbüǵ"
    self.make_stream(stream_name, invite_only=invite_only)
    # Set up the principal to be unsubscribed.
    other_email = "cordelia@zulip.com"
    other_user_profile = get_user_profile_by_email(other_email)
    # Subscribe the admin and/or principal as specified in the flags.
    if is_subbed:
        self.subscribe_to_stream(user_profile.email, stream_name)
    if other_user_subbed:
        self.subscribe_to_stream(other_user_profile.email, stream_name)
    result = self.client_delete(
        "/json/users/me/subscriptions",
        {"subscriptions": ujson.dumps([stream_name]),
         "principals": ujson.dumps([other_email])})
    # If the removal succeeded, then assert that Cordelia is no longer subscribed.
    if result.status_code not in [400]:
        subbed_users = self.users_subscribed_to_stream(stream_name, other_user_profile.realm)
        self.assertNotIn(other_user_profile, subbed_users)
    return result
def test_cant_remove_others_from_stream(self):
    # type: () -> None
    """
    Non-admins may not remove other users from a stream.
    """
    result = self.attempt_unsubscribe_of_principal(
        is_admin=False, invite_only=False,
        is_subbed=True, other_user_subbed=True)
    self.assert_json_error(
        result, "This action requires administrative rights")
def test_admin_remove_others_from_public_stream(self):
    # type: () -> None
    """
    Admins may remove other users from public streams, including ones
    the admin is not subscribed to.
    """
    result = self.attempt_unsubscribe_of_principal(
        is_admin=True, invite_only=False,
        is_subbed=True, other_user_subbed=True)
    json = self.assert_json_success(result)
    # Exactly one removal, no "already unsubscribed" entries.
    self.assertEqual(len(json["removed"]), 1)
    self.assertEqual(len(json["not_subscribed"]), 0)
def test_admin_remove_others_from_subbed_private_stream(self):
    # type: () -> None
    """
    Admins may remove other users from a private stream as long as the
    admin is subscribed to it.
    """
    result = self.attempt_unsubscribe_of_principal(
        is_admin=True, invite_only=True,
        is_subbed=True, other_user_subbed=True)
    json = self.assert_json_success(result)
    # Exactly one removal, no "already unsubscribed" entries.
    self.assertEqual(len(json["removed"]), 1)
    self.assertEqual(len(json["not_subscribed"]), 0)
def test_admin_remove_others_from_unsubbed_private_stream(self):
    # type: () -> None
    """
    Admin rights are not enough to remove users from a private stream
    the admin is not subscribed to.
    """
    result = self.attempt_unsubscribe_of_principal(
        is_admin=True, invite_only=True,
        is_subbed=False, other_user_subbed=True)
    self.assert_json_error(
        result, "Cannot administer invite-only streams this way")
def test_create_stream_by_admins_only_setting(self):
    # type: () -> None
    """
    When realm.create_stream_by_admins_only setting is active and
    the number of days since the user had joined is less than waiting period
    threshold, non admin users shouldn't be able to create new streams.
    """
    email = 'hamlet@zulip.com'
    user_profile = get_user_profile_by_email(email)
    self.login(email)
    # Restrict stream creation to admins; hamlet is not an admin here.
    do_set_realm_create_stream_by_admins_only(user_profile.realm, True)
    stream_name = ['adminsonlysetting']
    # Subscribing to a nonexistent stream implies creating it, which
    # should now be rejected.
    result = self.common_subscribe_to_streams(
        email,
        stream_name
    )
    self.assert_json_error(result, 'User cannot create streams.')
def test_create_stream_by_waiting_period_threshold(self):
    # type: () -> None
    """
    Non admin users with account age greater or equal to waiting period
    threshold should be able to create new streams.
    """
    email = 'hamlet@zulip.com'
    user_profile = get_user_profile_by_email(email)
    self.login(email)
    # Ensure hamlet is a plain (non-admin) user for this test.
    do_change_is_admin(user_profile, False)
    # With a 10-day waiting period, a fresh account cannot create streams.
    do_set_realm_waiting_period_threshold(user_profile.realm, 10)
    stream_name = ['waitingperiodtest']
    result = self.common_subscribe_to_streams(
        email,
        stream_name
    )
    self.assert_json_error(result, 'User cannot create streams.')
    # With the waiting period disabled, the same request succeeds.
    do_set_realm_waiting_period_threshold(user_profile.realm, 0)
    result = self.common_subscribe_to_streams(
        email,
        stream_name
    )
    self.assert_json_success(result)
def test_remove_already_not_subbed(self):
    # type: () -> None
    """
    Unsubscribing a user who was never subscribed to the stream fails
    gracefully rather than erroring out.
    """
    result = self.attempt_unsubscribe_of_principal(
        is_admin=True, invite_only=False,
        is_subbed=False, other_user_subbed=False)
    json = self.assert_json_success(result)
    # Nothing was removed; the user is reported as not subscribed.
    self.assertEqual(len(json["removed"]), 0)
    self.assertEqual(len(json["not_subscribed"]), 1)
def test_remove_invalid_user(self):
    # type: () -> None
    """
    Trying to unsubscribe an invalid user from a stream fails gracefully.
    """
    # Act as an admin so the failure is about the bad principal, not
    # missing permissions.
    admin_email = "hamlet@zulip.com"
    self.login(admin_email)
    user_profile = get_user_profile_by_email(admin_email)
    do_change_is_admin(user_profile, True)
    stream_name = u"hümbüǵ"
    self.make_stream(stream_name)
    # "baduser@zulip.com" does not exist, so the request is rejected
    # with a 403 rather than a validation (400) error.
    result = self.client_delete("/json/users/me/subscriptions",
                                {"subscriptions": ujson.dumps([stream_name]),
                                 "principals": ujson.dumps(["baduser@zulip.com"])})
    self.assert_json_error(
        result,
        "User not authorized to execute queries on behalf of 'baduser@zulip.com'",
        status_code=403)
class DefaultStreamTest(ZulipTestCase):
    """Tests for a realm's default stream set and the admin API
    endpoints that add and remove default streams."""

    def get_default_stream_names(self, realm):
        # type: (Realm) -> Set[Text]
        """Return the names of `realm`'s default streams as a set."""
        streams = get_default_streams_for_realm(realm)
        stream_names = [s.name for s in streams]
        return set(stream_names)

    def get_default_stream_descriptions(self, realm):
        # type: (Realm) -> Set[Text]
        """Return the descriptions of `realm`'s default streams as a set."""
        streams = get_default_streams_for_realm(realm)
        stream_descriptions = [s.description for s in streams]
        return set(stream_descriptions)

    def test_set_default_streams(self):
        # type: () -> None
        """set_default_streams() installs exactly the requested streams,
        plus the realm's notifications stream ("announce")."""
        (realm, _) = do_create_realm("testrealm", "Test Realm")
        stream_dict = {
            "apple": {"description": "A red fruit", "invite_only": False},
            "banana": {"description": "A yellow fruit", "invite_only": False},
            "Carrot Cake": {"description": "A delicious treat", "invite_only": False}
        } # type: Dict[Text, Dict[Text, Any]]
        expected_names = list(stream_dict.keys())
        # The realm's notifications stream is a default stream too; its
        # description is the empty string.
        expected_names.append("announce")
        expected_descriptions = [i["description"] for i in stream_dict.values()] + [""]
        set_default_streams(realm, stream_dict)
        stream_names_set = self.get_default_stream_names(realm)
        stream_descriptions_set = self.get_default_stream_descriptions(realm)
        self.assertEqual(stream_names_set, set(expected_names))
        self.assertEqual(stream_descriptions_set, set(expected_descriptions))

    def test_set_default_streams_no_notifications_stream(self):
        # type: () -> None
        """With the realm's notifications stream unset, only the
        requested streams become defaults."""
        (realm, _) = do_create_realm("testrealm", "Test Realm")
        realm.notifications_stream = None
        realm.save(update_fields=["notifications_stream"])
        stream_dict = {
            "apple": {"description": "A red fruit", "invite_only": False},
            "banana": {"description": "A yellow fruit", "invite_only": False},
            "Carrot Cake": {"description": "A delicious treat", "invite_only": False}
        } # type: Dict[Text, Dict[Text, Any]]
        expected_names = list(stream_dict.keys())
        expected_descriptions = [i["description"] for i in stream_dict.values()]
        set_default_streams(realm, stream_dict)
        stream_names_set = self.get_default_stream_names(realm)
        stream_descriptions_set = self.get_default_stream_descriptions(realm)
        self.assertEqual(stream_names_set, set(expected_names))
        self.assertEqual(stream_descriptions_set, set(expected_descriptions))

    def test_add_and_remove_default_stream(self):
        # type: () -> None
        """do_add_default_stream/do_remove_default_stream update the
        default set and are both idempotent."""
        realm = get_realm("zulip")
        orig_stream_names = self.get_default_stream_names(realm)
        do_add_default_stream(realm, 'Added Stream')
        new_stream_names = self.get_default_stream_names(realm)
        added_stream_names = new_stream_names - orig_stream_names
        self.assertEqual(added_stream_names, set(['Added Stream']))
        # idempotency--2nd call to add_default_stream should be a noop
        do_add_default_stream(realm, 'Added Stream')
        self.assertEqual(self.get_default_stream_names(realm), new_stream_names)
        # start removing
        do_remove_default_stream(realm, 'Added Stream')
        self.assertEqual(self.get_default_stream_names(realm), orig_stream_names)
        # idempotency--2nd call to remove_default_stream should be a noop
        do_remove_default_stream(realm, 'Added Stream')
        self.assertEqual(self.get_default_stream_names(realm), orig_stream_names)

    def test_api_calls(self):
        # type: () -> None
        """The /json/default_streams endpoints add and remove default
        streams for the acting user's realm."""
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        # Make hamlet a realm admin before exercising the endpoints.
        do_change_is_admin(user_profile, True)
        stream_name = 'stream ADDED via api'
        result = self.client_post('/json/default_streams', dict(stream_name=stream_name))
        self.assert_json_success(result)
        self.assertTrue(stream_name in self.get_default_stream_names(user_profile.realm))
        # and remove it
        result = self.client_delete('/json/default_streams', dict(stream_name=stream_name))
        self.assert_json_success(result)
        self.assertFalse(stream_name in self.get_default_stream_names(user_profile.realm))
class SubscriptionPropertiesTest(ZulipTestCase):
    """Tests for /json/subscriptions/property, which updates per-user,
    per-stream subscription settings (color, pin_to_top, notification
    flags, etc.)."""

    def test_set_stream_color(self):
        # type: () -> None
        """
        A POST request to /json/subscriptions/property with stream_name and
        color data sets the stream color, and for that stream only.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        old_subs, _ = gather_subscriptions(get_user_profile_by_email(test_email))
        sub = old_subs[0]
        stream_name = sub['name']
        new_color = "#ffffff" # TODO: ensure that this is different from old_color
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "stream": stream_name,
                                                "value": "#ffffff"}])})
        self.assert_json_success(result)
        new_subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        # Locate the updated subscription and verify its new color.
        found_sub = None
        for sub in new_subs:
            if sub['name'] == stream_name:
                found_sub = sub
                break
        self.assertIsNotNone(found_sub)
        self.assertEqual(found_sub['color'], new_color)
        # After removing the changed entry from both snapshots, the
        # remaining subscriptions should be untouched and hence equal.
        new_subs.remove(found_sub)
        for sub in old_subs:
            if sub['name'] == stream_name:
                found_sub = sub
                break
        old_subs.remove(found_sub)
        self.assertEqual(old_subs, new_subs)

    def test_set_color_missing_stream_name(self):
        # type: () -> None
        """
        Updating the color property requires a `stream` key.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "value": "#ffffff"}])})
        self.assert_json_error(
            result, "stream key is missing from subscription_data[0]")

    def test_set_color_unsubscribed_stream_name(self):
        # type: () -> None
        """
        Updating the color property requires a subscribed stream.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        # hamlet is not subscribed to Rome in the test fixtures.
        unsubs_stream = 'Rome'
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "stream": unsubs_stream,
                                                "value": "#ffffff"}])})
        self.assert_json_error(
            result, "Not subscribed to stream %s" % (unsubs_stream,))

    def test_json_subscription_property_invalid_verb(self):
        # type: () -> None
        """
        Called by invalid request method. No other request method other than
        'post' is allowed in this case.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        # A GET with otherwise-valid data must be rejected.
        result = self.client_get(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "in_home_view",
                                                "stream": subs[0]["name"],
                                                "value": False}])})
        self.assert_json_error(result, "Invalid verb")

    def test_set_color_missing_color(self):
        # type: () -> None
        """
        Updating the color property requires a color.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "color",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(
            result, "value key is missing from subscription_data[0]")

    def test_set_pin_to_top(self):
        # type: () -> None
        """
        A POST request to /json/subscriptions/property with stream_name and
        pin_to_top data pins the stream.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        user_profile = get_user_profile_by_email(test_email)
        old_subs, _ = gather_subscriptions(user_profile)
        sub = old_subs[0]
        stream_name = sub['name']
        # Flip the current value so the update is observable either way.
        new_pin_to_top = not sub['pin_to_top']
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "pin_to_top",
                                                "stream": stream_name,
                                                "value": new_pin_to_top}])})
        self.assert_json_success(result)
        updated_sub = get_subscription(stream_name, user_profile)
        self.assertIsNotNone(updated_sub)
        self.assertEqual(updated_sub.pin_to_top, new_pin_to_top)

    def test_set_subscription_property_incorrect(self):
        # type: () -> None
        """
        Trying to set a property incorrectly returns a JSON error.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        # Boolean properties reject non-boolean values...
        property_name = "in_home_view"
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": property_name,
                                                "value": "bad",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(result,
                               '%s is not a boolean' % (property_name,))
        property_name = "desktop_notifications"
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": property_name,
                                                "value": "bad",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(result,
                               '%s is not a boolean' % (property_name,))
        property_name = "audible_notifications"
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": property_name,
                                                "value": "bad",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(result,
                               '%s is not a boolean' % (property_name,))
        # ...and the string-valued color property rejects non-strings.
        property_name = "color"
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": property_name,
                                                "value": False,
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(result,
                               '%s is not a string' % (property_name,))

    def test_json_subscription_property_invalid_stream(self):
        # type: () -> None
        """Setting a property on a nonexistent stream returns an error."""
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        stream_name = "invalid_stream"
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "in_home_view",
                                                "stream": stream_name,
                                                "value": False}])})
        self.assert_json_error(result, "Invalid stream %s" % (stream_name,))

    def test_set_invalid_property(self):
        # type: () -> None
        """
        Trying to set an invalid property returns a JSON error.
        """
        test_email = "hamlet@zulip.com"
        self.login(test_email)
        subs = gather_subscriptions(get_user_profile_by_email(test_email))[0]
        result = self.client_post(
            "/json/subscriptions/property",
            {"subscription_data": ujson.dumps([{"property": "bad",
                                                "value": "bad",
                                                "stream": subs[0]["name"]}])})
        self.assert_json_error(result,
                               "Unknown subscription property: bad")
class SubscriptionRestApiTest(ZulipTestCase):
    """Tests for PATCH /api/v1/users/me/subscriptions, the bulk
    add/delete subscription endpoint, plus the compose_views() helper
    it is built on."""

    def test_basic_add_delete(self):
        # type: () -> None
        """A stream can be added and then removed via the PATCH API."""
        email = 'hamlet@zulip.com'
        self.login(email)
        # add
        request = {
            'add': ujson.dumps([{'name': 'my_test_stream_1'}])
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            request,
            **self.api_auth(email)
        )
        self.assert_json_success(result)
        streams = self.get_streams(email)
        self.assertTrue('my_test_stream_1' in streams)
        # now delete the same stream
        request = {
            'delete': ujson.dumps(['my_test_stream_1'])
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            request,
            **self.api_auth(email)
        )
        self.assert_json_success(result)
        streams = self.get_streams(email)
        self.assertTrue('my_test_stream_1' not in streams)

    def test_bad_add_parameters(self):
        # type: () -> None
        """Malformed 'add' payloads produce specific validation errors."""
        email = 'hamlet@zulip.com'
        self.login(email)

        def check_for_error(val, expected_message):
            # type: (Any, str) -> None
            # Send `val` as the 'add' payload and expect a specific error.
            request = {
                'add': ujson.dumps(val)
            }
            result = self.client_patch(
                "/api/v1/users/me/subscriptions",
                request,
                **self.api_auth(email)
            )
            self.assert_json_error(result, expected_message)
        check_for_error(['foo'], 'add[0] is not a dict')
        check_for_error([{'bogus': 'foo'}], 'name key is missing from add[0]')
        check_for_error([{'name': {}}], 'add[0]["name"] is not a string')

    def test_bad_principals(self):
        # type: () -> None
        """Each entry in 'principals' must be a string (an email)."""
        email = 'hamlet@zulip.com'
        self.login(email)
        request = {
            'add': ujson.dumps([{'name': 'my_new_stream'}]),
            'principals': ujson.dumps([{}]),
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            request,
            **self.api_auth(email)
        )
        self.assert_json_error(result, 'principals[0] is not a string')

    def test_bad_delete_parameters(self):
        # type: () -> None
        """'delete' entries must be plain stream names, not dicts."""
        email = 'hamlet@zulip.com'
        self.login(email)
        request = {
            'delete': ujson.dumps([{'name': 'my_test_stream_1'}])
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            request,
            **self.api_auth(email)
        )
        self.assert_json_error(result, "delete[0] is not a string")

    def test_add_or_delete_not_specified(self):
        # type: () -> None
        """A PATCH with neither 'add' nor 'delete' is rejected."""
        email = 'hamlet@zulip.com'
        self.login(email)
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            {},
            **self.api_auth(email)
        )
        self.assert_json_error(result,
                               'Nothing to do. Specify at least one of "add" or "delete".')

    def test_patch_enforces_valid_stream_name_check(self):
        # type: () -> None
        """
        Only way to force an error is with a empty string.
        """
        email = 'hamlet@zulip.com'
        self.login(email)
        invalid_stream_name = ""
        request = {
            'delete': ujson.dumps([invalid_stream_name])
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            request,
            **self.api_auth(email)
        )
        self.assert_json_error(result,
                               "Invalid stream name (%s)." % (invalid_stream_name,))

    def test_stream_name_too_long(self):
        # type: () -> None
        """Stream names longer than 60 characters are rejected."""
        email = 'hamlet@zulip.com'
        self.login(email)
        # One character over the 60-character limit.
        long_stream_name = "a" * 61
        request = {
            'delete': ujson.dumps([long_stream_name])
        }
        result = self.client_patch(
            "/api/v1/users/me/subscriptions",
            request,
            **self.api_auth(email)
        )
        self.assert_json_error(result,
                               "Stream name (%s) too long." % (long_stream_name,))

    def test_compose_views_rollback(self):
        # type: () -> None
        '''
        The compose_views function() is used under the hood by
        update_subscriptions_backend. It's a pretty simple method in terms of
        control flow, but it uses a Django rollback, which may make it brittle
        code when we upgrade Django. We test the functions's rollback logic
        here with a simple scenario to avoid false positives related to
        subscription complications.
        '''
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        user_profile.full_name = 'Hamlet'
        user_profile.save()

        def method1(req, user_profile):
            # type: (HttpRequest, UserProfile) -> HttpResponse
            # Succeeds, but its DB write must be rolled back when a
            # later view in the same composition fails.
            user_profile.full_name = 'Should not be committed'
            user_profile.save()
            return json_success()

        def method2(req, user_profile):
            # type: (HttpRequest, UserProfile) -> HttpResponse
            # Always fails, forcing the rollback.
            return json_error(_('random failure'))
        with self.assertRaises(JsonableError):
            compose_views(None, user_profile, [(method1, {}), (method2, {})])
        # method1's write must not have been committed.
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user_profile.full_name, 'Hamlet')
class SubscriptionAPITest(ZulipTestCase):
def setUp(self):
    # type: () -> None
    """
    All tests will be logged in as hamlet. Also save various useful values
    as attributes that tests can access.
    """
    self.test_email = "hamlet@zulip.com"
    self.login(self.test_email)
    self.user_profile = get_user_profile_by_email(self.test_email)
    self.realm = self.user_profile.realm
    # Baseline list of stream names hamlet is subscribed to.
    self.streams = self.get_streams(self.test_email)
def make_random_stream_names(self, existing_stream_names):
    # type: (List[Text]) -> List[Text]
    """
    Derive fresh stream names from `existing_stream_names` by appending
    a random digit to each, dropping any candidate that collides with a
    stream already present in the realm.
    """
    taken = {stream.name for stream in Stream.objects.filter(realm=self.realm)}
    candidates = (name + str(random.randint(0, 9)) for name in existing_stream_names)
    return [candidate for candidate in candidates if candidate not in taken]
def test_successful_subscriptions_list(self):
    # type: () -> None
    """
    Calling /api/v1/users/me/subscriptions should successfully return your subscriptions.
    """
    email = self.test_email
    result = self.client_get("/api/v1/users/me/subscriptions", **self.api_auth(email))
    self.assert_json_success(result)
    json = ujson.loads(result.content)
    self.assertIn("subscriptions", json)
    # Each returned subscription must carry a well-typed name, color,
    # and privacy flag.
    for stream in json['subscriptions']:
        self.assertIsInstance(stream['name'], six.string_types)
        self.assertIsInstance(stream['color'], six.string_types)
        self.assertIsInstance(stream['invite_only'], bool)
        # check that the stream name corresponds to an actual stream
        try:
            Stream.objects.get(name__iexact=stream['name'], realm=self.realm)
        except Stream.DoesNotExist:
            self.fail("stream does not exist")
    list_streams = [stream['name'] for stream in json["subscriptions"]]
    # also check that this matches the list of your subscriptions
    self.assertEqual(sorted(list_streams), sorted(self.streams))
def helper_check_subs_before_and_after_add(self, subscriptions, other_params,
                                           subscribed, already_subscribed,
                                           email, new_subs, invite_only=False):
    # type: (List[Text], Dict[str, Any], List[Text], List[Text], Text, List[Text], bool) -> None
    """
    Check result of adding subscriptions.

    You can add subscriptions for yourself or possibly many
    principals, which is why e-mails map to subscriptions in the
    result.

    The result json is of the form

    {"msg": "",
     "result": "success",
     "already_subscribed": {"iago@zulip.com": ["Venice", "Verona"]},
     "subscribed": {"iago@zulip.com": ["Venice8"]}}

    `subscribed` and `already_subscribed` are the expected values for
    those keys in the response under `email`; `new_subs` is the
    expected complete subscription list for `email` afterwards.
    """
    result = self.common_subscribe_to_streams(self.test_email, subscriptions,
                                              other_params, invite_only=invite_only)
    self.assert_json_success(result)
    json = ujson.loads(result.content)
    # Compare sorted, since the server's ordering is not part of the contract.
    self.assertEqual(sorted(subscribed), sorted(json["subscribed"][email]))
    self.assertEqual(sorted(already_subscribed), sorted(json["already_subscribed"][email]))
    new_streams = self.get_streams(email)
    self.assertEqual(sorted(new_streams), sorted(new_subs))
def test_successful_subscriptions_add(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions should successfully add
    streams, and should determine which are new subscriptions vs
    which were already subscribed. We add 2 new streams to the
    list of subscriptions and confirm the right number of events
    are generated.
    """
    self.assertNotEqual(len(self.streams), 0)  # necessary for full test coverage
    add_streams = [u"Verona2", u"Denmark5"]
    self.assertNotEqual(len(add_streams), 0)  # necessary for full test coverage
    events = [] # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        self.helper_check_subs_before_and_after_add(self.streams + add_streams, {},
                                                    add_streams, self.streams, self.test_email, self.streams + add_streams)
    # The exact event count is pinned to catch regressions in event
    # generation for new-stream subscription.
    self.assert_length(events, 6)
def test_successful_subscriptions_add_with_announce(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions with announce=true should
    successfully add the streams and, because the realm has a
    notifications stream configured, also announce the newly created
    streams there — producing one more event than a plain add (compare
    test_successful_subscriptions_add).
    """
    self.assertNotEqual(len(self.streams), 0)
    add_streams = [u"Verona2", u"Denmark5"]
    self.assertNotEqual(len(add_streams), 0)
    events = [] # type: List[Dict[str, Any]]
    other_params = {
        'announce': 'true',
    }
    # Point the realm's notifications stream at one of hamlet's streams.
    notifications_stream = Stream.objects.get(name=self.streams[0], realm=self.realm)
    self.realm.notifications_stream = notifications_stream
    self.realm.save()
    # Delete the UserProfile from the cache so the realm change will be
    # picked up
    cache.cache_delete(cache.user_profile_by_email_cache_key(self.test_email))
    with tornado_redirected_to_list(events):
        self.helper_check_subs_before_and_after_add(self.streams + add_streams, other_params,
                                                    add_streams, self.streams, self.test_email, self.streams + add_streams)
    # Use assert_length for consistency with the event-count checks in
    # the sibling tests of this class.
    self.assert_length(events, 7)
def test_successful_subscriptions_notifies_pm(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions should notify when a new stream is created.
    """
    invitee = "iago@zulip.com"
    invitee_full_name = 'Iago'
    # Build a stream name guaranteed not to exist yet, so a new stream
    # is actually created.
    current_stream = self.get_streams(invitee)[0]
    invite_streams = self.make_random_stream_names([current_stream])[:1]
    result = self.common_subscribe_to_streams(
        invitee,
        invite_streams,
        extra_post_data={
            'announce': 'true',
            'principals': '["%s"]' % (self.user_profile.email,)
        },
    )
    self.assert_json_success(result)
    # With no notifications stream configured, the announcement arrives
    # as a private message from the notification bot.
    msg = self.get_last_message()
    self.assertEqual(msg.recipient.type, Recipient.PERSONAL)
    self.assertEqual(msg.sender_id,
                     get_user_profile_by_email('notification-bot@zulip.com').id)
    expected_msg = "Hi there!  %s just created a new stream #**%s**." % (invitee_full_name, invite_streams[0])
    self.assertEqual(msg.content, expected_msg)
def test_successful_subscriptions_notifies_stream(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions should notify when a new stream is created.
    """
    invitee = "iago@zulip.com"
    invitee_full_name = 'Iago'
    current_stream = self.get_streams(invitee)[0]
    invite_streams = self.make_random_stream_names([current_stream])[:1]
    # Configure a notifications stream so the announcement goes to a
    # stream instead of a private message.
    notifications_stream = Stream.objects.get(name=current_stream, realm=self.realm)
    self.realm.notifications_stream = notifications_stream
    self.realm.save()
    # Delete the UserProfile from the cache so the realm change will be
    # picked up
    cache.cache_delete(cache.user_profile_by_email_cache_key(invitee))
    result = self.common_subscribe_to_streams(
        invitee,
        invite_streams,
        extra_post_data=dict(
            announce='true',
            principals='["%s"]' % (self.user_profile.email,)
        ),
    )
    self.assert_json_success(result)
    msg = self.get_last_message()
    self.assertEqual(msg.recipient.type, Recipient.STREAM)
    self.assertEqual(msg.sender_id,
                     get_user_profile_by_email('notification-bot@zulip.com').id)
    expected_msg = "%s just created a new stream #**%s**." % (invitee_full_name, invite_streams[0])
    self.assertEqual(msg.content, expected_msg)
def test_successful_cross_realm_notification(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions in a new realm
    should notify with a proper new stream link
    """
    (realm, _) = do_create_realm("testrealm", "Test Realm")
    notifications_stream = Stream.objects.get(name='announce', realm=realm)
    realm.notifications_stream = notifications_stream
    realm.save()
    invite_streams = ["cross_stream"]
    # Move an existing user into the new realm.
    user = get_user_profile_by_email("AARON@zulip.com")
    user.realm = realm
    user.save()
    # Delete the UserProfile from the cache so the realm change will be
    # picked up
    cache.cache_delete(cache.user_profile_by_email_cache_key(user.email))
    result = self.common_subscribe_to_streams(
        user.email,
        invite_streams,
        extra_post_data=dict(
            announce='true'
        ),
    )
    self.assert_json_success(result)
    msg = self.get_last_message()
    self.assertEqual(msg.recipient.type, Recipient.STREAM)
    self.assertEqual(msg.sender_id,
                     get_user_profile_by_email('notification-bot@zulip.com').id)
    # The rendered announcement must link the new stream by id, so the
    # link stays valid across renames.
    stream_id = Stream.objects.latest('id').id
    expected_rendered_msg = '<p>%s just created a new stream <a class="stream" data-stream-id="%d" href="/#narrow/stream/%s">#%s</a>.</p>' % (
        user.full_name, stream_id, invite_streams[0], invite_streams[0])
    self.assertEqual(msg.rendered_content, expected_rendered_msg)
def test_successful_subscriptions_notifies_with_escaping(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions should notify when a new stream is created.
    """
    invitee = "iago@zulip.com"
    invitee_full_name = 'Iago'
    current_stream = self.get_streams(invitee)[0]
    notifications_stream = Stream.objects.get(name=current_stream, realm=self.realm)
    self.realm.notifications_stream = notifications_stream
    self.realm.save()
    # Stream name contains markdown-sensitive characters; the raw
    # message content should still contain it verbatim.
    invite_streams = ['strange ) \\ test']
    result = self.common_subscribe_to_streams(
        invitee,
        invite_streams,
        extra_post_data={
            'announce': 'true',
            'principals': '["%s"]' % (self.user_profile.email,)
        },
    )
    self.assert_json_success(result)
    msg = self.get_last_message()
    self.assertEqual(msg.sender_id,
                     get_user_profile_by_email('notification-bot@zulip.com').id)
    expected_msg = "%s just created a new stream #**%s**." % (invitee_full_name, invite_streams[0])
    self.assertEqual(msg.content, expected_msg)
def test_non_ascii_stream_subscription(self):
    # type: () -> None
    """
    A stream whose name contains non-ASCII characters can be
    subscribed to just like any other stream.
    """
    non_ascii_name = u"hümbüǵ"
    self.helper_check_subs_before_and_after_add(
        self.streams + [non_ascii_name], {}, [non_ascii_name],
        self.streams, self.test_email, self.streams + [non_ascii_name])
def test_subscriptions_add_too_long(self):
    # type: () -> None
    """
    Subscribing to a stream whose name exceeds the 60-character limit
    returns a JSON error.
    """
    # One character over the limit.
    long_stream_name = "a" * 61
    result = self.common_subscribe_to_streams(self.test_email, [long_stream_name])
    self.assert_json_error(result,
                           "Stream name (%s) too long." % (long_stream_name,))
def test_user_settings_for_adding_streams(self):
    # type: () -> None
    """
    Stream *creation* is gated on UserProfile.can_create_streams, but
    subscribing to an already-existing stream is not.
    """
    with mock.patch('zerver.models.UserProfile.can_create_streams', return_value=False):
        result = self.common_subscribe_to_streams(self.test_email, ['stream1'])
        self.assert_json_error(result, 'User cannot create streams.')

    with mock.patch('zerver.models.UserProfile.can_create_streams', return_value=True):
        # Creates 'stream2'.
        result = self.common_subscribe_to_streams(self.test_email, ['stream2'])
        self.assert_json_success(result)

    # User should still be able to subscribe to an existing stream
    with mock.patch('zerver.models.UserProfile.can_create_streams', return_value=False):
        result = self.common_subscribe_to_streams(self.test_email, ['stream2'])
        self.assert_json_success(result)
def test_subscriptions_add_invalid_stream(self):
    # type: () -> None
    """
    Subscribing to an invalid stream name (currently the empty string
    is the only such name, per valid_stream_name in zerver/views.py)
    returns a JSON error.
    """
    invalid_stream_name = ""
    result = self.common_subscribe_to_streams(self.test_email, [invalid_stream_name])
    self.assert_json_error(result,
                           "Invalid stream name (%s)." % (invalid_stream_name,))
def assert_adding_subscriptions_for_principal(self, invitee, streams, invite_only=False):
    # type: (Text, List[Text], bool) -> None
    """
    Calling POST /json/users/me/subscriptions on behalf of another principal (for
    whom you have permission to add subscriptions) should successfully add
    those subscriptions and send a message to the subscribee notifying
    them.

    Only the first entry of `streams` is actually added (to keep the
    notification message easy to check).
    """
    other_profile = get_user_profile_by_email(invitee)
    current_streams = self.get_streams(invitee)
    self.assertIsInstance(other_profile, UserProfile)
    self.assertNotEqual(len(current_streams), 0)  # necessary for full test coverage
    self.assertNotEqual(len(streams), 0)  # necessary for full test coverage
    streams_to_sub = streams[:1]  # just add one, to make the message easier to check
    streams_to_sub.extend(current_streams)
    self.helper_check_subs_before_and_after_add(streams_to_sub,
                                                {"principals": ujson.dumps([invitee])}, streams[:1], current_streams,
                                                invitee, streams_to_sub, invite_only=invite_only)
    # verify that the user was sent a message informing them about the subscription
    msg = self.get_last_message()
    self.assertEqual(msg.recipient.type, msg.recipient.PERSONAL)
    self.assertEqual(msg.sender_id,
                     get_user_profile_by_email("notification-bot@zulip.com").id)
    expected_msg = ("Hi there!  We thought you'd like to know that %s just "
                    "subscribed you to the %sstream #**%s**."
                    % (self.user_profile.full_name,
                       '**invite-only** ' if invite_only else '',
                       streams[0]))
    # Public streams get an extra hint about viewing history.
    if not Stream.objects.get(name=streams[0]).invite_only:
        expected_msg += ("\nYou can see historical content on a "
                         "non-invite-only stream by narrowing to it.")
    self.assertEqual(msg.content, expected_msg)
    recipients = get_display_recipient(msg.recipient)
    self.assertEqual(len(recipients), 1)
    assert isinstance(recipients, Sequence)
    assert isinstance(recipients[0], Mapping)
    # The 2 assert statements above are required to make the mypy check pass.
    # They inform mypy that in the line below, recipients is a Sequence of Mappings.
    self.assertEqual(recipients[0]['email'], invitee)
def test_multi_user_subscription(self):
    # type: () -> None
    """
    Subscribing multiple users to a new stream sends the expected
    subscription add/peer_add events, and peer_add events are not sent
    to the users being added themselves.
    """
    email1 = 'cordelia@zulip.com'
    email2 = 'iago@zulip.com'
    realm = get_realm("zulip")
    streams_to_sub = ['multi_user_stream']
    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        with queries_captured() as queries:
            self.common_subscribe_to_streams(
                self.test_email,
                streams_to_sub,
                dict(principals=ujson.dumps([email1, email2])),
            )
    self.assert_max_length(queries, 43)

    self.assert_length(events, 8)
    for ev in [x for x in events if x['event']['type'] not in ('message', 'stream')]:
        if isinstance(ev['event']['subscriptions'][0], dict):
            # Full subscription objects are only in "add" events.
            self.assertEqual(ev['event']['op'], 'add')
            self.assertEqual(
                set(ev['event']['subscriptions'][0]['subscribers']),
                set([email1, email2])
            )
        else:
            # Check "peer_add" events for streams users were
            # never subscribed to, in order for the neversubscribed
            # structure to stay up-to-date.
            self.assertEqual(ev['event']['op'], 'peer_add')

    stream = get_stream('multi_user_stream', realm)
    self.assertEqual(stream.num_subscribers(), 2)

    # Now add ourselves
    events = []
    with tornado_redirected_to_list(events):
        with queries_captured() as queries:
            self.common_subscribe_to_streams(
                self.test_email,
                streams_to_sub,
                dict(principals=ujson.dumps([self.test_email])),
            )
    self.assert_max_length(queries, 8)

    self.assert_length(events, 2)
    add_event, add_peer_event = events
    self.assertEqual(add_event['event']['type'], 'subscription')
    self.assertEqual(add_event['event']['op'], 'add')
    self.assertEqual(add_event['users'], [get_user_profile_by_email(self.test_email).id])
    self.assertEqual(
        set(add_event['event']['subscriptions'][0]['subscribers']),
        set([email1, email2, self.test_email])
    )

    # NOTE(review): 16 appears to be the number of other active users in the
    # test realm who receive the peer_add notification — confirm against fixtures.
    self.assertEqual(len(add_peer_event['users']), 16)
    self.assertEqual(add_peer_event['event']['type'], 'subscription')
    self.assertEqual(add_peer_event['event']['op'], 'peer_add')
    self.assertEqual(add_peer_event['event']['user_id'], self.user_profile.id)

    stream = get_stream('multi_user_stream', realm)
    self.assertEqual(stream.num_subscribers(), 3)

    # Finally, add othello.
    events = []
    email3 = 'othello@zulip.com'
    user_profile = get_user_profile_by_email(email3)
    stream = get_stream('multi_user_stream', realm)
    with tornado_redirected_to_list(events):
        bulk_add_subscriptions([stream], [user_profile])

    self.assert_length(events, 2)
    add_event, add_peer_event = events

    self.assertEqual(add_event['event']['type'], 'subscription')
    self.assertEqual(add_event['event']['op'], 'add')
    self.assertEqual(add_event['users'], [get_user_profile_by_email(email3).id])
    self.assertEqual(
        set(add_event['event']['subscriptions'][0]['subscribers']),
        set([email1, email2, email3, self.test_email])
    )

    # We don't send a peer_add event to othello
    self.assertNotIn(user_profile.id, add_peer_event['users'])
    self.assertEqual(len(add_peer_event['users']), 16)
    self.assertEqual(add_peer_event['event']['type'], 'subscription')
    self.assertEqual(add_peer_event['event']['op'], 'peer_add')
    self.assertEqual(add_peer_event['event']['user_id'], user_profile.id)
def test_users_getting_add_peer_event(self):
    # type: () -> None
    """
    Check users getting add_peer_event is correct
    """
    streams_to_sub = ['multi_user_stream']
    users_to_subscribe = [self.test_email, "othello@zulip.com"]
    self.common_subscribe_to_streams(
        self.test_email,
        streams_to_sub,
        dict(principals=ujson.dumps(users_to_subscribe)))

    new_users_to_subscribe = ["iago@zulip.com", "cordelia@zulip.com"]
    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        self.common_subscribe_to_streams(
            self.test_email,
            streams_to_sub,
            dict(principals=ujson.dumps(new_users_to_subscribe)),
        )

    # NOTE(review): indices 2 and 3 assume a fixed event ordering from the
    # subscribe path — confirm if the event stream changes.
    add_peer_events = [events[2], events[3]]
    for add_peer_event in add_peer_events:
        self.assertEqual(add_peer_event['event']['type'], 'subscription')
        self.assertEqual(add_peer_event['event']['op'], 'peer_add')
        event_sent_to_ids = add_peer_event['users']
        user_dict = [get_user_profile_by_id(user_id).email
                     for user_id in event_sent_to_ids]
        for user in new_users_to_subscribe:
            # Make sure new users subscribed to stream is not in
            # peer_add event recipient list
            self.assertNotIn(user, user_dict)
        for old_user in users_to_subscribe:
            # Check non new users are in peer_add event recipient list.
            self.assertIn(old_user, user_dict)
def test_users_getting_remove_peer_event(self):
    # type: () -> None
    """
    Check users getting remove_peer_event is correct
    """
    email1 = 'othello@zulip.com'
    email2 = 'cordelia@zulip.com'
    email3 = 'hamlet@zulip.com'
    email4 = 'iago@zulip.com'

    stream1 = self.make_stream('stream1')
    stream2 = self.make_stream('stream2')
    private = self.make_stream('private_stream', invite_only=True)

    self.subscribe_to_stream(email1, 'stream1')
    self.subscribe_to_stream(email2, 'stream1')
    self.subscribe_to_stream(email3, 'stream1')

    self.subscribe_to_stream(email2, 'stream2')

    self.subscribe_to_stream(email1, 'private_stream')
    self.subscribe_to_stream(email2, 'private_stream')
    self.subscribe_to_stream(email3, 'private_stream')

    user1 = get_user_profile_by_email(email1)
    user2 = get_user_profile_by_email(email2)
    user3 = get_user_profile_by_email(email3)
    user4 = get_user_profile_by_email(email4)

    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        bulk_remove_subscriptions(
            users=[user1, user2],
            streams=[stream1, stream2, private]
        )

    peer_events = [e for e in events
                   if e['event'].get('op') == 'peer_remove']

    # Flatten each peer_remove event into (recipient, removed user, stream)
    # triples so we can assert membership directly.
    notifications = set()
    for event in peer_events:
        for user_id in event['users']:
            for stream_name in event['event']['subscriptions']:
                removed_user_id = event['event']['user_id']
                notifications.add((user_id, removed_user_id, stream_name))

    # POSITIVE CASES FIRST
    self.assertIn((user3.id, user1.id, 'stream1'), notifications)
    self.assertIn((user4.id, user1.id, 'stream1'), notifications)

    self.assertIn((user3.id, user2.id, 'stream1'), notifications)
    self.assertIn((user4.id, user2.id, 'stream1'), notifications)

    self.assertIn((user1.id, user2.id, 'stream2'), notifications)
    self.assertIn((user3.id, user2.id, 'stream2'), notifications)
    self.assertIn((user4.id, user2.id, 'stream2'), notifications)

    self.assertIn((user3.id, user1.id, 'private_stream'), notifications)
    self.assertIn((user3.id, user2.id, 'private_stream'), notifications)

    # NEGATIVE

    # don't be notified if you are being removed yourself
    self.assertNotIn((user1.id, user1.id, 'stream1'), notifications)

    # don't send false notifications for folks that weren't actually
    # subscribed in the first place
    self.assertNotIn((user3.id, user1.id, 'stream2'), notifications)

    # don't send notifications for random people
    self.assertNotIn((user3.id, user4.id, 'stream2'), notifications)

    # don't send notifications to unsubscribed people for private streams
    self.assertNotIn((user4.id, user1.id, 'private_stream'), notifications)
def test_bulk_subscribe_MIT(self):
    # type: () -> None
    """
    Subscribing in a Zephyr-mirroring realm (mit.edu) should generate no
    tornado events and stay within a small query budget.
    """
    realm = get_realm("mit")
    streams = ["stream_%s" % i for i in range(40)]
    for stream_name in streams:
        self.make_stream(stream_name, realm=realm)

    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        with queries_captured() as queries:
            self.common_subscribe_to_streams(
                'starnine@mit.edu',
                streams,
                dict(principals=ujson.dumps(['starnine@mit.edu'])),
            )
    # Make sure Zephyr mirroring realms such as MIT do not get
    # any tornado subscription events
    self.assert_length(events, 0)

    self.assert_max_length(queries, 7)
def test_bulk_subscribe_many(self):
    # type: () -> None
    """
    Subscribing to a large batch of streams at once must not issue a
    query per stream.
    """
    # Create a whole bunch of streams
    stream_names = ["stream_%s" % num for num in range(20)]
    for stream_name in stream_names:
        self.make_stream(stream_name)

    with queries_captured() as queries:
        self.common_subscribe_to_streams(
            self.test_email,
            stream_names,
            dict(principals=ujson.dumps([self.test_email])),
        )
    # Make sure we don't make O(streams) queries
    self.assert_max_length(queries, 10)
@slow("common_subscribe_to_streams is slow")
def test_subscriptions_add_for_principal(self):
    # type: () -> None
    """
    You can subscribe other people to streams.
    """
    invitee_email = "iago@zulip.com"
    existing_streams = self.get_streams(invitee_email)
    fresh_streams = self.make_random_stream_names(existing_streams)
    self.assert_adding_subscriptions_for_principal(invitee_email, fresh_streams)
@slow("common_subscribe_to_streams is slow")
def test_subscriptions_add_for_principal_invite_only(self):
    # type: () -> None
    """
    You can subscribe other people to invite only streams.
    """
    invitee_email = "iago@zulip.com"
    existing_streams = self.get_streams(invitee_email)
    fresh_streams = self.make_random_stream_names(existing_streams)
    self.assert_adding_subscriptions_for_principal(
        invitee_email, fresh_streams, invite_only=True)
@slow("common_subscribe_to_streams is slow")
def test_non_ascii_subscription_for_principal(self):
    # type: () -> None
    """
    Subscribing another user still works when the stream name contains
    non-ASCII characters.
    """
    self.assert_adding_subscriptions_for_principal("iago@zulip.com", [u"hümbüǵ"])
def test_subscription_add_invalid_principal(self):
    # type: () -> None
    """
    Calling subscribe on behalf of a principal that does not exist
    should return a JSON error.
    """
    invalid_principal = "rosencrantz-and-guildenstern@zulip.com"
    # verify that invalid_principal actually doesn't exist
    with self.assertRaises(UserProfile.DoesNotExist):
        get_user_profile_by_email(invalid_principal)
    result = self.common_subscribe_to_streams(self.test_email, self.streams,
                                              {"principals": ujson.dumps([invalid_principal])})
    # The API deliberately returns a 403 authorization error rather than
    # a "user does not exist" error.
    self.assert_json_error(result, "User not authorized to execute queries on behalf of '%s'"
                           % (invalid_principal,), status_code=403)
def test_subscription_add_principal_other_realm(self):
    # type: () -> None
    """
    Calling subscribe on behalf of a principal in another realm
    should return a JSON error.
    """
    principal = "starnine@mit.edu"
    profile = get_user_profile_by_email(principal)
    # verify that principal exists (thus, the reason for the error is the cross-realming)
    self.assertIsInstance(profile, UserProfile)
    result = self.common_subscribe_to_streams(self.test_email, self.streams,
                                              {"principals": ujson.dumps([principal])})
    self.assert_json_error(result, "User not authorized to execute queries on behalf of '%s'"
                           % (principal,), status_code=403)
def helper_check_subs_before_and_after_remove(self, subscriptions, json_dict,
                                              email, new_subs):
    # type: (List[Text], Dict[str, Any], Text, List[Text]) -> None
    """
    Check result of removing subscriptions.

    Unlike adding subscriptions, you can only remove subscriptions
    for yourself, so the result format is different.

    {"msg": "",
     "removed": ["Denmark", "Scotland", "Verona"],
     "not_subscribed": ["Rome"], "result": "success"}

    `json_dict` maps expected response keys (e.g. "removed",
    "not_subscribed") to their expected values; `new_subs` is the full
    set of streams `email` should be left subscribed to.
    """
    result = self.client_delete("/json/users/me/subscriptions",
                                {"subscriptions": ujson.dumps(subscriptions)})
    self.assert_json_success(result)
    json = ujson.loads(result.content)
    for key, val in six.iteritems(json_dict):
        self.assertEqual(sorted(val), sorted(json[key]))  # we don't care about the order of the items
    new_streams = self.get_streams(email)
    self.assertEqual(sorted(new_streams), sorted(new_subs))
def test_successful_subscriptions_remove(self):
    # type: () -> None
    """
    Calling DELETE /json/users/me/subscriptions should successfully remove streams,
    and should determine which were removed vs which weren't subscribed to.
    We cannot randomly generate stream names because the remove code
    verifies whether streams exist.
    """
    if len(self.streams) < 2:
        self.fail()  # necessary for full test coverage
    streams_to_remove = self.streams[1:]
    # Collect real streams the user is *not* subscribed to, so we can
    # verify they land in "not_subscribed" rather than "removed".
    not_subbed = [stream.name for stream in Stream.objects.all()
                  if stream.name not in self.streams]
    random.shuffle(not_subbed)
    self.assertNotEqual(len(not_subbed), 0)  # necessary for full test coverage
    try_to_remove = not_subbed[:3]  # attempt to remove up to 3 streams not already subbed to
    streams_to_remove.extend(try_to_remove)
    self.helper_check_subs_before_and_after_remove(streams_to_remove,
        {"removed": self.streams[1:], "not_subscribed": try_to_remove},
        self.test_email, [self.streams[0]])
def test_subscriptions_remove_fake_stream(self):
    # type: () -> None
    """
    Calling DELETE /json/users/me/subscriptions on a stream that doesn't exist
    should return a JSON error.
    """
    random_streams = self.make_random_stream_names(self.streams)
    self.assertNotEqual(len(random_streams), 0)  # necessary for full test coverage
    streams_to_remove = random_streams[:1]  # pick only one fake stream, to make checking the error message easy
    result = self.client_delete("/json/users/me/subscriptions",
                                {"subscriptions": ujson.dumps(streams_to_remove)})
    self.assert_json_error(result, "Stream(s) (%s) do not exist" % (random_streams[0],))
def helper_subscriptions_exists(self, stream, exists, subscribed):
    # type: (Text, bool, bool) -> None
    """
    A helper function that calls /json/subscriptions/exists on a stream and
    verifies that the returned JSON dictionary has the exists and
    subscribed values passed in as parameters. (If subscribed should not be
    present, pass in None.)
    """
    result = self.client_post("/json/subscriptions/exists",
                              {"stream": stream})
    json = ujson.loads(result.content)
    self.assertIn("exists", json)
    self.assertEqual(json["exists"], exists)
    # A nonexistent stream yields a 404 rather than a JSON success.
    if exists:
        self.assert_json_success(result)
    else:
        self.assertEqual(result.status_code, 404)
    if subscribed:
        self.assertIn("subscribed", json)
        self.assertEqual(json["subscribed"], subscribed)
def test_successful_subscriptions_exists_subbed(self):
    # type: () -> None
    """
    /json/subscriptions/exists on a stream you are subscribed to should
    report both that it exists and that you are subscribed.
    """
    self.assertNotEqual(0, len(self.streams))  # necessary for full test coverage
    subscribed_stream = self.streams[0]
    self.helper_subscriptions_exists(subscribed_stream, True, True)
def test_successful_subscriptions_exists_not_subbed(self):
    # type: () -> None
    """
    Calling /json/subscriptions/exist on a stream to which you are not
    subbed should return that it exists and that you are not subbed.
    """
    all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.realm)]
    # Set difference: real streams minus the ones we are subscribed to.
    streams_not_subbed = list(set(all_stream_names) - set(self.streams))
    self.assertNotEqual(len(streams_not_subbed), 0)  # necessary for full test coverage
    self.helper_subscriptions_exists(streams_not_subbed[0], True, False)
def test_subscriptions_does_not_exist(self):
    # type: () -> None
    """
    /json/subscriptions/exists on a nonexistent stream should report
    that the stream does not exist.
    """
    fake_streams = self.make_random_stream_names(self.streams)
    self.assertNotEqual(0, len(fake_streams))  # necessary for full test coverage
    self.helper_subscriptions_exists(fake_streams[0], False, False)
def test_subscriptions_exist_invalid_name(self):
    # type: () -> None
    """
    Calling /json/subscriptions/exist on a stream whose name is invalid (as
    defined by valid_stream_name in zerver/views.py) should return a JSON
    error.
    """
    # currently, the only invalid stream name is the empty string
    invalid_stream_name = ""
    result = self.client_post("/json/subscriptions/exists",
                              {"stream": invalid_stream_name})
    self.assert_json_error(result, "Invalid characters in stream name")
def test_existing_subscriptions_autosubscription(self):
    # type: () -> None
    """
    Call /json/subscriptions/exist on an existing stream and autosubscribe to it.
    """
    stream_name = self.streams[0]
    result = self.client_post("/json/subscriptions/exists",
                              {"stream": stream_name, "autosubscribe": True})
    self.assert_json_success(result)
    json = ujson.loads(result.content)
    self.assertIn("exists", json)
    self.assertTrue(json["exists"])
def get_subscription(self, user_profile, stream_name):
    # type: (UserProfile, Text) -> Subscription
    """
    Look up the Subscription row tying `user_profile` to the stream named
    `stream_name` in this test's realm. Raises DoesNotExist if absent.
    """
    stream = Stream.objects.get(realm=self.realm, name=stream_name)
    return Subscription.objects.get(
        user_profile=user_profile,
        recipient__type=Recipient.STREAM,
        recipient__type_id=stream.id,
    )
def test_subscriptions_add_notification_default_true(self):
    # type: () -> None
    """
    When creating a subscription, the desktop and audible notification
    settings for that stream are derived from the global notification
    settings.
    """
    invitee = "iago@zulip.com"
    user_profile = get_user_profile_by_email(invitee)
    user_profile.enable_stream_desktop_notifications = True
    user_profile.enable_stream_sounds = True
    user_profile.save()
    current_stream = self.get_streams(invitee)[0]
    invite_streams = self.make_random_stream_names([current_stream])
    self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
    subscription = self.get_subscription(user_profile, invite_streams[0])

    # Also exercises Subscription.__str__ (Recipient repr is mocked out
    # to keep the expected string stable).
    with mock.patch('zerver.models.Recipient.__unicode__', return_value='recip'):
        self.assertEqual(str(subscription),
                         u'<Subscription: '
                         '<UserProfile: iago@zulip.com <Realm: zulip.com 1>> -> recip>')

    self.assertTrue(subscription.desktop_notifications)
    self.assertTrue(subscription.audible_notifications)
def test_subscriptions_add_notification_default_false(self):
    # type: () -> None
    """
    When creating a subscription, the desktop and audible notification
    settings for that stream are derived from the global notification
    settings.

    Mirror image of test_subscriptions_add_notification_default_true.
    """
    invitee = "iago@zulip.com"
    user_profile = get_user_profile_by_email(invitee)
    user_profile.enable_stream_desktop_notifications = False
    user_profile.enable_stream_sounds = False
    user_profile.save()
    current_stream = self.get_streams(invitee)[0]
    invite_streams = self.make_random_stream_names([current_stream])
    self.assert_adding_subscriptions_for_principal(invitee, invite_streams)
    subscription = self.get_subscription(user_profile, invite_streams[0])
    self.assertFalse(subscription.desktop_notifications)
    self.assertFalse(subscription.audible_notifications)
class GetPublicStreamsTest(ZulipTestCase):
    """Tests for the GET /api/v1/streams endpoint and its filter flags."""

    def test_public_streams_api(self):
        # type: () -> None
        """
        Ensure that the query we use to get public streams successfully returns
        a list of streams
        """
        email = 'hamlet@zulip.com'
        self.login(email)

        # Check it correctly lists the user's subs with include_public=false
        result = self.client_get("/api/v1/streams?include_public=false", **self.api_auth(email))
        result2 = self.client_get("/api/v1/users/me/subscriptions", **self.api_auth(email))

        self.assert_json_success(result)
        json = ujson.loads(result.content)

        self.assertIn("streams", json)
        self.assertIsInstance(json["streams"], list)

        self.assert_json_success(result2)
        json2 = ujson.loads(result2.content)

        # include_public=false should match the subscriptions endpoint exactly.
        self.assertEqual(sorted([s["name"] for s in json["streams"]]),
                         sorted([s["name"] for s in json2["subscriptions"]]))

        # Check it correctly lists all public streams with include_subscribed=false
        result = self.client_get("/api/v1/streams?include_public=true&include_subscribed=false",
                                 **self.api_auth(email))
        self.assert_json_success(result)

        json = ujson.loads(result.content)
        all_streams = [stream.name for stream in
                       Stream.objects.filter(realm=get_user_profile_by_email(email).realm)]
        self.assertEqual(sorted(s["name"] for s in json["streams"]),
                         sorted(all_streams))

        # Check non-superuser can't use include_all_active
        result = self.client_get("/api/v1/streams?include_all_active=true",
                                 **self.api_auth(email))
        self.assertEqual(result.status_code, 400)
class StreamIdTest(ZulipTestCase):
    """Tests for the /json/get_stream_id endpoint."""

    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)
        self.login(self.email)

    def test_get_stream_id(self):
        # type: () -> None
        # Use an arbitrary stream the user is already subscribed to.
        stream = gather_subscriptions(self.user_profile)[0][0]
        result = self.client_get("/json/get_stream_id?stream=%s" % (stream['name'],))
        self.assert_json_success(result)
        self.assertEqual(result.json()['stream_id'], stream['stream_id'])

    def test_get_stream_id_wrong_name(self):
        # type: () -> None
        result = self.client_get("/json/get_stream_id?stream=wrongname")
        self.assert_json_error(result, u'No such stream name')
class InviteOnlyStreamTest(ZulipTestCase):
    """Tests for access control around invite-only (private) streams."""

    def test_must_be_subbed_to_send(self):
        # type: () -> None
        """
        If you try to send a message to an invite-only stream to which
        you aren't subscribed, you'll get a 400.
        """
        self.login("hamlet@zulip.com")
        # Create Saxony as an invite-only stream.
        self.assert_json_success(
            self.common_subscribe_to_streams("hamlet@zulip.com", ["Saxony"],
                                             invite_only=True))

        email = "cordelia@zulip.com"
        with self.assertRaises(JsonableError):
            self.send_message(email, "Saxony", Recipient.STREAM)

    def test_list_respects_invite_only_bit(self):
        # type: () -> None
        """
        Make sure that /api/v1/users/me/subscriptions properly returns
        the invite-only bit for streams that are invite-only
        """
        email = 'hamlet@zulip.com'
        self.login(email)

        result1 = self.common_subscribe_to_streams(email, ["Saxony"], invite_only=True)
        self.assert_json_success(result1)
        result2 = self.common_subscribe_to_streams(email, ["Normandy"], invite_only=False)
        self.assert_json_success(result2)
        result = self.client_get("/api/v1/users/me/subscriptions", **self.api_auth(email))
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertIn("subscriptions", json)
        for sub in json["subscriptions"]:
            if sub['name'] == "Normandy":
                self.assertEqual(sub['invite_only'], False, "Normandy was mistakenly marked invite-only")
            if sub['name'] == "Saxony":
                self.assertEqual(sub['invite_only'], True, "Saxony was not properly marked invite-only")

    @slow("lots of queries")
    def test_inviteonly(self):
        # type: () -> None
        # Creating an invite-only stream is allowed
        email = 'hamlet@zulip.com'
        stream_name = "Saxony"

        result = self.common_subscribe_to_streams(email, [stream_name], invite_only=True)
        self.assert_json_success(result)

        json = ujson.loads(result.content)
        self.assertEqual(json["subscribed"], {email: [stream_name]})
        self.assertEqual(json["already_subscribed"], {})

        # Subscribing oneself to an invite-only stream is not allowed
        email = "othello@zulip.com"
        self.login(email)
        result = self.common_subscribe_to_streams(email, [stream_name])
        self.assert_json_error(result, 'Unable to access stream (Saxony).')

        # authorization_errors_fatal=False works: the request succeeds and the
        # inaccessible stream is reported under "unauthorized" instead.
        email = "othello@zulip.com"
        self.login(email)
        result = self.common_subscribe_to_streams(email, [stream_name],
                                                  extra_post_data={'authorization_errors_fatal': ujson.dumps(False)})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertEqual(json["unauthorized"], [stream_name])
        self.assertEqual(json["subscribed"], {})
        self.assertEqual(json["already_subscribed"], {})

        # Inviting another user to an invite-only stream is allowed
        email = 'hamlet@zulip.com'
        self.login(email)
        result = self.common_subscribe_to_streams(
            email, [stream_name],
            extra_post_data={'principals': ujson.dumps(["othello@zulip.com"])})
        self.assert_json_success(result)
        json = ujson.loads(result.content)
        self.assertEqual(json["subscribed"], {"othello@zulip.com": [stream_name]})
        self.assertEqual(json["already_subscribed"], {})

        # Make sure both users are subscribed to this stream
        stream_id = Stream.objects.get(name=stream_name).id
        result = self.client_get("/api/v1/streams/%d/members" % (stream_id,),
                                 **self.api_auth(email))
        self.assert_json_success(result)
        json = ujson.loads(result.content)

        self.assertTrue('othello@zulip.com' in json['subscribers'])
        self.assertTrue('hamlet@zulip.com' in json['subscribers'])
class GetSubscribersTest(ZulipTestCase):
    """Tests for the stream-members endpoints and gather_subscriptions."""

    def setUp(self):
        # type: () -> None
        self.email = "hamlet@zulip.com"
        self.user_profile = get_user_profile_by_email(self.email)
        self.login(self.email)

    def check_well_formed_result(self, result, stream_name, realm):
        # type: (Dict[str, Any], Text, Realm) -> None
        """
        A successful call to get_subscribers returns the list of subscribers in
        the form:

        {"msg": "",
         "result": "success",
         "subscribers": ["hamlet@zulip.com", "prospero@zulip.com"]}
        """
        self.assertIn("subscribers", result)
        self.assertIsInstance(result["subscribers"], list)
        true_subscribers = [user_profile.email for user_profile in self.users_subscribed_to_stream(
            stream_name, realm)]
        self.assertEqual(sorted(result["subscribers"]), sorted(true_subscribers))

    def make_subscriber_request(self, stream_id, email=None):
        # type: (int, Optional[str]) -> HttpResponse
        """Fetch the member list for a stream, defaulting to self.email."""
        if email is None:
            email = self.email
        return self.client_get("/api/v1/streams/%d/members" % (stream_id,),
                               **self.api_auth(email))

    def make_successful_subscriber_request(self, stream_name):
        # type: (Text) -> None
        # Fetch members and verify the payload is complete and correct.
        stream_id = Stream.objects.get(name=stream_name).id
        result = self.make_subscriber_request(stream_id)
        self.assert_json_success(result)
        self.check_well_formed_result(ujson.loads(result.content),
                                      stream_name, self.user_profile.realm)

    def test_subscriber(self):
        # type: () -> None
        """
        get_subscribers returns the list of subscribers.
        """
        stream_name = gather_subscriptions(self.user_profile)[0][0]['name']
        self.make_successful_subscriber_request(stream_name)

    @slow("common_subscribe_to_streams is slow")
    def test_gather_subscriptions(self):
        # type: () -> None
        """
        gather_subscriptions returns correct results with only 4 queries
        """
        streams = ["stream_%s" % i for i in range(10)]
        for stream_name in streams:
            self.make_stream(stream_name)

        users_to_subscribe = [self.email, "othello@zulip.com", "cordelia@zulip.com"]
        ret = self.common_subscribe_to_streams(
            self.email,
            streams,
            dict(principals=ujson.dumps(users_to_subscribe)))
        self.assert_json_success(ret)
        ret = self.common_subscribe_to_streams(
            self.email,
            ["stream_invite_only_1"],
            dict(principals=ujson.dumps(users_to_subscribe)),
            invite_only=True)
        self.assert_json_success(ret)

        with queries_captured() as queries:
            subscriptions = gather_subscriptions(self.user_profile)

        self.assertTrue(len(subscriptions[0]) >= 11)
        for sub in subscriptions[0]:
            if not sub["name"].startswith("stream_"):
                continue
            self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
        # Query count must not scale with the number of streams.
        self.assert_length(queries, 4)

    @slow("common_subscribe_to_streams is slow")
    def test_never_subscribed_streams(self):
        # type: () -> None
        """
        Check never_subscribed streams are fetched correctly and do not
        include invite_only streams.
        """
        realm = get_realm("zulip")
        streams = ["stream_%s" % i for i in range(10)]
        for stream_name in streams:
            self.make_stream(stream_name, realm=realm)
        users_to_subscribe = ["othello@zulip.com", "cordelia@zulip.com"]
        ret = self.common_subscribe_to_streams(
            self.email,
            streams,
            dict(principals=ujson.dumps(users_to_subscribe)))
        self.assert_json_success(ret)
        ret = self.common_subscribe_to_streams(
            self.email,
            ["stream_invite_only_1"],
            dict(principals=ujson.dumps(users_to_subscribe)),
            invite_only=True)
        self.assert_json_success(ret)
        with queries_captured() as queries:
            subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(self.user_profile)
        self.assertTrue(len(never_subscribed) >= 10)

        # Invite only stream should not be there in never_subscribed streams
        for stream_dict in never_subscribed:
            if stream_dict["name"].startswith("stream_"):
                self.assertFalse(stream_dict['name'] == "stream_invite_only_1")
                self.assertTrue(len(stream_dict["subscribers"]) == len(users_to_subscribe))
        self.assert_length(queries, 3)

    @slow("common_subscribe_to_streams is slow")
    def test_gather_subscriptions_mit(self):
        # type: () -> None
        """
        gather_subscriptions returns correct results with only 4 queries
        """
        # Subscribe only ourself because invites are disabled on mit.edu
        users_to_subscribe = ["starnine@mit.edu", "espuser@mit.edu"]
        for email in users_to_subscribe:
            self.subscribe_to_stream(email, "mit_stream")

        ret = self.common_subscribe_to_streams(
            "starnine@mit.edu",
            ["mit_invite_only"],
            dict(principals=ujson.dumps(users_to_subscribe)),
            invite_only=True)
        self.assert_json_success(ret)

        with queries_captured() as queries:
            subscriptions = gather_subscriptions(get_user_profile_by_email("starnine@mit.edu"))

        self.assertTrue(len(subscriptions[0]) >= 2)
        for sub in subscriptions[0]:
            if not sub["name"].startswith("mit_"):
                continue
            if sub["name"] == "mit_invite_only":
                self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
            else:
                # Public Zephyr-realm streams report no subscribers.
                self.assertTrue(len(sub["subscribers"]) == 0)
        self.assert_length(queries, 4)

    def test_nonsubscriber(self):
        # type: () -> None
        """
        Even a non-subscriber to a public stream can query a stream's membership
        with get_subscribers.
        """
        # Create a stream for which Hamlet is the only subscriber.
        stream_name = "Saxony"
        self.common_subscribe_to_streams(self.email, [stream_name])
        other_email = "othello@zulip.com"

        # Fetch the subscriber list as a non-member.
        self.login(other_email)
        self.make_successful_subscriber_request(stream_name)

    def test_subscriber_private_stream(self):
        # type: () -> None
        """
        A subscriber to a private stream can query that stream's membership.
        """
        stream_name = "Saxony"
        self.common_subscribe_to_streams(self.email, [stream_name],
                                         invite_only=True)
        self.make_successful_subscriber_request(stream_name)

    def test_json_get_subscribers_stream_not_exist(self):
        # type: () -> None
        """
        json_get_subscribers also returns the list of subscribers for a stream.
        """
        stream_id = 99999999
        result = self.client_get("/json/streams/%d/members" % (stream_id,))
        self.assert_json_error(result, u'Invalid stream id')

    def test_json_get_subscribers(self):
        # type: () -> None
        """
        json_get_subscribers in zerver/views/streams.py
        also returns the list of subscribers for a stream.
        """
        stream_name = gather_subscriptions(self.user_profile)[0][0]['name']
        stream_id = Stream.objects.get(realm=self.user_profile.realm, name=stream_name).id
        expected_subscribers = gather_subscriptions(self.user_profile)[0][0]['subscribers']
        result = self.client_get("/json/streams/%d/members" % (stream_id,))
        self.assert_json_success(result)
        result_dict = ujson.loads(result.content)
        self.assertIn('subscribers', result_dict)
        self.assertIsInstance(result_dict['subscribers'], list)
        subscribers = []  # type: List[Text]
        for subscriber in result_dict['subscribers']:
            self.assertIsInstance(subscriber, six.string_types)
            subscribers.append(subscriber)
        self.assertEqual(set(subscribers), set(expected_subscribers))

    def test_nonsubscriber_private_stream(self):
        # type: () -> None
        """
        A non-subscriber to a private stream can't query that stream's membership.
        """
        # Create a private stream for which Hamlet is the only subscriber.
        stream_name = "NewStream"
        self.common_subscribe_to_streams(self.email, [stream_name],
                                         invite_only=True)
        other_email = "othello@zulip.com"

        # Try to fetch the subscriber list as a non-member.
        stream_id = Stream.objects.get(name=stream_name).id
        result = self.make_subscriber_request(stream_id, email=other_email)
        self.assert_json_error(result,
                               "Unable to retrieve subscribers for invite-only stream")
| isht3/zulip | zerver/tests/test_subs.py | Python | apache-2.0 | 98,466 |
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re, random, cPickle
from androguard.core.androconf import error, warning, debug, is_ascii_problem
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes.api_permissions import DVM_PERMISSIONS_BY_PERMISSION, DVM_PERMISSIONS_BY_ELEMENT
class ContextField(object):
    """
    Context information for a tainted field: an access mode plus an
    accumulated list of detail entries.
    """
    def __init__(self, mode):
        # mode: the access mode for the field (e.g. an entry of DVM_FIELDS_ACCESS)
        self.mode = mode
        self.details = []

    def set_details(self, details):
        """Append every element of ``details`` to the accumulated list.

        Note: despite the name, this extends rather than replaces the list,
        matching the historical behavior.
        """
        self.details.extend(details)
class ContextMethod(object):
    """
    Context information for a tainted method: an accumulated list of
    detail entries.
    """
    def __init__(self):
        self.details = []

    def set_details(self, details):
        """Append every element of ``details`` to the accumulated list.

        Note: despite the name, this extends rather than replaces the list,
        matching the historical behavior.
        """
        self.details.extend(details)
class ExternalFM(object):
    """
    A field or method that lives outside the analysed DEX file,
    identified by its owning class name, member name and descriptor.
    """
    def __init__(self, class_name, name, descriptor):
        self.class_name, self.name, self.descriptor = class_name, name, descriptor

    def get_class_name(self):
        """Return the name of the owning class."""
        return self.class_name

    def get_name(self):
        """Return the member (field/method) name."""
        return self.name

    def get_descriptor(self):
        """Return the Dalvik type descriptor string."""
        return self.descriptor
class ToString(object):
    """
    Builds a compact category string from a stream of names.

    ``tab`` maps a single-character category (e.g. 'O') to a list of
    regex patterns; each pushed name appends the category character of
    every pattern it matches.  Consecutive 'O' characters are collapsed.
    """
    def __init__(self, tab):
        self.__tab = tab
        # Pre-compile each category's patterns once.
        self.__re_tab = {}
        for i in self.__tab:
            self.__re_tab[i] = [re.compile(j) for j in self.__tab[i]]
        self.__string = ""

    def push(self, name):
        """Match ``name`` against every pattern and append category chars."""
        for i in self.__tab:
            for j in self.__re_tab[i]:
                if j.match(name) is not None:
                    if len(self.__string) > 0:
                        # Collapse runs of 'O' into a single character.
                        if i == 'O' and self.__string[-1] == 'O':
                            continue
                    self.__string += i

    def get_string(self):
        """Return the accumulated category string."""
        return self.__string
class BreakBlock(object):
    """A straight-line run of instructions inside a method.

    Instructions are accumulated via :meth:`push`; ``_start``/``_end``
    track the bytecode offsets covered by the run.
    """
    def __init__(self, _vm, idx):
        self._vm = _vm
        self._start = idx          # offset of the first instruction
        self._end = self._start    # offset just past the last pushed instruction
        self._ins = []             # instructions of this run
        self._ops = []
        self._fields = {}          # fields touched by this run
        self._methods = {}         # methods called by this run
    def get_ops(self):
        return self._ops
    def get_fields(self):
        """Return the mapping of fields accessed by this block."""
        return self._fields
    def get_methods(self):
        """Return the mapping of methods called by this block."""
        return self._methods
    def push(self, ins):
        """Append instruction *ins* and advance the end offset by its length."""
        self._ins.append(ins)
        self._end += ins.get_length()
    def get_start(self):
        """Return the start offset of this block."""
        return self._start
    def get_end(self):
        """Return the offset just past the last instruction."""
        return self._end
    def show(self):
        # Print each instruction indented; the py2 trailing comma keeps the
        # instruction output on the same line as the indent.
        for i in self._ins:
            print "\t\t",
            i.show(0)
DVM_FIELDS_ACCESS = {
"iget" : "R",
"iget-wide" : "R",
"iget-object" : "R",
"iget-boolean" : "R",
"iget-byte" : "R",
"iget-char" : "R",
"iget-short" : "R",
"iput" : "W",
"iput-wide" : "W",
"iput-object" : "W",
"iput-boolean" : "W",
"iput-byte" : "W",
"iput-char" : "W",
"iput-short" : "W",
"sget" : "R",
"sget-wide" : "R",
"sget-object" : "R",
"sget-boolean" : "R",
"sget-byte" : "R",
"sget-char" : "R",
"sget-short" : "R",
"sput" : "W",
"sput-wide" : "W",
"sput-object" : "W",
"sput-boolean" : "W",
"sput-byte" : "W",
"sput-char" : "W",
"sput-short" : "W",
}
class DVMBasicBlock(object):
    """
    A simple basic block of a dalvik method

    Tracks its [start, end) bytecode offsets, predecessor/successor links
    and the taint information (fields, packages, strings) collected while
    instructions are pushed into it.
    """
    def __init__(self, start, vm, method, context):
        self.__vm = vm
        self.method = method
        self.context = context
        self.last_length = 0        # length of the most recently pushed instruction
        self.nb_instructions = 0    # number of instructions pushed so far
        self.fathers = []           # predecessor links
        self.childs = []            # successor links
        self.start = start          # start offset of the block
        self.end = self.start       # offset just past the last instruction
        self.special_ins = {}       # offset -> payload instruction (switch/array data)
        self.name = "%s-BB@0x%x" % (self.method.get_name(), self.start)
        self.exception_analysis = None
        self.tainted_variables = self.context.get_tainted_variables()
        self.tainted_packages = self.context.get_tainted_packages()
        self.notes = []
    def get_notes(self):
        """Return the list of free-form notes attached to this block."""
        return self.notes
    def set_notes(self, value):
        """Replace all notes with the single note *value*."""
        self.notes = [value]
    def add_note(self, note):
        """Append *note* to this block's notes."""
        self.notes.append(note)
    def clear_notes(self):
        """Remove every note."""
        self.notes = []
    def get_instructions(self):
        """
        Get all instructions from a basic block.
        :rtype: Return all instructions in the current basic block
        """
        tmp_ins = []
        idx = 0
        # Walk the whole method and keep the instructions whose offset
        # falls inside [start, end).
        for i in self.method.get_instructions():
            if idx >= self.start and idx < self.end:
                tmp_ins.append(i)
            idx += i.get_length()
        return tmp_ins
    def get_nb_instructions(self):
        """Return the number of instructions pushed into this block."""
        return self.nb_instructions
    def get_method(self):
        """Return the method this block belongs to."""
        return self.method
    def get_name(self):
        # Recomputed on each call rather than returning self.name.
        return "%s-BB@0x%x" % (self.method.get_name(), self.start)
    def get_start(self):
        """Return the start offset of this block."""
        return self.start
    def get_end(self):
        """Return the offset just past the last instruction."""
        return self.end
    def get_last(self):
        """Return the last instruction of this block."""
        return self.get_instructions()[-1]
    def get_next(self):
        """
        Get next basic blocks
        :rtype: a list of the next basic blocks
        """
        return self.childs
    def get_prev(self):
        """
        Get previous basic blocks
        :rtype: a list of the previous basic blocks
        """
        return self.fathers
    def set_fathers(self, f):
        """Register a predecessor tuple."""
        self.fathers.append(f)
    def get_last_length(self):
        """Return the length of the most recently pushed instruction."""
        return self.last_length
    def set_childs(self, values):
        """Wire this block to its successor blocks.

        *values* holds successor offsets; an empty list means fall-through
        to the block right after ``end``.  Each resolved child also gets
        this block registered as a father.
        """
        #print self, self.start, self.end, values
        if values == []:
            next_block = self.context.get_basic_block( self.end + 1 )
            if next_block != None:
                self.childs.append( ( self.end - self.get_last_length(), self.end, next_block ) )
        else:
            for i in values:
                if i != -1:
                    next_block = self.context.get_basic_block( i )
                    if next_block != None:
                        self.childs.append( ( self.end - self.get_last_length(), i, next_block) )
        for c in self.childs:
            if c[2] != None:
                c[2].set_fathers( ( c[1], c[0], self ) )
    def push(self, i):
        """Append instruction *i*: advance the end offset and record any
        taint information (field access, call, new-instance, string)."""
        try:
            self.nb_instructions += 1
            idx = self.end
            self.last_length = i.get_length()
            self.end += self.last_length
            op_value = i.get_op_value()
            # field access (iget*/iput*/sget*/sput* opcode range)
            if (op_value >= 0x52 and op_value <= 0x6d):
                desc = self.__vm.get_cm_field(i.get_ref_kind())
                if self.tainted_variables != None:
                    self.tainted_variables.push_info(TAINTED_FIELD, desc, DVM_FIELDS_ACCESS[i.get_name()][0], idx, self.method)
            # invoke (both plain and range variants)
            elif (op_value >= 0x6e and op_value <= 0x72) or (op_value >= 0x74 and op_value <= 0x78):
                idx_meth = i.get_ref_kind()
                method_info = self.__vm.get_cm_method(idx_meth)
                if self.tainted_packages != None:
                    self.tainted_packages.push_info(method_info[0], TAINTED_PACKAGE_CALL, idx, self.method, idx_meth)
            # new_instance
            elif op_value == 0x22:
                idx_type = i.get_ref_kind()
                type_info = self.__vm.get_cm_type(idx_type)
                if self.tainted_packages != None:
                    self.tainted_packages.push_info(type_info, TAINTED_PACKAGE_CREATE, idx, self.method, None)
            # const-string
            elif (op_value >= 0x1a and op_value <= 0x1b):
                string_name = self.__vm.get_cm_string(i.get_ref_kind())
                if self.tainted_variables != None:
                    self.tainted_variables.push_info(TAINTED_STRING, string_name, "R", idx, self.method)
            # instructions carrying a payload (e.g. packed/sparse switch,
            # see get_special_ins): remember the referenced instruction.
            elif op_value == 0x26 or (op_value >= 0x2b and op_value <= 0x2c):
                code = self.method.get_code().get_bc()
                self.special_ins[idx] = code.get_ins_off(idx + i.get_ref_off() * 2)
        except:
            # NOTE(review): any failure while recording taint info is
            # silently ignored (best effort) -- confirm this is intended.
            pass
    def get_special_ins(self, idx):
        """
        Return the associated instruction to a specific instruction (for example a packed/sparse switch)
        :param idx: the index of the instruction
        :rtype: None or an Instruction
        """
        try:
            return self.special_ins[idx]
        except:
            return None
    def get_exception_analysis(self):
        """Return the ExceptionAnalysis attached to this block, if any."""
        return self.exception_analysis
    def set_exception_analysis(self, exception_analysis):
        """Attach an ExceptionAnalysis to this block."""
        self.exception_analysis = exception_analysis
# Kinds of tainted variables tracked by TaintedVariables.
TAINTED_LOCAL_VARIABLE = 0
TAINTED_FIELD = 1
TAINTED_STRING = 2
class PathVar(object):
    """One access path to a tainted variable: the access flag, the
    instruction offset, a destination method index and the tainted
    variable object itself."""

    def __init__(self, access, idx, dst_idx, info_obj):
        self.access_flag = access
        self.idx = idx
        self.dst_idx = dst_idx
        self.info_obj = info_obj

    def get_var_info(self):
        """Return the description of the tainted variable being accessed."""
        return self.info_obj.get_info()

    def get_access_flag(self):
        """Return the access flag of this path."""
        return self.access_flag

    def get_src(self, cm):
        """Return (class, name, descriptor) of the method referenced by
        ``self.idx`` through class manager *cm*."""
        ref = cm.get_method_ref(self.idx)
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()

    def get_dst(self, cm):
        """Return (class, name, descriptor) of the method referenced by
        ``self.dst_idx`` through class manager *cm*."""
        ref = cm.get_method_ref(self.dst_idx)
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()

    def get_idx(self):
        return self.idx
class TaintedVariable(object):
    """A tainted variable (local, field or string) and every path that
    reads or writes it.

    ``paths`` maps a method index to the list of ``(access, idx)`` pairs
    recorded in that method.
    """
    def __init__(self, var, _type):
        self.var = var
        self.type = _type
        self.paths = {}
        self.__cache = []

    def get_type(self):
        """Return the taint kind (TAINTED_LOCAL_VARIABLE/FIELD/STRING)."""
        return self.type

    def get_info(self):
        """Return the variable description; for fields the last two
        elements of ``var`` are swapped."""
        if self.type == TAINTED_FIELD:
            return [ self.var[0], self.var[2], self.var[1] ]
        return self.var

    def push(self, access, idx, ref):
        """Record an access of kind *access* at offset *idx*, made by the
        method of *ref* (an object exposing ``get_method_idx``)."""
        m_idx = ref.get_method_idx()
        if m_idx not in self.paths:
            self.paths[ m_idx ] = []
        self.paths[ m_idx ].append( (access, idx) )

    def get_paths_access(self, mode):
        """Yield ``(method_idx, path, access, idx)`` for every recorded
        path whose access flag is in *mode*.

        Bug fix: ``self.paths[i]`` is a *list* of ``(access, idx)`` tuples
        (see :meth:`push`), so the previous ``self.paths[i][j]`` lookup
        indexed the list with a tuple and raised ``TypeError``; each pair
        is now unpacked directly.
        """
        for m_idx in self.paths:
            for path in self.paths[ m_idx ]:
                access, idx = path
                if access in mode:
                    yield m_idx, path, access, idx

    def get_paths(self):
        """Return (and cache) every path as ``[(access, idx), method_idx]``."""
        if self.__cache != []:
            return self.__cache
        for i in self.paths:
            for j in self.paths[ i ]:
                self.__cache.append( [j, i] )
        return self.__cache

    def get_paths_length(self):
        """Return the number of distinct methods touching this variable."""
        return len(self.paths)

    def show_paths(self, vm):
        """Print every path via :func:`show_PathVariable`."""
        show_PathVariable( vm, self.get_paths() )
class TaintedVariables(object):
    """Registry of every tainted local variable, field and string of a dex.

    Variables are stored per kind in ``__vars``; two per-method caches
    speed up the field/string lookups.
    """
    def __init__(self, _vm):
        self.__vm = _vm
        self.__vars = {
           TAINTED_LOCAL_VARIABLE : {},
           TAINTED_FIELD : {},
           TAINTED_STRING : {},
        }
        self.__cache_field_by_method = {}
        self.__cache_string_by_method = {}
    # functions to get particulars elements
    def get_string(self, s):
        """Return the TaintedVariable of string *s*, or None."""
        try:
            return self.__vars[ TAINTED_STRING ][ s ]
        except KeyError:
            return None
    def get_field(self, class_name, name, descriptor):
        """Return the TaintedVariable of the given field, or None."""
        key = class_name + descriptor + name
        try:
            return self.__vars[ TAINTED_FIELD ] [ key ]
        except KeyError:
            return None
    def toPathVariable(self, obj):
        """Wrap every path of TaintedVariable *obj* into PathVar objects."""
        z = []
        for i in obj.get_paths():
            access, idx = i[0]
            m_idx = i[1]
            z.append( PathVar(access, idx, m_idx, obj ) )
        return z
    # permission functions
    def get_permissions_method(self, method):
        """Return the permissions exercised by *method* via tainted fields."""
        permissions = []
        # NOTE(review): f1 here is the dict *key* string, so f1[0..2] are
        # its first characters; get_permissions below builds the same key
        # from f.var instead -- confirm which form is intended.
        for f, f1 in self.get_fields():
            data = "%s-%s-%s" % (f1[0], f1[1], f1[2])
            if data in DVM_PERMISSIONS_BY_ELEMENT:
                for path in f.get_paths():
                    access, idx = path[0]
                    m_idx = path[1]
                    if m_idx == method.get_idx():
                        if DVM_PERMISSIONS_BY_ELEMENT[ data ] not in permissions:
                            permissions.append( DVM_PERMISSIONS_BY_ELEMENT[ data ] )
        return permissions
    def get_permissions(self, permissions_needed):
        """
        @param permissions_needed : a list of restricted permissions to get ([] returns all permissions)
        @rtype : a dictionnary of permissions' paths
        """
        permissions = {}
        pn = permissions_needed
        if permissions_needed == []:
            pn = DVM_PERMISSIONS_BY_PERMISSION.keys()
        for f, f1 in self.get_fields():
            data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1])
            if data in DVM_PERMISSIONS_BY_ELEMENT:
                if DVM_PERMISSIONS_BY_ELEMENT[ data ] in pn:
                    try:
                        permissions[ DVM_PERMISSIONS_BY_ELEMENT[ data ] ].extend( self.toPathVariable( f ) )
                    except KeyError:
                        permissions[ DVM_PERMISSIONS_BY_ELEMENT[ data ] ] = []
                        permissions[ DVM_PERMISSIONS_BY_ELEMENT[ data ] ].extend( self.toPathVariable( f ) )
        return permissions
    # global functions
    def get_strings(self):
        """Yield (TaintedVariable, string) for every tainted string."""
        for i in self.__vars[ TAINTED_STRING ]:
            yield self.__vars[ TAINTED_STRING ][ i ], i
    def get_fields(self):
        """Yield (TaintedVariable, key) for every tainted field."""
        for i in self.__vars[ TAINTED_FIELD ]:
            yield self.__vars[ TAINTED_FIELD ][ i ], i
    # specifics functions
    def get_strings_by_method(self, method):
        """Return {TaintedVariable: [(access, idx), ...]} for *method*.

        Best effort: returns the (possibly partial) dict on any error.
        """
        z = {}
        try:
            for i in self.__cache_string_by_method[ method.get_method_idx() ]:
                z[ i ] = []
                for j in i.get_paths():
                    if method.get_method_idx() == j[1]:
                        z[i].append( j[0] )
            return z
        except:
            return z
    def get_fields_by_method(self, method):
        """Return {TaintedVariable: [(access, idx), ...]} for *method*.

        Best effort: returns the (possibly partial) dict on any error.
        """
        z = {}
        try:
            for i in self.__cache_field_by_method[ method.get_method_idx() ]:
                z[ i ] = []
                for j in i.get_paths():
                    if method.get_method_idx() == j[1]:
                        z[i].append( j[0] )
            return z
        except:
            return z
    def add(self, var, _type, _method=None):
        """Register a variable of kind *_type* (no-op if already known)."""
        if _type == TAINTED_FIELD:
            key = var[0] + var[1] + var[2]
            if key not in self.__vars[ TAINTED_FIELD ]:
                self.__vars[ TAINTED_FIELD ][ key ] = TaintedVariable( var, _type )
        elif _type == TAINTED_STRING:
            if var not in self.__vars[ TAINTED_STRING ]:
                self.__vars[ TAINTED_STRING ][ var ] = TaintedVariable( var, _type )
        elif _type == TAINTED_LOCAL_VARIABLE:
            if _method not in self.__vars[ TAINTED_LOCAL_VARIABLE ]:
                self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ] = {}
            if var not in self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ]:
                self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ][ var ] = TaintedVariable( var, _type )
    def push_info(self, _type, var, access, idx, ref):
        """Register an access (*access*, *idx*, from *ref*) on variable
        *var* and update the matching per-method cache."""
        if _type == TAINTED_FIELD:
            self.add( var, _type )
            key = var[0] + var[1] + var[2]
            self.__vars[ _type ][ key ].push( access, idx, ref )
            method_idx = ref.get_method_idx()
            if method_idx not in self.__cache_field_by_method:
                self.__cache_field_by_method[ method_idx ] = set()
            self.__cache_field_by_method[ method_idx ].add( self.__vars[ TAINTED_FIELD ][ key ] )
        elif _type == TAINTED_STRING:
            self.add( var, _type )
            self.__vars[ _type ][ var ].push( access, idx, ref )
            method_idx = ref.get_method_idx()
            if method_idx not in self.__cache_string_by_method:
                self.__cache_string_by_method[ method_idx ] = set()
            self.__cache_string_by_method[ method_idx ].add( self.__vars[ TAINTED_STRING ][ var ] )
# Kinds of tainted package usage (object creation vs. method call), plus
# the one-letter codes used when reporting them.
TAINTED_PACKAGE_CREATE = 0
TAINTED_PACKAGE_CALL = 1
TAINTED_PACKAGE = {
   TAINTED_PACKAGE_CREATE : "C",
   TAINTED_PACKAGE_CALL : "M"
}
def show_Path(vm, path):
    """Print one access path.

    :param vm: the object which represents the dex file
    :param path: a :class:`PathVar` (variable access) or a package path
                 object with get_src/get_dst/get_access_flag
    """
    cm = vm.get_class_manager()
    if isinstance(path, PathVar):
        # Variable access: flag, variable info, offset and destination.
        dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
        info_var = path.get_var_info()
        print "%s %s (0x%x) ---> %s->%s%s" % (path.get_access_flag(),
                                              info_var,
                                              path.get_idx(),
                                              dst_class_name,
                                              dst_method_name,
                                              dst_descriptor)
    else:
        if path.get_access_flag() == TAINTED_PACKAGE_CALL:
            # Method call: show both the caller and the callee.
            src_class_name, src_method_name, src_descriptor = path.get_src( cm )
            dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
            print "%d %s->%s%s (0x%x) ---> %s->%s%s" % (path.get_access_flag(),
                                                        src_class_name,
                                                        src_method_name,
                                                        src_descriptor,
                                                        path.get_idx(),
                                                        dst_class_name,
                                                        dst_method_name,
                                                        dst_descriptor)
        else:
            # Object creation: only the creating method is shown.
            src_class_name, src_method_name, src_descriptor = path.get_src( cm )
            print "%d %s->%s%s (0x%x)" % (path.get_access_flag(),
                                          src_class_name,
                                          src_method_name,
                                          src_descriptor,
                                          path.get_idx())
def get_Path(vm, path):
    """Return a dict describing *path* with "src"/"dst"/"idx" keys.

    :param vm: the object which represents the dex file
    :param path: a :class:`PathVar` or a package path object
    """
    cm = vm.get_class_manager()
    entry = {}
    if isinstance(path, PathVar):
        # Variable access: variable info as source, resolved method as
        # destination.
        entry["src"] = "%s" % path.get_var_info()
        entry["dst"] = "%s %s %s" % path.get_dst(cm)
        entry["idx"] = path.get_idx()
        return entry
    # Package path: calls carry both endpoints, creations only the source.
    entry["src"] = "%s %s %s" % path.get_src(cm)
    if path.get_access_flag() == TAINTED_PACKAGE_CALL:
        entry["dst"] = "%s %s %s" % path.get_dst(cm)
    entry["idx"] = path.get_idx()
    return entry
def show_Paths(vm, paths):
    """
    Show paths of packages
    :param vm: the object which represents the dex file
    :param paths: a list of :class:`PathP` objects
    """
    for current_path in paths:
        show_Path(vm, current_path)
def get_Paths(vm, paths):
    """
    Return paths of packages
    :param vm: the object which represents the dex file
    :param paths: a list of :class:`PathP` objects
    """
    return [get_Path(vm, current_path) for current_path in paths]
def show_PathVariable(vm, paths):
    """Print every access path of a tainted variable.

    Each *path* entry is ``[(access, idx), method_idx]`` as produced by
    :meth:`TaintedVariable.get_paths`.
    """
    for path in paths:
        access, idx = path[0]
        m_idx = path[1]
        method = vm.get_cm_method(m_idx)
        print "%s %x %s->%s %s" % (access, idx, method[0], method[1], method[2][0] + method[2][1])
class PathP(object):
    """One package usage: an access kind, the instruction offset and the
    source/destination method indexes (resolved through a class manager)."""

    def __init__(self, access, idx, src_idx, dst_idx):
        self.access_flag = access
        self.idx = idx
        self.src_idx = src_idx
        self.dst_idx = dst_idx

    def get_access_flag(self):
        """Return the usage kind of this path."""
        return self.access_flag

    def get_dst(self, cm):
        """Resolve the destination method through class manager *cm*."""
        ref = cm.get_method_ref(self.dst_idx)
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()

    def get_src(self, cm):
        """Resolve the source method through class manager *cm*."""
        ref = cm.get_method_ref(self.src_idx)
        return ref.get_class_name(), ref.get_name(), ref.get_descriptor()

    def get_idx(self):
        return self.idx

    def get_src_idx(self):
        return self.src_idx

    def get_dst_idx(self):
        return self.dst_idx
class TaintedPackage(object):
    """One package/class and every place it is created or called.

    ``paths`` keeps two lists of :class:`PathP`, keyed by
    TAINTED_PACKAGE_CREATE and TAINTED_PACKAGE_CALL.
    """
    def __init__(self, vm, name):
        self.vm = vm
        self.name = name
        self.paths = {TAINTED_PACKAGE_CREATE : [], TAINTED_PACKAGE_CALL : []}
    def get_name(self):
        """Return the package (class) name."""
        return self.name
    def gets(self):
        """Return the raw {access: [PathP, ...]} mapping."""
        return self.paths
    def push(self, access, idx, src_idx, dst_idx):
        """Record a new usage and return the created :class:`PathP`."""
        p = PathP( access, idx, src_idx, dst_idx )
        self.paths[ access ].append( p )
        return p
    def get_objects_paths(self):
        """Return the creation (new-instance) paths."""
        return self.paths[ TAINTED_PACKAGE_CREATE ]
    def search_method(self, name, descriptor):
        """
        @param name : a regexp for the name of the method
        @param descriptor : a regexp for the descriptor of the method
        @rtype : a list of called paths
        """
        l = []
        m_name = re.compile(name)
        m_descriptor = re.compile(descriptor)
        for path in self.paths[ TAINTED_PACKAGE_CALL ]:
            _, dst_name, dst_descriptor = path.get_dst(self.vm.get_class_manager())
            if m_name.match( dst_name ) != None and m_descriptor.match( dst_descriptor ) != None:
                l.append( path )
        return l
    def get_method(self, name, descriptor):
        # NOTE(review): PathP does not define get_name()/get_descriptor(),
        # so this loop looks dead/broken on non-empty call paths -- see
        # search_method above for the working lookup; confirm before use.
        l = []
        for path in self.paths[ TAINTED_PACKAGE_CALL ]:
            if path.get_name() == name and path.get_descriptor() == descriptor:
                l.append( path )
        return l
    def get_paths(self):
        """Yield every path, creations and calls alike."""
        for i in self.paths:
            for j in self.paths[ i ]:
                yield j
    def get_paths_length(self):
        """Return the total number of recorded paths."""
        x = 0
        for i in self.paths:
            x += len(self.paths[ i ])
        return x
    def get_methods(self):
        """Return the call paths."""
        return [path for path in self.paths[TAINTED_PACKAGE_CALL]]
    def get_new(self):
        """Return the creation paths."""
        return [path for path in self.paths[TAINTED_PACKAGE_CREATE]]
    def show(self):
        """Print every create/call path of this package."""
        cm = self.vm.get_class_manager()
        print self.get_name()
        for _type in self.paths:
            print "\t -->", _type
            if _type == TAINTED_PACKAGE_CALL:
                for path in self.paths[_type]:
                    print "\t\t => %s <-- %x in %s" % (path.get_dst(cm), path.get_idx(), path.get_src(cm))
            else:
                for path in self.paths[_type]:
                    print "\t\t => %x in %s" % (path.get_idx(), path.get_src(cm))
def show_Permissions(dx):
    """
    Show where permissions are used in a specific application
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # [] asks for every permission (see get_permissions).
    p = dx.get_permissions( [] )
    for i in p:
        print i, ":"
        for j in p[i]:
            show_Path( dx.get_vm(), j )
def show_DynCode(dx):
    """
    Show where dynamic code is used
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # Constructors/loaders that indicate dynamic dex loading, in the same
    # order as before.
    targets = (
        ("Ldalvik/system/BaseDexClassLoader;", "<init>"),
        ("Ldalvik/system/PathClassLoader;", "<init>"),
        ("Ldalvik/system/DexClassLoader;", "<init>"),
        ("Ldalvik/system/DexFile;", "<init>"),
        ("Ldalvik/system/DexFile;", "loadDex"),
    )
    paths = []
    for class_name, method_name in targets:
        paths.extend(dx.get_tainted_packages().search_methods(class_name,
                                                              method_name,
                                                              "."))
    show_Paths(dx.get_vm(), paths)
def show_NativeMethods(dx):
    """
    Show the native methods
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    # Delegates to get_NativeMethods and prints the raw tuple list.
    print get_NativeMethods(dx)
def show_ReflectionCode(dx):
    """
    Show the reflection code
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    """
    reflection_paths = dx.get_tainted_packages().search_methods(
        "Ljava/lang/reflect/Method;", ".", ".")
    show_Paths(dx.get_vm(), reflection_paths)
def get_NativeMethods(dx):
    """
    Return the native methods
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    :rtype: [tuple]
    """
    vm = dx.get_vm()
    # 0x100 is the ACC_NATIVE access flag.
    return [
        (method.get_class_name(), method.get_name(), method.get_descriptor())
        for method in vm.get_methods()
        if method.get_access_flags() & 0x100
    ]
def get_ReflectionCode(dx):
    """
    Return the reflection code
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    :rtype: [dict]
    """
    reflection_paths = dx.get_tainted_packages().search_methods(
        "Ljava/lang/reflect/Method;", ".", ".")
    return get_Paths(dx.get_vm(), reflection_paths)
def is_crypto_code(dx):
    """
    Crypto code is present ?
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    :rtype: boolean
    """
    # Same probes as before, short-circuiting on the first hit.
    for class_name in ("Ljavax/crypto/.", "Ljava/security/spec/."):
        if dx.get_tainted_packages().search_methods(class_name, ".", "."):
            return True
    return False
def is_dyn_code(dx):
    """
    Dalvik Dynamic code loading is present ?
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    :rtype: boolean
    """
    # Same probes as before, short-circuiting on the first hit.
    loaders = (
        ("Ldalvik/system/BaseDexClassLoader;", "<init>"),
        ("Ldalvik/system/PathClassLoader;", "<init>"),
        ("Ldalvik/system/DexClassLoader;", "<init>"),
        ("Ldalvik/system/DexFile;", "<init>"),
        ("Ldalvik/system/DexFile;", "loadDex"),
    )
    for class_name, method_name in loaders:
        if dx.get_tainted_packages().search_methods(class_name, method_name, "."):
            return True
    return False
def is_reflection_code(dx):
    """
    Reflection is present ?
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    :rtype: boolean
    """
    # Same probes as before, short-circuiting on the first hit.
    probes = (
        ("Ljava/lang/reflect/Method;", "."),
        ("Ljava/lang/reflect/Field;", "."),
        ("Ljava/lang/Class;", "forName"),
    )
    for class_name, method_name in probes:
        if dx.get_tainted_packages().search_methods(class_name, method_name, "."):
            return True
    return False
def is_native_code(dx):
    """
    Native code is present ?
    :param dx : the analysis virtual machine
    :type dx: a :class:`VMAnalysis` object
    :rtype: boolean
    """
    # Same probes as before, short-circuiting on the first hit.
    for class_name in ("Ljava/lang/System;", "Ljava/lang/Runtime;"):
        if dx.get_tainted_packages().search_methods(class_name, "load.", "."):
            return True
    return False
class TaintedPackages(object):
    """Registry of every package used by the dex and of the methods that
    use them.

    ``__packages`` maps a class name to its :class:`TaintedPackage`;
    ``__methods`` maps a method to ``{class_name: [PathP, ...]}``.
    """
    def __init__(self, _vm):
        self.__vm = _vm
        self.__packages = {}
        self.__methods = {}
    def _add_pkg(self, name):
        """Create the TaintedPackage for *name* on first use."""
        if name not in self.__packages:
            self.__packages[ name ] = TaintedPackage( self.__vm, name )
    #self.context.get_tainted_packages().push_info( method_info[0], TAINTED_PACKAGE_CALL, idx, self, self.method, method_info[1], method_info[2][0] + method_info[2][1] )
    def push_info(self, class_name, access, idx, method, idx_method):
        """Record that *method* uses *class_name* (call or creation)."""
        self._add_pkg( class_name )
        p = self.__packages[ class_name ].push( access, idx, method.get_method_idx(), idx_method )
        # Best-effort nested insertion: the except chain builds any
        # missing level of self.__methods on demand.
        try:
            self.__methods[ method ][ class_name ].append( p )
        except:
            try:
                self.__methods[ method ][ class_name ] = []
            except:
                self.__methods[ method ] = {}
                self.__methods[ method ][ class_name ] = []
            self.__methods[ method ][ class_name ].append( p )
    def get_packages_by_method(self, method):
        """Return {class_name: [PathP, ...]} used by *method* ({} if none)."""
        try:
            return self.__methods[method]
        except KeyError:
            return {}
    def get_package(self, name):
        """Return the TaintedPackage of *name* (KeyError if unknown)."""
        return self.__packages[name]
    def get_packages_by_bb(self, bb):
        """
        :rtype: return a list of packaged used in a basic block
        """
        # NOTE(review): PathP does not define get_bb()/get_method(), so
        # this lookup looks dead/broken on non-empty paths -- confirm.
        l = []
        for i in self.__packages:
            paths = self.__packages[i].gets()
            for j in paths:
                for k in paths[j]:
                    if k.get_bb() == bb:
                        l.append( (i, k.get_access_flag(), k.get_idx(), k.get_method()) )
        return l
    def get_packages(self):
        """Yield (TaintedPackage, class_name) for every known package."""
        for i in self.__packages:
            yield self.__packages[i], i
    def get_internal_packages_from_package(self, package):
        """Return the call paths from *package* into classes of this dex."""
        classes = self.__vm.get_classes_names()
        l = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
                dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
                if src_class_name == package and dst_class_name in classes:
                    l.append(j)
        return l
    def get_internal_packages(self):
        """
        :rtype: return a list of the internal packages called in the application
        """
        classes = self.__vm.get_classes_names()
        l = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                    dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
                    if dst_class_name in classes and m.get_name() in classes:
                        l.append(j)
        return l
    def get_internal_new_packages(self):
        """
        :rtype: return a list of the internal packages created in the application
        """
        classes = self.__vm.get_classes_names()
        l = {}
        for m, _ in self.get_packages():
            paths = m.get_new()
            for j in paths:
                src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
                if src_class_name in classes and m.get_name() in classes:
                    if j.get_access_flag() == TAINTED_PACKAGE_CREATE:
                        # Group the creations by created class name.
                        try:
                            l[m.get_name()].append(j)
                        except:
                            l[m.get_name()] = []
                            l[m.get_name()].append(j)
        return l
    def get_external_packages(self):
        """
        :rtype: return a list of the external packages called in the application
        """
        classes = self.__vm.get_classes_names()
        l = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
                dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
                if src_class_name in classes and dst_class_name not in classes:
                    if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                        l.append(j)
        return l
    def search_packages(self, package_name):
        """
        :param package_name: a regexp for the name of the package
        :rtype: a list of called packages' paths
        """
        ex = re.compile(package_name)
        l = []
        for m, _ in self.get_packages():
            if ex.search(m.get_name()) != None:
                l.extend(m.get_methods())
        return l
    def search_unique_packages(self, package_name):
        """
        :param package_name: a regexp for the name of the package
        """
        # NOTE(review): TaintedPackage defines get_name(), not get_info(),
        # and PathP has no get_class_name()/get_name()/get_descriptor() --
        # this method looks broken as written; confirm before relying on it.
        ex = re.compile( package_name )
        l = []
        d = {}
        for m, _ in self.get_packages():
            if ex.match( m.get_info() ) != None:
                for path in m.get_methods():
                    try:
                        d[ path.get_class_name() + path.get_name() + path.get_descriptor() ] += 1
                    except KeyError:
                        d[ path.get_class_name() + path.get_name() + path.get_descriptor() ] = 0
                        l.append( [ path.get_class_name(), path.get_name(), path.get_descriptor() ] )
        return l, d
    def search_methods(self, class_name, name, descriptor, re_expr=True):
        """
        @param class_name : a regexp for the class name of the method (the package)
        @param name : a regexp for the name of the method
        @param descriptor : a regexp for the descriptor of the method
        @rtype : a list of called methods' paths
        """
        l = []
        if re_expr == True:
            ex = re.compile( class_name )
            for m, _ in self.get_packages():
                if ex.search( m.get_name() ) != None:
                    l.extend( m.search_method( name, descriptor ) )
        return l
    def search_objects(self, class_name):
        """
        @param class_name : a regexp for the class name
        @rtype : a list of created objects' paths
        """
        ex = re.compile( class_name )
        l = []
        for m, _ in self.get_packages():
            if ex.search( m.get_name() ) != None:
                l.extend( m.get_objects_paths() )
        return l
    def search_crypto_packages(self):
        """
        @rtype : a list of called crypto packages
        """
        return self.search_packages( "Ljavax/crypto/" )
    def search_telephony_packages(self):
        """
        @rtype : a list of called telephony packages
        """
        return self.search_packages( "Landroid/telephony/" )
    def search_net_packages(self):
        """
        @rtype : a list of called net packages
        """
        return self.search_packages( "Landroid/net/" )
    def get_method(self, class_name, name, descriptor):
        """Return the call paths of the given method ([] if unknown)."""
        try:
            return self.__packages[ class_name ].get_method( name, descriptor )
        except KeyError:
            return []
    def get_permissions_method(self, method):
        """Return the permissions exercised by *method*."""
        # NOTE(review): j.get_method()/j.get_name()/j.get_descriptor() and
        # m.get_info() are not defined on PathP/TaintedPackage -- compare
        # get_permissions below which resolves names via the class manager.
        permissions = []
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                if j.get_method() == method:
                    if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                        tmp = j.get_descriptor()
                        tmp = tmp[ : tmp.rfind(")") + 1 ]
                        data = "%s-%s-%s" % (m.get_info(), j.get_name(), tmp)
                        if data in DVM_PERMISSIONS_BY_ELEMENT:
                            if DVM_PERMISSIONS_BY_ELEMENT[ data ] not in permissions:
                                permissions.append( DVM_PERMISSIONS_BY_ELEMENT[ data ] )
        return permissions
    def get_permissions(self, permissions_needed):
        """
        @param permissions_needed : a list of restricted permissions to get ([] returns all permissions)
        @rtype : a dictionnary of permissions' paths
        """
        permissions = {}
        pn = permissions_needed
        if permissions_needed == []:
            pn = DVM_PERMISSIONS_BY_PERMISSION.keys()
        classes = self.__vm.get_classes_names()
        for m, _ in self.get_packages():
            paths = m.get_methods()
            for j in paths:
                src_class_name, src_method_name, src_descriptor = j.get_src( self.__vm.get_class_manager() )
                dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() )
                if src_class_name in classes and m.get_name() not in classes:
                    if j.get_access_flag() == TAINTED_PACKAGE_CALL:
                        # Drop the return type from the descriptor before
                        # building the permission-table key.
                        tmp = dst_descriptor
                        tmp = tmp[ : tmp.rfind(")") + 1 ]
                        #data = "%s-%s-%s" % (m.get_info(), j.get_name(), j.get_descriptor())
                        data = "%s-%s-%s" % (m.get_name(), dst_method_name, tmp)
                        if data in DVM_PERMISSIONS_BY_ELEMENT:
                            if DVM_PERMISSIONS_BY_ELEMENT[ data ] in pn:
                                try:
                                    permissions[ DVM_PERMISSIONS_BY_ELEMENT[ data ] ].append( j )
                                except KeyError:
                                    permissions[ DVM_PERMISSIONS_BY_ELEMENT[ data ] ] = []
                                    permissions[ DVM_PERMISSIONS_BY_ELEMENT[ data ] ].append( j )
        return permissions
class Enum(object):
    """A tiny enum: each name becomes an upper-cased attribute holding its
    position in the list."""

    def __init__(self, names):
        self.names = names
        for value, name in enumerate(names):
            setattr(self, name.upper(), value)

    def tuples(self):
        """Return ((0, name0), (1, name1), ...)."""
        return tuple(enumerate(self.names))
# Closed set of Android API "tags" used to classify where a class name
# belongs (telephony, net, reflection, ...).
TAG_ANDROID = Enum([ 'ANDROID', 'TELEPHONY', 'SMS', 'SMSMESSAGE', 'ACCESSIBILITYSERVICE', 'ACCOUNTS',
    'ANIMATION', 'APP', 'BLUETOOTH', 'CONTENT', 'DATABASE', 'DEBUG', 'DRM', 'GESTURE',
    'GRAPHICS', 'HARDWARE', 'INPUTMETHODSERVICE', 'LOCATION', 'MEDIA', 'MTP',
    'NET', 'NFC', 'OPENGL', 'OS', 'PREFERENCE', 'PROVIDER', 'RENDERSCRIPT',
    'SAX', 'SECURITY', 'SERVICE', 'SPEECH', 'SUPPORT', 'TEST', 'TEXT', 'UTIL',
    'VIEW', 'WEBKIT', 'WIDGET', 'DALVIK_BYTECODE', 'DALVIK_SYSTEM', 'JAVA_REFLECTION'])
# Reverse mapping: tag value -> tag name.
TAG_REVERSE_ANDROID = dict((i[0], i[1]) for i in TAG_ANDROID.tuples())
# For each tag: [kind, class-name prefix regexp source]; the regexps are
# compiled in place by Tags.__init__.
TAGS_ANDROID = { TAG_ANDROID.ANDROID :                [ 0, "Landroid" ],
                 TAG_ANDROID.TELEPHONY :              [ 0, "Landroid/telephony"],
                 TAG_ANDROID.SMS :                    [ 0, "Landroid/telephony/SmsManager"],
                 TAG_ANDROID.SMSMESSAGE :             [ 0, "Landroid/telephony/SmsMessage"],
                 TAG_ANDROID.DEBUG :                  [ 0, "Landroid/os/Debug"],
                 TAG_ANDROID.ACCESSIBILITYSERVICE :   [ 0, "Landroid/accessibilityservice" ],
                 TAG_ANDROID.ACCOUNTS :               [ 0, "Landroid/accounts" ],
                 TAG_ANDROID.ANIMATION :              [ 0, "Landroid/animation" ],
                 TAG_ANDROID.APP :                    [ 0, "Landroid/app" ],
                 TAG_ANDROID.BLUETOOTH :              [ 0, "Landroid/bluetooth" ],
                 TAG_ANDROID.CONTENT :                [ 0, "Landroid/content" ],
                 TAG_ANDROID.DATABASE :               [ 0, "Landroid/database" ],
                 TAG_ANDROID.DRM :                    [ 0, "Landroid/drm" ],
                 TAG_ANDROID.GESTURE :                [ 0, "Landroid/gesture" ],
                 TAG_ANDROID.GRAPHICS :               [ 0, "Landroid/graphics" ],
                 TAG_ANDROID.HARDWARE :               [ 0, "Landroid/hardware" ],
                 TAG_ANDROID.INPUTMETHODSERVICE :     [ 0, "Landroid/inputmethodservice" ],
                 TAG_ANDROID.LOCATION :               [ 0, "Landroid/location" ],
                 TAG_ANDROID.MEDIA :                  [ 0, "Landroid/media" ],
                 TAG_ANDROID.MTP :                    [ 0, "Landroid/mtp" ],
                 TAG_ANDROID.NET :                    [ 0, "Landroid/net" ],
                 TAG_ANDROID.NFC :                    [ 0, "Landroid/nfc" ],
                 TAG_ANDROID.OPENGL :                 [ 0, "Landroid/opengl" ],
                 TAG_ANDROID.OS :                     [ 0, "Landroid/os" ],
                 TAG_ANDROID.PREFERENCE :             [ 0, "Landroid/preference" ],
                 TAG_ANDROID.PROVIDER :               [ 0, "Landroid/provider" ],
                 TAG_ANDROID.RENDERSCRIPT :           [ 0, "Landroid/renderscript" ],
                 TAG_ANDROID.SAX :                    [ 0, "Landroid/sax" ],
                 TAG_ANDROID.SECURITY :               [ 0, "Landroid/security" ],
                 TAG_ANDROID.SERVICE :                [ 0, "Landroid/service" ],
                 TAG_ANDROID.SPEECH :                 [ 0, "Landroid/speech" ],
                 TAG_ANDROID.SUPPORT :                [ 0, "Landroid/support" ],
                 TAG_ANDROID.TEST :                   [ 0, "Landroid/test" ],
                 TAG_ANDROID.TEXT :                   [ 0, "Landroid/text" ],
                 TAG_ANDROID.UTIL :                   [ 0, "Landroid/util" ],
                 TAG_ANDROID.VIEW :                   [ 0, "Landroid/view" ],
                 TAG_ANDROID.WEBKIT :                 [ 0, "Landroid/webkit" ],
                 TAG_ANDROID.WIDGET :                 [ 0, "Landroid/widget" ],
                 TAG_ANDROID.DALVIK_BYTECODE :        [ 0, "Ldalvik/bytecode" ],
                 TAG_ANDROID.DALVIK_SYSTEM :          [ 0, "Ldalvik/system" ],
                 TAG_ANDROID.JAVA_REFLECTION :        [ 0, "Ljava/lang/reflect"],
}
class Tags(object):
    """
    Handle specific tags

    :param patterns: maps a tag value to ``[kind, regexp source]``; the
                     regexps are compiled in place on construction
    :param reverse: maps a tag value back to its human-readable name

    Bug fix: the ``reverse`` parameter was previously ignored and the
    default ``TAG_REVERSE_ANDROID`` was always stored.
    """
    def __init__(self, patterns=TAGS_ANDROID, reverse=TAG_REVERSE_ANDROID):
        self.tags = set()
        self.patterns = patterns
        # Honour the caller-supplied reverse mapping (was hard-coded to
        # TAG_REVERSE_ANDROID).
        self.reverse = reverse
        for i in self.patterns:
            # re.compile() accepts an already-compiled pattern, so sharing
            # the default ``patterns`` dict across instances stays safe.
            self.patterns[i][1] = re.compile(self.patterns[i][1])
    def emit(self, method):
        """Add the tag of every pattern matching the method's class name."""
        for i in self.patterns:
            if self.patterns[i][0] == 0:
                if self.patterns[i][1].search( method.get_class() ) != None:
                    self.tags.add( i )
    def emit_by_classname(self, classname):
        """Add the tag of every pattern matching *classname*."""
        for i in self.patterns:
            if self.patterns[i][0] == 0:
                if self.patterns[i][1].search( classname ) != None:
                    self.tags.add( i )
    def get_list(self):
        """Return the human-readable names of the collected tags."""
        return [ self.reverse[ i ] for i in self.tags ]
    def __contains__(self, key):
        return key in self.tags
    def __str__(self):
        return str([ self.reverse[ i ] for i in self.tags ])
    def empty(self):
        """Return True when no tag has been collected."""
        return self.tags == set()
class BasicBlocks(object):
    """
    This class represents all basic blocks of a method
    """

    def __init__(self, _vm, tv):
        self.__vm = _vm
        self.tainted = tv
        self.bb = []

    def push(self, bb):
        """Append a basic block."""
        self.bb.append(bb)

    def pop(self, idx):
        """Remove and return the basic block at list position *idx*."""
        return self.bb.pop(idx)

    def get_basic_block(self, idx):
        """Return the block whose [start, end) range contains offset
        *idx*, or None."""
        for block in self.bb:
            if block.get_start() <= idx < block.get_end():
                return block
        return None

    def get_tainted_integers(self):
        """Best effort: None when no tainted information is attached."""
        try:
            return self.tainted.get_tainted_integers()
        except:
            return None

    def get_tainted_packages(self):
        """Best effort: None when no tainted information is attached."""
        try:
            return self.tainted.get_tainted_packages()
        except:
            return None

    def get_tainted_variables(self):
        """Best effort: None when no tainted information is attached."""
        try:
            return self.tainted.get_tainted_variables()
        except:
            return None

    def get(self):
        """
        :rtype: return each basic block (:class:`DVMBasicBlock` object)
        """
        return iter(self.bb)

    def gets(self):
        """
        :rtype: a list of basic blocks (:class:`DVMBasicBlock` objects)
        """
        return self.bb

    def get_basic_block_pos(self, idx):
        """Return the basic block at list position *idx*."""
        return self.bb[idx]
class ExceptionAnalysis(object):
    """One try/catch region: [start, end] offsets plus its handler list.

    Each raw handler entry ``[name, idx]`` gets the basic block found at
    its handler offset appended, becoming ``[name, idx, bb-or-None]``.
    """

    def __init__(self, exception, bb):
        self.start = exception[0]
        self.end = exception[1]
        self.exceptions = exception[2:]
        for handler in self.exceptions:
            handler.append(bb.get_basic_block(handler[1]))

    def show_buff(self):
        """Return a printable multi-line summary of the region."""
        lines = ["%x:%x" % (self.start, self.end)]
        for handler in self.exceptions:
            if handler[2] == None:
                lines.append("\t(%s -> %x %s)" % (handler[0], handler[1], handler[2]))
            else:
                lines.append("\t(%s -> %x %s)" % (handler[0], handler[1], handler[2].get_name()))
        return "\n".join(lines)

    def get(self):
        """Return the dict form: start/end offsets and the handler list."""
        d = {"start": self.start, "end": self.end, "list": []}
        for handler in self.exceptions:
            d["list"].append({"name": handler[0], "idx": handler[1], "bb": handler[2].get_name()})
        return d
class Exceptions(object):
    """All try/catch regions of a method."""

    def __init__(self, _vm):
        self.__vm = _vm
        self.exceptions = []

    def add(self, exceptions, basic_blocks):
        """Wrap each raw exception entry in an :class:`ExceptionAnalysis`."""
        for raw in exceptions:
            self.exceptions.append(ExceptionAnalysis(raw, basic_blocks))

    def get_exception(self, addr_start, addr_end):
        """Return the first region overlapping [addr_start, addr_end],
        or None."""
        for exc in self.exceptions:
            if exc.start >= addr_start and exc.end <= addr_end:
                return exc
            if addr_end <= exc.end and addr_start >= exc.start:
                return exc
        return None

    def gets(self):
        """Return the list of :class:`ExceptionAnalysis` objects."""
        return self.exceptions

    def get(self):
        """Yield each :class:`ExceptionAnalysis` object."""
        return iter(self.exceptions)
# Table of hooks used by MethodAnalysis to build the CFG: the branch
# opcodes to split on, the basic-block class, and the next/exception
# resolvers from the dvm module.
BO = {
    "BasicOPCODES": dvm.BRANCH_DVM_OPCODES,
    "BasicClass": DVMBasicBlock,
    "Dnext": dvm.determineNext,
    "Dexception": dvm.determineException,
}
# Pre-compile the branch-opcode patterns once, up front.
BO["BasicOPCODES_H"] = [re.compile(op) for op in BO["BasicOPCODES"]]
class MethodAnalysis(object):
    """
    This class analyses in details a method of a class/dex file
    :param vm: the object which represent the dex file
    :param method: the original method
    :param tv: a virtual object to get access to tainted information
    :type vm: a :class:`DalvikVMFormat` object
    :type method: a :class:`EncodedMethod` object
    """
    def __init__(self, vm, method, tv):
        self.__vm = vm
        self.method = method
        self.tainted = tv
        # Containers for the control-flow graph and the exception regions.
        self.basic_blocks = BasicBlocks(self.__vm, self.tainted)
        self.exceptions = Exceptions(self.__vm)
        code = self.method.get_code()
        # Abstract/native methods carry no code item: leave the CFG empty.
        if code == None:
            return
        # The first basic block always starts at offset 0.
        current_basic = BO["BasicClass"](0, self.__vm, self.method, self.basic_blocks)
        self.basic_blocks.push(current_basic)
        ##########################################################
        bc = code.get_bc()
        l = []   # every bytecode offset that must start a new basic block
        h = {}   # branch-instruction offset -> list of its target offsets
        idx = 0
        debug("Parsing instructions")
        instructions = [i for i in bc.get_instructions()]
        for i in instructions:
            # Branch-like opcodes: their targets split basic blocks.
            for j in BO["BasicOPCODES_H"]:
                if j.match(i.get_name()) != None:
                    v = BO["Dnext"](i, idx, self.method)
                    h[ idx ] = v
                    l.extend(v)
                    break
            idx += i.get_length()
        debug("Parsing exceptions")
        excepts = BO["Dexception"]( self.__vm, self.method )
        for i in excepts:
            # try-start offsets and every handler entry point also begin blocks.
            l.extend( [i[0]] )
            for handler in i[2:]:
                l.append( handler[1] )
        debug("Creating basic blocks")
        idx = 0
        for i in instructions:
            # index is a destination
            if idx in l:
                if current_basic.get_nb_instructions() != 0:
                    current_basic = BO["BasicClass"](current_basic.get_end(), self.__vm, self.method, self.basic_blocks)
                    self.basic_blocks.push(current_basic)
            current_basic.push(i)
            # index is a branch instruction
            if idx in h:
                current_basic = BO["BasicClass"]( current_basic.get_end(), self.__vm, self.method, self.basic_blocks )
                self.basic_blocks.push( current_basic )
            idx += i.get_length()
        # Drop a trailing empty block (created right after a final branch).
        if current_basic.get_nb_instructions() == 0:
            self.basic_blocks.pop(-1)
        debug("Settings basic blocks childs")
        for i in self.basic_blocks.get():
            try:
                # A block's children are the targets of its last instruction.
                i.set_childs( h[ i.end - i.get_last_length() ] )
            except KeyError:
                i.set_childs( [] )
        debug("Creating exceptions")
        # Create exceptions
        self.exceptions.add(excepts, self.basic_blocks)
        for i in self.basic_blocks.get():
            # setup exception by basic block
            i.set_exception_analysis(self.exceptions.get_exception( i.start, i.end - 1 ))
        del instructions
        del h, l
    def get_basic_blocks(self):
        """
        :rtype: a :class:`BasicBlocks` object
        """
        return self.basic_blocks
    def get_length(self):
        """
        :rtype: an integer which is the length of the code
        """
        # NOTE(review): MethodAnalysis defines no get_code(); this looks like
        # it should call self.method.get_code() instead -- TODO confirm.
        return self.get_code().get_length()
    def get_vm(self):
        # Accessor for the DalvikVMFormat this method belongs to.
        return self.__vm
    def get_method(self):
        # Accessor for the underlying EncodedMethod.
        return self.method
    def get_local_variables(self):
        # Tainted local variables recorded for this method.
        return self.tainted.get_tainted_variables().get_local_variables( self.method )
    def show(self):
        # Debug dump (Python 2 print statements) of the method's CFG.
        print "METHOD", self.method.get_class_name(), self.method.get_name(), self.method.get_descriptor()
        for i in self.basic_blocks.get():
            print "\t", i
            i.show()
            print ""
    def show_methods(self):
        # NOTE(review): self.__bb name-mangles to _MethodAnalysis__bb, which
        # is never assigned in this class; this method appears broken/dead.
        print "\t #METHODS :"
        for i in self.__bb:
            methods = i.get_methods()
            for method in methods:
                print "\t\t-->", method.get_class_name(), method.get_name(), method.get_descriptor()
                for context in methods[method]:
                    print "\t\t\t |---|", context.details
    def create_tags(self):
        """
        Create the tags for the method
        """
        self.tags = Tags()
        for i in self.tainted.get_tainted_packages().get_packages_by_method( self.method ):
            self.tags.emit_by_classname( i )
    def get_tags(self):
        """
        Return the tags of the method
        :rtype: a :class:`Tags` object
        """
        return self.tags
# Names of the available signature grammars (consumed by sign.Signature --
# see the `sign` module).
SIGNATURE_L0_0 = "L0_0"
SIGNATURE_L0_1 = "L0_1"
SIGNATURE_L0_2 = "L0_2"
SIGNATURE_L0_3 = "L0_3"
SIGNATURE_L0_4 = "L0_4"
SIGNATURE_L0_5 = "L0_5"
SIGNATURE_L0_6 = "L0_6"
SIGNATURE_L0_0_L1 = "L0_0:L1"
SIGNATURE_L0_1_L1 = "L0_1:L1"
SIGNATURE_L0_2_L1 = "L0_2:L1"
SIGNATURE_L0_3_L1 = "L0_3:L1"
SIGNATURE_L0_4_L1 = "L0_4:L1"
SIGNATURE_L0_5_L1 = "L0_5:L1"
SIGNATURE_L0_0_L2 = "L0_0:L2"
SIGNATURE_L0_0_L3 = "L0_0:L3"
SIGNATURE_HEX = "hex"
SIGNATURE_SEQUENCE_BB = "sequencebb"
# Predefined grammar configurations, keyed by signature name.
# "type" selects the L0 variant; "arguments" presumably restricts the
# matched package prefixes (e.g. "Landroid", "Ljava") -- TODO confirm
# against the sign module.
SIGNATURES = {
    SIGNATURE_L0_0 : { "type" : 0 },
    SIGNATURE_L0_1 : { "type" : 1 },
    SIGNATURE_L0_2 : { "type" : 2, "arguments" : ["Landroid"] },
    SIGNATURE_L0_3 : { "type" : 2, "arguments" : ["Ljava"] },
    SIGNATURE_L0_4 : { "type" : 2, "arguments" : ["Landroid", "Ljava"] },
    SIGNATURE_L0_5 : { "type" : 3, "arguments" : ["Landroid"] },
    SIGNATURE_L0_6 : { "type" : 3, "arguments" : ["Ljava"] },
    SIGNATURE_SEQUENCE_BB : {},
    SIGNATURE_HEX : {},
}
from sign import Signature
class VMAnalysis(object):
    """
    This class analyses a dex file
    :param _vm: the object which represent the dex file
    :type _vm: a :class:`DalvikVMFormat` object
    :Example:
    VMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) )
    """
    def __init__(self, _vm):
        self.__vm = _vm
        self.tainted_variables = TaintedVariables( self.__vm )
        self.tainted_packages = TaintedPackages( self.__vm )
        self.tainted = { "variables" : self.tainted_variables,
                         "packages" : self.tainted_packages,
                       }
        # Lazily created by get_method_signature.
        self.signature = None
        # Register every field as a tainted variable up front.
        for i in self.__vm.get_all_fields():
            self.tainted_variables.add( [ i.get_class_name(), i.get_descriptor(), i.get_name() ], TAINTED_FIELD )
        # Eagerly analyse every method; keep lookups by object and by name.
        self.methods = []
        self.hmethods = {}
        self.__nmethods = {}
        for i in self.__vm.get_methods():
            x = MethodAnalysis( self.__vm, i, self )
            self.methods.append( x )
            self.hmethods[ i ] = x
            self.__nmethods[ i.get_name() ] = x
    def get_vm(self):
        return self.__vm
    def get_method(self, method):
        """
        Return an analysis method
        :param method: a classical method object
        :type method: an :class:`EncodedMethod` object
        :rtype: a :class:`MethodAnalysis` object
        """
        return self.hmethods[ method ]
    def get_methods(self):
        """
        Return each analysis method
        :rtype: a :class:`MethodAnalysis` object
        """
        for i in self.hmethods:
            yield self.hmethods[i]
    def get_method_signature(self, method, grammar_type="", options={}, predef_sign=""):
        """
        Return a specific signature for a specific method
        :param method: a reference to method from a vm class
        :type method: a :class:`EncodedMethod` object
        :param grammar_type: the type of the signature (optional)
        :type grammar_type: string
        :param options: the options of the signature (optional)
        :param options: dict
        :param predef_sign: used a predefined signature (optional)
        :type predef_sign: string
        :rtype: a :class:`Sign` object
        """
        # Fixed: identity comparison with None (`is None`, not `== None`).
        if self.signature is None:
            self.signature = Signature( self )
        if predef_sign != "":
            g = ""
            o = {}
            # Translate a predefined name like "L0_4:L1" into a grammar
            # string plus an options dict for the "L0" part.
            for i in predef_sign.split(":"):
                if "_" in i:
                    g += "L0:"
                    o[ "L0" ] = SIGNATURES[ i ]
                else:
                    g += i
                    g += ":"
            return self.signature.get_method( self.get_method( method ), g[:-1], o )
        else:
            return self.signature.get_method( self.get_method( method ), grammar_type, options )
    def get_permissions(self, permissions_needed):
        """
        Return the permissions used
        :param permissions_needed: a list of restricted permissions to get ([] returns all permissions)
        :type permissions_needed: list
        :rtype: a dictionary of permissions paths
        """
        permissions = {}
        permissions.update( self.get_tainted_packages().get_permissions( permissions_needed ) )
        permissions.update( self.get_tainted_variables().get_permissions( permissions_needed ) )
        return permissions
    def get_permissions_method(self, method):
        # Union of field- and package-derived permissions for one method.
        permissions_f = self.get_tainted_packages().get_permissions_method( method )
        permissions_v = self.get_tainted_variables().get_permissions_method( method )
        return list( set( permissions_f + permissions_v ) )
    def get_tainted_variables(self):
        """
        Return the tainted variables
        :rtype: a :class:`TaintedVariables` object
        """
        return self.tainted_variables
    def get_tainted_packages(self):
        """
        Return the tainted packages
        :rtype: a :class:`TaintedPackages` object
        """
        return self.tainted_packages
    def get_tainted_fields(self):
        return self.get_tainted_variables().get_fields()
    def get_tainted_field(self, class_name, name, descriptor):
        """
        Return a specific tainted field
        :param class_name: the name of the class
        :param name: the name of the field
        :param descriptor: the descriptor of the field
        :type class_name: string
        :type name: string
        :type descriptor: string
        :rtype: a :class:`TaintedVariable` object
        """
        return self.get_tainted_variables().get_field( class_name, name, descriptor )
class uVMAnalysis(VMAnalysis):
    """
    On-the-fly variant of :class:`VMAnalysis` (quicker!): methods are
    analysed lazily instead of eagerly in the constructor.

    :param vm: the object which represents the dex file
    :type vm: a :class:`DalvikVMFormat` object
    :Example:
    uVMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) )
    """

    def __init__(self, vm):
        self.vm = vm
        self.tainted_variables = TaintedVariables( self.vm )
        self.tainted_packages = TaintedPackages( self.vm )
        self.tainted = {
            "variables": self.tainted_variables,
            "packages": self.tainted_packages,
        }
        self.signature = None
        # Flipped to True once the methods have been walked at least once.
        self.resolve = False

    def get_methods(self):
        self.resolve = True
        for method in self.vm.get_methods():
            yield MethodAnalysis(self.vm, method, self)

    def get_method(self, method):
        return MethodAnalysis( self.vm, method, None )

    def get_vm(self):
        return self.vm

    def _resolve(self):
        # Force one full pass over the methods so taint info is populated.
        if not self.resolve:
            for _ in self.get_methods():
                pass

    def get_tainted_packages(self):
        self._resolve()
        return self.tainted_packages

    def get_tainted_variables(self):
        self._resolve()
        return self.tainted_variables
def is_ascii_obfuscation(vm):
    # True as soon as any class name or method name in the dex file
    # triggers is_ascii_problem (i.e. looks ASCII-obfuscated).
    for klass in vm.get_classes():
        if is_ascii_problem(klass.get_name()):
            return True
        if any(is_ascii_problem(m.get_name()) for m in klass.get_methods()):
            return True
    return False
| CZ-NIC/thug | src/Analysis/androguard/core/analysis/analysis.py | Python | gpl-2.0 | 58,997 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See primitives_test docstring for how the Jax2TfLimitations are used."""
import itertools
from typing import Any, Callable, Optional, Sequence, Union
from jax import lax
from jax import numpy as jnp
from jax._src import test_util as jtu
from jax._src import dtypes
from jax.experimental.jax2tf.tests import primitive_harness
import numpy as np
DType = Any
class Jax2TfLimitation(primitive_harness.Limitation):
"""Specific primitive limitations for jax2tf.
See the primitive_test module docstring for details.
"""
def __init__(
    self,
    description: str,
    *,
    devices: Union[str, Sequence[str]] = ("cpu", "gpu", "tpu"),
    dtypes: Union[DType, Sequence[DType]] = (),
    enabled: bool = True,
    # jax2tf specific
    modes=("eager", "graph", "compiled"),
    skip_tf_run=False,
    expect_tf_error: bool = True,
    skip_comparison=False,
    custom_assert: Optional[Callable] = None,
    tol=None):
    """See the primitive_harness.Limitation common arguments.
    Args :
      modes: one of "eager", "graph", "compiled"
      skip_tf_run: if set will skip the TF execution. Use this sparingly,
        prefer `expect_tf_error`. Use only when the test cannot recover from
        the TF error.
      expect_tf_error: if set, then expect a TF error in the given mode when
        executing the result of jax2tf conversion. If not set, then the
        limitation must have a custom_assert or non-default tol.
      skip_comparison: skips the numeric comparison.
      tol: a tolerance to use for both atol and rtol. We will use the maximum
        tolerance over all the applicable limitations, irrespective of their
        order.
      custom_assert: if given, then execute as
        `custom_assert(tst, result_jax, result_tf, args=args, tol=tol, err_msg)`
        , where `tst` is the current TestCase instance, and args are the input
        arguments that the harness created. The `tol` is the maximum tolerance
        based on the applicable limitations. `err_msg` is passed to NumPy
        assert methods.
        `result_tf` is already converted to NumPy arrays.
    """
    super().__init__(
        description, devices=devices, dtypes=dtypes, enabled=enabled)
    if isinstance(modes, str):
        modes = (modes,)
    # Bug fix: the assertion message was a plain string literal, so
    # "{modes}" was never interpolated; it is now an f-string.
    assert all(m in ["eager", "graph", "compiled"] for m in modes), (
        f"Invalid modes: {modes}")
    self.modes = modes
    self.expect_tf_error = expect_tf_error
    self.skip_tf_run = skip_tf_run
    self.custom_assert = custom_assert
    self.tol = tol
    self.skip_comparison = skip_comparison
def get_max_tolerance_limitation(
    self, limitations: Sequence["Jax2TfLimitation"]
) -> Optional["Jax2TfLimitation"]:
    """Pick the tolerance limitation that establishes the maximum tolerance."""
    # TODO: it would be best if the limitations with tolerance are mutually
    # exclusive and we don't have to compute the maximum
    # TODO: we made this an instance method only so that we don't have to
    # import this module from tf_test.util.
    with_tol = [lim for lim in limitations if lim.tol is not None]
    if not with_tol:
        return None
    # max() keeps the first of several equal maxima, matching the previous
    # strict-greater-than scan.
    return max(with_tol, key=lambda lim: lim.tol)
def filter(  # type: ignore[override]
    self,
    dtype: Optional[DType] = None,
    device: Optional[str] = None,
    mode: Optional[str] = None) -> bool:
    """Whether this limitation applies for the given dtype/device/mode."""
    if mode is not None and mode not in self.modes:
        return False
    return super().filter(device=device, dtype=dtype)
@classmethod
def limitations_for_harness(
    cls, harness: primitive_harness.Harness) -> Sequence["Jax2TfLimitation"]:
    """Return the jax2tf limitations registered for `harness`'s group."""
    group_method = getattr(cls, harness.group_name, None)
    if harness.group_name in cls.harness_groups_no_limitations:
        # A group may be listed as limitation-free OR have a classmethod,
        # never both.
        assert group_method is None, (
            f"Harness group '{harness.group_name}' is both in "
            f"'harness_groups_no_limitations' and has a custom "
            f"Jax2TfLimitation.classmethod defined (see module docstring)")
        return []
    assert group_method is not None, (
        f"Harness group '{harness.group_name}' must be either part of "
        f"'harness_groups_no_limitations' or must have a custom "
        f"Jax2TfLimitation.classmethod defined (see module docstring)")
    limitations = group_method(harness)
    assert isinstance(limitations, (list, tuple))
    return limitations
# We keep here the explicit set of groups for which we don't have limitations
# (limitations_for_harness asserts that none of these names also has a
# classmethod defined on this class).
harness_groups_no_limitations = {
    "abs", "add", "add_any", "and", "atan2",
    "bitcast_convert_type", "broadcast", "broadcast_in_dim", "cbrt", "ceil",
    "clamp", "concatenate", "cos", "cosh", "complex", "conj",
    "convert_element_type",
    "cummax", "cummin", "device_put", "dynamic_slice",
    "dynamic_update_slice", "exp", "eq", "floor", "gather", "ge", "gt",
    "imag",
    "iota", "is_finite", "le", "lt", "log", "mul", "ne", "neg", "not",
    "or", "pad", "population_count",
    "random_categorical", "random_split", "random_uniform", "random_randint",
    "reduce",
    "reduce_and", "reduce_prod", "reduce_or", "reduce_sum",
    "reduce_window_add", "reduce_window_mul", "reduce_window_min",
    "reduce_window_max",
    "real", "reshape", "rev", "rsqrt", "scatter_max", "scatter_min",
    "select_n", "select_and_scatter_add",
    "shift_left", "shift_right_logical", "shift_right_arithmetic", "sign",
    "sin", "sinh", "slice", "sqrt", "squeeze", "stop_gradient", "sub",
    "tie_in", "transpose", "xor", "zeros_like"
}
@classmethod
def helper_get_trig_custom_limitation(cls, np_inverse):
    """Limitation for a trig function verified through its inverse:
    instead of comparing outputs directly, check that applying `np_inverse`
    to the TF result recovers the operand."""

    def check_via_inverse(tst, result_jax, result_tf, *, args, tol, err_msg):
        operand, = args
        tst.assertAllClose(
            operand, np_inverse(result_tf), atol=tol, rtol=tol, err_msg=err_msg)

    return custom_numeric(
        description="May return different but still correct results",
        dtypes=[np.complex64, np.complex128],
        custom_assert=check_via_inverse)
@classmethod
def acos(cls, harness: primitive_harness.Harness):
    """Complex acos needs looser tolerances on CPU/GPU in all modes."""
    all_modes = ("eager", "graph", "compiled")
    return [
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex64,
                       tol=1e-4, modes=all_modes),
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex128,
                       tol=1e-13, modes=all_modes),
    ]
@classmethod
def acosh(cls, harness: primitive_harness.Harness):
    """Complex tolerances plus verification via np.cosh."""
    return [
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex64, tol=1e-3),
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex128, tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.cosh),
    ]
@classmethod
def argmax(cls, harness: primitive_harness.Harness):
    """NaN inputs give different results when XLA lowering is disabled."""
    applies = "nan_" in harness.name and not harness.params["enable_xla"]
    return [
        Jax2TfLimitation(
            "different results when the input contains NaN and enable_xla=False",
            dtypes=jtu.dtypes.all_inexact,
            devices=("cpu", "gpu", "tpu"),
            modes=("eager", "graph", "compiled"),
            expect_tf_error=False,
            skip_comparison=True,
            enabled=applies),
    ]
@classmethod
def argmin(cls, harness: primitive_harness.Harness):
    """argmin shares argmax's NaN/enable_xla limitation."""
    return cls.argmax(harness)
@classmethod
def asin(cls, harness: primitive_harness.Harness):
    """Complex tolerances plus verification via np.sin."""
    return [
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex64, tol=1e-4),
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex128, tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.sin),
    ]
@classmethod
def asinh(cls, harness: primitive_harness.Harness):
    """Complex tolerances plus verification via np.sinh."""
    return [
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex64, tol=1e-3),
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex128, tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.sinh),
    ]
@classmethod
def atan(cls, harness: primitive_harness.Harness):
    """Complex tolerances plus verification via np.tan."""
    return [
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex64, tol=1e-5),
        custom_numeric(devices=("cpu", "gpu"), dtypes=np.complex128, tol=1e-12),
        cls.helper_get_trig_custom_limitation(np.tan),
    ]
@classmethod
def atanh(cls, harness: primitive_harness.Harness):
    """Per-dtype tolerances plus verification via np.tanh."""
    return [
        custom_numeric(tol=1e-14, dtypes=np.float64),
        custom_numeric(tol=1e-3, dtypes=np.complex64),
        custom_numeric(tol=1e-12, devices=("cpu", "gpu"), dtypes=np.complex128),
        cls.helper_get_trig_custom_limitation(np.tanh),
    ]
@classmethod
def bessel_i0e(cls, harness: primitive_harness.Harness):
    """TF lacks a bfloat16 kernel on CPU/GPU outside compiled mode."""
    return [
        missing_tf_kernel(
            modes=("eager", "graph"),
            devices=("cpu", "gpu"),
            dtypes=[dtypes.bfloat16]),
    ]
@classmethod
def bessel_i1e(cls, harness: primitive_harness.Harness):
    """bessel_i1e shares bessel_i0e's missing-kernel limitation."""
    return cls.bessel_i0e(harness)
@classmethod
def cholesky(cls, harness: primitive_harness.Harness):
    """Tolerances per dtype, plus a comparison that ignores the upper triangle."""

    def custom_assert(tst, result_jax, result_tf, *, tol, err_msg, **_):
        # cholesky_p returns garbage in the strictly upper triangular part of the
        # result, so we can safely ignore that part.
        tst.assertAllClose(
            jnp.tril(result_jax), result_tf, atol=tol, err_msg=err_msg)

    return [
        # TODO: very high tolerance
        custom_numeric(
            dtypes=[np.float32, np.complex64],
            tol=1e-2,
            devices=("cpu", "gpu"),
            modes=("eager", "graph", "compiled")),
        custom_numeric(
            dtypes=[np.float64, np.complex128],
            tol=1e-6,
            devices=("cpu", "gpu"),
            modes=("eager", "graph", "compiled")),
        custom_numeric(
            dtypes=[dtypes.bfloat16, np.float16],
            tol=5e-2,
            devices=("cpu", "gpu"),
            modes=("eager", "graph", "compiled")),
        custom_numeric(
            custom_assert=custom_assert,
            description=(
                "May return different values in the strictly upper triangular "
                "part of the result. This does not matter for correctness, "
                "because this part of the matrix is not considered in the result."
            ),
            modes=("eager", "graph", "compiled"))
    ]
@classmethod
def conv_general_dilated(cls, harness: primitive_harness.Harness):
    """float32 tolerances per device, looser when enable_xla=False."""
    return [
        # Even in compiled mode, for GPU we see a bit of discrepancy but
        # very minor.
        custom_numeric(dtypes=np.float32, devices="gpu",
                       modes=("eager", "graph", "compiled"),
                       tol=1e-5),
        custom_numeric(dtypes=np.float32, devices="cpu",
                       modes=("eager", "graph", "compiled"),
                       tol=1e-4),
        custom_numeric(description="higher numeric inaccuracy when `enable_xla=False`",
                       modes=("eager", "graph", "compiled"),
                       enabled=(not harness.params["enable_xla"]),
                       tol=5e-3)
    ]
@classmethod
def cumprod(cls, harness):
    """Half-precision dtypes need a very loose tolerance everywhere."""
    # JAX uses a different lowering for CPU and GPU.
    low_precision = (np.float16, jnp.bfloat16)
    return [
        custom_numeric(
            dtypes=low_precision,
            devices=("cpu", "gpu"),
            modes=("eager", "graph", "compiled"),
            tol=5e-1),
    ]
@classmethod
def cumsum(cls, harness):
    """Half-precision dtypes need a very loose tolerance everywhere."""
    # JAX uses a different lowering for CPU and GPU.
    low_precision = (np.float16, jnp.bfloat16)
    return [
        custom_numeric(
            dtypes=low_precision,
            devices=("cpu", "gpu"),
            modes=("eager", "graph", "compiled"),
            tol=5e-1),
    ]
@classmethod
def custom_linear_solve(cls, harness: primitive_harness.Harness):
    """TPU float32 shows large discrepancies; generic tolerance otherwise."""
    tpu_f32_skip = Jax2TfLimitation(
        "TODO: large numerical discrepancy",
        dtypes=np.float32,
        devices="tpu",
        expect_tf_error=False,
        skip_comparison=True)
    return [
        tpu_f32_skip,
        custom_numeric(devices="tpu", dtypes=np.float32, tol=0.01),
        custom_numeric(tol=1e-3),
    ]
@classmethod
def digamma(cls, harness: primitive_harness.Harness):
    """Missing bfloat16 kernel, per-dtype tolerances, and a custom
    comparison at the poles of digamma (0 and -1)."""
    dtype = harness.dtype

    # In the bfloat16 case, TF and lax both return NaN in undefined cases.
    # digamma is not defined at 0 and -1
    def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
        # lax.digamma returns NaN and tf.math.digamma returns inf
        arg, = args
        special_cases = (arg == 0.) | (arg == -1.)
        nr_special_cases = np.count_nonzero(special_cases)
        # JAX side: NaN at the poles.
        tst.assertAllClose(
            np.full((nr_special_cases,), dtype(np.nan)),
            result_jax[special_cases],
            err_msg=err_msg)
        # TF side: inf at the poles.
        tst.assertAllClose(
            np.full((nr_special_cases,), dtype(np.inf)),
            result_tf[special_cases],
            err_msg=err_msg)
        # non-special cases are equal
        tst.assertAllClose(
            result_jax[~special_cases],
            result_tf[~special_cases],
            atol=tol,
            rtol=tol,
            err_msg=err_msg)

    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.float64, tol=1e-13),
        custom_numeric(dtypes=np.float32, devices=["cpu", "gpu"], tol=1e-3),
        custom_numeric(
            dtypes=dtypes.bfloat16,
            custom_assert=custom_assert,
            description=(
                "May return different results at singularity points 0 and -1."
                "JAX returns nan and TF returns inf"))
    ]
@classmethod
def div(cls, harness: primitive_harness.Harness):
    """Integer division by zero: TF errors out while JAX returns NaN."""
    return [
        Jax2TfLimitation(
            "TF integer division fails if divisor contains 0; JAX returns NaN",
            # Fixed: np.int8 was listed twice in the original dtype list;
            # deduplicated and grouped unsigned/signed.
            dtypes=[
                np.uint8, np.uint16, np.uint32, np.uint64,
                np.int8, np.int16, np.int32, np.int64
            ],
            # Only the harnesses with "singularity" will have divide by 0
            enabled=("singularity" in harness.name))
    ]
@classmethod
def dot_general(cls, harness: primitive_harness.Harness):
    """Missing bool kernel, a GPU non-determinism bug, and a CPU f16 tolerance."""
    return [
        missing_tf_kernel(dtypes=[np.bool_],),
        # TODO(b/189287598)
        Jax2TfLimitation(
            "Non-deterministic NaN for dot_general with preferred_element_type on GPU (b/189287598)",
            dtypes=[
                jnp.bfloat16, np.float16, np.float32, np.complex64
            ],
            devices="gpu",
            modes=("eager", "graph", "compiled"),
            enabled=(harness.params["preferred_element_type"] is not None),
            skip_comparison=True),
        # JAX performs float16 matmuls in float32 on CPU, so the JAX result
        # may be more precise.
        custom_numeric(dtypes=[np.float16], devices=["cpu"], tol=1e-2,
                       modes=("eager", "graph", "compiled")),
    ]
@classmethod
def eig(cls, harness: primitive_harness.Harness):
    """Limitations for lax.eig: order-insensitive eigen-decomposition check."""
    compute_left_eigenvectors = harness.params["compute_left_eigenvectors"]
    compute_right_eigenvectors = harness.params["compute_right_eigenvectors"]
    dtype = harness.dtype

    def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
        operand, = args
        inner_dimension = operand.shape[-1]

        # Test ported from tests.linlag_test.testEig
        # Norm, adjusted for dimension and type.
        def norm(x):
            norm = np.linalg.norm(x, axis=(-2, -1))
            return norm / ((inner_dimension + 1) * jnp.finfo(dtype).eps)

        def check_right_eigenvectors(a, w, vr):
            # Residual of A @ vr - w * vr must be small in the adjusted norm.
            tst.assertTrue(
                np.all(norm(np.matmul(a, vr) - w[..., None, :] * vr) < 100))

        def check_left_eigenvectors(a, w, vl):
            # Left eigenvectors of A are right eigenvectors of A^H with
            # conjugated eigenvalues.
            rank = len(a.shape)
            aH = jnp.conj(a.transpose(list(range(rank - 2)) + [rank - 1, rank - 2]))
            wC = jnp.conj(w)
            check_right_eigenvectors(aH, wC, vl)

        def check_eigenvalue_is_in_array(eigenvalue, eigenvalues_array):
            tol = None
            # TODO(bchetioui): numerical discrepancies
            if dtype in [np.float32, np.complex64]:
                tol = 1e-4
            elif dtype in [np.float64, np.complex128]:
                tol = 1e-13
            closest_diff = min(abs(eigenvalues_array - eigenvalue))
            tst.assertAllClose(
                closest_diff,
                np.array(0., closest_diff.dtype),
                atol=tol,
                err_msg=err_msg)

        # Eigenvalue sets must match in both directions (order-insensitive).
        all_w_jax, all_w_tf = result_jax[0], result_tf[0]
        for idx in itertools.product(*map(range, operand.shape[:-2])):
            w_jax, w_tf = all_w_jax[idx], all_w_tf[idx]
            for i in range(inner_dimension):
                check_eigenvalue_is_in_array(w_jax[i], w_tf)
                check_eigenvalue_is_in_array(w_tf[i], w_jax)

        if compute_left_eigenvectors:
            check_left_eigenvectors(operand, all_w_tf, result_tf[1])
        if compute_right_eigenvectors:
            check_right_eigenvectors(operand, all_w_tf,
                                     result_tf[1 + compute_left_eigenvectors])

    return [
        # Eig does not work in JAX on gpu or tpu
        Jax2TfLimitation(
            "function not compilable", modes="compiled", devices="cpu"),
        Jax2TfLimitation(
            "TF Conversion of eig is not implemented when both compute_left_eigenvectors and compute_right_eigenvectors are set to True",
            enabled=(compute_left_eigenvectors and compute_right_eigenvectors)),
        custom_numeric(
            custom_assert=custom_assert,
            description=("May return the eigenvalues and eigenvectors in a "
                         "potentially different order. The eigenvectors may "
                         "also be different, but equally valid."))
    ]
@classmethod
def eigh(cls, harness: primitive_harness.Harness):
    """Limitations for lax.eigh: order-insensitive eigen-decomposition check
    plus TPU dtype limitations."""
    dtype = harness.dtype

    def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
        operand, = args
        inner_dimension = operand.shape[-1]

        def check_right_eigenvectors(a, w, vr):
            # Per-dtype tolerance for the eigen-equation residual.
            tol = 1e-16
            # TODO(bchetioui): tolerance needs to be very high in compiled mode,
            # specifically for eigenvectors.
            if dtype == np.float64:
                tol = 2e-5
            elif dtype == np.float32:
                tol = 1e-2
            elif dtype in [dtypes.bfloat16, np.complex64]:
                tol = 1e-3
            elif dtype == np.complex128:
                tol = 2e-5
            tst.assertAllClose(
                np.matmul(a, vr) - w[..., None, :] * vr,
                np.zeros(a.shape, dtype=vr.dtype),
                atol=tol,
                # For bfloat16 the np.matmul returns float32 result.
                check_dtypes=False,
                err_msg=err_msg)

        def check_eigenvalue_is_in_array(eigenvalue, eigenvalues_array):
            tol = None
            if dtype in [dtypes.bfloat16, np.float32, np.complex64]:
                tol = 1e-3
            elif dtype in [np.float64, np.complex128]:
                tol = 1e-5
            closest_diff = min(abs(eigenvalues_array - eigenvalue))
            tst.assertAllClose(
                closest_diff,
                np.array(0., closest_diff.dtype),
                atol=tol,
                err_msg=err_msg)

        _, all_w_jax = result_jax
        all_vr_tf, all_w_tf = result_tf

        # Eigenvalue sets must match in both directions (order-insensitive).
        for idx in itertools.product(*map(range, operand.shape[:-2])):
            w_jax, w_tf = all_w_jax[idx], all_w_tf[idx]
            for i in range(inner_dimension):
                check_eigenvalue_is_in_array(w_jax[i], w_tf)
                check_eigenvalue_is_in_array(w_tf[i], w_jax)

        check_right_eigenvectors(operand, all_w_tf, all_vr_tf)

    return [
        missing_tf_kernel(
            dtypes=dtypes.bfloat16,
            devices="tpu",
            enabled=(harness.params["shape"] != (0, 0)),  # This actually works!
        ),
        Jax2TfLimitation(
            "TODO: numeric discrepancies",
            dtypes=np.float16,
            devices="tpu",
            expect_tf_error=False,
            skip_comparison=True),
        custom_numeric(
            custom_assert=custom_assert,
            description=("May return the eigenvalues and eigenvectors in a "
                         "potentially different order. The eigenvectors may "
                         "also be different, but equally valid."),
            modes=("eager", "graph", "compiled"))
    ]
@classmethod
def erf(cls, harness: primitive_harness.Harness):
    """TF lacks a bfloat16 erf kernel on CPU/GPU outside compiled mode."""
    return [
        missing_tf_kernel(
            modes=("eager", "graph"),
            devices=("cpu", "gpu"),
            dtypes=[dtypes.bfloat16]),
    ]
@classmethod
def erfc(cls, harness: primitive_harness.Harness):
    """erfc shares erf's missing-bfloat16-kernel limitation."""
    return cls.erf(harness)
@classmethod
def erf_inv(cls, harness: primitive_harness.Harness):
    """Missing low-precision kernels, tolerances, and a custom comparison
    outside erf_inv's domain (arg <= -1 or arg >= 1)."""

    # erf_inv is not defined for arg <= -1 or arg >= 1
    def custom_assert(tst, result_jax, result_tf, *, args, tol,
                      err_msg):  # noqa: F811
        arg, = args
        # for arg < -1 or arg > 1
        # lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf
        special_cases = (arg < -1.) | (arg > 1.)
        # non-special cases are equal
        tst.assertAllClose(
            result_jax[~special_cases],
            result_tf[~special_cases],
            atol=tol,
            rtol=tol,
            err_msg=err_msg)

    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16, np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=[np.float32, np.float64], tol=1e-4),
        custom_numeric(
            dtypes=[np.float32, np.float64],
            custom_assert=custom_assert,
            description=(
                "May return different results at undefined points (< -1 or > 1):"
                " JAX returns `NaN` and TF returns `+inf` or `-inf`."))
    ]
@classmethod
def expm1(cls, harness: primitive_harness.Harness):
    """Slightly looser float64 tolerance for expm1."""
    return [custom_numeric(tol=1e-5, dtypes=np.float64)]
@classmethod
def fft(cls, harness):
    """64-bit FFT is not compilable in TF on CPU/GPU; loose tolerance overall."""
    not_compilable_64bit = Jax2TfLimitation(
        "TF function not compileable",
        devices=("cpu", "gpu"),
        dtypes=[np.float64, np.complex128],
        modes="compiled")
    # TODO: very high tolerance
    return [
        not_compilable_64bit,
        custom_numeric(modes=("eager", "graph", "compiled"), tol=1e-3),
    ]
@classmethod
def _pow_test_util(cls, harness: primitive_harness.Harness):
    """Shared limitations for pow-like primitives: tolerances plus a
    comparison that masks out NaN/inf entries."""

    def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
        # NaNs are mismatched, but assertAllClose will also behave weirdly for
        # complex numbers containing np.inf as one of their components. See
        # https://github.com/numpy/numpy/issues/15959 for more details.
        mask = (
            np.isnan(result_jax) + np.isnan(result_tf) + np.isinf(result_jax) +
            np.isinf(result_tf))
        tst.assertAllClose(
            result_jax[~mask], result_tf[~mask], rtol=tol, err_msg=err_msg)

    return [
        custom_numeric(
            dtypes=[np.float32, np.complex64], devices=("cpu", "gpu"),
            tol=1e-3),
        custom_numeric(
            dtypes=[np.float64, np.complex128],
            devices=("cpu", "gpu"),
            tol=5e-5),
        custom_numeric(
            dtypes=[np.complex64, np.complex128],
            custom_assert=custom_assert,
        )
    ]
@classmethod
def igamma(cls, harness: primitive_harness.Harness):
    """Missing low-precision kernels plus a custom comparison where
    igamma is undefined (both arguments 0)."""
    dtype = harness.dtype

    # igamma is not defined when the first argument is <=0
    def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
        arg1, arg2 = args
        # lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0
        special_cases = (arg1 == 0.) & (arg2 == 0.)
        nr_special_cases = np.count_nonzero(special_cases)
        tst.assertAllClose(
            np.full((nr_special_cases,), np.nan, dtype=dtype),
            result_jax[special_cases])
        tst.assertAllClose(
            np.full((nr_special_cases,), 0., dtype=dtype),
            result_tf[special_cases])
        # non-special cases are equal
        tst.assertAllClose(
            result_jax[~special_cases],
            result_tf[~special_cases],
            atol=tol,
            rtol=tol,
            err_msg=err_msg)

    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16, np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(
            custom_assert=custom_assert,
            description=(
                "May return different results at undefined points "
                "(both arguments 0). JAX returns `NaN` and TF returns 0 or "
                "JAX returns 1 and TF returns `NaN`"))
    ]
@classmethod
def igammac(cls, harness: primitive_harness.Harness):
    """Missing low-precision kernels, tolerances, and a custom comparison
    where igammac is undefined (either argument <= 0)."""
    dtype = harness.dtype

    # igammac is not defined when the first argument is <=0
    def custom_assert(tst, result_jax, result_tf, *, args, tol,
                      err_msg):  # noqa: F811
        arg1, arg2 = args
        # lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN
        special_cases = (arg1 <= 0.) | (arg2 <= 0)
        nr_special_cases = np.count_nonzero(special_cases)
        tst.assertAllClose(
            np.full((nr_special_cases,), 1., dtype=dtype),
            result_jax[special_cases],
            err_msg=err_msg)
        tst.assertAllClose(
            np.full((nr_special_cases,), np.nan, dtype=dtype),
            result_tf[special_cases],
            err_msg=err_msg)
        # non-special cases are equal
        tst.assertAllClose(
            result_jax[~special_cases],
            result_tf[~special_cases],
            atol=tol,
            rtol=tol,
            err_msg=err_msg)

    return [
        missing_tf_kernel(
            dtypes=[dtypes.bfloat16, np.float16],
            devices=("cpu", "gpu"),
            modes=("eager", "graph")),
        custom_numeric(dtypes=np.float64, tol=1e-9),
        custom_numeric(devices="gpu", tol=1e-3),
        custom_numeric(
            custom_assert=custom_assert,
            devices=("cpu", "gpu"),
            description=(
                "May return different results at undefined points "
                "(both arguments less or equal 0). JAX returns `NaN` and TF returns 0 or "
                "JAX returns 1 and TF returns `NaN`")),
    ]
@classmethod
def integer_pow(cls, harness: primitive_harness.Harness):
    """Missing integer kernels plus overflow-behavior differences, combined
    with the shared pow limitations."""
    y = harness.params["y"]
    return [
        missing_tf_kernel(
            dtypes=[
                np.int8, np.int16, np.uint8, np.uint16, np.uint32, np.uint64
            ],
            modes="graph",
            enabled=(y not in [0, 1]),  # These are special-cased
            devices=("cpu", "gpu")),
        # TODO: on TPU, for f16, we get different results with eager mode
        # than with compiled mode.
        Jax2TfLimitation(
            "Different overflow behavior. ",
            dtypes=[np.float16, jnp.bfloat16],
            devices="tpu",
            expect_tf_error=False,
            modes=("eager", "graph"),
            skip_comparison=True),
        Jax2TfLimitation(
            "Different overflow behavior for large exponents. ",
            dtypes=[
                np.int8, np.int16, np.int32, np.int64, np.float16, jnp.bfloat16,
                np.float32, np.complex64, np.complex128
            ],
            enabled=(abs(y) > 10),
            expect_tf_error=False,
            modes=("eager", "graph"),
            skip_comparison=True),
    ] + list(cls._pow_test_util(harness))
@classmethod
def pow(cls, harness: primitive_harness.Harness):
  """Limitations for `pow`; shared with `integer_pow` via _pow_test_util."""
  return cls._pow_test_util(harness)
@classmethod
def lgamma(cls, harness: primitive_harness.Harness):
  """Limitations for the `lgamma` primitive."""
  limitations = [
      missing_tf_kernel(
          dtypes=[dtypes.bfloat16],
          devices=("cpu", "gpu"),
          modes=("eager", "graph")),
  ]
  limitations.append(custom_numeric(dtypes=np.float64, tol=1e-11))
  limitations.append(custom_numeric(dtypes=np.float32, tol=1e-3))
  return limitations
@classmethod
def log1p(cls, harness: primitive_harness.Harness):
  """Numeric tolerances for the `log1p` primitive."""
  tolerances = (
      (np.complex128, 3e-14),
      (np.float64, 1e-10),
      (np.float32, 1e-3),
  )
  return [custom_numeric(dtypes=dt, tol=tol) for dt, tol in tolerances]
@classmethod
def lu(cls, harness: primitive_harness.Harness):
  """Limitations for the LU decomposition primitive."""
  dtype = harness.dtype

  def custom_assert(tst, result_jax, result_tf, *, args, tol, err_msg):
    # The decomposition is not unique, so instead of comparing factors
    # element-wise, verify the TF result satisfies P @ A == L @ U and that
    # pivots and permutation agree.
    operand, = args
    lu, pivots, perm = result_tf
    batch_dims = operand.shape[:-2]
    m, n = operand.shape[-2], operand.shape[-1]

    def _make_permutation_matrix(perm):
      # Expand the permutation vector(s) into one-hot permutation matrices,
      # one m x m matrix per batch element.
      result = []
      for idx in itertools.product(*map(range, operand.shape[:-1])):
        result += [0 if c != perm[idx] else 1 for c in range(m)]
      result = np.reshape(np.array(result, dtype=dtype), [*batch_dims, m, m])
      return result

    k = min(m, n)
    # Unpack L (unit lower-triangular) and U from the packed LU matrix.
    l = jnp.tril(lu, -1)[..., :, :k] + jnp.eye(m, k, dtype=dtype)
    u = jnp.triu(lu)[..., :k, :]
    p_mat = _make_permutation_matrix(perm)

    tst.assertArraysEqual(
        lax.linalg.lu_pivots_to_permutation(pivots, m), perm)
    tst.assertAllClose(
        jnp.matmul(p_mat, operand),
        jnp.matmul(l, u),
        atol=tol,
        rtol=tol,
        err_msg=err_msg)

  return [
      custom_numeric(
          dtypes=[np.float32, np.complex64], devices="tpu", tol=0.1),
      custom_numeric(
          dtypes=[np.float32, np.complex64], devices=("cpu", "gpu"),
          tol=1e-5),
      custom_numeric(dtypes=[np.float64, np.complex128], tol=1e-13),
      custom_numeric(
          custom_assert=custom_assert,
          description=("May return different, but also correct, results when "
                       "the decomposition is not unique"),
          devices=("cpu", "gpu"),
          modes=("eager", "graph", "compiled")),
  ]
@classmethod
def max(cls, harness: primitive_harness.Harness):
  """Limitations for the `max` primitive."""

  # JAX propagates NaN while TF returns the other operand, so compare only
  # the positions where the JAX result is not NaN.
  def custom_assert(tst, result_jax, result_tf, err_msg, **_):
    not_nan = ~np.isnan(result_jax)
    tst.assertAllClose(result_jax[not_nan], result_tf[not_nan],
                       err_msg=err_msg)

  return [
      custom_numeric(
          custom_assert=custom_assert,
          description=(
              "May return different values when one of the values is NaN. "
              "JAX always returns NaN, while TF returns the value NaN is compared with."
          ),
          modes=("eager", "graph", "compiled"))
  ]
@classmethod
def min(cls, harness: primitive_harness.Harness):
  """Limitations for the `min` primitive."""

  # JAX propagates NaN while TF returns the other operand, so compare only
  # the positions where the JAX result is not NaN.
  def custom_assert(tst, result_jax, result_tf, *, err_msg, **_):
    not_nan = ~np.isnan(result_jax)
    tst.assertAllClose(result_jax[not_nan], result_tf[not_nan],
                       err_msg=err_msg)

  return [
      custom_numeric(
          custom_assert=custom_assert,
          description=(
              "May return different values when one of the values is NaN. "
              "JAX always returns NaN, while TF returns the value NaN is compared with."
          ),
          modes=("eager", "graph", "compiled"))
  ]
@classmethod
def nextafter(cls, harness: primitive_harness.Harness):
  """TF has no `nextafter` kernel for the 16-bit float types."""
  unsupported = [np.float16, dtypes.bfloat16]
  return [missing_tf_kernel(dtypes=unsupported)]
@classmethod
def qr(cls, harness: primitive_harness.Harness):
  """Limitations for the QR decomposition primitive."""
  # See https://github.com/google/jax/pull/3775#issuecomment-659407824;
  # # jit_compile=True breaks for complex types.
  # TODO: see https://github.com/google/jax/pull/3775#issuecomment-659407824.
  # - for now, the performance of the HLO QR implementation called when
  #   compiling with TF is expected to have worse performance than the
  #   custom calls made in JAX.
  return [
      custom_numeric(
          dtypes=[np.float64, np.complex128],
          devices=("cpu", "gpu"),
          modes=("eager", "graph", "compiled"),
          tol=1e-13),
      custom_numeric(
          dtypes=[np.float32, np.complex64],
          devices=("cpu", "gpu"),
          modes=("eager", "graph", "compiled"),
          tol=1e-4),
      missing_tf_kernel(
          dtypes=[dtypes.bfloat16],
          devices="tpu",
      )
  ]
@classmethod
def random_gamma(cls, harness: primitive_harness.Harness):
  """Looser numeric tolerance for `random_gamma` on TPU."""
  tpu_tolerance = 1e-3
  return [custom_numeric(devices="tpu", tol=tpu_tolerance)]
@classmethod
def reduce_max(cls, harness: primitive_harness.Harness):
  # Unlike reduce_window_max, we use a native TF op: tf.reduce_max, which
  # does not work for complex
  complex_dtypes = [np.complex64, np.complex128]
  return [missing_tf_kernel(dtypes=complex_dtypes)]
@classmethod
def reduce_min(cls, harness: primitive_harness.Harness):
  """Same TF limitation as reduce_max (native kernel, no complex dtypes)."""
  return cls.reduce_max(harness)
@classmethod
def regularized_incomplete_beta(cls, harness: primitive_harness.Harness):
  """Limitations for the regularized incomplete beta primitive."""
  f64_tolerance = custom_numeric(dtypes=np.float64, tol=1e-14)
  no_16bit_kernel = missing_tf_kernel(dtypes=[np.float16, dtypes.bfloat16])
  return [f64_tolerance, no_16bit_kernel]
@classmethod
def rem(cls, harness: primitive_harness.Harness):
  """Limitations for the `rem` primitive.

  TF integer division raises an error when the divisor contains 0, whereas
  JAX returns NaN; only harnesses whose name contains "singularity"
  exercise a zero divisor.
  """
  return [
      Jax2TfLimitation(
          "TF integer division fails if divisor contains 0; JAX returns NaN",
          # Fixed: np.int8 was listed twice in this dtype list.
          dtypes=[
              np.uint8, np.uint16, np.uint32, np.uint64,
              np.int8, np.int16, np.int32, np.int64
          ],
          # Only the harnesses with "singularity" will have divide by 0
          enabled=("singularity" in harness.name)),
  ]
@classmethod
def rng_bit_generator(cls, harness: primitive_harness.Harness):
  """No known jax2tf limitations for `rng_bit_generator`."""
  return []
@classmethod
def round(cls, harness: primitive_harness.Harness):
  """TF lacks a bfloat16 `round` kernel outside compiled mode."""
  bf16_only = [dtypes.bfloat16]
  cpu_and_gpu = ("cpu", "gpu")
  uncompiled_modes = ("eager", "graph")
  return [
      missing_tf_kernel(
          dtypes=bf16_only, devices=cpu_and_gpu, modes=uncompiled_modes)
  ]
@classmethod
def scatter_add(cls, harness):
  """No known jax2tf limitations for `scatter_add`."""
  return []
@classmethod
def scatter_mul(cls, harness):
  """No known jax2tf limitations for `scatter_mul`."""
  return []
@classmethod
def select_and_gather_add(cls, harness):
  """Limitations for the `select_and_gather_add` primitive."""
  return [
      # This JAX primitives is not not exposed directly in the JAX API
      # but arises from JVP of `lax.reduce_window` for reducers
      # `lax.max` or `lax.min`. It also arises from second-order
      # VJP of the same. Implemented using XlaReduceWindow.
      Jax2TfLimitation((
          "jax2tf unimplemented for 64-bit inputs because the current implementation "
          "relies on packing two values into a single value. This can be "
          "fixed by using a variadic XlaReduceWindow, when available"),
          dtypes=[np.float64],
          devices=("cpu", "gpu"))
  ]
@classmethod
def sort(cls, harness: primitive_harness.Harness):
  """Limitations for the `sort` primitive.

  Applies only to multi-array, non-stable sorts on GPU, where comparison
  is skipped entirely.
  """
  return [
      Jax2TfLimitation(
          # I think that this is because TF is running on CPU even for GPU tests?
          "TODO: TF non-stable multiple-array sort",
          devices="gpu",
          enabled=(harness.params["num_arrays"] > 1 and
                   not harness.params["is_stable"]),
          expect_tf_error=False,
          skip_comparison=True),
  ]
@classmethod
def svd(cls, harness: primitive_harness.Harness):
  """Limitations for the SVD primitive."""
  # TODO: slow test
  compute_uv = harness.params["compute_uv"]

  def custom_assert(tst, r_jax, r_tf, *, args, tol, err_msg):
    # The factorization is not unique, so when u and v are computed we
    # compare reconstructed operands (and factor shapes) instead of the
    # factors themselves.
    def _reconstruct_operand(result, is_tf: bool):
      # Reconstructing operand as documented in numpy.linalg.svd (see
      # https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html)
      # NOTE(review): the is_tf parameter is unused here.
      s, u, v = result
      U = u[..., :s.shape[-1]]
      V = v[..., :s.shape[-1], :]
      S = s[..., None, :]
      return jnp.matmul(U * S, V), s.shape, u.shape, v.shape

    if compute_uv:
      r_jax_reconstructed = _reconstruct_operand(r_jax, False)
      r_tf_reconstructed = _reconstruct_operand(r_tf, True)
      tst.assertAllClose(
          r_jax_reconstructed,
          r_tf_reconstructed,
          atol=tol,
          rtol=tol,
          err_msg=err_msg)
    else:
      # Without u/v only the singular values are returned; compare directly.
      tst.assertAllClose(r_jax, r_tf, atol=tol, rtol=tol, err_msg=err_msg)

  return [
      # Works in JAX for complex due to custom calls on cpu and gpu
      Jax2TfLimitation(
          "function not compilable. Implemented using `tf.linalg.svd` and `tf.linalg.adjoint`",
          dtypes=[np.complex64, np.complex128],
          devices=("cpu", "gpu"),
          modes=("compiled",)),
      missing_tf_kernel(dtypes=[dtypes.bfloat16], devices="tpu"),
      custom_numeric(
          tol=1e-4,
          dtypes=[np.float32, np.complex64],
          devices=("cpu", "gpu"),
          modes=("eager", "graph", "compiled")),
      custom_numeric(
          tol=1e-2,
          dtypes=[np.float16],
          devices=("tpu"),
          modes=("eager", "graph", "compiled")),
      # TODO: this is very low tolerance for f64
      custom_numeric(
          tol=1e-4,
          dtypes=[np.float64, np.complex128],
          devices=("cpu", "gpu"),
          modes=("eager", "graph", "compiled")),
      custom_numeric(
          description="custom numeric comparison when compute_uv",
          custom_assert=custom_assert,
          devices=("cpu", "gpu"),
          modes=("eager", "graph", "compiled"),
          enabled=(compute_uv == True))
  ]
@classmethod
def tan(cls, harness):
  """Numeric tolerances for the `tan` primitive."""
  tpu_c64 = custom_numeric(dtypes=np.complex64, devices="tpu", tol=1e-4)
  cpu_gpu_c64 = custom_numeric(dtypes=np.complex64, devices=("cpu", "gpu"),
                               tol=1e-3)
  cpu_gpu_c128 = custom_numeric(dtypes=np.complex128, devices=("cpu", "gpu"),
                                tol=1e-12)
  return [tpu_c64, cpu_gpu_c64, cpu_gpu_c128]
@classmethod
def tanh(cls, harness):
  """Numeric tolerances for the `tanh` primitive."""
  tolerances = ((np.complex128, 1e-7), (np.complex64, 1e-4))
  return [custom_numeric(dtypes=dt, tol=tol) for dt, tol in tolerances]
@classmethod
def top_k(cls, harness):
  """Limitations for the `top_k` primitive."""

  def custom_assert(tst, result_jax, result_tf, *, err_msg, **_):
    assert len(result_jax) == len(result_tf)
    # TODO: TF and JAX sort [inf, nan] differently.
    first_arr_jax, first_arr_tf = result_jax[0], result_tf[0]
    if np.all(first_arr_jax == first_arr_tf):
      # Values agree exactly; indices must agree too.
      for arr_jax, arr_tf in zip(result_jax, result_tf):
        tst.assertArraysEqual(arr_jax, arr_tf, err_msg=err_msg)
    else:
      # Values differ: only compare the non-NaN entries of the value arrays.
      mask_jax, mask_tf = np.isnan(first_arr_jax), np.isnan(first_arr_tf)
      tst.assertArraysEqual(
          first_arr_jax[~mask_jax], first_arr_tf[~mask_tf], err_msg=err_msg)

  return [
      custom_numeric(
          dtypes=[np.float16, dtypes.bfloat16, np.float32, np.float64],
          custom_assert=custom_assert,
          description=(
              "Produces different results when the array contains `inf` and `NaN`"
              " (they are sorted differently in TF vs. XLA)."))
  ]
@classmethod
def triangular_solve(cls, harness: primitive_harness.Harness):
  """Limitations for the `triangular_solve` primitive."""
  no_bf16 = missing_tf_kernel(dtypes=[dtypes.bfloat16])
  no_f16_uncompiled = missing_tf_kernel(
      dtypes=[np.float16],
      devices=("gpu", "cpu"),
      modes=("eager", "graph"))
  f32_tolerance = custom_numeric(dtypes=np.float32, tol=5e-3)
  return [no_bf16, no_f16_uncompiled, f32_tolerance]
@classmethod
def tridiagonal_solve(cls, harness: primitive_harness.Harness):
  """No known jax2tf limitations for `tridiagonal_solve`."""
  return []
def custom_numeric(
    *,
    description="custom numeric comparison",
    dtypes=(),  # All
    modes=(
        "eager",
        "graph",
    ),  # By default we should not need tolerance for
    # "compiled"
    devices=("cpu", "gpu", "tpu"),
    custom_assert=None,
    enabled=True,
    tol=None) -> Jax2TfLimitation:
  """Build a Jax2TfLimitation for a numeric discrepancy (not a TF error).

  Args:
    description: human-readable explanation of the discrepancy.
    dtypes: dtypes the limitation applies to (empty tuple means all).
    modes: affected TF execution modes.
    devices: affected devices.
    custom_assert: optional replacement for the default closeness check.
    enabled: whether the limitation is active for this harness.
    tol: tolerance to use when comparing results.
  """
  return Jax2TfLimitation(
      description,
      expect_tf_error=False,
      dtypes=dtypes,
      devices=devices,
      modes=modes,
      custom_assert=custom_assert,
      enabled=enabled,
      tol=tol)
def missing_tf_kernel(*,
                      description="op not defined for dtype",
                      dtypes,
                      modes=("eager", "graph", "compiled"),
                      devices=("cpu", "gpu", "tpu"),
                      enabled=True) -> Jax2TfLimitation:
  """Build a Jax2TfLimitation for a TF kernel missing for some dtypes."""
  limitation = Jax2TfLimitation(description,
                                dtypes=dtypes,
                                devices=devices,
                                modes=modes,
                                enabled=enabled)
  return limitation
| google/jax | jax/experimental/jax2tf/tests/jax2tf_limitations.py | Python | apache-2.0 | 40,755 |
"""Examples taken from:
https://github.com/ckan/ckanext-harvest/blob/master/ckanext/harvest/model/__init__.py
https://github.com/ckan/ckanext-spatial/blob/master/ckanext/spatial/model/package_extent.py
"""
# TODO @palcu: split this file into its own modules
# NOTE: the module-level table globals below make that refactor awkward.
from logging import getLogger
from sqlalchemy import Table, Column, ForeignKey, types
from sqlalchemy.orm import backref, relation
from ckan.model import Group, Package
from ckan.model.meta import metadata, mapper
from ckan.model.types import make_uuid
from ckan.model.domain_object import DomainObject
log = getLogger(__name__)
# Module-level table handles; populated lazily by define_inventory_tables().
inventory_entry_table = None
inventory_item_table = None
class InventoryDomainObject(DomainObject):
    """Base class for all inventory domain objects."""
    pass
class InventoryEntry(InventoryDomainObject):
    """A planned inventory item, mapped to the `inventory_entry` table."""
    pass
class InventoryItem(InventoryDomainObject):
    """Link between an inventory entry and a package (`inventory_item`)."""
    pass
def model_setup():
    """Define the inventory tables in memory and create them if missing."""
    if inventory_entry_table is None:
        define_inventory_tables()
        log.debug('Inventory tables have been defined in memory')

    if not inventory_entry_table.exists():
        inventory_entry_table.create()
        inventory_item_table.create()
        log.debug('Inventory tables have been created')
    else:
        log.debug('Inventory tables already exist')
        # Future migrations go here
def define_inventory_tables():
    """Define both inventory tables and their ORM mappers."""
    define_inventory_entry_table()
    define_inventory_item_table()
def define_inventory_entry_table():
    """Define the `inventory_entry` table and map InventoryEntry to it."""
    global inventory_entry_table
    inventory_entry_table = Table(
        'inventory_entry',
        metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('title', types.UnicodeText, default=u''),
        # Each entry belongs to a CKAN group.
        Column('group_id', types.UnicodeText, ForeignKey('group.id')),
        Column('is_recurring', types.Boolean, default=False),
        # NOTE(review): the unit of recurring_interval (days?) is not stated
        # anywhere in this module -- confirm before relying on it.
        Column('recurring_interval', types.Integer, default=0),
        Column('last_added_dataset_timestamp', types.DateTime)
    )

    mapper(InventoryEntry, inventory_entry_table, properties={
        'group': relation(Group, lazy=True, backref=u'inventory_entries')
    })
def define_inventory_item_table():
    """Define the `inventory_item` table and map InventoryItem to it."""
    global inventory_item_table
    inventory_item_table = Table(
        'inventory_item',
        metadata,
        Column('id', types.UnicodeText, primary_key=True, default=make_uuid),
        Column('inventory_entry_id',
               types.UnicodeText,
               ForeignKey('inventory_entry.id')),
        Column('package_id', types.UnicodeText, ForeignKey('package.id'))
    )

    # Items are deleted together with their entry or package
    # (cascade='all,delete-orphan' on both backrefs).
    mapper(InventoryItem, inventory_item_table, properties={
        'inventory_entry': relation(InventoryEntry,
                                    lazy=True,
                                    backref=backref('inventory_items',
                                                    cascade='all,delete-orphan')),
        'package_entry': relation(Package,
                                  lazy=True,
                                  backref=backref('inventory_items',
                                                  cascade='all,delete-orphan')),
    })
| govro/ckanext-inventory | ckanext/inventory/model/__init__.py | Python | apache-2.0 | 3,084 |
# Generated by Django 3.1.7 on 2021-04-19 12:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add assignee through-models and M2M fields for customers/projects/tasks.

    Auto-generated migration; the three through-models share the same
    is_resource / is_reviewer / is_manager flags.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("projects", "0010_project_billed"),
    ]

    operations = [
        # Through-model linking users to tasks.
        migrations.CreateModel(
            name="TaskAssignee",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("is_resource", models.BooleanField(default=False)),
                ("is_reviewer", models.BooleanField(default=False)),
                ("is_manager", models.BooleanField(default=False)),
                (
                    "task",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="task_assignees",
                        to="projects.task",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="task_assignees",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Through-model linking users to projects.
        migrations.CreateModel(
            name="ProjectAssignee",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("is_resource", models.BooleanField(default=False)),
                ("is_reviewer", models.BooleanField(default=False)),
                ("is_manager", models.BooleanField(default=False)),
                (
                    "project",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="project_assignees",
                        to="projects.project",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="project_assignees",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Through-model linking users to customers.
        migrations.CreateModel(
            name="CustomerAssignee",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("is_resource", models.BooleanField(default=False)),
                ("is_reviewer", models.BooleanField(default=False)),
                ("is_manager", models.BooleanField(default=False)),
                (
                    "customer",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="customer_assignees",
                        to="projects.customer",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="customer_assignees",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # Expose the through-models as ManyToMany fields.
        migrations.AddField(
            model_name="customer",
            name="assignees",
            field=models.ManyToManyField(
                related_name="assigned_to_customers",
                through="projects.CustomerAssignee",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="project",
            name="assignees",
            field=models.ManyToManyField(
                related_name="assigned_to_projects",
                through="projects.ProjectAssignee",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AddField(
            model_name="task",
            name="assignees",
            field=models.ManyToManyField(
                related_name="assigned_to_tasks",
                through="projects.TaskAssignee",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
| adfinis-sygroup/timed-backend | timed/projects/migrations/0011_auto_20210419_1459.py | Python | agpl-3.0 | 4,955 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...table import Table
class TestInitialisation(unittest.TestCase):
    """Test initialisation of the Table class and its XML declaration."""

    def setUp(self):
        # Write the table XML into an in-memory buffer.
        self.fh = StringIO()
        self.table = Table()
        self.table._set_filehandle(self.fh)

    def test_xml_declaration(self):
        """Table._xml_declaration() writes the standard XML prologue."""
        self.table._xml_declaration()

        expected = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
        actual = self.fh.getvalue()

        self.assertEqual(actual, expected)
| jmcnamara/XlsxWriter | xlsxwriter/test/table/test_initialisation.py | Python | bsd-2-clause | 797 |
from . import handlers
from firenado import tornadoweb
from firenado.launcher import ProcessLauncher
from tornado import gen
import os
class LauncherComponent(tornadoweb.TornadoComponent):
    """Firenado component that launches the bundled "charge" application
    in a child process via ProcessLauncher."""

    def __init__(self, name, application):
        super(LauncherComponent, self).__init__(name, application)
        # Paths are resolved relative to this module's directory.
        self.launcher_path = os.path.abspath(os.path.dirname(__file__))
        self.charge_path = os.path.join(self.launcher_path, "charge")
        self.launcher = None

    def get_handlers(self):
        """Map the component root URL to the index handler."""
        return [
            (r'/', handlers.IndexHandler),
        ]

    @gen.coroutine
    def initialize(self):
        """Create, load and launch the child process (logs to stderr)."""
        import sys
        self.launcher = ProcessLauncher(dir=self.charge_path,
                                        logfile=sys.stderr)
        self.launcher.load()
        yield self.launcher.launch()

    @gen.coroutine
    def shutdown(self):
        """Stop the launched child process."""
        self.launcher.shutdown()
| candango/firenado | examples/launcher/app.py | Python | apache-2.0 | 902 |
import json
import urllib
import os
import sys
import threading
sys.path.append("../")
import server
from server import servicePin
from server import service
from src.model import Pin, Category
#constants
# Takes as argument the URL of the JSON file with the Velov data
# e.g.: "https://download.data.grandlyon.com/ws/rdata/jcd_jcdecaux.jcdvelov/all.json"
# NOTE(review): insertTestData() below takes no arguments -- this comment looks stale.
def insertTestData():
service.logMessage(".Inserting test data ")
count=0
for i in range(0, 10000):
title = "testunit"
description = "item"
lat = 0.0
lnd = 0.0
count=count+1
if count == 50:
count = 0
print "inserted " + str(i) + " units"
categorie1 = Category(count, "ABC")
categorie2 = Category(count, "ABC")
categorie3 = Category(count, "ABC")
categorie1.id = count+1
categorie2.id = count+2
categorie1.id = count+3
obj = Pin('velov', title, lnd, lat, 1, [categorie1, categorie2, categorie3], description)
obj.typeSpecificID = i
service.updateVelovByIdVelov(obj)
insertTestData()
| H4213/WeLyon1.0 | test/test_perf.py | Python | mit | 1,006 |
import os
# *****************************
# Environment specific settings
# *****************************
APP_NAME = "Snowflake"

# The settings below can (and should) be over-ruled by OS environment variable settings

# Flask settings                        # Generated with: import os; os.urandom(24)
# NOTE(review): a real secret key is committed as a fallback here; rotate it
# and rely solely on the SECRET_KEY environment variable in production.
SECRET_KEY = os.getenv('SECRET_KEY', '\xc1\x06\x98\x16\x9f\xafk[\xd1~\x00\xf6\xd4\xa0Znl\xc5\x1d\xc4P{lL')
# PLEASE USE A DIFFERENT KEY FOR PRODUCTION ENVIRONMENTS!

# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL')

# Flask-Mail settings
# NOTE(review): real-looking mail credentials are committed as fallbacks
# below; rotate them and set MAIL_USERNAME/MAIL_PASSWORD in the environment.
MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'brandonium21@gmail.com')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'dragon49')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', APP_NAME + ' <noreply@example.com>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', '1'))  # Use '1' for True and '0' for False
MAIL_USE_TLS = int(os.getenv('MAIL_USE_TLS', '0'))  # Use '1' for True and '0' for False

# Flask-User login/confirmation behavior
USER_ENABLE_LOGIN_WITHOUT_CONFIRM = False
USER_ENABLE_CONFIRM_EMAIL = False

# Up to four admin addresses may be supplied via ADMIN1..ADMIN4.
ADMINS = []
admin1 = os.getenv('ADMIN1', '"admin1" <brandondrice@aol.com>')
admin2 = os.getenv('ADMIN2', '')
admin3 = os.getenv('ADMIN3', '')
admin4 = os.getenv('ADMIN4', '')
if admin1: ADMINS.append(admin1)
if admin2: ADMINS.append(admin2)
if admin3: ADMINS.append(admin3)
if admin4: ADMINS.append(admin4)

# ***********************************
# Settings common to all environments
# ***********************************

# Application settings
APP_SYSTEM_ERROR_SUBJECT_LINE = APP_NAME + " system error"

# Flask settings
CSRF_ENABLED = True

# Flask-User settings
USER_APP_NAME = APP_NAME
USER_AFTER_LOGIN_ENDPOINT = 'member_page'
USER_AFTER_LOGOUT_ENDPOINT = 'home_page'
| brandonium21/snowflake | app/startup/settings.py | Python | bsd-2-clause | 1,980 |
# Copyright (c) 2013-2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ron Frederick - initial implementation, API, and documentation
"""SSH channel and session handlers"""
import asyncio
from .constants import *
from .logging import *
from .misc import *
from .packet import *
from .sftp import *
_EOF = object()
class SSHChannel(SSHPacketHandler):
    """Parent class for SSH channels"""

    # Extended-data type codes accepted from / sent to the peer; empty here
    # and presumably overridden by subclasses (not visible in this chunk).
    _read_datatypes = set()
    _write_datatypes = set()
def __init__(self, conn, loop, encoding, window, max_pktsize):
    """Initialize an SSH channel

       If encoding is set, data sent and received will be in the form
       of strings, converted on the wire to bytes using the specified
       encoding. If encoding is None, data sent and received must be
       provided as bytes.

       Window specifies the initial receive window size.

       Max_pktsize specifies the maximum length of a single data packet.
    """
    self._conn = conn
    self._loop = loop
    self._session = None
    self._encoding = encoding
    self._extra = { 'connection': conn }

    # Send side: remote channel number, window and packet-size limits,
    # plus the outgoing buffer used for flow control.
    self._send_state = 'closed'
    self._send_chan = None
    self._send_window = None
    self._send_pktsize = None
    self._send_paused = False
    self._send_buf = []
    self._send_buf_len = 0

    # Receive side: local channel number and receive-window bookkeeping.
    # Reading starts paused until a session is established.
    self._recv_state = 'closed'
    self._recv_chan = conn._get_recv_chan()
    self._init_recv_window = window
    self._recv_window = window
    self._recv_pktsize = max_pktsize
    self._recv_paused = True
    self._recv_buf = []

    # Futures for pending open/request/close operations.
    self._open_waiter = None
    self._request_waiters = []
    self._close_waiters = []

    self.set_write_buffer_limits()

    # Register this channel under its local channel number.
    conn._channels[self._recv_chan] = self
def _cleanup(self, exc=None):
    """Tear down the channel: fail pending waiters, notify the session,
       and deregister from the connection. `exc` is passed to waiters and
       the session's connection_lost()."""
    if self._open_waiter:
        # A pending open can never complete once the connection is gone.
        self._open_waiter.set_exception(
            ChannelOpenError(OPEN_CONNECT_FAILED,
                             'SSH connection closed'))
        self._open_waiter = None

    if self._request_waiters:
        for waiter in self._request_waiters:
            waiter.set_exception(exc)

        self._request_waiters = []

    if self._close_waiters:
        for waiter in self._close_waiters:
            if not waiter.cancelled():
                waiter.set_result(None)

        self._close_waiters = []

    if self._session:
        self._session.connection_lost(exc)
        self._session = None

    if self._conn:
        if self._recv_chan:
            del self._conn._channels[self._recv_chan]
            self._recv_chan = None

        self._conn = None

    self._send_state = 'closed'
    self._recv_state = 'closed'
def _pause_resume_writing(self):
    """Toggle session write pause based on the buffer watermarks"""

    if not self._send_paused:
        if self._send_buf_len > self._send_high_water:
            self._send_paused = True
            self._session.pause_writing()
    else:
        if self._send_buf_len <= self._send_low_water:
            self._send_paused = False
            self._session.resume_writing()
def _flush_send_buf(self):
    """Send as much buffered data as the remote window allows, then
       complete any pending EOF/close once the buffer drains."""
    while self._send_buf and self._send_window:
        # Each packet is bounded by both the window and max packet size.
        pktsize = min(self._send_window, self._send_pktsize)
        buf, datatype = self._send_buf[0]

        if len(buf) > pktsize:
            data = buf[:pktsize]
            del buf[:pktsize]
        else:
            data = buf
            del self._send_buf[0]

        self._send_buf_len -= len(data)
        self._send_window -= len(data)

        if datatype is None:
            self._send_packet(MSG_CHANNEL_DATA, String(data))
        else:
            self._send_packet(MSG_CHANNEL_EXTENDED_DATA,
                              UInt32(datatype), String(data))

    self._pause_resume_writing()

    if not self._send_buf:
        # Deferred EOF/close now that all data has gone out.
        if self._send_state == 'eof_pending':
            self._send_packet(MSG_CHANNEL_EOF)
            self._send_state = 'eof_sent'
        elif self._send_state == 'close_pending':
            self._send_packet(MSG_CHANNEL_CLOSE)
            self._send_state = 'close_sent'
def _deliver_data(self, data, datatype):
    """Deliver received data (or the EOF sentinel) to the session,
       adjusting the receive window and decoding text if needed."""
    if data == _EOF:
        # If the session doesn't want to keep the channel open after EOF,
        # close it.
        if not self._session.eof_received():
            self.close()
    else:
        self._recv_window -= len(data)

        # Replenish the peer's window once half of it has been consumed.
        if self._recv_window < self._init_recv_window / 2:
            self._send_packet(MSG_CHANNEL_WINDOW_ADJUST,
                              UInt32(self._init_recv_window -
                                     self._recv_window))
            self._recv_window = self._init_recv_window

        if self._encoding:
            try:
                data = data.decode(self._encoding)
            except UnicodeDecodeError:
                raise DisconnectError(DISC_PROTOCOL_ERROR,
                                      'Unicode decode error') from None

        self._session.data_received(data, datatype)
def _accept_data(self, data, datatype=None):
    """Accept incoming data, enforcing the window and buffering it when
       reading is paused; drops data if the channel is closing."""
    if not data:
        return

    if self._send_state in {'close_pending', 'close_sent', 'closed'}:
        return

    if data != _EOF and len(data) > self._recv_window:
        raise DisconnectError(DISC_PROTOCOL_ERROR, 'Window exceeded')

    if self._recv_paused:
        # Hold the data until the session resumes reading.
        self._recv_buf.append((data, datatype))
    else:
        self._deliver_data(data, datatype)
def _process_connection_close(self, exc):
    """Handle the underlying SSH connection going away"""

    self._cleanup(exc)
def _process_open(self, send_chan, send_window, send_pktsize, session):
    """Process a channel open request"""

    if self._recv_state != 'closed':
        raise DisconnectError(DISC_PROTOCOL_ERROR, 'Channel already open')

    self._send_state = 'open_received'
    self._send_chan = send_chan
    self._send_window = send_window
    self._send_pktsize = send_pktsize

    # The session factory may be a coroutine; finish asynchronously.
    asyncio.async(self._finish_open_request(session), loop=self._loop)
@asyncio.coroutine
def _finish_open_request(self, session):
    """Finish processing a channel open request"""

    try:
        if asyncio.iscoroutine(session):
            session = yield from session

        self._session = session

        self._conn._send_channel_open_confirmation(self._send_chan,
                                                   self._recv_chan,
                                                   self._recv_window,
                                                   self._recv_pktsize)

        self._send_state = 'open'
        self._recv_state = 'open'

        self._session.connection_made(self)
    except ChannelOpenError as exc:
        # Session creation refused the open; report the failure and clean up.
        self._conn._send_channel_open_failure(self._send_chan, exc.code,
                                              exc.reason, exc.lang)
        self._loop.call_soon(self._cleanup)
def _process_open_confirmation(self, send_chan, send_window, send_pktsize,
                               packet):
    """Process a channel open confirmation"""

    if not self._open_waiter:
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Channel not being opened')

    self._send_chan = send_chan
    self._send_window = send_window
    self._send_pktsize = send_pktsize
    self._send_state = 'open'
    self._recv_state = 'open'

    # Hand the remainder of the confirmation packet to the opener.
    if not self._open_waiter.cancelled():
        self._open_waiter.set_result(packet)

    self._open_waiter = None
def _process_open_failure(self, code, reason, lang):
    """Process a channel open failure"""

    if not self._open_waiter:
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Channel not being opened')

    self._open_waiter.set_exception(ChannelOpenError(code, reason, lang))
    self._open_waiter = None
    self._loop.call_soon(self._cleanup)
def _process_window_adjust(self, pkttype, packet):
    """Handle a CHANNEL_WINDOW_ADJUST: grow the send window and flush."""
    if self._recv_state not in {'open', 'eof_received'}:
        raise DisconnectError(DISC_PROTOCOL_ERROR, 'Channel not open')

    adjust = packet.get_uint32()
    packet.check_end()

    self._send_window += adjust
    self._flush_send_buf()
def _process_data(self, pkttype, packet):
    """Handle a CHANNEL_DATA packet carrying ordinary channel data."""
    if self._recv_state != 'open':
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Channel not open for sending')

    data = packet.get_string()
    packet.check_end()

    self._accept_data(data)
def _process_extended_data(self, pkttype, packet):
    """Handle a CHANNEL_EXTENDED_DATA packet; the datatype must be one
       this channel type accepts (see _read_datatypes)."""
    if self._recv_state != 'open':
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Channel not open for sending')

    datatype = packet.get_uint32()
    data = packet.get_string()
    packet.check_end()

    if datatype not in self._read_datatypes:
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Invalid extended data type')

    self._accept_data(data, datatype)
def _process_eof(self, pkttype, packet):
    """Handle a CHANNEL_EOF: mark the receive side done and deliver the
       EOF sentinel through the normal data path."""
    if self._recv_state != 'open':
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Channel not open for sending')

    packet.check_end()

    self._recv_state = 'eof_received'
    self._accept_data(_EOF)
def _process_close(self, pkttype, packet):
    """Handle a CHANNEL_CLOSE: discard unsent data, echo the close if we
       haven't sent one, and schedule cleanup."""
    if self._recv_state not in {'open', 'eof_received'}:
        raise DisconnectError(DISC_PROTOCOL_ERROR, 'Channel not open')

    packet.check_end()

    # Flush any unsent data
    self._send_buf = []
    self._send_buf_len = 0

    # If we haven't yet sent a close, send one now
    if self._send_state not in {'close_sent', 'closed'}:
        self._send_packet(MSG_CHANNEL_CLOSE)

    self._loop.call_soon(self._cleanup)
def _process_request(self, pkttype, packet):
    """Handle a CHANNEL_REQUEST by dispatching to a
       `_process_<name>_request` method; replies success/failure when
       the peer asked for a reply."""
    if self._recv_state not in {'open', 'eof_received'}:
        raise DisconnectError(DISC_PROTOCOL_ERROR, 'Channel not open')

    if self._send_state in {'close_pending', 'close_sent', 'closed'}:
        return

    request = packet.get_string()
    want_reply = packet.get_boolean()

    try:
        request = request.decode('ascii')
    except UnicodeDecodeError:
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Invalid channel request') from None

    # Dynamic dispatch: e.g. "exit-status" -> _process_exit_status_request.
    name = '_process_' + request.replace('-', '_') + '_request'
    handler = getattr(self, name, None)
    result = handler(packet) if callable(handler) else False

    if want_reply:
        if result:
            self._send_packet(MSG_CHANNEL_SUCCESS)
        else:
            self._send_packet(MSG_CHANNEL_FAILURE)

    # A successful shell/exec/subsystem request starts the session and
    # begins delivering any buffered data.
    if result and request in ('shell', 'exec', 'subsystem'):
        self._session.session_started()
        self.resume_reading()
def _process_response(self, pkttype, packet):
    """Handle CHANNEL_SUCCESS/CHANNEL_FAILURE by resolving the oldest
       pending request waiter (True on success, False on failure)."""
    if self._send_state not in {'open', 'eof_pending', 'eof_sent',
                                'close_pending', 'close_sent'}:
        raise DisconnectError(DISC_PROTOCOL_ERROR, 'Channel not open')

    packet.check_end()

    if self._request_waiters:
        waiter = self._request_waiters.pop(0)

        if not waiter.cancelled():
            waiter.set_result(pkttype == MSG_CHANNEL_SUCCESS)
    else:
        raise DisconnectError(DISC_PROTOCOL_ERROR,
                              'Unexpected channel response')
# Dispatch table mapping channel message types to handler methods.
packet_handlers = {
    MSG_CHANNEL_WINDOW_ADJUST:      _process_window_adjust,
    MSG_CHANNEL_DATA:               _process_data,
    MSG_CHANNEL_EXTENDED_DATA:      _process_extended_data,
    MSG_CHANNEL_EOF:                _process_eof,
    MSG_CHANNEL_CLOSE:              _process_close,
    MSG_CHANNEL_REQUEST:            _process_request,
    MSG_CHANNEL_SUCCESS:            _process_response,
    MSG_CHANNEL_FAILURE:            _process_response
}
@asyncio.coroutine
def _open(self, chantype, *args):
    """Make a request to open the channel

       Sends CHANNEL_OPEN and waits for the confirmation packet, which
       is returned to the caller (or ChannelOpenError is raised).
    """
    if self._send_state != 'closed':
        raise OSError('Channel already open')

    self._open_waiter = asyncio.Future(loop=self._loop)

    self._conn._send_packet(Byte(MSG_CHANNEL_OPEN), String(chantype),
                            UInt32(self._recv_chan),
                            UInt32(self._recv_window),
                            UInt32(self._recv_pktsize), *args)

    self._send_state = 'open_sent'
    return (yield from self._open_waiter)
def _send_packet(self, pkttype, *args):
    """Send a packet on the channel, addressed to the remote channel
       number negotiated at open time."""
    if self._send_chan is None:
        raise OSError('Channel not open')

    self._conn._send_packet(Byte(pkttype), UInt32(self._send_chan), *args)
def _send_request(self, request, *args, want_reply=False):
    """Send a CHANNEL_REQUEST packet for this channel"""

    payload = (String(request), Boolean(want_reply)) + args
    self._send_packet(MSG_CHANNEL_REQUEST, *payload)
@asyncio.coroutine
def _make_request(self, request, *args):
    """Make a channel request and wait for the response

       Returns True on CHANNEL_SUCCESS, False on CHANNEL_FAILURE
       (resolved by _process_response)."""
    waiter = asyncio.Future(loop=self._loop)
    self._request_waiters.append(waiter)
    self._send_request(request, *args, want_reply=True)
    return (yield from waiter)
def abort(self):
    """Forcibly close the channel

       Immediately sends a close to the peer without flushing any
       buffered outgoing data; incoming data in flight is discarded.
    """
    if self._send_state in {'close_sent', 'closed'}:
        return

    self._send_packet(MSG_CHANNEL_CLOSE)
    self._send_state = 'close_sent'
def close(self):
"""Cleanly close the channel
This method can be called to cleanly close the channel, after
which no more data can be sent or received. Any unsent buffered
data will be flushed asynchronously before the channel is
closed.
"""
if self._send_state not in {'close_pending', 'close_sent', 'closed'}:
self._send_state = 'close_pending'
self._flush_send_buf()
@asyncio.coroutine
def wait_closed(self):
"""Wait for this channel to close
This method is a coroutine which can be called to block until
this channel has finished closing.
"""
if self._session:
waiter = asyncio.Future(loop=self._loop)
self._close_waiters.append(waiter)
yield from waiter
def get_extra_info(self, name, default=None):
"""Get additional information about the channel
This method returns extra information about the channel once
it is established. Supported values include ``'connection'``
to return the SSH connection this channel is running over plus
all of the values supported on that connection.
For TCP channels, the values ``'local_peername'`` and
``'remote_peername'`` are added to return the local and remote
host and port information for the tunneled TCP connection.
"""
return self._extra.get(name, self._conn.get_extra_info(name, default)
if self._conn else default)
def can_write_eof(self):
"""Return whether the channel supports :meth:`write_eof`
This method always returns ``True``.
"""
return True
def get_write_buffer_size(self):
"""Return the current size of the channel's output buffer
This method returns how many bytes are currently in the
channel's output buffer waiting to be written.
"""
return self._send_buf_len
def set_write_buffer_limits(self, high=None, low=None):
"""Set the high- and low-water limits for write flow control
This method sets the limits used when deciding when to call
the ``pause_writing()`` and ``resume_writing()`` methods on
SSH sessions. Writing will be paused when the write buffer
size exceeds the high-water mark, and resumed when the
write buffer size equals or drops below the low-water mark.
"""
if high is None:
high = 4*low if low is not None else 65536
if low is None:
low = high // 4
if not 0 <= low <= high:
raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
(high, low))
self._send_high_water = high
self._send_low_water = low
self._pause_resume_writing()
def write(self, data, datatype=None):
"""Write data on the channel
This method can be called to send data on the channel. If
an encoding was specified when the channel was created, the
data should be provided as a string and will be converted
using that encoding. Otherwise, the data should be provided
as bytes.
An extended data type can optionally be provided. For
instance, this is used from a :class:`SSHServerSession`
to write data to ``stderr``.
:param data:
The data to send on the channel
:param integer datatype: (optional)
The extended data type of the data, from :ref:`extended
data types <ExtendedDataTypes>`
:type data: string or bytes
:raises: :exc:`OSError` if the channel isn't open for sending
or the extended data type is not valid for this type
of channel
"""
if self._send_state != 'open':
raise BrokenPipeError('Channel not open for sending')
if datatype is not None and datatype not in self._write_datatypes:
raise OSError('Invalid extended data type')
if len(data) == 0:
return
if self._encoding:
data = data.encode(self._encoding)
self._send_buf.append((bytearray(data), datatype))
self._send_buf_len += len(data)
self._flush_send_buf()
def writelines(self, list_of_data, datatype=None):
"""Write a list of data bytes on the channel
This method can be called to write a list (or any iterable) of
data bytes to the channel. It is functionality equivalent to
calling :meth:`write` on each element in the list.
:param list_of_data:
The data to send on the channel
:param integer datatype: (optional)
The extended data type of the data, from :ref:`extended
data types <ExtendedDataTypes>`
:type list_of_data: iterable of ``string`` or ``bytes`` objects
:raises: :exc:`OSError` if the channel isn't open for sending
or the extended data type is not valid for this type
of channel
"""
sep = '' if self._encoding else b''
return self.write(sep.join(list_of_data), datatype)
def write_eof(self):
"""Write EOF on the channel
This method sends an end-of-file indication on the
channel, after which no more data can be sent. The
channel remains open, though, and data may still be
sent in the other direction.
:raises: :exc:`OSError` if the channel isn't open for sending
"""
if self._send_state != 'open':
raise BrokenPipeError('Channel not open for sending')
self._send_state = 'eof_pending'
self._flush_send_buf()
def pause_reading(self):
"""Pause delivery of incoming data
This method is used to temporarily suspend delivery of incoming
channel data. After this call, incoming data will no longer
be delivered until :meth:`resume_reading` is called. Data will be
buffered locally up to the configured SSH channel window size,
but window updates will no longer be sent, eventually causing
back pressure on the remote system.
.. note:: Channel close notifications are not suspended by this
call. If the remote system closes the channel while
delivery is suspended, the channel will be closed even
though some buffered data may not have been delivered.
"""
self._recv_paused = True
def resume_reading(self):
"""Resume delivery of incoming data
This method can be called to resume delivery of incoming data
which was suspended by a call to :meth:`pause_reading`. As soon
as this method is called, any buffered data will be delivered
immediately. A pending end-of-file notication may also be
delivered if one was queued while reading was paused.
"""
if self._recv_paused:
self._recv_paused = False
while self._recv_buf and not self._recv_paused:
self._deliver_data(*self._recv_buf.pop(0))
class SSHClientChannel(SSHChannel):
    """SSH client channel"""

    # Extended data types this channel can receive (stderr output)
    _read_datatypes = {EXTENDED_DATA_STDERR}

    def __init__(self, conn, loop, encoding, window, max_pktsize):
        super().__init__(conn, loop, encoding, window, max_pktsize)

        # Exit information reported by the server, if any; see
        # get_exit_status() and get_exit_signal()
        self._exit_status = None
        self._exit_signal = None

    @asyncio.coroutine
    def _create(self, session_factory, command, subsystem, env,
                term_type, term_size, term_modes):
        """Create an SSH client session

           Open a ``session`` channel, send any requested environment
           variables and pseudo-terminal request, then start a command,
           subsystem, or shell and hand the channel to a new session
           object produced by ``session_factory``.

        """

        packet = yield from self._open(b'session')

        # Client sessions should have no extra data in the open confirmation
        packet.check_end()

        self._session = session_factory()
        self._session.connection_made(self)

        # Forward the requested environment variables to the server
        for name, value in env.items():
            name = str(name).encode('utf-8')
            value = str(value).encode('utf-8')
            self._send_request(b'env', String(name), String(value))

        # Request a pseudo-terminal if a terminal type was given
        if term_type:
            term_type = term_type.encode('ascii')

            if len(term_size) == 4:
                width, height, pixwidth, pixheight = term_size
            elif len(term_size) == 2:
                width, height = term_size
                pixwidth = pixheight = 0
            elif not term_size:
                width = height = pixwidth = pixheight = 0
            else:
                raise ValueError('If set, terminal size must be a tuple of '
                                 '2 or 4 integers')

            # Encode POSIX terminal modes as opcode byte + 32-bit value
            # pairs, terminated by PTY_OP_END
            modes = b''
            for mode, value in term_modes.items():
                if mode <= PTY_OP_END or mode >= PTY_OP_RESERVED:
                    raise ValueError('Invalid pty mode: %s' % mode)
                modes += Byte(mode) + UInt32(value)
            modes += Byte(PTY_OP_END)

            if not (yield from self._make_request(b'pty-req',
                                                  String(term_type),
                                                  UInt32(width),
                                                  UInt32(height),
                                                  UInt32(pixwidth),
                                                  UInt32(pixheight),
                                                  String(modes))):
                self.close()
                raise ChannelOpenError(OPEN_REQUEST_PTY_FAILED,
                                       'PTY request failed')

        # Start a command, subsystem, or interactive shell
        if command:
            result = yield from self._make_request(b'exec', String(command))
        elif subsystem:
            result = yield from self._make_request(b'subsystem',
                                                   String(subsystem))
        else:
            result = yield from self._make_request(b'shell')

        if not result:
            self.close()
            raise ChannelOpenError(OPEN_REQUEST_SESSION_FAILED,
                                   'Session request failed')

        # The session may have been torn down while waiting for the reply
        if not self._session:
            raise ChannelOpenError(OPEN_REQUEST_SESSION_FAILED,
                                   'Channel closed during session startup')

        self._session.session_started()
        self.resume_reading()

        return self, self._session

    def _process_xon_xoff_request(self, packet):
        """Process a request to set up XON/XOFF processing"""

        client_can_do = packet.get_boolean()
        packet.check_end()

        self._session.xon_xoff_requested(client_can_do)
        return True

    def _process_exit_status_request(self, packet):
        """Process a request to deliver exit status"""

        # Mask the reported status to its low 8 bits
        status = packet.get_uint32() & 0xff
        packet.check_end()

        self._exit_status = status
        self._session.exit_status_received(status)
        return True

    def _process_exit_signal_request(self, packet):
        """Process a request to deliver an exit signal"""

        signal = packet.get_string()
        core_dumped = packet.get_boolean()
        msg = packet.get_string()
        lang = packet.get_string()
        packet.check_end()

        # A malformed signal request is a protocol error, not a local one
        try:
            signal = signal.decode('ascii')
            msg = msg.decode('utf-8')
            lang = lang.decode('ascii')
        except UnicodeDecodeError:
            raise DisconnectError(DISC_PROTOCOL_ERROR,
                                  'Invalid exit signal request') from None

        self._exit_signal = (signal, core_dumped, msg, lang)
        self._session.exit_signal_received(signal, core_dumped, msg, lang)
        return True

    def get_exit_status(self):
        """Return the session's exit status

           This method returns the exit status of the session if one has
           been sent. If an exit signal was received, this method
           returns -1 and the exit signal information can be collected
           by calling :meth:`get_exit_signal`. If neither has been sent,
           this method returns ``None``.

        """

        if self._exit_status is not None:
            return self._exit_status
        elif self._exit_signal:
            return -1
        else:
            return None

    def get_exit_signal(self):
        """Return the session's exit signal, if one was sent

           This method returns information about the exit signal sent on
           this session. If an exit signal was sent, a tuple is returned
           containing the signal name, a boolean for whether a core dump
           occurred, a message associated with the signal, and the language
           the message was in. If no exit signal was sent, ``None`` is
           returned.

        """

        return self._exit_signal

    def change_terminal_size(self, width, height, pixwidth=0, pixheight=0):
        """Change the terminal window size for this session

           This method changes the width and height of the terminal
           associated with this session.

           :param integer width:
               The width of the terminal in characters
           :param integer height:
               The height of the terminal in characters
           :param integer pixwidth: (optional)
               The width of the terminal in pixels
           :param integer pixheight: (optional)
               The height of the terminal in pixels

        """

        self._send_request(b'window-change', UInt32(width), UInt32(height),
                           UInt32(pixwidth), UInt32(pixheight))

    def send_break(self, msec):
        """Send a break to the remote process

           This method requests that the server perform a break
           operation on the remote process or service as described in
           :rfc:`4335`.

           :param integer msec:
               The duration of the break in milliseconds

           :raises: :exc:`OSError` if the channel is not open

        """

        self._send_request(b'break', UInt32(msec))

    def send_signal(self, signal):
        """Send a signal to the remote process

           This method can be called to deliver a signal to the remote
           process or service. Signal names should be as described in
           section 6.10 of :rfc:`4254#section-6.10`.

           :param string signal:
               The signal to deliver

           :raises: :exc:`OSError` if the channel is not open

        """

        signal = signal.encode('ascii')
        self._send_request(b'signal', String(signal))

    def terminate(self):
        """Terminate the remote process

           This method can be called to terminate the remote process or
           service by sending it a ``TERM`` signal.

           :raises: :exc:`OSError` if the channel is not open

        """

        self.send_signal('TERM')

    def kill(self):
        """Forcibly kill the remote process

           This method can be called to forcibly stop the remote process
           or service by sending it a ``KILL`` signal.

           :raises: :exc:`OSError` if the channel is not open

        """

        self.send_signal('KILL')
class SSHServerChannel(SSHChannel):
    """SSH server channel"""

    # Extended data types this channel is allowed to send (stderr output)
    _write_datatypes = {EXTENDED_DATA_STDERR}

    def __init__(self, conn, loop, encoding, window, max_pktsize):
        """Initialize an SSH server channel"""

        super().__init__(conn, loop, encoding, window, max_pktsize)

        # Environment seeded from the key's 'environment' option and
        # later extended by client 'env' requests
        self._env = self._conn.get_key_option('environment', {})

        # Command or subsystem the client requested, if any
        self._command = None
        self._subsystem = None

        # Terminal information from the client's pty-req, if any
        self._term_type = None
        self._term_size = (0, 0, 0, 0)
        self._term_modes = {}

    def _process_pty_req_request(self, packet):
        """Process a request to open a pseudo-terminal"""

        term_type = packet.get_string()
        width = packet.get_uint32()
        height = packet.get_uint32()
        pixwidth = packet.get_uint32()
        pixheight = packet.get_uint32()
        modes = packet.get_string()
        packet.check_end()

        try:
            self._term_type = term_type.decode('ascii')
        except UnicodeDecodeError:
            raise DisconnectError(DISC_PROTOCOL_ERROR,
                                  'Invalid pty request') from None

        # Reject the request when key or certificate options disallow ptys
        if not self._conn.check_key_permission('pty') or \
           not self._conn.check_certificate_permission('pty'):
            return False

        self._term_size = (width, height, pixwidth, pixheight)

        # Parse the encoded terminal modes: one opcode byte followed by
        # a 32-bit big-endian value, terminated by PTY_OP_END or any
        # opcode in the reserved range
        idx = 0
        while idx < len(modes):
            mode = modes[idx]
            idx += 1
            if mode == PTY_OP_END or mode >= PTY_OP_RESERVED:
                break

            if idx+4 <= len(modes):
                self._term_modes[mode] = int.from_bytes(modes[idx:idx+4], 'big')
                idx += 4
            else:
                raise DisconnectError(DISC_PROTOCOL_ERROR,
                                      'Invalid pty modes string')

        return self._session.pty_requested(self._term_type, self._term_size,
                                           self._term_modes)

    def _process_env_request(self, packet):
        """Process a request to set an environment variable"""

        name = packet.get_string()
        value = packet.get_string()
        packet.check_end()

        # Reject (but don't disconnect on) names/values that aren't UTF-8
        try:
            name = name.decode('utf-8')
            value = value.decode('utf-8')
        except UnicodeDecodeError:
            return False

        self._env[name] = value
        return True

    def _start_session(self, command=None, subsystem=None):
        """Tell the session a shell, command, or subsystem was requested"""

        # A command forced by certificate or key options overrides
        # whatever the client asked for
        forced_command = self._conn.get_certificate_option('force-command')

        if forced_command is None:
            forced_command = self._conn.get_key_option('command')

        if forced_command is not None:
            command = forced_command

        if command is not None:
            self._command = command
            result = self._session.exec_requested(command)
        elif subsystem is not None:
            self._subsystem = subsystem
            result = self._session.subsystem_requested(subsystem)
        else:
            result = self._session.shell_requested()

        return result

    def _process_shell_request(self, packet):
        """Process a request to open a shell"""

        packet.check_end()

        return self._start_session()

    def _process_exec_request(self, packet):
        """Process a request to execute a command"""

        command = packet.get_string()
        packet.check_end()

        try:
            command = command.decode('utf-8')
        except UnicodeDecodeError:
            return False

        return self._start_session(command=command)

    def _process_subsystem_request(self, packet):
        """Process a request to open a subsystem"""

        subsystem = packet.get_string()
        packet.check_end()

        try:
            subsystem = subsystem.decode('ascii')
        except UnicodeDecodeError:
            return False

        return self._start_session(subsystem=subsystem)

    def _process_window_change_request(self, packet):
        """Process a request to change the window size"""

        width = packet.get_uint32()
        height = packet.get_uint32()
        pixwidth = packet.get_uint32()
        pixheight = packet.get_uint32()
        packet.check_end()

        self._term_size = (width, height, pixwidth, pixheight)
        self._session.terminal_size_changed(width, height, pixwidth, pixheight)
        return True

    def _process_signal_request(self, packet):
        """Process a request to send a signal"""

        signal = packet.get_string()
        packet.check_end()

        try:
            signal = signal.decode('ascii')
        except UnicodeDecodeError:
            return False

        self._session.signal_received(signal)
        return True

    def _process_break_request(self, packet):
        """Process a request to send a break"""

        msec = packet.get_uint32()
        packet.check_end()

        return self._session.break_received(msec)

    def start_sftp_server(self, sftp_factory):
        """Start an SFTP server for this session

           This method can be used by an existing :class:`SSHServerSession`
           to replace itself with an SFTP server session. Calls to this
           method should be made from :meth:`session_started
           <SSHServerSession.session_started>` before any data is read
           or written. Once called, no further calls will be made on the
           original session.

           .. note:: The :meth:`connection_lost
                     <SSHServerSession.connection_lost>` method will not
                     be called on the original server session when this
                     is used.

           :param callable sftp_factory:
               A callable which returns an :class:`SFTPServer` object
               that will be created to handle SFTP requests on this
               channel.

        """

        # Reset the encoding to allow the transfer of binary data
        self._encoding = None

        # Replace the session with an SFTPServerSession
        self._session = SFTPServerSession(sftp_factory(self._conn))
        self._session.connection_made(self)
        self._session.session_started()

    def get_environment(self):
        """Return the environment for this session

           This method returns the environment set by the client
           when the session was opened. Calls to this method should
           only be made after :meth:`session_started
           <SSHServerSession.session_started>` has been called on
           the :class:`SSHServerSession`.

           :returns: A dictionary containing the environment variables
                     set by the client

        """

        return self._env

    def get_command(self):
        """Return the command the client requested to execute, if any

           This method returns the command the client requested to
           execute when the session was opened, if any. If the client
           did not request that a command be executed, this method
           will return ``None``. Calls to this method should only be made
           after :meth:`session_started <SSHServerSession.session_started>`
           has been called on the :class:`SSHServerSession`. When using
           the stream-based API, calls to this can be made at any time
           after the handler function has started up.

        """

        return self._command

    def get_subsystem(self):
        """Return the subsystem the client requested to open, if any

           This method returns the subsystem the client requested to
           open when the session was opened, if any. If the client
           did not request that a subsystem be opened, this method will
           return ``None``. Calls to this method should only be made
           after :meth:`session_started <SSHServerSession.session_started>`
           has been called on the :class:`SSHServerSession`. When using
           the stream-based API, calls to this can be made at any time
           after the handler function has started up.

        """

        return self._subsystem

    def get_terminal_type(self):
        """Return the terminal type for this session

           This method returns the terminal type set by the client
           when the session was opened. If the client didn't request
           a pseudo-terminal, this method will return ``None``. Calls
           to this method should only be made after :meth:`session_started
           <SSHServerSession.session_started>` has been called on the
           :class:`SSHServerSession`. When using the stream-based API,
           calls to this can be made at any time after the handler
           function has started up.

           :returns: A string containing the terminal type or ``None`` if
                     no pseudo-terminal was requested

        """

        return self._term_type

    def get_terminal_size(self):
        """Return terminal size information for this session

           This method returns the latest terminal size information set
           by the client. If the client didn't set any terminal size
           information, all values returned will be zero. Calls to
           this method should only be made after :meth:`session_started
           <SSHServerSession.session_started>` has been called on the
           :class:`SSHServerSession`. When using the stream-based API,
           calls to this can be made at any time after the handler
           function has started up.

           Also see :meth:`terminal_size_changed()
           <SSHServerSession.terminal_size_changed>` or the
           :exc:`TerminalSizeChanged` exception for how to get notified
           when the terminal size changes.

           :returns: A tuple of four integers containing the width and
                     height of the terminal in characters and the width
                     and height of the terminal in pixels

        """

        return self._term_size

    def get_terminal_mode(self, mode):
        """Return the requested TTY mode for this session

           This method looks up the value of a POSIX terminal mode
           set by the client when the session was opened. If the client
           didn't request a pseudo-terminal or didn't set the requested
           TTY mode opcode, this method will return ``None``. Calls to
           this method should only be made after :meth:`session_started
           <SSHServerSession.session_started>` has been called on the
           :class:`SSHServerSession`. When using the stream-based API,
           calls to this can be made at any time after the handler
           function has started up.

           :param integer mode:
               POSIX terminal mode taken from :ref:`POSIX terminal modes
               <PTYModes>` to look up

           :returns: An integer containing the value of the requested
                     POSIX terminal mode or ``None`` if the requested
                     mode was not set

        """

        return self._term_modes.get(mode)

    def set_xon_xoff(self, client_can_do):
        """Set whether the client should enable XON/XOFF flow control

           This method can be called to tell the client whether or not
           to enable XON/XOFF flow control, indicating that it should
           intercept Control-S and Control-Q coming from its local
           terminal to pause and resume output, respectively.
           Applications should set client_can_do to ``True`` to
           enable this functionality or to ``False`` to tell the client
           to forward Control-S and Control-Q through as normal input.

           :param boolean client_can_do:
               Whether or not the client should enable XON/XOFF flow control

        """

        self._send_request(b'xon-xoff', Boolean(client_can_do))

    def write_stderr(self, data):
        """Write output to stderr

           This method can be called to send output to the client which
           is intended to be displayed on stderr. If an encoding was
           specified when the channel was created, the data should be
           provided as a string and will be converted using that
           encoding. Otherwise, the data should be provided as bytes.

           :param data:
               The data to send to stderr
           :type data: string or bytes

           :raises: :exc:`OSError` if the channel isn't open for sending

        """

        self.write(data, EXTENDED_DATA_STDERR)

    def writelines_stderr(self, list_of_data):
        """Write a list of data bytes to stderr

           This method can be called to write a list (or any iterable) of
           data bytes to the channel. It is functionally equivalent to
           calling :meth:`write_stderr` on each element in the list.

        """

        self.writelines(list_of_data, EXTENDED_DATA_STDERR)

    def exit(self, status):
        """Send exit status and close the channel

           This method can be called to report an exit status for the
           process back to the client and close the channel. A zero
           exit status is generally returned when the operation was
           successful. After reporting the status, the channel is
           closed.

           :param integer status:
               The exit status to report to the client

           :raises: :exc:`OSError` if the channel isn't open

        """

        if self._send_state not in {'open', 'eof_pending', 'eof_sent'}:
            raise OSError('Channel not open')

        self._send_request(b'exit-status', UInt32(status & 0xff))
        self.close()

    def exit_with_signal(self, signal, core_dumped=False,
                         msg='', lang=DEFAULT_LANG):
        """Send exit signal and close the channel

           This method can be called to report that the process
           terminated abnormally with a signal. A more detailed
           error message may also be provided, along with an indication
           of whether or not the process dumped core. After
           reporting the signal, the channel is closed.

           :param string signal:
               The signal which caused the process to exit
           :param boolean core_dumped: (optional)
               Whether or not the process dumped core
           :param msg: (optional)
               Details about what error occurred
           :param lang: (optional)
               The language the error message is in

           :raises: :exc:`OSError` if the channel isn't open

        """

        if self._send_state not in {'open', 'eof_pending', 'eof_sent'}:
            raise OSError('Channel not open')

        signal = signal.encode('ascii')
        msg = msg.encode('utf-8')
        lang = lang.encode('ascii')
        self._send_request(b'exit-signal', String(signal),
                           Boolean(core_dumped), String(msg), String(lang))
        self.close()
class SSHTCPChannel(SSHChannel):
    """SSH TCP channel"""

    @asyncio.coroutine
    def _finish_open_request(self, session):
        """Complete handling of a TCP channel open request"""

        yield from super()._finish_open_request(session)

        if not self._session:
            return

        self._session.session_started()
        self.resume_reading()

    @asyncio.coroutine
    def _open(self, session_factory, chantype, host, port,
              orig_host, orig_port):
        """Open a TCP channel"""

        # Expose the tunneled endpoints via get_extra_info()
        self._extra['remote_peername'] = (host, port)
        self._extra['local_peername'] = (orig_host, orig_port)

        packet = yield from super()._open(chantype,
                                          String(host.encode('utf-8')),
                                          UInt32(port),
                                          String(orig_host.encode('utf-8')),
                                          UInt32(orig_port))

        # TCP sessions should have no extra data in the open confirmation
        packet.check_end()

        self._session = session_factory()
        self._session.connection_made(self)
        self._session.session_started()
        self.resume_reading()

        return self, self._session

    @asyncio.coroutine
    def _connect(self, session_factory, host, port, orig_host, orig_port):
        """Create a new outbound TCP session"""

        result = yield from self._open(session_factory, b'direct-tcpip',
                                       host, port, orig_host, orig_port)
        return result

    @asyncio.coroutine
    def _accept(self, session_factory, host, port, orig_host, orig_port):
        """Create a new forwarded TCP session"""

        result = yield from self._open(session_factory, b'forwarded-tcpip',
                                       host, port, orig_host, orig_port)
        return result
class SSHSession:
    """SSH session handler

       Base class defining the callbacks invoked on SSH session objects.
       All methods here are no-op defaults intended to be overridden by
       subclasses.

    """

    def connection_made(self, chan):
        """Called when a channel is opened successfully

           This method is called when a channel is opened successfully. The
           channel parameter should be stored if needed for later use.

           :param chan:
               The channel which was successfully opened.
           :type chan: :class:`SSHClientChannel`

        """

    def connection_lost(self, exc):
        """Called when a channel is closed

           This method is called when a channel is closed. If the channel
           is shut down cleanly, *exc* will be ``None``. Otherwise, it
           will be an exception explaining the reason for the channel close.

           :param exc:
               The exception which caused the channel to close, or
               ``None`` if the channel closed cleanly.
           :type exc: :class:`Exception`

        """

    def session_started(self):
        """Called when the session is started

           This method is called when a session has started up. For
           client and server sessions, this will be called once a
           shell, exec, or subsystem request has been successfully
           completed. For TCP sessions, it will be called immediately
           after the connection is opened.

        """

    def data_received(self, data, datatype):
        """Called when data is received on the channel

           This method is called when data is received on the channel.
           If an encoding was specified when the channel was created,
           the data will be delivered as a string after decoding with
           the requested encoding. Otherwise, the data will be delivered
           as bytes.

           :param data:
               The data received on the channel
           :param datatype:
               The extended data type of the data, from :ref:`extended
               data types <ExtendedDataTypes>`
           :type data: string or bytes

        """

    def eof_received(self):
        """Called when EOF is received on the channel

           This method is called when an end-of-file indication is received
           on the channel, after which no more data will be received. If this
           method returns ``True``, the channel remains half open and data
           may still be sent. Otherwise, the channel is automatically closed
           after this method returns. This is the default behavior.

        """

    def pause_writing(self):
        """Called when the write buffer becomes full

           This method is called when the channel's write buffer becomes
           full and no more data can be sent until the remote system
           adjusts its window. While data can still be buffered locally,
           applications may wish to stop producing new data until the
           write buffer has drained.

        """

    def resume_writing(self):
        """Called when the write buffer has sufficiently drained

           This method is called when the channel's send window reopens
           and enough data has drained from the write buffer to allow the
           application to produce more data.

        """
class SSHClientSession(SSHSession):
    """SSH client session handler

       Applications should subclass this when implementing an SSH client
       session handler. The functions listed below should be implemented
       to define application-specific behavior. In particular, the standard
       ``asyncio`` protocol methods such as :meth:`connection_made`,
       :meth:`connection_lost`, :meth:`data_received`, :meth:`eof_received`,
       :meth:`pause_writing`, and :meth:`resume_writing` are all supported.
       In addition, :meth:`session_started` is called as soon as the SSH
       session is fully started, :meth:`xon_xoff_requested` can be used to
       determine if the server wants the client to support XON/XOFF flow
       control, and :meth:`exit_status_received` and
       :meth:`exit_signal_received` can be used to receive session exit
       information.

    """

    def xon_xoff_requested(self, client_can_do):
        """XON/XOFF flow control has been enabled or disabled

           This method is called to notify the client whether or not
           to enable XON/XOFF flow control. If client_can_do is
           ``True`` and output is being sent to an interactive
           terminal the application should allow input of Control-S
           and Control-Q to pause and resume output, respectively.
           If client_can_do is ``False``, Control-S and Control-Q
           should be treated as normal input and passed through to
           the server. Non-interactive applications can ignore this
           request.

           By default, this message is ignored.

           :param boolean client_can_do:
               Whether or not to enable XON/XOFF flow control

        """

    def exit_status_received(self, status):
        """A remote exit status has been received for this session

           This method is called when the shell, command, or subsystem
           running on the server terminates and returns an exit status.
           A zero exit status generally means that the operation was
           successful. This call will generally be followed by a call
           to :meth:`connection_lost`.

           By default, the exit status is ignored.

           :param integer status:
               The exit status returned by the remote process

        """

    def exit_signal_received(self, signal, core_dumped, msg, lang):
        """A remote exit signal has been received for this session

           This method is called when the shell, command, or subsystem
           running on the server terminates abnormally with a signal.
           A more detailed error may also be provided, along with an
           indication of whether the remote process dumped core. This call
           will generally be followed by a call to :meth:`connection_lost`.

           By default, exit signals are ignored.

           :param string signal:
               The signal which caused the remote process to exit
           :param boolean core_dumped:
               Whether or not the remote process dumped core
           :param msg:
               Details about what error occurred
           :param lang:
               The language the error message is in

        """
class SSHServerSession(SSHSession):
"""SSH server session handler
Applications should subclass this when implementing an SSH server
session handler. The functions listed below should be implemented
to define application-specific behavior. In particular, the
standard ``asyncio`` protocol methods such as :meth:`connection_made`,
:meth:`connection_lost`, :meth:`data_received`, :meth:`eof_received`,
:meth:`pause_writing`, and :meth:`resume_writing` are all supported.
In addition, :meth:`pty_requested` is called when the client requests a
pseudo-terminal, one of :meth:`shell_requested`, :meth:`exec_requested`,
or :meth:`subsystem_requested` is called depending on what type of
session the client wants to start, :meth:`session_started` is called
once the SSH session is fully started, :meth:`terminal_size_changed` is
called when the client's terminal size changes, :meth:`signal_received`
is called when the client sends a signal, and :meth:`break_received`
is called when the client sends a break.
"""
def pty_requested(self, term_type, term_size, term_modes):
"""A psuedo-terminal has been requested
This method is called when the client sends a request to allocate
a pseudo-terminal with the requested terminal type, size, and
POSIX terminal modes. This method should return ``True`` if the
request for the pseudo-terminal is accepted. Otherwise, it should
return ``False`` to reject the request.
By default, requests to allocate a pseudo-terminal are accepted
but nothing is done with the associated terminal information.
Applications wishing to use this information should implement
this method and have it return ``True``, or call
:meth:`get_terminal_type() <SSHServerChannel.get_terminal_type>`,
:meth:`get_terminal_size() <SSHServerChannel.get_terminal_size>`,
or :meth:`get_terminal_mode() <SSHServerChannel.get_terminal_mode>`
on the :class:`SSHServerChannel` to get the information they need
after a shell, command, or subsystem is started.
:param string term:
Terminal type to set for this session
:param tuple term_size:
Terminal size to set for this session provided as a
tuple of four integers: the width and height of the
terminal in characters followed by the width and height
of the terminal in pixels
:param dictionary term_modes:
POSIX terminal modes to set for this session, where keys
are taken from :ref:`POSIX terminal modes <PTYModes>` with
values defined in section 8 of :rfc:`4254#section-8`.
:returns: A boolean indicating if the request for a
pseudo-terminal was allowed or not
"""
return True
    def terminal_size_changed(self, width, height, pixwidth, pixheight):
        """The terminal size has changed

        This method is called when a client requests a
        pseudo-terminal and again whenever the size of
        the client's terminal window changes.

        By default, this information is ignored, but applications
        wishing to use the terminal size can implement this method
        to get notified whenever it changes.

        :param integer width:
            The width of the terminal in characters
        :param integer height:
            The height of the terminal in characters
        :param integer pixwidth: (optional)
            The width of the terminal in pixels
        :param integer pixheight: (optional)
            The height of the terminal in pixels

        """
    def shell_requested(self):
        """The client has requested a shell

        This method should be implemented by the application to
        perform whatever processing is required when a client makes
        a request to open an interactive shell. It should return
        ``True`` to accept the request, or ``False`` to reject it.

        If the application returns ``True``, the :meth:`session_started`
        method will be called once the channel is fully open. No output
        should be sent until this method is called.

        By default this method returns ``False`` to reject all requests.

        :returns: A boolean indicating if the shell request was
                  allowed or not

        """

        return False
    def exec_requested(self, command):
        """The client has requested to execute a command

        This method should be implemented by the application to
        perform whatever processing is required when a client makes
        a request to execute a command. It should return ``True`` to
        accept the request, or ``False`` to reject it.

        If the application returns ``True``, the :meth:`session_started`
        method will be called once the channel is fully open. No output
        should be sent until this method is called.

        By default this method returns ``False`` to reject all requests.

        :param string command:
            The command the client has requested to execute

        :returns: A boolean indicating if the exec request was
                  allowed or not

        """

        return False
    def subsystem_requested(self, subsystem):
        """The client has requested to start a subsystem

        This method should be implemented by the application to
        perform whatever processing is required when a client makes
        a request to start a subsystem. It should return ``True`` to
        accept the request, or ``False`` to reject it.

        If the application returns ``True``, the :meth:`session_started`
        method will be called once the channel is fully open. No output
        should be sent until this method is called.

        By default this method returns ``False`` to reject all requests.

        :param string subsystem:
            The subsystem to start

        :returns: A boolean indicating if the request to open the
                  subsystem was allowed or not

        """

        return False
    def break_received(self, msec):
        """The client has sent a break

        This method is called when the client requests that the
        server perform a break operation on the terminal. If the
        break is performed, this method should return ``True``.
        Otherwise, it should return ``False``.

        By default, this method returns ``False`` indicating that
        no break was performed.

        :param integer msec:
            The duration of the break in milliseconds

        :returns: A boolean to indicate if the break operation was
                  performed or not

        """

        return False
    def signal_received(self, signal):
        """The client has sent a signal

        This method is called when the client delivers a signal
        on the channel.

        By default, signals from the client are ignored.

        :param signal:
            The signal delivered by the client

        """
class SSHTCPSession(SSHSession):
    """SSH TCP connection session handler

    Applications should subclass this when implementing a handler for
    SSH direct or forwarded TCP connections.

    SSH client applications wishing to open a direct connection should call
    :meth:`create_connection() <SSHClientConnection.create_connection>`
    on their :class:`SSHClientConnection`, passing in a factory which
    returns instances of this class.

    Server applications wishing to allow direct connections should
    implement the coroutine :meth:`connection_requested()
    <SSHServer.connection_requested>` on their :class:`SSHServer`
    object and have it return instances of this class.

    Server applications wishing to allow connection forwarding back
    to the client should implement the coroutine :meth:`server_requested()
    <SSHServer.server_requested>` on their :class:`SSHServer` object
    and call :meth:`create_connection()
    <SSHServerConnection.create_connection>` on their
    :class:`SSHServerConnection` for each new connection, passing it a
    factory which returns instances of this class.

    When a connection is successfully opened, :meth:`session_started`
    will be called, after which the application can begin sending data.
    Received data will be passed to the :meth:`data_received` method.

    """
| nchammas/asyncssh | asyncssh/channel.py | Python | epl-1.0 | 61,675 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import hook
from stevedore.tests import utils
class TestHook(utils.TestCase):
    """Tests for stevedore.hook.HookManager.

    All tests load the same single test extension ('t1' from the
    'stevedore.test.extension' namespace), so the construction is
    factored into a helper.
    """

    def _make_manager(self):
        """Build a HookManager loading the 't1' test extension."""
        return hook.HookManager(
            'stevedore.test.extension',
            't1',
            invoke_on_load=True,
            invoke_args=('a',),
            invoke_kwds={'b': 'B'},
        )

    def test_hook(self):
        # Exactly one extension should be loaded, under the hook name.
        em = self._make_manager()
        self.assertEqual(len(em.extensions), 1)
        self.assertEqual(em.names(), ['t1'])

    def test_get_by_name(self):
        # Indexing by hook name returns a list of matching extensions.
        em = self._make_manager()
        e_list = em['t1']
        self.assertEqual(len(e_list), 1)
        self.assertEqual(e_list[0].name, 't1')

    def test_get_by_name_missing(self):
        # Looking up an unknown hook name must raise KeyError.
        em = self._make_manager()
        self.assertRaises(KeyError, em.__getitem__, 't2')
| ctrlaltdel/neutrinator | vendor/stevedore/tests/test_hook.py | Python | gpl-3.0 | 1,713 |
# Copyright (C) 2016 Domos Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
class Peripheral(object):
    """Base class for a serial-attached peripheral device.

    A background thread continuously reads newline-terminated command
    lines from the serial port and dispatches them to matching
    :class:`Command` handlers listed in ``commands``.
    """

    # Firmware versions this peripheral class knows how to talk to;
    # subclasses override.
    supported_firmwares = []
    # Command handlers dispatched by call_command(); subclasses override.
    commands = []

    def __init__(self, serial, firmware):
        """Validate the firmware, pick the command set, and start reading.

        :param serial: an open serial-port-like object (needs read/write/
            close and a ``name`` attribute)
        :param firmware: firmware identifier reported by the device;
            must appear in ``supported_firmwares``
        :raises UnsupportedFirmwareException: if the firmware is unknown
        """
        self.pause_reading = False
        self.serial = serial
        self.firmware = firmware
        if self.firmware not in self.supported_firmwares:
            # NOTE(review): this print looks like leftover debugging output;
            # consider removing or routing through logging.
            print(self.supported_firmwares)
            raise UnsupportedFirmwareException(self.firmware)
        self.create_read_thread()
        if hasattr(self, "commands_firmware_"+str(firmware)): # use firmware-specific commands if provided
            self.commands = getattr(self, "commands_firmware_"+str(firmware))
        self.start_reading()

    def start_reading(self):
        """Start the reader thread, recreating it if it already ran once."""
        try:
            self.reading_thread.start()
        except RuntimeError: # restart thread
            # Thread objects can only be started once; build a fresh one.
            self.create_read_thread()
            self.reading_thread.start()

    def create_read_thread(self):
        """(Re)create the stop event and the background reader thread."""
        self.stop_reading_evt = threading.Event()
        self.reading_thread = threading.Thread(target=self._read,
            name="Thread reading for peripheral {} at port {}".format(self.__class__.__name__, self.serial.name))

    def close(self):
        """Signal the reader thread to stop and close the serial port."""
        self.stop_reading_evt.set()
        self.serial.close()

    def _read(self):
        """Reader-thread main loop.

        Accumulates bytes from the serial port one at a time into
        ``self.read``, skipping protocol noise characters, and dispatches
        a completed line (on ``\\n``) to :meth:`call_command`.
        """
        if not hasattr(self, "read"):
            self.read = ""
            self.char = None
        while not self.stop_reading_evt.is_set():
            self.char = self.serial.read(1)
            # CR, '#' and '?' are dropped; LF terminates a command line.
            if self.char not in (b'\n', b'\r', b'#', b'?'):
                self.read += self.char.decode('utf-8')
            if self.char == b'\n':
                if self.read != '':
                    self.call_command(self.read)
                self.read = ""

    def call_command(self, command):
        """Dispatch one received line to every handler with a matching name.

        The first whitespace-separated token is the command name; the
        remaining tokens are passed as positional arguments, with this
        peripheral supplied as the ``per`` keyword.
        """
        splt = command.split()
        command_name = splt[0]
        command_args = splt[1:]
        called = False
        for com in self.commands:
            if com.name == command_name:
                com(*command_args, per=self)
                called = True
        if not called:
            print("Couldn't find command handler for {}".format(command))

    def write(self, string):
        """Send a line to the device, appending a newline if missing."""
        if not string.endswith('\n'):
            string += '\n'
        self.serial.write(string.encode('utf-8'))
class NotYetRecognizedPeripheral(Peripheral):
    """Inert placeholder used before a device has identified itself.

    Every I/O-related hook is a no-op, so no serial port and no reader
    thread are ever touched.
    """

    supported_firmwares = [None]

    def __init__(self):
        # No serial port and no firmware yet; the base-class checks accept
        # None because supported_firmwares contains it.
        super().__init__(None, None)

    def start_reading(self):
        """No-op: there is nothing to read from yet."""

    def create_read_thread(self):
        """No-op: no reader thread is needed for the placeholder."""

    def _read(self):
        """No-op reader loop."""

    def write(self, string):
        """No-op: discard all writes."""

    def close(self):
        """No-op: nothing to shut down."""
class Command(object):
    """A named serial command; subclasses implement ``__call__``."""

    def __init__(self, name):
        # The name is matched against the first token of incoming lines.
        self.name = name

    def __call__(self, *args, **kwargs):
        message = "__call__ method not implemented in command '{}'.".format(self.name)
        raise NotImplementedError(message)
class UnsupportedFirmwareException(Exception):
    """Raised when a peripheral reports a firmware version that is
    not listed in its ``supported_firmwares``."""

    def __init__(self, firmware):
        # Keep the offending firmware identifier available to handlers.
        self.firmware = firmware
        message = "{}.".format(firmware)
        super().__init__(message)
| dpdani/Domos | src/peripherals/peripheral.py | Python | gpl-3.0 | 3,672 |
"""Classes for the management of current data."""
from backend.utils import compute_progress
from .base import BaseModel
class Stock(BaseModel):
    """Model holding the current market data for a single stock."""

    def key(self):
        """Return the storage key: '<date>:<symbol>'."""
        return '{}:{}'.format(self.time.date(), self.symbol)

    def update_current_values(self, values):
        """Store the latest (price, variation) pair, recompute progress
        toward any configured thresholds, and persist the record."""
        price, variation = values
        self.price = price
        self.variation = variation
        # Refresh progress for whichever thresholds are configured.
        for side in ('upper', 'lower'):
            threshold_attr = 'threshold_' + side
            if hasattr(self, threshold_attr):
                progress = compute_progress(price, getattr(self, threshold_attr))
                setattr(self, 'progress_' + side, progress)
        self.update_db()
class Bond(BaseModel):
    """Bond model class, keyed by date and ISIN."""
    def key(self):
        """Method that defines the pattern for the bond key: '<date>:<isin>'."""
        return '{}:{}'.format(self.time.date(), self.isin)
| akita8/scrapper | backend/models/current.py | Python | mit | 983 |
import pluggy
# Hook-implementation marker bound to the "eggsample" project name;
# plugins import this and decorate their hook functions with it.
hookimpl = pluggy.HookimplMarker("eggsample")
"""Marker to be imported and used in plugins (and for own implementations)"""
| CYBAI/servo | tests/wpt/web-platform-tests/tools/third_party/pluggy/docs/examples/eggsample/eggsample/__init__.py | Python | mpl-2.0 | 139 |
#
# Copyright 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
""" Returns information about a module """
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import click
from ..core import ModToolInfo
from .base import common_params, run
@click.command('info')
@click.option('--python-readable', is_flag=True,
              help="Return the output in a format that's easier to read for Python scripts.")
@click.option('--suggested-dirs',
              help="Suggest typical include dirs if nothing better can be detected.")
@common_params
def cli(**kwargs):
    """ Return information about a given module """
    # Renamed the local from the misleading 'self' (this is a plain
    # function, not a method): build the core info object, then run it.
    mod_info = ModToolInfo(**kwargs)
    run(mod_info)
| jdemel/gnuradio | gr-utils/modtool/cli/info.py | Python | gpl-3.0 | 800 |
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
- INI file based inventory, sections are groups or group related with special `:modifiers`.
- Entries in sections C([group_1]) are hosts, members of the group.
- Hosts can have variables defined inline as key/value pairs separated by C(=).
- The C(children) modifier indicates that the section contains groups.
- The C(vars) modifier indicates that the section contains variables assigned to members of the group.
- Anything found outside a section is considered an 'ungrouped' host.
- Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared within your inventory.
        - When declared inline with the host, INI values are processed by Python's ast.literal_eval function
(U(https://docs.python.org/2/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
(strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple C(key=value) parameters per line.
Therefore they need a way to indicate that a space is part of a value rather than a separator.
- When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
- Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
- See the Examples for proper quoting to prevent changes to variable type.
notes:
- Replaces the previously hardcoded INI inventory.
- Must be whitelisted in configuration to function.
- Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
The YAML inventory plugin processes variable values consistently and correctly.
'''
EXAMPLES = '''
example1: |
# example cfg file
[web]
host1
host2 ansible_port=222 # defined inline, interpreted as an integer
[web:vars]
http_port=8080 # all members of 'web' will inherit these
myvar=23 # defined in a :vars section, interpreted as a string
        [web:children] # child groups will automatically add their hosts to parent group
apache
nginx
[apache]
tomcat1
tomcat2 myvar=34 # host specific vars override group vars
tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
[nginx]
jenkins1
[nginx:vars]
has_java = True # vars in child groups override same in parent
[all:vars]
has_java = False # 'all' is 'top' parent
example2: |
# other example config
host1 # this is 'ungrouped'
# both hosts have same IP but diff ports, also 'ungrouped'
host2 ansible_host=127.0.0.1 ansible_port=44
host3 ansible_host=127.0.0.1 ansible_port=45
[g1]
host4
[g2]
host4 # same host as above, but member of 2 groups, will inherit vars from both
# inventory hostnames are unique
'''
import ast
import re
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.shlex import shlex_split
class InventoryModule(BaseFileInventoryPlugin):
    """
    Takes an INI-format inventory file and builds a list of groups and subgroups
    with their associated hosts and variable settings.
    """

    NAME = 'ini'
    _COMMENT_MARKERS = frozenset((u';', u'#'))
    b_COMMENT_MARKERS = frozenset((b';', b'#'))

    def __init__(self):

        super(InventoryModule, self).__init__()

        self.patterns = {}
        self._filename = None

    def parse(self, inventory, loader, path, cache=True):
        '''
        Reads the file at the given path (via the loader when available)
        and populates the inventory from its contents.
        '''

        super(InventoryModule, self).parse(inventory, loader, path)

        self._filename = path

        try:
            # Read in the hosts, groups, and variables defined in the inventory file.
            if self.loader:
                (b_data, private) = self.loader._get_file_contents(path)
            else:
                b_path = to_bytes(path, errors='surrogate_or_strict')
                with open(b_path, 'rb') as fh:
                    b_data = fh.read()

            try:
                # Faster to do to_text once on a long string than many
                # times on smaller strings
                data = to_text(b_data, errors='surrogate_or_strict').splitlines()
            except UnicodeError:
                # Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
                data = []
                for line in b_data.splitlines():
                    if line and line[0] in self.b_COMMENT_MARKERS:
                        # Replace is okay for comment lines
                        # data.append(to_text(line, errors='surrogate_then_replace'))
                        # Currently we only need these lines for accurate lineno in errors
                        data.append(u'')
                    else:
                        # Non-comment lines still have to be valid utf-8
                        data.append(to_text(line, errors='surrogate_or_strict'))

            self._parse(path, data)
        except Exception as e:
            raise AnsibleParserError(e)

    def _raise_error(self, message):
        '''Raise an AnsibleError annotated with the file name and current line.'''
        raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)

    def _parse(self, path, lines):
        '''
        Populates self.groups from the given array of lines. Raises an error on
        any parse failure.
        '''

        self._compile_patterns()

        # We behave as though the first line of the inventory is '[ungrouped]',
        # and begin to look for host definitions. We make a single pass through
        # each line of the inventory, building up self.groups and adding hosts,
        # subgroups, and setting variables as we go.

        pending_declarations = {}
        groupname = 'ungrouped'
        state = 'hosts'
        self.lineno = 0
        for line in lines:
            self.lineno += 1

            line = line.strip()
            # Skip empty lines and comments
            if not line or line[0] in self._COMMENT_MARKERS:
                continue

            # Is this a [section] header? That tells us what group we're parsing
            # definitions for, and what kind of definitions to expect.
            m = self.patterns['section'].match(line)
            if m:
                (groupname, state) = m.groups()

                state = state or 'hosts'
                if state not in ['hosts', 'children', 'vars']:
                    title = ":".join(m.groups())
                    self._raise_error("Section [%s] has unknown type: %s" % (title, state))

                # If we haven't seen this group before, we add a new Group.
                if groupname not in self.inventory.groups:
                    # Either [groupname] or [groupname:children] is sufficient to declare a group,
                    # but [groupname:vars] is allowed only if the group is declared elsewhere.
                    # We add the group anyway, but make a note in pending_declarations to check at the end.
                    #
                    # It's possible that a group is previously pending due to being defined as a child
                    # group, in that case we simply pass so that the logic below to process pending
                    # declarations will take the appropriate action for a pending child group instead of
                    # incorrectly handling it as a var state pending declaration
                    if state == 'vars' and groupname not in pending_declarations:
                        pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)

                    self.inventory.add_group(groupname)

                # When we see a declaration that we've been waiting for, we process and delete.
                if groupname in pending_declarations and state != 'vars':
                    if pending_declarations[groupname]['state'] == 'children':
                        self._add_pending_children(groupname, pending_declarations)
                    elif pending_declarations[groupname]['state'] == 'vars':
                        del pending_declarations[groupname]

                continue
            elif line.startswith('[') and line.endswith(']'):
                # BUGFIX: '%' binds tighter than '+', so the original message
                # concatenated to "...no spacesin the section entry...".
                # A trailing space restores the intended wording.
                self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces " % line +
                                  "in the section entry, and that there are no other invalid characters")

            # It's not a section, so the current state tells us what kind of
            # definition it must be. The individual parsers will raise an
            # error if we feed them something they can't digest.

            # [groupname] contains host definitions that must be added to
            # the current group.
            if state == 'hosts':
                hosts, port, variables = self._parse_host_definition(line)
                self._populate_host_vars(hosts, variables, groupname, port)

            # [groupname:vars] contains variable definitions that must be
            # applied to the current group.
            elif state == 'vars':
                (k, v) = self._parse_variable_definition(line)
                self.inventory.set_variable(groupname, k, v)

            # [groupname:children] contains subgroup names that must be
            # added as children of the current group. The subgroup names
            # must themselves be declared as groups, but as before, they
            # may only be declared later.
            elif state == 'children':
                child = self._parse_group_name(line)
                if child not in self.inventory.groups:
                    if child not in pending_declarations:
                        pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
                    else:
                        pending_declarations[child]['parents'].append(groupname)
                else:
                    self.inventory.add_child(groupname, child)
            else:
                # This can happen only if the state checker accepts a state that isn't handled above.
                self._raise_error("Entered unhandled state: %s" % (state))

        # Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
        # We report only the first such error here.
        for g in pending_declarations:
            decl = pending_declarations[g]
            if decl['state'] == 'vars':
                raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
            elif decl['state'] == 'children':
                raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))

    def _add_pending_children(self, group, pending):
        '''Attach a now-declared child group to all of its waiting parents.'''
        for parent in pending[group]['parents']:
            self.inventory.add_child(parent, group)
            if parent in pending and pending[parent]['state'] == 'children':
                self._add_pending_children(parent, pending)
        del pending[group]

    def _parse_group_name(self, line):
        '''
        Takes a single line and tries to parse it as a group name. Returns the
        group name if successful, or raises an error.
        '''

        m = self.patterns['groupname'].match(line)
        if m:
            return m.group(1)

        self._raise_error("Expected group name, got: %s" % (line))

    def _parse_variable_definition(self, line):
        '''
        Takes a string and tries to parse it as a variable definition. Returns
        the key and value if successful, or raises an error.
        '''

        # TODO: We parse variable assignments as a key (anything to the left of
        # an '='"), an '=', and a value (anything left) and leave the value to
        # _parse_value to sort out. We should be more systematic here about
        # defining what is acceptable, how quotes work, and so on.

        if '=' in line:
            (k, v) = [e.strip() for e in line.split("=", 1)]
            return (k, self._parse_value(v))

        self._raise_error("Expected key=value, got: %s" % (line))

    def _parse_host_definition(self, line):
        '''
        Takes a single line and tries to parse it as a host definition. Returns
        a tuple of (hostnames, port, variables) if successful, or raises an
        error.
        '''

        # A host definition comprises (1) a non-whitespace hostname or range,
        # optionally followed by (2) a series of key="some value" assignments.
        # We ignore any trailing whitespace and/or comments. For example, here
        # are a series of host definitions in a group:
        #
        # [groupname]
        # alpha
        # beta:2345 user=admin      # we'll tell shlex
        # gamma sudo=True user=root # to ignore comments

        try:
            tokens = shlex_split(line, comments=True)
        except ValueError as e:
            self._raise_error("Error parsing host definition '%s': %s" % (line, e))

        (hostnames, port) = self._expand_hostpattern(tokens[0])

        # Try to process anything remaining as a series of key=value pairs.
        variables = {}
        for t in tokens[1:]:
            if '=' not in t:
                self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
            (k, v) = t.split('=', 1)
            variables[k] = self._parse_value(v)

        return hostnames, port, variables

    @staticmethod
    def _parse_value(v):
        '''
        Attempt to transform the string value from an ini file into a basic python object
        (int, dict, list, unicode string, etc).
        '''
        try:
            v = ast.literal_eval(v)
        # Using explicit exceptions.
        # Likely a string that literal_eval does not like. We wil then just set it.
        except ValueError:
            # For some reason this was thought to be malformed.
            pass
        except SyntaxError:
            # Is this a hash with an equals at the end?
            pass
        return to_text(v, nonstring='passthru', errors='surrogate_or_strict')

    def _compile_patterns(self):
        '''
        Compiles the regular expressions required to parse the inventory and
        stores them in self.patterns.
        '''

        # Section names are square-bracketed expressions at the beginning of a
        # line, comprising (1) a group name optionally followed by (2) a tag
        # that specifies the contents of the section. We ignore any trailing
        # whitespace and/or comments. For example:
        #
        # [groupname]
        # [somegroup:vars]
        # [naughty:children] # only get coal in their stockings

        self.patterns['section'] = re.compile(
            to_text(r'''^\[
                    ([^:\]\s]+)             # group name (see groupname below)
                    (?::(\w+))?             # optional : and tag name
                    \]
                    \s*                     # ignore trailing whitespace
                    (?:\#.*)?               # and/or a comment till the
                    $                       # end of the line
                ''', errors='surrogate_or_strict'), re.X
        )

        # FIXME: What are the real restrictions on group names, or rather, what
        # should they be? At the moment, they must be non-empty sequences of non
        # whitespace characters excluding ':' and ']', but we should define more
        # precise rules in order to support better diagnostics.

        self.patterns['groupname'] = re.compile(
            to_text(r'''^
                    ([^:\]\s]+)
                    \s*                     # ignore trailing whitespace
                    (?:\#.*)?               # and/or a comment till the
                    $                       # end of the line
                ''', errors='surrogate_or_strict'), re.X
        )
| sgerhart/ansible | lib/ansible/plugins/inventory/ini.py | Python | mit | 16,758 |
# -*- coding: utf-8 -*-
class Config(object):
    """Base configuration shared by all environments.

    Subclasses (Development / Production / Testing) override selected
    values. NOTE: LINK_STORAGE_DIR and STORAGES_DIR were previously
    assigned twice in this class body ('/store' and '/stores' first,
    then the '/srv/...' paths); only the last assignment took effect,
    so the dead duplicates were removed and the effective values kept.
    """
    DEBUG = False
    PORT = 5000
    HOST = '0.0.0.0'
    # URL_PREFIX = '/api'
    LOG_FILENAME = 'fstore.log'  # used if DEBUG=True
    LOG_ADDRESS = '/dev/log'  # used for logging.handlers.SysLogHandler
    LOG_FACILITY = 'local1'
    LOGGER_NAME = 'fstore.api'
    LOGGING_FORMATTER = \
        '%(levelname)s %(pathname)s:%(lineno)d %(funcName)s %(message)s'
    USE_X_SENDFILE = True
    USERS_DB = {}
    BUNDLE_RESERVED = 10  # How much % of disk space to reserve
    BUNDLES = {'store1': {'write_mode': True}}
    LINK_STORAGE_DIR = '/srv/link_storage'
    STORAGES_DIR = '/srv/storages'
    STORAGES = {}
    TRANSFORMATIONS = {
        'image': {
            'ext': {'action': 'ext', 'ext': True},
            '150': {'action': 'size', 'params': (150, 150), 'ext': True},
        }
    }
class Development(Config):
    """Settings for local development: debug on, throwaway credentials."""
    DEBUG = True
    LOG_FILENAME = '/srv/log/fstore.log'
    SECRET_KEY = 'development'
    SSL_VERIFYHOST = 0  # presumably disables SSL host verification -- dev only; confirm consumer
    USERS_DB = {'sm': 'pass'}
    STORAGES = {'store1': {'reserved': 5, 'writable': True}}
class Production(Config):
    """Production settings; secrets are kept out of version control."""
    # hold private settings in the config_production.py file
    pass
class Testing(Config):
    """Settings used by the automated test suite (tmp storage paths)."""
    TESTING = True
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    DEBUG = True
    LOG_FILENAME = '/srv/log/fstore.log'
    SECRET_KEY = 'testing'
    SSL_VERIFYHOST = 0
    TEST_IMG = 'test.jpg'  # fixture image used by the tests
    LINK_STORAGE_DIR = '/tmp/link_storage'
    STORAGES_DIR = '/tmp/storages'
    STORAGES = {}
    USERS_DB = {'test_user': 'test_password'}
| xmm/fstore | api/config.py | Python | bsd-3-clause | 1,605 |
from tests.testing_harness import TestHarness
def test_quadric_surfaces():
    """Run the regression harness against the expected statepoint file."""
    TestHarness('statepoint.10.h5').main()
| wbinventor/openmc | tests/regression_tests/quadric_surfaces/test.py | Python | mit | 142 |
import json
import os
import sys
import lmdb
__author__ = 'ananya.h'
def compute_recall(vertical, path_to_nn_lmdb, k_s=(1, 3, 5, 10, 20)):
    """Compute top-k retrieval recall for one street2shop vertical.

    Reads the retrieval ground truth and test pairs for `vertical`,
    looks up the precomputed nearest neighbours for each query photo in
    the LMDB at `path_to_nn_lmdb`, and prints/returns recall@k.

    :param vertical: category name, e.g. 'dresses'
    :param path_to_nn_lmdb: path to an LMDB mapping photo id -> JSON
        list of (neighbour photo id, distance) pairs
    :param k_s: ascending cutoffs at which to measure recall
        (changed from a mutable list default to a tuple -- the old
        mutable default was shared between calls)
    :return: dict mapping k -> [hits, evaluated] counts
    """
    env = lmdb.open(path_to_nn_lmdb)
    base_dir = "/data/street2shop"
    meta_dir = os.path.join(base_dir, "meta", "json")
    retrieval_path = os.path.join(meta_dir, "retrieval_"+vertical+".json")
    test_data = os.path.join(meta_dir, "test_pairs_"+vertical+".json")
    image_dir = os.path.join(base_dir, "structured_images", vertical)
    query_dir = os.path.join(base_dir, "structured_images", vertical+"_query")
    with open(retrieval_path) as jsonFile:
        data = json.load(jsonFile)
    # Forward map: catalog photo id -> product id.
    photo_to_product_map = {}
    for info in data:
        photo_to_product_map[info["photo"]] = info["product"]
    # Reverse map: product id -> set of its catalog photo ids.
    product_to_photo_map = {}
    for photo in photo_to_product_map:
        product = photo_to_product_map[photo]
        product_to_photo_map.setdefault(product, set()).add(photo)
    with open(test_data) as jsonFile:
        test_pairs = json.load(jsonFile)
    missing_photo, missing_product, valid_count = 0, 0, 0
    # k -> [hits, total evaluated queries]
    recall_dict = {}
    for k in k_s:
        recall_dict[k] = [0, 0]
    with env.begin() as txn:
        for pair in test_pairs:
            photo = pair["photo"]
            product = pair["product"]
            # Skip queries whose image is missing on disk.
            if not os.path.exists(os.path.join(query_dir, str(photo)+".jpg")):
                missing_photo += 1
                continue
            # Skip products whose catalog photo set is incomplete.
            prod_available = True
            for p in product_to_photo_map[product]:
                if not os.path.exists(os.path.join(image_dir, str(p)+".jpg")):
                    prod_available = False
                    break
            if not prod_available:
                missing_product += 1
                continue
            valid_count += 1
            # Single LMDB fetch (the original fetched the same key twice).
            nn = json.loads(txn.get(str(photo)))
            # Deduplicate neighbours at the product level, keeping rank order.
            product_nn = []
            for item in nn:
                p = int(item[0])
                prod = photo_to_product_map[p]
                if prod not in product_nn:
                    product_nn.append(prod)
            assert len(product_nn) > k_s[-1]
            for k in k_s:
                if product in product_nn[:k]:
                    recall_dict[k][0] += 1
                recall_dict[k][1] += 1
    print("Missing query %d Missing product set %d Total %d"%(missing_photo, missing_product, valid_count))
    for k in k_s:
        print("Recall at %d is %0.3f "%(k, recall_dict[k][0]*1.0/recall_dict[k][1]))
    return recall_dict
if __name__ == "__main__":
    # Expect exactly two positional arguments: the vertical name and the
    # path to the nearest-neighbour LMDB.
    cli_args = sys.argv[1:]
    if len(cli_args) != 2:
        print("Usage is python compute_recall.py <vertical> <path_to_nn_lmdb>")
        sys.exit(1)
    vertical, nn_lmdb_path = cli_args
    compute_recall(vertical, nn_lmdb_path)
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal Tensorflow SequenceExample input utility functions."""
import tensorflow as tf
from poem.core import common
from poem.core import data_utils
from poem.core import tfe_input_layer
read_from_table = tfe_input_layer.read_from_table
def _set_and_permute_time_axis(tensor, sequence_length, axis=-3):
  """Pins the time dimension to `sequence_length` and moves it to `axis`.

  The input tensor always carries time as its first axis; this fixes that
  dimension's static size and then swaps axis 0 with the target axis.
  """
  static_shape = tensor.shape.as_list()
  static_shape[0] = sequence_length
  tensor.set_shape(static_shape)
  # Build a permutation that swaps the leading (time) axis with `axis`.
  perm = list(range(tensor.shape.ndims))
  perm[0], perm[axis] = perm[axis], perm[0]
  return tf.transpose(tensor, perm)
def add_decoder_image_sizes(instance_shape, common_module=common):
  """Adds decoders for image sizes.

  Args:
    instance_shape: A list of integers for the shape (layout) of instances for
      each record.
    common_module: A Python module that defines common constants.

  Returns:
    A dictionary for decoders.
  """
  size_keys = (common_module.TFSE_KEY_IMAGE_HEIGHT,
               common_module.TFSE_KEY_IMAGE_WIDTH)
  return {
      key: tf.io.FixedLenFeature(instance_shape, dtype=tf.int64)
      for key in size_keys
  }
def add_decoder_keypoints_2d(keypoint_names_2d,
                             include_keypoint_scores_2d,
                             instance_shape,
                             common_module=common):
  """Adds decoders for 2D keypoints.

  Args:
    keypoint_names_2d: A list of strings for 2D keypoint names.
    include_keypoint_scores_2d: A boolean for whether to include 2D keypoint
      scores.
    instance_shape: A list of integers for the shape (layout) of instances for
      each record.
    common_module: A Python module that defines common constants.

  Returns:
    decoders: A dictionary for decoders.
  """
  # Per keypoint: one key per coordinate suffix, plus an optional score key.
  suffixes = list(common_module.TFSE_KEY_SUFFIX_KEYPOINT_2D)
  if include_keypoint_scores_2d:
    suffixes.append(common_module.TFSE_KEY_SUFFIX_KEYPOINT_2D_SCORE)
  return {
      name + suffix: tf.io.FixedLenSequenceFeature(
          instance_shape, dtype=tf.float32)
      for name in keypoint_names_2d
      for suffix in suffixes
  }
def add_decoder_keypoints_3d(keypoint_names_3d,
                             include_keypoint_scores_3d,
                             instance_shape,
                             common_module=common):
  """Adds decoders for 3D keypoints.

  Args:
    keypoint_names_3d: A list of strings for 3D keypoint names.
    include_keypoint_scores_3d: A boolean for whether to include 3D keypoint
      scores.
    instance_shape: A list of integers for the shape (layout) of instances for
      each record.
    common_module: A Python module that defines common constants.

  Returns:
    decoders: A dictionary for decoders.
  """
  # Per keypoint: one key per coordinate suffix, plus an optional score key.
  suffixes = list(common_module.TFSE_KEY_SUFFIX_KEYPOINT_3D)
  if include_keypoint_scores_3d:
    suffixes.append(common_module.TFSE_KEY_SUFFIX_KEYPOINT_3D_SCORE)
  return {
      name + suffix: tf.io.FixedLenSequenceFeature(
          instance_shape, dtype=tf.float32)
      for name in keypoint_names_3d
      for suffix in suffixes
  }
def process_decoded_image_sizes(decoded_tensors,
                                sequence_length,
                                common_module=common):
  """Processes decoded image sizes.

  Args:
    decoded_tensors: A dictionary for decoded tensors.
    sequence_length: An integer for input sequence length.
    common_module: A Python module that defines common constants.

  Returns:
    A dictionary holding the processed image size tensor.
  """
  heights = decoded_tensors[common_module.TFSE_KEY_IMAGE_HEIGHT]
  widths = decoded_tensors[common_module.TFSE_KEY_IMAGE_WIDTH]
  # Pair (height, width), then replicate along a new time axis so every step
  # in the sequence carries the image size.
  sizes = tf.expand_dims(tf.stack([heights, widths], axis=-1), axis=-2)
  sizes = data_utils.tile_last_dims(
      sizes, last_dim_multiples=[sequence_length, 1])
  return {common_module.KEY_IMAGE_SIZES: sizes}
def process_decoded_keypoints_2d(decoded_tensors,
                                 keypoint_names_2d,
                                 include_keypoint_scores_2d,
                                 sequence_length,
                                 common_module=common):
  """Processes decoded 2D keypoint tensors.

  Args:
    decoded_tensors: A dictionary for decoded tensors.
    keypoint_names_2d: A list of strings for 2D keypoint names.
    include_keypoint_scores_2d: A boolean for whether to include 2D keypoint
      scores.
    sequence_length: An integer for the length of input sequences.
    common_module: A Python module that defines common constants.

  Returns:
    outputs: A dictionary for processed 2D keypoint tensors.
  """
  outputs = {}
  # Stack coordinates (innermost) and keypoints, then move time to its
  # canonical axis.
  stacked_keypoints = tf.stack(
      [tf.stack([decoded_tensors[name + suffix]
                 for suffix in common_module.TFSE_KEY_SUFFIX_KEYPOINT_2D],
                axis=-1)
       for name in keypoint_names_2d],
      axis=-2)
  outputs[common_module.KEY_KEYPOINTS_2D] = _set_and_permute_time_axis(
      stacked_keypoints, sequence_length)
  if include_keypoint_scores_2d:
    stacked_scores = tf.stack(
        [decoded_tensors[name + common_module.TFSE_KEY_SUFFIX_KEYPOINT_2D_SCORE]
         for name in keypoint_names_2d],
        axis=-1)
    outputs[common_module.KEY_KEYPOINT_SCORES_2D] = _set_and_permute_time_axis(
        stacked_scores, sequence_length, axis=-2)
  return outputs
def process_decoded_keypoints_3d(decoded_tensors,
                                 keypoint_names_3d,
                                 include_keypoint_scores_3d,
                                 sequence_length,
                                 common_module=common):
  """Processes decoded 3D keypoint tensors.

  Args:
    decoded_tensors: A dictionary for decoded tensors.
    keypoint_names_3d: A list of strings for 3D keypoint names.
    include_keypoint_scores_3d: A boolean for whether to include 3D keypoint
      scores.
    sequence_length: An integer for the length of input sequences.
    common_module: A Python module that defines common constants.

  Returns:
    outputs: A dictionary for processed 3D keypoint tensors.
  """
  outputs = {}
  keypoints_3d = []
  for name in keypoint_names_3d:
    # Gather one tensor per coordinate suffix and stack them as the innermost
    # (coordinate) dimension.
    sub_keypoints_3d = []
    for keypoint_suffix in common_module.TFSE_KEY_SUFFIX_KEYPOINT_3D:
      key = name + keypoint_suffix
      sub_keypoints_3d.append(decoded_tensors[key])
    keypoints_3d.append(tf.stack(sub_keypoints_3d, axis=-1))
  # Stack keypoints as the second-to-last dimension, then normalize the time
  # axis position.
  keypoints_3d = tf.stack(keypoints_3d, axis=-2)
  keypoints_3d = _set_and_permute_time_axis(keypoints_3d, sequence_length)
  outputs[common_module.KEY_KEYPOINTS_3D] = keypoints_3d
  if include_keypoint_scores_3d:
    keypoint_scores_3d = []
    for name in keypoint_names_3d:
      key = name + common_module.TFSE_KEY_SUFFIX_KEYPOINT_3D_SCORE
      keypoint_scores_3d.append(decoded_tensors[key])
    keypoint_scores_3d = tf.stack(keypoint_scores_3d, axis=-1)
    keypoint_scores_3d = _set_and_permute_time_axis(
        keypoint_scores_3d, sequence_length, axis=-2)
    outputs[common_module.KEY_KEYPOINT_SCORES_3D] = keypoint_scores_3d
  return outputs
def get_tfse_parser_fn(context_features_decoders, sequence_features_decoders,
                       post_process_fn):
  """Creates a tf.SequenceExample parser function.

  Args:
    context_features_decoders: A dictionary for keyed tf.SequenceExample
      context features decoders.
    sequence_features_decoders: A dictionary for keyed tf.SequenceExample
      sequence features decoders.
    post_process_fn: A function handle for postprocessing decoded tensors.

  Returns:
    parser_fn: A function handle for the parser function.
  """

  def parser_fn(*inputs):
    """Parses one serialized tf.SequenceExample."""
    # `inputs` may be just the serialized example, or a (key, serialized
    # example) pair; the payload is always the last element.
    decoded = tf.io.parse_single_sequence_example(
        inputs[-1],
        context_features=context_features_decoders,
        sequence_features=sequence_features_decoders)
    return post_process_fn(decoded)

  return parser_fn
def create_tfse_parser(keypoint_names_2d=None,
                       keypoint_names_3d=None,
                       include_keypoint_scores_2d=True,
                       include_keypoint_scores_3d=False,
                       num_objects=1,
                       sequence_length=None,
                       common_module=common):
  """Creates default tf.SequenceExample parser function.

  Args:
    keypoint_names_2d: A list of strings for 2D keypoint names. Use None to skip
      reading 2D keypoints.
    keypoint_names_3d: A list of strings for 3D keypoint names. Use None to skip
      reading 3D keypoints.
    include_keypoint_scores_2d: A boolean for whether to read 2D keypoint
      scores. Only used if `keypoint_names_2d` is specified.
    include_keypoint_scores_3d: A boolean for whether to read 3D keypoint
      scores. Only used if `keypoint_names_3d` is specified.
    num_objects: An integer for the number of objects each example has.
    sequence_length: An integer for the length of input sequences.
    common_module: A Python module that defines common constants.

  Returns:
    parser_fn: A function handle for the parser.
  """
  # Prepare context features (per-example image sizes).
  context_features_decoders = add_decoder_image_sizes(
      instance_shape=[num_objects],
      common_module=common_module)
  # Prepare sequence features (per-frame keypoints, optionally with scores).
  sequence_features_decoders = {}
  if keypoint_names_2d:
    sequence_features_decoders.update(
        add_decoder_keypoints_2d(
            keypoint_names_2d,
            include_keypoint_scores_2d=include_keypoint_scores_2d,
            instance_shape=[num_objects],
            common_module=common_module))
  if keypoint_names_3d:
    sequence_features_decoders.update(
        add_decoder_keypoints_3d(
            keypoint_names_3d,
            include_keypoint_scores_3d=include_keypoint_scores_3d,
            instance_shape=[num_objects],
            common_module=common_module))
  def post_process_decoded_tensors(decoded_tensors):
    """Postprocesses decoded tensors."""
    # Placeholder for postprocessing including static padding, temporal sampling
    # augmentation, etc.
    # `decoded_tensors` is the (context, sequence) pair returned by
    # tf.io.parse_single_sequence_example in get_tfse_parser_fn:
    # index 0 = context features, index 1 = sequence features.
    outputs = process_decoded_image_sizes(
        decoded_tensors[0], sequence_length, common_module)
    if keypoint_names_2d:
      outputs.update(
          process_decoded_keypoints_2d(
              decoded_tensors[1],
              keypoint_names_2d=keypoint_names_2d,
              include_keypoint_scores_2d=include_keypoint_scores_2d,
              sequence_length=sequence_length,
              common_module=common_module))
    if keypoint_names_3d:
      outputs.update(
          process_decoded_keypoints_3d(
              decoded_tensors[1],
              keypoint_names_3d=keypoint_names_3d,
              include_keypoint_scores_3d=include_keypoint_scores_3d,
              sequence_length=sequence_length,
              common_module=common_module))
    return outputs
  return get_tfse_parser_fn(context_features_decoders,
                            sequence_features_decoders,
                            post_process_decoded_tensors)
| google-research/google-research | poem/core/tfse_input_layer.py | Python | apache-2.0 | 12,521 |
import http.client
#import pudb
#pudb.set_trace()
# Demo: issue a plain HTTP GET against www.python.org and inspect the
# response headers.
conn = http.client.HTTPConnection("www.python.org")
conn.request("GET","/index.html")
res = conn.getresponse()
# All response headers as a list of (name, value) pairs.
print(res.getheaders())
print(res.getheader('server'))
# The second argument is the default returned when the header is absent.
print(res.getheader('space','mine'))
| shabbir005/learntosolveit | languages/python/web_httplib_example_3.py | Python | bsd-3-clause | 253 |
# test args, streams
# Echoes process context (cwd, script dir, argv) to stdout, then performs an
# interactive round trip: reads a text line and a repeat count from stdin and
# writes the text repeated that many times.
import sys, os
print(os.getcwd())                  # current working directory, to Outputs
print(sys.path[0])                  # directory containing this script
print('[argv]')
for arg in sys.argv:                # command-line arguments, from Args
    print(arg)                      # to Outputs
print('[interaction]')              # to Outputs
text = input('Enter text:')         # read a line from Inputs
rept = sys.stdin.readline()         # read repeat count from Inputs
sys.stdout.write(text * int(rept))  # text repeated rept times, to Outputs
#! /usr/bin/python
#
# Code generator for python ctypes bindings for VLC
# Copyright (C) 2009 the VideoLAN team
# $Id: $
#
# Authors: Olivier Aubert <olivier.aubert at liris.cnrs.fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
#
"""Unittest module.
"""
import unittest
import vlc
class TestVLCAPI(unittest.TestCase):
    """Sanity tests for the generated python-ctypes libvlc bindings.

    The enum tests compare generated values against hardcoded constants; a
    failure usually means the libvlc .h definitions changed rather than a
    generator bug.

    Fixes in this revision:
    - ``test_libvlc_logger`` was defined twice; the first definition (which
      was byte-identical to ``test_libvlc_logger_clear``) was silently
      shadowed and never ran. The redundant duplicate is removed and the
      surviving logger test is renamed ``test_libvlc_logger_messages`` so
      both distinct tests are discovered and executed.
    - The deprecated ``assert_`` alias is replaced with ``assertTrue``.
    """

    # We check enum definitions against hardcoded values. In case of
    # failure, check that the reason is not a change in the .h
    # definitions.
    def test_enum_event_type(self):
        self.assertEqual(vlc.EventType.MediaStateChanged.value, 5)

    def test_enum_meta(self):
        self.assertEqual(vlc.Meta.Description.value, 6)

    def test_enum_state(self):
        self.assertEqual(vlc.State.Playing.value, 3)

    def test_enum_media_option(self):
        self.assertEqual(vlc.MediaOption.unique.value, 256)

    def test_enum_playback_mode(self):
        self.assertEqual(vlc.PlaybackMode.repeat.value, 2)

    def test_enum_marquee_int_option(self):
        self.assertEqual(vlc.VideoMarqueeIntOption.Size.value, 5)

    def test_enum_output_device_type(self):
        self.assertEqual(vlc.AudioOutputDeviceTypes._2F2R.value, 4)

    def test_enum_output_channel(self):
        self.assertEqual(vlc.AudioOutputChannel.Dolbys.value, 5)

    def test_enum_position_origin(self):
        self.assertEqual(vlc.PositionOrigin.ModuloPosition.value, 2)

    def test_enum_position_key(self):
        self.assertEqual(vlc.PositionKey.MediaTime.value, 2)

    def test_enum_player_status(self):
        self.assertEqual(vlc.PlayerStatus.StopStatus.value, 5)

    # Basic MediaControl tests
    def test_mediacontrol_creation(self):
        mc = vlc.MediaControl()
        self.assertTrue(mc)

    def test_mediacontrol_initial_mrl(self):
        mc = vlc.MediaControl()
        self.assertEqual(mc.get_mrl(), '')

    def test_mediacontrol_set_mrl(self):
        mrl = '/tmp/foo.avi'
        mc = vlc.MediaControl()
        mc.set_mrl(mrl)
        self.assertEqual(mc.get_mrl(), mrl)

    def test_mediacontrol_position(self):
        p = vlc.MediaControlPosition(value=2,
                                     origin=vlc.PositionOrigin.RelativePosition,
                                     key=vlc.PositionKey.MediaTime)
        self.assertEqual(p.value, 2)

    def test_mediacontrol_position_shortcut(self):
        # A bare int should default to absolute position in media time.
        p = vlc.MediaControlPosition(2)
        self.assertEqual(p.value, 2)
        self.assertEqual(p.key, vlc.PositionKey.MediaTime)
        self.assertEqual(p.origin, vlc.PositionOrigin.AbsolutePosition)

    def test_mediacontrol_get_media_position(self):
        mc = vlc.MediaControl()
        p = mc.get_media_position()
        self.assertEqual(p.value, -1)

    def test_mediacontrol_get_stream_information(self):
        mc = vlc.MediaControl()
        s = mc.get_stream_information()
        self.assertEqual(s.position, 0)
        self.assertEqual(s.length, 0)

    # Basic libvlc tests
    def test_instance_creation(self):
        i = vlc.Instance()
        self.assertTrue(i)

    def test_libvlc_media(self):
        mrl = '/tmp/foo.avi'
        i = vlc.Instance()
        m = i.media_new(mrl)
        self.assertEqual(m.get_mrl(), mrl)

    def test_libvlc_player(self):
        mrl = '/tmp/foo.avi'
        i = vlc.Instance()
        p = i.media_player_new(mrl)
        self.assertEqual(p.get_media().get_mrl(), mrl)

    def test_libvlc_player_state(self):
        mrl = '/tmp/foo.avi'
        i = vlc.Instance()
        p = i.media_player_new(mrl)
        self.assertEqual(p.get_state(), vlc.State.Ended)

    def test_libvlc_logger_clear(self):
        i = vlc.Instance()
        l = i.log_open()
        l.clear()
        self.assertEqual(l.count(), 0)
        l.close()

    def test_libvlc_logger_messages(self):
        i = vlc.Instance()
        i.set_log_verbosity(3)
        l = i.log_open()
        # This should generate a log message
        i.add_intf('dummy')
        self.assertNotEqual(l.count(), 0)
        for m in l:
            # Ensure that messages can be read.
            self.assertNotEqual(len(m.message), 0)
        l.close()
# Run the test suite when invoked as a script.
if __name__ == '__main__':
    unittest.main()
| maddox/vlc | bindings/python-ctypes/test.py | Python | gpl-2.0 | 5,008 |
import os
from django.conf import settings
from django.conf.urls import url
from . import views
# URL namespace for reversing, e.g. 'url_shortener:index'.
app_name = 'url_shortener'
urlpatterns = [
    # Landing page / shortening form.
    url(r'^$', views.index, name='index'),
    # Bare alias redirect; the second pattern also captures a trailing
    # '/extra' path segment. Both share the name 'alias'.
    url(r'^(?P<alias>[a-zA-Z0-9-_]+)$', views.redirect, name='alias'),
    url(r'^(?P<alias>[a-zA-Z0-9-_]+)(?P<extra>/.*)$', views.redirect, name='alias'),
    # Appending '+' to an alias shows a preview instead of redirecting.
    url(r'^(?P<alias>[a-zA-Z0-9-_]+)\+$', views.preview, name='preview'),
    url(r'^~analytics/$', views.analytics, name='analytics'),
]
# For Heroku
if 'DYNO' in os.environ:
    urlpatterns += [
        # NOTE(review): passing a dotted-path string view was removed in
        # Django 1.10 -- confirm the pinned Django version, or import and
        # pass django.views.static.serve as a callable.
        url(r'^~static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    ]
| smtchahal/url-shortener | url_shortener/urls.py | Python | mit | 655 |
#!/usr/bin/python
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|-|S|p|y|.|c|o|.|u|k|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# pi_camera_options.py
# Takes a sequence of photos with the Pi camera
# using a range of Exposure and White Balance
# settings.
#
# Project URL :
# http://www.raspberrypi-spy.co.uk/?p=1862
#
# Author : Matt Hawkins
# Date : 21/06/2013
import os
import time
import subprocess
# Full list of Exposure and White Balance options
#list_ex = ['off','auto','night','nightpreview','backlight',
# 'spotlight','sports','snow','beach','verylong',
# 'fixedfps','antishake','fireworks']
#list_awb = ['off','auto','sun','cloud','shade','tungsten',
# 'fluorescent','incandescent','flash','horizon']
# Refined list of Exposure and White Balance options. 60 photos.
#list_ex = ['off','auto','night','backlight','spotlight','fireworks']
#list_awb = ['off','auto','sun','cloud','shade','tungsten','fluorescent','incandescent','flash','horizon']
# Test list of Exposure and White Balance options. 6 photos.
list_ex = ['auto','off','snow']
list_awb = ['auto','shade','sun']
# EV level
photo_ev = 0
# Photo dimensions and rotation
photo_width = 640
photo_height = 480
photo_rotate = 180
photo_interval = 0.025 # Interval between photos (seconds)
photo_counter = 0 # Photo counter
#total_photos = len(list_ex) * len(list_awb)
total_photos = 10
# Delete all previous image files
try:
os.remove("photo_*.jpg")
except OSError:
pass
# Lets start taking photos!
try:
print "Starting photo sequence"
for ex in list_ex:
for awb in list_awb:
photo_counter = photo_counter + 1
filename = 'photo_' + ex + '_' + awb + '.jpg'
cmd = 'raspistill -o ' + filename + ' -t 1000 -ex ' + ex + ' -awb ' + awb + ' -ev ' + str(photo_ev) + ' -w ' + str(photo_width) + ' -h ' + str(photo_height) + ' -rot ' + str(photo_rotate)
pid = subprocess.call(cmd, shell=True)
print ' [' + str(photo_counter) + ' of ' + str(total_photos) + '] ' + filename
time.sleep(photo_interval)
print "Finished photo sequence"
except KeyboardInterrupt:
# User quit
print "\nGoodbye!" | autograss/autograss | camera/mult_fotos.py | Python | gpl-2.0 | 2,258 |
"""
Entrance Exams view module -- handles all requests related to entrance exam management via Studio
Intended to be utilized as an AJAX callback handler, versus a proper view/screen
"""
import logging
from functools import wraps
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from contentstore.views.helpers import create_xblock, remove_entrance_exam_graders
from contentstore.views.item import delete_item
from models.settings.course_metadata import CourseMetadata
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from student.auth import has_course_author_access
from util import milestones_helpers
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
# Public view exports for this module.
__all__ = ['entrance_exam', ]

log = logging.getLogger(__name__)
def _get_default_entrance_exam_minimum_pct():
    """
    Return the default minimum score from configuration.

    Integer-valued settings (e.g. 50) are converted to the decimal fraction
    (0.5) used internally; fractional settings are returned as-is.
    """
    minimum_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
    return minimum_pct / 100 if minimum_pct.is_integer() else minimum_pct
# pylint: disable=missing-docstring
def check_feature_enabled(feature_name):
    """
    View decorator factory: ensure the named feature flag is turned on.

    When settings.FEATURES[feature_name] is falsy, the wrapped view is not
    called and an HTTP 400 response is returned instead.
    """
    def _check_feature_enabled(view_func):
        @wraps(view_func)
        def _decorator(request, *args, **kwargs):
            if settings.FEATURES.get(feature_name, False):
                return view_func(request, *args, **kwargs)
            # Deny access while the feature is disabled.
            return HttpResponseBadRequest()
        return _decorator
    return _check_feature_enabled
@login_required
@ensure_csrf_cookie
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def entrance_exam(request, course_key_string):
    """
    The restful handler for entrance exams.

    GET
        Retrieves the entrance exam module (metadata) for the specified course
    POST
        Adds an entrance exam module to the specified course (201 on success,
        400 unless the client asked for JSON)
    DELETE
        Removes the entrance exam from the course (always 204)
    """
    course_key = CourseKey.from_string(course_key_string)

    # Deny access if the user is valid, but they lack the proper object access privileges
    if not has_course_author_access(request.user, course_key):
        return HttpResponse(status=403)

    # Retrieve the entrance exam module for the specified course (returns 404 if none found)
    if request.method == 'GET':
        return _get_entrance_exam(request, course_key)

    # Create a new entrance exam for the specified course (returns 201 if created)
    elif request.method == 'POST':
        response_format = request.POST.get('format', 'html')
        # BUGFIX: WSGI/Django exposes request headers in META upper-cased with
        # an 'HTTP_' prefix, so the previous lookup of 'http_accept' always
        # returned None and `'application/json' in None` raised a TypeError
        # whenever 'format' was not 'json'. Use the real key and default to ''
        # so the membership test below is always safe.
        http_accept = request.META.get('HTTP_ACCEPT', '')
        if response_format == 'json' or 'application/json' in http_accept:
            ee_min_score = request.POST.get('entrance_exam_minimum_score_pct', None)
            # If the request contains an empty value or none, save the default one.
            entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct()
            if ee_min_score != '' and ee_min_score is not None:
                entrance_exam_minimum_score_pct = float(ee_min_score)
            return create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
        return HttpResponse(status=400)

    # Remove the entrance exam module for the specified course (returns 204 regardless of existence)
    elif request.method == 'DELETE':
        return delete_entrance_exam(request, course_key)

    # No other HTTP verbs/methods are supported at this time
    else:
        return HttpResponse(status=405)
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct):
    """
    api method to create an entrance exam.
    First clean out any old entrance exams, then delegate creation to
    _create_entrance_exam (201 on success, 400 if the course is missing).
    """
    _delete_entrance_exam(request, course_key)
    return _create_entrance_exam(
        request=request,
        course_key=course_key,
        entrance_exam_minimum_score_pct=entrance_exam_minimum_score_pct
    )
def _create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct=None):
    """
    Internal workflow operation to create an entrance exam

    Creates a chapter block flagged as the entrance exam, points the course
    metadata at it, adds a child sequential, and registers the milestone.
    Returns 400 when the course does not exist, 201 on success.
    """
    # Provide a default value for the minimum score percent if nothing specified
    if entrance_exam_minimum_score_pct is None:
        entrance_exam_minimum_score_pct = _get_default_entrance_exam_minimum_pct()

    # Confirm the course exists
    course = modulestore().get_course(course_key)
    if course is None:
        return HttpResponse(status=400)

    # Create the entrance exam item (currently it's just a chapter)
    parent_locator = unicode(course.location)
    created_block = create_xblock(
        parent_locator=parent_locator,
        user=request.user,
        category='chapter',
        display_name=_('Entrance Exam'),
        is_entrance_exam=True
    )

    # Set the entrance exam metadata flags for this course
    # Reload the course so we don't overwrite the new child reference
    course = modulestore().get_course(course_key)
    metadata = {
        'entrance_exam_enabled': True,
        'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct,
        'entrance_exam_id': unicode(created_block.location),
    }
    CourseMetadata.update_from_dict(metadata, course, request.user)

    # Create the entrance exam section item.
    create_xblock(
        parent_locator=unicode(created_block.location),
        user=request.user,
        category='sequential',
        display_name=_('Entrance Exam - Subsection')
    )
    add_entrance_exam_milestone(course.id, created_block)
    return HttpResponse(status=201)
def _get_entrance_exam(request, course_key):  # pylint: disable=W0613
    """
    Internal workflow operation to retrieve an entrance exam.

    Returns a JSON body with the exam locator, 400 when the course is
    missing, and 404 when no (valid) entrance exam is attached to it.
    """
    course = modulestore().get_course(course_key)
    if course is None:
        return HttpResponse(status=400)
    if not course.entrance_exam_id:
        return HttpResponse(status=404)
    # Either an unparsable locator or a dangling reference yields a 404.
    try:
        exam_key = UsageKey.from_string(course.entrance_exam_id)
        exam_descriptor = modulestore().get_item(exam_key)
    except (InvalidKeyError, ItemNotFoundError):
        return HttpResponse(status=404)
    return HttpResponse(
        dump_js_escaped_json({'locator': unicode(exam_descriptor.location)}),
        status=200, content_type='application/json')
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def update_entrance_exam(request, course_key, exam_data):
    """
    Operation to update course fields pertaining to entrance exams.

    Not currently exposed directly via the API, so no success response is
    returned; when the course cannot be loaded the call is a silent no-op.
    """
    course = modulestore().get_course(course_key)
    if course is None:
        return
    CourseMetadata.update_from_dict(exam_data, course, request.user)
@check_feature_enabled(feature_name='ENTRANCE_EXAMS')
def delete_entrance_exam(request, course_key):
    """
    api method to delete an entrance exam
    Thin public wrapper around _delete_entrance_exam (204 on success,
    400 when the course does not exist).
    """
    return _delete_entrance_exam(request=request, course_key=course_key)
def _delete_entrance_exam(request, course_key):
    """
    Internal workflow operation to remove an entrance exam

    Removes the milestone reference, resets the course's entrance-exam
    metadata, and cleans up graders. Returns 400 when the course does not
    exist, otherwise 204 regardless of whether an exam existed.
    """
    store = modulestore()
    course = store.get_course(course_key)
    if course is None:
        return HttpResponse(status=400)

    remove_entrance_exam_milestone_reference(request, course_key)

    # Reset the entrance exam flags on the course
    # Reload the course so we have the latest state
    course = store.get_course(course_key)
    if course.entrance_exam_id:
        metadata = {
            'entrance_exam_enabled': False,
            'entrance_exam_minimum_score_pct': None,
            'entrance_exam_id': None,
        }
        CourseMetadata.update_from_dict(metadata, course, request.user)

        # Clean up any pre-existing entrance exam graders
        remove_entrance_exam_graders(course_key, request.user)

    return HttpResponse(status=204)
def add_entrance_exam_milestone(course_id, x_block):
    """
    Add an entrance exam milestone for the given xblock if one does not
    already exist, wiring up both the course-level REQUIRES relationship
    and the content-level FULFILLS relationship.

    As this is a standalone method for entrance exams, it first checks that
    the given xblock really is an entrance exam; otherwise it does nothing.
    """
    if x_block.is_entrance_exam:
        namespace_choices = milestones_helpers.get_namespace_choices()
        milestone_namespace = milestones_helpers.generate_milestone_namespace(
            namespace_choices.get('ENTRANCE_EXAM'),
            course_id
        )
        # Reuse an existing milestone in this namespace when there is one;
        # otherwise autogenerate a new milestone record.
        milestones = milestones_helpers.get_milestones(milestone_namespace)
        if len(milestones):
            milestone = milestones[0]
        else:
            description = u'Autogenerated during {} entrance exam creation.'.format(unicode(course_id))
            milestone = milestones_helpers.add_milestone({
                'name': _('Completed Course Entrance Exam'),
                'namespace': milestone_namespace,
                'description': description
            })
        relationship_types = milestones_helpers.get_milestone_relationship_types()
        # The course requires the milestone...
        milestones_helpers.add_course_milestone(
            unicode(course_id),
            relationship_types['REQUIRES'],
            milestone
        )
        # ...and completing the entrance exam block fulfills it.
        milestones_helpers.add_course_content_milestone(
            unicode(course_id),
            unicode(x_block.location),
            relationship_types['FULFILLS'],
            milestone
        )
def remove_entrance_exam_milestone_reference(request, course_key):
    """
    Remove content reference for entrance exam.

    Deletes every chapter flagged as an entrance exam and clears its
    milestone content references.
    """
    chapters = modulestore().get_items(
        course_key,
        qualifiers={'category': 'chapter'}
    )
    for chapter in chapters:
        if not chapter.is_entrance_exam:
            continue
        usage_id = chapter.scope_ids.usage_id
        delete_item(request, usage_id)
        milestones_helpers.remove_content_references(unicode(usage_id))
| jolyonb/edx-platform | cms/djangoapps/contentstore/views/entrance_exam.py | Python | agpl-3.0 | 10,861 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import pypot.robot
import requests
import json
class ServiceReady(pypot.primitive.LoopPrimitive):
    """Polls the local pypot REST API until the 'rest_open_behave' primitive
    list endpoint answers 200, then pings the remote device and stops."""

    def setup(self):
        # Remote device notified once the service is up.
        ip = "192.168.1.102"
        self._req = "http://" + ip + "/hello?state=on"

    def update(self):
        # BUGFIX: the module imported at the top of the file is `requests`;
        # the previous `request.get(...)` raised a NameError. The local URL
        # also lacked the http:// scheme that requests needs.
        r = requests.get(
            "http://127.0.0.1:8080/primitive/rest_open_behave/list.json")
        if r.status_code == 200:
            requests.get(self._req)
            self.stop()
| jerome-guichard/primitiveWS | cherry/primitives/service_ready.py | Python | gpl-3.0 | 499 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
class purchase_requisition(osv.osv):
_name = "purchase.requisition"
_description = "Purchase Requisition"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _get_po_line(self, cr, uid, ids, field_names, arg=None, context=None):
result = dict((res_id, []) for res_id in ids)
for element in self.browse(cr, uid, ids, context=context):
for po in element.purchase_ids:
result[element.id] += [po_line.id for po_line in po.order_line]
return result
_columns = {
'name': fields.char('Call for Tenders Reference', required=True, copy=False),
'origin': fields.char('Source Document'),
'ordering_date': fields.date('Scheduled Ordering Date'),
'date_end': fields.datetime('Tender Closing Deadline'),
'schedule_date': fields.date('Scheduled Date', select=True, help="The expected and scheduled delivery date where all the products are received"),
'user_id': fields.many2one('res.users', 'Responsible'),
'exclusive': fields.selection([('exclusive', 'Select only one RFQ (exclusive)'), ('multiple', 'Select multiple RFQ')], 'Tender Selection Type', required=True, help="Select only one RFQ (exclusive): On the confirmation of a purchase order, it cancels the remaining purchase order.\nSelect multiple RFQ: It allows to have multiple purchase orders.On confirmation of a purchase order it does not cancel the remaining orders"""),
'description': fields.text('Description'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'purchase_ids': fields.one2many('purchase.order', 'requisition_id', 'Purchase Orders', states={'done': [('readonly', True)]}),
'po_line_ids': fields.function(_get_po_line, method=True, type='one2many', relation='purchase.order.line', string='Products by supplier'),
'line_ids': fields.one2many('purchase.requisition.line', 'requisition_id', 'Products to Purchase', states={'done': [('readonly', True)]}, copy=True),
'procurement_id': fields.many2one('procurement.order', 'Procurement', ondelete='set null', copy=False),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'state': fields.selection([('draft', 'Draft'), ('in_progress', 'Confirmed'),
('open', 'Bid Selection'), ('done', 'PO Created'),
('cancel', 'Cancelled')],
'Status', track_visibility='onchange', required=True,
copy=False),
'multiple_rfq_per_supplier': fields.boolean('Multiple RFQ per supplier'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True),
}
    def _get_picking_in(self, cr, uid, context=None):
        """Return the database id of the stock module's standard incoming
        picking type (used as the default for picking_type_id)."""
        obj_data = self.pool.get('ir.model.data')
        return obj_data.get_object_reference(cr, uid, 'stock', 'picking_type_in')[1]
_defaults = {
'state': 'draft',
'exclusive': 'multiple',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition', context=c),
'user_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).id,
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').next_by_code(cr, uid, 'purchase.order.requisition'),
'picking_type_id': _get_picking_in,
}
    def tender_cancel(self, cr, uid, ids, context=None):
        """Cancel the tenders and every quotation generated from them,
        posting a note on each cancelled quotation."""
        purchase_order_obj = self.pool.get('purchase.order')
        # try to set all associated quotations to cancel state
        for tender in self.browse(cr, uid, ids, context=context):
            for purchase_order in tender.purchase_ids:
                purchase_order_obj.action_cancel(cr, uid, [purchase_order.id], context=context)
                purchase_order_obj.message_post(cr, uid, [purchase_order.id], body=_('Cancelled by the tender associated to this quotation.'), context=context)
        return self.write(cr, uid, ids, {'state': 'cancel'})
def tender_in_progress(self, cr, uid, ids, context=None):
if not all(obj.line_ids for obj in self.pool['purchase.requisition'].browse(cr, uid, ids, context=context)):
raise osv.except_osv(_('Warning!'), _('You can not confirm call because there is no product line.'))
return self.write(cr, uid, ids, {'state': 'in_progress'}, context=context)
    def tender_open(self, cr, uid, ids, context=None):
        """Move the tenders to the bid-selection ('open') state."""
        return self.write(cr, uid, ids, {'state': 'open'}, context=context)
    def tender_reset(self, cr, uid, ids, context=None):
        """Set the tenders back to draft and restart their workflow
        instances from scratch."""
        self.write(cr, uid, ids, {'state': 'draft'})
        for p_id in ids:
            # Deleting the existing instance of workflow for PO
            self.delete_workflow(cr, uid, [p_id])
            self.create_workflow(cr, uid, [p_id])
        return True
    def tender_done(self, cr, uid, ids, context=None):
        """Mark the tenders as finished ('done' / PO Created state)."""
        return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def open_product_line(self, cr, uid, ids, context=None):
""" This opens product line view to view all lines from the different quotations, groupby default by product and partner to show comparaison
between supplier price
@return: the product line tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase_requisition', 'purchase_line_tree', context=context)
res['context'] = context
po_lines = self.browse(cr, uid, ids, context=context)[0].po_line_ids
res['context'] = {
'search_default_groupby_product': True,
'search_default_hide_cancelled': True,
'tender_id': ids[0],
}
res['domain'] = [('id', 'in', [line.id for line in po_lines])]
return res
def open_rfq(self, cr, uid, ids, context=None):
""" This opens rfq view to view all quotations associated to the call for tenders
@return: the RFQ tree view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'purchase', 'purchase_rfq', context=context)
res['context'] = context
po_ids = [po.id for po in self.browse(cr, uid, ids, context=context)[0].purchase_ids]
res['domain'] = [('id', 'in', po_ids)]
return res
def _prepare_purchase_order(self, cr, uid, requisition, supplier, context=None):
supplier_pricelist = supplier.property_product_pricelist_purchase
return {
'origin': requisition.name,
'date_order': requisition.date_end or fields.datetime.now(),
'partner_id': supplier.id,
'pricelist_id': supplier_pricelist.id,
'currency_id': supplier_pricelist and supplier_pricelist.currency_id.id or requisition.company_id.currency_id.id,
'location_id': requisition.procurement_id and requisition.procurement_id.location_id.id or requisition.picking_type_id.default_location_dest_id.id,
'company_id': requisition.company_id.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'requisition_id': requisition.id,
'notes': requisition.description,
'picking_type_id': requisition.picking_type_id.id
}
def _prepare_purchase_order_line(self, cr, uid, requisition, requisition_line, purchase_id, supplier, context=None):
    """Build the values to create one PO line on *purchase_id* from a
    requisition line.

    The quantity is converted to the product's purchase unit of measure
    and name/price/taxes are obtained through the standard
    ``onchange_product_id`` logic so they follow the supplier pricelist.
    """
    if context is None:
        context = {}
    po_line_obj = self.pool.get('purchase.order.line')
    product_uom = self.pool.get('product.uom')
    product = requisition_line.product_id
    default_uom_po_id = product.uom_po_id.id
    ctx = context.copy()
    # Interpret the ordering date in the requisition owner's timezone.
    ctx['tz'] = requisition.user_id.tz
    date_order = requisition.ordering_date and fields.date.date_to_datetime(self, cr, uid, requisition.ordering_date, context=ctx) or fields.datetime.now()
    # Convert the requested quantity to the purchase unit of measure.
    qty = product_uom._compute_qty(cr, uid, requisition_line.product_uom_id.id, requisition_line.product_qty, default_uom_po_id)
    supplier_pricelist = supplier.property_product_pricelist_purchase and supplier.property_product_pricelist_purchase.id or False
    # Let the standard onchange compute the line defaults for this product.
    vals = po_line_obj.onchange_product_id(
        cr, uid, [], supplier_pricelist, product.id, qty, default_uom_po_id,
        supplier.id, date_order=date_order,
        fiscal_position_id=supplier.property_account_position,
        date_planned=requisition_line.schedule_date,
        name=False, price_unit=False, state='draft', context=context)['value']
    vals.update({
        'order_id': purchase_id,
        'product_id': product.id,
        'account_analytic_id': requisition_line.account_analytic_id.id,
    })
    return vals
def make_purchase_order(self, cr, uid, ids, partner_id, context=None):
    """Create a new draft RFQ for the given supplier on each tender in *ids*.

    :param partner_id: id of the supplier (``res.partner``) to quote
    :return: dict mapping requisition id -> newly created purchase order id
    :raises osv.except_osv: when the tender forbids multiple RFQs per
        supplier and a non-cancelled RFQ already exists for this partner
    """
    context = dict(context or {})
    assert partner_id, 'Supplier should be specified'
    purchase_order = self.pool.get('purchase.order')
    purchase_order_line = self.pool.get('purchase.order.line')
    res_partner = self.pool.get('res.partner')
    supplier = res_partner.browse(cr, uid, partner_id, context=context)
    res = {}
    for requisition in self.browse(cr, uid, ids, context=context):
        # Collect the non-cancelled RFQs already issued to this supplier.
        # Bug fix: the original error message referenced the leaked
        # list-comprehension variable ``rfq`` (the last-browsed RFQ,
        # regardless of partner; a NameError on Python 3) — report the
        # state of the actual conflicting RFQ instead.
        conflicting = [rfq for rfq in requisition.purchase_ids
                       if rfq.state != 'cancel' and rfq.partner_id.id == supplier.id]
        if not requisition.multiple_rfq_per_supplier and conflicting:
            raise osv.except_osv(_('Warning!'), _('You have already one %s purchase order for this partner, you must cancel this purchase order to create a new quotation.') % conflicting[0].state)
        context.update({'mail_create_nolog': True})
        purchase_id = purchase_order.create(cr, uid, self._prepare_purchase_order(cr, uid, requisition, supplier, context=context), context=context)
        purchase_order.message_post(cr, uid, [purchase_id], body=_("RFQ created"), context=context)
        res[requisition.id] = purchase_id
        for line in requisition.line_ids:
            purchase_order_line.create(cr, uid, self._prepare_purchase_order_line(cr, uid, requisition, line, purchase_id, supplier, context=context), context=context)
    return res
def check_valid_quotation(self, cr, uid, quotation, context=None):
    """Tell whether every line of *quotation* was selected during tendering.

    :param quotation: a ``purchase.order`` browse record
    :return: True when all order lines are confirmed with their full
        quantity tendered, False otherwise
    """
    return all(line.state == 'confirmed' and line.product_qty == line.quantity_tendered
               for line in quotation.order_line)
def _prepare_po_from_tender(self, cr, uid, tender, context=None):
""" Prepare the values to write in the purchase order
created from a tender.
:param tender: the source tender from which we generate a purchase order
"""
return {'order_line': [],
'requisition_id': tender.id,
'origin': tender.name}
def _prepare_po_line_from_tender(self, cr, uid, tender, line, purchase_id, context=None):
""" Prepare the values to write in the purchase order line
created from a line of the tender.
:param tender: the source tender from which we generate a purchase order
:param line: the source tender's line from which we generate a line
:param purchase_id: the id of the new purchase
"""
return {'product_qty': line.quantity_tendered,
'order_id': purchase_id}
def generate_po(self, cr, uid, ids, context=None):
    """
    Generate all purchase order based on selected lines, should only be called on one tender at a time

    For each tender: confirm the quotations whose lines were all selected,
    then group the remaining confirmed lines per supplier, copy one of that
    supplier's quotations with the selected lines only and confirm the
    copy, cancel the unconfirmed quotations and close the tender.
    """
    po = self.pool.get('purchase.order')
    poline = self.pool.get('purchase.order.line')
    id_per_supplier = {}
    for tender in self.browse(cr, uid, ids, context=context):
        if tender.state == 'done':
            raise osv.except_osv(_('Warning!'), _('You have already generate the purchase order(s).'))
        confirm = False
        #check that we have at least confirm one line
        for po_line in tender.po_line_ids:
            if po_line.state == 'confirmed':
                confirm = True
                break
        if not confirm:
            raise osv.except_osv(_('Warning!'), _('You have no line selected for buying.'))
        #check for complete RFQ
        for quotation in tender.purchase_ids:
            if (self.check_valid_quotation(cr, uid, quotation, context=context)):
                #use workflow to set PO state to confirm
                po.signal_workflow(cr, uid, [quotation.id], 'purchase_confirm')
        #get other confirmed lines per supplier
        for po_line in tender.po_line_ids:
            #only take into account confirmed line that does not belong to already confirmed purchase order
            if po_line.state == 'confirmed' and po_line.order_id.state in ['draft', 'sent', 'bid']:
                if id_per_supplier.get(po_line.partner_id.id):
                    id_per_supplier[po_line.partner_id.id].append(po_line)
                else:
                    id_per_supplier[po_line.partner_id.id] = [po_line]
        #generate po based on supplier and cancel all previous RFQ
        # NOTE(review): ``ctx`` is built with force_requisition_id but never
        # passed to copy()/signal_workflow() below — presumably it was meant
        # to be used as the context of those calls; confirm.
        ctx = dict(context or {}, force_requisition_id=True)
        for supplier, product_line in id_per_supplier.items():
            #copy a quotation for this supplier and change order_line then validate it
            quotation_id = po.search(cr, uid, [('requisition_id', '=', tender.id), ('partner_id', '=', supplier)], limit=1)[0]
            vals = self._prepare_po_from_tender(cr, uid, tender, context=context)
            new_po = po.copy(cr, uid, quotation_id, default=vals, context=context)
            #duplicate po_line and change product_qty if needed and associate them to newly created PO
            for line in product_line:
                vals = self._prepare_po_line_from_tender(cr, uid, tender, line, new_po, context=context)
                poline.copy(cr, uid, line.id, default=vals, context=context)
            #use workflow to set new PO state to confirm
            po.signal_workflow(cr, uid, [new_po], 'purchase_confirm')
        #cancel other orders
        self.cancel_unconfirmed_quotations(cr, uid, tender, context=context)
        #set tender to state done
        self.signal_workflow(cr, uid, [tender.id], 'done')
    return True
def cancel_unconfirmed_quotations(self, cr, uid, tender, context=None):
    """Cancel every quotation of *tender* that was not confirmed.

    Quotations still in draft/sent/bid are pushed through the
    'purchase_cancel' workflow signal and a note is logged on them.
    """
    po = self.pool.get('purchase.order')
    for quotation in tender.purchase_ids:
        if quotation.state in ['draft', 'sent', 'bid']:
            # Use the registry handle fetched above instead of looking the
            # model up again on every iteration (the original re-fetched it).
            po.signal_workflow(cr, uid, [quotation.id], 'purchase_cancel')
            po.message_post(cr, uid, [quotation.id], body=_('Cancelled by the call for tenders associated to this request for quotation.'), context=context)
    return True
class purchase_requisition_line(osv.osv):
    """A single product requested in a call for tenders."""
    _name = "purchase.requisition.line"
    _description = "Purchase Requisition Line"
    _rec_name = 'product_id'
    _columns = {
        # Product requested, its unit of measure and quantity.
        'product_id': fields.many2one('product.product', 'Product'),
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
        # Parent call for tenders; deleting the tender removes its lines.
        'requisition_id': fields.many2one('purchase.requisition', 'Call for Tenders', ondelete='cascade'),
        'company_id': fields.related('requisition_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account',),
        'schedule_date': fields.date('Scheduled Date'),
    }

    def onchange_product_id(self, cr, uid, ids, product_id, product_uom_id, parent_analytic_account, analytic_account, parent_date, date, context=None):
        """ Changes UoM and quantity if product_id changes.
        @param product_id: Changed product_id
        @return: Dictionary of changed values
        """
        value = {'product_uom_id': ''}
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            value = {'product_uom_id': prod.uom_id.id, 'product_qty': 1.0}
        # Fall back to the parent requisition's analytic account and
        # schedule date when the line does not define its own.
        if not analytic_account:
            value.update({'account_analytic_id': parent_analytic_account})
        if not date:
            value.update({'schedule_date': parent_date})
        return {'value': value}

    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.requisition.line', context=c),
    }
class purchase_order(osv.osv):
    _inherit = "purchase.order"
    _columns = {
        # Tender this (candidate) order belongs to; not propagated on copy.
        'requisition_id': fields.many2one('purchase.requisition', 'Call for Tenders', copy=False),
    }

    def wkf_confirm_order(self, cr, uid, ids, context=None):
        """On confirmation of an order belonging to an 'exclusive' tender,
        cancel the sibling orders, reattach their procurements to the
        confirmed order and close the tender."""
        res = super(purchase_order, self).wkf_confirm_order(cr, uid, ids, context=context)
        proc_obj = self.pool.get('procurement.order')
        for po in self.browse(cr, uid, ids, context=context):
            if po.requisition_id and (po.requisition_id.exclusive == 'exclusive'):
                for order in po.requisition_id.purchase_ids:
                    if order.id != po.id:
                        proc_ids = proc_obj.search(cr, uid, [('purchase_id', '=', order.id)])
                        if proc_ids and po.state == 'confirmed':
                            # Move the procurements of the cancelled sibling
                            # onto the confirmed order.
                            proc_obj.write(cr, uid, proc_ids, {'purchase_id': po.id})
                        order.signal_workflow('purchase_cancel')
                    # NOTE(review): invoked once per sibling order; calling
                    # it once after the loop would presumably suffice — confirm.
                    po.requisition_id.tender_done(context=context)
        return res

    def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
        """Propagate the tender procurement's destination move onto the
        stock moves generated for this order's lines."""
        stock_move_lines = super(purchase_order, self)._prepare_order_line_move(cr, uid, order, order_line, picking_id, group_id, context=context)
        if order.requisition_id and order.requisition_id.procurement_id and order.requisition_id.procurement_id.move_dest_id:
            for i in range(0, len(stock_move_lines)):
                stock_move_lines[i]['move_dest_id'] = order.requisition_id.procurement_id.move_dest_id.id
        return stock_move_lines
class purchase_order_line(osv.osv):
    _inherit = 'purchase.order.line'

    _columns = {
        # Keeps the quantity proposed during the tender even if the PO line
        # quantity is changed afterwards (renamed from 'quantity_bid').
        'quantity_tendered': fields.float('Quantity Tendered', digits_compute=dp.get_precision('Product Unit of Measure'), help="Technical field for not loosing the initial information about the quantity proposed in the tender", oldname='quantity_bid'),
    }

    def action_draft(self, cr, uid, ids, context=None):
        """Reset the given lines to the 'draft' state."""
        self.write(cr, uid, ids, {'state': 'draft'}, context=context)

    def action_confirm(self, cr, uid, ids, context=None):
        """Confirm the lines; when a line has no tendered quantity yet,
        default it to the line's own product quantity."""
        super(purchase_order_line, self).action_confirm(cr, uid, ids, context=context)
        for element in self.browse(cr, uid, ids, context=context):
            if not element.quantity_tendered:
                # Bug fix: write only the current line. The original wrote
                # this element's product_qty onto *every* id in ``ids``,
                # clobbering the other lines' tendered quantities.
                self.write(cr, uid, [element.id], {'quantity_tendered': element.product_qty}, context=context)
        return True

    def generate_po(self, cr, uid, tender_id, context=None):
        """Delegate PO generation to purchase.requisition.generate_po()
        (called from the bid-selection JS widget with the active id)."""
        return self.pool.get('purchase.requisition').generate_po(cr, uid, [tender_id], context=context)
class product_template(osv.osv):
    _inherit = 'product.template'
    _columns = {
        # When set, procurements for this product create a call for tenders
        # instead of a plain request for quotation.
        'purchase_requisition': fields.boolean('Call for Tenders', help="Check this box to generate Call for Tenders instead of generating requests for quotation from procurement.")
    }
class procurement_order(osv.osv):
    _inherit = 'procurement.order'
    _columns = {
        # Last call for tenders generated to fulfil this procurement.
        'requisition_id': fields.many2one('purchase.requisition', 'Latest Requisition')
    }

    def _run(self, cr, uid, procurement, context=None):
        """For 'buy' procurements of tender-enabled products, create a call
        for tenders instead of delegating to the standard purchase flow."""
        requisition_obj = self.pool.get('purchase.requisition')
        warehouse_obj = self.pool.get('stock.warehouse')
        if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
            warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id)], context=context)
            requisition_id = requisition_obj.create(cr, uid, {
                'origin': procurement.origin,
                'date_end': procurement.date_planned,
                'warehouse_id': warehouse_id and warehouse_id[0] or False,
                'company_id': procurement.company_id.id,
                'procurement_id': procurement.id,
                'picking_type_id': procurement.rule_id.picking_type_id.id,
                'line_ids': [(0, 0, {
                    'product_id': procurement.product_id.id,
                    'product_uom_id': procurement.product_uom.id,
                    'product_qty': procurement.product_qty
                })],
            })
            self.message_post(cr, uid, [procurement.id], body=_("Purchase Requisition created"), context=context)
            return self.write(cr, uid, [procurement.id], {'requisition_id': requisition_id}, context=context)
        return super(procurement_order, self)._run(cr, uid, procurement, context=context)

    def _check(self, cr, uid, procurement, context=None):
        """A tender-backed procurement is done only once its tender is done
        and at least one of the tender's purchase orders was shipped."""
        if procurement.rule_id and procurement.rule_id.action == 'buy' and procurement.product_id.purchase_requisition:
            if procurement.requisition_id.state == 'done':
                if any([purchase.shipped for purchase in procurement.requisition_id.purchase_ids]):
                    return True
            return False
        return super(procurement_order, self)._check(cr, uid, procurement, context=context)
| OpusVL/odoo | addons/purchase_requisition/purchase_requisition.py | Python | agpl-3.0 | 23,779 |
"""
From: https://raw.githubusercontent.com/nexB/scancode-toolkit/48aeaf76ce9f53d02223c41c1b2ad1d1ad73b851/etc/scripts/irc-notify.py
Copyright (C) 2015-2016 Christopher R. Wood
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation;
either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this
program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301 USA.
Simple AppVeyor IRC notification script.
Modified by nexB on October 2016:
- rework the handling of environment variables.
- made the script use functions
- support only Appveyor loading its environment variable to craft IRC notices.
Modified by Jyrki Vesterinen on November 2016:
- join the channel instead of sending an external message.
The first argument is a Freenode channel. Other arguments passed to the script will be
sent as notice messages content and any {var}-formatted environment variables will be
expanded automatically, replaced with a corresponding Appveyor environment variable
value. Use commas to delineate multiple messages.
Example:
export APPVEYOR_URL=https://ci.appveyor.com
export APPVEYOR_PROJECT_NAME=attributecode
export APPVEYOR_REPO_COMMIT_AUTHOR=pombredanne
export APPVEYOR_REPO_COMMIT_TIMESTAMP=2016-10-31
export APPVEYOR_REPO_PROVIDER=gihub
export APPVEYOR_REPO_BRANCH=repo_branch
export APPVEYOR_PULL_REQUEST_TITLE=pull_request_title
export APPVEYOR_BUILD_VERSION=1
export APPVEYOR_REPO_COMMIT=22c95b72e29248dc4de9b85e590ee18f6f587de8
export APPVEYOR_REPO_COMMIT_MESSAGE="some IRC test"
export APPVEYOR_ACCOUNT_NAME=nexB
export APPVEYOR_PULL_REQUEST_NUMBER=pull_request_number
export APPVEYOR_REPO_NAME=nexB/attributecode
python etc/scripts/irc-notify.py aboutcode '[{project_name}:{branch}] {short_commit}: "{message}" ({author}) {color_red}Succeeded','Details: {build_url} | Commit: {commit_url}'
See also https://github.com/gridsync/gridsync/blob/master/appveyor.yml for examples
in Appveyor's YAML:
on_success:
- "python etc/scripts/irc-notify.py channel [{project_name}:{branch}] {short_commit}: \"{message}\" ({author}) {color_green}Succeeded,Details: {build_url},Commit: {commit_url}"
on_failure:
- "python etc/scripts/irc-notify.py channel [{project_name}:{branch}] {short_commit}: \"{message}\" ({author}) {color_red}Failed,Details: {build_url},Commit: {commit_url}"
"""
import os, random, socket, ssl, sys, time
def appveyor_vars():
    """
    Return a dict of key/value pairs crafted from AppVeyor environment
    variables, plus a few IRC colour/formatting control codes.
    """
    from os import environ

    appveyor_url = environ.get('APPVEYOR_URL')
    message_extended = environ.get('APPVEYOR_REPO_COMMIT_MESSAGE_EXTENDED')
    configuration_name = environ.get('CONFIGURATION')
    branch = environ.get('APPVEYOR_REPO_BRANCH')
    author = environ.get('APPVEYOR_REPO_COMMIT_AUTHOR')
    author_email = environ.get('APPVEYOR_REPO_COMMIT_AUTHOR_EMAIL')
    timestamp = environ.get('APPVEYOR_REPO_COMMIT_TIMESTAMP')
    repo_provider = environ.get('APPVEYOR_REPO_PROVIDER')
    project_name = environ.get('APPVEYOR_PROJECT_NAME')
    project_slug = environ.get('APPVEYOR_PROJECT_SLUG')
    pull_request_title = environ.get('APPVEYOR_PULL_REQUEST_TITLE')
    build_version = environ.get('APPVEYOR_BUILD_VERSION')
    commit = environ.get('APPVEYOR_REPO_COMMIT')
    message = environ.get('APPVEYOR_REPO_COMMIT_MESSAGE')
    account_name = environ.get('APPVEYOR_ACCOUNT_NAME')
    pull_request_number = environ.get('APPVEYOR_PULL_REQUEST_NUMBER')
    repo_name = environ.get('APPVEYOR_REPO_NAME')

    # Bug fix: slicing None raised a TypeError when the commit variable was
    # not set (the original assumed it always is).
    short_commit = commit[:7] if commit else ''
    build_url = '{appveyor_url}/project/{account_name}/{project_slug}/build/{build_version}'.format(**locals())
    commit_url = 'https://{repo_provider}.com/{repo_name}/commit/{commit}'.format(**locals())

    # Renamed from 'vars' to avoid shadowing the builtin of the same name.
    env_vars = dict(
        appveyor_url=appveyor_url,
        account_name=account_name,
        project_name=project_name,
        project_slug=project_slug,
        build_version=build_version,
        build_url=build_url,
        repo_provider=repo_provider,
        repo_name=repo_name,
        branch=branch,
        configuration_name=configuration_name,
        author=author,
        author_email=author_email,
        timestamp=timestamp,
        commit=commit,
        short_commit=short_commit,
        message=message,
        message_extended=message_extended,
        pull_request_title=pull_request_title,
        pull_request_number=pull_request_number,
        commit_url=commit_url,
        # mIRC control codes used to colour/format the notification text.
        color_green='\x033',
        color_red='\x034',
        bold='\x02',
        underline='\x1f',
        plain='\x0f',
    )
    return env_vars
if __name__ == '__main__':
    apvy_vars = appveyor_vars()

    channel = sys.argv[1]
    messages = sys.argv[2:]
    messages = ' '.join(messages)
    messages = messages.split(',')
    messages = [msg.format(**apvy_vars).strip() for msg in messages]

    irc_username = 'Appveyor'
    irc_nick = irc_username + str(random.randint(1, 9999))

    try:
        # establish connection
        # NOTE(review): ssl.wrap_socket() is deprecated; a modern rewrite
        # would use ssl.create_default_context().wrap_socket(...).
        irc_sock = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        irc_sock.connect((socket.gethostbyname('chat.freenode.net'), 6697))
        irc_sock.send('NICK {0}\r\nUSER {0} * 0 :{0}\r\n'.format(irc_username).encode())
        irc_file = irc_sock.makefile()
        while irc_file:
            line = irc_file.readline()
            print(line.rstrip())
            response = line.split()
            if not response:
                # Blank keep-alive line: nothing to dispatch on; skip
                # instead of crashing on response[0] below.
                continue
            if response[0] == 'PING':
                # Bug fix: the original sent the PONG through the file
                # wrapper (which has no send() method) and misspelled
                # the 'response' variable.
                irc_sock.send('PONG {}\r\n'.format(response[1]).encode())
            elif len(response) > 1 and response[1] == '433':
                # Nickname already in use: retry with the randomized nick.
                irc_sock.send('NICK {}\r\n'.format(irc_nick).encode())
            elif len(response) > 1 and response[1] == '001':
                time.sleep(5)
                # join the channel
                irc_sock.send('JOIN #{}\r\n'.format(channel).encode())
                # send messages
                for msg in messages:
                    print('PRIVMSG #{} :{}'.format(channel, msg))
                    irc_sock.send('PRIVMSG #{} :{}\r\n'.format(channel, msg).encode())
                time.sleep(5)
                # leave the channel
                irc_sock.send('PART #{}\r\n'.format(channel).encode())
                sys.exit()
    except Exception as e:
        # Bug fix: the bare 'except:' also caught the SystemExit raised by
        # sys.exit() above, printing noise on a clean shutdown.
        print(e)
        sys.exit()
| nado/stk-code | tools/appveyor-irc-notify.py | Python | gpl-3.0 | 6,742 |
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
    """
    Node superclass for both terminal and nonterminal nodes.
    """
    # Set by the parent Node when this node is adopted as a child;
    # None for the root of the tree.
    parent = None

    def _eq(self, other):
        # Subclasses implement the actual comparison; *other* is known to
        # be of the same class when this is called.
        raise NotImplementedError

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)

    def __ne__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self._eq(other)

    # Nodes are mutable, so explicitly mark them unhashable.
    __hash__ = None

    def get_prev_sibling(self):
        """Return previous child in parent's children, or None."""
        if self.parent is None:
            return None
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i-1]

    def get_next_sibling(self):
        """Return next child in parent's children, or None."""
        if self.parent is None:
            return None
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i+1]
                except IndexError:
                    return None

    def get_prev_leaf(self):
        """Return the leaf node that precedes this node in the parse tree."""
        def last_child(node):
            # Deepest right-most descendant of *node*; None when a
            # nonterminal has no children at all.
            if isinstance(node, Leaf):
                return node
            elif not node.children:
                return None
            else:
                return last_child(node.children[-1])
        if self.parent is None:
            return None
        prev = self.get_prev_sibling()
        if isinstance(prev, Leaf):
            return prev
        elif prev is not None:
            return last_child(prev)
        # No previous sibling: continue the search from the parent.
        return self.parent.get_prev_leaf()

    def get_next_leaf(self):
        """Return self if leaf, otherwise the leaf node that succeeds this
        node in the parse tree.
        """
        # Descend along the first children until a terminal is reached,
        # i.e. return the first leaf within this subtree.
        node = self
        while not isinstance(node, Leaf):
            assert node.children
            node = node.children[0]
        return node

    def get_lineno(self):
        """Return the line number which generated the invocant node."""
        return self.get_next_leaf().lineno

    def get_prefix(self):
        """Return the prefix of the next leaf node."""
        # only leaves carry a prefix
        return self.get_next_leaf().prefix
class Node(BaseNode):
    """Parse-tree node implementation for nonterminal symbols."""

    def __init__(self, type, children, context=None):
        # Nonterminal type codes are >= 256 (terminals stay below 256).
        self.type = type
        self.children = list(children)
        for child in self.children:
            child.parent = self

    def __repr__(self):
        return '%s(%s, %r)' % (self.__class__.__name__,
                               self.type, self.children)

    def __str__(self):
        """This reproduces the input source exactly."""
        return ''.join(str(child) for child in self.children)

    def _eq(self, other):
        if self.type != other.type:
            return False
        return self.children == other.children

    # Allow indexing and iterating the node directly instead of going
    # through .children.
    def __getitem__(self, index):
        return self.children[index]

    def __iter__(self):
        return iter(self.children)

    def __len__(self):
        return len(self.children)
class Leaf(BaseNode):
    """Parse-tree node implementation for terminal symbols (tokens)."""

    # Whitespace and comments preceding this token in the input.
    prefix = ''
    # Line and column where this token starts in the input.
    lineno = 0
    column = 0

    def __init__(self, type, value, context=None):
        # Terminal type codes are below 256.
        self.type = type
        self.value = value
        if context is not None:
            prefix, start = context
            self.prefix = prefix
            self.lineno, self.column = start

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__,
                                   self.type, self.value, self.prefix)

    def __str__(self):
        """This reproduces the input source exactly."""
        return self.prefix + str(self.value)

    def _eq(self, other):
        """Compares two nodes for equality."""
        return self.type == other.type and self.value == other.value
def convert(grammar, raw_node):
    """Convert a raw 4-tuple parser node to a Node or Leaf instance."""
    type, value, context, children = raw_node
    if not children and type not in grammar.number2symbol:
        return Leaf(type, value, context=context)
    # Collapse trivial nonterminals: when there is exactly one child,
    # return that child instead of wrapping it in a new node.
    if len(children) == 1:
        return children[0]
    return Node(type, children, context=context)
def nice_repr(node, number2name, prefix=False):
    """Return a readable representation of *node* with type numbers mapped
    to symbol names; include each leaf's prefix when *prefix* is true."""
    def _repr(n):
        if isinstance(n, Leaf):
            return "%s(%r)" % (number2name[n.type], n.value)
        children = ', '.join(_repr(c) for c in n.children)
        return "%s(%s)" % (number2name[n.type], children)

    def _prepr(n):
        if isinstance(n, Leaf):
            return "%s(%r, %r)" % (number2name[n.type], n.prefix, n.value)
        children = ', '.join(_prepr(c) for c in n.children)
        return "%s(%s)" % (number2name[n.type], children)

    if prefix:
        return _prepr(node)
    return _repr(node)
class NodeVisitor(object):
    """Base visitor dispatching on node type names (``visit_<name>``)."""

    def __init__(self, number2name, *args):
        self.number2name = number2name
        self.init(*args)

    def init(self, *args):
        """Hook for subclasses; receives the extra constructor arguments."""
        pass

    def visit(self, node):
        """Visit a node."""
        handler = getattr(self, 'visit_' + self.number2name[node.type], None)
        if handler is None:
            handler = self.generic_visit
        return handler(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        if isinstance(node, Node):
            for child in node:
                self.visit(child)
| havard024/prego | venv/lib/python2.7/site-packages/sphinx/pycode/nodes.py | Python | mit | 6,387 |
#!python
# -*- coding: utf-8 -*-
"""File: weka.py
Description: Link WEKA into the project
History:
0.2.0 + a group of functions to interact with WEKA
0.1.0 The first version.
"""
__version__ = '0.2.0'
__author__ = 'SpaceLis'
import re, random
from anatool.dm.db import CONN_POOL, GEOTWEET
from anatool.dm.dataset import loadrows, Dataset, DataItem, list_split
from anatool.analysis.text_util import geo_rect
# Precompiled patterns used when parsing WEKA prediction logs.
_SPACE = re.compile(r'\s+')    # runs of whitespace (column separator)
_SYMBOL = re.compile(r'\+|\*')  # '+'/'*' markers blanked before splitting
_CLSNO = re.compile(r':')      # "index:label" class fields
_PARATH = re.compile(r'\(|\)')  # parentheses around the instance id
def gen_arff(dset, dst, key_lst=None, \
        typemap=dict({'__CLASS__': 'DISC'}), \
        default_type = 'NUMERIC'):
    """Write *dset* to *dst* as a WEKA ARFF file.

    :param dset: iterable of dict-like items (attribute name -> value)
    :param key_lst: attribute order; defaults to the sorted union of all
        keys found in *dset*
    :param typemap: maps an attribute name to 'DISC' (nominal, values
        enumerated from the data) or an ARFF type string; attributes not
        listed get *default_type*
    """
    farff = open(dst, 'w')
    print >> farff, '@Relation {0}'.format(dst)
    #Build the universe term set
    if key_lst == None:
        key_set = set()
        for twt in dset:
            for key in twt.iterkeys():
                key_set.add(key)
        key_lst = sorted(key_set)
    #Build the universe class set
    dis_lst = dict()
    for key in key_lst:
        if typemap.get(key, default_type) == 'DISC':
            dis_lst[key] = set()
    for item in dset:
        for key in dis_lst.iterkeys():
            if item[key] not in dis_lst[key]:
                dis_lst[key].add(item[key])
    #Generate column description
    for key in key_lst:
        if typemap.get(key, default_type) == 'DISC':
            # Nominal attribute: enumerate the observed values in braces.
            print >> farff, '@ATTRIBUTE {0}\t{{'.format(key),
            print >> farff, ', '.join(val for val in dis_lst[key]),
            print >> farff, '}'
        else:
            print >> farff, '@ATTRIBUTE {0}\t{1}'.\
                    format(key, typemap.get(key, default_type))
    #Generate dataset
    print >> farff, '@DATA'
    for item in dset:
        # Missing attributes default to 0.
        print >> farff, ', '.join(str(item.get(key, 0)) for key in key_lst)
    farff.flush()
    farff.close()
def gen_crs_arff(self, dst, fold, key_lst=None, \
        typemap=dict({'__CLASS__': 'DISC'}), \
        default_type = 'NUMERIC'):
    """Generate train/test ARFF file pairs for *fold*-fold cross validation.

    Splitting is stratified: instances are grouped by their '__CLASS__'
    value, each group is shuffled and cut into *fold* parts.

    NOTE(review): despite the ``self`` parameter this is a module-level
    function; it looks like it was lifted from a Dataset method and
    expects *self* to behave like a Dataset — confirm against callers.
    """
    clses = dict()
    for i in range(len(self)):
        if self[i]['__CLASS__'] not in clses:
            clses[self[i]['__CLASS__']] = dict()
            clses[self[i]['__CLASS__']]['list'] = list()
        clses[self[i]['__CLASS__']]['list'].append(i)
    for cls in clses:
        random.shuffle(clses[cls]['list'])
        clses[cls]['fold'] = list_split(clses[cls]['list'], fold)
    for i in range(fold):
        test = Dataset()
        train = Dataset()
        for cls in clses.iterkeys():
            # Fold i is the held-out test part; the rest goes to training.
            test.extend([self[f] for f in clses[cls]['fold'][i]])
            for j in range(fold):
                if j != i:
                    train.extend([self[f] for f in clses[cls]['fold'][j]])
        gen_arff(test, '{0}.test.{1}.arff'.format(dst, i), key_lst, \
                typemap, default_type)
        gen_arff(train, '{0}.train.{1}.arff'.format(dst, i), key_lst, \
                typemap, default_type)
def log_parse(src):
    """Parse prediction output from WEKA into a Dataset.

    Each data line is split on whitespace after blanking the '+'/'*'
    markers; the 'actual' and 'predicted' columns come as "index:label"
    pairs and the instance id sits in parentheses near the end.
    """
    ins_lst = Dataset()
    with open(src) as fsrc:
        for line in fsrc:
            # Blank the '+'/'*' markers so columns split cleanly.
            line, dummy = _SYMBOL.subn(' ', line)
            col = _SPACE.split(line)
            ins = DataItem()
            ins['ref'] = int((_CLSNO.split(col[2]))[0])
            ins['refN'] = (_CLSNO.split(col[2]))[1]
            ins['prd'] = int((_CLSNO.split(col[3]))[0])
            ins['prdN'] = (_CLSNO.split(col[3]))[1]
            # NOTE(review): '+' was already replaced by _SYMBOL above, and
            # the score range also starts at index 4 — both look suspicious
            # against the actual WEKA log layout; confirm.
            ins['err'] = True if col[4] == '+' else False
            ins['score'] = [float(col[i]) for i in range(4, len(col) - 2)]
            ids, dummy = _PARATH.subn('', col[len(col) - 2])
            ins['id'] = int(ids)
            ins_lst.append(ins)
    return ins_lst
def run_weka(cmdline):
    """Run WEKA classifiers and return the output log in string format.

    Not implemented yet; *cmdline* is expected to be the full WEKA
    command line to execute.
    """
    #FIXME implement the code for it
    pass
def test():
    """Test this unit: dump tweets from a Manhattan bounding box to ARFF."""
    twt_lst = loadrows(GEOTWEET, ('place_id', 'text'),
            ('MBRContains({0}, geo)'.format(\
            geo_rect((40.75,-74.02),(40.70,-73.97))),))
    gen_arff(twt_lst, 'test.arff', {'text': 'TEXT', 'place_id': 'DISC'})
if __name__ == '__main__':
    # Ad-hoc manual check: parse a sample WEKA prediction log.
    log_parse('../weka/chicago_type.log')
| spacelis/anatool | anatool/dm/weka.py | Python | mit | 4,370 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests of datastore.py."""
import datastore
from src import basetest
class InstanceInfoTest(basetest.TestCase):
    """Unit tests of InstanceInfo."""

    def testStatusChange(self):
        """SetStatus() must persist the new status to the datastore."""
        instance_info = datastore.InstanceInfo()
        instance_info.put()
        instance_info.SetStatus(datastore.InstanceStatus.RUNNING)
        self.assertEqual(datastore.InstanceStatus.RUNNING, instance_info.status)
        # Re-fetch from the datastore to prove the change was stored, not
        # just mutated on the in-memory entity.
        all_instances = datastore.InstanceInfo.query().fetch()
        self.assertEqual(1, len(all_instances))
        instance_info = all_instances[0]
        self.assertEqual(datastore.InstanceStatus.RUNNING, instance_info.status)
class ClusterInfoTest(basetest.TestCase):
    """Unit tests of ClusterInfo."""

    def testStatusChange(self):
        """SetStatus() must persist the new status to the datastore."""
        cluster_info = datastore.ClusterInfo()
        cluster_info.put()
        cluster_info.SetStatus(datastore.ClusterStatus.READY)
        self.assertEqual(datastore.ClusterStatus.READY, cluster_info.status)
        # Re-fetch to prove the status change was actually stored.
        all_clusters = datastore.ClusterInfo.query().fetch()
        self.assertEqual(1, len(all_clusters))
        cluster_info = all_clusters[0]
        self.assertEqual(datastore.ClusterStatus.READY, cluster_info.status)

    def testCascadeDelete(self):
        """Deleting a cluster must delete its child instances too."""
        cluster_info = datastore.ClusterInfo()
        cluster_info.put()
        # The instance is created as a child entity of the cluster.
        instance_info = datastore.InstanceInfo(parent=cluster_info.key)
        instance_info.put()
        all_instances = datastore.InstanceInfo.query().fetch()
        self.assertEqual(1, len(all_instances))
        cluster_info.key.delete()
        all_instances = datastore.InstanceInfo.query().fetch()
        self.assertEqual(0, len(all_instances))

    def testGetMasterIpAddress(self):
        """GetMasterIpAddress() returns the master instance's external IP."""
        cluster_info = datastore.ClusterInfo()
        cluster_info.put()
        instance_info = datastore.InstanceInfo(parent=cluster_info.key,
                                               external_ip='1.2.3.4')
        instance_info.put()
        cluster_info.master = instance_info.key
        cluster_info.put()
        self.assertEqual('1.2.3.4', cluster_info.GetMasterIpAddress())
if __name__ == '__main__':
    # Run the tests when this module is executed directly.
    basetest.main()
| GoogleCloudPlatform/Data-Pipeline | app/src/hadoop/datastore_test.py | Python | apache-2.0 | 2,601 |
class GsmException(Exception):
    """Base exception for errors raised by the GSM layer."""
| PMantovani/road-irregularity-detector | raspberry/src/GsmException.py | Python | apache-2.0 | 40 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
#
from distutils.core import setup
# Package metadata for the Metrics Grimoire data-annotation library.
# NOTE(review): distutils is deprecated (PEP 632); presumably fine for this
# legacy tool, but consider migrating to setuptools.
setup(name = "grimoirelib_metadata",
        version = "0.1",
        author = "Daniel Izquierdo",
        author_email = "dizquierdo@bitergia.com",
        description = "Data Annotation library for the Metrics Grimoire toolset",
        url = "https://github.com/dicortazar/grimoirelib-metadata",
        packages = ["grimoirelib_metadata"],
        data_files = [],
        scripts = [])
| dicortazar/grimoirelib-metadata | setup.py | Python | gpl-3.0 | 1,262 |
import time
import silc
class EchoClient(silc.SilcClient):
def channel_message(self, sender, channel, flags, message):
print message
self.send_channel_message(channel, message)
def private_message(self, sender, flags, message):
print message
self.send_private_message(sender, message)
def running(self):
print "* Running"
client.connect_to_server("silc.example.com")
def connected(self):
print "* Connected"
self.command_call("JOIN #cam")
def disconnected(self, msg):
print "* Disconnected: %s" % msg
# catch responses to commands
def command_reply_join(self, channel, name, topic, hmac, x, y,
users):
print "* Joined channel %s" % name
self.send_channel_message(channel, "Hello!")
# catch async notifications from the server
def notify_join(self, user, channel):
print "* A user named %s has joined the channel %s" % \
(user.username, channel.channel_name)
self.send_channel_message(channel, "Hello, %s" %
user.username)
if __name__ == "__main__":
    # Generate (or load) a key pair and construct the bot; running() then
    # connects to the server once the event loop starts.
    keys = silc.create_key_pair("silc.pub", "silc.prv", passphrase = "")
    client = EchoClient(keys, "echobot", "echobot", "Echo Bot")
    # Pump the SILC event loop until interrupted with CTRL+C.
    while True:
        try:
            client.run_one()
            time.sleep(0.2)
        except KeyboardInterrupt:
            break
| mnemonikk/pysilc | examples/echo.py | Python | bsd-3-clause | 1,449 |
from symbol.builder import add_anchor_to_arg
from models.FPN.builder import MSRAResNet50V1FPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
from models.msrcnn.builder import MaskScoringFasterRcnn as Detector
from models.msrcnn.builder import MaskFPNRpnHead as RpnHead
from models.msrcnn.builder import MaskFasterRcnn4ConvHead as MaskHead
from models.maskrcnn.builder import BboxPostProcessor
from models.maskrcnn.process_output import process_output
from models.msrcnn.builder import MaskIoUConvHead as MaskIoUHead
def get_config(is_train):
    """Build the Mask Scoring R-CNN R50-FPN 1x configuration.

    Returns the parameter classes, the assembled train/test symbol
    (via ModelParam), the input transform pipeline, the data/label
    names and the metric list consumed by the SimpleDet trainer.

    :param is_train: True builds the training symbol and training
        transforms; False builds the test symbol and test transforms.
    """
    class General:
        log_frequency = 10
        name = __name__.rsplit("/")[-1].rsplit(".")[-1]
        batch_image = 2 if is_train else 1
        fp16 = False
        loader_worker = 8
    class KvstoreParam:
        kvstore = "nccl"
        batch_image = General.batch_image
        gpus = [0, 1, 2, 3, 4, 5, 6, 7]
        fp16 = General.fp16
    class NormalizeParam:
        normalizer = normalizer_factory(type="fixbn")
    class BackboneParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
    class NeckParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
    class RpnParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        batch_image = General.batch_image
        nnvm_proposal = True
        nnvm_rpn_target = False
        class anchor_generate:
            scale = (8,)
            ratio = (0.5, 1.0, 2.0)
            stride = (4, 8, 16, 32, 64)
            image_anchor = 256
            max_side = 1400
        class anchor_assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0
            image_anchor = 256
            pos_fraction = 0.5
        class head:
            conv_channel = 256
            mean = (0, 0, 0, 0)
            std = (1, 1, 1, 1)
        class proposal:
            pre_nms_top_n = 2000 if is_train else 1000
            post_nms_top_n = 2000 if is_train else 1000
            nms_thr = 0.7
            min_bbox_side = 0
        class subsample_proposal:
            proposal_wo_gt = False
            image_roi = 512
            fg_fraction = 0.25
            fg_thr = 0.5
            bg_thr_hi = 0.5
            bg_thr_lo = 0.0
        class bbox_target:
            num_reg_class = 81
            class_agnostic = False
            weight = (1.0, 1.0, 1.0, 1.0)
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)
    class BboxParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        num_class = 1 + 80
        image_roi = 512
        batch_image = General.batch_image
        class regress_target:
            class_agnostic = False
            mean = (0.0, 0.0, 0.0, 0.0)
            std = (0.1, 0.1, 0.2, 0.2)
    class MaskParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        resolution = 28
        dim_reduced = 256
        # Foreground RoIs per image = image_roi * fg_fraction (512 * 0.25).
        num_fg_roi = int(RpnParam.subsample_proposal.image_roi * RpnParam.subsample_proposal.fg_fraction)
    class RoiParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        out_size = 7
        stride = (4, 8, 16, 32)
        roi_canonical_scale = 224
        roi_canonical_level = 4
        img_roi = 1000
    class MaskRoiParam:
        fp16 = General.fp16
        normalizer = NormalizeParam.normalizer
        out_size = 14
        stride = (4, 8, 16, 32)
        roi_canonical_scale = 224
        roi_canonical_level = 4
        img_roi = 100
    class DatasetParam:
        if is_train:
            image_set = ("coco_train2017", )
        else:
            image_set = ("coco_val2017", )
    class OptimizeParam:
        class optimizer:
            type = "sgd"
            # Linear-scaling rule: base lr 0.01 per 8 images, scaled by
            # the effective global batch size (gpus * batch_image).
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
            momentum = 0.9
            wd = 0.0001
            clip_gradient = None
        class schedule:
            mult = 1
            begin_epoch = 0
            end_epoch = 6 * mult
            # Step-down iterations, rescaled from the canonical 16-image
            # schedule to the actual global batch size.
            lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
                       80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
        class warmup:
            type = "gradual"
            lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
            iter = 500
    class TestParam:
        min_det_score = 0.05
        max_det_per_image = 100
        process_roidb = lambda x: x
        # Defers to the module-level process_output at call time.
        process_output = lambda x, y: process_output(x, y)
        class model:
            prefix = "experiments/{}/checkpoint".format(General.name)
            epoch = OptimizeParam.schedule.end_epoch
        class nms:
            type = "nms"
            thr = 0.5
        class coco:
            annotation = "data/coco/annotations/instances_minival2014.json"
    backbone = Backbone(BackboneParam)
    neck = Neck(NeckParam)
    rpn_head = RpnHead(RpnParam, MaskParam)
    roi_extractor = RoiExtractor(RoiParam)
    mask_roi_extractor = RoiExtractor(MaskRoiParam)
    bbox_head = BboxHead(BboxParam)
    mask_head = MaskHead(BboxParam, MaskParam, MaskRoiParam)
    bbox_post_processer = BboxPostProcessor(TestParam)
    maskiou_head = MaskIoUHead(TestParam, BboxParam, MaskParam)
    detector = Detector()
    if is_train:
        train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, maskiou_head)
        test_sym = None
    else:
        train_sym = None
        test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, maskiou_head, bbox_post_processer)
    class ModelParam:
        train_symbol = train_sym
        test_symbol = test_sym
        from_scratch = False
        random = True
        memonger = False
        memonger_until = "stage3_unit21_plus"
        class pretrain:
            prefix = "pretrain_model/resnet-v1-50"
            epoch = 0
            fixed_param = ["conv0", "stage1", "gamma", "beta"]
            excluded_param = ["mask_fcn"]
        def process_weight(sym, arg, aux):
            # Bake the per-stride anchor grids into the arg dict so the
            # nnvm proposal op can read them as constants.
            for stride in RpnParam.anchor_generate.stride:
                add_anchor_to_arg(
                    sym, arg, aux, RpnParam.anchor_generate.max_side,
                    stride, RpnParam.anchor_generate.scale,
                    RpnParam.anchor_generate.ratio)
    # data processing
    class NormParam:
        mean = (122.7717, 115.9465, 102.9801) # RGB order
        std = (1.0, 1.0, 1.0)
    # data processing
    class ResizeParam:
        short = 800
        long = 1333
    class PadParam:
        short = 800
        long = 1333
        max_num_gt = 100
        max_len_gt_poly = 2500
    class AnchorTarget2DParam:
        def __init__(self):
            self.generate = self._generate()
        class _generate:
            def __init__(self):
                self.stride = (4, 8, 16, 32, 64)
                self.short = (200, 100, 50, 25, 13)
                self.long = (334, 167, 84, 42, 21)
            # NOTE(review): (8) is the int 8, not a 1-tuple; looks like
            # PyramidAnchorTarget2D accepts a scalar scale — confirm.
            scales = (8)
            aspects = (0.5, 1.0, 2.0)
        class assign:
            allowed_border = 0
            pos_thr = 0.7
            neg_thr = 0.3
            min_pos_thr = 0.0
        class sample:
            image_anchor = 256
            pos_fraction = 0.5
    class RenameParam:
        mapping = dict(image="data")
    from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
        ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
        RenameRecord, Norm2DImage, Pad2DImage
    from models.maskrcnn.input import PreprocessGtPoly, EncodeGtPoly, \
        Resize2DImageBboxMask, Flip2DImageBboxMask, Pad2DImageBboxMask
    from models.FPN.input import PyramidAnchorTarget2D
    if is_train:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            PreprocessGtPoly(),
            Resize2DImageBboxMask(ResizeParam),
            Flip2DImageBboxMask(),
            EncodeGtPoly(PadParam),
            Pad2DImageBboxMask(PadParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data"]
        label_name = ["im_info", "gt_bbox", "gt_poly"]
        if not RpnParam.nnvm_rpn_target:
            # RPN targets computed on the CPU loader side instead of in-graph.
            transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
            label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
    else:
        transform = [
            ReadRoiRecord(None),
            Norm2DImage(NormParam),
            Resize2DImageBbox(ResizeParam),
            ConvertImageFromHwcToChw(),
            RenameRecord(RenameParam.mapping)
        ]
        data_name = ["data", "im_info", "im_id", "rec_id"]
        label_name = []
    import core.detection_metric as metric
    from models.msrcnn.metric import SigmoidCELossMetric, L2
    rpn_acc_metric = metric.AccWithIgnore(
        "RpnAcc",
        ["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    rpn_l1_metric = metric.L1(
        "RpnL1",
        ["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
        []
    )
    # for bbox, the label is generated in network so it is an output
    box_acc_metric = metric.AccWithIgnore(
        "RcnnAcc",
        ["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    box_l1_metric = metric.L1(
        "RcnnL1",
        ["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
        []
    )
    mask_cls_metric = SigmoidCELossMetric(
        "MaskCE",
        ["mask_loss_output"],
        []
    )
    iou_l2_metric = L2(
        "IoUL2",
        ["iou_head_loss_output"],
        []
    )
    metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric, mask_cls_metric, iou_l2_metric]
    return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
           ModelParam, OptimizeParam, TestParam, \
           transform, data_name, label_name, metric_list
| TuSimple/simpledet | config/ms_r50v1_fpn_1x.py | Python | apache-2.0 | 10,314 |
"""
evolang.py for exploring ideas from:
Emergence of Communication in Teams of Embodied and Situated
Agents, by Davide Marocco and Stefano Nolfi, ALife 2006.
Author: Doug Blank
Bryn Mawr College
Date: March 2008
For use with PyroRobotics.org
"""
from pyrobot.simulators.pysim import *
from pyrobot.geometry import distance, Polar
from pyrobot.tools.sound import SoundDevice
from pyrobot.brain.ga import *
from pyrobot.robot.symbolic import Simbot
from pyrobot.engine import Engine
from pyrobot.brain import Brain
from pyrobot.brain.conx import SRN
import sys, time, random, math
############################################################
# First, let's define the brains to be used by each robot:
############################################################
class NNBrain(Brain):
    """Recurrent (SRN) controller for one robot: maps range sensors, stall
    flag, quadrant sounds, light and its own previous utterance to
    (translate, rotate, speak) outputs."""
    def setup(self):
        # Scale range readings into [0, 1].
        self.robot.range.units = "scaled"
        self.net = SRN()
        self.sequenceType = "ordered-continuous"
        # INPUT: ir, ears, mouth[t-1]
        # sonar, stall, ears, eyes, speech[t-1]
        self.net.addLayer("input", len(self.robot.range) + 1 + 4 + 1 + 1)
        self.net.addContextLayer("context", 2, "hidden")
        self.net.addLayer("hidden", 2)
        # OUTPUT: trans, rotate, say
        self.net.addLayer("output", 3)
        # ----------------------------------
        self.net.connect("input", "output")
        self.net.connect("input", "hidden")
        self.net.connect("context", "hidden")
        self.net.connect("hidden", "output")
        self.net["context"].setActivations(.5)
        # Weights are set by the GA, not by backprop.
        self.net.learning = 0
    def step(self, ot1, or1):
        # Map network outputs from [0, 1] to velocities in [-1, 1].
        t, r = [((v * 2) - 1) for v in [ot1, or1]]
        self.robot.move(t, r)
    def propagate(self, sounds):
        """Feed current sensors + `sounds` (4 quadrant values) through the
        net; returns [translate, rotate, speech] activations in [0, 1]."""
        light = [max(map(lambda v: math.floor(v),self.robot.light[0].values()))]
        # Last element of the previous output (own speech) is fed back in.
        inputs = (self.robot.range.distance() + [self.robot.stall] +
                  sounds + light + [self.net["output"].activation[2]])
        self.net.propagate(input=inputs)
        self.net.copyHiddenToContext()
        return [v for v in self.net["output"].activation] # t, r, speech
# Defaults:
SimulatorClass, PioneerClass = TkSimulator, TkPioneer
robotCount = 4
automaticRestart = False
sd = "/dev/dsp"
startEvolving = False
loadPop = None
numTrials = 5
numSeconds = 30
numPopsize= 30
numMaxgen = 100
canHear = True
# Robot colors; make sure you have enough for robotCount:
colors = ["red", "blue", "green", "purple", "pink", "orange", "white"]
i = 1
while i < len(sys.argv):
if sys.argv[i] == "-h":
print "python evolang.py command line:"
print
print " -g 2d|3d|none (graphics, default 2d)"
print " -n N (robot count, default 4)"
print " -a (automatic restart, default off)"
print " -e (start evolving, default off)"
print " -p /dev/dsp (sound device or none, default /dev/dsp)"
print " -l file.pop (load a population of genes)"
print " -t T (fitness function uses T trials, default 5)"
print " -s S (sim seconds per trial, default 20)"
print " -z Z (population size, default 100)"
print " -m M (max generations, default 100)"
print " -c 0|1 (can hear?, default 1)"
print
print " CONTROL+c to stop at next end of generation"
print " CONTROL+c CONTROL+c to stop now"
sys.exit()
if sys.argv[i] == "-g":
i += 1
simType = sys.argv[i].lower()
if simType == "2d":
SimulatorClass, PioneerClass = TkSimulator, TkPioneer
elif simType == "none":
SimulatorClass, PioneerClass = Simulator, Pioneer
elif simType == "3d":
from pyrobot.simulators.pysim3d import Tk3DSimulator
SimulatorClass, PioneerClass = Tk3DSimulator, TkPioneer
else:
raise AttributeError("unknown graphics mode: '%s'" % simType)
elif sys.argv[i] == "-n":
i += 1
robotCount = int(sys.argv[i])
elif sys.argv[i] == "-a":
automaticRestart = True
elif sys.argv[i] == "-l":
i += 1
loadPop = sys.argv[i]
elif sys.argv[i] == "-t":
i += 1
numTrials = int(sys.argv[i])
elif sys.argv[i] == "-s":
i += 1
numSeconds = int(sys.argv[i])
elif sys.argv[i] == "-z":
i += 1
numPopsize = int(sys.argv[i])
elif sys.argv[i] == "-m":
i += 1
numMaxgen = int(sys.argv[i])
elif sys.argv[i] == "-c":
i += 1
canHear = int(sys.argv[i])
elif sys.argv[i] == "-e":
startEvolving = True
elif sys.argv[i] == "-p":
i += 1
if sys.argv[i].lower() == "none":
sd = None
else:
sd = sys.argv[i]
i += 1
try:
sd = SoundDevice(sd)
except:
sd = None
print "Sound device failed to start"
############################################################
# Build a simulated world:
############################################################
# In pixels, (width, height), (offset x, offset y), scale:
sim = SimulatorClass((441,434), (22,420), 40.357554, run=0)
## Milliseconds of time to simulate per step:
sim.timeslice = 250
# Add a bounding box:
# x1, y1, x2, y2 in meters:
sim.addBox(0, 0, 10, 10)
# Add a couple of light sources:
# (x, y) meters, brightness usually 1 (1 meter radius):
sim.addLight(2, 2, 1)
sim.addLight(7, 7, 1)
# One simulated Pioneer per robot, each listening on its own port.
for i in range(robotCount):
    # port, name, x, y, th, bounding Xs, bounding Ys, color
    sim.addRobot(60000 + i, PioneerClass("Pioneer%d" % i,
                 1, 1, -0.86,
                 ((.225, .225, -.225, -.225),
                  (.15, -.15, -.15, .15)),
                 colors[i]))
    robot = sim.robots[-1] # last one
    robot.addDevice(PioneerFrontSonars())
    robot.addDevice(PioneerFrontLightSensors())
############################################################
# Now, make some connections to the sim robots
############################################################
# client side:
clients = [Simbot(sim, ["localhost", 60000 + n], n) for n in range(robotCount)]
# server side:
engines = [Engine() for n in range(robotCount)]
# Attach each client to an engine and give it an NNBrain controller.
for n in range(robotCount):
    engines[n].robot = clients[n]
    # turn off noise:
    clients[n].light[0].noise = 0.0
    clients[n].sonar[0].noise = 0.0
    # load brain:
    engines[n].brain = NNBrain(engine=engines[n])
# Set some properties after robots are created:
for n in range(robotCount):
    sim.display["%s robot audio" % colors[n]] = False
# GUI-only setup: trails, speech bubbles, and a per-robot audio menu.
if isinstance(sim, TkSimulator):
    sim.toggle("trail")
    sim.toggle("speech")
    sim.toggle("sonar")
    alist = []
    for n in range(robotCount):
        s = "%s robot audio" % colors[n]
        alist.append([s, lambda s=s: sim.simToggle(s)])
    menu = [('Program', alist)]
    for entry in menu:
        sim.mBar.tk_menuBar(sim.makeMenu(sim.mBar, entry[0], entry[1]))
    sim.redraw()
############################################################
# Define some functions for hearing support
############################################################
def quadNum(myangle, angle):
    """Return the quadrant of `angle` relative to the heading `myangle`:
    0 = front, 1 = right, 2 = back, 3 = left.

        |0|
     |3|   |1|
        |2|
    """
    offset = angle - myangle
    if offset >= 0:
        # Counter-clockwise offsets sweep front -> left -> back -> right.
        if offset < math.pi / 4:
            return 0
        if offset < 3 * math.pi / 4:
            return 3
        if offset < math.pi:
            return 2
        return 1
    # Clockwise offsets sweep front -> right -> back -> left.
    if offset > -math.pi / 4:
        return 0
    if offset > -3 * math.pi / 4:
        return 1
    if offset > -math.pi:
        return 2
    return 3
def quadTest(robot = 0):
    """Debug helper: compute what robot number `robot` currently hears,
    using the robot indices (0..robotCount-1) as fake sound values so the
    source of each heard sound is obvious."""
    location = [0] * robotCount
    for n in range(robotCount):
        location[n] = engines[0].robot.simulation[0].getPose(n)
    myLoc = location[robot]
    return quadSound(myLoc, range(robotCount), location)
def quadSound(myLoc, lastS, location):
    """
    Computes the sound heard for all quads.
    myLoc: (x, y, t) of current robot; t where 0 is up
    lastS: last sound made by robots
    location: (x, y, t) of robots; t where 0 is up

    Returns a 4-element list: for each quadrant, the sound of the
    closest in-range robot (0.5 = silence).
    """
    if not canHear:
        return [0.5 for x in range(robotCount)]
    # dist, freq for each robot; 0.5 is what is silence
    closest = [(10000,0.5), (10000,0.5), (10000,0.5), (10000,0.5)]
    for n in range(len(location)):
        loc = location[n]
        # Skip ourselves (identified by pose equality).
        if loc != myLoc:
            # distance between robots:
            dist = distance(myLoc[0], myLoc[1], loc[0], loc[1])
            # global angle from one robot to another:
            # 0 to right, neg down (geometry-style)
            angle = Polar(loc[0] - myLoc[0], loc[1] - myLoc[1], bIsPolar=0)
            angle = angle.t # get theta
            if angle < 0:
                angle = math.pi + (math.pi + angle) # 0 to 2pi
            # Rotate so 0 is "up", matching the pose convention.
            angle = (angle - math.pi/2) % (math.pi * 2)
            q = quadNum(myLoc[2], angle)
            #print n, myLoc[2], angle, q
            # if shorter than previous, and less than N meters
            # (hearing radius 1.0/2.7*7.0, about 2.6 m)
            if dist < closest[q][0] and dist < 1.0/2.7 * 7.0:
                closest[q] = dist, lastS[n] # new closest
    return [v[1] for v in closest] # return the sounds
############################################################
# Now, let's define the GA:
############################################################
class NNGA(GA):
    """GA that evolves NNBrain weight vectors.  Fitness runs the whole
    team in the simulator and rewards robots parked near the two lights
    (penalizing crowding past 2 robots per light)."""
    def __init__(self, *args, **kwargs):
        # pre_init guards fitnessFunction() during the base-class __init__,
        # which evaluates the initial population before we are ready.
        self.pre_init = 1
        GA.__init__(self, *args, **kwargs)
        self.pre_init = 0
        self.done = 0
        self.randomizePositions()
    def generate(self):
        """Elitist reproduction: keep the elite, fill the rest of the
        population with mutated copies of them."""
        if self.generation == 1: return
        elitePositions = map(lambda x: x.position, self.pop.eliteMembers)
        elitePositions.sort()
        # Move all good ones to front of the line:
        for i in range(len(self.pop.eliteMembers)):
            #print " move", elitePositions[i], "to", i
            self.pop.individuals[i] = self.pop.individuals[elitePositions[i]]
        # Populate the rest of the pop with copies of these:
        for i in range(len(self.pop.eliteMembers)):
            copies = ((self.pop.size - len(self.pop.eliteMembers))/
                      len(self.pop.eliteMembers))
            for j in range(copies):
                pos = (i * copies) + len(self.pop.eliteMembers) + j
                #print " copy", i, "to", pos
                self.pop.individuals[pos] = self.pop.individuals[i].copy()
                self.pop.individuals[pos].mutate(self.mutationRate)
    def loadWeights(self, genePos):
        # Every robot on the team shares the same genotype (homogeneous team).
        for n in range(len(engines)):
            engine = engines[n]
            engine.brain.net.unArrayify(self.pop.individuals[genePos].genotype)
    def randomizePositions(self, seed=None):
        """Scatter the robots at random poses, at least 2 m away from the
        lights and from each other."""
        # seed = 0 (reinit), seed = None (random), seed = num (seed it)
        if seed == 0: # Reinitialize to something random:
            seed = random.random() * 100000 + time.time()
        if seed != None: # use a specific seed:
            random.seed(seed)
        # Make the robots far from these positions:
        positions = [(2, 2), (7, 7)] # position of lights
        for n in range(len(engines)):
            engine = engines[n]
            # Put each robot in a random location:
            x, y, t = (1 + random.random() * 7,
                       1 + random.random() * 7,
                       random.random() * math.pi * 2)
            minDistance = min([distance(x, y, x2, y2) for (x2,y2) in positions])
            # make sure they are far enough apart:
            while minDistance < 2: # in meters
                x, y, t = (1 + random.random() * 7,
                           1 + random.random() * 7,
                           random.random() * math.pi * 2)
                minDistance = min([distance(x, y, x2, y2)
                                   for (x2,y2) in positions])
            positions.append( (x,y) )
            engine.robot.simulation[0].setPose(n, x, y, t)
        sim.redraw()
    def fitnessFunction(self, genePos, randomizeSeed=None):
        """Evaluate individual `genePos` (-1 = currently-loaded weights)
        over numTrials trials of self.seconds simulated seconds each."""
        # seed = -1 (cont), seed = 0 (reinit), seed = None (random),
        # seed = num (seed it)
        if self.pre_init: # initial generation fitness
            return 1.0
        fitness = 0.01
        print "-------------------------------"
        for count in range(numTrials):
            subfitness = 0.01
            if genePos >= 0: # -1 is test of last one
                self.loadWeights(genePos)
            if randomizeSeed == -1:
                pass # continue
            else:
                # seed = 0 (reinit), seed = None (random), seed = num (seed it)
                self.randomizePositions(randomizeSeed)
            sim.resetPaths()
            sim.redraw()
            s = [0] * robotCount # each robot's sound
            lastS = [0] * robotCount # previous sound
            location = [(0, 0, 0) for v in range(robotCount)]
            # Set the context values to zero:
            for n in range(robotCount): # number of robots
                engine = engines[n]
                engine.brain.net.setContext(0.5)
                engine.brain.net["output"].setActivations(0.5)
                engine.brain.net["output"].resetActivationFlag()
            for i in range(self.seconds * (1000/sim.timeslice)): # (10 per sec)
                # ------------------------------------------------
                # First, get the locations:
                # ------------------------------------------------
                for n in range(robotCount): # number of robots
                    location[n] = engines[0].robot.simulation[0].getPose(n)
                # ------------------------------------------------
                # Next, compute the move for each robot
                # ------------------------------------------------
                for n in range(robotCount): # number of robots
                    engine = engines[n]
                    engine.robot.update()
                    # compute quad for this robot
                    myLoc = location[n]
                    quad = quadSound(myLoc, lastS, location)
                    # print n, quad
                    # compute output for each robot
                    oTrans, oRotate, s[n] = engine.brain.propagate(quad)
                    # then set the move velocities:
                    engine.brain.step(oTrans, oRotate)
                    sim.robots[n].say("%.2f Heard: [%s]" %
                                      (s[n],
                                       ",".join(map(lambda v: "%.2f" % v, quad))))
                # ------------------------------------------------
                # Save the sounds
                # ------------------------------------------------
                for n in range(robotCount): # number of robots
                    lastS = [v for v in s]
                # ------------------------------------------------
                # Make the move:
                # ------------------------------------------------
                sim.step(run=0)
                # update tasks in GUI:
                if isinstance(sim, TkSimulator):
                    while sim.tk.dooneevent(2): pass
                # Stop the robots from moving on other steps:
                for n in range(robotCount): # number of robots
                    engine = engines[n]
                    engine.robot.stop()
                # play a sound, need to have a sound thread running
                for n in range(robotCount): # number of robots
                    st = "%s robot audio" % colors[n]
                    if sim.display[st] and sd != None:
                        sd.playTone(int(round(engines[n].brain.net["output"].activation[-1], 1) * 2000) + 500, .1) # 500 - 2500
                # ------------------------------------------------
                # Compute fitness
                # ------------------------------------------------
                closeTo = [0, 0] # number of lights
                # how many robots are close to which lights?
                for n in range(len(engines)):
                    engine = engines[n]
                    # get global coords
                    x, y, t = engine.robot.simulation[0].getPose(n)
                    # which light?
                    dists = [distance(light.x, light.y, x, y)
                             for light in sim.lights]
                    if min(dists) <= 1.0:
                        if dists[0] < dists[1]:
                            closeTo[0] += 1
                        else:
                            closeTo[1] += 1
                # ------------------------------------------------
                # Finally, compute the fitness
                # ------------------------------------------------
                for total in closeTo:
                    subfitness += .25 * total
                    # only allow N per feeding area
                    if total > 2:
                        subfitness -= 1.0 * (total - 2)
            subfitness = max(0.01, subfitness)
            #print " ", closeTo, subfitness,
            #raw_input(" press [ENTER]")
            print "   subfitness: %d: %.5f" % (genePos,subfitness)
            fitness += subfitness
        print "Total Fitness %d: %.5f" % (genePos, fitness)
        return fitness
    def setup(self, **args):
        # Called by the GA base class with the extra keyword arguments.
        if args.has_key('seconds'):
            self.seconds = args['seconds']
        else:
            # default value
            self.seconds = 20 # how much simulated seconds to run
    def isDone(self):
        # Checkpoint every generation, then honor the CTRL+C `done` flag.
        if self.generation % 1 == 0:
            self.saveGenesToFile("gen-%05d.pop" % self.generation)
            # load the best into a network:
            engines[0].brain.net.unArrayify(self.pop.bestMember.genotype)
            # and save it
            engines[0].brain.net.saveWeightsToFile("best-%05d.wts" %
                                                   self.generation)
        return self.done
class Experiment:
    """Convenience wrapper bundling the GA, its population and the
    save/load/test operations used from the interactive prompt."""
    def __init__(self, seconds, popsize, maxgen):
        # Genotype length = number of weights in one brain's network.
        g = engines[0].brain.net.arrayify()
        self.ga = NNGA(Population(popsize, Gene, size=len(g), verbose=1,
                       imin=-1, imax=1, min=-50, max=50, maxStep = 1,
                       elitePercent = .20),
                       mutationRate=0.02, crossoverRate=0.6,
                       maxGeneration=maxgen, verbose=1, seconds=seconds)
    def evolve(self, cont=0):
        # cont=1 continues from the current population/generation.
        self.ga.done = 0
        self.ga.evolve(cont)
    def stop(self):
        # Halt all robots immediately.
        for n in range(robotCount):
            engines[n].robot.stop()
    def saveBest(self, filename):
        # Write the best individual's weights to a .wts file.
        net = engines[0].brain.net
        net.unArrayify(self.ga.pop.bestMember.genotype)
        net.saveWeightsToFile(filename)
    def loadGenotypes(self, filename):
        # Seed the entire population from a saved weight file.
        engines[0].brain.net.loadWeightsFromFile(filename)
        genotype = engines[0].brain.net.arrayify()
        for p in self.ga.pop:
            for n in range(len(genotype)):
                p.genotype[n] = genotype[n]
    def loadWeights(self, filename):
        # Load the same saved weights into every robot's brain.
        for n in range(robotCount):
            engines[n].brain.net.loadWeightsFromFile(filename)
    def test(self, seconds):
        # Evaluate the currently-loaded weights without touching the pop.
        self.ga.seconds = seconds
        return self.ga.fitnessFunction(-1) # -1 testing
def testSpeed(steps=100):
    """Benchmark helper: run `steps` simulator+brain updates and report
    steps/second and the realtime factor (10 sim steps = 1 sim second)."""
    start = time.time()
    for i in range(steps):
        for client in clients:
            client.update()
        for engine in engines:
            engine.brain.step(1,1)
        sim.step(run=0)
        if isinstance(sim, TkSimulator):
            while sim.tk.dooneevent(2): pass
    stop = time.time()
    print "Average steps per second:", float(steps)/ (stop - start)
    print "%.2f x realtime" % (((float(steps)/ (stop - start)) / 10.0))
# ------------------------------------------------
# Hack to shutdown engine threads, but keep robot:
# ------------------------------------------------
# shutdown() appears to drop the engine's robot reference, so save and
# restore it around the call -- TODO confirm against Engine.shutdown.
for e in engines:
    temp = e.robot
    e.pleaseStop()
    e.shutdown()
    e.robot = temp
# ---------------------------------------------------------------------
# Code to handle control+c: once to exit at end of generation; twice to
# abort right now.
# ---------------------------------------------------------------------
# NOTE: relies on the module-level name `e` being the Experiment created
# below (it is rebound after the engine-shutdown loop above).
def suspend(*args):
    if not e.ga.done: # first time
        print "# ------------------------------------------"
        print "# Setting GA to stop at end of generation..."
        print "# ------------------------------------------"
        e.ga.done = 1
    else:
        print "# ------------------------------------------"
        print "# Stopping..."
        print "# ------------------------------------------"
        raise KeyboardInterrupt
import signal
signal.signal(signal.SIGINT,suspend)
e = Experiment(seconds=numSeconds, popsize=numPopsize, maxgen=numMaxgen)
if automaticRestart:
    # Resume from the newest checkpoint, if any exist.
    import glob
    maxI = None
    flist = glob.glob("./gen-*.pop")
    if len(flist) > 0:
        flist.sort()
        filename = flist[-1]
        e.ga.loadGenesFromFile(filename)
        # "./gen-NNNNN.pop": characters 6..10 are the generation number.
        e.ga.generation = int(filename[6:11])
elif loadPop:
    e.ga.loadGenesFromFile(loadPop)
if startEvolving:
    e.evolve(cont=1)
# Other commands to try:
#e.ga.randomizePositions() # pick random places
#e.ga.randomizePositions(7652361) # seed to use
#e.ga.fitnessFunction(23, randomizeSeed=0) # test #23, do not reposition
#e.ga.fitnessFunction(-1) # test again in random place
#e.ga.fitnessFunction(-1, randomizeSeed=0) # test again in this place
#e.evolve(cont=1) # continues from before
#e.loadWeights("nolfi-100.wts")
#e.loadGenotypes("nolfi-100.wts")
#e.evolve()
#e.saveBest("nolfi-200.wts")
#e.ga.saveGenesToFile("nolfi-20-20-100.pop")
| emilydolson/forestcat | pyrobot/examples/evolang.py | Python | agpl-3.0 | 21,807 |
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.autompg import autompg as df
# Box plot of fuel efficiency (mpg) grouped by cylinder count, with the
# outlier glyphs suppressed (outliers=False).
p = BoxPlot(df, values='mpg', label='cyl', outliers=False,
            title="MPG Summary (grouped by CYL, no outliers)")
# Write the chart to an HTML file and open it in the browser.
output_file("boxplot.html")
show(p)
| phobson/bokeh | sphinx/source/docs/user_guide/source_examples/charts_boxplot_outliers.py | Python | bsd-3-clause | 264 |
import numpy as np
import os
from cache import cache
########################################################################
def one_hot_encoded(class_numbers, num_classes=None):
    """
    Generate the One-Hot encoded class-labels from an array of integers.

    For example, if class_number=2 and num_classes=4 then
    the one-hot encoded label is the float array: [0. 0. 1. 0.]

    :param class_numbers:
        Array of integers with class-numbers.
        Assume the integers are from zero to num_classes-1 inclusive.
    :param num_classes:
        Number of classes. If None then use max(class_numbers)+1.
    :return:
        2-dim array of shape: [len(class_numbers), num_classes]
    """
    labels = np.asarray(class_numbers)
    # Infer the class count from the data when not given (lowest class
    # number is assumed to be zero).
    if num_classes is None:
        num_classes = labels.max() + 1
    # Start from all-zeros and set a single 1.0 per row.
    encoded = np.zeros((len(labels), num_classes), dtype=float)
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded
########################################################################
class DataSet:
def __init__(self, in_dir, exts='.jpg'):
"""
Create a data-set consisting of the filenames in the given directory
and sub-dirs that match the given filename-extensions.
For example, the knifey-spoony data-set (see knifey.py) has the
following dir-structure:
knifey-spoony/forky/
knifey-spoony/knifey/
knifey-spoony/spoony/
knifey-spoony/forky/test/
knifey-spoony/knifey/test/
knifey-spoony/spoony/test/
This means there are 3 classes called: forky, knifey, and spoony.
If we set in_dir = "knifey-spoony/" and create a new DataSet-object
then it will scan through these directories and create a training-set
and test-set for each of these classes.
The training-set will contain a list of all the *.jpg filenames
in the following directories:
knifey-spoony/forky/
knifey-spoony/knifey/
knifey-spoony/spoony/
The test-set will contain a list of all the *.jpg filenames
in the following directories:
knifey-spoony/forky/test/
knifey-spoony/knifey/test/
knifey-spoony/spoony/test/
See the TensorFlow Tutorial #09 for a usage example.
:param in_dir:
Root-dir for the files in the data-set.
This would be 'knifey-spoony/' in the example above.
:param exts:
String or tuple of strings with valid filename-extensions.
Not case-sensitive.
:return:
Object instance.
"""
# Extend the input directory to the full path.
in_dir = os.path.abspath(in_dir)
# Input directory.
self.in_dir = in_dir
# Convert all file-extensions to lower-case.
self.exts = tuple(ext.lower() for ext in exts)
# Names for the classes.
self.class_names = []
# Filenames for all the files in the training-set.
self.filenames = []
# Filenames for all the files in the test-set.
self.filenames_test = []
# Class-number for each file in the training-set.
self.class_numbers = []
# Class-number for each file in the test-set.
self.class_numbers_test = []
# Total number of classes in the data-set.
self.num_classes = 0
# For all files/dirs in the input directory.
for name in os.listdir(in_dir):
# Full path for the file / dir.
current_dir = os.path.join(in_dir, name)
# If it is a directory.
if os.path.isdir(current_dir):
# Add the dir-name to the list of class-names.
self.class_names.append(name)
# Training-set.
# Get all the valid filenames in the dir (not sub-dirs).
filenames = self._get_filenames(current_dir)
# Append them to the list of all filenames for the training-set.
self.filenames.extend(filenames)
# The class-number for this class.
class_number = self.num_classes
# Create an array of class-numbers.
class_numbers = [class_number] * len(filenames)
# Append them to the list of all class-numbers for the training-set.
self.class_numbers.extend(class_numbers)
# Test-set.
# Get all the valid filenames in the sub-dir named 'test'.
filenames_test = self._get_filenames(os.path.join(current_dir, 'test'))
# Append them to the list of all filenames for the test-set.
self.filenames_test.extend(filenames_test)
# Create an array of class-numbers.
class_numbers = [class_number] * len(filenames_test)
# Append them to the list of all class-numbers for the test-set.
self.class_numbers_test.extend(class_numbers)
# Increase the total number of classes in the data-set.
self.num_classes += 1
def _get_filenames(self, dir):
"""
Create and return a list of filenames with matching extensions in the given directory.
:param dir:
Directory to scan for files. Sub-dirs are not scanned.
:return:
List of filenames. Only filenames. Does not include the directory.
"""
# Initialize empty list.
filenames = []
# If the directory exists.
if os.path.exists(dir):
# Get all the filenames with matching extensions.
for filename in os.listdir(dir):
if filename.lower().endswith(self.exts):
filenames.append(filename)
return filenames
def get_paths(self, test=False):
"""
Get the full paths for the files in the data-set.
:param test:
Boolean. Return the paths for the test-set (True) or training-set (False).
:return:
Iterator with strings for the path-names.
"""
if test:
# Use the filenames and class-numbers for the test-set.
filenames = self.filenames_test
class_numbers = self.class_numbers_test
# Sub-dir for test-set.
test_dir = "test/"
else:
# Use the filenames and class-numbers for the training-set.
filenames = self.filenames
class_numbers = self.class_numbers
# Don't use a sub-dir for test-set.
test_dir = ""
for filename, cls in zip(filenames, class_numbers):
# Full path-name for the file.
path = os.path.join(self.in_dir, self.class_names[cls], test_dir, filename)
yield path
def get_training_set(self):
    """
    Return three parallel collections describing the training-set:
    the list of file-paths, the class-numbers as an integer array,
    and the class-numbers one-hot encoded.
    """
    paths = list(self.get_paths())
    cls = np.asarray(self.class_numbers)
    labels = one_hot_encoded(class_numbers=self.class_numbers,
                             num_classes=self.num_classes)
    return paths, cls, labels
def get_test_set(self):
    """
    Return three parallel collections describing the test-set:
    the list of file-paths, the class-numbers as an integer array,
    and the class-numbers one-hot encoded.
    """
    paths = list(self.get_paths(test=True))
    cls = np.asarray(self.class_numbers_test)
    labels = one_hot_encoded(class_numbers=self.class_numbers_test,
                             num_classes=self.num_classes)
    return paths, cls, labels
########################################################################
def load_cached(cache_path, in_dir):
    """
    Create a DataSet for *in_dir*, loading it from *cache_path* if a
    cached object already exists there, otherwise constructing a new
    DataSet and saving it to the cache-file for next time.

    Caching keeps the ordering of the filenames consistent across runs,
    which matters when the DataSet-object is combined with other cached
    data such as Transfer Values saved to another cache-file, see e.g.
    Tutorial #09 for an example of this.

    :param cache_path:
        File-path for the cache-file.
    :param in_dir:
        Root-dir for the files in the data-set.
        This is an argument for the DataSet-init function.
    :return:
        The DataSet-object.
    """
    print("Creating dataset from the files in: " + in_dir)
    # cache() reloads the pickled DataSet if possible, and otherwise
    # calls DataSet(in_dir=in_dir) and stores the result.
    return cache(cache_path=cache_path, fn=DataSet, in_dir=in_dir)
########################################################################
| Rauf-Kurbanov/au_dl_course | seminar_3/dataset.py | Python | gpl-3.0 | 9,038 |
from django.conf.urls import url
from django.contrib.contenttypes import views
urlpatterns = [
    # Contenttypes "shortcut" redirect view, used by the test-suite:
    # /shortcut/<content-type id>/<object id>/ resolves the object via its
    # content type and redirects to its get_absolute_url().
    url(r'^shortcut/([0-9]+)/(.*)/$', views.shortcut),
]
| nesdis/djongo | tests/django_tests/tests/v21/tests/contenttypes_tests/urls.py | Python | agpl-3.0 | 153 |
__author__ = 'kyle_xiao'
import tornado.httpclient
import urllib
import json
import hashlib
class AccessTicket(object):
    """
    Helper for the WeChat JS-SDK signature flow.

    On construction it fetches an access token with the app credentials,
    exchanges it for a ``jsapi_ticket``, and stores the fields that take
    part in the share-signature. ``sign()`` then produces the SHA-1
    signature for one concrete page URL.

    NOTE(review): uses tornado's blocking ``HTTPClient`` and the
    Python 2 ``urllib.urlencode`` API — presumably this module targets
    Python 2; confirm before porting.
    """

    def __init__(self, timestamp, appId, key, nonceStr):
        """
        :param timestamp: timestamp embedded in the signature payload
        :param appId: WeChat application id
        :param key: WeChat application secret
        :param nonceStr: random nonce string for the signature payload
        """
        self.appId = appId
        self.key = key
        # Exactly the four fields WeChat requires in the signature.
        # 'url' is filled in per-request by sign().
        self.ret = {
            'nonceStr': nonceStr,
            'jsapi_ticket': self.getTicket(),
            'timestamp': timestamp,
            'url': ""
        }

    def getAccessToken(self):
        """
        Fetch a WeChat access_token using the app id and secret.

        :return: the access_token string
        """
        client = tornado.httpclient.HTTPClient()
        response = client.fetch("https://api.weixin.qq.com/cgi-bin/token?" + \
                                urllib.urlencode(
                                    {"grant_type": "client_credential", "appid": self.appId, "secret": self.key}))
        body = json.loads(response.body)
        return body["access_token"]

    def getTicket(self, token=None):
        """
        Exchange an access token for a jsapi ticket.

        :param token: access token; fetched automatically when omitted
        :return: the jsapi ticket string
        """
        # Fixed: compare against None with 'is', not '==' (PEP 8).
        if token is None:
            token = self.getAccessToken()
        client = tornado.httpclient.HTTPClient()
        response = client.fetch("https://api.weixin.qq.com/cgi-bin/ticket/getticket?" + \
                                urllib.urlencode({"access_token": token, "type": "jsapi"}))
        body = json.loads(response.body)
        return body["ticket"]

    def sign(self, url):
        """
        Compute the WeChat JS-SDK signature for one page URL.

        :param url: the exact URL of the page doing the share
        :return: hex SHA-1 signature string
        """
        self.ret["url"] = url
        # WeChat's algorithm: lower-cased key names, key=value pairs joined
        # with '&' in key-sorted order, then SHA-1 hashed.
        string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
        signature = hashlib.sha1(string.encode('ascii')).hexdigest()
        return signature
| kylexiaox/WechatWebShareJs | apiAccess.py | Python | apache-2.0 | 1,927 |
import base64
import os
import re
from io import BytesIO
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.urls import reverse
from PIL import Image
from wiki.core.plugins import registry as plugin_registry
from wiki.models import URLPath
from wiki.plugins.images import models
from wiki.plugins.images.wiki_plugin import ImagePlugin
from ...base import ArticleWebTestUtils
from ...base import DjangoClientTestBase
from ...base import RequireRootArticleMixin
from ...base import wiki_override_settings
class ImageTests(RequireRootArticleMixin, ArticleWebTestUtils, DjangoClientTestBase):
    """
    Integration tests for the wiki 'images' plugin, exercised through the
    Django test client: uploading images via the sidebar form, rendering
    the ``[image:N]`` article markup, adding revisions, deleting/restoring
    revisions, and purging images (including their files on disk).
    """

    def setUp(self):
        super().setUp()
        self.article = self.root_article
        # A black 1x1 gif
        self.test_data = "R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="

    def _create_gif_filestream_from_base64(self, str_base64, **kwargs):
        """
        Helper function to create a filestream for upload.

        Parameters :
            str_base64 : str, base64-encoded image data
        Optional Arguments :
            filename : str, Defaults to 'test.gif'
        """
        filename = kwargs.get("filename", "test.gif")
        data = base64.b64decode(str_base64)
        filedata = BytesIO(data)
        # Wrap the raw bytes so they look like a browser file upload.
        filestream = InMemoryUploadedFile(
            filedata, None, filename, "image", len(data), None
        )
        return filestream

    def _create_test_image(self, path):
        """
        Upload the 1x1 test gif to the article at *path* through the image
        plugin's sidebar form, asserting the redirect back to the edit view.
        """
        # Get the form index
        plugin_index = -1
        for cnt, plugin_instance in enumerate(plugin_registry.get_sidebar()):
            if isinstance(plugin_instance, ImagePlugin):
                plugin_index = cnt
                break
        self.assertGreaterEqual(plugin_index, 0, msg="Image plugin not activated")
        base_edit_url = reverse("wiki:edit", kwargs={"path": path})
        # The sidebar form to submit is selected with the ?f=formN parameter.
        url = base_edit_url + "?f=form{0:d}".format(plugin_index)
        filestream = self._create_gif_filestream_from_base64(self.test_data)
        response = self.client.post(
            url,
            {
                "unsaved_article_title": self.article.current_revision.title,
                "unsaved_article_content": self.article.current_revision.content,
                "image": filestream,
                "images_save": "1",
            },
        )
        self.assertRedirects(response, base_edit_url)

    def test_index(self):
        """The image index page renders for the root article."""
        url = reverse("wiki:images_index", kwargs={"path": ""})
        response = self.client.get(
            url,
        )
        self.assertContains(response, "Images")

    def test_upload(self):
        """
        Tests that simple file upload uploads correctly
        Uploading a file should preserve the original filename.
        Uploading should not modify file in any way.
        """
        self._create_test_image("")
        # Check the object was created.
        image = models.Image.objects.get()
        image_revision = image.current_revision.imagerevision
        self.assertEqual(image_revision.get_filename(), "test.gif")
        # Stored bytes must equal the uploaded bytes exactly.
        self.assertEqual(
            image_revision.image.file.read(), base64.b64decode(self.test_data)
        )

    def get_article(self, cont, image):
        """
        Create a child article with content *cont*, optionally upload the
        test image to it, and return the article's rendered HTML.
        """
        urlpath = URLPath.create_urlpath(
            URLPath.root(), "html_image", title="TestImage", content=cont
        )
        if image:
            self._create_test_image(urlpath.path)
        return urlpath.article.render()

    def test_image_missing(self):
        """[image:1] with no uploaded image renders the 'not found' figure."""
        output = self.get_article("[image:1]", False)
        expected = (
            '<figure class="thumbnail"><a href="">'
            '<div class="caption"><em>Image not found</em></div>'
            '</a><figcaption class="caption"></figcaption></figure>'
        )
        self.assertEqual(output, expected)

    def test_image_default(self):
        """[image:1] with no options renders a plain thumbnail figure."""
        output = self.get_article("[image:1]", True)
        image_rev = models.Image.objects.get().current_revision.imagerevision
        # The thumbnail src is a cache file with a generated name, so match
        # the markup with a regex rather than an exact string.
        expected = re.compile(
            r'<figure class="thumbnail">'
            r'<a href="' + re.escape(image_rev.image.url) + '">'
            r'<img alt="test\.gif" src="/?cache/.*\.jpg">'
            r'</a><figcaption class="caption"></figcaption></figure>'
        )
        self.assertRegex(output, expected)

    def test_image_large_right(self):
        """align:right adds the float-right class to the figure."""
        output = self.get_article("[image:1 align:right size:large]", True)
        image_rev = models.Image.objects.get().current_revision.imagerevision
        expected = re.compile(
            r'<figure class="thumbnail float-right">'
            r'<a href="' + re.escape(image_rev.image.url) + '">'
            r'<img alt="test\.gif" src="/?cache/.*\.jpg"></a>'
            r'<figcaption class="caption"></figcaption></figure>'
        )
        self.assertRegex(output, expected)

    def test_image_orig(self):
        """size:orig uses the original image URL instead of a cached thumbnail."""
        output = self.get_article("[image:1 size:orig]", True)
        image_rev = models.Image.objects.get().current_revision.imagerevision
        expected = (
            '<figure class="thumbnail">'
            '<a href="' + image_rev.image.url + '">'
            '<img alt="test.gif" src="' + image_rev.image.url + '"></a>'
            '<figcaption class="caption"></figcaption></figure>'
        )
        self.assertEqual(output, expected)

    # https://gist.github.com/guillaumepiot/817a70706587da3bd862835c59ef584e
    def generate_photo_file(self):
        """Return an in-memory 100x100 red gif suitable for upload."""
        file = BytesIO()
        image = Image.new("RGBA", size=(100, 100), color=(155, 0, 0))
        image.save(file, "gif")
        file.name = "test.gif"
        file.seek(0)
        return file

    def test_add_revision(self):
        """Posting a new image file creates a new revision linked to the old one."""
        self._create_test_image(path="")
        image = models.Image.objects.get()
        before_edit_rev = image.current_revision.revision_number
        response = self.client.post(
            reverse(
                "wiki:images_add_revision",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
            data={"image": self.generate_photo_file()},
        )
        self.assertRedirects(response, reverse("wiki:edit", kwargs={"path": ""}))
        image = models.Image.objects.get()
        # Still a single Image object; the upload became a new revision.
        self.assertEqual(models.Image.objects.count(), 1)
        self.assertEqual(
            image.current_revision.previous_revision.revision_number, before_edit_rev
        )

    def test_delete_restore_revision(self):
        """Delete marks the current revision deleted; restore clears the flag.

        Both operations add a new revision chained to the previous one.
        """
        self._create_test_image(path="")
        image = models.Image.objects.get()
        before_edit_rev = image.current_revision.revision_number
        response = self.client.get(
            reverse(
                "wiki:images_delete",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        image = models.Image.objects.get()
        self.assertEqual(models.Image.objects.count(), 1)
        self.assertEqual(
            image.current_revision.previous_revision.revision_number, before_edit_rev
        )
        self.assertIs(image.current_revision.deleted, True)
        # RESTORE
        before_edit_rev = image.current_revision.revision_number
        response = self.client.get(
            reverse(
                "wiki:images_restore",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        image = models.Image.objects.get()
        self.assertEqual(models.Image.objects.count(), 1)
        self.assertEqual(
            image.current_revision.previous_revision.revision_number, before_edit_rev
        )
        self.assertFalse(image.current_revision.deleted)

    def test_purge(self):
        """
        Tests that an image is really purged
        """
        self._create_test_image(path="")
        image = models.Image.objects.get()
        image_revision = image.current_revision.imagerevision
        f_path = image_revision.image.file.name
        # The uploaded file must exist on disk before the purge...
        self.assertIs(os.path.exists(f_path), True)
        response = self.client.post(
            reverse(
                "wiki:images_purge",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
            data={"confirm": True},
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        # ...and be gone, together with the database object, afterwards.
        self.assertEqual(models.Image.objects.count(), 0)
        self.assertIs(os.path.exists(f_path), False)

    def test_add_revision_purge_image(self):
        """
        Tests that an image with more than one revision is really purged
        """
        # use another test to stage this one
        self.test_add_revision()
        image = models.Image.objects.get()
        image_revision = image.current_revision.imagerevision
        f_path = image_revision.image.file.name
        self.assertIs(os.path.exists(f_path), True)
        response = self.client.post(
            reverse(
                "wiki:images_purge",
                kwargs={
                    "article_id": self.root_article,
                    "image_id": image.pk,
                    "path": "",
                },
            ),
            data={"confirm": True},
        )
        self.assertRedirects(
            response, reverse("wiki:images_index", kwargs={"path": ""})
        )
        self.assertEqual(models.Image.objects.count(), 0)
        self.assertIs(os.path.exists(f_path), False)

    @wiki_override_settings(ACCOUNT_HANDLING=True)
    def test_login_on_revision_add(self):
        """An anonymous user posting a new revision is redirected to login."""
        self._create_test_image(path="")
        self.client.logout()
        image = models.Image.objects.get()
        url = reverse(
            "wiki:images_add_revision",
            kwargs={"article_id": self.root_article, "image_id": image.pk, "path": ""},
        )
        response = self.client.post(url, data={"image": self.generate_photo_file()})
        self.assertRedirects(response, "{}?next={}".format(reverse("wiki:login"), url))
| nert-gu/Xposition | tests/plugins/images/test_views.py | Python | gpl-3.0 | 10,458 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.