| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 value) | license (15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
MarkusH/talk-django-elasticsearch
|
blog/views.py
|
Python
|
bsd-3-clause
| 2,259
| 0.001771
|
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, render
from django.utils.timezone import now
from django.views.generic import DetailView, ListView
from .models import Article, Category
from .search import Article as SearchArticle
def index(request):
return redirect(reverse('blog:article-list'))
class ArticleDetailView(DetailView):
model = Article
def get_queryset(self):
qs = super(ArticleDetailView, self).get_queryset()
qs = qs.select_related('author', 'category')
if not self.request.user.is_authenticated():
qs = qs.filter(is_public=True, publish_datetime__lte=now())
return qs
article_detail = ArticleDetailView.as_view()
class ArticleListView(ListView):
model = Article
def get_queryset(self):
qs = super(ArticleListView, self).get_queryset()
qs = qs.select_related('author', 'category')
if not self.request.user.is_authenticated():
qs = qs.filter(is_public=True, publish_datetime__lte=now())
return qs
article_list = ArticleListView.as_view()
class CategoryDetailView(DetailView):
model = Category
def get_context_data(self, **kwargs):
ctx = super(CategoryDetailView, self).get_context_data(**kwargs)
articles = self.object.article_set
if not self.request.user.is_authenticated():
articles = articles.filter(
is_public=True, publish_datetime__lte=now()
)
ctx['articles'] = articles.all()
return ctx
category_detail = CategoryDetailView.as_view()
class CategoryListView(ListView):
model = Category
category_list = CategoryListView.as_view()
def search(request):
q = request.GET.get('q', '')
ctx = {'query': q, 'results': []}
if q:
search = SearchArticle.search()
search = search.query(
'simple_query_string',
query=q,
fields=['title', 'text']
)
if not request.user.is_authenticated():
search = search.filter('term', is_public=True)
search = search.filter('range', publish_datetime={'lte': 'now'})
ctx['results'] = search.execute()
return render(request, 'blog/search.html', ctx)
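# For reference, a sketch of the request body the chained calls above build,
# assuming standard elasticsearch-dsl semantics (an added illustration, not
# taken from this repo; filter() adds clauses in the non-scoring bool/filter
# context):
#
#   {"query": {"bool": {
#       "must": [{"simple_query_string": {"query": q,
#                                         "fields": ["title", "text"]}}],
#       "filter": [{"term": {"is_public": true}},
#                  {"range": {"publish_datetime": {"lte": "now"}}}]}}}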
|
aknh9189/code
|
physicsScripts/flotation/flotation.py
|
Python
|
mit
| 1,813
| 0.04909
|
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt #import modules
import matplotlib.patches as mpatches
import numpy as np
#get_ipython().magic(u'matplotlib inline') # set to inline for ipython
# In[2]:
water = [0,2,2,3,1.5,1.5,3,2,2,2,2,2.5,2] #arrange data from lab
alc = [0,2.5,2.5,2.5,2.5,3,2.5,2.5]
weight = [20.9+(0.41*5*x) for x in range(0,13)] #generate weight array, based on mass of paper clips, in grams
actWater = [(22)+sum(water[:x+1]) for x in range(0,len(water))] # cumulative sum of water displacement
actalc = [(28)+sum(alc[:x+1]) for x in range(0,len(alc))] # cumulative sum of alc displacement
slopeAlc, intercept = np.polyfit(weight[:len(actalc)], actalc, 1) # mL/g; find average slope of alc, invert to find density
slopeWater, interssss = np.polyfit(weight, actWater, 1) #repeat for water
print slopeWater,slopeAlc #print values
densityWater = 1/(slopeWater * 0.001) #invert and convert to kg/m^3
densityAlc = 1/(slopeAlc * 0.001)
print densityWater, densityAlc #print them
# In[3]:
actualWater = 1000 # finding percent errors in densities kg/m^3
actualAlc = 789
pErrorWater = (abs(actualWater-densityWater)/actualWater) * 100 #find percent errors
pErrorAlc = (abs(actualAlc-densityAlc)/actualAlc) *100
print pErrorWater, pErrorAlc #print percent errors
# In[4]:
plt.figure() #create figure
plt.plot(weight,actWater,"o") # plot scatter of water vs weight (ml/g)
plt.plot(weight[:len(actalc)],actalc,"o") #plot scatter of actalc
plt.xlabel("Mass (g)") #add labels
plt.ylabel("Displacement (mL)") #add labels
plt.show() #show figure
# In[5]:
x = [0,1,2,3,4] ##TESTING np.polyfit
y = [0,0.5,1,1.5,2]
plt.figure()
plt.plot(y,x)
slope,inter = np.polyfit(y,x,1)
print slope
# In[9]:
densityAlc * (1/100.0**3) *1000 ##TESTING CONVERSION OF DENSITY
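# A quick sanity check on the unit handling above (an added illustration, not
# from the original notebook): a slope of 1.0 mL/g inverts to
# 1/(1.0 * 0.001) = 1000 kg/m^3 (water), and the factor used in the cell
# above converts kg/m^3 back to g/cm^3:
assert abs(1/(1.0 * 0.001) - 1000.0) < 1e-9
assert abs(1000.0 * (1/100.0**3) * 1000 - 1.0) < 1e-9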
# In[ ]:
|
ep1cman/workload-automation
|
wlauto/utils/fps.py
|
Python
|
apache-2.0
| 4,301
| 0.00186
|
# Copyright 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class FpsProcessor(object):
"""
Provides common object for processing surfaceFlinger output for frame
statistics.
This processor returns the four frame statistics below:
:FPS: Frames Per Second. This is the frame rate of the workload.
:frames: The total number of frames rendered during the execution of
the workload.
:janks: The number of "janks" that occurred during execution of the
workload. Janks are sudden shifts in frame rate. They result
in a "stuttery" UI. See http://jankfree.org/jank-busters-io
:not_at_vsync: The number of frames that did not render in a single
vsync cycle.
"""
def __init__(self, data, action=None):
"""
data - a pandas.DataFrame object with frame data (e.g. frames.csv)
action - output metrics names with additional action specifier
"""
self.data = data
self.action = action
def process(self, refresh_period, drop_threshold): # pylint: disable=too-many-locals
"""
Generate frame per second (fps) and associated metrics for workload.
refresh_period - the vsync interval
drop_threshold - data points below this fps will be dropped
"""
fps = float('nan')
frame_count, janks, not_at_vsync = 0, 0, 0
vsync_interval = refresh_period
# filter out bogus frames.
bogus_frames_filter = self.data.actual_present_time != 0x7fffffffffffffff
actual_present_times = self.data.actual_present_time[bogus_frames_filter]
actual_present_time_deltas = actual_present_times - actual_present_times.shift()
actual_present_time_deltas = actual_present_time_deltas.drop(0)
vsyncs_to_compose = actual_present_time_deltas / vsync_interval
vsyncs_to_compose.apply(lambda x: int(round(x, 0)))  # note: result discarded; later math uses the raw ratios
# drop values lower than drop_threshold FPS as real in-game frame
# rate is unlikely to drop below that (except on loading screens
# etc, which should not be factored in frame rate calculation).
per_frame_fps = (1.0 / (vsyncs_to_compose * (vsync_interval / 1e9)))
keep_filter = per_frame_fps > drop_threshold
filtered_vsyncs_to_compose = vsyncs_to_compose[keep_filter]
per_frame_fps.name = 'fps'
if not filtered_vsyncs_to_compose.empty:
total_vsyncs = filtered_vsyncs_to_compose.sum()
frame_count = filtered_vsyncs_to_compose.size
if total_vsyncs:
fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
janks = self._calc_janks(filtered_vsyncs_to_compose)
not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
metrics = (fps, frame_count, janks, not_at_vsync)
return per_frame_fps, metrics
@staticmethod
def _calc_janks(filtered_vsyncs_to_compose):
"""
Internal method for calculating jank frames.
"""
pause_latency = 20
vtc_deltas = filtered_vsyncs_to_compose - filtered_vsyncs_to_compose.shift()
vtc_deltas.index = range(0, vtc_deltas.size)
vtc_deltas = vtc_deltas.drop(0).abs()
janks = vtc_deltas.apply(lambda x: (pause_latency > x > 1.5) and 1 or 0).sum()
return janks
@staticmethod
def _calc_not_at_vsync(vsyncs_to_compose):
"""
Internal method for calculating the number of frames that did not
render in a single vsync cycle.
"""
epsilon = 0.0001
func = lambda x: (abs(x - 1.0) > epsilon) and 1 or 0
not_at_vsync = vsyncs_to_compose.apply(func).sum()
return not_at_vsync
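# A minimal usage sketch (an added illustration, not part of the original
# module), assuming a pandas DataFrame with an `actual_present_time` column
# in nanoseconds. On a 60 Hz panel (refresh_period = 16666667 ns) a frame
# composed every vsync runs at ~60 fps, one taking two vsyncs at ~30 fps:
#
#   >>> import pandas as pd
#   >>> data = pd.DataFrame({'actual_present_time': [0, 16666667, 50000000]})
#   >>> per_frame_fps, (fps, frames, janks, nav) = FpsProcessor(data).process(
#   ...     refresh_period=16666667, drop_threshold=5)
#   >>> list(per_frame_fps.round(0))
#   [60.0, 30.0]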
|
aequorea/gly
|
versions/gly19.py
|
Python
|
gpl-2.0
| 8,326
| 0.036872
|
#!/usr/bin/python
# gly19.py -- protein glycosylator
# 2016.10.05 -- first version -- John Saeger
# gly19 -- 2017.5.27
# This version complains about incomplete sidechains. You must fix them up.
# I use pymol's mutate wizard to mutate a residue to itself.
# This version has a slightly more sophisticated solvent exposure test.
# You can tune it with the solv_thresh variable (below).
# This version doesn't check if glycosylation sites are too close to
# each other, so it prints out more sites. Use judgement when placing
# glycans close together. You probably won't want to try all sites at
# the same time. It also doesn't cut off based on the bioinformatics
# scoring algorithm. We show everything except things like sites with
# prolines in the immediate neighborhood. After all, I don't know for sure
# if the bioinformatics based scoring is actually telling me anything.
file_prefix = "1yfpA"
input_file = file_prefix + ".pdb" # PDB input file
exposed_file = file_prefix + "_exposed_atm.pdb" # PDB file with solvent exposed atoms
sheet_file = file_prefix + "_sheet.pdb" # PDB file with beta sheet residues
helix_file = file_prefix + "_helix.pdb" # PDB file with alpha helix residues
solv_thresh = 0.33 # this fraction of sidechain atoms exposed to declare residue solvent exposed
res_letters = {'ALA':'A', 'ARG':'R', 'ASN':'N', 'ASP':'D', 'CYS':'C', 'GLU':'E',
'GLN':'Q', 'GLY':'G', 'HIS':'H', 'ILE':'I', 'LEU':'L', 'LYS':'K', 'MET':'M',
'PHE':'F', 'PRO':'P', 'SER':'S', 'THR':'T', 'TRP':'W', 'TYR':'Y', 'VAL':'V'}
res_atoms = {'ALA':6, 'ARG':12, 'ASN':9, 'ASP':9, 'CYS':7, 'GLU':10,
'GLN':10, 'GLY':5, 'HIS':11, 'ILE':9, 'LEU':9, 'LYS':10, 'MET':9,
'PHE':12, 'PRO':8, 'SER':7, 'THR':8, 'TRP':15, 'TYR':13, 'VAL':8}
hydro = { 'A':18, 'R':-45, 'N':-35, 'D':-35, 'C':25, 'E':-35,
'Q':-35, 'G':-4, 'H':-32, 'I':45, 'L':38, 'K':-39, 'M':19,
'F':28, 'P':-16, 'S':-8, 'T':-7, 'W':-9, 'Y':-13, 'V':42,
'Z':0 }
def read_pdb(fn, chain): # read selected chain in PDB file
try:
pdb = [] # clean it up a little
for line in open(fn, 'r'):
l = line.strip().split()
if l[0] != 'ATOM' or l[4] != chain: continue
pdb.append([l[0], l[1], l[2], l[3], l[4], l[5]])
except:
print("Couldn't read {}".format(fn))
quit()
return pdb
def get_sequence(fn, chain): # get sequence of selected chain from PDB file
pdb = read_pdb(fn, chain)
seq = ""
resnum = 0
atomnum = 0
for l in pdb:
if resnum == 0 and atomnum == 0:
l=pdb[0]
resname = l[3]
atomnum += 1
if int(l[5]) != resnum:
oldresnum = resnum
oldresname = resname
oldatomnum = atomnum
resname = l[3]
atomnum = 0
if int(l[5]) != resnum+1: # see if there is a resnum skip
while resnum < int(l[5])-1:
resnum += 1
seq += 'Z' # set to 'Z' if there was a skip
resnum += 1
try:
seq += res_letters[l[3]]
if res_atoms[oldresname]-1 != oldatomnum and oldresnum != 0:
print("Warning! residue {} {} needs {} atoms but only has {}").format(oldresnum, oldresname, res_atoms[oldresname]-1, oldatomnum)
except:
seq += 'Z' # set to 'Z' if residue unknown
return seq
def get_singles(fn, chain): # get solvent exposed atoms from PDB file
pdb = read_pdb(fn, chain)
resnum = 0
atoms = 0 # we'll be counting sidechain atoms
singles = []
for l in pdb:
if resnum == 0 and atoms == 0:
l=pdb[0]
resname = l[3]
if l[2] not in ['N', 'C', 'O'] and int(l[5]) == resnum: # had CA CB
atoms += 1
if int(l[5]) != resnum:
oldresnum = resnum
oldresname = resname
oldatoms = atoms
resname = l[3]
resnum = int(l[5])
if l[2] not in ['N', 'C', 'O']:
atoms = 1
else:
atoms = 0
if oldresname in res_atoms:
if oldatoms > solv_thresh * (res_atoms[oldresname]-4) and oldresnum != 0:
singles.append(oldresnum)
return singles
def get_exposed(fn, chain): # get solvent exposed residues
pdb = read_pdb(fn, chain)
singles = get_singles(fn, chain)
pairs = [] # get solvent exposed pairs
for s in singles:
if s+2 in singles: pairs.append(s)
return singles, pairs
def get_residues(fn, chain):
pdb = read_pdb(fn, chain)
residues = []
for l in pdb:
if int(l[5]) not in residues:
residues.append(int(l[5]))
return residues
def get_random_coil(input_file, chain, sheet, helix):
all = get_residues(input_file, chain)
coil = []
for res in all:
if res not in sheet and res not in helix:
coil.append(res)
return coil
def sheet_score(hydro, seq, n):
score = 0
score += hydro[seq[n-2]] # hydrophobic
score += hydro[seq[n-1]] # hydrophobic
score -= hydro[seq[n]] # hydrophilic
score += hydro[seq[n+1]] # hydrophobic
score -= hydro[seq[n+2]] # hydrophilic
score += hydro[seq[n+3]] # hydrophobic
return score
def helix_score(hydro, seq, n):
score = 0
# score -= hydro[seq[n-2]] # hydrophilic
# score -= hydro[seq[n-1]] # hydrophilic
score -= hydro[seq[n]] # hydrophilic
score -= hydro[seq[n+1]] # hydrophilic
score -= hydro[seq[n+2]] # hydrophilic
# score -= hydro[seq[n+3]] # hydrophilic
return score
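# A worked example (an added illustration, not from the original script): for
# seq = "AILNTVA" and n = 3 (the hydrophilic 'N'), sheet_score adds the
# hydropathies of the alternating neighbors:
#   +I +L -N +T -V +A = 45 + 38 + 35 - 7 - 42 + 18 = 87
# so a hydrophilic residue flanked by hydrophobic ones scores high.
#   >>> sheet_score(hydro, "AILNTVA", 3)
#   87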
def get_glyco_sites(score_func, seq, allowed1, allowed2):
scores = [] # score potential glycosylation sites ...
for n in range(2, len(seq)-3): # ... based on hydropathy
score = score_func(hydro, seq, n)
# if seq[n+1] == 'N': score = -999 # exclude N in the middle
if 'P' in seq[n-2:n+4]: score = -999 # exclude prolines
if 'Z' in seq[n-2:n+4]: score = -999 # exclude skipped or unknown residues
scores.append([n+1, int(score)]) # sequence starts at 0, PDB starts at 1
n += 1
# sorted_scores = sorted(scores, key=lambda s: s[1], reverse=True)
glyco = [] # narrow down the choices
glyco_short = []
for sc in scores:
if sc[1] <= -999: continue # score too low
if sc[0] not in allowed1 or sc[0] not in allowed2: continue
delta_h = int(hydro['N']+hydro['T']-hydro[seq[sc[0]-1]]-hydro[seq[sc[0]+1]])
glyco.append([sc[0], sc[1], seq[sc[0]-1:sc[0]+2], delta_h])
glyco_short.append(sc[0])
return glyco, glyco_short
def show_sites(common, score_func, name, seq, allowed1, allowed2):
sites, short = get_glyco_sites(score_func, seq, allowed1, allowed2)
print("\nChain {} has {} potential {} glycosylation sites:".format(chain, len(sites), name))
print("[site, score, seq, delta hydropathy]")
for gs in sites: print gs
if len(common) == 0:
common.update(short)
else:
common = common & set(short)
scommon = sorted(common)
return scommon
# main
sheet_common = set()
helix_common = set()
coil_common = set()
total_common = set()
for chain in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
sequence = get_sequence(input_file, chain)
if len(sequence) == 0: continue
print("\nChain {} sequence has {} residues:\n{}".format(chain, len(sequence), sequence))
singles, pairs = get_exposed(exposed_file, chain)
print("\nChain {} has {} solvent accessible residues:\n{}".format(chain, len(singles), singles))
print("\nChain {} has {} solvent accessible pairs:\n{}".format(chain, len(pairs), pairs))
sheet = get_residues(sheet_file, chain)
print("\nChain {} has {} beta sheet residues:\n{}".format(chain, len(sheet), sheet))
helix = get_residues(helix_file, chain)
print("\nChain {} has {} alpha helix residues:\n{}".format(chain, len(helix), helix))
coil = get_random_coil(input_file, chain, sheet, helix)
print("\nChain {} has {} random coil residues:\n{}".format(chain, len(coil), coil))
sheet_common = show_sites(sheet_common, sheet_score, "beta sheet", sequence, pairs, sheet)
helix_common = show_sites(helix_common, helix_score, "alpha helix", sequence, pairs, helix)
coil_common = show_sites(coil_common, helix_score, "random coil", sequence, pairs, coil)
print("\nThere are {} common potential beta sheet glycosylation sites:".format(len(sheet_common)))
print(list(sheet_common))
print("\nThere are {} common potential alpha helix glycosylation sites:".format(len(helix_common)))
print(list(helix_common))
print("\nThere are {} common potential random coil glycosylation sites:".format(len(coil_common)))
print(list(coil_common))
total_common = sheet_common + helix_common + coil_common
stotal_co
|
kiyoto/statsmodels
|
statsmodels/datasets/cancer/data.py
|
Python
|
bsd-3-clause
| 1,743
| 0.010901
|
"""Breast Cancer Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """???"""
TITLE = """Breast Cancer Data"""
SOURCE = """
This is the breast cancer data used in Owen's empirical likelihood. It is taken from
Rice, J.A. Mathematical Statistics and Data Analysis.
http://www.cengage.com/statistics/discipline_content/dataLibrary.html
"""
DESCRSHORT = """Breast Cancer and county population"""
DESCRLONG = """The number of breast cancer observations in various counties"""
#suggested notes
NOTE = """::
Number of observations: 301
Number of variables: 2
Variable name definitions:
cancer - The number of breast cancer observations
population - The population of the county
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
with open(filepath + '/cancer.csv', 'rb') as f:
data = np.recfromtxt(f, delimiter=",", names=True, dtype=float)
return data
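# A hedged usage sketch (an added illustration, not part of the original
# module), assuming the packaged dataset is importable as
# statsmodels.datasets.cancer:
#   >>> from statsmodels.datasets import cancer
#   >>> dataset = cancer.load_pandas()
#   >>> dataset.endog.head()   # cancer counts; dataset.exog holds population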
|
bitrat/alarm-fingerprint
|
AlarmGnuRadioFiles/Spectra_FileInput_To_BinarySlice_Local_only.py
|
Python
|
gpl-2.0
| 10,425
| 0.011415
|
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Top Block
# Generated: Thu Oct 8 20:41:39 2015
##################################################
from datetime import datetime
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.wxgui import forms
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
import time
class top_block(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Top Block")
_icon_path = "/usr/share/icons/hicolor/32x32/apps/gnuradio-grc.png"
self.SetIcon(wx.Icon(_icon_path, wx.BITMAP_TYPE_ANY))
##################################################
# Variables
##################################################
self.symb_rate = symb_rate = 40000
self.samp_rate = samp_rate = 10e6
self.decimation = decimation = 250
self.symb_rate_slider = symb_rate_slider = 4000
self.samp_per_sym = samp_per_sym = int((samp_rate/(decimation)) / symb_rate)
self.freq_offset = freq_offset = 1.8e6
self.freq = freq = 433.92e6
self.channel_trans = channel_trans = 1.2e6
self.channel_spacing = channel_spacing = 3000000+2000000
self.initpathprefix = initpathprefix = "/home/user/alarm-fingerprint/AlarmGnuRadioFiles/"
self.pathprefix = pathprefix = "/home/user/alarm-fingerprint/AlarmGnuRadioFiles/Captured/"
self.finput = finput = initpathprefix+"Capture_init.cap"
self.foutput = foutput = pathprefix+finput.rsplit("/", 1)[1]
self.addconst = addconst = 0
self.recfile4 = recfile4 = initpathprefix+"/init/_AddConst"+str(addconst)+ "_Spectra.dat"
##################################################
# Blocks
##################################################
_channel_trans_sizer = wx.BoxSizer(wx.VERTICAL)
self._channel_trans_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_channel_trans_sizer,
value=self.channel_trans,
callback=self.set_channel_trans,
label='channel_trans',
converter=forms.float_converter(),
proportion=0,
)
self._channel_trans_slider = forms.slider(
parent=self.GetWin(),
sizer=_channel_trans_sizer,
value=self.channel_trans,
callback=self.set_channel_trans,
minimum=0,
maximum=1.8e6,
num_steps=10,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_channel_trans_sizer)
_symb_rate_slider_sizer = wx.BoxSizer(wx.VERTICAL)
self._symb_rate_slider_text_box = forms.text_box(
parent=self.GetWin(),
sizer=_symb_rate_slider_sizer,
value=self.symb_rate_slider,
callback=self.set_symb_rate_slider,
label='symb_rate_slider',
converter=forms.float_converter(),
proportion=0,
)
self._symb_rate_slider_slider = forms.slider(
parent=self.GetWin(),
sizer=_symb_rate_slider_sizer,
value=self.symb_rate_slider,
callback=self.set_symb_rate_slider,
minimum=0,
maximum=10e3,
num_steps=100,
style=wx.SL_HORIZONTAL,
cast=float,
proportion=1,
)
self.Add(_symb_rate_slider_sizer)
self.low_pass_filter_0 = filter.fir_filter_fff(decimation, firdes.low_pass(
1, samp_rate, 8e3, 1.8e6, firdes.WIN_BLACKMAN, 6.76))
self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_ccc(1, (firdes.low_pass(1, samp_rate, channel_spacing, channel_trans, firdes.WIN_BLACKMAN,6.76)), -freq_offset, samp_rate)
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(samp_per_sym*(1+0.0), 0.25*0.175*0.175, 0.5, 0.175, 0.005)
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_file_source_0 = blocks.file_source(gr.sizeof_gr_complex*1, finput, False)
self.blocks_file_sink_1 = blocks.file_sink(gr.sizeof_char*1, recfile4, False)
self.blocks_file_sink_1.set_unbuffered(False)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
self.blocks_add_const_vxx_0 = blocks.add_const_vff((addconst, ))
##################################################
# Connections
##################################################
self.connect((self.low_pass_filter_0, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.low_pass_filter_0, 0))
self.connect((self.blocks_file_source_0, 0), (self.blocks_throttle_0, 0))
self.connect((self.blocks_throttle_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.blocks_file_sink_1, 0))
def get_pathprefix(self):
return self.pathprefix
def set_pathprefix(self, pathprefix):
self.pathprefix = pathprefix
self.set_finput(self.pathprefix+"Capture_init.cap")
def get_finput(self):
return self.finput
def set_finput(self, finput):
self.finput = finput
self.set_foutput("/home/user/alarm-fingerprint/AlarmGnuRadioFiles/Captured/"+finput.rsplit("/", 1)[1])
self.blocks_file_source_0.open(self.finput, False)
def get_symb_rate(self):
return self.symb_rate
def set_symb_rate(self, symb_rate):
self.symb_rate = symb_rate
self.set_samp_per_sym(int((self.samp_rate/self.decimation) / self.symb_rate))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.set_samp_per_sym(int((self.samp_rate/self.decimation) / self.symb_rate))
self.freq_xlating_fir_filter_xxx_0.set_taps((firdes.low_pass(1, self.samp_rate, self.channel_spacing, self.channel_trans, firdes.WIN_BLACKMAN,6.76)))
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, 8e3, 1.8e6, firdes.WIN_BLACKMAN, 6.76))
self.blocks_throttle_0.set_sample_rate(self.samp_rate)
def get_foutput(self):
return self.foutput
def set_foutput(self, foutput):
self.foutput = foutput
self.set_recfile4(self.foutput+"_AddConst"+str(self.addconst)+ "_Spectra.dat")
def get_decimation(self):
return self.decimation
def set_decimation(self, decimation):
self.decimation = decimation
self.set_samp_per_sym(int((self.samp_rate/self.decimation) / self.symb_rate))
def get_channel_spacing(self):
return self.channel_spacing
def set_channel_spacing(self, channel_spacing):
self.channel_spacing = channel_spacing
self.set_freq_offset((self.channel_spacing/2)+(self.channel_spacing * .1))
self.freq_xlating_fir_filter_xxx_0.set_taps((firdes.low_pass(1, self.samp_rate, self.channel_spacing, self.channel_trans, firdes.WIN_BLACKMAN,6.76)))
def get_addconst(self):
return self.addconst
def set_addconst(self, addconst):
self.addconst = addconst
self.set_recfile4(self.foutput+"_AddConst"+str(self.addconst)+ "_Spectra.dat")
self.blocks_add_const_vxx_0.set_k((self.addconst, ))
def get_symb_rate_slider(self):
return self.symb_rate_slider
def set_symb_rate_slider(self, symb_rate_slider):
self.symb_rate_slider = symb_rate_slider
self._symb_rate_slider_slider.set_value(self.symb_rate_slider)
|
gliheng/Mojo
|
mojo/scripts/dump_blog_data.py
|
Python
|
gpl-2.0
| 1,041
| 0.001921
|
import os
import sys
import transaction
from pyramid.paster import bootstrap
import transaction
from mojo.models import root_factory
from mojo.blog.models import get_blogroot
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def dump(root):
root = get_blogroot(root)
for blog in root.blogs.itervalues():
file = open('data/' + blog.__name__ + '.md', 'w+')
file.write('---\n')
file.write('title: ' + blog.title.encode('utf-8') + '\n')
file.write('timestamp: ' + blog.timestamp.ctime() + '\n')
file.write('tags: ' + str(blog.tags) + '\n')
file.write('category: ' + blog.category + '\n')
file.write('---\n')
file.write(blog.raw_content.encode('utf-8'))
file.close()
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
env = bootstrap(config_uri)
root = root_factory(env['request'])
dump(root)
env['closer']()
|
pythonlittleboy/python_gentleman_crawler
|
util/ImageSimilar.py
|
Python
|
apache-2.0
| 3,416
| 0.005423
|
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def make_regalur_image(img, size=(256, 256)):
return img.resize(size).convert('RGB')
# geometric normalization: resize everything to 256*256 pixels
def split_image(img, part_size=(64, 64)):
w, h = img.size
pw, ph = part_size
assert w % pw == h % ph == 0
return [img.crop((i, j, i + pw, j + ph)).copy() \
for i in range(0, w, pw) \
for j in range(0, h, ph)]
# region = img.crop(box)
# copies the image in img into region, which can then be used for further
# operations (region is itself an Image object; box is a 4-tuple of
# left/upper/right/lower coordinates)
def hist_similar(lh, rh):
assert len(lh) == len(rh)
return sum(1 - (0 if l == r else float(abs(l - r)) / max(l, r)) for l, r in zip(lh, rh)) / len(lh)
# computes the per-bin similarity of the two histograms; zip pairs up the corresponding entries of the input arrays
def calc_similar(li, ri):
# return hist_similar(li.histogram(), ri.histogram())
return sum(
hist_similar(l.histogram(), r.histogram()) for l, r in zip(split_image(li), split_image(ri))) / 16.0 # 256>64
# histogram() tallies the pixel values of a region into bins and returns
# the per-bin counts, i.e. roughly how the colors are distributed.
# The 64x64 parts produced by split_image are zipped together, each pair of
# histograms is compared, and the similarities are averaged (256 -> 64,
# giving the 16 parts behind the division above)
def calc_similar_by_path(lf, rf):
li, ri = make_regalur_image(Image.open(lf)), make_regalur_image(Image.open(rf))
return calc_similar(li, ri)
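# A worked example of hist_similar (an added illustration, not from the
# original module): equal bins contribute 1.0, and differing bins are
# penalized by their relative difference:
#   >>> hist_similar([2, 0, 4], [1, 0, 8])   # (0.5 + 1.0 + 0.5) / 3
#   0.6666666666666666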
def make_doc_data(lf, rf):
li, ri = make_regalur_image(Image.open(lf)), make_regalur_image(Image.open(rf))
li.save(lf + '_regalur.png') # convert the image format: img.save('file.jpg') writes the temporary
ri.save(rf + '_regalur.png') # img objects to disk
fd = open('stat.csv', 'w') # write the per-bin histogram counts out to stat.csv for inspection
# the key step: map both histograms to strings and write them side by side
fd.write('\n'.join(l + ',' + r for l, r in zip(map(str, li.histogram()), map(str, ri.histogram()))))
# print >>fd, '\n'
# fd.write(','.join(map(str, ri.histogram())))
fd.close()
li = li.convert('RGB') # convert the mode, as with save above
draw = ImageDraw.Draw(li)
for i in range(0, 256, 64):
draw.line((0, i, 256, i), fill='#ff0000')
draw.line((i, 0, i, 256), fill='#ff0000')
# draw red grid lines across the whole image to mark the 64x64 split regions
# usage: pygame.draw.line(Surface, color, start_pos, end_pos, width=1)
li.save(lf + '_lines.png')
'''
if __name__ == '__main__':
path = r'testpic/TEST%d/%d.JPG'
for i in xrange(1, 7):
print ('test_case_%d: %.3f%%' % (i, \
calc_similar_by_path('testpic/TEST%d/%d.JPG' % (i, 1),
'testpic/TEST%d/%d.JPG' % (i, 2)) * 100))
'''
# print(calc_similar_by_path('D:\MyDrivers\cache\images\Ai\KAWD-793.jpg', 'D:\MyDrivers\cache\images\Ai\TEK-086.jpg'))
|
twizoo/semantic
|
semantic/test/testDates.py
|
Python
|
mit
| 6,209
| 0.000161
|
import datetime
import unittest
from freezegun import freeze_time
from semantic.dates import DateService
@freeze_time('2014-01-01 00:00')
class TestDate(unittest.TestCase):
def compareDate(self, input, target):
service = DateService()
result = service.extractDate(input)
self.assertEqual(target, result)
def compareTime(self, input, target):
service = DateService()
result = service.extractTime(input)
self.assertEqual(target, result)
def compareDates(self, input, targets):
service = DateService()
results = service.extractDates(input)
for (result, target) in zip(results, targets):
self.assertEqual(target, result)
def compareTimes(self, input, targets):
service = DateService()
results = service.extractDates(input)
for (result, target) in zip(results, targets):
self.assertEqual(target.time(), result.time())
#
# Date Tests
#
def testExactWords(self):
input = "Remind me on January Twenty Sixth"
target = "2014-01-26"
self.compareDate(input, target)
def testExactWordsDash(self):
input = "Remind me on January Twenty-Sixth"
target = "2014-01-26"
self.compareDate(input, target)
def testExactNums(self):
input = "Remind me on January 26"
target = "2014-01-26"
self.compareDate(input, target)
def testOrdinalNums(self):
input = "Remind me on January 2nd"
target = "2014-01-02"
self.compareDate(input, target)
def testWeekFromExact(self):
input = "Do x y and z a week from January 26"
target = "2014-02-02"
self.compareDate(input, target)
def testMultipleWeeksFrom(self):
input = "Do x y and z three weeks from January 26"
target = "2014-02-16"
self.compareDate(input, target)
def testMultiWordDaysFrom(self):
input = "Do x y and z twenty six days from January 26"
target = "2014-02-21"
self.compareDate(input, target)
def testMultiWordAndDaysFrom(self):
input = "Do x y and z one hundred and twelve days from January 26"
target = "2014-05-18"
self.compareDate(input, target)
def testNextFriday(self):
input = "Next Friday, go to the grocery store"
target = "2014-01-10"
self.compareDate(input, target)
def testAmbiguousNext(self):
input = "The next event will take place on Friday"
target = "2014-01-03"
self.compareDate(input, target)
def testTomorrow(self):
input = "Tomorrow morning, go to the grocery store"
target = "2014-01-02"
self.compareDate(input, target)
def testToday(self):
input = "Send me an email some time today if you can"
target = "2014-01-01"
self.compareDate(input, target)
def testThis(self):
input = "This morning, I went to the gym"
target = "2014-01-01"
self.compareDate(input, target)
def testIllegalDate(self):
input = "I have a meeting on February 29 at 12:15pm"
self.assertRaises(ValueError, lambda: DateService().extractDate(input))
def testNoDate(self):
input = "It's a very nice day."
target = None
self.compareDate(input, target)
def testNoDateButTime(self):
input = "I have a meeting at 2pm"
target = None
self.compareDate(input, target)
#
# Time Tests
#
def testExactTime(self):
input = "Let's go to the park at 12:51pm"
target = "12:51"
self.compareTime(input, target)
def testInExactTime(self):
input = "I want to leave in two hours and twenty minutes"
target = datetime.datetime.today() + \
datetime.timedelta(hours=2, minutes=20)
self.compareTime(input, target)
def testTimeNoMinutes(self):
input = "Let's go to the park at 8pm"
target = "20:00"
self.compareTime(input, target)
def testTimeNoMinutesLater(self):
input = "Let's go to the park at 10pm"
target = "22:00"
self.compareTime(input, target)
def testTimeDotMinutes(self):
input = "Let's go to the park at 6.20pm"
target = "18:20"
self.compareTime(input, target)
def testTimeDotMinutesZeroMinutes(self):
input = "Let's go to the park at 6.00am"
target = "06:00"
self.compareTime(input, target)
def testAmbiguousTime(self):
input = "Let's go to the park at 8"
target = "20:00"
self.compareTime(input, target)
def testAmbiguousDotTime(self):
input = "Let's go to the park at 8.45"
target = "20:45"
self.compareTime(input, target)
def testMilitaryMorningTime(self):
input = "Let's go to the park at 08:00"
target = "08:00"
self.compareTime(input, target)
def testMilitaryAfternoonTime(self):
input = "Let's go to the park at 20:00"
target = "20:00"
self.compareTime(input, target)
def testThisEve(self):
input = "Let's go to the park this eve."
target = "20:00"
self.compareTime(input, target)
def testTonightTime(self):
input = "Let's go to the park tonight."
target = "20:00"
self.compareTime(input, target)
def testBeforeTenIsEveningTime(self):
input = "Let's go to the park at 5."
target = "17:00"
self.compareTime(input, target)
def testInThe(self):
input = "I went to the park in the afternoon"
target = "15:00"
self.compareTime(input, target)
def testBothDateAndTime(self):
input = "Let's go to the park at 5 tomorrow."
target_time = "17:00"
target_date = "2014-01-02"
self.compareTime(input, target_time)
self.compareDate(input, target_date)
def testNoTime(self):
input = "It's a very nice day."
target = None
self.compareTime(input, target)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestDate)
unittest.TextTestRunner(verbosity=2).run(suite)
|
STIXProject/python-stix
|
stix/extensions/marking/simple_marking.py
|
Python
|
bsd-3-clause
| 780
| 0.002564
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import fields
import stix
from stix.data_marking import MarkingStructure
import stix.bindings.extensions.marking.simple_marking as simple_marking_binding
@stix.register_extension
class SimpleMarkingStructure(MarkingStructure):
_binding = simple_marking_binding
_binding_class = simple_marking_binding.SimpleMarkingStructureType
_namespace = 'http://data-marking.mitre.org/extensions/MarkingStructure#Simple-1'
_XSI_TYPE = "simpleMarking:SimpleMarkingStructureType"
statement = fields.TypedField("Statement")
def __init__(self, statement=None):
super(SimpleMarkingStructure, self).__init__()
self.statement = statement
|
KlubJagiellonski/pola-backend
|
pola/product/migrations/0015_remove_product_company.py
|
Python
|
bsd-3-clause
| 330
| 0
|
# Generated by Django 3.1.2 on 2021-01-13 17:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('product', '0014_auto_20210113_1803'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='company',
),
]
|
tensorflow/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py
|
Python
|
apache-2.0
| 27,708
| 0.004547
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_AutoShardDataset` transformation."""
import os
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.kernel_tests import tf_record_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def chunk(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
class AutoShardDatasetTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self._filenames = self._createFiles()
def getAllDatasetElements(self, dataset):
actual = []
next_fn = self.getNext(dataset)
while True:
try:
actual.append(self.evaluate(next_fn()))
except errors.OutOfRangeError:
break
return actual
def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
num_examples, shuffle):
if shuffle:
actual = []
next_fn = self.getNext(dataset)
for _ in range(num_examples):
elem = self.evaluate(next_fn())
if isinstance(elem, tuple):
actual.extend(elem)
else:
actual.extend(elem.tolist())
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
else:
self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testFlatMapReaderPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (3, 8)
for r in range(0, 10)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(batch_size=[1, 3, 10])))
def testDatasetOfReaderDatasetsPipeline(self, batch_size):
# This tests a scenario where list_files may return multiple files
# due to the glob containing wildcards.
def batch(iterator, n):
l = len(iterator)
for i in range(0, l, n):
yield iterator[i:min(i + n, l)]
datasets = []
for files in batch(self._filenames, batch_size):
datasets.append(
dataset_ops.Dataset.list_files(files, shuffle=False).map(
core_readers.TFRecordDataset))
dataset = dataset_ops.Dataset.from_tensor_slices(datasets)
dataset = dataset.flat_map(lambda x: x)
# Simulate additional ops in between flat_map and interleave. This should be
# a no-op since if ShardDataset is placed right after flat_map, we will only
# have two datasets left at this point.
dataset = dataset.prefetch(1)
dataset = dataset.prefetch(1)
dataset = dataset.interleave(
lambda x: x, cycle_length=1, num_parallel_calls=1)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testZipReaderPipeline(self):
dataset1 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=False)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=False)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
(b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f)) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testConcatenateReaderPipeline(self, shuffle):
dataset1 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset1 = dataset1.batch(5)
dataset2 = dataset_ops.Dataset.list_files(
self._filenames, shuffle=shuffle)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset2.batch(5)
dataset = dataset1.concatenate(dataset2)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
expected += expected
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle=[True, False])))
def testPipelineWithMap(self, shuffle):
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@combinations.generate(test_base.default_test_combinations())
def testDirectFilenameTFRecordReaderPipeline(self):
dataset = core_readers.TFRecordDataset(self._filenames)
dataset = distribute._AutoShardDataset(d
|
kaedroho/wagtail
|
wagtail/core/migrations/0027_fix_collection_path_collation.py
|
Python
|
bsd-3-clause
| 906
| 0.003311
|
# -*- coding: utf-8 -*-
from django.db import migrations
def set_collection_path_collation(apps, schema_editor):
"""
Treebeard's path comparison logic can fail on certain locales such as sk_SK, which
sort numbers after letters. To avoid this, we explicitly set the collation for the
'path' column to the (non-locale-specific) 'C' collation.
See: https://groups.google.com/d/msg/wagtail/q0leyuCnYWI/I9uDvVlyBAAJ
"""
if schema_editor.connection.vendor == 'postgresql':
schema_editor.execute("""
ALTER TABLE wagtailcore_collection ALTER COLUMN path TYPE VARCHAR(255) COLLATE "C"
""")
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0026_group_collection_permission'),
]
operations = [
migrations.RunPython(
set_collection_path_collation, migrations.RunPython.noop
),
]
|
SilentObserver/mangafox_rippers
|
mangafox_ripper.py
|
Python
|
lgpl-3.0
| 3,898
| 0.001283
|
import os
import time
from urllib.parse import urljoin
import requests as rq
from bs4 import BeautifulSoup as bs
current_page_url = 0
page_soup = 0
def download_file(file_url, file_path):
if os.path.exists(file_path):
print(file_path, "already exists")
return False
i = 0
while i <= 30:
try:
file = rq.get(file_url, stream=True)
except Exception as e:
print(e)
print("Sleeping for 5 seconds...")
time.sleep(5)
print("Retrying to download file...")
# noinspection PyUnboundLocalVariable
if len(file.content) != 0:
# print("length of file",len(file.content))
break
else:
print("*" * 30)
print("Error getting", file_path, ": Retry=", i)
print("*" * 30)
i += 1
# file.raw.decode_content = True
if file.status_code == 200:
with open(file_path, 'wb') as f:
# shutil.copyfileobj(file.raw,f)
f.write(file.content)
f.flush()
f.close()
return True
return False
def next_page(current_page_soup, current_page_link): # works
next_page_link = current_page_soup.find_all('a', {'class': 'next_page'})[0].get('href')
if ".html" in next_page_link:
return urljoin(current_page_link, next_page_link)
else:
return False
# def get_image_name(image_url): #works
# return re.findall('/\S?([0-9_a-z-]+.jpg)\S?',image_url)[0]
def prepare_image_name(image_count):
return str(image_count) + ".jpg"
def get_image_url(web_page_soup):
global page_soup
try:
image_url = web_page_soup.find('img', {'id': 'image'}).get('src')
except:
page_soup = bs(rq.get(current_page_url).content, 'lxml')
image_url = page_soup.find('img', {'id': 'image'}).get('src')
return image_url
def get_chapter_name(web_page_soup):
return web_page_soup.find('h1', {'class': 'no'}).find('a').text
def create_dir(dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
return True
else:
# print("Error creating", dir_path)
try:
print("Removing", dir_path, "directory")
os.system("rm -rf " + dir_path.replace(' ', '\ '))
os.mkdir(dir_path)
except:
print("Error creating", dir_path)
return False
return True
def prepare_file_path(base_path, image_name):
return base_path + image_name
def download_chapter(start_page, base_path=''):
global current_page_url
global page_soup
current_page_url = start_page
image_count = 0
start_page_soup = bs(rq.get(start_page).content, 'lxml')
if create_dir(base_path + get_chapter_name(start_page_soup)):
base_path += get_chapter_name(start_page_soup) + "/"
page_soup = start_page_soup
page_link = start_page
while True:
image_url = get_image_url(page_soup)
# image_name = get_image_name(image_url)
image_count += 1
image_name = prepare_image_name(image_count)
# print("Downloading", image_name)
download_file(image_url, prepare_file_path(base_path, image_name))
next_link = next_page(page_soup, page_link)
if next_link is False:
break
else:
page_soup = bs(rq.get(next_link).content, 'lxml')
page_link = next_link
current_page_url = next_link
if __name__ == '__main__':
chapter_starting_page = input("Manga Chapter Starting page:")
download_chapter(chapter_starting_page)
os.system("notify-send \"Rip Complete\" \"Completed ripping chapter from Mangafox.me\"")
# link ="http://h.mfcdn.net/store/manga/13088/03-144.0/compressed/o031.jpg"
# download_file(link,get_image_name(link))
|
lowRISC/ot-sca
|
cw/cw305/ceca.py
|
Python
|
apache-2.0
| 23,127
| 0.001254
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import enum
import logging
import sys
import chipwhisperer as cw
import chipwhisperer.analyzer as cwa
import codetiming
import more_itertools
import networkx as nx
import numpy as np
import ray
import scared
"""A distributed implementation of the correlation-enhanced power analysis
collision attack.
See "Correlation-Enhanced Power Analysis Collision Attack" by A. Moradi, O.
Mischke, and T. Eisenbarth (https://eprint.iacr.org/2010/297.pdf) for more
information.
Typical usage:
>>> ./ceca.py -f PROJECT_FILE -n 400000 -w 5 -a 117 127 -d output -s 3
"""
def timer():
"""A customization of the ``codetiming.Timer`` decorator."""
def decorator(func):
@codetiming.Timer(
name=func.__name__,
text=f"{func.__name__} took {{seconds:.1f}}s",
logger=logging.info,
)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return decorator
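# A hedged usage sketch (an added illustration, not from this file):
# decorating a function logs its wall-clock runtime through logging.info
# when it returns.
#
#   >>> @timer()
#   ... def load_traces():
#   ...     pass
#   >>> load_traces()   # logs "load_traces took 0.0s"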
class AttackDirection(str, enum.Enum):
"""Enumeration for attack direction."""
INPUT = "input"
OUTPUT = "output"
@ray.remote
class TraceWorker:
"""Class for performing distributed computations on power traces.
This class provides methods for performing distributed computations on power
traces such as computing the mean, the standard deviation, and filtering.
After creating multiple instances (workers) of this class and initializing
each worker with a subset of the available traces, distributed computations
can be performed by simply calling the methods of these workers. Individual
results of these workers can then be aggregated to produce the final result.
This class is a Ray actor (https://docs.ray.io/en/master/index.html) and can
be used as follows:
>>> workers = [TraceWorker.remote(...) for ...]
>>> tasks = [worker.compute_stats.remote() for worker in workers]
>>> results = ray.get(tasks)
"""
def __init__(self, project_file, trace_slice, attack_window, attack_direction):
"""Inits a TraceWorker.
Args:
project_file: A Chipwhisperer project file.
trace_slice: Traces assigned to this worker.
attack_window: Samples to process.
attack_direction: Attack direction.
"""
self.project = cw.open_project(project_file)
# TODO: Consider more efficient formats.
self.num_samples = attack_window.stop - attack_window.start
if attack_direction == AttackDirection.INPUT:
self.texts = np.vstack(self.project.textins[trace_slice])
else:
self.texts = np.vstack(self.project.textouts[trace_slice])
self.traces = np.asarray(self.project.waves[trace_slice])[:, attack_window]
def compute_stats(self):
"""Computes sums and sums of deviation products of traces.
Results from multiple workers can be aggregated to compute the standard
deviation of a set of traces in a distributed manner using Eq. 22 in
"Numerically Stable Parallel Computation of (Co-)Variance" by E.
Schubert and M. Gertz (https://dbs.ifi.uni-heidelberg.de/files/Team/
eschubert/publications/SSDBM18-covariance-authorcopy.pdf).
Returns:
Number of traces, their sums, and sums of deviation products.
"""
cnt = self.traces.shape[0]
sum_ = self.traces.sum(axis=0)
mean = sum_ / cnt
sum_dev_prods = ((self.traces - mean) ** 2).sum(axis=0)
return (cnt, sum_, sum_dev_prods)
def filter_noisy_traces(self, min_trace, max_trace):
"""Filters traces with values outside the allowable range.
Args:
min_trace: Minimum allowable values.
max_trace: Maximum allowable values.
Returns:
Number of remaining traces.
"""
traces_to_use = np.all(
(self.traces >= min_trace) & (self.traces <= max_trace), axis=1
)
self.traces = self.traces[traces_to_use]
self.texts = self.texts[traces_to_use]
return self.traces.shape[0]
def count_and_sum_text_traces(self):
"""Computes the number of traces and sums of these traces for all values
of each text byte.
Returns:
A tuple ``(cnts, sums)``, where
- ``cnts`` is a (16, 256, 1) array where ``cnts[i, j, 0]`` gives the
number of traces where text byte i is j, and
- ``sums`` is a (16, 256, NUM_SAMPLES) array where ``sums[i, j, :]``
gives the sum of traces where text byte i is j.
"""
sums = np.zeros((16, 256, self.num_samples))
# Need to specify the last dimension for broadcasting to work during
# aggregation.
cnts = np.zeros((16, 256, 1))
for byte_pos in range(16):
# While a little bit more complex, below code is more efficient than
# a naive implementation that searches for all possible byte values
# in ``self.texts``.
sorted_indices = self.texts[:, byte_pos].argsort()
sorted_bytes = self.texts[sorted_indices, byte_pos]
# Find the indices where byte values change.
val_changes = np.where(np.roll(sorted_bytes, 1) != sorted_bytes)[0]
# Append the number of rows to be able to use ``pairwise``.
val_indices = list(val_changes) + [sorted_bytes.shape[0]]
for (start, end) in more_itertools.pairwise(val_indices):
byte_val = sorted_bytes[start]
cnts[byte_pos, byte_val] = end - start
act_indices = sorted_indices[start:end]
sums[byte_pos, byte_val] = self.traces[act_indices].sum(axis=0)
return cnts, sums
def compute_mean_and_std(workers):
"""Computes mean and standard deviation of all traces.
This function uses Eq. 22 in "Numerically Stable Parallel Computation of
(Co-)Variance" by E. Schubert and M. Gertz (https://dbs.ifi.uni-heidelberg.
de/files/Team/eschubert/publications/SSDBM18-covariance-authorcopy.pdf) to
distribute standard deviation computation to multiple ``TraceWorker``
instances.
Args:
workers: ``TraceWorker`` handles.
Returns:
Mean and standard deviation of all traces.
"""
tasks = [worker.compute_stats.remote() for worker in workers]
running_sum_dev_prods = None
running_sum = None
running_cnt = 0
while tasks:
done, tasks = ray.wait(tasks)
cnt, sum_, sum_dev_prods = ray.get(done[0])
if running_cnt == 0:
running_sum_dev_prods = np.copy(sum_dev_prods)
running_sum = np.copy(sum_)
running_cnt += cnt
else:
running_sum_dev_prods += sum_dev_prods + (
(cnt * running_sum - running_cnt * sum_) ** 2
/ (cnt * running_cnt * (cnt + running_cnt))
)
running_sum += sum_
running_cnt += cnt
return running_sum / running_cnt, np.sqrt(running_sum_dev_prods / running_cnt)
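# A small numeric check of the pairwise merge above (an added illustration,
# not part of the original module): combining per-chunk counts, sums and
# sums of deviation products via Eq. 22 reproduces the global variance.
#
#   >>> a, b = np.array([1., 2., 3.]), np.array([4., 5., 6., 7.])
#   >>> stats = lambda x: (len(x), x.sum(), ((x - x.mean()) ** 2).sum())
#   >>> (na, sa, da), (nb, sb, db) = stats(a), stats(b)
#   >>> merged = da + db + (nb * sa - na * sb) ** 2 / (na * nb * (na + nb))
#   >>> merged / (na + nb) == np.concatenate([a, b]).var()
#   True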
def filter_noisy_traces(workers, mean_trace, std_trace, max_std):
"""Signals ``TraceWorker`` instances to filter noisy traces.
Args:
workers:``TraceWorker`` handles.
mean_trace: Mean of all traces.
std_trace: Standard deviation of all traces.
max_std: Allowed number of standard deviations from the mean trace.
Returns:
Number of remaining traces.
"""
min_trace = mean_trace - max_std * std_trace
max_trace = mean_trace + max_std * std_trace
tasks = [
worker.filter_noisy_traces.remote(min_trace, max_trace) for worker in workers
]
running_cnt = 0
while tasks:
done, tasks = ray.wait(tasks)
running_cnt += ray.get(done[0])
return running_cnt
def compute_mean_text_traces(workers):
"""Computes mean traces for all values of all text bytes.
This function distributes work to ``TraceWorker`` ins
|
spaceone/httoop
|
httoop/status/redirect.py
|
Python
|
mit
| 3,015
| 0.021891
|
# -*- coding: utf-8 -*-
from httoop.status.types import StatusException
from httoop.uri import URI
class RedirectStatus(StatusException):
u"""REDIRECTIONS = 3xx
A redirection to other URI(s) which are set in the Location-header.
"""
location = None
def __init__(self, location, *args, **kwargs):
if not isinstance(location, (type(None), list, tuple)):
location = [location]
if location is not None:
kwargs.setdefault('headers', {})['Location'] = ', '.join(str(URI(uri)) for uri in location)
super(RedirectStatus, self).__init__(*args, **kwargs)
def to_dict(self):
dct = super(RedirectStatus, self).to_dict()
if self.headers.get('Location'):
dct.update(dict(Location=self.headers['Location']))
return dct
class MULTIPLE_CHOICES(RedirectStatus):
u"""The server has multiple representations of the requested resource.
And the client e.g. did not specify the Accept-header or
the requested representation does not exists.
"""
code = 300
class MOVED_PERMANENTLY(RedirectStatus):
u"""The the server knows the target resource but the URI
is incorrect (wrong domain, trailing slash, etc.).
It can also be send if a resource have moved or
renamed to prevent broken links."""
code = 301
cacheable = True
class FOUND(RedirectStatus):
code = 302
cacheable = True
class SEE_OTHER(RedirectStatus):
u"""The request has been processed but instead of serving a
representation of the result or resource it links to another
document which contains a static status message, etc. so
the client is not forced to download the data.
This is also useful for links like
/release-latest.tar.gz -> /release-1.2.tar.gz"""
code = 303
cacheable = True
class NOT_MODIFIED(RedirectStatus):
u"""The client already has the data which is provided through the
information in the Etag or If-Modified-Since-header.
The Date-header is required, the ETag-header and
Content-Location-header are useful.
Also the caching headers Expires, Cache-Control and Vary are
required if they differ from those sent previously.
TODO: what to do if the representation format has
changed but not the representation itself?
The response body has to be empty."""
code = 304
body = None
def __init__(self, *args, **kwargs):
# don't set location
super(NOT_MODIFIED, self).__init__(None, *args, **kwargs)
header_to_remove = (
"Allow", "Content-Encoding",
|
"Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Expires", "Location"
)
class USE_PROXY(RedirectStatus):
code = 305
class TEMPORARY_REDIRECT(RedirectStatus):
u"""The request has not processed because the requested
resource is located at a different URI.
The client should resent the reques
|
t to the URI given in the Location-header.
for GET this is the same as 303 but for POST, PUT and DELETE it is
important that the request was not processed."""
code = 307
cacheable = True
class PERMANENT_REDIRECT(RedirectStatus):
code = 308
cacheable = True
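# A hedged usage sketch (an added illustration, not part of this file):
# passing a target URI sets the Location header on the response, so a
# handler can redirect with e.g.
#
#   raise SEE_OTHER('/release-1.2.tar.gz')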
|
archerda/gkcx
|
Admit/HttpRequestTool.py
|
Python
|
apache-2.0
| 2,421
| 0.004131
|
import urllib.request
import urllib.parse
import json
from Admit import Data
# number:the number of a student
# birthday: the student's birthday, like 9301
# try_time: the request reconnect times when it broken.
def get_admit_result_by_number_and_birthday(number, birthday, try_times=3):
if try_times == 0:
file = open('error.txt', 'a')
file.write(str(number + "-" + birthday))
return
# make the request header.
# all the header are copied from a normal HTTP request.
# maybe some header is not necessary.
cookie = "Hm_lvt_2be31ff7176cef646e9351788dc99055=1437448597;Hm_lpvt_2be31ff7176cef646e9351788dc99055=1437450310;PHPSESSID=fblmt7m54ehe2m0q65otdfbbg5"
url = "http://www.5184.com/gk/common/get_lq_edg.php"
post_data = urllib.parse.urlencode({'csny': birthday, 'zkzh': number, 'yzm': ''})
post_data = post_data.encode('utf-8')
request = urllib.request.Request(url)
request.add_header("Host", "www.5184.com")
request.add_header("Connection", "keep-alive")
request.add_header("Content-Length", "31")
request.add_header("Accept", "application/json, text/javascript, */*; q=0.01")
request.add_header("Origin", "http://www.5184.com")
request.add_header("X-Requested-With", "XMLHttpRequest")
request.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.3; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36")
request.add_header("Content-Type", "application/x-www-form-urlencoded")
request.add_header("Referer", "http://www.5184.com/gk/check_lq.html") # important. without referer, the request will return error.
request.add_header("Accept-Encoding", "gzip,deflate,sdch")
    request.add_header("Accept-Language", "zh-CN,zh;q=0.8")
request.add_header("Cookie", cookie)
    try:
        f = urllib.request.urlopen(request, post_data)
        r_data = f.read().decode('utf-8')
    except:
        # retry, then return so r_data is never read uninitialized
        get_admit_result_by_number_and_birthday(number, birthday, try_times - 1)
        return
    result_json = json.loads(r_data)
if result_json['flag'] == 1:
result = result_json['result']
if result['zymc'] == Data.SCHOOL:
file = open('admit.txt', 'a')
file.write(str(result['zkzh'] + ' ' + birthday + ' ' + result['zymc'] + ' ' + result['xm'] + '\n'))
# test
get_admit_result_by_number_and_birthday("1802301001", "9608", 1)
|
ltb-project/white-pages
|
docs/conf.py
|
Python
|
gpl-3.0
| 5,250
| 0
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'LDAP Tool Box White Pages'
copyright = u'2021, LDAP Tool Box'
author = u'LDAP Tool Box'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx-prompt'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_logo = 'images/ltb-logo.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'whitepagesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'whitepages.tex', u'white-pages Documentation',
u'Clément OUDOT', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'whitepages', u'white-pages Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'whitepages', u'white-pages Documentation',
author, 'Clément OUDOT', 'white-pages Documentation',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractXvvCpuMybluehostMe.py
|
Python
|
bsd-3-clause
| 553
| 0.034358
|
def extractXvvCpuMybluehostMe(item):
'''
    Parser for 'xvv.cpu.mybluehost.me'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
    for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
VlachosGroup/VlachosGroupAdditivity
|
pgradd/RINGParser/Reader.py
|
Python
|
mit
| 2,491
| 0
|
# -*- coding: utf-8 -*-
from .. Error import RINGError
class Reader(object):
"""
Reader reads the parsed RING input, and returns the RDkit wrapper objects
in pgradd.RDkitWrapper.
Attributes
----------
    ast : abstract syntax tree obtained from the parser
"""
def __init__(self, ast):
# ast = Abstract Syntax Tree
self.ast = ast
def ReadRINGInput(self, tree):
# Check the type of input
        assert tree[0][0].name in ('Fragment',
'ReactionRule',
'EnumerationQuery')
# if fragment, molquery is returned
        if tree[0][0].name == 'Fragment':
from . MolQueryRead import MolQueryReader
self.type = 'MolQuery'
return MolQueryReader(tree[0][1:]).Read()
        # if reaction rule, reactionquery is returned
elif tree[0][0].name == 'ReactionRule':
from . ReactionQueryRead import ReactionQueryReader
self.type = 'ReactionQuery'
return ReactionQueryReader(tree[0][1:]).Read()
# TODO enumeration query
elif tree[0][0].name == 'EnumerationQuery':
raise NotImplementedError('Coming soon')
def Read(self):
# Root tree reading. Check if the input is RINGinput
assert self.ast[0].name == "RINGInput"
return self.ReadRINGInput(self.ast[1:])
def Read(text, strict=False):
"""
Return MolQuery, ReactionQuery, or ReactionNetworkEnumerationQuery by
interpretting RING input string.
Parameters
----------
text : string
Specify string describing chemical structure, elementary reaction, or
reaction network enumeration rules in RING notation.
strict : boolean, optional
If True, then disable use of syntactic extensions such as support for
"radical electrons".
Returns
-------
Returns RDkit wrapped queries that extends RDkit's functionality:
* MolQuery if fragment is given in string
* ReactionQuery if reaction rule is given in string
* ReactionNetworkEnumerationQuery if enumeration query is given in string
Raises
------
msr.error.RINGSyntaxError
If `text` does not conform to RING syntax.
msr.error.RINGReaderError
If `text` is invalid RING for non-syntactic reasons.
"""
from . import Parser
try:
return Reader(Parser.parse(text)).Read()
except RINGError as exc:
raise exc
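# Usage sketch (the RING input text itself is hypothetical; see the docstring
# above for the accepted kinds of input):
# query = Read(ring_text)               # MolQuery or ReactionQuery
# query = Read(ring_text, strict=True)  # disable syntactic extensions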
|
Vayne-Lover/Effective
|
Python/Effective Python/item6.py
|
Python
|
mit
| 211
| 0.085308
|
#!/usr/local/bin/python
a=['red','orange','yellow','green','blue','purple']
odds=a[::2]
evens=a[1::2]
print odds
print evens
x=b'abcdefg'
y=x[::-1]
print y
c=['a','b','c','d','e','f']
d=c[::2]
e=d[1:-1]
print e
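# Notes on the slices above: a[::2] takes every other element starting at
# index 0, a[1::2] starts at index 1, and x[::-1] reverses a sequence.
# Stacking slices (d = c[::2]; e = d[1:-1]) is harder to read than one
# explicit slice.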
|
pragya1990/pox_whole_code
|
pox/misc/videoSlice1.py
|
Python
|
gpl-3.0
| 10,272
| 0.012169
|
'''
Coursera:
- Software Defined Networking (SDN) course
-- Network Virtualization
Professor: Nick Feamster
Teaching Assistant: Arpit Gupta
'''
from pox.core import core
from collections import defaultdict
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery
import pox.openflow.spanning_tree
from pox.lib.revent import *
from pox.lib.util import dpid_to_str
from pox.lib.util import dpidToStr
from pox.lib.addresses import IPAddr, EthAddr
from collections import namedtuple
import os
log = core.getLogger()
class VideoSlice (EventMixin):
def __init__(self):
self.listenTo(core.openflow)
core.openflow_discovery.addListeners(self)
# Adjacency map. [sw1][sw2] -> port from sw1 to sw2
self.adjacency = defaultdict(lambda:defaultdict(lambda:None))
'''
The structure of self.portmap is a four-tuple key and a string value.
The type is:
(dpid string, src MAC addr, dst MAC addr, port (int)) -> dpid of next switch
'''
self.portmap = {
# h1 <-- port 80 --> h3
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:03'), 80): '00-00-00-00-00-03',
# """ Add your mapping logic here"""
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:03'), 80): '00-00-00-00-00-04',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:01'), 80): '00-00-00-00-00-01',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:01'), 80): '00-00-00-00-00-03',
#port 22
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:03'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:03'), 22): '00-00-00-00-00-04',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:01'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:01'), 22): '00-00-00-00-00-01',
#h2--h4
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:04'), 80): '00-00-00-00-00-03',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:04'), 80): '00-00-00-00-00-04',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:02'), 80): '00-00-00-00-00-03',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:02'), 80): '00-00-00-00-00-01',
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:04'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:04'), 22): '00-00-00-00-00-04',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:02'), 22): '00-00-00-00-00-01',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:02'), 22): '00-00-00-00-00-02',
#h1-h4
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:04'), 80): '00-00-00-00-00-03',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:04'), 80): '00-00-00-00-00-04',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:01'), 80): '00-00-00-00-00-03',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:01'), 80): '00-00-00-00-00-01',
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:04'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:01'),
EthAddr('00:00:00:00:00:04'), 22): '00-00-00-00-00-04',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:01'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:04'),
EthAddr('00:00:00:00:00:01'), 22): '00-00-00-00-00-01',
#h2-h3
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:03'), 80): '00-00-00-00-00-03',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:03'), 80): '00-00-00-00-00-04',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:02'), 80): '00-00-00-00-00-03',
('00-00-00-00-00-03', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:02'), 80): '00-00-00-00-00-01',
('00-00-00-00-00-01', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:03'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:02'),
EthAddr('00:00:00:00:00:03'), 22): '00-00-00-00-00-04',
('00-00-00-00-00-04', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:02'), 22): '00-00-00-00-00-02',
('00-00-00-00-00-02', EthAddr('00:00:00:00:00:03'),
EthAddr('00:00:00:00:00:02'), 22): '00-00-00-00-00-01',
}
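        # Example lookup (sketch): the next hop for HTTP traffic h1 -> h3
        # entering switch s1 is
        #   self.portmap[('00-00-00-00-00-01', EthAddr('00:00:00:00:00:01'),
        #                 EthAddr('00:00:00:00:00:03'), 80)]   # -> '00-00-00-00-00-03'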
def _handle_LinkEvent (self, event):
l = event.link
sw1 = dpid_to_str(l.dpid1)
sw2 = dpid_to_str(l.dpid2)
log.debug ("link %s[%d] <-> %s[%d]",
sw1, l.port1,
sw2, l.port2)
self.adjacency[sw1][sw2] = l.port1
self.adjacency[sw2][sw1] = l.port2
def _handle_PacketIn (self, event):
"""
Handle packet in messages from the switch to implement above algorithm.
"""
packet = event.parsed
tcpp = event.parsed.find('tcp')
def install_fwdrule(event,packet,outport):
msg = of.ofp_flow_mod()
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match = of.ofp_match.from_packet(packet, event.port)
msg.actions.append(of.ofp_action_output(port = outport))
msg.data = event.ofp
msg.in_port = event.port
event.connection.send(msg)
def forward (message = None):
|
johnbachman/indra
|
indra/assemblers/tsv/assembler.py
|
Python
|
bsd-2-clause
| 8,109
| 0.00037
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
from copy import copy
from indra.databases import get_identifiers_url
from indra.statements import *
from indra.util import write_unicode_csv
logger = logging.getLogger(__name__)
class TsvAssembler(object):
"""Assembles Statements into a set of tabular files for export or curation.
Currently designed for use with "raw" Statements, i.e., Statements with a
single evidence entry. Exports Statements into a single tab-separated file
with the following columns:
*INDEX*
A 1-indexed integer identifying the statement.
*UUID*
The UUID of the Statement.
*TYPE*
Statement type, given by the name of the class in indra.statements.
*STR*
String representation of the Statement. Contains most relevant
information for curation including any additional statement data
beyond the Statement type and Agents.
*AG_A_TEXT*
For Statements extracted from text, the text in the sentence
corresponding to the first agent (i.e., the 'TEXT' entry in the
db_refs dictionary). For all other Statements, the Agent name is
given. Empty field if the Agent is None.
*AG_A_LINKS*
Groundings for the first agent given as a comma-separated list of
identifiers.org links. Empty if the Agent is None.
*AG_A_STR*
String representation of the first agent, including additional
agent context (e.g. modification, mutation, location, and bound
conditions). Empty if the Agent is None.
*AG_B_TEXT, AG_B_LINKS, AG_B_STR*
As above for the second agent. Note that the Agent may be None (and
these fields left empty) if the Statement consists only of a single
Agent (e.g., SelfModification, ActiveForm, or Translocation statement).
*PMID*
PMID of the first entry in the evidence list for the Statement.
*TEXT*
Evidence text for the Statement.
*IS_HYP*
Whether the Statement represents a "hypothesis", as flagged by some
reading systems and recorded in the `evidence.epistemics['hypothesis']`
field.
*IS_DIRECT*
        Whether the Statement represents a direct physical interaction,
as recorded by the `evidence.epistemics['direct']` field.
In addition, if the `add_curation_cols` flag is set when calling
:py:meth:`TsvAssembler.make_model`, the following additional (empty)
columns will be added, to be filled out by curators:
*AG_A_IDS_CORRECT*
Correctness of Agent A grounding.
*AG_A_STATE_CORRECT*
Correctness of Agent A context (e.g., modification, bound, and other
conditions).
*AG_B_IDS_CORRECT, AG_B_STATE_CORRECT*
As above, for Agent B.
*EVENT_CORRECT*
Whether the event is supported by the evidence text if the entities
(Agents A and B) are considered as placeholders (i.e.,
ignoring the correctness of their grounding).
*RES_CORRECT*
For Modification statements, whether the amino acid residue indicated
by the Statement is supported by the evidence.
*POS_CORRECT*
For Modification statements, whether the amino acid position indicated
by the Statement is supported by the evidence.
*SUBJ_ACT_CORRECT*
For Activation/Inhibition Statements, whether the activity indicated
for the subject (Agent A) is supported by the evidence.
*OBJ_ACT_CORRECT*
For Activation/Inhibition Statements, whether the activity indicated
for the object (Agent B) is supported by the evidence.
*HYP_CORRECT*
Whether the Statement is correctly flagged as a hypothesis.
    *DIRECT_CORRECT*
Whether the Statement is correctly flagged as direct.
Parameters
----------
stmts : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to be assembled.
Attributes
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to be assembled.
"""
def __init__(self, statements=None):
if not statements:
self.statements = []
else:
self.statements = statements
def add_statements(self, stmts):
self.statements.extend(stmts)
def make_model(self, output_file, add_curation_cols=False, up_only=False):
"""Export the statements into a tab-separated text file.
Parameters
----------
output_file : str
Name of the output file.
add_curation_cols : bool
Whether to add columns to facilitate statement curation. Default
is False (no additional columns).
up_only : bool
Whether to include identifiers.org links *only* for the Uniprot
grounding of an agent when one is available. Because most
            spreadsheets allow only a single hyperlink per cell, this makes
it easier to link to Uniprot information pages for curation
purposes. Default is False.
"""
stmt_header = ['INDEX', 'UUID', 'TYPE', 'STR',
'AG_A_TEXT', 'AG_A_LINKS', 'AG_A_STR',
                       'AG_B_TEXT', 'AG_B_LINKS', 'AG_B_STR',
'PMID', 'TEXT', 'IS_HYP', 'IS_DIRECT']
if add_curation_cols:
stmt_header = stmt_header + \
['AG_A_IDS_CORRECT', 'AG_A_STATE_CORRECT',
                 'AG_B_IDS_CORRECT', 'AG_B_STATE_CORRECT',
'EVENT_CORRECT',
'RES_CORRECT', 'POS_CORRECT', 'SUBJ_ACT_CORRECT',
'OBJ_ACT_CORRECT', 'HYP_CORRECT', 'DIRECT_CORRECT']
rows = [stmt_header]
for ix, stmt in enumerate(self.statements):
# Complexes
if len(stmt.agent_list()) > 2:
logger.info("Skipping statement with more than two members: %s"
% stmt)
continue
# Self-modifications, ActiveForms
elif len(stmt.agent_list()) == 1:
ag_a = stmt.agent_list()[0]
ag_b = None
# All others
else:
(ag_a, ag_b) = stmt.agent_list()
# Put together the data row
row = [ix+1, stmt.uuid, stmt.__class__.__name__, str(stmt)] + \
_format_agent_entries(ag_a, up_only) + \
_format_agent_entries(ag_b, up_only) + \
[stmt.evidence[0].pmid, stmt.evidence[0].text,
stmt.evidence[0].epistemics.get('hypothesis', ''),
stmt.evidence[0].epistemics.get('direct', '')]
if add_curation_cols:
row = row + ([''] * 11)
rows.append(row)
# Write to file
write_unicode_csv(output_file, rows, delimiter='\t')
def _format_id(ns, id):
"""Format a namespace/ID pair for display and curation."""
label = '%s:%s' % (ns, id)
label = label.replace(' ', '_')
url = get_identifiers_url(ns, id)
return (label, url)
def _format_agent_entries(agent, up_only):
if agent is None:
return ['', '', '']
# Agent text/name
agent_text = agent.db_refs.get('TEXT')
if agent_text is None:
agent_text = agent.name
# Agent db_refs str
db_refs = copy(agent.db_refs)
if 'TEXT' in db_refs:
db_refs.pop('TEXT')
db_refs_str = ','.join(['%s|%s' % (k, v)
for k, v in db_refs.items()])
# Agent links
identifier_links = []
if up_only and 'UP' in db_refs:
up_label, up_url = _format_id('UP', db_refs['UP'])
identifier_links = [up_url]
else:
for ns, id in db_refs.items():
label, url = _format_id(ns, id)
if url is None:
identifier_links.append(label)
else:
identifier_links.append(url)
links_str = ', '.join(identifier_links)
return [agent_text, links_str, str(agent)]
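# Usage sketch (assumes `stmts` is a list of single-evidence INDRA Statements):
# ta = TsvAssembler(stmts)
# ta.make_model('statements.tsv', add_curation_cols=True, up_only=False)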
|
rbreitenmoser/snapcraft
|
snapcraft/tests/test_yaml.py
|
Python
|
gpl-3.0
| 25,577
| 0
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import logging
import os
import unittest
import unittest.mock
import fixtures
import snapcraft.common
import snapcraft.yaml
from snapcraft import (
dirs,
tests,
)
class TestYaml(tests.TestCase):
def setUp(self):
super().setUp()
dirs.setup_dirs()
patcher = unittest.mock.patch('os.path.exists')
mock_wrap_exe = patcher.start()
mock_wrap_exe.return_value = True
self.addCleanup(patcher.stop)
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
@unittest.mock.patch('snapcraft.wiki.Wiki.get_part')
def test_config_loads_plugins(self, mock_get_part, mock_loadPlugin):
self.make_snapcraft_yaml("""name: test
version: "1"
summary: test
description: test
parts:
part1:
plugin: go
stage-packages: [fswebcam]
""")
snapcraft.yaml.Config()
mock_loadPlugin.assert_called_with('part1', 'go', {
'stage-packages': ['fswebcam'],
'stage': [], 'snap': [],
})
self.assertFalse(mock_get_part.called)
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
@unittest.mock.patch('snapcraft.wiki.Wiki.get_part')
def test_config_loads_with_different_encodings(
self, mock_get_part, mock_loadPlugin):
content = """name: test
version: "1"
summary: test
description: ñoño test
parts:
part1:
plugin: go
stage-packages: [fswebcam]
"""
for enc in ['utf-8', 'utf-8-sig', 'utf-16']:
with self.subTest(key=enc):
self.make_snapcraft_yaml(content, encoding=enc)
snapcraft.yaml.Config()
mock_loadPlugin.assert_called_with('part1', 'go', {
'stage-packages': ['fswebcam'],
'stage': [], 'snap': [],
})
self.assertFalse(mock_get_part.called)
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
@unittest.mock.patch('snapcraft.wiki.Wiki.compose')
def test_config_loads_part_from_wiki(self, mock_compose, mock_loadPlugin):
self.make_snapcraft_yaml("""name: test
version: "1"
summary: test
description: test
parts:
part1:
stage-packages: [fswebcam]
""")
mock_compose.return_value = {
'plugin': 'go',
'source': 'http://source.tar.gz',
}
snapcraft.yaml.Config()
mock_loadPlugin.assert_called_with('part1', 'go', {
'source': 'http://source.tar.gz', 'stage': [], 'snap': []})
@unittest.mock.patch('snapcraft.pluginhandler.load_plugin')
@unittest.mock.patch('snapcraft.wiki.Wiki.get_part')
def test_config_with_wiki_part_after(self, mock_get_part, mock_load):
self.make_snapcraft_yaml("""name: test
version: "1"
summary: test
description: test
parts:
part1:
after:
- part2wiki
plugin: go
stage-packages: [fswebcam]
""")
def load_effect(*args, **kwargs):
mock_part = unittest.mock.Mock()
mock_part.code.build_packages = []
mock_part.deps = []
mock_part.name = args[0]
return mock_part
mock_load.side_effect = load_effect
mock_get_part.return_value = {
'plugin': 'go',
'source': 'http://somesource'
}
snapcraft.yaml.Config()
call1 = unittest.mock.call('part1', 'go', {
'stage-packages': ['fswebcam'], 'stage': [], 'snap': []})
call2 = unittest.mock.call('part2wiki', 'go', {
'source': 'http://somesource'})
mock_load.assert_has_calls([call1, call2])
self.assertTrue(mock_get_part.called)
def test_config_raises_on_missing_snapcraft_yaml(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
# no snapcraft.yaml
with self.assertRaises(
snapcraft.yaml.SnapcraftYamlFileError) as raised:
snapcraft.yaml.Config()
self.assertEqual(raised.exception.file, 'snapcraft.yaml')
def test_config_loop(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml("""name: test
version: "1"
summary: test
description: test
parts:
p1:
plugin: tar-content
source: .
after: [p2]
p2:
plugin: tar-content
source: .
after: [p1]
""")
with self.assertRaises(snapcraft.yaml.SnapcraftLogicError) as raised:
snapcraft.yaml.Config()
self.assertEqual(
raised.exception.message,
'circular dependency chain found in parts definition')
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
def test_invalid_yaml_missing_name(self, mock_loadPlugin):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml("""
version: "1"
summary: test
description: nothing
parts:
part1:
plugin: go
stage-packages: [fswebcam]
""")
with self.assertRaises(snapcraft.yaml.SnapcraftSchemaError) as raised:
snapcraft.yaml.Config()
        self.assertEqual(raised.exception.message,
'\'name\' is a required property')
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
def test_invalid_yaml_invalid_name_as_number(self, mock_loadPlugin):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml("""name: 1
version: "1"
summary: test
description: nothing
parts:
part1:
    plugin: go
stage-packages: [fswebcam]
""")
with self.assertRaises(snapcraft.yaml.SnapcraftSchemaError) as raised:
snapcraft.yaml.Config()
self.assertEqual(raised.exception.message,
'1 is not of type \'string\'')
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
def test_invalid_yaml_invalid_name_chars(self, mock_loadPlugin):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml("""name: myapp@me_1.0
version: "1"
summary: test
description: nothing
parts:
part1:
plugin: go
stage-packages: [fswebcam]
""")
with self.assertRaises(snapcraft.yaml.SnapcraftSchemaError) as raised:
snapcraft.yaml.Config()
self.assertEqual(
raised.exception.message,
'\'myapp@me_1.0\' does not match \'^[a-z0-9][a-z0-9+-]*$\'')
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
def test_invalid_yaml_missing_description(self, mock_loadPlugin):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml("""name: test
version: "1"
summary: test
parts:
part1:
plugin: go
stage-packages: [fswebcam]
""")
with self.assertRaises(snapcraft.yaml.SnapcraftSchemaError) as raised:
snapcraft.yaml.Config()
self.assertEqual(
raised.exception.message,
'\'description\' is a required property')
@unittest.mock.patch('snapcraft.yaml.Config.load_plugin')
def test_tab_in_yaml(self, mock_loadPlugin):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
self.make_snapcraft_yaml("""name: test
version: "1"
\tsummary: test
parts:
part1:
plugin: go
stage-packages: [fswebcam]
""")
with self.assertRaise
|
mschon314/pyamazonclouddrive
|
bin/acdsession.py
|
Python
|
mit
| 3,367
| 0.017226
|
#!/usr/bin/env python
#
# Copyright (c) 2011 anatanokeitai.com(sakurai_youhei)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os, sys, getpass
from optparse import OptionParser
try:
import pyacd
except ImportError:
pyacd_lib_dir=os.path.dirname(__file__)+os.sep+".."
if os.path.exists(pyacd_lib_dir) and os.path.isdir(pyacd_lib_dir):
sys.path.insert(0, pyacd_lib_dir)
import pyacd
parser=OptionParser(
epilog="This command updates/creates your session of Amazon Cloud Drive.",
usage="%prog -e youremail -p yourpassword -s path/to/sessionfile",
version=pyacd.__version__
)
parser.add_option(
"--domain",dest="domain",action="store",default="www.amazon.com",
help="domain of Amazon [default: %default]"
)
parser.add_option(
"-e",dest="email",action="store",default=None,
help="email address for Amazon"
)
parser.add_option(
"-p",dest="password",action="store",default=None,
help="password for Amazon"
)
parser.add_option(
"-s",dest="session",action="store",default=None,metavar="FILE",
help="save/load login session to/from FILE"
)
parser.add_option(
"-v",dest="verbose",action="store_true",default=False,
help="show verbose message"
)
def main():
opts,args=parser.parse_args(sys.argv[1:])
pyacd.set_amazon_domain(opts.domain)
for m in ["email","session"]:
if not opts.__dict__[m]:
print >>sys.stderr, "mandatory option is mi
|
ssing (%s)\n"%m
parser.print_help()
exit(2)
if not opts.password:
opts.password = getpass.getpass()
if os.path.isdir(opts.session):
        print >>sys.stderr, "%s should not be a directory." % opts.session
exit(2)
if opts.verbose:
print >>sys.stderr, "Loading previous session...",
try:
s=pyacd.Session.load_from_file(opts.session)
if opts.verbose:
print >>sys.stderr, "Done."
except:
s=pyacd.Session()
if opts.verbose:
print >>sys.stderr, "Failed."
if opts.verbose:
print >>sys.stderr, "Logging into %s..."%opts.domain,
try:
session=pyacd.login(opts.email,opts.password,session=s)
if opts.verbose:
print >>sys.stderr, "Done."
if opts.verbose:
print >>sys.stderr, "Updating current session...",
session.save_to_file(opts.session)
if opts.verbose:
print >>sys.stderr, "Done."
except:
if opts.verbose:
print >>sys.stderr, "Failed."
if __name__=="__main__":
main()
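# Example invocation (sketch; email and path are placeholders):
#   acdsession.py -e you@example.com -s ~/.acd_session -v
# The password is prompted interactively when -p is omitted.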
|
tensorflow/recommenders-addons
|
demo/embedding_variable/ev-keras-eager.py
|
Python
|
apache-2.0
| 3,467
| 0.002019
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense
import tensorflow_datasets as tfds
import tensorflow_recommenders_addons as tfra
ratings = tfds.load("movielens/100k-ratings", split="train")
ratings = ratings.map(
lambda x: {
"movie_id": tf.strings.to_number(x["movie_id"], tf.int64),
"user_id": tf.strings.to_number(x["user_id"], tf.int64),
"user_rating": x["user_rating"]
})
tf.random.set_seed(2021)
shuffled = ratings.shuffle(100_000, seed=2021, reshuffle_each_iteration=False)
dataset_train = shuffled.take(100_000).batch(256)
class NCFModel(tf.keras.Model):
def __init__(self):
super(NCFModel, self).__init__()
self.embedding_size = 32
self.d0 = Dense(
256,
activation='relu',
kernel_initializer=tf.keras.initializers.RandomNormal(0.0, 0.1),
bias_initializer=tf.keras.initializers.RandomNormal(0.0, 0.1))
self.d1 = Dense(
64,
activation='relu',
kernel_initializer=tf.keras.initializers.RandomNormal(0.0, 0.1),
bias_initializer=tf.keras.initializers.RandomNormal(0.0, 0.1))
self.d2 = Dense(
1,
kernel_initializer=tf.keras.initializers.RandomNormal(0.0, 0.1),
bias_initializer=tf.keras.initializers.RandomNormal(0.0, 0.1))
self.user_embeddings = tfra.embedding_variable.EmbeddingVariable(
name="user_dynamic_embeddings",
ktype=tf.int64,
embedding_dim=self.embedding_size,
initializer=tf.keras.initializers.RandomNormal(-1, 1))
self.movie_embeddings = tfra.embedding_variable.EmbeddingVariable(
name="moive_dynamic_embeddings",
embedding_dim=self.embedding_size,
ktype=tf.int64,
initializer=tf.keras.initializers.RandomNormal(-1, 1))
self.loss = tf.keras.losses.MeanSquaredError()
def call(self, batch):
movie_id = batch["movie_id"]
user_id = batch["user_id"]
rating = batch["user_rating"]
user_id_val, user_id_idx = np.unique(user_id, return_inverse=True)
user_id_weights = tf.nn.embedding_lookup(params=self.user_embeddings,
ids=user_id_val,
name="user-id-weights")
user_id_weights = tf.gather(user_id_weights, user_id_idx)
    movie_id_val, movie_id_idx = np.unique(movie_id, return_inverse=True)
movie_id_weights = tf.nn.embedding_lookup(params=self.movie_embeddings,
ids=movie_id_val,
name="movie-id-weights")
movie_id_weights = tf.gather(movie_id_weights, movie_id_idx)
embeddings = tf.concat([user_id_weights, movie_id_weights], axis=1)
dnn = self.d0(embeddings)
dnn = self.d1(dnn)
dnn = self.d2(dnn)
out = tf.reshape(dnn, shape=[-1])
loss = self.loss(rating, out)
return loss
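# Note on the lookup pattern in NCFModel.call(): np.unique deduplicates the
# batch of ids before the embedding lookup, and tf.gather re-expands the
# deduplicated embeddings back to per-example order via the inverse indices.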
model = NCFModel()
optimizer = tfra.embedding_variable.AdamOptimizer(learning_rate=0.001)
def train(epoch=1):
for i in range(epoch):
total_loss = np.array([])
for (_, batch) in enumerate(dataset_train):
with tf.GradientTape() as tape:
loss = model(batch)
total_loss = np.append(total_loss, loss)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("epoch:", i, "mean_squared_error:", np.mean(total_loss))
if __name__ == "__main__":
train(10)
|
python-thumbnails/python-thumbnails
|
tests/utils.py
|
Python
|
mit
| 814
| 0
|
# -*- coding: utf-8 -*-
import importlib
import json
import os
def has_installed(dependency):
try:
importlib.import_module(dependency)
return True
except ImportError:
return False
def is_tox_env(env):
if 'VIRTUAL_ENV' in os.environ:
return env in os.environ['VIRTUAL_ENV']
def has_django():
return has_installed('django')
def has_pillow():
return has_installed('PIL.Image')
def has_redis():
return has_installed('redis')
class OverrideSettings(object):
def __init__(self, **settings):
self.settings = settings
def __enter__(self):
os.environ['overridden_settings'] = json.dumps(self.settings)
def __exit__(self, *args, **kwargs):
del os.environ['overridden_settings']
override_settings = OverrideSettings
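# Usage sketch (the setting name is hypothetical):
# with override_settings(THUMBNAIL_ENGINE='dummy'):
#     ...  # code under test can read json.loads(os.environ['overridden_settings'])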
|
BartDeCaluwe/925r
|
ninetofiver/serializers.py
|
Python
|
gpl-3.0
| 422
| 0
|
"""ninetofiver serializers."""
from django.contrib.auth import models as auth_models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django_countries.serializers import CountryFieldMixin
from django.db.models import Q
from rest_framework import serializers
import logging
import datetime
from ninetofiver import models
logger = logging.getLogger(__name__)
|
mrawls/apVisitproc
|
apvisitproc/tests/test_despike.py
|
Python
|
mit
| 2,736
| 0.002558
|
import numpy as np
from apvisitproc import despike
import pytest
import os
DATAPATH = os.path.dirname(__file__)
FILELIST1 = os.path.join(DATAPATH, 'list_of_txt_spectra.txt')
FILELIST2 = os.path.join(DATAPATH, 'list_of_fits_spectra.txt')
@pytest.fixture
def wave_spec_generate():
'''
Read in three small chunks of spectra for testing purposes
'wavelist_speclist_generate' can be used as input to any other test function
that needs access to the variables it returns!
wave1, spec1 are a single chunk of 1d spectrum
wavelist, speclist are lists of three chunks of 1d spectrum
'''
wave1, spec1 = np.loadtxt(os.path.join(DATAPATH, 'spec1test.txt'), unpack=True)
wave2, spec2 = np.loadtxt(os.path.join(DATAPATH, 'spec2test.txt'), unpack=True)
wave3, spec3 = np.loadtxt(os.path.join(DATAPATH, 'spec3test.txt'), unpack=True)
wavelist = [wave1, wave2, wave3]
speclist = [spec1, spec2, spec3]
return wave1, spec1, wavelist, speclist
@pytest.mark.parametrize('filelist, cond', [
(FILELIST1, False),
(FILELIST2, True),
])
def test_read_infiles(filelist, cond):
'''
Test reading in both text and fits files
Each resulting wavelength array should be sorted in ascending order
'''
infilelist, wavelist, speclist = despike.read_infiles(DATAPATH, filelist, cond)
assert len(infilelist) > 0
assert len(infilelist) == len(wavelist)
assert len(wavelist) == len(speclist)
for wave in wavelist:
assert all(value >= 0 for value in wave)
assert list(np.sort(wave)) == list(wave)
assert all(np.equal(np.sort(wave), wave))
def test_simpledespike(wave_spec_generate):
'''
spike condition is met at pixels 15, 16, 17 and 18
so indices 9 through 24, inclusive, should be removed
'''
wave, spec = wave_spec_generate[0], wave_spec_generate[1]
    newwave, newspec = despike.simpledespike(wave, spec, delwindow=6,
stdfactorup=0.7, stdfactordown=3,
plot=False)
assert len(newwave) == len(newspec)
assert len(newwave) <= len(wave)
assert len(newspec) <= len(spec)
assert all(np.equal(np.hstack((wave[0:9], wave[25:])), newwave))
assert all(np.equal(np.hstack((spec[0:9], spec[25:])), newspec))
def test_despike_spectra(wave_spec_generate):
'''
Test that new spectra are shorter than the original because the outliers are gone
'''
wavelist, speclist = wave_spec_generate[2], wave_spec_generate[3]
newwavelist, newspeclist = despike.despike_spectra(wavelist, speclist, type='simple', plot=False)
assert len(newwavelist) == len(wavelist)
assert len(newspeclist) == len(speclist)
|
alex-dot/upwdchg
|
tests/python-tokenreader-test.py
|
Python
|
gpl-3.0
| 7,822
| 0.003328
|
#!/usr/bin/env python3
# -*- mode:python; tab-width:4; c-basic-offset:4; intent-tabs-mode:nil; -*-
# ex: filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab autoindent smartindent
#
# Universal Password Changer (UPwdChg)
# Copyright (C) 2014-2018 Cedric Dufour <http://cedric.dufour.name>
# Author: Cedric Dufour <http://cedric.dufour.name>
#
# The Universal Password Changer (UPwdChg) is free software:
# you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, Version 3.
#
# The Universal Password Changer (UPwdChg) is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details.
#
# SPDX-License-Identifier: GPL-3.0
# License-Filename: LICENSE/GPL-3.0.txt
#
#------------------------------------------------------------------------------
# DEPENDENCIES
#------------------------------------------------------------------------------
# UPwdChg
from UPwdChg import \
TokenReader
# Standard
import unittest as UT
import sys
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class testTokenReader_ReadToken(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
def testPasswordNonceRequest(self):
self.oToken.config('./resources/backend-private.pem', './resources/frontend-public.pem')
self.assertEqual(self.oToken.readToken('./tmp/password-nonce-request.token'), 0)
def testPasswordChange(self):
self.oToken.config('./resources/backend-private.pem', './resources/frontend-public.pem')
self.assertEqual(self.oToken.readToken('./tmp/password-change.token'), 0)
def testPasswordReset(self):
self.oToken.config('./resources/backend-private.pem', './resources/frontend-public.pem')
self.assertEqual(self.oToken.readToken('./tmp/password-reset.token'), 0)
def testPasswordNonce(self):
self.oToken.config('./resources/frontend-private.pem', './resources/backend-public.pem')
self.assertEqual(self.oToken.readToken('./tmp/password-nonce.token'), 0)
class testTokenReader_PasswordNonceRequest(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
self.oToken.config('./resources/backend-private.pem', './resources/frontend-public.pem')
if(self.oToken.readToken('./tmp/password-nonce-request.token')):
self.skipTest('Failed to read token')
def testType(self):
self.assertIn('type', self.oToken.keys())
self.assertEqual(self.oToken['type'], 'password-nonce-request')
def testTimestamp(self):
self.assertIn('timestamp', self.oToken.keys())
self.assertRegex(self.oToken['timestamp'], '^20[0-9]{2}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]Z$')
def testUsername(self):
self.assertIn('username', self.oToken.keys())
self.assertEqual(self.oToken['username'], 'test-Benützername')
class testTokenReader_PasswordChange(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
self.oToken.config('./resources/backend-private.pem', './resources/frontend-public.pem')
if(self.oToken.readToken('./tmp/password-change.token')):
self.skipTest('Failed to read token')
def testType(self):
self.assertIn('type', self.oToken.keys())
self.assertEqual(self.oToken['type'], 'password-change')
def testTimestamp(self):
self.assertIn('timestamp', self.oToken.keys())
self.assertRegex(self.oToken['timestamp'], '^20[0-9]{2}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]Z$')
def testUsername(self):
self.assertIn('username', self.oToken.keys())
self.assertEqual(self.oToken['username'], 'test-Benützername')
def testPasswordNew(self):
self.assertIn('password-new', self.oToken.keys())
self.assertEqual(self.oToken['password-new'], 'test-Paßw0rt_new')
def testPasswordOld(self):
self.assertIn('password-old', self.oToken.keys())
self.assertEqual(self.oToken['password-old'], 'test-Paßw0rt_old')
def testPasswordNonce(self):
self.assertIn('password-nonce', self.oToken.keys())
self.assertEqual(self.oToken['password-nonce'], 'test-Paßw0rt_nonce')
class testTokenReader_PasswordReset(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
self.oToken.config('./resources/backend-private.pem', './resources/frontend-public.pem')
if(self.oToken.readToken('./tmp/password-reset.token')):
self.skipTest('Failed to read token')
def testType(self):
self.assertIn('type', self.oToken.keys())
self.assertEqual(self.oToken['type'], 'password-reset')
def testTimestamp(self):
        self.assertIn('timestamp', self.oToken.keys())
self.assertRegex(self.oToken['timestamp'], '^20[0-9]{2}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]Z$')
def testUsername(self):
self.assertIn('username', self.oToken.keys())
        self.assertEqual(self.oToken['username'], 'test-Benützername')
def testPasswordNew(self):
self.assertIn('password-new', self.oToken.keys())
self.assertEqual(self.oToken['password-new'], 'test-Paßw0rt_new')
def testPasswordNonce(self):
self.assertIn('password-nonce', self.oToken.keys())
self.assertEqual(self.oToken['password-nonce'], 'test-Paßw0rt_nonce')
class testTokenReader_PasswordNonce(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
self.oToken.config('./resources/frontend-private.pem', './resources/backend-public.pem')
if(self.oToken.readToken('./tmp/password-nonce.token')):
self.skipTest('Failed to read token')
def testType(self):
self.assertIn('type', self.oToken.keys())
self.assertEqual(self.oToken['type'], 'password-nonce')
def testTimestamp(self):
self.assertIn('timestamp', self.oToken.keys())
self.assertRegex(self.oToken['timestamp'], '^20[0-9]{2}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]Z$')
def testExpiration(self):
self.assertIn('expiration', self.oToken.keys())
self.assertRegex(self.oToken['expiration'], '^20[0-9]{2}-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]Z$')
def testUsername(self):
self.assertIn('username', self.oToken.keys())
self.assertEqual(self.oToken['username'], 'test-Benützername')
def testPasswordNonceId(self):
self.assertIn('password-nonce-id', self.oToken.keys())
self.assertEqual(self.oToken['password-nonce-id'], 'test')
def testPasswordNonceSecret(self):
self.assertIn('password-nonce-secret', self.oToken.keys())
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
if __name__ == '__main__':
#UT.main()
oTestSuite = UT.TestSuite()
oTestSuite.addTest(UT.makeSuite(testTokenReader_ReadToken))
oTestSuite.addTest(UT.makeSuite(testTokenReader_PasswordNonceRequest))
oTestSuite.addTest(UT.makeSuite(testTokenReader_PasswordChange))
oTestSuite.addTest(UT.makeSuite(testTokenReader_PasswordReset))
oTestSuite.addTest(UT.makeSuite(testTokenReader_PasswordNonce))
oTestResult = UT.TextTestRunner(verbosity=2).run(oTestSuite)
sys.exit(0 if oTestResult.wasSuccessful() else 1)
|
michkol/prx
|
prx_aplikacja/apps.py
|
Python
|
gpl-3.0
| 258
| 0.003876
|
from django.apps import AppConfig
from django.template.base import add_to_builtins
class PrxAppConfig(AppConfig):
    name = 'prx_aplikacja'
verbose_name = 'prx_aplikacja'
def ready(self):
add_to_builtins('prx_aplikacja.templatetags.tagi')
|
blazek/lrs
|
lrs/ui/lrscombomanagerbase.py
|
Python
|
gpl-2.0
| 5,538
| 0.002167
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
LrsPlugin
A QGIS plugin
Linear reference system builder and editor
-------------------
begin : 2013-10-02
copyright : (C) 2013 by Radim Blažek
email : radim.blazek@gmail.com
        Partially based on qgiscombomanager by Denis Rouzaud.
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
 *                                                                         *
 ***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from ..lrs.utils import *
from qgis.PyQt.QtGui import *
# combo is QComboBox or list of QComboBox
class LrsComboManagerBase(QObject):
def __init__(self, comboOrList, **kwargs):
super(LrsComboManagerBase, self).__init__()
if isinstance(comboOrList, list):
self.comboList = comboOrList
else:
self.comboList = [comboOrList] # QComboBox list
self.settingsName = kwargs.get('settingsName')
self.allowNone = kwargs.get('allowNone', False) # allow select none
self.sort = kwargs.get('sort', True) # sort values
# debug ( "sort = %s" % self.sort )
self.defaultValue = kwargs.get('defaultValue', None)
self.model = QStandardItemModel(0, 1, self)
if self.sort:
self.proxy = QSortFilterProxyModel(self)
self.proxy.setSourceModel(self.model)
else:
self.proxy = None
for combo in self.comboList:
if self.proxy:
                combo.setModel(self.proxy)
else:
combo.setModel(self.model)
    # options is a list of [value, label] pairs
self.setOptions(kwargs.get('options', []))
def debug(self, message):
debug("CM(%s): %s" % (self.settingsName, message))
def connectCombos(self):
# https://qgis.org/api/classQgsMapLayerComboBox.html#a7b6a9f46e655c0c48392e33089bbc992
for combo in self.comboList:
combo.currentIndexChanged.connect(self.currentIndexChanged)
combo.activated.connect(self.activated)
def currentIndexChanged(self, idx):
# reset other combos
#self.debug("LrsComboManager currentIndexChanged")
#debug("currentIndexChanged sender = %s" % self.sender())
for combo in self.comboList:
if combo == self.sender():
continue
combo.setCurrentIndex(idx)
def activated(self):
pass
# To be implemented in subclasses, load layers from project, fields from layer, ...
def reload(self):
pass
def clear(self):
self.options = []
self.model.clear()
def setOptions(self, options):
self.options = options
self.model.clear()
if options:
for opt in options:
item = QStandardItem(opt[1])
item.setData(opt[0], Qt.UserRole)
self.model.appendRow(item)
def value(self):
idx = self.comboList[0].currentIndex()
if idx != -1:
return self.comboList[0].itemData(idx, Qt.UserRole)
return None
def writeToProject(self):
idx = self.comboList[0].currentIndex()
val = self.comboList[0].itemData(idx, Qt.UserRole)
#self.debug("writeToProject val = %s" % val)
QgsProject.instance().writeEntry(PROJECT_PLUGIN_NAME, self.settingsName, val)
def readFromProject(self):
val = QgsProject.instance().readEntry(PROJECT_PLUGIN_NAME, self.settingsName)[0]
if val == '':
val = None # to set correctly none
#self.debug("readFromProject val = %s" % val)
for combo in self.comboList:
idx = combo.findData(val, Qt.UserRole)
# debug( "readFromProject settingsName = %s val = %s idx = %s" % ( self.settingsName, val, idx) )
if idx == -1:
idx = combo.findData(self.defaultValue, Qt.UserRole)
combo.setCurrentIndex(idx)
# reset to index -1
def reset(self):
for combo in self.comboList:
if self.defaultValue is not None:
idx = combo.findData(self.defaultValue, Qt.UserRole)
# debug( "defaultValue = %s idx = %s" % ( self.defaultValue, idx ) )
combo.setCurrentIndex(idx)
else:
combo.setCurrentIndex(-1)
def findItemByData(self, data):
        # QStandardItemModel.match() is not suitable: with Qt.MatchExactly it seems to compare objects
        # (it must be a reference to the same object?) and with Qt.MatchFixedString it works like Qt.MatchContains,
        # so we do our own loop
for i in range(self.model.rowCount() - 1, -1, -1):
itemData = self.model.item(i).data(Qt.UserRole)
if itemData == data:
return self.model.item(i)
return None
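# Usage sketch (hypothetical option values): populate the combo(s) with
# [value, label] pairs and persist the selection under settingsName.
# manager = LrsComboManagerBase(combo, settingsName='units',
#                               options=[['m', 'meters'], ['km', 'kilometers']])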
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/metacritic.py
|
Python
|
gpl-3.0
| 2,280
| 0.025877
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
)
class MetacriticIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?metacritic\.com/.+?/trailers/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.metacritic.com/game/playstation-4/infamous-second-son/trailers/3698222',
'info_dict': {
'id': '3698222',
'ext': 'mp4',
'title': 'inFamous: Second Son - inSide Sucker Punch: Smoke & Mirrors',
'description': 'Take a peak behind-the-scenes to see how Sucker Punch brings smoke into the universe of inFAMOUS Second Son on the PS4.',
'duration': 221,
},
'skip': 'Not providing trailers anymore',
}, {
'url': 'http://www.metacritic.com/game/playstation-4/tales-from-the-borderlands-a-telltale-game-series/trailers/5740315',
'info_dict': {
'id': '5740315',
'ext': 'mp4',
'title': 'Tales from the Borderlands - Finale: The Vault of the Traveler',
'description': 'In the final episode of the season, all hell breaks loose. Jack is now in control of Helios\' systems, and he\'s ready to reclaim his rightful place as king of Hyperion (with or without you).',
'duration': 114,
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
# The xml is not well formatted, there are raw '&'
        info = self._download_xml('http://www.metacritic.com/video_data?video=' + video_id,
                                  video_id, 'Downloading info xml', transform_source=fix_xml_ampersands)
clip = next(c for c in info.findall('playList/clip') if c.find('id').text == video_id)
formats = []
for videoFile in clip.findall('httpURI/videoFile'):
rate_str = videoFile.find('rate').text
video_url = videoFile.find('filePath').text
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': rate_str,
'tbr': int(rate_str),
})
self._sort_formats(formats)
description = self._html_search_regex(r'<b>Description:</b>(.*?)</p>',
webpage, 'description', flags=re.DOTALL)
return {
'id': video_id,
'title': clip.find('title').text,
'formats': formats,
'description': description,
'duration': int(clip.find('duration').text),
}
|
softlayer/stack-dev-tools
|
platforms/softlayer.py
|
Python
|
mit
| 4,908
| 0
|
from os import getenv
from time import time, sleep
from core import Platform, Instance
from SoftLayer import Client
from SoftLayer.CCI import CCIManager
from paramiko import SSHClient
class _SuppressPolicy(object):
def missing_host_key(self, client, hostname, key):
pass
class CCIPlatform(Platform):
_required_opts = ['cores', 'memory', 'domain',
'datacenter', 'os_code']
def _on_init(self):
self._client = Client(username=getenv('SL_USERNAME'),
api_key=getenv('SL_API_KEY'))
self._manager = CCIManager(self._client)
def find_instance(self, host_name):
instance = None
host_name = host_name.lower()
for ii in self._manager.list_instances():
fqdn = ii.get('fullyQualifiedDomainName', '')
if fqdn.lower() == host_name:
instance = Instance(id=ii.get('id'), name=fqdn)
break
return instance
def get_instance(self, id):
cci = self._manager.get_instance(id)
return self._cci_to_instance(cci)
def create_instance(self, host_name):
host_bits = host_name.split('.', 1)
host_name = host_bits[0]
domain = host_bits[1] if len(host_bits) >= 2 else self.config('domain')
base_options = {'cpus': self.config('cores'),
'memory': self.config('memory'),
'hostname': host_name,
'domain': domain,
'datacenter': self.config('datacenter'),
'os_code': self.config('os_code')}
print 'creating cci %s/%s' % (host_name, domain)
print base_options
cci = self._manager.create_instance(**base_options)
cci = self._cci_await_ready(cci)
self._cci_install_keys(cci['id'])
return self._cci_to_instance(cci)
def reimage_instance(self, instance):
self._manager.reload_instance(instance.id)
cci = self._manager.get_instance(instance.id)
cci = self._cci_await_transaction_start(cci)
cci = self._cci_await_ready(cci)
self._cci_install_keys(cci['id'])
return self._cci_to_instance(cci)
def delete_instance(self, instance):
self._manager.cancel_instance(instance.id)
self._cci_await_delete(self._manager.get_instance(instance.id))
def instance_ready(self, instance):
cci = self._manager.get_instance(instance.id)
return (cci and 'activeTransaction' not in cci)
def _cci_to_instance(self, cci):
if not cci:
return None
return Instance(id=cci['id'], name=cci['fullyQualifiedDomainName'])
def _cci_await_state(self, cci, state_check, sleep_secs=5):
wait_start = time()
self.log_info('Waiting for %s to change state...' % (cci['id']))
while state_check(cci):
sleep(sleep_secs)
cci = self._manager.get_instance(cci['id'])
self.log_info('...')
self.log_info('Available after %0.3f secs.' % (time() - wait_start))
return cci
def _cci_await_ready(self, cci):
return self._cci_await_state(cci,
lambda c: 'activeTransaction' in c,
sleep_secs=5)
def _cci_await_transaction_start(self, cci):
return self._cci_await_state(cci,
lambda c: 'activeTransaction' not in c,
sleep_secs=2)
def _cci_await_delete(self, cci):
return self._cci_await_state(cci,
lambda c: c and 'id' in c,
sleep_secs=2)
def _get_cci_root_password(self, cci):
passwords = self._manager.get_instance_passwords(cci['id'])
password = None
for p in passwords:
if 'username' in p and p['username'] == 'root':
password = p['password']
break
return password
def _cci_install_keys(self, id):
cci = self._manager.get_instance(id)
password = self._get_cci_root_password(cci)
if not password:
raise Exception('Passwords are not available for instance %s' %
cci['id'])
keys_url = self.config('ssh_key_url')
if not keys_url:
return
client_settings = {'hostname': cci['primaryIpAddress'],
'username': 'root',
'password': password}
client = SSHClient()
client.set_missing_host_key_policy(_SuppressPolicy())
client.connect(look_for_keys=False, **client_settings)
client.exec_command('mkdir -p ~/.ssh')
client.exec_command('wget -T 10 -q -O ~/.ssh/authorized_keys %s' %
keys_url)
client.close()
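# Usage sketch: requires SL_USERNAME / SL_API_KEY in the environment and a
# platform config supplying cores, memory, domain, datacenter and os_code.
# Constructor arguments depend on core.Platform (not shown here), so this is
# only an outline:
# platform = CCIPlatform(...)
# instance = platform.find_instance('host.example.com')
# platform.reimage_instance(instance)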
|
Elima85/bccfccraycaster
|
data/scripts/loadvolume.py
|
Python
|
gpl-2.0
| 213
| 0.004695
|
# Sample script for loading volume data.
import voreen
# usage: voreen.loadVolume(filepath, [name of VolumeSource processor])
voreen.loadVolume(voreen.getBasePath() + "/data/volumes/nucleon.dat", "VolumeSource")
|
olga121/Selenium_Webdriver
|
test_sticker.py
|
Python
|
apache-2.0
| 496
| 0.010081
|
import pytest
from selenium import webdriver
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
    request.addfinalizer(wd.quit)
return wd
def test_example(driver):
driver.get("http://localhost/litecart/")
driver.implicitly_wait(10)
sticker_number = len(driver.find_elements_by_xpath("//div[contains(@class,'sticker')]"))
    product_number = len(driver.find_elements_by_xpath("//*[contains(@href,'products')]"))
assert sticker_number == product_number
|
alfa-addon/addon
|
plugin.video.alfa/servers/youdbox.py
|
Python
|
gpl-3.0
| 822
| 0.008516
|
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
import codecs
def get_video_url(page_url, video_password):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
list = scrapertools.find_single_match(data, 'var urK4Ta4 = ([^\]]+)').replace('[', '').replace('"', '').replace('\\x', '').replace(',', ' ')
list = list.split()[::-1]
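    # Each element is a hex-encoded chunk; the chunks arrive in reverse
    # order, so decode each one and concatenate to rebuild the embedded HTML.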
url =""
for elem in list:
decoded = codecs.decode(elem, "hex")
url += decoded.decode("utf8")
url= scrapertools.find_single_match(url, '<source src="([^"]+)"')
if not url:
url= scrapertools.find_single_match(data, '<source src="([^"]+)"')
video_urls.append(["[youdbox]", url])
return video_urls
|
birkenfeld/elpy
|
elpy/tests/test_server.py
|
Python
|
gpl-3.0
| 14,270
| 0
|
# coding: utf-8
"""Tests for the elpy.server module"""
import os
import tempfile
import unittest
import mock
from elpy import rpc
from elpy import server
from elpy.tests import compat
from elpy.tests.support import BackendTestCase
import elpy.refactor
class ServerTestCase(unittest.TestCase):
def setUp(self):
self.srv = server.ElpyRPCServer()
class BackendCallTestCase(ServerTestCase):
def assert_calls_backend(self, method):
with mock.patch("elpy.server.get_source") as get_source:
with mock.patch.object(self.srv, "backend") as backend:
get_source.return_value = "transformed source"
getattr(self.srv, method)("filename", "source", "offset")
get_source.assert_called_with("source")
getattr(backend, method).assert_called_with(
"filename", "transformed source", "offset"
)
class TestInit(ServerTestCase):
def test_should_not_select_a_backend_by_default(self):
self.assertIsNone(self.srv.backend)
class TestRPCEcho(ServerTestCase):
def test_should_return_arguments(self):
self.assertEqual(("hello", "world"),
self.srv.rpc_echo("hello", "world"))
class TestRPCInit(ServerTestCase):
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_set_project_root(self, RopeBackend, JediBackend):
self.srv.rpc_init({"project_root": "/project/root",
"backend": "rope"})
self.assertEqual("/project/root", self.srv.project_root)
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_initialize_rope(self, RopeBackend, JediBackend):
self.srv.rpc_init({"project_root": "/project/root",
"backend": "rope"})
RopeBackend.assert_called_with("/project/root")
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
    def test_should_initialize_jedi(self, RopeBackend, JediBackend):
self.srv.rpc_init({"project_root": "/project/root",
"backend": "jedi"})
JediBackend.assert_called_with("/project/root")
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_use_rope_if_available_and_requested(
self, RopeBackend, JediBackend):
        RopeBackend.return_value.name = "rope"
JediBackend.return_value.name = "jedi"
self.srv.rpc_init({"project_root": "/project/root",
"backend": "rope"})
self.assertEqual("rope", self.srv.backend.name)
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_use_jedi_if_available_and_requested(
self, RopeBackend, JediBackend):
RopeBackend.return_value.name = "rope"
JediBackend.return_value.name = "jedi"
self.srv.rpc_init({"project_root": "/project/root",
"backend": "jedi"})
self.assertEqual("jedi", self.srv.backend.name)
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_use_rope_if_available_and_nothing_requested(
self, RopeBackend, JediBackend):
RopeBackend.return_value.name = "rope"
JediBackend.return_value.name = "jedi"
self.srv.rpc_init({"project_root": "/project/root",
"backend": None})
self.assertEqual("rope", self.srv.backend.name)
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_use_jedi_if_rope_not_available_and_nothing_requested(
self, RopeBackend, JediBackend):
RopeBackend.return_value.name = "rope"
JediBackend.return_value.name = "jedi"
old_rope = server.ropebackend
server.ropebackend = None
try:
self.srv.rpc_init({"project_root": "/project/root",
"backend": None})
finally:
server.ropebackend = old_rope
self.assertEqual("jedi", self.srv.backend.name)
@mock.patch("elpy.jedibackend.JediBackend")
@mock.patch("elpy.ropebackend.RopeBackend")
def test_should_use_none_if_nothing_available(
self, RopeBackend, JediBackend):
RopeBackend.return_value.name = "rope"
JediBackend.return_value.name = "jedi"
old_rope = server.ropebackend
old_jedi = server.jedibackend
server.ropebackend = None
server.jedibackend = None
try:
self.srv.rpc_init({"project_root": "/project/root",
"backend": None})
finally:
server.ropebackend = old_rope
server.jedibackend = old_jedi
self.assertIsNone(self.srv.backend)
class TestRPCGetCalltip(BackendCallTestCase):
def test_should_call_backend(self):
self.assert_calls_backend("rpc_get_calltip")
def test_should_handle_no_backend(self):
self.srv.backend = None
self.assertIsNone(self.srv.rpc_get_calltip("filname", "source",
"offset"))
class TestRPCGetCompletions(BackendCallTestCase):
def test_should_call_backend(self):
self.assert_calls_backend("rpc_get_completions")
def test_should_handle_no_backend(self):
self.srv.backend = None
self.assertEqual([],
self.srv.rpc_get_completions("filname", "source",
"offset"))
def test_should_sort_results(self):
with mock.patch.object(self.srv, 'backend') as backend:
backend.rpc_get_completions.return_value = [
{'name': '_e'},
{'name': '__d'},
{'name': 'c'},
{'name': 'B'},
{'name': 'a'},
]
expected = list(reversed(backend.rpc_get_completions.return_value))
actual = self.srv.rpc_get_completions("filename", "source",
"offset")
self.assertEqual(expected, actual)
def test_should_uniquify_results(self):
with mock.patch.object(self.srv, 'backend') as backend:
backend.rpc_get_completions.return_value = [
{'name': 'a'},
{'name': 'a'},
]
expected = [{'name': 'a'}]
actual = self.srv.rpc_get_completions("filename", "source",
"offset")
self.assertEqual(expected, actual)
class TestRPCGetCompletionDocs(ServerTestCase):
def test_should_call_backend(self):
with mock.patch.object(self.srv, "backend") as backend:
self.srv.rpc_get_completion_docstring("completion")
(backend.rpc_get_completion_docstring
.assert_called_with("completion"))
def test_should_handle_no_backend(self):
self.srv.backend = None
self.assertIsNone(self.srv.rpc_get_completion_docstring("foo"))
class TestRPCGetCompletionLocation(ServerTestCase):
def test_should_call_backend(self):
with mock.patch.object(self.srv, "backend") as backend:
self.srv.rpc_get_completion_location("completion")
(backend.rpc_get_completion_location
.assert_called_with("completion"))
def test_should_handle_no_backend(self):
self.srv.backend = None
self.assertIsNone(self.srv.rpc_get_completion_location("foo"))
class TestRPCGetDefinition(BackendCallTestCase):
def test_should_call_backend(self):
self.assert_calls_backend("rpc_get_definition")
def test_should_handle_no_backend(self):
self.srv.backend = None
self.assertIsNone(self.srv.rpc_get_definition("filname", "source",
"offset"))
class TestRPCGetDocstring(BackendCallTestCase):
    def test_should_call_backend(self):
        self.assert_calls_backend("rpc_get_docstring")
|
TsinghuaX/edx-platform
|
common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
|
Python
|
agpl-3.0
| 22,068
| 0.001042
|
"""
Tests the crowdsourced hinter xmodule.
"""
from mock import Mock, MagicMock
import unittest
import copy
from xmodule.crowdsource_hinter import CrowdsourceHinterModule
from xmodule.vertical_module import VerticalModule, VerticalDescriptor
from xblock.field_data import DictFieldData
from xblock.fragment import Fragment
from xblock.core import XBlock
from . import get_test_system
import json
class CHModuleFactory(object):
"""
Helps us make a CrowdsourceHinterModule with the specified internal
state.
"""
sample_problem_xml = """
<?xml version="1.0"?>
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown="A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value. The answer is correct if it is within a specified numerical tolerance of the expected answer. Enter the number of fingers on a human hand: = 5 [explanation] If you look at your hand, you can count that you have five fingers. [explanation] " rerandomize="never" showanswer="finished">
<p>A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.</p>
<p>The answer is correct if it is within a specified numerical tolerance of the expected answer.</p>
<p>Enter the number of fingers on a human hand:</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
"""
num = 0
@staticmethod
def next_num():
"""
Helps make unique names for our mock CrowdsourceHinterModule's
"""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create(hints=None,
previous_answers=None,
               user_submissions=None,
               user_voted=None,
moderate=None,
mod_queue=None):
"""
A factory method for making CHM's
"""
# Should have a single child, but it doesn't matter what that child is
field_data = {'data': CHModuleFactory.sample_problem_xml, 'children': [None]}
if hints is not None:
field_data['hints'] = hints
else:
field_data['hints'] = {
'24.0': {'0': ['Best hint', 40],
'3': ['Another hint', 30],
'4': ['A third hint', 20],
'6': ['A less popular hint', 3]},
'25.0': {'1': ['Really popular hint', 100]}
}
if mod_queue is not None:
field_data['mod_queue'] = mod_queue
else:
field_data['mod_queue'] = {
'24.0': {'2': ['A non-approved hint']},
'26.0': {'5': ['Another non-approved hint']}
}
if previous_answers is not None:
field_data['previous_answers'] = previous_answers
else:
field_data['previous_answers'] = [
['24.0', [0, 3, 4]],
['29.0', []]
]
if user_submissions is not None:
field_data['user_submissions'] = user_submissions
else:
field_data['user_submissions'] = ['24.0', '29.0']
if user_voted is not None:
field_data['user_voted'] = user_voted
if moderate is not None:
field_data['moderate'] = moderate
descriptor = Mock(weight='1')
# Make the descriptor have a capa problem child.
capa_descriptor = MagicMock()
capa_descriptor.name = 'capa'
capa_descriptor.displayable_items.return_value = [capa_descriptor]
descriptor.get_children.return_value = [capa_descriptor]
# Make a fake capa module.
capa_module = MagicMock()
capa_module.lcp = MagicMock()
responder = MagicMock()
def validate_answer(answer):
""" A mock answer validator - simulates a numerical response"""
try:
float(answer)
return True
except ValueError:
return False
responder.validate_answer = validate_answer
def compare_answer(ans1, ans2):
""" A fake answer comparer """
return ans1 == ans2
responder.compare_answer = compare_answer
capa_module.lcp.responders = {'responder0': responder}
capa_module.displayable_items.return_value = [capa_module]
system = get_test_system()
# Make the system have a marginally-functional get_module
def fake_get_module(descriptor):
"""
A fake module-maker.
"""
return capa_module
system.get_module = fake_get_module
module = CrowdsourceHinterModule(descriptor, system, DictFieldData(field_data), Mock())
system.xmodule_instance = module
return module
class VerticalWithModulesFactory(object):
"""
Makes a vertical with several crowdsourced hinter modules inside.
Used to make sure that several crowdsourced hinter modules can co-exist
on one vertical.
"""
sample_problem_xml = """<?xml version="1.0"?>
<vertical display_name="Test vertical">
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown=" " rerandomize="never" showanswer="finished">
<p>Test numerical problem.</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown=" " rerandomize="never" showanswer="finished">
<p>Another test numerical problem.</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
</vertical>
"""
num = 0
@staticmethod
def next_num():
"""Increments a global counter for naming."""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create():
"""Make a vertical."""
field_data = {'data': VerticalWithModulesFactory.sample_problem_xml}
system = get_test_system()
descriptor = VerticalDescriptor.from_xml(VerticalWithModulesFactory.sample_problem_xml, system)
module = VerticalModule(system, descriptor, field_data)
return module
class FakeChild(XBlock):
"""
A fake Xmodule.
"""
def __init__(self):
self.runtime = get_test_system()
self.student_view = Mock(return_value=Fragment(self.get_html()))
self.save = Mock()
self.id = 'i4x://this/is/a/fake/id'
def get_html(self):
"""
Return a fake html string.
"""
return u'This is supposed to be test html.'
class CrowdsourceHinterTest(unittest.TestCase):
"""
In the below tests, '24.0' represents a wrong answer, and '42.5' represents
a correct answer.
"""
def test_gethtml(self):
"""
A simple test of get_html - make sure it returns the html of the inner
problem.
"""
mock_module = CHModuleFactory.create()
def f
|
|
pydcs/dcs
|
dcs/terrain/thechannel.py
|
Python
|
lgpl-3.0
| 165,474
| 0.006986
|
# flake8: noqa
import dcs.mapping as mapping
from dcs.terrain.terrain import Airport, Runway, ParkingSlot, Terrain, MapView
from .projections.thechannel import PARAMETERS
class Abbeville_Drucat(Airport):
id = 1
name = "Abbeville Drucat"
tacan = None
unit_zones = []
civilian = False
slot_version = 2
def __init__(self, terrain: Terrain) -> None:
super().__init__(mapping.Point(-81655.472418, 15915.37745, terrain), terrain)
self.runways.append(Runway(20))
self.runways.append(Runway(90))
self.parking_slots.append(ParkingSlot(
crossroad_idx=2, position=mapping.Point(-80735.7109375, 16925.8671875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='02', length=26.0, width=24.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=5, position=mapping.Point(-81904.96875, 18212.265625, self._terrain), large=False, heli=False,
airplanes=True, slot_name='27', length=21.0, width=15.0, height=8.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=6, position=mapping.Point(-81981.75, 18186.1328125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='26', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=7, position=mapping.Point(-82036.9921875, 18140.03125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='25', length=21.0, width=15.0, height=8.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=8, position=mapping.Point(-82104.96875, 18073.689453125, self._terrain), large=False, heli=False,
airplanes=True, slot_name='22', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=9, position=mapping.Point(-82129.5390625, 18024.455078125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='21', length=21.0, width=15.0, height=8.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=10, position=mapping.Point(-82251.1328125, 17997.052734375, self._terrain), large=False, heli=True,
airplanes=True, slot_name='20', length=26.0, width=24.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=11, position=mapping.Point(-82177.9140625, 18076.76953125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='23', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=12, position=mapping.Point(-82156.6171875, 18166.736328125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='24', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=13, position=mapping.Point(-80611.53125, 17394.251953125, self._terrain), large=False, heli=False,
airplanes=True, slot_name='07', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=14, position=mapping.Point(-80539.515625, 17308.9140625, self._terrain), large=False, heli=True,
airplanes=True, slot_name='06', length=26.0, width=22.0, height=11.0, shelter=False))
        self.parking_slots.append(ParkingSlot(
crossroad_idx=15, position=mapping.Point(-80572.734375, 17265.576171875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='05', length=26.0, width=22.0, height=11.0, shelter=False))
        self.parking_slots.append(ParkingSlot(
crossroad_idx=16, position=mapping.Point(-80796.953125, 17540.17578125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='08', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=17, position=mapping.Point(-80770.890625, 17580.666015625, self._terrain), large=False, heli=True,
airplanes=True, slot_name='09', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=18, position=mapping.Point(-80780.75, 17636.404296875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='10', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=19, position=mapping.Point(-80849.515625, 17666.310546875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='11', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=20, position=mapping.Point(-80876.828125, 17769.578125, self._terrain), large=False, heli=False,
airplanes=True, slot_name='12', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=21, position=mapping.Point(-80916.1171875, 17871.734375, self._terrain), large=False, heli=False,
airplanes=True, slot_name='13', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=22, position=mapping.Point(-80914.4921875, 17947.568359375, self._terrain), large=False, heli=False,
airplanes=True, slot_name='14', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=23, position=mapping.Point(-80921.890625, 18065.88671875, self._terrain), large=False, heli=False,
airplanes=True, slot_name='15', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=24, position=mapping.Point(-80939.0234375, 18224.556640625, self._terrain), large=False, heli=False,
airplanes=True, slot_name='16', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=25, position=mapping.Point(-80966.4453125, 18286.35546875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='17', length=21.0, width=15.0, height=8.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=26, position=mapping.Point(-80920.09375, 18382.9296875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='18', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=27, position=mapping.Point(-80843.6640625, 18428.873046875, self._terrain), large=False, heli=True,
airplanes=True, slot_name='19', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=28, position=mapping.Point(-80718.4921875, 17059.05859375, self._terrain), large=False, heli=True,
airplanes=True, slot_name='03', length=26.0, width=24.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=29, position=mapping.Point(-80717.234375, 17166.173828125, self._terrain), large=False, heli=True,
airplanes=True, slot_name='04', length=26.0, width=22.0, height=11.0, shelter=False))
self.parking_slots.append(ParkingSlot(
crossroad_idx=30, position=mapping.Point(-80798.8046875, 16778.943359375, self._terrain), large=False, heli=False,
airplanes=True, slot_name='01', length=26.0, width=24.0, height=11.0, shelter=False))
class Merville_Calonne(Airport):
id = 2
name = "Merville Calonne"
tacan = None
unit_zones = []
civilian = False
slot_version = 2
|
anhaidgroup/py_entitymatching
|
py_entitymatching/feature/autofeaturegen.py
|
Python
|
bsd-3-clause
| 34,022
| 0.001323
|
"""
This module contains functions for auto feature generation.
"""
import logging
import pandas as pd
import six
from py_entitymatching.utils.validation_helper import validate_object_type
from IPython.display import display
import py_entitymatching as em
import py_entitymatching.feature.attributeutils as au
import py_entitymatching.feature.simfunctions as sim
import py_entitymatching.feature.tokenizers as tok
logger = logging.getLogger(__name__)
def get_features(ltable, rtable, l_attr_types, r_attr_types,
attr_corres, tok_funcs, sim_funcs):
"""
This function will automatically generate a set of features based on the
attributes of the input tables.
Specifically, this function will go through the attribute
correspondences between the input tables. For each correspondence ,
it examines the types of the involved attributes, then apply the
appropriate tokenizers and sim functions to generate all appropriate
features for this correspondence.
Args:
ltable,rtable (DataFrame): The pandas DataFrames for which the
features must be generated.
l_attr_types,r_attr_types (dictionary): The attribute types for the
input DataFrames. Typically this is generated using the
function 'get_attr_types'.
attr_corres (dictionary): The attribute correspondences between the
input DataFrames.
tok_funcs (dictionary): A Python dictionary containing tokenizer
functions.
sim_funcs (dictionary): A Python dictionary containing similarity
functions.
Returns:
A pandas DataFrame containing automatically generated features.
Specifically, the DataFrame contains the following attributes:
'feature_name', 'left_attribute', 'right_attribute',
'left_attr_tokenizer', 'right_attr_tokenizer', 'simfunction',
'function', 'function_source', 'is_auto_generated'.
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_attr_types` is not of type
python dictionary.
        AssertionError: If `r_attr_types` is not of type
            python dictionary.
AssertionError: If `attr_corres` is not of type
python dictionary.
AssertionError: If `sim_funcs` is not of type
python dictionary.
AssertionError: If `tok_funcs` is not of type
python dictionary.
        AssertionError: If the `ltable` and `rtable` order is not the
            same as mentioned in the `l_attr_types`/`r_attr_types` and
            `attr_corres`.
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='ID')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='ID')
>>> match_t = em.get_tokenizers_for_matching()
>>> match_s = em.get_sim_funs_for_matching()
>>> atypes1 = em.get_attr_types(A) # don't need, if atypes1 exists from blocking step
>>> atypes2 = em.get_attr_types(B) # don't need, if atypes2 exists from blocking step
>>> match_c = em.get_attr_corres(A, B)
>>> match_f = em.get_features(A, B, atypes1, atype2, match_c, match_t, match_s)
See Also:
:meth:`py_entitymatching.get_attr_corres`, :meth:`py_entitymatching.get_attr_types`,
:meth:`py_entitymatching.get_sim_funs_for_blocking`,
:meth:`py_entitymatching.get_tokenizers_for_blocking`,
:meth:`py_entitymatching.get_sim_funs_for_matching`,
:meth:`py_entitymatching.get_tokenizers_for_matching`
Note:
In the output DataFrame, two
        attributes demand some explanation: (1) function, and (2)
        is_auto_generated. The function attribute points to the actual Python
        function that implements the feature. Specifically, the function takes in two
tuples (one from each input table) and returns a numeric value. The
attribute is_auto_generated contains either True or False. The flag
is True only if the feature is automatically generated by py_entitymatching.
This is important because this flag is used to make some assumptions
about the semantics of the similarity function used and use that
information for scaling purposes.
"""
# Validate input parameters
# # We expect the ltable to be of type pandas DataFrame
validate_object_type(ltable, pd.DataFrame, 'Input ltable')
# # We expect the rtable to be of type pandas DataFrame
validate_object_type(rtable, pd.DataFrame, 'Input rtable')
# # We expect the l_attr_types to be of type python dictionary
validate_object_type(l_attr_types, dict, 'Input l_attr_types')
# # We expect the r_attr_types to be of type python dictionary
validate_object_type(r_attr_types, dict, 'Input r_attr_types')
# # We expect the attr_corres to be of type python dictionary
validate_object_type(attr_corres, dict, 'Input attr_corres')
# # We expect the tok_funcs to be of type python dictionary
validate_object_type(tok_funcs, dict, 'Input tok_funcs')
# # We expect the sim_funcs to be of type python dictionary
validate_object_type(sim_funcs, dict, 'Input sim_funcs')
# We expect the table order to be same in l/r_attr_types and attr_corres
if not _check_table_order(ltable, rtable,
l_attr_types, r_attr_types, attr_corres):
logger.error('Table order is different than what is mentioned '
'in l/r attr_types and attr_corres')
raise AssertionError('Table order is different than what is mentioned '
'in l/r attr_types and attr_corres')
# Initialize output feature dictionary list
feature_dict_list = []
# Generate features for each attr. correspondence
for ac in attr_corres['corres']:
l_attr_type = l_attr_types[ac[0]]
r_attr_type = r_attr_types[ac[1]]
# Generate a feature only if the attribute types are same
if l_attr_type != r_attr_type:
logger.info('py_entitymatching types: %s type (%s) and %s type (%s) '
'are different.'
'If you want to set them to be same and '
'generate features, '
'update output from get_attr_types and '
'use get_features command.\n.'
% (ac[0], l_attr_type, ac[1], r_attr_type))
# features_1 = _get_features_for_type(l_attr_type)
# features_2 = _get_features_for_type(r_attr_type)
# features = set(features_1).union(features_2)
continue
# Generate features
features = _get_features_for_type(l_attr_type)
# Convert features to function objects
fn_objs = _conv_func_objs(features, ac, tok_funcs, sim_funcs)
# Add the function object to a feature list.
feature_dict_list.append(fn_objs)
# Create a feature table
feature_table = pd.DataFrame(flatten_list(feature_dict_list))
# Project out only the necessary columns.
feature_table = feature_table[['feature_name', 'left_attribute',
'right_attribute', 'left_attr_tokenizer',
'right_attr_tokenizer',
'simfunction', 'function',
'function_source', 'is_auto_generated']]
# Return the feature table.
return feature_table
def get_features_for_blocking(ltable, rtable, validate_inferred_attr_types=True):
"""
This function automatically generates features that can be used for
blocking purposes.
Args:
ltable,rtable (DataFrame): The pandas DataFrames for which the
features are to be generated.
validate_inferred_attr_types (boolean): A flag to indicate whether to
show the user the inferred attribute types and the features
chosen for those types.
Returns:
        A pandas DataFrame containing automatically generated features.
|
saebyn/django-classifieds
|
classifieds/forms/fields.py
|
Python
|
bsd-3-clause
| 1,278
| 0.005477
|
from django.forms import CharField, ValidationError
from django.forms.fields import EMPTY_VALUES
import re, string
class TinyMCEField(CharField):
def clean(self, value):
"Validates max_length and min_length. Returns a Unicode object."
if value in EMPTY_VALUES:
return u''
stripped_value = re.sub(r'<.*?>', '', value)
        stripped_value = string.replace(stripped_value, '&nbsp;', ' ')
        stripped_value = string.replace(stripped_value, '&lt;', '<')
        stripped_value = string.replace(stripped_value, '&gt;', '>')
        stripped_value = string.replace(stripped_value, '&amp;', '&')
        stripped_value = string.replace(stripped_value, '\n', '')
        stripped_value = string.replace(stripped_value, '\r', '')
value_length = len(stripped_value)
value_length -= 1
if self.max_length is not None and value_length > self.max_length:
raise ValidationError(self.error_messages['max_length'] % {'max': self.max_length, 'length': value_length})
if self.min_length is not None and value_length < self.min_length:
raise ValidationError(self.error_messages['min_length'] % {'min': self.min_length, 'length': value_length})
return value
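# Minimal usage sketch (hypothetical form; names are illustrative only):
#
#   from django import forms
#
#   class AdForm(forms.Form):
#       description = TinyMCEField(max_length=500, min_length=10)
#
# clean() strips tags and entities before checking lengths, so the limits
# apply to the visible text rather than to the raw HTML markup.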
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/scout/trap/shared_trap_webber.py
|
Python
|
mit
| 444
| 0.047297
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/scout/trap/shared_trap_webber.iff"
result.attribute_template_id = -1
result.stfName("item_n","trap_webber")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
mmllnr/plugin.video.xstream
|
resources/lib/gui/hoster.py
|
Python
|
gpl-3.0
| 8,657
| 0.005198
|
# -*- coding: utf-8 -*-
from resources.lib.handler.jdownloaderHandler import cJDownloaderHandler
from resources.lib.download import cDownload
from resources.lib.handler.hosterHandler import cHosterHandler
from resources.lib.gui.gui import cGui
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.player import cPlayer
from resources.lib.handler.requestHandler import cRequestHandler
import urlresolver
import logger
class cHosterGui:
SITE_NAME = 'cHosterGui'
    # step 1 - wrap bGetRedirectUrl in a separate options object
def showHoster(self, oGui, oHoster, sMediaUrl, bGetRedirectUrl = False):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
oGuiElement.setFunction('showHosterMenu')
oGuiElement.setTitle(oHoster.getDisplayName())
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('sMediaUrl', sMediaUrl)
oOutputParameterHandler.addParameter('sHosterIdentifier', oHoster.getPluginIdentifier())
oOutputParameterHandler.addParameter('bGetRedirectUrl', bGetRedirectUrl)
oOutputParameterHandler.addParameter('sFileName', oHoster.getFileName())
oGui.addFolder(oGuiElement, oOutputParameterHandler)
# step 2
def showHosterMenu(self):
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sHosterIdentifier = oInputParameterHandler.getValue('sHosterIdentifier')
sMediaUrl = oInputParameterHandler.getValue('sMediaUrl')
bGetRedirectUrl = oInputParameterHandler.getValue('bGetRedirectUrl')
        sFileName = oInputParameterHandler.getValue('sFileName')
oHoster = cHosterHandler().getHoster(sHosterIdentifier)
oHoster.setFileName(sFileName)
self.showHosterMenuDirect(oGui, oHoster, sMediaUrl, bGetRedirectUrl)
oGui.setEndOfDirectory()
def showHosterMenuDirect(self, oGui, oHoster, sMediaUrl, bGetRedirectUrl=False, sFileName=''):
# play
self.__showPlayMenu(oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName)
# playlist
self.__showPlaylistMenu(oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName)
# download
self.__showDownloadMenu(oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName)
# JD
self.__showJDMenu(oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName)
def __showPlayMenu(self, oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName=''):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
oGuiElement.setFunction('play')
oGuiElement.setTitle('play')
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('sMediaUrl', sMediaUrl)
oOutputParameterHandler.addParameter('bGetRedirectUrl', bGetRedirectUrl)
oOutputParameterHandler.addParameter('sFileName', sFileName)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
def __showDownloadMenu(self, oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName=''):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
oGuiElement.setFunction('download')
        oGuiElement.setTitle('download via XBMC')
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('sMediaUrl', sMediaUrl)
oOutputParameterHandler.addParameter('bGetRedirectUrl', bGetRedirectUrl)
oOutputParameterHandler.addParameter('sFileName', sFileName)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
def __showJDMenu(self, oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName=''):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
        oGuiElement.setTitle('send to JDownloader')
oGuiElement.setFunction('sendToJDownloader')
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('sMediaUrl', sMediaUrl)
oOutputParameterHandler.addParameter('bGetRedirectUrl', bGetRedirectUrl)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
def __showPlaylistMenu(self, oGui, sMediaUrl, oHoster, bGetRedirectUrl, sFileName=''):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
oGuiElement.setFunction('addToPlaylist')
oGuiElement.setTitle('add to playlist')
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('sMediaUrl', sMediaUrl)
oOutputParameterHandler.addParameter('bGetRedirectUrl', bGetRedirectUrl)
oOutputParameterHandler.addParameter('sFileName', sFileName)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
def play(self):
#oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sMediaUrl = oInputParameterHandler.getValue('sMediaUrl')
bGetRedirectUrl = oInputParameterHandler.getValue('bGetRedirectUrl')
sFileName = oInputParameterHandler.getValue('sFileName')
if (bGetRedirectUrl == 'True'):
sMediaUrl = self.__getRedirectUrl(sMediaUrl)
logger.info('call play: ' + sMediaUrl)
sLink = urlresolver.resolve(sMediaUrl)
if (sLink != False):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
oGuiElement.setMediaUrl(sLink)
oGuiElement.setTitle(sFileName)
oPlayer = cPlayer()
oPlayer.clearPlayList()
oPlayer.addItemToPlaylist(oGuiElement)
oPlayer.startPlayer()
return
#except:
# logger.fatal('could not load plugin: ' + sHosterFileName)
#oGui.setEndOfDirectory()
def addToPlaylist(self):
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sMediaUrl = oInputParameterHandler.getValue('sMediaUrl')
bGetRedirectUrl = oInputParameterHandler.getValue('bGetRedirectUrl')
sFileName = oInputParameterHandler.getValue('sFileName')
if (bGetRedirectUrl == 'True'):
sMediaUrl = self.__getRedirectUrl(sMediaUrl)
logger.info('call play: ' + sMediaUrl)
sLink = urlresolver.resolve(sMediaUrl)
if (sLink != False):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(self.SITE_NAME)
oGuiElement.setMediaUrl(sLink)
oGuiElement.setTitle(sFileName)
oPlayer = cPlayer()
oPlayer.addItemToPlaylist(oGuiElement)
            oGui.showInfo('Playlist', 'Stream was added', 5)
            return
        oGui.showError('Playlist', 'Stream was not added', 5)
return False
#except:
# logger.fatal('could not load plugin: ' + sHosterFileName)
#oGui.setEndOfDirectory()
def download(self):
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sMediaUrl = oInputParameterHandler.getValue('sMediaUrl')
        bGetRedirectUrl = oInputParameterHandler.getValue('bGetRedirectUrl')
sFileName = oInputParameterHandler.getValue('sFileName')
if (bGetRedirectUrl == 'True'):
sMediaUrl = self.__getRedirectUrl(sMediaUrl)
logger.info('call download: ' + sMediaUrl)
sLink = urlresolver.resolve(sMediaUrl)
if (sLink != False):
oDownload = cDownload()
oDownload.download(sLink, 'Stream')
return
#except:
# logger.fatal('could not load plugin: ' + sHosterFileName)
oGui.setEndOfDirectory()
def sendToJDownloader(self):
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sHosterIdentifier = oInputParameterHandler.getValue('sHosterIdentifier')
sMediaUrl = oInputParameterHandler.getValue('sMediaUrl')
        bGetRedirectUrl = oInputParameterHandler.getValue('bGetRedirectUrl')
|
schwardo/chicago47-sms
|
tests/test_core.py
|
Python
|
mit
| 2,358
| 0.004665
|
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from datetime import datetime
from datetime import date
from twilio.rest.resources import parse_date
from twilio.rest.resources import transform_params
from twilio.rest.resources import convert_keys
from twilio.rest.resources import convert_case
from twilio.rest.resources import normalize_dates
class CoreTest(unittest.TestCase):
def test_date(self):
d = date(2009,10,10)
self.assertEquals(parse_date(d), "2009-10-10")
def test_datetime(self):
d = datetime(2009,10,10)
self.assertEquals(parse_date(d), "2009-10-10")
    def test_string_date(self):
        d = "2009-10-10"
        self.assertEquals(parse_date(d), "2009-10-10")
    def test_none_date(self):
        d = None
        self.assertEquals(parse_date(d), None)
    def test_false_date(self):
        d = False
        self.assertEquals(parse_date(d), None)
def test_fparam(self):
d = {"HEY": None, "YOU": 3}
ed = {"YOU":3}
self.assertEquals(transform_params(d), ed)
def test_fparam_booleans(self):
d = {"HEY": None, "YOU":
|
3, "Activated": False}
ed = {"YOU":3, "Activated": "false"}
self.assertEquals(transform_params(d), ed)
def test_normalize_dates(self):
@normalize_dates
def foo(on=None, before=None, after=None):
return {
"on": on,
"before": before,
"after": after,
}
d = foo(on="2009-10-10", before=date(2009,10,10),
after=datetime(2009,10,10
|
))
self.assertEquals(d["on"], "2009-10-10")
self.assertEquals(d["after"], "2009-10-10")
self.assertEquals(d["before"], "2009-10-10")
def test_convert_case(self):
self.assertEquals(convert_case("from_"), "From")
self.assertEquals(convert_case("to"), "To")
self.assertEquals(convert_case("frienldy_name"), "FrienldyName")
def test_convert_keys(self):
d = {
"from_": 0,
"to": 0,
"friendly_name": 0,
"ended": 0,
}
ed = {
"From": 0,
"To": 0,
"FriendlyName": 0,
"EndTime": 0,
}
self.assertEquals(ed, convert_keys(d))
|
skyoo/jumpserver
|
apps/terminal/migrations/0025_auto_20200810_1735.py
|
Python
|
gpl-2.0
| 543
| 0.001842
|
# Generated by Django 2.2.13 on 2020-08-10 09:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('terminal', '0024_auto_20200715_1713'),
]
operations = [
migrations.AlterField(
model_name='session',
name='protocol',
field=models.CharField(choices=[('ssh', 'ssh'), ('rdp', 'rdp'), ('vnc', 'vnc'), ('telnet', 'telnet'), ('mysql', 'mysql'), ('k8s', 'kubernetes')], db_index=True, default='ssh', max_length=8),
),
]
|
|
indexofire/gork
|
src/gork/application/know/plugins/attachments/views.py
|
Python
|
mit
| 13,497
| 0.003853
|
# -*- coding: utf-8 -*-
from django.contrib import messages
from django.db.models import Q
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import redirect, get_object_or_404
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from know.core.http import send_file
from know.decorators import get_article, response_forbidden
from know.plugins.attachments import models, settings, forms
from know.views.mixins import ArticleMixin
class AttachmentView(ArticleMixin, FormView):
form_class = forms.AttachmentForm
template_name = "know/plugins/attachments/index.html"
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, *args, **kwargs):
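        # Moderators see all non-deleted attachments (and get the archive
        # upload form); other users only see the .active() queryset.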
if article.can_moderate(request.user):
self.attachments = models.Attachment.objects.filter(
articles=article, current_revision__deleted=False
).exclude(
current_revision__file=None
).order_by('original_filename')
self.form_class = forms.AttachmentArcihveForm
else:
self.attachments = models.Attachment.objects.active().filter(articles=article)
# Fixing some weird transaction issue caused by adding commit_manually to form_valid
return super(AttachmentView, self).dispatch(request, article, *args, **kwargs)
def form_valid(self, form):
if (self.request.user.is_anonymous() and not settings.ANONYMOUS or not self.article.can_write(self.request.user) or self.article.current_revision.locked):
return response_forbidden(self.request, self.article, self.urlpath)
attachment_revision = form.save()
if isinstance(attachment_revision, list):
messages.success(self.request, _(u'Successfully added: %s') % (", ".join([ar.get_filename() for ar in attachment_revision])))
else:
messages.success(self.request, _(u'%s was successfully added.') % attachment_revision.get_filename())
return redirect("know:attachments_index", path=self.urlpath.path, article_id=self.article.id)
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs['article'] = self.article
kwargs['request'] = self.request
return kwargs
def get_context_data(self, **kwargs):
kwargs['attachments'] = self.attachments
kwargs['deleted_attachments'] = models.Attachment.objects.filter(articles=self.article, current_revision__deleted=True)
kwargs['search_form'] = forms.SearchForm()
kwargs['selected_tab'] = 'attachments'
kwargs['anonymous_disallowed'] = self.request.user.is_anonymous() and not settings.ANONYMOUS
return super(AttachmentView, self).get_context_data(**kwargs)
class AttachmentHistoryView(ArticleMixin, TemplateView):
template_name = "know/plugins/attachments/history.html"
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, attachment_id, *args, **kwargs):
if article.can_moderate(request.user):
self.attachment = get_object_or_404(models.Attachment, id=attachment_id, articles=article)
else:
self.attachment = get_object_or_404(models.Attachment.objects.active(), id=attachment_id, articles=article)
return super(AttachmentHistoryView, self).dispatch(request, article, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['attachment'] = self.attachment
kwargs['revisions'] = self.attachment.attachmentrevision_set.all().order_by('-revision_number')
kwargs['selected_tab'] = 'attachments'
return super(AttachmentHistoryView, self).get_context_data(**kwargs)
class AttachmentReplaceView(ArticleMixin, FormView):
form_class = forms.AttachmentForm
template_name = "know/plugins/attachments/replace.html"
@method_decorator(get_article(can_write=True, not_locked=True))
def dispatch(self, request, article, attachment_id, *args, **kwargs):
if request.user.is_anonymous() and not settings.ANONYMOUS:
return response_forbidden(request, article, kwargs.get('urlpath', None))
if article.can_moderate(request.user):
self.attachment = get_object_or_404(models.Attachment, id=attachment_id, articles=article)
else:
self.attachment = get_object_or_404(models.Attachment.objects.active(), id=attachment_id, articles=article)
return super(AttachmentReplaceView, self).dispatch(request, article, *args, **kwargs)
def form_valid(self, form):
try:
attachment_revision = form.save(commit=False)
attachment_revision.attachment = self.attachment
attachment_revision.set_from_request(self.request)
attachment_revision.previous_revision = self.attachment.current_revision
attachment_revision.save()
self.attachment.current_revision = attachment_revision
self.attachment.save()
messages.success(self.request, _(u'%s uploaded and replaces old attachment.') % attachment_revision.get_filename())
except models.IllegalFileExtension, e:
messages.error(self.request, _(u'Your file could not be saved: %s') % e)
return redirect("know:attachments_replace", attachment_id=self.attachment.id,
path=self.urlpath.path, article_id=self.article.id)
except Exception:
messages.error(self.request, _(u'Your file could not be saved, probably because of a permission error on the web server.'))
return redirect("know:attachments_replace", attachment_id=self.attachment.id,
path=self.urlpath.path, article_id=self.article.id)
return redirect("know:attachments_index", path=self.urlpath.path, article_id=self.article.id)
def get_form(self, form_class):
form = FormView.get_form(self, form_class)
form.fields['file'].help_text = _(u'Your new file will automatically be renamed to match the file already present. Files with different extensions are not allowed.')
return form
def get_initial(self, **kwargs):
return {'description': self.attachment.current_revision.description}
def get_context_data(self, **kwargs):
        kwargs['attachment'] = self.attachment
        kwargs['selected_tab'] = 'attachments'
return super(AttachmentReplaceView, self).get_context_data(**kwargs)
class AttachmentDownloadView(ArticleMixin, View):
@method_decorator(get_article(can_read=True))
def dispatch(self, request, article, attachment_id, *args, **kwargs):
        if article.can_moderate(request.user):
self.attachment = get_object_or_404(models.Attachment, id=attachment_id, articles=article)
else:
self.attachment = get_object_or_404(models.Attachment.objects.active(), id=attachment_id, articles=article)
revision_id = kwargs.get('revision_id', None)
if revision_id:
self.revision = get_object_or_404(models.AttachmentRevision, id=revision_id, attachment__articles=article)
else:
self.revision = self.attachment.current_revision
return super(AttachmentDownloadView, self).dispatch(request, article, *args, **kwargs)
def get(self, request, *args, **kwargs):
if self.revision:
if settings.USE_LOCAL_PATH:
try:
return send_file(request, self.revision.file.path,
self.revision.created, self.attachment.original_filename)
except OSError:
pass
else:
return HttpResponseRedirect(self.revision.file.url)
raise Http404
class AttachmentChangeRevisionView(ArticleMixin, View):
form_class = forms.AttachmentForm
template_name = "know/plugins/attachments/replace.html"
@method_decorator(get_article(can_write=True, not_locked=True))
|
haaspt/whatsnew
|
main.py
|
Python
|
mit
| 2,815
| 0.006039
|
import click
import newsfeeds
import random
import sys
from config import GlobalConfig
def mixer(full_story_list, sample_number):
"""Selects a random sample of stories from the full list to display to the user.
Number of stories is set in config.py
Todo: Add argument support for number of stories to display
"""
mixed_story_list = random.sample(set(full_story_list), sample_number)
return mixed_story_list
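# e.g. mixer(story_list, 5) returns 5 distinct randomly chosen stories;
# random.sample raises ValueError if fewer than 5 unique stories exist.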
def default_display(list_of_stories):
"""Displays a set of stories in the following format:
n - Story Title -- OutletName -- Section
Story abstract: Lorem ipsum dolor sit amet
"""
index_num = 0
for story in list_of_stories:
index_num += 1
click.secho('%r - ' % index_num, bold=True, nl=False)
click.secho('%s ' % story.title, fg=option.headline_color, bold=True, nl=False)
click.secho('-- %s -- ' % story.source, fg=option.source_color, bold=True, nl=False)
click.secho('%s' % story.section, fg=option.section_color)
click.secho('Story abstract: %s' % story.abstract, fg=option.abstract_color)
click.echo()
if index_num > 0:
        exit_now = False
        while exit_now != True:
            click.secho("Select an index number to go to story, or [Enter] to exit: ",
                        fg=option.prompt_color, bold=True, nl=False)
raw_selection = input()
if raw_selection.isdigit():
selection = int(raw_selection) - 1
if selection <= index_num - 1:
story = mixed_story_list[selection]
click.launch(story.url)
if option.prompt_until_exit == True:
pass
                    else:
                        return exit_now == True
else:
click.secho("Invalid entry", fg='red')
if option.prompt_until_exit == True:
pass
else:
return exit_now == True
elif raw_selection == '':
return exit_now == True
else:
click.secho("Invalid entry", fg='red')
if option.prompt_until_exit == True:
pass
else:
return exit_now == True
else:
click.secho("No recent headlines to display", fg=option.prompt_color, bold=True, nl=False)
click.echo()
def main():
global option
option = GlobalConfig()
click.echo("Loading the news...")
story_list = newsfeeds.feeder()
global exit_now
exit_now = False
click.clear()
global mixed_story_list
mixed_story_list = mixer(story_list, option.article_limit)
default_display(mixed_story_list)
if __name__ == '__main__':
main()
|
rtulke/ceph-deploy
|
ceph_deploy/tests/test_install.py
|
Python
|
mit
| 1,312
| 0
|
from mock import Mock
from ceph_deploy import install
class TestSanitizeArgs(object):
    def setup(self):
        self.args = Mock()
# set the default behavior we set in cli.py
self.args.default_release = False
self.args.stable = None
    def test_args_release_not_specified(self):
self.args.release = None
result = install.sanitize_args(self.args)
# XXX
# we should get `args.release` to be the latest release
# but we don't want to be updating this test every single
        # time there is a new default value, and we can't programmatically
# change that. Future improvement: make the default release a
# variable in `ceph_deploy/__init__.py`
assert result.default_release is True
def test_args_release_is_specified(self):
self.args.release = 'dumpling'
result = install.sanitize_args(self.args)
assert result.default_release is False
def test_args_release_stable_is_used(self):
self.args.stable = 'dumpling'
result = install.sanitize_args(self.args)
assert result.release == 'dumpling'
def test_args_stable_is_not_used(self):
self.args.release = 'dumpling'
result = install.sanitize_args(self.args)
assert result.stable is None
|
cstipkovic/spidermonkey-research
|
testing/mozharness/scripts/gaia_unit.py
|
Python
|
mpl-2.0
| 4,408
| 0.004537
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import os
import sys
import glob
import subprocess
import json
# load modules from parent dir
sys.path.insert(1, os.path.dirname(sys.path[0]))
from mozharness.mozilla.testing.gaia_test import GaiaTest
from mozharness.mozilla.testing.unittest import TestSummaryOutputParserHelper
class GaiaUnitTest(GaiaTest):
def __init__(self, require_config_file=False):
GaiaTest.__init__(self, require_config_file)
def pull(self, **kwargs):
GaiaTest.pull(self, **kwargs)
def run_tests(self):
"""
Run the unit test suite.
"""
dirs = self.query_abs_dirs()
self.make_node_modules()
# make the gaia profile
self.make_gaia(dirs['abs_gaia_dir'],
self.config.get('xre_path'),
xre_url=self.config.get('xre_url'),
debug=True)
# build the testrunner command arguments
python = self.query_python_path('python')
cmd = [python, '-u', os.path.join(dirs['abs_runner_dir'],
'gaia_unit_test',
'main.py')]
executable = 'firefox'
if 'b2g' in self.binary_path:
executable = 'b2g-bin'
profile = os.path.join(dirs['abs_gaia_dir'], 'profile-debug')
binary = os.path.join(os.path.dirname(self.binary_path), executable)
cmd.extend(self._build_arg('--binary', binary))
cmd.extend(self._build_arg('--profile', profile))
cmd.extend(self._build_arg('--symbols-path', self.symbols_path))
        cmd.extend(self._build_arg('--browser-arg', self.config.get('browser_arg')))
# Add support for chunking
if self.config.get('total_chunks') and self.config.get('this_chunk'):
chunker = [ os.path.join(dirs['abs_gaia_dir'], 'bin', 'chunk'),
self.config.get('total_chunks'), self.config.get('this_chunk') ]
disabled_tests = []
            disabled_manifest = os.path.join(dirs['abs_runner_dir'],
'gaia_unit_test',
'disabled.json')
with open(disabled_manifest, 'r') as m:
try:
disabled_tests = json.loads(m.read())
except:
print "Error while decoding disabled.json; please make sure this file has valid JSON syntax."
sys.exit(1)
# Construct a list of all tests
unit_tests = []
for path in ('apps', 'tv_apps'):
test_root = os.path.join(dirs['abs_gaia_dir'], path)
full_paths = glob.glob(os.path.join(test_root, '*/test/unit/*_test.js'))
unit_tests += map(lambda x: os.path.relpath(x, test_root), full_paths)
# Remove the tests that are disabled
active_unit_tests = filter(lambda x: x not in disabled_tests, unit_tests)
# Chunk the list as requested
tests_to_run = subprocess.check_output(chunker + active_unit_tests).strip().split(' ')
cmd.extend(tests_to_run)
output_parser = TestSummaryOutputParserHelper(config=self.config,
log_obj=self.log_obj,
error_list=self.error_list)
upload_dir = self.query_abs_dirs()['abs_blob_upload_dir']
if not os.path.isdir(upload_dir):
self.mkdir_p(upload_dir)
env = self.query_env()
env['MOZ_UPLOAD_DIR'] = upload_dir
# I don't like this output_timeout hardcode, but bug 920153
code = self.run_command(cmd, env=env,
output_parser=output_parser,
output_timeout=1760)
output_parser.print_summary('gaia-unit-tests')
self.publish(code)
if __name__ == '__main__':
gaia_unit_test = GaiaUnitTest()
gaia_unit_test.run_and_exit()
|
invenia/shepherd
|
shepherd/common/exceptions.py
|
Python
|
mpl-2.0
| 911
| 0
|
class LoggingException(Exception):
def __init__(self, message, logger):
Exception.__init__(self, message)
if logger:
logger.error(message)
class ConfigError(LoggingException):
def __init__(self, message, error=None, logger=None):
LoggingException.__init__(self, message, logger)
self.error = error
class ManifestError(LoggingException):
def __init__(self, message, errors=None, logger=None):
LoggingException.__init__(self, message, logger)
self.errors = errors
class StackError(LoggingException):
def __init__(self, message, errors=None, logger=None):
LoggingException.__init__(self, message, logger)
self.errors = errors
class PluginError(LoggingException):
    def __init__(self, message, errors=None, logger=None):
        LoggingException.__init__(self, message, logger)
self.errors = errors
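# Usage sketch (hypothetical; any object with an .error() method works as
# the logger, e.g. a stdlib logging.Logger):
#
#   import logging
#   log = logging.getLogger('shepherd')
#   raise ConfigError('missing setting', error=KeyError('region'), logger=log)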
|
kpi-web-guild/django-girls-blog-DrEdi
|
main/tests/test_models.py
|
Python
|
mit
| 2,406
| 0.002909
|
"""Tests for models."""
from unittest.mock import patch
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from main.models import Post, Comment
class ModelPostTest(TestCase):
"""Main class for testing Post models of this project."""
def setUp(self):
"""Prepare data for testing."""
self.user = User.objects.create(username='testuser')
self.test_post = Post.objects.create(author=self.user, title='Test', text='superText')
    def test_post_rendering(self):
"""Post is rendered as its title."""
self.assertEqual(str(self.test_post), self.test_post.title)
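    # timezone.now is frozen to 2016-04-01 below, so publish() must stamp
    # exactly that datetime on published_date.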
@patch('django.utils.timezone.now', lambda: datetime(day=1, month=4, year=2016,
tzinfo=timezone.get_current_timezone()))
def test_post_publish_method(self):
"""Publish method working ok."""
        self.test_post.publish()
        self.assertEqual(self.test_post.published_date, datetime(day=1, month=4, year=2016,
tzinfo=timezone.get_current_timezone()))
def tearDown(self):
"""Clean data for new test."""
del self.user
del self.test_post
class ModelCommentTest(TestCase):
"""Main class for testing Comment models of this project."""
def setUp(self):
"""Prepare data for testing."""
self.user = User.objects.create(username='testuser')
self.test_post = Post.objects.create(author=self.user, title='Test', text='superText')
self.comment = Comment.objects.create(post=self.test_post, author=self.user.username, text='superComment',
is_approved=False)
def test_comment_rendering(self):
"""Comment is rendered as its title."""
self.assertEqual(str(self.comment), self.comment.text)
def test_comment_approve(self):
"""Audit for right work of publish method in comment models."""
self.comment.is_approved = False
self.comment.approve()
self.assertTrue(self.comment.is_approved)
self.comment.approve()
self.assertTrue(self.comment.is_approved)
def tearDown(self):
"""Clean data for new test."""
del self.user
del self.test_post
del self.comment
|
erikaklein/algoritmo---programas-em-Python
|
GerarNumeroNoIntervalo.py
|
Python
|
mit
| 251
| 0.032389
|
# Program that reads two integers and prints the integers in the interval between them.
a = int(input('initial value: '))
print(a)
b = int(input('final value: '))
print(b)

while a < b:
    print(a)
    a = a + 1
|
drayanaindra/inasafe
|
safe/messaging/styles.py
|
Python
|
gpl-3.0
| 2,016
| 0.000496
|
"""
InaSAFE Disaster risk assessment tool developed by AusAid **Messaging styles.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Style constants for use with messaging. Example usage::
from messaging.styles import PROGRESS_UPDATE_STYLE
    m.ImportantText(myTitle, **PROGRESS_UPDATE_STYLE)
This will result in some standardised styling being applied to the important
text element.
"""
__author__ = 'tim@linfiniti.com'
__revision__ = '$Format:%H$'
__date__ = '06/06/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
                 'Disaster Reduction')
# These all apply to heading elements
PROGRESS_UPDATE_STYLE = {
'level': 5,
'icon': 'icon-cog icon-white',
'style_class': 'info'}
INFO_STYLE = {
'level': 5,
'icon': 'icon-info-sign icon-white',
'style_class': 'info'}
WARNING_STYLE = {
'level': 5,
'icon': 'icon-warning-sign icon-white',
'style_class': 'warning'}
SUGGESTION_STYLE = {
'level': 5,
'icon': 'icon-comment icon-white',
'style_class': 'suggestion'}
PROBLEM_STYLE = {
'level': 5,
'icon': 'icon-remove-sign icon-white',
'style_class': 'warning'}
DETAILS_STYLE = {
'level': 5,
'icon': 'icon-list icon-white',
'style_class': 'problem'}
SMALL_ICON_STYLE = {
'attributes': 'style="width: 24px; height: 24px;"',
}
TRACEBACK_STYLE = {
'level': 5,
'icon': 'icon-info-sign icon-white',
'style_class': 'inverse',
'attributes': 'onclick="toggleTracebacks();"'}
TRACEBACK_ITEMS_STYLE = {
'style_class': 'traceback-detail',
}
# This is typically a text element or its derivatives
KEYWORD_STYLE = {
# not working unless you turn css on and off again using inspector
#'style_class': 'label label-success'
}
|
Arcanemagus/SickRage
|
sickbeard/providers/scc.py
|
Python
|
gpl-3.0
| 7,234
| 0.002903
|
# coding=utf-8
# Author: Idan Gutman
# Modified by jkaberg, https://github.com/jkaberg for SceneAccess
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import time
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
from six.moves.urllib.parse import quote
import sickbeard
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickbeard.common import cpu_presets
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class SCCProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "SceneAccess")
self.username = None
self.password = None
self.minseed = None
self.minleech = None
self.cache = tvcache.TVCache(self) # only poll SCC every 20 minutes max
self.urls = {
'base_url': 'https://sceneaccess.eu',
'login': 'https://sceneaccess.eu/login',
            'detail': 'https://www.sceneaccess.eu/details?id=%s',
            'search': 'https://sceneaccess.eu/all?search=%s&method=1&%s',
'download': 'https://www.sceneaccess.eu/%s'
}
self.url = self.urls['base_url']
self.categories = {
'Season': 'c26=26&c44=44&c45=45', # Archive, non-scene HD, non-scene SD; need to include non-scene because WEB-DL packs get added to those categories
'Episode': 'c17=17&c27=27&c33=33&c34=34&c44=44&c45=45', # TV HD, TV SD, non-scene HD, non-scene SD, foreign XviD, foreign x264
'RSS': 'c17=17&c26=26&c27=27&c33=33&c34=34&c44=44&c45=45' # Season + Episode
}
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
'submit': 'come on in'
}
response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search(r'Username or password incorrect', response) \
or re.search(r'<title>SceneAccess \| Login</title>', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
@staticmethod
def _isSection(section, text):
title = r'<title>.+? \| {0}</title>'.format(section)
return re.search(title, text, re.I)
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals,too-many-branches, too-many-statements
results = []
if not self.login():
return results
for mode in search_strings:
items = []
if mode != 'RSS':
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_url = self.urls['search'] % (quote(search_string), self.categories[mode])
try:
data = self.get_url(search_url, returns='text')
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
                except Exception as e:
                    logger.log("Unable to fetch data. Error: {0}".format(repr(e)), logger.WARNING)
                    continue  # data is undefined if the request raised, so skip this search string
if not data:
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='torrents-table')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one Release is found
if len(torrent_rows) < 2:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
for result in torrent_table('tr')[1:]:
try:
link = result.find('td', class_='ttr_name').find('a')
url = result.find('td', class_='td_dl').find('a')
title = link.string
if re.search(r'\.\.\.', title):
data = self.get_url(urljoin(self.url, link['href']), returns='text')
if data:
with BS4Parser(data) as details_html:
title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
download_url = self.urls['download'] % url['href']
seeders = int(result.find('td', class_='ttr_seeders').string)
leechers = int(result.find('td', class_='ttr_leechers').string)
torrent_size = result.find('td', class_='ttr_size').contents[0]
size = convert_size(torrent_size) or -1
except (AttributeError, TypeError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = SCCProvider()
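# A hedged sketch of exercising the provider outside SickRage's normal flow
# (the search_strings shape, a mode keyed to a list of strings, follows the
# convention visible in search(); the credentials and show name are
# placeholders):
#
#     provider.username = 'user'
#     provider.password = 'secret'
#     provider.minseed, provider.minleech = 1, 0
#     for item in provider.search({'Episode': [b'Show.Name.S01E01']}):
#         print(item['title'], item['seeders'], item['leechers'])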
|
carolinehardin/learnProgrammingByForums
|
countReddit.py
|
Python
|
gpl-2.0
| 5,525
| 0.029864
|
import os, logging, praw, HTMLParser, ConfigParser, pprint, csv
from bs4 import BeautifulSoup
from urlparse import urlparse
from tldextract import tldextract
print "Reddit Research Scraper v0.1"
print "============================"
'''
Grab the config file (we're gonna need it later on)
'''
try:
config = ConfigParser.ConfigParser()
    config.read(os.path.dirname(os.path.realpath(__file__)) + '/settingsR.conf')
assert(config.get('global', 'outputCSV'))
print "Settings parsed correctly."
except ConfigParser.NoSectionError:
print "Your config file does not appear to be valid. Please verify that settings.conf exists."
#make a pretty printer for use later
pp = pprint.PrettyPrinter(indent=4)
# log events to redditScraper.log with debug level logging
LOG_FILENAME = config.get('global', 'logfile')
LOG_LEVEL = config.get('global', 'loglevel')
logging.basicConfig(filename=LOG_FILENAME, level=LOG_LEVEL)
# we need this to unescape the escaped characters
redditParse = HTMLParser.HTMLParser()
# create files for saving the reddit stuff to
outputCSV = config.get('global', 'outputCSV')
commentsCSV = config.get('global', 'commentsCSV')
commentsFixedCSV = config.get('global', 'commentsFixedCSV')
# put together a dictionary for building the CSV file
# resource, number of links, number of mentions
fieldnames = ['resource', 'number of links', 'number of mentions']
#let's grab the stuff from reddit using praw
reddit = praw.Reddit(user_agent='linux:ResearchRedditScraper:v0.1 (by /u/plzHowDoIProgram)')
username = config.get('user', 'username')
password = config.get('user', 'password')
print "Logging into Reddit..."
reddit.login(username, password)
#DEPRECATED. Will be removed in a future version of PRAW. Password-based authentication will stop working on 2015/08/03 and as a result will be removed in PRAW4.
print "We're in. Requesting comments from reddit..."
commentPile = reddit.get_comments('learnprogramming', limit=None)
print "Request complete. Parsing raw comments to CSV..."
commentCount = 0
#write comment to a csv file for counting
with open(commentsCSV, 'w') as csvFile:
csvwriter= csv.writer(csvFile,delimiter='\t')
for comments in commentPile:
#unescape the html and put in unicode
commentFormatted = (redditParse.unescape(comments.body_html)).encode('utf-8')
#write it to a csv file for counting comments
csvwriter.writerow([commentFormatted])
commentCount += 1
print "Complete. We parsed " + str(commentCount) + " comments."
# parse the document. we're using the praw version now
with open(commentsFixedCSV,'r') as inputFile:
#parse the input csv using beautiful soup
soup = BeautifulSoup(inputFile)
linkPile = [a.attrs.get('href') for a in soup.find_all('a')]
#pretty print the output to see what we've got so far
#pp.pprint(linkPile)
print "We found " + str(len(linkPile)) + " total number of links"
#create a dictionary to keep the resource names in and count the number of appearances
print "Building dictionary...."
resources = {}
#look at every link in the pile
for linkCandidate in linkPile:
    pp.pprint(linkCandidate)
    baseUrl = None
    try:
        #we only want the hostname
        baseUrl = urlparse(linkCandidate).hostname
    except: #we get some non-url text somehow.
        print 'Skip this:'
        print linkCandidate
    # if parsing gave us a hostname
    if baseUrl is not None:
# extract the TLD
resourceFound = tldextract.extract(baseUrl).domain
#check each name against this list to see if it's new
        if resourceFound in resources:
#if in list, increment count
resources[resourceFound] = (resources[resourceFound]+1)
else:
#if not in list, add it
resources[resourceFound] = 1
#seed the dictionary with the most commonly used resources listed on the reddit FAQ
resources.update({'rubymonk':0, 'tryruby':0, 'hackety hack':0,'codecademy':0,'codeacademy':0,'eloquent javascript':0, 'caveofprogramming':0, 'udemy':0,'try python':0, 'learnpython':0, 'crunchy':0, 'coursera':0, 'udacity':0, 'edx':0 })
print "Dictionary complete. "
pp.pprint(resources)
#create an empty list for collecting results
csvOutput = []
csvOutput.append(fieldnames)
#now go back through the file and find how many times each resource shows up across all the text. Count by comments, not total appearances.
#i.e., if someone talks about 'stackoverflow' three times in a single comment, it is only counted once.
#this will run slow!
for key in resources:
    #change to unicode to avoid parsing problems. add spaces to match discrete keys, not parts of words
unicodeKey = " " + key.encode('utf-8') + " "
#make sure we are in lower case for everything
unicodeKey = unicodeKey.lower()
totalCount = 0
#we are counting by the row as each row is a comment
with open(commentsCSV,'rb') as inputFile:
reader = csv.reader(inputFile, delimiter='\t')
for row in reader:
#the second column has the body text. get it in lower case
bodyText = row[0].lower()
if((bodyText.count(unicodeKey))>0):
totalCount += 1
#now that we've gone through each row, add it to the output.
csvOutput.append([key,resources[key], totalCount])
#finally, save the CSV file
print "Writing results to CSV file...."
with open(outputCSV, 'w+') as csvfile:
csvwrite = csv.writer(csvfile)
for row in csvOutput:
csvwrite.writerow(row)
print "Complete. Results can be found in " + outputCSV
|
imclab/confer
|
server/recommender.py
|
Python
|
mit
| 741
| 0.017544
|
import sys, os, operator, json
from py4j.java_gateway import JavaGateway
from py4j.java_collections import ListConverter
'''
@author: Anant Bhardwaj
@date: Nov 1, 2013
'''
class Recommender:
def __init__(self):
self.gateway = JavaGateway()
def get_item_based_recommendations(self, paper_id_list):
java_paper_id_list = ListConverter().convert(
            paper_id_list, self.gateway._gateway_client)
recs = self.gateway.entry_point.recommend(java_paper_id_list)
res=[]
for rec in recs:
r = rec.split(',')
            res.append({'id': r[0], 'score': float(r[1])})
return res
def main():
r = Recommender()
res = r.get_item_based_recommendations(['pn1460'])
print res
if __name__ == "__main__":
main()
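# Operational note: JavaGateway() does not launch a JVM; it connects to a py4j
# GatewayServer that must already be running (localhost:25333 by default) and
# whose entry point exposes recommend(). A hedged sketch of guarding against a
# missing gateway:
#
#     from py4j.protocol import Py4JNetworkError
#     try:
#         recs = Recommender().get_item_based_recommendations(['pn1460'])
#     except Py4JNetworkError:
#         print 'Java gateway is not running; start the recommender JVM first.'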
|
rajalokan/keystone
|
keystone/tests/hacking/checks.py
|
Python
|
apache-2.0
| 14,985
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keystone's pep8 extensions.
In order to make the review process faster and easier for core devs we are
adding some Keystone specific pep8 checks. This will catch common errors
so that core devs don't have to.
There are two types of pep8 extensions. One is a function that takes either
a physical or logical line. The physical or logical line is the first param
in the function definition and can be followed by other parameters supported
by pep8. The second type is a class that parses AST trees. For more info
please see pep8.py.
"""
import ast
import re
import six
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""Created object automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
class CheckForMutableDefaultArgs(BaseASTChecker):
"""Check for the use of mutable objects as function/method defaults.
We are only checking for list and dict literals at this time. This means
that a developer could specify an instance of their own and cause a bug.
The fix for this is probably more work than it's worth because it will
get caught during code review.
"""
CHECK_DESC = 'K001 Using mutable as a function/method default'
MUTABLES = (
ast.List, ast.ListComp,
ast.Dict, ast.DictComp,
ast.Set, ast.SetComp,
ast.Call)
def visit_FunctionDef(self, node):
for arg in node.args.defaults:
if isinstance(arg, self.MUTABLES):
self.add_error(arg)
super(CheckForMutableDefaultArgs, self).generic_visit(node)
def block_comments_begin_with_a_space(physical_line, line_number):
"""There should be a space after the # of block comments.
There is already a check in pep8 that enforces this rule for
inline comments.
Okay: # this is a comment
Okay: #!/usr/bin/python
Okay: # this is a comment
K002: #this is a comment
"""
MESSAGE = "K002 block comments should start with '# '"
# shebangs are OK
if line_number == 1 and physical_line.startswith('#!'):
return
text = physical_line.strip()
if text.startswith('#'): # look for block comments
if len(text) > 1 and not text[1].isspace():
return physical_line.index('#'), MESSAGE
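# For concreteness, a snippet that would trip the checks defined so far
# (illustrative input, not part of keystone itself):
#
#     #this block comment has no space after the hash, so K002 fires
#     def cache_results(results=[]):  # mutable default argument: K001
#         results.append('hit')
#         return results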
class CheckForAssertingNoneEquality(BaseASTChecker):
"""Ensure that code does not use a None with assert(Not*)Equal."""
CHECK_DESC_IS = ('K003 Use self.assertIsNone(...) when comparing '
'against None')
CHECK_DESC_ISNOT = ('K004 Use assertIsNotNone(...) when comparing '
                        'against None')
def visit_Call(self, node):
# NOTE(dstanek): I wrote this in a verbose way to make it easier to
# read for those that have little experience with Python's AST.
def _is_None(node):
if six.PY3:
return (isinstance(node, ast.NameConstant)
and node.value is None)
else:
return isinstance(node, ast.Name) and node.id == 'None'
if isinstance(node.func, ast.Attribute):
if node.func.attr == 'assertEqual':
for arg in node.args:
if _is_None(arg):
self.add_error(node, message=self.CHECK_DESC_IS)
elif node.func.attr == 'assertNotEqual':
for arg in node.args:
if _is_None(arg):
self.add_error(node, message=self.CHECK_DESC_ISNOT)
super(CheckForAssertingNoneEquality, self).generic_visit(node)
class CheckForTranslationIssues(BaseASTChecker):
LOGGING_CHECK_DESC = 'K005 Using translated string in logging'
USING_DEPRECATED_WARN = 'K009 Using the deprecated Logger.warn'
LOG_MODULES = ('logging', 'oslo_log.log')
I18N_MODULES = (
        'keystone.i18n._',
)
TRANS_HELPER_MAP = {
'debug': None,
'info': '_LI',
'warning': '_LW',
'error': '_LE',
'exception': '_LE',
'critical': '_LC',
}
def __init__(self, tree, filename):
        super(CheckForTranslationIssues, self).__init__(tree, filename)
self.logger_names = []
self.logger_module_names = []
self.i18n_names = {}
# NOTE(dstanek): this kinda accounts for scopes when talking
# about only leaf node in the graph
self.assignments = {}
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
item._parent = node
self.visit(item)
elif isinstance(value, ast.AST):
value._parent = node
self.visit(value)
def _filter_imports(self, module_name, alias):
"""Keep lists of logging and i18n imports."""
if module_name in self.LOG_MODULES:
self.logger_module_names.append(alias.asname or alias.name)
elif module_name in self.I18N_MODULES:
self.i18n_names[alias.asname or alias.name] = alias.name
def visit_Import(self, node):
for alias in node.names:
self._filter_imports(alias.name, alias)
return super(CheckForTranslationIssues, self).generic_visit(node)
def visit_ImportFrom(self, node):
for alias in node.names:
full_name = '%s.%s' % (node.module, alias.name)
self._filter_imports(full_name, alias)
return super(CheckForTranslationIssues, self).generic_visit(node)
def _find_name(self, node):
"""Return the fully qualified name or a Name or Attribute."""
if isinstance(node, ast.Name):
return node.id
elif (isinstance(node, ast.Attribute)
and isinstance(node.value, (ast.Name, ast.Attribute))):
method_name = node.attr
obj_name = self._find_name(node.value)
if obj_name is None:
return None
return obj_name + '.' + method_name
elif isinstance(node, six.string_types):
return node
else: # could be Subscript, Call or many more
return None
def visit_Assign(self, node):
"""Look for 'LOG = logging.getLogger'.
This handles the simple case:
name = [logging_module].getLogger(...)
- or -
name = [i18n_name](...)
        And some much more complex ones:
|
390910131/Misago
|
misago/conf/defaults.py
|
Python
|
gpl-2.0
| 12,306
| 0
|
"""
Misago default settings
This file sets everything Misago needs to run.
If you want to add a custom app, middleware or path, please update the setting value
defined in this file instead of copying the setting from here to your settings.py.
Yes:
#yourproject/settings.py
INSTALLED_APPS += (
'myawesomeapp',
)
No:
#yourproject/settings.py
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'misago.core',
'misago.conf',
...
'myawesomeapp',
)
"""
# Build paths inside the project like this: os.path.join(MISAGO_BASE_DIR, ...)
import os
MISAGO_BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Default JS debug to false
# This setting used exclusively by test runner and isn't part of public API
_MISAGO_JS_DEBUG = False
# Assets Pipeline
# See http://django-pipeline.readthedocs.org/en/latest/configuration.html
PIPELINE_CSS = {
'misago_admin': {
'source_filenames': (
'misago/admin/css/style.less',
),
'output_filename': 'misago_admin.css',
},
}
PIPELINE_JS = {
'misago_admin': {
'source_filenames': (
'misago/admin/js/jquery.js',
'misago/admin/js/bootstrap.js',
'misago/admin/js/moment.min.js',
'misago/admin/js/bootstrap-datetimepicker.min.js',
'misago/admin/js/misago-datetimepicker.js',
'misago/admin/js/misago-timestamps.js',
'misago/admin/js/misago-tooltips.js',
'misago/admin/js/misago-tables.js',
'misago/admin/js/misago-yesnoswitch.js',
),
'output_filename': 'misago_admin.js',
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.less.LessCompiler',
)
PIPELINE_TEMPLATE_EXT = '.hbs'
PIPELINE_TEMPLATE_FUNC = 'Ember.Handlebars.compile'
PIPELINE_TEMPLATE_NAMESPACE = 'window.Ember.TEMPLATES'
PIPELINE_TEMPLATE_SEPARATOR = '/'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
# Keep misago.users above django.contrib.auth
# so our management commands take precedence over theirs
'misago.users',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'debug_toolbar',
'pipeline',
'crispy_forms',
'mptt',
'rest_framework',
'misago.admin',
'misago.acl',
'misago.core',
'misago.conf',
'misago.markup',
'misago.notifications',
'misago.legal',
'misago.forums',
'misago.threads',
'misago.readtracker',
'misago.faker',
)
MIDDLEWARE_CLASSES = (
'misago.core.middleware.embercliredirects.EmberCLIRedirectsMiddleware',
'misago.users.middleware.AvatarServerMiddleware',
'misago.users.middleware.RealIPMiddleware',
'misago.core.middleware.frontendcontext.FrontendContextMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'misago.users.middleware.UserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'misago.core.middleware.exceptionhandler.ExceptionHandlerMiddleware',
'misago.users.middleware.OnlineTrackerMiddleware',
'misago.admin.middleware.AdminAuthMiddleware',
'misago.threads.middleware.UnreadThreadsCountMiddleware',
'misago.core.middleware.threadstore.ThreadStoreMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'misago.core.context_processors.site_address',
'misago.conf.context_processors.settings',
'misago.users.context_processors.sites_links',
# Data preloaders
'misago.conf.context_processors.preload_settings_json',
'misago.users.context_processors.preload_user_json',
# Note: keep frontend_context processor last for previous processors
# to be able to add data to request.frontend_context
'misago.core.context_processors.frontend_context',
)
MISAGO_ACL_EXTENSIONS = (
'misago.users.permissions.account',
'misago.users.permissions.profiles',
'misago.users.permissions.warnings',
'misago.users.permissions.moderation',
'misago.users.permissions.delete',
'misago.forums.permissions',
'misago.threads.permissions.threads',
'misago.threads.permissions.privatethreads',
)
MISAGO_MARKUP_EXTENSIONS = ()
MISAGO_POSTING_MIDDLEWARES = (
# Note: always keep FloodProtectionMiddleware middleware first one
'misago.threads.posting.floodprotection.FloodProtectionMiddleware',
'misago.threads.posting.reply.ReplyFormMiddleware',
'misago.threads.posting.participants.ThreadParticipantsFormMiddleware',
'misago.threads.posting.threadlabel.ThreadLabelFormMiddleware',
'misago.threads.posting.threadpin.ThreadPinFormMiddleware',
'misago.threads.posting.threadclose.ThreadCloseFormMiddleware',
'misago.threads.posting.recordedit.RecordEditMiddleware',
'misago.threads.posting.updatestats.UpdateStatsMiddleware',
# Note: always keep SaveChangesMiddleware middleware last one
'misago.threads.posting.savechanges.SaveChangesMiddleware',
)
MISAGO_THREAD_TYPES = (
# category and redirect types
'misago.forums.forumtypes.RootCategory',
'misago.forums.forumtypes.Category',
'misago.forums.forumtypes.Redirect',
# real thread types
'misago.threads.threadtypes.forumthread.ForumThread',
'misago.threads.threadtypes.privatethread.PrivateThread',
'misago.threads.threadtypes.report.Report',
)
# Register Misago directories
LOCALE_PATHS = (
os.path.join(MISAGO_BASE_DIR, 'locale'),
)
STATICFILES_DIRS = (
os.path.join(MISAGO_BASE_DIR, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(MISAGO_BASE_DIR, 'templates'),
)
# Internationalization
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'UTC'
# Misago specific date formats
# https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MISAGO_COMPACT_DATE_FORMAT_DAY_MONTH = 'j M'
MISAGO_COMPACT_DATE_FORMAT_DAY_MONTH_YEAR = 'M \'y'
# Use Misago CSRF Failure Page
CSRF_FAILURE_VIEW = 'misago.core.errorpages.csrf_failure'
# Use Misago authentication
AUTH_USER_MODEL = 'misago_users.User'
AUTHENTICATION_BACKENDS = (
'misago.users.authbackends.MisagoBackend',
)
MISAGO_NEW_REGISTRATIONS_VALIDATORS = (
'misago.users.validators.validate_gmail_email',
'misago.users.validators.validate_with_sfs',
)
MISAGO_STOP_FORUM_SPAM_USE = True
MISAGO_STOP_FORUM_SPAM_MIN_CONFIDENCE = 80
# How many e-mails should be sent in single step.
# This is used for conserving memory usage when mailing many users at same time
MISAGO_MAILER_BATCH_SIZE = 20
# Auth paths
MISAGO_LOGIN_API_URL = 'auth'
LOGIN_REDIRECT_URL = 'misago:index'
LOGIN_URL = 'misago:login'
LOGOUT_URL = 'misago:logout'
# Misago Admin Path
# Omit starting and trailing slashes
# To disable Misago admin, empty this value
MISAGO_ADMIN_PATH = 'admincp'
# Admin urls namespaces that Misago's AdminAuthMiddleware should protect
MISAGO_ADMIN_NAMESPACES = (
'admin',
'misago:admin',
)
# How long (in minutes) the admin session should last
# since the previous request to the admin namespace.
MISAGO_ADMIN_SESSION_EXPIRATION = 60
# Max age of notifications in days
# Notifications older than this are deleted
# On very a
|
fxia22/ASM_xf
|
PythonD/lib/python2.4/site-packages/display/cursing/ScrollBar.py
|
Python
|
gpl-2.0
| 5,591
| 0.023609
|
#
# This file is part of GNU Enterprise.
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2002-2003 Free Software Foundation
#
# FILE:
# ScrollBar.py
#
# DESCRIPTION:
#
# NOTES:
#
#from gnue.common.apps import GDebug
import math
import string
import curses
from constants import *
from Control import Control
from Button import Button
class ScrollBar(Control):
"""
horizontal only :-(
"""
def __init__(self, Parent, SBName, Y, X, W, **properties):
apply(Control.__init__, (self,Parent,SBName),properties)
self.CANGETFOCUS = 1
self.H = 1
self.W = W
self.Y = Y
self.X = X
self.PARENT = Parent
self.SetMethod("SYSPAINT", self.Paint)
self.SetMethod("SYSRUN",self.Run)
self.SetMethod("GOTFOCUS", self.__GotFocus)
        self.SetMethod("LOSTFOCUS", self.__LostFocus)
self.SetMethod("CLICK", self._ChangePos)
self._max = 1
self._val = 0
self.stepsize = 1
self.__initButtons()
def __initButtons(self):
if string.find(str(self.__class__), '.ScrollBar') != -1:
Y = self.Y
X = self.X
W = self.W
            Parent = self.PARENT
self.rightarrow = Button(Parent,'rightarrow',Y,X+W - 3,3,'>')
self.rightarrow.SetMethod("CLICK",self._Inc)
Parent.AddControl(self.rightarrow)
self.left2arrow = Button(Parent,'left2arrow',Y,X+W - 6,3,'<')
self.left2arrow.SetMethod("CLICK",self._Dec)
Parent.AddControl(self.left2arrow)
self.leftarrow = Button(Parent,'leftarrow',Y,X,3,'<')
self.leftarrow.SetMethod("CLICK",self._Dec)
Parent.AddControl(self.leftarrow)
def __GotFocus(self,v1,v2,v3):
self.FOCUS = 1
self.Paint(None,None,None)
return 1
def __LostFocus(self,v1,v2,v3):
self.FOCUS = 0
self.Paint(None,None,None)
return 1
def Run(self, v1,v2,v3):
if v1 :
self.ExecMethod("CLICK", self, v2, v3)
Container = self.PARENT.Screen()
global BACKWARDS
while 1:
ch = Container.GetChar()
if self.PARENT.BreakOrder(ch) :
return
if ch in (Container.TokNextField, Container.TokDownArrow, Container.TokUpArrow):
BACKWARDS = 0
if ch == Container.TokUpArrow:
BACKWARDS = 1
return
elif ch == Container.TokLeftArrow:
self._Dec(None,None,None)
elif ch == Container.TokRightArrow:
self._Inc(None, None, None)
def _ChangePos(self,arg1,arg2,newX):
X = newX - self.start
if X >= (self.WorkingArea-1):
val = self._max
else:
val = float(X) / self.stepsize
val = int(math.ceil(val))
self.Set(val)
self._Action()
def Init(self, Max):
self._max = Max
self._val = 0
self.WorkingArea = float(self.W-9)
self.start = 3
self.UsedSpace = int(math.floor(self.WorkingArea / float(self._max)))
self.stepsize = self.WorkingArea / self._max
if self.UsedSpace < 1:
self.UsedSpace = 1
self.Paint(None,None,None)
def Paint(self,v1,v2,v3):
## TODO: This is all wrong... it only partially supports _ABSX
Pos = int(math.ceil(float(self._val) * (self.stepsize))) + self.start + self._ABSX
Screen = self.PARENT.Screen()
Screen.AutoRefresh = 0
# clear the bar region in reverse standard-color
self.SetColor(1)
self.LoLight()
for i in range(0, int(self.WorkingArea)):
Screen.PutAt(self._ABSY, self.start + i + self._ABSX, ' ', curses.A_REVERSE)
# correct position
if Pos >= (self.WorkingArea + self.start):
Pos = (self.start + self.WorkingArea)
elif Pos < (self.start + self.UsedSpace):
Pos = self.start + self.UsedSpace
# draw the handle hilight
if self.FOCUS:
self.SetColor(3)
else:
self.SetColor(2)
self.LoLight()
for i in range(0, self.UsedSpace):
Screen.PutAt(self._ABSY, (Pos - self.UsedSpace) + i, ' ', curses.A_REVERSE)
Screen.AutoRefresh = 1
Screen.Refresh()
def Dec(self,arg1,arg2,arg3):
if self._val > 0:
self._val -= 1
self.Paint(None,None,None)
def _Dec(self, arg1,arg2,arg3):
self.Dec(None,None,None)
self._Action()
def Inc(self,arg1,arg2,arg3):
if self._val < self._max:
self._val += 1
self.Paint(None,None,None)
def _Inc(self, arg1,arg2,arg3):
self.Inc(None,None,None)
self._Action()
def Set(self,newVal):
if newVal < 0:
newVal = 0
elif newVal > self._max:
newVal =self._max
self._val = newVal
self.Paint(None,None,None)
def __del__(self):
Parent = self.PARENT
Parent.DelControl(self.rightarrow)
Parent.DelControl(self.leftarrow)
Parent.DelControl(self.left2arrow)
def _Action(self):
action = self.GetMethod("CHANGED")
if action != None:
apply(action,(self._val,self._max,None))
|
twrightsman/advent-of-code-2015
|
advent_day7_pt2.py
|
Python
|
unlicense
| 2,686
| 0.004468
|
import re
import sys
def get_wire(input_value):
try:
int(input_value)
return int(input_value)
except ValueError:
if callable(wires[input_value]):
wires[input_value] = wires[input_value]()
return wires[input_value]
class Gate:
def __init__(self, first_input, operator, shift_bits, second_input):
#check if first input is constant or wire ID
self.first_input = first_input
self.operator = operator
self.shift_bits = shift_bits
self.second_input = second_input
if operator:
self.output = getattr(self, 'logic_{}'.format(operator))
else:
try:
int(first_input)
self.output = self.logic_CONST
except ValueError:
self.output = self.logic_JOIN
def logic_AND(self):
print(self.first_input, 'AND', self.second_input)
return get_wire(self.first_input) & get_wire(self.second_input)
def logic_OR(self):
print(self.first_input, 'OR', self.second_input)
return get_wire(self.first_input) | get_wire(self.second_input)
def logic_NOT(self):
#0xFFFF used to mask the higher bits
print('NOT', self.second_input)
return (~get_wire(self.second_input)) & 0xFFFF
def logic_RSHIFT(self):
print('RSHIFT', self.first_input, 'by', self.shift_bits, 'bits')
return (get_wire(self.first_input) >> int(self.shift_bits)) & 0xFFFF
def logic_LSHIFT(self):
print('LSHIFT', self.first_input, 'by', self.shift_bits, 'bits')
        return (get_wire(self.first_input) << int(self.shift_bits)) & 0xFFFF
def logic_JOIN(self):
        print('Getting signal of wire', self.first_input)
return wires[self.first_input]()
def logic_CONST(self):
        print('Constant signal', self.first_input)
return int(self.first_input)
wires = {}
def get_signal(wire_id):
return wires[wire_id]()
'''
Group 1: first input to gate (wire id OR constant)
Group 2: operator, if given
Group 3: SHIFT gate bits, if given
Group 4: second input to gate (wire id OR constant), if given
Group 5: output wire id of gate
'''
instruction_pattern = re.compile(r'([a-z]+|[0-9]+)? ?([A-Z]+)? ?([0-9]+)? ?([a-z]+|[0-9]+)? -> ([a-z]+)')
for instruction in sys.stdin:
result = re.match(instruction_pattern, instruction)
first_input = result.group(1)
operator = result.group(2)
shift_bits = result.group(3)
second_input = result.group(4)
output = result.group(5)
wires[output] = Gate(first_input, operator, shift_bits, second_input).output
wires['b'] = get_signal('a')
print(get_signal('a'))
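# Example run (illustrative wiring, not the real puzzle input):
#
#     $ python advent_day7_pt2.py < input.txt
#
# where input.txt holds one instruction per line, matching the regex above:
#
#     123 -> x
#     456 -> y
#     x AND y -> a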
|
eyaler/tensorpack
|
examples/basics/mnist-visualizations.py
|
Python
|
apache-2.0
| 4,834
| 0.002069
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist-visualizations.py
"""
The same MNIST ConvNet example, but with weights/activations visualization.
"""
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import dataset
IMAGE_SIZE = 28
def visualize_conv_weights(filters, name):
"""Visualize use weights in convolution filters.
Args:
filters: tensor containing the weights [H,W,Cin,Cout]
name: label for tensorboard
Returns:
image of all weight
"""
with tf.name_scope('visualize_w_' + name):
filters = tf.transpose(filters, (3, 2, 0, 1)) # [h, w, cin, cout] -> [cout, cin, h, w]
filters = tf.unstack(filters) # --> cout * [cin, h, w]
filters = tf.concat(filters, 1) # --> [cin, cout * h, w]
filters = tf.unstack(filters) # --> cin * [cout * h, w]
filters = tf.concat(filters, 1) # --> [cout * h, cin * w]
filters = tf.expand_dims(filters, 0)
filters = tf.expand_dims(filters, -1)
tf.summary.image('visualize_w_' + name, filters)
def visualize_conv_activations(activation, name):
"""Visualize activations for convolution layers.
Remarks:
This tries to place all activations into a square.
Args:
activation: tensor with the activation [B,H,W,C]
name: label for tensorboard
Returns:
image of almost all activations
"""
import math
with tf.name_scope('visualize_act_' + name):
_, h, w, c = activation.get_shape().as_list()
rows = []
c_per_row = int(math.sqrt(c))
for y in range(0, c - c_per_row, c_per_row):
row = activation[:, :, :, y:y + c_per_row] # [?, H, W, 32] --> [?, H, W, 5]
cols = tf.unstack(row, axis=3) # [?, H, W, 5] --> 5 * [?, H, W]
row = tf.concat(cols, 1)
rows.append(row)
viz = tf.concat(rows, 2)
tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))
class Model(ModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
tf.placeholder(tf.int32, (None,), 'label')]
def build_graph(self, image, label):
image = tf.expand_dims(image * 2 - 1, 3)
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
c0 = Conv2D('conv0', image)
p0 = MaxPooling('pool0', c0, 2)
c1 = Conv2D('conv1', p0)
c2 = Conv2D('conv2', c1)
p1 = MaxPooling('pool1', c2, 2)
c3 = Conv2D('conv3', p1)
            fc1 = FullyConnected('fc0', c3, 512, nl=tf.nn.relu)
fc1 = Dropout('dropout', fc1, 0.5)
logits = FullyConnected('fc1', fc1, out_dim=10, nl=tf.identity)
with tf.name_scope('visualizations'):
visualize_conv_weights(c0.variables.W, 'conv0')
visualize_conv_activations(c0, 'conv0')
visualize_conv_weights(c1.variables.W, 'conv1')
visualize_conv_activations(c1, 'conv1')
visualize_conv_weights(c2.variables.W, 'conv2')
visualize_conv_activations(c2, 'conv2')
visualize_conv_weights(c3.variables.W, 'conv3')
visualize_conv_activations(c3, 'conv3')
tf.summary.image('input', (image + 1.0) * 128., 3)
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
tf.reduce_mean(tf.to_float(tf.nn.in_top_k(logits, label, 1)), name='accuracy')
wd_cost = tf.multiply(1e-5,
regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss')
return tf.add_n([wd_cost, cost], name='total_cost')
def optimizer(self):
lr = tf.train.exponential_decay(
learning_rate=1e-3,
global_step=get_global_step_var(),
decay_steps=468 * 10,
decay_rate=0.3, staircase=True, name='learning_rate')
tf.summary.scalar('lr', lr)
return tf.train.AdamOptimizer(lr)
def get_data():
train = BatchData(dataset.Mnist('train'), 128)
test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return train, test
if __name__ == '__main__':
logger.auto_set_dir()
dataset_train, dataset_test = get_data()
config = TrainConfig(
model=Model(),
dataflow=dataset_train,
callbacks=[
ModelSaver(),
InferenceRunner(
dataset_test, ScalarStats(['cross_entropy_loss', 'accuracy'])),
],
steps_per_epoch=len(dataset_train),
max_epoch=100,
)
launch_train_with_config(config, SimpleTrainer())
|
PuZheng/cloud-dashing
|
cloud_dashing/default_settings.py
|
Python
|
gpl-2.0
| 618
| 0
|
# -*- coding: UTF-8 -*-
"""
these are the default settings; don't insert them into your customized settings!
"""
DEBUG = True
TESTING = True
SECRET_KEY = "5L)0K%,i.;*i/s("
SECURITY_SALT = "sleiuyyao"
# DB config
SQLALCHEMY_DATABASE_URI = "sqlite:///dev.db"
SQLALCHEMY_ECHO = True
UPLOADS_DEFAULT_DEST = 'uploads'
LOG_FILE = 'log.txt'
ERROR_LOG_RECIPIENTS = []
# Flask-Mail related configuration, refer to
# `http://pythonhosted.org/flask-mail/#configuring-flask-mail`
MAIL_SERVER = 'smtp.foo.com'
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'
MAIL_DEFAULT_SENDER = 'user@foo.com'
FREEZER_RELATIVE_URLS = False
|
DLR-SC/DataFinder
|
src/datafinder/core/configuration/properties/domain.py
|
Python
|
bsd-3-clause
| 6,707
| 0.00999
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
#    * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
#    * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Defines two classes to support domain objects. Domain objects allow you to
model meta data in a more compact way.
Domain objects should always be inherited from C{DomainObject}. Then the required
properties should be defined on class level using C{DomainProperty}.
Here is an example:
>>> class Author(DomainObject):
... name = DomainProperty(StringType(), None, "Name", "This is the author name.")
...
... @name.setValidate
... def _validateName(self):
... if self.name is None or len(self.name) == 0:
... raise ValueError("Name should not be empty.")
...
>>> author = Author()
>>> author.name = "Pierre"
"""
import inspect
__version__ = "$Revision-Id:$"
class DomainProperty(property):
""" Describes a property of a domain object.
Properties defined in this way are persisted and can be further described by a
documentation string, display name, and a default value. You can also provide a
    custom validation function using C{setValidate}. The existing property types
L{property_type<datafinder.core.configuration.properties.property_type>} can
be used.
"""
def __init__(self, type_, defaultValue=None, displayName="", docString=""):
property.__init__(self, self._getter, self._setter)
self.type = type_
self.__doc__ = docString
self.defaultValue = defaultValue
self.displayName = displayName
self._values = dict()
self._validate = lambda _: None
def validate(self, instance):
""" Validates the given object.
@raise ValueError: Indicates an invalid object.
"""
self.type.validate(self._getter(instance))
self._validate(instance)
def setValidate(self, function):
""" This method is intended to be used as method decorator.
>>> @name.setValidate
... def _validateName(self):
... pass
The decorated method should just expect the domain
property instance as argument. Invalid values should be
indicated using C{ValueError}. The default validation method
does nothing.
"""
self._validate = function
def _getter(self, instance):
if id(instance) in self._values:
return self._values[id(instance)]
else:
return self.defaultValue
def _setter(self, instance, value):
self._values[id(instance)] = value
def __repr__(self):
return "%s: %s\n%s" % (self.displayName, self.type.name, self.__doc__)
class DomainObject(object):
""" Base class for all domain objects.
@note: Domain object should be created using an empty constructor.
"""
def validate(self):
""" Indicates validation errors using C{ValueError}. """
for instance, _, propertyDescriptor, value in self.walk():
propertyDescriptor.validate(instance)
if isinstance(value, DomainObject):
value.validate()
def walk(self, recursively=False):
""" Returns a generator which allows walking through
all defined domain properties.
For every property the following information is returned:
- The instance on which the property is defined.
- The attribute name to which the property is bound.
- The property descriptor.
- The current value of the property.
@param recursively: Indicates whether sub domain
objects are processed as well. Default: C{False}
@type recursively: C{bool}
"""
processLater = list()
for name, propertyDescriptor in inspect.getmembers(self.__class__):
if isinstance(propertyDescriptor, DomainProperty):
value = getattr(self, name)
yield self, name, propertyDescriptor, value
if isinstance(value, DomainObject) and recursively:
processLater.append(value)
for theProperty in processLater:
for propertyInfo in theProperty.walk(recursively):
yield propertyInfo
def __cmp__(self, other):
""" Two instances are equal if all domain properties are equal. """
for _, name, __, value in self.walk():
try:
if cmp(getattr(other, name), value) != 0:
return 1
except AttributeError:
return 1
return 0
def __hash__(self):
hashValue = list()
for _, __, ___, value in self.walk():
hashValue.append(value)
return hash(tuple(hashValue))
def __repr__(self):
result = ""
for _, name, __, value in self.walk():
result += "%s: '%s' " % (name, str(value))
return result.strip()
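# Continuing the docstring's Author example, a hedged sketch of the validation
# and introspection entry points defined above:
#
#     author = Author()
#     author.name = ""           # invalid per _validateName
#     try:
#         author.validate()      # runs every DomainProperty's validators
#     except ValueError as error:
#         print(error)           # "Name should not be empty."
#
#     for instance, name, descriptor, value in author.walk():
#         print(name, descriptor.displayName, value)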
|
ajhager/copycat
|
copycat/slipnet/sliplink.py
|
Python
|
gpl-2.0
| 1,760
| 0.001705
|
# Copyright (c) 2007-2017 Joseph Hager.
#
# Copycat is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License,
# as published by the Free Software Foundation.
#
# Copycat is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Copycat; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Sliplink"""
class Sliplink(object):
"""Sl
|
iplink is a link between two nod
|
es in the slipnet.
Attributes:
from_node: The node this link starts at.
to_node: The node this link ends at.
label: The node that labels this link.
        fixed_length: A static length, used when the link has no label."""
def __init__(self, from_node, to_node, label, fixed_length):
"""Initializes Sliplink."""
self.from_node = from_node
self.to_node = to_node
self.label = label
self.fixed_length = fixed_length
def intrinsic_degree_of_association(self):
"""Return the intrinsic degree of association of the link."""
        if self.fixed_length is not None:
return 100 - self.fixed_length
else:
return self.label.intrinsic_degree_of_association()
def degree_of_association(self):
"""Return the degree of association of the link."""
        if self.fixed_length is not None:
return 100 - self.fixed_length
else:
return self.label.degree_of_association()
|
omat/django-timeline
|
timeline/views.py
|
Python
|
mit
| 4,465
| 0.002917
|
# -*- coding: utf-8 -*-
from time import strftime
from django.db.models import Max
from django.shortcuts import render_to_response, get_object_or_404
from django.core.paginator import QuerySetPaginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.utils.feedgenerator import Rss201rev2Feed
from django.http import Http404, HttpResponse
from django.template import RequestContext
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.views.decorators.vary import vary_on_headers
from django.template.loader import render_to_string
from favorites.models import Favorite
from main.utils import index_context, list_context, tags_for_model
from tagging.models import TaggedItem, Tag
from timeline.models import Timeline
@vary_on_headers('Host')
def user_list(request):
queryset = Timeline.objects.filter(user__is_active=True).values('user').annotate(last=Max('time')).order_by('-last')
if request.current_user:
ctype = ContentType.objects.get_for_model(User)
favorites = Favorite.objects.filter(user=request.current_user,
content_type=ctype).values_list('object_id', flat=True)
queryset = queryset.filter(user__id__in=favorites)
paginator = QuerySetPaginator(queryset, 20)
try:
page = paginator.page(request.GET.get('p', 1))
except (EmptyPage, InvalidPage):
page = paginator.page(paginator.num_pages)
return render_to_response('timeline/user_list.html',
{'user_list': [User.objects.get(id=item['user']) for item in page.object_list],
'page': page},
context_instance=RequestContext(request))
@vary_on_headers('Host')
def rss(request, tag_slug=None, model_name=None):
items = Timeline.objects.all()
if model_name:
if model_name in ('not', 'foto', 'harita', 'yazi', 'uye', 'forum', 'aktivite', 'yer'):
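            # (the Turkish titles below translate roughly as: 'en son eklenen
            # notlar' = 'most recently added notes', 'fotoğraflar' = 'photos',
            # 'haritalar' = 'maps', 'yazılar' = 'posts', 'üyeler' = 'members',
            # 'forum yazıları' = 'forum posts', 'etkinlikler' = 'events',
            # 'yerler' = 'places', 'tarafından' = 'by')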
if model_name == 'not':
feed_title = u'en son eklenen notlar'
elif model_name == 'foto':
feed_title = u'en son eklenen fotoğraflar'
elif model_name == 'harita':
feed_title = u'en son eklenen haritalar'
elif model_name == 'yazi':
feed_title = u'en son eklenen yazılar'
elif model_name == 'uye':
feed_title = u'en son katılan üyeler'
elif model_name == 'forum':
feed_title = u'en son forum yazıları'
elif model_name == 'aktivite':
feed_title = u'en son eklenen etkinlikler'
elif model_name == 'yer':
feed_title = u'en son işaretlenen yerler'
else:
raise Http404
items = items.filter(content_type=ctype)
else:
feed_title = u'en son eklenenler'
if request.current_user:
items = items.filter(user=request.current_user)
feed_title = u'%s tarafından %s' % (request.current_user, feed_title)
feed_link = 'http://%s.%s%s' % (request.current_user, Site.objects.get_current().domain, request.path)
else:
feed_title = u'''gezgin'e %s''' % (feed_title,)
feed_link = request.path
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
items = TaggedItem.objects.get_by_model(items, tag)
feed_title = u'%s için %s' % (tag.name, feed_title)
feed = Rss201rev2Feed(title=feed_title,
description=feed_title,
link=feed_link)
items = items.order_by('-time')[:50]
current_site = Site.objects.get_current()
for tl_item in items:
        feed.add_item(title=render_to_string('timeline/feed_item_title.html',
                                             {'item': tl_item,
                                              'current_site': current_site}),
                      description=render_to_string('timeline/item.html',
                                                   {'item': tl_item,
                                                    'current_site': current_site}),
link=render_to_string('timeline/feed_item_link.html', {'item': tl_item,
'current_site': current_site}))
return HttpResponse(feed.writeString('UTF-8'), mimetype=feed.mime_type)
|
kaiix/schematics
|
schematics/types/base.py
|
Python
|
bsd-3-clause
| 28,230
| 0.001594
|
import uuid
import re
import datetime
import decimal
import itertools
import functools
import random
import string
import six
from six import iteritems
from ..exceptions import (
StopValidation, ValidationError, ConversionError, MockCreationError
)
try:
from string import ascii_letters # PY3
except ImportError:
from string import letters as ascii_letters #PY2
try:
basestring #PY2
except NameError:
basestring = str #PY3
try:
unicode #PY2
except:
import codecs
unicode = str #PY3
def utf8_decode(s):
if six.PY3:
s = str(s) #todo: right thing to do?
else:
s = unicode(s, 'utf-8')
return s
def fill_template(template, min_length, max_length):
return template % random_string(
get_value_in(
min_length,
max_length,
padding=len(template) - 2,
required_length=1))
def force_unicode(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
#obj = unicode(obj, encoding)
obj = utf8_decode(obj)
elif not obj is None:
#obj = unicode(obj)
obj = utf8_decode(obj)
return obj
def get_range_endpoints(min_length, max_length, padding=0, required_length=0):
if min_length is None and max_length is None:
min_length = 0
max_length = 16
elif min_length is None:
min_length = 0
elif max_length is None:
max_length = max(min_length * 2, 16)
if padding:
max_length = max_length - padding
min_length = max(min_length - padding, 0)
if max_length < required_length:
raise MockCreationError(
'This field is too short to hold the mock data')
min_length = max(min_length, required_length)
return min_length, max_length
def get_value_in(min_length, max_length, padding=0, required_length=0):
return random.randint(
*get_range_endpoints(min_length, max_length, padding, required_length))
def random_string(length, chars=ascii_letters + string.digits):
return ''.join(random.choice(chars) for _ in range(length))
_last_position_hint = -1
_next_position_hint = itertools.count()
class TypeMeta(type):
"""
Meta class for BaseType. Merges `MESSAGES` dict and accumulates
validator methods.
"""
def __new__(mcs, name, bases, attrs):
messages = {}
validators = []
for base in reversed(bases):
if hasattr(base, 'MESSAGES'):
messages.update(base.MESSAGES)
if hasattr(base, "_validators"):
validators.extend(base._validators)
if 'MESSAGES' in attrs:
messages.update(attrs['MESSAGES'])
attrs['MESSAGES'] = messages
for attr_name, attr in iteritems(attrs):
if attr_name.startswith("validate_"):
validators.append(attr)
attrs["_validators"] = validators
return type.__new__(mcs, name, bases, attrs)
class BaseType(TypeMeta('BaseTypeBase', (object, ), {})):
"""A base class for Types in a Schematics model. Instances of this
class may be added to subclasses of ``Model`` to define a model schema.
Validators that need to access variables on the instance
can be defined be implementing methods whose names start with ``validate_``
and accept one parameter (in addition to ``self``)
:param required:
Invalidate field when value is None or is not supplied. Default:
False.
:param default:
When no data is provided default to this value. May be a callable.
Default: None.
:param serialized_name:
The name of this field defaults to the class attribute used in the
model. However if the field has another name in foreign data set this
argument. Serialized data will use this value for the key name too.
:param deserialize_from:
A name or list of named fields for which foreign data sets are
        searched to provide a value for the given field. This only affects
inbound data.
:param choices:
A list of valid choices. This is the last step of the validator
chain.
:param validators:
A list of callables. Each callable receives the value after it has been
converted into a rich python type. Default: []
:param serialize_when_none:
Dictates if the field should appear in the serialized data even if the
value is None. Default: True
:param messages:
Override the error messages with a dict. You can also do this by
subclassing the Type and defining a `MESSAGES` dict attribute on the
class. A metaclass will merge all the `MESSAGES` and override the
resulting dict with instance level `messages` and assign to
`self.messages`.
"""
    MESSAGES = {
'required': u"This field is required.",
'choices': u"Value must be one of {0}.",
}
    def __init__(self, required=False, default=None, serialized_name=None,
choices=None, validators=None, deserialize_from=None,
serialize_when_none=None, messages=None):
super(BaseType, self).__init__()
self.required = required
self._default = default
self.serialized_name = serialized_name
if choices and not isinstance(choices, (list, tuple)):
raise TypeError('"choices" must be a list or tuple')
self.choices = choices
self.deserialize_from = deserialize_from
self.validators = [functools.partial(v, self) for v in self._validators]
if validators:
self.validators += validators
self.serialize_when_none = serialize_when_none
self.messages = dict(self.MESSAGES, **(messages or {}))
self._position_hint = next(_next_position_hint) # For ordering of fields
def __call__(self, value):
return self.to_native(value)
def _mock(self, context=None):
return None
def _setup(self, field_name, owner_model):
"""Perform late-stage setup tasks that are run after the containing model
has been created.
"""
self.name = field_name
self.owner_model = owner_model
@property
def default(self):
default = self._default
if callable(self._default):
default = self._default()
return default
def to_primitive(self, value, context=None):
"""Convert internal data to a value safe to serialize.
"""
return value
def to_native(self, value, context=None):
"""
Convert untrusted data to a richer Python construct.
"""
return value
def allow_none(self):
if hasattr(self, 'owner_model'):
return self.owner_model.allow_none(self)
else:
return self.serialize_when_none
def validate(self, value):
"""
Validate the field and return a clean value or raise a
``ValidationError`` with a list of errors raised by the validation
chain. Stop the validation process from continuing through the
validators by raising ``StopValidation`` instead of ``ValidationError``.
"""
errors = []
for validator in self.validators:
try:
validator(value)
except ValidationError as exc:
errors.extend(exc.messages)
if isinstance(exc, StopValidation):
break
if errors:
raise ValidationError(errors)
def validate_required(self, value):
if self.required and value is None:
raise ValidationError(self.messages['required'])
def validate_choices(self, value):
if self.choices is not None:
if value not in self.choices:
raise ValidationError(self.messages['choices']
.format(unicode(self.choices)))
def mock(self, context=None):
if not self.required and not random.choice([True, False]):
return self.default
if self.choices is not None:
            return random.choice(self.choices)
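# --- Hedged usage sketch (not part of the original file) ---------------------
# Assuming the BaseType above plus ValidationError are in scope, a subclass
# only needs a MESSAGES entry and a ``validate_*`` method; TypeMeta merges both
# into the resulting type:
#
# class EvenIntType(BaseType):
#     MESSAGES = {'even': u"Value must be even."}
#
#     def validate_even(self, value):
#         if value is not None and value % 2:
#             raise ValidationError(self.messages['even'])
#
# field = EvenIntType(required=True, choices=[2, 4, 6])
# field.validate(4)    # passes required, the even check, and the choices check
# field.validate(3)    # raises ValidationError (not even, not in choices)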
|
Nic30/hwtHls
|
tests/utils/alapAsapDiffExample.py
|
Python
|
mit
| 2,761
| 0.002898
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.interfaces.std import VectSignal
from hwt.interfaces.utils import addClkRstn
from hwt.simulator.simTestCase import SimTestCase
from hwt.synthesizer.unit import Unit
from hwtHls.hlsStreamProc.streamProc import HlsStreamProc
from hwtHls.platform.virtual import VirtualHlsPlatform
from hwtHls.scheduler.errors import TimeConstraintError
from hwtSimApi.utils import freq_to_period
class AlapAsapDiffExample(Unit):
def _config(self):
self.CLK_FREQ = int(400e6)
def _declr(self):
addClkRstn(self)
self.clk.FREQ = self.CLK_FREQ
self.a = VectSignal(8)
self.b = VectSignal(8)
self.c = VectSignal(8)
self.d = VectSignal(8)._m()
def _impl(self):
hls = HlsStreamProc(self)
        # inputs have to be read to enter the hls scope
        # (without read() the operations will not be scheduled by HLS
        # but will be synthesized directly)
a, b, c = [hls.read(intf) for intf in [self.a, self.b, self.c]]
        # depending on the target platform this expression
        # can be mapped to DSP, LUT, etc...
        # no constraints are specified => the default strategy is
        # to achieve zero delay and minimum latency for this CLK_FREQ
d = ~(~a & ~b) & ~c
hls.thread(
hls.While(True,
hls.write(d, self.d)
)
)
def neg_8b(a):
return ~a & 0xff
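# Worked check (hedged, not in the original file): by De Morgan's law
# ~(~a & ~b) == a | b, so the unit computes (a | b) & ~c. For the test vector
# a=20, b=58, c=48 used below: (20 | 58) & (~48 & 0xff) == 62 & 207 == 14,
# which equals neg_8b(neg_8b(20) & neg_8b(58)) & neg_8b(48).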
class AlapAsapDiffExample_TC(SimTestCase):
def test_400MHz(self):
self._test_simple(400e6)
def test_200MHz(self):
self._test_simple(200e6)
def test_1Hz(self):
self._test_simple(1)
def test_1GHz_fail(self):
with self.assertRaises(TimeConstraintError):
self._test_simple(1e9)
def _test_simple(self, freq):
u = AlapAsapDiffExample()
u.CLK_FREQ = int(freq)
a = 20
b = 58
c = 48
self.compileSimAndStart(u, target_platform=VirtualHlsPlatform())
u.a._ag.data.append(a)
u.b._ag.data.append(b)
u.c._ag.data.append(c)
self.runSim(int(40 * freq_to_period(u.CLK_FREQ)))
res = u.d._ag.data[-1]
self.assertValEqual(res, neg_8b(neg_8b(a) & neg_8b(b)) & neg_8b(c))
if __name__ == "__main__":
import unittest
from hwt.synthesizer.utils import to_rtl_str
u = AlapAsapDiffExample()
from hwtHls.platform.virtual import makeDebugPasses
print(to_rtl_str(u, target_platform=VirtualHlsPlatform(**makeDebugPasses("tmp"))))
#suite = unittest.TestSuite()
## suite.addTest(FrameTmplTC('test_frameHeader'))
#suite.addTest(unittest.makeSuite(AlapAsapDiffExample_TC))
#runner = unittest.TextTestRunner(verbosity=3)
#runner.run(suite)
|
Eloston/mipybot
|
mipybot/world/world.py
|
Python
|
gpl-3.0
| 626
| 0
|
'''
MiPyBot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MiPyBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MiPyBot. If not, see <http://www.gnu.org/licenses/>.
'''
# To be implemented
|
jbjornson/SublimeGMail
|
GMail.py
|
Python
|
mit
| 4,056
| 0.003205
|
# Loosely based on https://github.com/Skarlso/SublimeGmailPlugin by Skarlso
import sublime
import sublime_plugin
from smtplib import SMTP
from email.mime.text import MIMEText
from email.header import Header
# from email.headerregistry import Address
# from email.utils import parseaddr, formataddr
config = {
# TODO You need to change these values
# Default value for each field of the email message
"default_value": {
"smtp_login": "example@gmail.com",
"smtp_passwd": "c1everP@ssword",
"from": "example@gmail.com",
"display_name": u"Firstname Lastname",
"recipients": "first@recipient.com; second@recipient.com; third@recipient.com",
"subject": u"Sent from SublimeText"
},
    # TODO Set to "true" to be prompted to edit the value, "false" to silently use the default_value from above
"interactive": {
"smtp_login": False,
"smtp_passwd": False,
"from": False,
"display_name": True,
"recipients": False,
"subject": True
},
# The prompt message to the user for each field
"prompt": {
"smtp_login": "GMail User ID",
"smtp_passwd": "GMail Password",
"from": "Sender e-mail address",
"display_name": "Sender's display name",
"recipients": "Recipients (semicolon or comma separated list)",
"subject": "Subject"
}
}
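# Illustrative note (hedged, not in the original file): with the defaults
# above only "display_name" and "subject" are marked interactive, so the
# command prompts for those two and silently reuses default_value for the
# login, password, sender and recipients.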
class GmailCommand(sublime_plugin.TextCommand):
def run(self, edit):
# Collect all the text regions and send together
text = ''
for region in self.view.sel():
if not region.empty():
# Get the selected text
text = '%s%s\n\n' % (text, self.view.substr(region))
# Only send an email if there is some content to send
if text:
self.values = {}
self.values['body'] = text
self.stack = ["smtp_login", "smtp_passwd", "from", "display_name", "recipients", "subject"]
self.handle_input()
else:
sublime.status_message('Please select some text to send (via gmail)')
def handle_input(self, key=None, value=None):
if key:
            self.values[key] = value
if len(self.stack) == 0:
sublime.set_timeout_async(lambda : self.send_email(), 0)
else:
key = self.stack.pop(0)
if config['interactive'][key]:
# get the value from the user
on_done = lambda s: self.handle_input(key, s)
sublime.active_window().show_input_panel(config['prompt'][key], config['default_value'][key], on_done, None, None)
else:
# use the default
self.values[key] = config['default_value'][key]
self.handle_input()
def send_email(self):
# Parse the recipients list
recipients = self.values['recipients']
recipient_list = [recipients]
for sep in [';', ',']:
if sep in recipients:
recipient_list = recipients.split(sep)
break
msg = MIMEText(str(self.values['body']), 'plain', 'UTF-8')
msg['From'] = "\"%s\" <%s>" % (Header(str(self.values['display_name']), 'utf-8'), self.values['from'])
msg['To'] = ', '.join(recipient_list)
msg['Subject'] = Header(str(self.values['subject']), 'UTF-8')
try:
            mailServer = SMTP("smtp.gmail.com", 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(self.values['smtp_login'], self.values['smtp_passwd'])
# FIXME should we use msg['From'] or self.values['from'] here?
mailServer.sendmail(self.values['from'], recipient_list, msg.as_string())
mailServer.close()
        except Exception:
message = "There was an error sending the email to: %s " % recipients
print(message)
sublime.status_message(message)
|
vivsh/django-ginger
|
ginger/middleware.py
|
Python
|
mit
| 3,073
| 0.000976
|
from ginger import utils
from datetime import datetime, timedelta
from django.core.cache import cache
from django.conf import settings
from django.utils import timezone
import pytz
__all__ = ['CurrentRequestMiddleware',
'MultipleProxyMiddleware',
'ActiveUserMiddleware',
'LastLoginMiddleware',
'UsersOnlineMiddleware'
]
class CurrentRequestMiddleware(object):
"""
This should be the first middleware
"""
def process_request(self, request):
ctx = utils.context()
ctx.request = request
def process_response(self, request, response):
ctx = utils.context()
ctx.request = None
return response
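# Hedged configuration sketch (not part of this module; setting names are the
# stock Django ones): list CurrentRequestMiddleware first so every later
# middleware and view can reach the request through utils.context().request.
#
# MIDDLEWARE_CLASSES = (
#     'ginger.middleware.CurrentRequestMiddleware',
#     'django.contrib.sessions.middleware.SessionMiddleware',
#     'django.contrib.auth.middleware.AuthenticationMiddleware',
# )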
class MultipleProxyMiddleware(object):
"""
https://docs.djangoproject.com/en/dev/ref/request-response/
"""
FORWARDED_FOR_FIELDS = [
'HTTP_X_FORWARDED_FOR',
'HTTP_X_FORWARDED_HOST',
'HTTP_X_FORWARDED_SERVER',
]
def process_request(self, request):
"""
Rewrites the proxy headers so that only the most
recent proxy is used.
"""
for field in self.FORWARDED_FOR_FIELDS:
if field in request.META:
if ',' in request.META[field]:
parts = request.META[field].split(',')
request.META[field] = parts[-1].strip()
class LastLoginMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
cache.set(str(request.user.id), True, settings.USER_ONLINE_TIMEOUT)
class ActiveUserMiddleware:
def process_request(self, request):
user = request.user
if not request.is_ajax() and request.method == 'GET':
if user.is_authenticated() and not user.is_online():
user.make_online(commit=True)
class UsersOnlineMiddleware(object):
def process_request(self, request):
now = datetime.now()
delta = now - timedelta(minutes=settings.USER_ONLINE_TIMEOUT)
users_online = cache.get('users_online', {})
guests_online = cache.get('guests_online', {})
if request.user.is_authenticated():
users_online[request.user.id] = now
else:
guest_sid = request.COOKIES.get(settings.SESSION_COOKIE_NAME, '')
guests_online[guest_sid] = now
        # iterate over a copy of the keys so entries can be deleted safely
        for user_id in list(users_online.keys()):
            if users_online[user_id] < delta:
                del users_online[user_id]
        for guest_id in list(guests_online.keys()):
            if guests_online[guest_id] < delta:
                del guests_online[guest_id]
cache.set('users_online', users_online, 60*60*24)
cache.set('guests_online', guests_online, 60*60*24)
class TimezoneMiddleware(object):
def process_request(self, request):
session = request.session
key = 'ginger-tz'
if key in session:
try:
tz = pytz.timezone(session[key])
timezone.activate(tz)
except Exception:
del session[key]
|
blitzmann/Pyfa
|
eos/db/gamedata/group.py
|
Python
|
gpl-3.0
| 1,802
| 0.002775
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Column, String, Integer, Boolean, ForeignKey, Table
from sqlalchemy.orm import relation, mapper, synonym, deferred, backref
from eos.db import gamedata_meta
from eos.gamedata import Category, Group
groups_table = Table("invgroups", gamedata_meta,
Column("groupID", Integer, primary_key=True),
Column("groupName", String),
Column("description", String),
Column("published", Boolean),
Column("categoryID", Integer, ForeignKey("invcategories.category
|
ID")),
Column("iconID", Integer))
mapper(Group, groups_table,
properties={
"category" : relation(Category, backref=backref("groups", cascade="all,delete")),
"ID" : synonym("groupID"),
"name" : synonym("groupName"),
"description": deferred(groups_table.c.description)
})
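# Hedged usage sketch (not part of this module; session construction lives
# elsewhere in eos.db, and the filter value is illustrative). The synonyms
# above make the ORM names usable in queries:
# group = session.query(Group).filter(Group.name == "Frigate").first()
# print(group.ID, group.description, group.category)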
|
ncclient/ncclient
|
ncclient/devices/junos.py
|
Python
|
apache-2.0
| 6,467
| 0.002783
|
"""
Handler for Juniper device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Junos", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
import logging
import re
from lxml import etree
from lxml.etree import QName
from ncclient.operations.retrieve import GetSchemaReply
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.juniper.rpc import GetConfiguration, LoadConfiguration, CompareConfiguration
from ncclient.operations.third_party.juniper.rpc import ExecuteRpc, Command, Reboot, Halt, Commit, Rollback
from ncclient.operations.rpc import RPCError
from ncclient.xml_ import to_ele, replace_namespace, BASE_NS_1_0, NETCONF_MONITORING_NS
from ncclient.transport.third_party.junos.parser import JunosXMLParser
from ncclient.transport.parser import DefaultXMLParser
from ncclient.transport.parser import SAXParserHandler
logger = logging.getLogger(__name__)
class JunosDeviceHandler(DefaultDeviceHandler):
"""
Juniper handler for device specific information.
"""
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
self.__reply_parsing_error_transform_by_cls = {
GetSchemaReply: fix_get_schema_reply
}
def add_additional_operations(self):
dict = {}
dict["rpc"] = ExecuteRpc
dict["get_configuration"] = GetConfiguration
dict["load_configuration"] = LoadConfiguration
dict["compare_configuration"] = CompareConfiguration
dict["command"] = Command
dict["reboot"] = Reboot
dict["halt"] = Halt
dict["commit"] = Commit
dict["rollback"] = Rollback
return dict
def perform_qualify_check(self):
return False
def handle_raw_dispatch(self, raw):
if 'routing-engine' in raw:
raw = re.sub(r'<ok/>', '</routing-engine>\n<ok/>', raw)
return raw
# check if error is during capabilities exchange itself
elif re.search(r'<rpc-reply>.*?</rpc-reply>.*</hello>?', raw, re.M | re.S):
errs = re.findall(
r'<rpc-error>.*?</rpc-error>', raw, re.M | re.S)
err_list = []
if errs:
add_ns = """
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output indent="yes"/>
<xsl:template match="*">
<xsl:element name="{local-name()}" namespace="urn:ietf:params:xml:ns:netconf:base:1.0">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
</xsl:stylesheet>"""
for err in errs:
doc = etree.ElementTree(etree.XML(err))
# Adding namespace using xslt
xslt = etree.XSLT(etree.XML(add_ns))
transformed_xml = etree.XML(etree.tostring(xslt(doc)))
err_list.append(RPCError(transformed_xml))
return RPCError(to_ele("<rpc-reply>"+''.join(errs)+"</rpc-reply>"), err_list)
else:
return False
def handle_connection_exceptions(self, sshsession):
        c = sshsession._channel = sshsession._transport.open_channel(kind="session")
c.set_name("netconf-command-" + str(sshsession._channel_id))
c.exec_command("xml-mode netconf need-trailer")
return True
def reply_parsing_error_transform(self, reply_cls):
# return transform function if found, else None
        return self.__reply_parsing_error_transform_by_cls.get(reply_cls)
def transform_reply(self):
reply = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
'''
import sys
if sys.version < '3':
return reply
else:
return reply.encode('UTF-8')
def get_xml_parser(self, session):
# use_filter in device_params can be used to enabled using SAX parsing
if self.device_params.get('use_filter', False):
l = session.get_listener_instance(SAXParserHandler)
if l:
session.remove_listener(l)
del l
session.add_listener(SAXParserHandler(session))
return JunosXMLParser(session)
else:
return DefaultXMLParser(session)
def fix_get_schema_reply(root):
# Workaround for wrong namespace of the data elem
# (issue with some Junos versions, might be corrected by Juniper at some point)
# get the data element, by local-name
data_elems = root.xpath('/nc:rpc-reply/*[local-name()="data"]', namespaces={'nc': BASE_NS_1_0})
if len(data_elems) != 1:
return # Will not alter unexpected content
data_el = data_elems[0]
namespace = QName(data_el).namespace
if namespace == BASE_NS_1_0:
# With the default netconf setting, we may get "{BASE_NS_1_0}data"; warn and fix it
logger.warning("The device seems to run non-rfc compliant netconf. You may want to "
"configure: 'set system services netconf rfc-compliant'")
replace_namespace(data_el, old_ns=BASE_NS_1_0, new_ns=NETCONF_MONITORING_NS)
elif namespace is None:
# With 'set system services netconf rfc-compliant' we may get "data" (no namespace); fix it
# There is no default xmlns and the data el is <data xmlns:ncm="NETCONF_MONITORING_NS">
replace_namespace(data_el, old_ns=None, new_ns=NETCONF_MONITORING_NS)
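# Hedged usage sketch (not part of this module): the handler is selected by
# name through device_params, and 'use_filter' switches on the SAX parser path
# implemented in get_xml_parser above. The schema identifier is illustrative.
# from ncclient import manager
# with manager.connect(host='router', username='admin', hostkey_verify=False,
#                      device_params={'name': 'junos', 'use_filter': True}) as m:
#     print(m.get_schema('junos-conf-root'))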
|
SaikWolf/gnuradio
|
grc/gui/Port.py
|
Python
|
gpl-3.0
| 10,040
| 0.003586
|
"""
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import pygtk
pygtk.require('2.0')
import gtk
from . import Actions, Colors, Utils
from .Constants import (
PORT_SEPARATION, PORT_SPACING, CONNECTOR_EXTENSION_MINIMAL,
CONNECTOR_EXTENSION_INCREMENT, PORT_LABEL_PADDING, PORT_MIN_WIDTH, PORT_LABEL_HIDDEN_WIDTH, PORT_FONT
)
from .Element import Element
from ..core.Constants import DEFAULT_DOMAIN, GR_MESSAGE_DOMAIN
from ..core.Port import Port as _Port
PORT_MARKUP_TMPL="""\
<span foreground="black" font_desc="$font">$encode($port.get_name())</span>"""
class Port(_Port, Element):
"""The graphical port."""
def __init__(self, block, n, dir):
"""
Port constructor.
Create list of connector coordinates.
"""
_Port.__init__(self, block, n, dir)
Element.__init__(self)
self.W = self.H = self.w = self.h = 0
self._connector_coordinate = (0, 0)
self._connector_length = 0
self._hovering = True
self._force_label_unhidden = False
def create_shapes(self):
"""Create new areas and labels for the port."""
Element.create_shapes(self)
if self.get_hide():
return # this port is hidden, no need to create shapes
if self.get_domain() == GR_MESSAGE_DOMAIN:
pass
elif self.get_domain() != DEFAULT_DOMAIN:
self.line_attributes[0] = 2
#get current rotation
rotation = self.get_rotation()
#get all sibling ports
ports = self.get_parent().get_sources_gui() \
if self.is_source else self.get_parent().get_sinks_gui()
ports = filter(lambda p: not p.get_hide(), ports)
#get the max width
self.W = max([port.W for port in ports] + [PORT_MIN_WIDTH])
W = self.W if not self._label_hidden() else PORT_LABEL_HIDDEN_WIDTH
        #get a numeric index for this port relative to its sibling ports
try:
index = ports.index(self)
        except ValueError:
if hasattr(self, '_connector_length'):
del self._connector_length
return
length = len(filter(lambda p: not p.get_hide(), ports))
#reverse the order of ports for these rotations
if rotation in (180, 270):
index = length-index-1
port_separation = PORT_SEPARATION \
if not self.get_parent().has_busses[self.is_source] \
else max([port.H for port in ports]) + PORT_SPACING
offset = (self.get_parent().H - (length-1)*port_separation - self.H)/2
#create areas and connector coordinates
if (self.is_sink and rotation == 0) or (self.is_source and rotation == 180):
x = -W
y = port_separation*index+offset
self.add_area((x, y), (W, self.H))
self._connector_coordinate = (x-1, y+self.H/2)
elif (self.is_source and rotation == 0) or (self.is_sink and rotation == 180):
x = self.get_parent().W
y = port_separation*index+offset
self.add_area((x, y), (W, self.H))
self._connector_coordinate = (x+1+W, y+self.H/2)
elif (self.is_source and rotation == 90) or (self.is_sink and rotation == 270):
y = -W
x = port_separation*index+offset
self.add_area((x, y), (self.H, W))
self._connector_coordinate = (x+self.H/2, y-1)
elif (self.is_sink and rotation == 90) or (self.is_source and rotation == 270):
y = self.get_parent().W
x = port_separation*index+offset
self.add_area((x, y), (self.H, W))
self._connector_coordinate = (x+self.H/2, y+1+W)
#the connector length
self._connector_length = CONNECTOR_EXTENSION_MINIMAL + CONNECTOR_EXTENSION_INCREMENT*index
def create_labels(self):
"""Create the labels for the socket."""
Element.create_labels(self)
self._bg_color = Colors.get_color(self.get_color())
# create the layout
layout = gtk.DrawingArea().create_pango_layout('')
layout.set_markup(Utils.parse_template(PORT_MARKUP_TMPL, port=self, font=PORT_FONT))
self.w, self.h = layout.get_pixel_size()
self.W = 2 * PORT_LABEL_PADDING + self.w
self.H = 2 * PORT_LABEL_PADDING + self.h * (
3 if self.get_type() == 'bus' else 1)
self.H += self.H % 2
# create the pixmap
pixmap = self.get_parent().get_parent().new_pixmap(self.w, self.h)
gc = pixmap.new_gc()
gc.set_foreground(self._bg_color)
pixmap.draw_rectangle(gc, True, 0, 0, self.w, self.h)
pixmap.draw_layout(gc, 0, 0, layout)
# create vertical and horizontal pixmaps
self.horizontal_label = pixmap
if self.is_vertical():
self.vertical_label = self.get_parent().get_parent().new_pixmap(self.h, self.w)
Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
def draw(self, gc, window):
"""
Draw the socket with a label.
Args:
gc: the graphics context
window: the gtk window to draw on
"""
Element.draw(
self, gc, window, bg_color=self._bg_color,
border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or
self.get_parent().is_dummy_block and Colors.MISSING_BLOCK_BORDER_COLOR or
Colors.BORDER_COLOR,
)
if not self._areas_list or self._label_hidden():
return # this port is either hidden (no areas) or folded (no label)
X, Y = self.get_coordinate()
(x, y), (w, h) = self._areas_list[0] # use the first area's sizes to place the labels
if self.is_horizontal():
window.draw_drawable(gc, self.horizontal_label, 0, 0, x+X+(self.W-self.w)/2, y+Y+(self.H-self.h)/2, -1, -1)
elif self.is_vertical():
window.draw_drawable(gc, self.vertical_label, 0, 0, x+X+(self.H-self.h)/2, y+Y+(self.W-self.w)/2, -1, -1)
def get_connector_coordinate(self):
"""
Get the coordinate where connections may attach to.
Returns:
the connector coordinate (x, y) tuple
"""
x, y = self._connector_coordinate
X, Y = self.get_coordinate()
return (x + X, y + Y)
def get_connector_direction(self):
"""
Get the direction that the socket points: 0,90,180,270.
This is the rotation degree if the socket is an output or
the rotation degree + 180 if the socket is an input.
Returns:
the direction in degrees
"""
if self.is_source: return self.get_rotation()
elif self.is_sink: return (self.get_rotation() + 180)%360
def get_connector_length(self):
"""
Get the length of the connector.
The connector length increases as the port index changes.
Returns:
the length in pixels
"""
return self._connector_length
def get_rotation(self):
"""
Get the parent's rotation rather than self.
Returns:
the parent's rotation
"""
return self.get_parent().get_rotation()
def move(self, delta_coor):
"""
Move the parent rather than self.
Args:
delta_corr: the (delta_x, delta_y) tuple
"""
self.get_parent().move(delta_coor)
|
pandas-dev/pandas
|
pandas/tests/test_take.py
|
Python
|
bsd-3-clause
| 11,995
| 0.000417
|
from datetime import datetime
import re
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas._testing as tm
import pandas.core.algorithms as algos
@pytest.fixture(params=[True, False])
def writeable(request):
return request.param
# Check that take_nd works both with writeable arrays
# (in which case fast typed memory-views implementation)
# and read-only arrays alike.
@pytest.fixture(
params=[
(np.float64, True),
(np.float32, True),
(np.uint64, False),
(np.uint32, False),
(np.uint16, False),
(np.uint8, False),
(np.int64, False),
(np.int32, False),
(np.int16, False),
(np.int8, False),
(np.object_, True),
(np.bool_, False),
]
)
def dtype_can_hold_na(request):
return request.param
@pytest.fixture(
params=[
(np.int8, np.int16(127), np.int8),
(np.int8, np.int16(128), np.int16),
(np.int32, 1, np.int32),
(np.int32, 2.0, np.float64),
(np.int32, 3.0 + 4.0j, np.complex128),
(np.int32, True, np.object_),
(np.int32, "", np.object_),
(np.float64, 1, np.float64),
(np.float64, 2.0, np.float64),
(np.float64, 3.0 + 4.0j, np.complex128),
(np.float64, True, np.object_),
(np.float64, "", np.object_),
(np.complex128, 1, np.complex128),
(np.complex128, 2.0, np.complex128),
(np.complex128, 3.0 + 4.0j, np.complex128),
(np.complex128, True, np.object_),
(np.complex128, "", np.object_),
(np.bool_, 1, np.object_),
(np.bool_, 2.0, np.object_),
(np.bool_, 3.0 + 4.0j, np.object_),
(np.bool_, True, np.bool_),
(np.bool_, "", np.object_),
]
)
def dtype_fill_out_dtype(request):
return request.param
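# Illustrative note (hedged, not in the original file): take_nd promotes the
# result dtype when fill_value cannot be held by the input dtype, mirroring
# the fixture above, e.g.
#   algos.take_nd(np.array([1, 2], dtype=np.int32), [0, -1], fill_value=2.0)
# comes back float64 with 2.0 in the fill slot.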
class TestTake:
# Standard incompatible fill error.
fill_error = re.compile("Incompatible type for fill_value")
def test_1d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all()
assert result[3] == fill_value
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2, 3]] == data[indexer]).all()
assert result.dtype == dtype
def test_2d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()
assert (result[3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()
assert (result[:, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all()
assert result.dtype == dtype
def test_3d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()
assert (result[3, :, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()
assert (result[:, 3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()
assert (result[:, :, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all()
assert result.dtype == dtype
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = algos.take_nd(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_bool(self):
arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(11_045_376, 11_360_736, (5, 3)) * 100_000_000_000
arr = arr.view(dtype="datetime64[ns]")
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1))
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
|
gitsimon/tq_website
|
partners/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,987
| 0.003523
|
# Generated by Django 2.2.12 on 2020-04-25 15:53
from django.db import migrations, models
import django.db.models.deletion
import djangocms_text_ckeditor.fields
import parler.fields
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Partner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='partners', verbose_name='Image')),
('url', models.URLField()),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='PartnerTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=100, verbose_name='Name')),
|
('description', djangocms_text
|
_ckeditor.fields.HTMLField(blank=True, null=True, verbose_name='Description')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='partners.Partner')),
],
options={
'verbose_name': 'partner Translation',
'db_table': 'partners_partner_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
]
|
FireCARES/firecares
|
firecares/firecares_core/migrations/0009_registrationwhitelist.py
|
Python
|
mit
| 576
| 0.003472
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firecares_core', '0008_auto_20161122_1420'),
]
operations = [
        migrations.CreateModel(
name='RegistrationWhitelist',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email_or_domain', models.CharField(unique=True, max_length=254)),
],
),
]
|
lreis2415/AutoFuzSlpPos
|
autofuzslppos/FuzzySlpPosInference.py
|
Python
|
gpl-2.0
| 6,208
| 0.003222
|
# -*- coding: utf-8 -*-
"""Prepare configure file for fuzzy slope position inference program.
@author : Liangjun Zhu
@changelog:
- 15-09-08 lj - initial implementation.
- 17-07-30 lj - reorganize and incorporate with pygeoc.
"""
from __future__ import absolute_import, unicode_literals
import time
from io import open
import os
import sys
if os.path.abspath(os.path.join(sys.path[0], '..')) not in sys.path:
sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], '..')))
from pygeoc.utils import StringClass
from autofuzslppos.Config import get_input_cfgs
from autofuzslppos.ParasComb import combine_inf_conf_parameters
from autofuzslppos.TauDEMExtension import TauDEMExtension
def read_inf_param_from_file(conf):
"""Read fuzzy inference parameters from file."""
params_list = list()
with open(conf, 'r', encoding='utf-8') as f:
for line in f.readlines():
eles = line.split('\n')[0].split('\t')
params = StringClass.extract_numeric_values_from_string(line.split('\n')[0])
if StringClass.string_match(eles[0], 'Parameters') and len(params) >= 6:
params_list.append([eles[1]] + [eles[3]] + params[-6:])
return params_list
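# Illustrative note (hedged, inferred from the write format in
# fuzzy_inference below): each matching line looks roughly like
#   Parameters\tname\ttopo-attribute\tS|Z|B\tw1\tr1\tk1\tw2\tr2\tk2
# so each params_list entry is [name, function-type, w1, r1, k1, w2, r2, k2].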
def fuzzy_inference(cfg):
"""Fuzzy slope position inference."""
if not cfg.flag_fuzzyinference:
return 0
start_t = time.time()
    simif = list()  # similarity file paths of the slope position types
for i, slppos in enumerate(cfg.slppostype):
if slppos not in cfg.inferparam:
cfg.inferparam[slppos] = dict()
simif.append(cfg.singleslpposconf[slppos].fuzslppos)
if cfg.flag_auto_inferenceparams: # use automatically recommended parameters
params_list = read_inf_param_from_file(cfg.singleslpposconf[slppos].infrecommend)
for p in params_list:
cfg.inferparam[slppos][p[0]] = p[1:]
else: # keep cfg.inferparam[slppos] as it is, and read infconfig for update
params_list = read_inf_param_from_file(cfg.singleslpposconf[slppos].infconfig)
for p in params_list:
# for update
cfg.inferparam[slppos][p[0]] = p[1:]
#cfg.selectedtopo[p[0]] = p[1]
# # for supplement
# if not p[0] in cfg.inferparam[slppos]:
# cfg.inferparam[slppos][p[0]] = p[1:]
# if not p[0] in cfg.selectedtopo:
# cfg.selectedtopo[p[0]] = p[1]
    # In the current version, the regional attribute is always named 'rpi'.
    # Set fuzzy inference parameters for 'rpi' if they do not exist yet.
regional_attr_range_dict = dict()
for slppos in cfg.slppostype:
# TODO, add some value check
regional_attr_range_dict[slppos] = cfg.extractrange[slppos]['rpi']
for i, typ in enumerate(cfg.slppostype):
cur_rng = regional_attr_range_dict[typ]
if i == 0: # for Ridge, S: w1 = Rdg.max-Shd.max
next_rng = regional_attr_range_dict[cfg.slppostype[i + 1]]
tempw1 = cur_rng[1] - next_rng[1]
cur_param = ['S', tempw1, 2, 0.5, 1, 0, 1]
elif i == len(cfg.slppostype) - 1: # for Valley, Z: w2 = Fts.max-Vly.max
before_rng = regional_attr_range_dict[cfg.slppostype[i - 1]]
tempw2 = before_rng[1] - cur_rng[1]
cur_param = ['Z', 1, 0, 1, tempw2, 2, 0.5]
else:
# for other slope positions, B: w1 = w2 = min(cur.min-next.max, before.min-cur.max)
next_rng = regional_attr_range_dict[cfg.slppostype[i + 1]]
before_rng = regional_attr_range_dict[cfg.slppostype[i - 1]]
tempw = min(cur_rng[0] - next_rng[1], before_rng[0] - cur_rng[1])
cur_param = ['B', tempw, 2, 0.5, tempw, 2, 0.5]
if 'rpi' not in cfg.inferparam[typ]:
cfg.inferparam[typ]['rpi'] = cur_param[:]
# write fuzzy inference parameters to configuration file and run 'fuzzyslpposinference'
for i, slppos in enumerate(cfg.slppostype):
config_info = open(cfg.singleslpposconf[slppos].infconfig, 'w', encoding='utf-8')
config_info.write('PrototypeGRID\t%s\n' % cfg.singleslpposconf[slppos].typloc)
config_info.write('ProtoTag\t%d\n' % cfg.slppostag[i])
config_info.write('ParametersNUM\t%d\n' % len(cfg.inferparam[slppos]))
for name, param in list(cfg.inferparam[slppos].items()):
config_info.write('Parameters\t%s\t%s\t%s\t%f\t%f\t%f\t%f\t%f\t%f\n' % (
name, cfg.selectedtopo[name], param[0], param[1], param[2],
param[3], param[4], param[5], param[6]))
config_info.write('DistanceExponentForIDW\t%d\n' % cfg.dist_exp)
        config_info.write('OUTPUT\t%s\n' % cfg.singleslpposconf[slppos].fuzslppos)
config_info.close()
TauDEMExtension.fuzzyslpposinference(cfg.proc,
cfg.singleslpposconf[slppos].infconfig,
cfg.ws.output_dir, cfg.mpi_dir, cfg.bin_dir,
cfg.log.all, cfg.log.runtime, cfg.hostfile)
TauDEMExtension.hardenslppos(cfg.proc, simif, cfg.slppostag,
cfg.slpposresult.harden_slppos,
cfg.slpposresult.max_similarity,
cfg.slpposresult.secharden_slppos,
cfg.slpposresult.secmax_similarity, None, None,
cfg.ws.output_dir, cfg.mpi_dir, cfg.bin_dir,
cfg.log.all, cfg.log.runtime, cfg.hostfile)
    print('Fuzzy slope position calculation done!')
# Combine fuzzy inference parameters.
combine_inf_conf_parameters(cfg.slppostype, cfg.singleslpposconf, cfg.slpposresult.infconfig)
end_t = time.time()
cost = (end_t - start_t) / 60.
with open(cfg.log.runtime, 'a', encoding='utf-8') as logf:
logf.write('Fuzzy Slope Position Inference Time-consuming: %s\n' % repr(cost))
return cost
def main():
"""TEST CODE"""
fuzslppos_cfg = get_input_cfgs()
fuzzy_inference(fuzslppos_cfg)
if __name__ == '__main__':
main()
|
wadobo/socializa
|
backend/editor/urls.py
|
Python
|
agpl-3.0
| 775
| 0.00129
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.editor, name="editor"),
url(r'^game/$', views.edit_game, name="add_game"),
url(r'^game/(?P<gameid>\d+)/$', views.edit_game, name="edit_game"),
url(r'^event/$', views.edit_event, name="add_event"),
url(r'^event/(?P<evid>\d+)/$', views.edit_event, name="edit_event"),
    url(r'^ajax/player/$', views.ajax_player_search, name="ajax_player_search"),
url(r'^api/game/(?P<game_id>\d+)/$', views.gameview, name='get_game'),
url(r'^api/game/$', views.gameview, name='new_game'),
url(r'^api/games/$', views.gamelist, name='get_games'),
url(r'^api/ev/(?P<ev_id>\d+)/$', views.evview, name='get_ev'),
url(r'^api/ev/$', views.evview, name='new_ev'),
]
|
maikodaraine/EnlightenmentUbuntu
|
bindings/python/python-efl/examples/elementary/test_spinner.py
|
Python
|
unlicense
| 2,598
| 0.005774
|
#!/usr/bin/env python
# encoding: utf-8
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL
from efl import elementary
from efl.elementary.window import StandardWindow
from efl.elementary.box import Box
from efl.elementary.spinner import Spinner
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
EXPAND_HORIZ = EVAS_HINT_EXPAND, 0.0
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
FILL_HORIZ = EVAS_HINT_FILL, 0.5
def spinner_clicked(obj):
win = StandardWindow("spinner", "Spinner test", autodel=True,
size=(300, 300))
if obj is None:
win.callback_delete_request_add(lambda o: elementary.exit())
bx = Box(win, size_hint_weight=EXPAND_BOTH)
win.resize_object_add(bx)
bx.show()
sp = Spinner(win, editable=True, label_format="%1.1f units", step=1.3,
wrap=True, min_max=(-50.0, 250.0), size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
bx.pack_end(sp)
sp.show()
sp = Spinner(win, label_format="Base 5.5, Round 2 : %1.1f",
min_max=(-100.0, 100.0), round=2, base=5.5, value=5.5,
size_hint_weight=EXPAND_BOTH, size_hint_align=FILL_HORIZ)
bx.pack_end(sp)
sp.show()
sp = Spinner(win, label_format="Percentage %%%1.2f something",
step=5.0, min_max=(0.0, 100.0), size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
bx.pack_end(sp)
sp.show()
sp = Spinner(win, label_format="%1.1f units", step=1.3, wrap=True,
style="vertical", min_max=(-50.0, 250.0), size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
bx.pack_end(sp)
sp.show()
sp = Spinner(win, label_format="Disabled %.0f", disabled=True,
min_max=(-50.0, 250.0), size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
bx.pack_end(sp)
sp.show()
sp = Spinner(win, wrap=True, min_max=(1, 12), value=1,
label_format="%.0f", editable=False, size_hint_weight=EXPAND_BOTH,
size_hint_align=FILL_HORIZ)
sp.special_value_add(1, "January")
    sp.special_value_add(2, "February")
sp.special_value_add(3, "March")
sp.special_value_add(4, "April")
sp.special_value_add(5, "May")
sp.special_value_add(6, "June")
sp.special_value_add(7, "July")
sp.special_value_add(8, "August")
sp.special_value_add(9, "September")
sp.special_value_add(10, "October")
    sp.special_value_add(11, "November")
sp.special_value_add(12, "December")
bx.pack_end(sp)
sp.show()
win.show()
if __name__ == "__main__":
elementary.init()
spinner_clicked(None)
elementary.run()
elementary.shutdown()
|
gVallverdu/pymatgen
|
pymatgen/command_line/__init__.py
|
Python
|
mit
| 237
| 0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package contains various command line wrappers to programs used in
pymatgen that do not have Python equivalents.
|
"""
|
o-kei/design-computing-aij
|
ch3_2/bezier_2D.py
|
Python
|
mit
| 1,190
| 0
|
import numpy as np  # import the numpy module
import matplotlib.pyplot as plt  # import matplotlib's pyplot interface
def bernstein(t, n, i):  # definition of the Bernstein basis function
cn, ci, cni = 1.0, 1.0, 1.0
for k in range(2, n, 1):
cn = cn * k
for k in range(1, i, 1):
if i == 1:
break
ci = ci * k
for k in range(1, n - i + 1, 1):
if n == i:
break
cni = cni * k
j = t**(i - 1) * (1 - t)**(n - i) * cn / (ci * cni)
return j
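# Worked check (hedged, not in the original file): for n = 3 control points,
# bernstein(t, 3, 2) evaluates the middle quadratic Bernstein basis
# 2*t*(1 - t), so bernstein(0.5, 3, 2) == 0.5.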
def bezierplot(t, cp):  # definition of the Bezier curve
n = len(cp)
r = np.zeros([len(t), 2])
for k in range(len(t)):
sum1, sum2 = 0.0, 0.0
for i in range(1, n + 1, 1):
bt = bernstein(t[k], n, i)
sum1 += cp[i - 1, 0] * bt
sum2 += cp[i - 1, 1] * bt
r[k, :] = [sum1, sum2]
return np.array(r)
cp = np.array([[0, -2], [1, -3], [2, -2], [3, 2], [4, 2], [5, 0]])  # control point coordinates
t = np.arange(0, 1 + 0.01, 0.01)  # generate the parameter values
p = bezierplot(t, cp)  # generate the Bezier curve
plt.figure()
plt.plot(p[:, 0], p[:, 1])
plt.plot(cp[:, 0], cp[:, 1], ls=':', marker='o')
plt.show()
|
deavid/bjsonrpc
|
bjsonrpc/main.py
|
Python
|
bsd-3-clause
| 2,824
| 0.01204
|
"""
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
            than 1024 require special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
            A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)
def connect(host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
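# Hedged usage sketch (not part of this module; the BaseHandler name is
# assumed from bjsonrpc.handlers): publish a custom handler on the server so
# clients can call it remotely.
# import bjsonrpc
# from bjsonrpc.handlers import BaseHandler
#
# class EchoHandler(BaseHandler):
#     def echo(self, text):
#         return text
#
# server = bjsonrpc.createserver(handler_factory=EchoHandler)
# server.serve()  # blocks; a client then runs:
# # conn = bjsonrpc.connect()
# # print conn.call.echo("hi")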
|
travispavek/testrail-python
|
tests/test_api.py
|
Python
|
mit
| 31,201
| 0.000128
|
import ast
import copy
from datetime import datetime, timedelta
import mock
import os
import shutil
import util
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from itertools import ifilter as filter
except ImportError:
pass
from testrail.api import API
from testrail.helper import TestRailError
class TestBase(unittest.TestCase):
def setUp(self):
self.client = API()
def test_set_project_id(self):
self.client.set_project_id(20)
self.assertEqual(self.client._project_id, 20)
class TestConfig(unittest.TestCase):
def setUp(self):
home = os.path.expanduser('~')
self.config_path = '%s/.testrail.conf' % home
self.config_backup = '%s/.testrail.conf_test_orig' % home
self.test_dir = os.path.dirname(os.path.abspath(__file__))
if os.path.isfile(self.config_path):
shutil.move(self.config_path, self.config_backup)
shutil.copyfile('%s/testrail.conf' % self.test_dir, self.config_path)
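        # Hedged sketch of what tests/testrail.conf is expected to hold
        # (key names inferred from the assertions below; any section header
        # is an assumption):
        #   email = user@yourdomain.com
        #   key = your_api_key
        #   url = https://<server>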
def tearDown(self):
if os.path.isfile(self.config_path):
os.remove(self.config_path)
if os.path.isfile(self.config_backup):
shutil.move(self.config_backup, self.config_path)
if os.environ.get('TESTRAIL_USER_EMAIL'):
del os.environ['TESTRAIL_USER_EMAIL']
if os.environ.get('TESTRAIL_USER_KEY'):
del os.environ['TESTRAIL_USER_KEY']
if os.environ.get('TESTRAIL_URL'):
del os.environ['TESTRAIL_URL']
if os.environ.get('TESTRAIL_VERIFY_SSL'):
del os.environ['TESTRAIL_VERIFY_SSL']
def test_no_env(self):
client = API()
config = client._conf()
self.assertEqual(config['email'], 'user@yourdomain.com')
self.assertEqual(config['key'], 'your_api_key')
self.assertEqual(config['url'], 'https://<server>')
self.assertEqual(client.verify_ssl, True)
def test_user_env(self):
email = 'user@example.com'
os.environ['TESTRAIL_USER_EMAIL'] = email
client = API()
config = client._conf()
self.assertEqual(config['email'], email)
self.assertEqual(config['key'], 'your_api_key')
self.assertEqual(config['url'], 'https://<server>')
def test_key_env(self):
key = 'itgiwiht84inf92GWT'
os.environ['TESTRAIL_USER_KEY'] = key
client = API()
config = client._conf()
self.assertEqual(config['email'], 'user@yourdomain.com')
self.assertEqual(config['key'], key)
self.assertEqual(config['url'], 'https://<server>')
def test_url_env(self):
url = 'https://example.com'
os.environ['TESTRAIL_URL'] = url
client = API()
config = client._conf()
|
self.assertEqual(config['email'], 'user@yourdomain.com')
self.assert
|
Equal(config['key'], 'your_api_key')
self.assertEqual(config['url'], url)
def test_ssl_env(self):
os.environ['TESTRAIL_VERIFY_SSL'] = 'False'
client = API()
self.assertEqual(client.verify_ssl, False)
def test_no_config_file(self):
os.remove(self.config_path)
key = 'itgiwiht84inf92GWT'
email = 'user@example.com'
url = 'https://example.com'
os.environ['TESTRAIL_URL'] = url
os.environ['TESTRAIL_USER_KEY'] = key
os.environ['TESTRAIL_USER_EMAIL'] = email
client = API()
config = client._conf()
self.assertEqual(config['url'], url)
self.assertEqual(config['key'], key)
self.assertEqual(config['email'], email)
self.assertEqual(client.verify_ssl, True)
def test_config_no_email(self):
os.remove(self.config_path)
shutil.copyfile('%s/testrail.conf-noemail' % self.test_dir,
self.config_path)
with self.assertRaises(TestRailError) as e:
API()
self.assertEqual(str(e.exception),
('A user email must be set in environment ' +
'variable TESTRAIL_USER_EMAIL or in ~/.testrail.conf'))
def test_config_no_key(self):
os.remove(self.config_path)
shutil.copyfile('%s/testrail.conf-nokey' % self.test_dir,
self.config_path)
with self.assertRaises(TestRailError) as e:
API()
self.assertEqual(str(e.exception),
('A password or API key must be set in environment ' +
'variable TESTRAIL_USER_KEY or in ~/.testrail.conf'))
def test_config_no_url(self):
os.remove(self.config_path)
shutil.copyfile('%s/testrail.conf-nourl' % self.test_dir,
self.config_path)
with self.assertRaises(TestRailError) as e:
API()
self.assertEqual(str(e.exception),
('A URL must be set in environment ' +
'variable TESTRAIL_URL or in ~/.testrail.conf'))
def test_config_verify_ssl_false(self):
os.remove(self.config_path)
shutil.copyfile('%s/testrail.conf-nosslcert' % self.test_dir, self.config_path)
client = API()
self.assertEqual(client.verify_ssl, False)
class TestHTTPMethod(unittest.TestCase):
def setUp(self):
self.client = API()
@mock.patch('testrail.api.requests.get')
def test_get_ok(self, mock_get):
mock_response = mock.Mock()
return_value = {
"announcement": "..",
"completed_on": None,
"id": 1,
"is_completed": False,
"name": "Datahub",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1"
}
expected_response = copy.deepcopy(return_value)
mock_response.json.return_value = return_value
mock_response.status_code = 200
mock_get.return_value = mock_response
url = 'https://<server>/index.php?/api/v2/get_project/1'
actual_response = self.client._get('get_project/1')
mock_get.assert_called_once_with(
url,
headers={'Content-Type': 'application/json'},
params=None,
verify=True,
auth=('user@yourdomain.com', 'your_api_key')
)
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, actual_response)
@mock.patch('testrail.api.requests.get')
def test_get_bad_no_params(self, mock_get):
mock_response = mock.Mock()
expected_response = {
'url': 'https://<server>/index.php?/api/v2/get_plan/200',
'status_code': 400,
'payload': None,
'response_headers': "Mock headers",
'error': 'Invalid or unknown test plan'
}
url = 'https://<server>/index.php?/api/v2/get_plan/200'
mock_response.json.return_value = {
'error': 'Invalid or unknown test plan'
}
mock_response.headers = "Mock headers"
mock_response.status_code = 400
mock_response.url = url
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.client._get('get_plan/200')
mock_get.assert_called_once_with(
url,
headers={'Content-Type': 'application/json'},
params=None,
verify=True,
auth=('user@yourdomain.com', 'your_api_key')
)
self.assertEqual(1, mock_response.json.call_count)
self.assertEqual(expected_response, ast.literal_eval(str(e.exception)))
class TestUser(unittest.TestCase):
def setUp(self):
self.client = API()
self.mock_user_data = [
{
"email": "han@example.com",
"id": 1,
"is_active": 'true',
"name": "Han Solo"
},
{
"email": "jabba@example.com",
"id": 2,
"is_active": 'true',
"name": "Jabba the Hutt"
}
]
self.users = copy.deepcopy(self.mock_user_data)
def tearDown(self):
|
simbha/mAngE-Gin
|
lib/django/contrib/auth/tests/test_context_processors.py
|
Python
|
mit
| 7,020
| 0.000142
|
import os
from django.contrib.auth import authenticate
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.context_processors import PermWrapper, PermLookupDict
from django.db.models import Q
from django.test import TestCase, override_settings
from django.utils._os import upath
class MockUser(object):
def has_module_perms(self, perm):
if perm == 'mockapp':
return True
return False
def has_perm(self, perm):
if perm == 'mockapp.someperm':
return True
return False
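# Illustrative mapping (hedged, not in the original file): wrapped in
# PermWrapper below, this MockUser yields
#   'mockapp' in perms             -> True   (has_module_perms)
#   'mockapp.someperm' in perms    -> True   (has_perm)
#   'mockapp.nonexisting' in perms -> False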
class PermWrapperTests(TestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject(object):
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_permwrapper_in(self):
"""
Test that 'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertTrue('mockapp' in perms)
self.assertFalse('nonexisting' in perms)
self.assertTrue('mockapp.someperm' in perms)
self.assertFalse('mockapp.nonexisting' in perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), 'mockapp')
with self.assertRaises(TypeError):
            self.EQLimiterObject() in pldict
@skipIfCustomUser
@override_settings(
TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',),
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
    ROOT_URLCONF='django.contrib.auth.tests.urls',
USE_TZ=False, # required for loading the fixture
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
TEMPLATE_CONTEXT_PROCESSORS=(
'django.contrib.auth.context_processors.auth',
),
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
),
TEMPLATE_CONTEXT_PROCESSORS=(
'django.contrib.auth.context_processors.auth',
),
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perm_in_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_message_attrs(self):
self.client.login(username='super', password='secret')
response = self.client.get('/auth_processor_messages/')
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
|
pierg75/pier-sosreport
|
sos/plugins/distupgrade.py
|
Python
|
gpl-2.0
| 1,902
| 0
|
# Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
class DistUpgrade(Plugin):
""" Distribution upgrade data """
plugin_name = "distupgrade"
profiles = ('system', 'sysmgmt')
files = None
class RedHatDistUpgrade(DistUpgrade, RedHatPlugin):
packages = (
'preupgrade-assistant',
'preupgrade-assistant-ui',
'preupgrade-assistant-el6toel7',
'redhat-upgrade-tool'
)
files = (
"/var/log/upgrade.log",
"/var/log/redhat_update_tool.log",
"/root/preupgrade/all-xccdf*",
"/root/preupgrade/kickstart"
)
def postproc(self):
self.do_file_sub(
"/root/preupgrade/kickstart/anaconda-ks.cfg",
r"(useradd --password) (.*)",
r"\1 ********"
)
self.do_file_sub(
"/root/preupgrade/kickstart/anaconda-ks.cfg",
r"(\s*rootpw\s*).*",
r"\1********"
)
self.do_file_sub(
"/root/preupgrade/kickstart/untrackeduser",
r"\/home\/.*",
r"/home/******** path redacted ********"
)
# vim: set et ts=4 sw=4 :
|
sdpython/ensae_teaching_cs
|
src/ensae_teaching_cs/td_1a/flask_helper.py
|
Python
|
mit
| 1,966
| 0.001526
|
# -*- coding: utf-8 -*-
"""
@file
@brief Helpers for :epkg:`Flask`.
"""
import traceback
import threading
from flask import Response
def Text2Response(text):
"""
    converts a text into a plain-text response
    @param      text        text to convert
    @return     Response with mimetype ``text/plain``
"""
return Response(text, mimetype='text/plain')
def Exception2Response(e):
"""
    converts an exception into a plain-text response including the stack trace
    @param      e           exception to convert
    @return     Response with mimetype ``text/plain``
"""
text = traceback.format_exc()
return Text2Response("Exception: {0}\nSTACK:\n{1}".format(str(e), text))
class FlaskInThread(threading.Thread):
"""
Defines a thread for the server.
"""
def __init__(self, app, host="localhost", port=8081):
"""
@param app :epkg:`Flask` application
"""
threading.Thread.__init__(self)
self._app = app
self._host = host
        self._port = port
self.daemon = True
def run(self):
"""
Starts the server.
"""
        self._app.run(host=self._host, port=self._port)
def shutdown(self):
"""
        Shuts down the server. The function could work if:
* method run keeps a pointer on a server instance
(the one owning method
`serve_forever <https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.serve_forever>`_)
* module `werkzeug <http://werkzeug.pocoo.org/>`_
returns this instance in function
`serving.run_simple <https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/serving.py>`_
* module `Flask <http://flask.pocoo.org/>`_
returns this instance in method
`app.Flask.run <https://github.com/mitsuhiko/flask/blob/master/flask/app.py>`_
"""
raise NotImplementedError()
# self.server.shutdown()
# self.server.server_close()
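# A minimal sketch (not part of the original module) of a shutdown-capable
# variant: it assumes werkzeug's public ``make_server`` helper and keeps a
# pointer on the server instance, which is exactly what ``shutdown`` needs.
from werkzeug.serving import make_server
class StoppableFlaskInThread(threading.Thread):
    """
    Variant of ``FlaskInThread`` that owns the server instance
    and can therefore be stopped cleanly.
    """
    def __init__(self, app, host="localhost", port=8081):
        """
        @param      app     :epkg:`Flask` application
        """
        threading.Thread.__init__(self)
        # make_server returns a server exposing serve_forever/shutdown.
        self._server = make_server(host, port, app)
        self.daemon = True
    def run(self):
        """
        Starts the server; blocks until shutdown is called.
        """
        self._server.serve_forever()
    def shutdown(self):
        """
        Stops the loop started by run.
        """
        self._server.shutdown()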
|
citrix-openstack-build/nova
|
nova/consoleauth/manager.py
|
Python
|
apache-2.0
| 5,195
| 0.000577
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth Components for Consoles."""
import time
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova import manager
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
LOG = logging.getLogger(__name__)
consoleauth_opts = [
cfg.IntOpt('console_token_ttl',
default=600,
help='How many seconds before deleting tokens'),
cfg.StrOpt('consoleauth_manager',
default='nova.consoleauth.manager.ConsoleAuthManager',
help='Manager for console auth'),
]
CONF = cfg.CONF
CONF.register_opts(consoleauth_opts)
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
class ConsoleAuthManager(manager.Manager):
"""Manages token based authentication."""
RPC_API_VERSION = '1.2'
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(service_name='consoleauth',
*args, **kwargs)
self.mc = memorycache.get_client()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _get_tokens_for_instance(self, instance_uuid):
        tokens_str = self.mc.get(instance_uuid.encode('UTF-8'))
if not tokens_str:
tokens = []
else:
tokens = jsonutils.loads(tokens_str)
return tokens
def authorize_console(self, context, token, console_type, host, port,
                          internal_access_path, instance_uuid=None):
token_dict = {'token': token,
'instance_uuid': instance_uuid,
'console_type': console_type,
'host': host,
'port': port,
'internal_access_path': internal_access_path,
'last_activity_at': time.time()}
data = jsonutils.dumps(token_dict)
self.mc.set(token.encode('UTF-8'), data, CONF.console_token_ttl)
if instance_uuid is not None:
tokens = self._get_tokens_for_instance(instance_uuid)
            # Remove the expired tokens from cache; rebuild the list rather
            # than removing items while iterating, which would skip entries.
            tokens = [tok for tok in tokens
                      if self.mc.get(tok.encode('UTF-8'))]
tokens.append(token)
self.mc.set(instance_uuid.encode('UTF-8'),
jsonutils.dumps(tokens))
LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
{'token': token, 'token_dict': token_dict})
def _validate_token(self, context, token):
instance_uuid = token['instance_uuid']
if instance_uuid is None:
return False
# NOTE(comstud): consoleauth was meant to run in API cells. So,
# if cells is enabled, we must call down to the child cell for
# the instance.
if CONF.cells.enable:
return self.cells_rpcapi.validate_console_port(context,
instance_uuid, token['port'], token['console_type'])
instance = self.db.instance_get_by_uuid(context, instance_uuid)
return self.compute_rpcapi.validate_console_port(context,
instance,
token['port'],
token['console_type'])
def check_token(self, context, token):
token_str = self.mc.get(token.encode('UTF-8'))
token_valid = (token_str is not None)
LOG.audit(_("Checking Token: %(token)s, %(token_valid)s"),
{'token': token, 'token_valid': token_valid})
if token_valid:
token = jsonutils.loads(token_str)
if self._validate_token(context, token):
return token
def delete_tokens_for_instance(self, context, instance_uuid):
tokens = self._get_tokens_for_instance(instance_uuid)
for token in tokens:
self.mc.delete(token.encode('UTF-8'))
self.mc.delete(instance_uuid.encode('UTF-8'))
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
return self.backdoor_port
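# Hedged sketch (illustration only, not nova code): the two-level cache
# layout ConsoleAuthManager maintains, with a plain dict standing in for
# memorycache. Real entries expire after CONF.console_token_ttl seconds;
# this dict version has no TTL.
class _TokenCacheSketch(object):
    def __init__(self):
        self._store = {}
    def authorize(self, token, instance_uuid, token_dict):
        # token -> console metadata
        self._store[token] = token_dict
        # instance -> list of tokens issued for it, so a later
        # delete_for_instance can sweep them all
        self._store.setdefault(instance_uuid, []).append(token)
    def check(self, token):
        # returns None once deleted (or, with memcache, once expired)
        return self._store.get(token)
    def delete_for_instance(self, instance_uuid):
        for tok in self._store.pop(instance_uuid, []):
            self._store.pop(tok, None)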
|
vnsofthe/odoo-dev
|
addons/rhwl/rhwl_project.py
|
Python
|
agpl-3.0
| 1,119
| 0.040991
|
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
import datetime
import re
class rhwl_project(osv.osv):
_name = "rhwl.project"
_columns = {
"name":fields.char(u"项目名称"),
"catelog":fields.char(u"类别"),
"process":fiel
|
ds.char(u"进度"),
"user_id":fields.many2one("res.users",u"负责人"),
"content1":fields.char(string = u"12月5"),
"content2":fields.char(string = u"12月12"),
"content3":fields.char(string = u"12月19"),
"content4":fields.char(string = u"12月26"),
"content5":fields.char(string = u"01月2"),
"content6":fields.char(string = u"01月9"),
"content7":fields.char(string = u"01月16"),
"content8":fields.char(string = u"01月23"),
"content9":fields.char(string = u"2015/02月第一周"),
"content10":fields.char(string = u"2015/02月第二周"),
"content11":fields.char(string = u"2015/02月第三周"),
"content12":fields.char(string = u"2015/02月第四周")
}
|
googleapis/python-dialogflow-cx
|
samples/generated_samples/dialogflow_v3beta1_generated_sessions_detect_intent_async.py
|
Python
|
apache-2.0
| 1,676
| 0.000597
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DetectIntent
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3beta1_generated_Sessions_DetectIntent_async]
from google.cloud import dialogflowcx_v3beta1
async def sample_detect_intent():
# Create a client
client = dialogflowcx_v3beta1.SessionsAsyncClient()
# Initialize request argument(s)
query_input = dialogflowcx_v3beta1.QueryInput()
query_input.text.text = "text_value"
query_input.language_code = "language_code_value"
request = dialogflowcx_v3beta1.DetectIntentRequest(
session="session_value",
query_input=query_input,
)
# Make the request
response = await client.detect_intent(request=request)
# Handle the response
print(response)
# [END dialogflow_v3beta1_generated_Sessions_DetectIntent_async]
|
SIU-CS/J-JAM-production
|
mhapsite/mhap/migrations/0004_delete_quote.py
|
Python
|
gpl-3.0
| 354
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-02 19:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mhap', '0003_auto_20170402_1906'),
]
operations = [
migrations.DeleteModel(
name='Quote',
),
]
|
scibi/django-teryt
|
teryt/south_migrations/0002_auto__add_field_miejscowosc_aktywny__add_field_ulica_aktywny__add_fiel.py
|
Python
|
mit
| 4,805
| 0.005411
|
# coding: utf-8
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Miejscowosc.aktywny'
db.add_column(u'teryt_miejscowosc', 'aktywny',
self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True),
keep_default=False)
# Adding field 'Ulica.aktywny'
db.add_column(u'teryt_ulica', 'aktywny',
self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True),
keep_default=False)
# Adding field 'JednostkaAdministracyjna.aktywny'
db.add_column(u'teryt_jednostkaadministracyjna', 'aktywny',
self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True),
keep_default=False)
# Adding field 'RodzajMiejsowosci.aktywny'
db.add_column(u'teryt_rodzajmiejsowosci', 'aktywny',
self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Miejscowosc.aktywny'
db.delete_column(u'teryt_miejscowosc', 'aktywny')
# Deleting field 'Ulica.aktywny'
db.delete_column(u'teryt_ulica', 'aktywny')
# Deleting field 'JednostkaAdministracyjna.aktywny'
db.delete_column(u'teryt_jednostkaadministracyjna', 'aktywny')
# Deleting field 'RodzajMiejsowosci.aktywny'
db.delete_column(u'teryt_rodzajmiejsowosci', 'aktywny')
models = {
u'teryt.jednostkaadministracyjna': {
'Meta': {'object_name': 'JednostkaAdministracyjna'},
'aktywny': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '7', 'primary_key': 'True'}),
'nazwa': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'nazwa_dod': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'stan_na': ('django.db.models.fields.DateField', [], {})
},
u'teryt.miejscowosc': {
'Meta': {'object_name': 'Miejscowosc'},
'aktywny': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'jednostka': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teryt.JednostkaAdministracyjna']"}),
'miejscowosc_nadrzedna': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teryt.Miejscowosc']", 'null': 'True', 'blank': 'True'}),
'nazwa': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rodzaj_miejscowosci': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teryt.RodzajMiejsowosci']"}),
'stan_na': ('django.db.models.fields.DateField', [], {}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '7', 'primary_key': 'True'})
},
u'teryt.rodzajmiejsowosci': {
'Meta': {'object_name': 'RodzajMiejsowosci'},
'aktywny': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'nazwa': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'stan_na': ('django.db.models.fields.DateField', [], {})
},
u'teryt.ulica': {
'Meta': {'object_name': 'Ulica'},
'aktywny': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'cecha': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '12', 'primary_key': 'True'}),
            'miejscowosc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teryt.Miejscowosc']"}),
'nazwa_1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'nazwa_2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stan_na': ('django.db.models.fields.DateField', [], {}),
            'symbol_ulicy': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['teryt']
|
simplegeo/sqlalchemy
|
test/engine/test_pool.py
|
Python
|
mit
| 25,148
| 0.003141
|
import threading, time
from sqlalchemy import pool, interfaces, create_engine, select
import sqlalchemy as tsa
from sqlalchemy.test import TestBase, testing
from sqlalchemy.test.util import gc_collect, lazy_gc
from sqlalchemy.test.testing import eq_
mcid = 1
class MockDBAPI(object):
def __init__(self):
self.throw_error = False
def connect(self, *args, **kwargs):
if self.throw_error:
raise Exception("couldnt connect !")
delay = kwargs.pop('delay', 0)
if delay:
time.sleep(delay)
return MockConnection()
class MockConnection(object):
def __init__(self):
global mcid
self.id = mcid
self.closed = False
mcid += 1
def close(self):
self.closed = True
def rollback(self):
pass
def cursor(self):
return MockCursor()
class MockCursor(object):
def execute(self, *args, **kw):
pass
def close(self):
pass
mock_dbapi = MockDBAPI()
class PoolTestBase(TestBase):
def setup(self):
pool.clear_managers()
@classmethod
def teardown_class(cls):
pool.clear_managers()
class PoolTest(PoolTestBase):
def testmanager(self):
manager = pool.manage(mock_dbapi, use_threadlocal=True)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
connection3 = manager.connect('bar.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is connection2)
self.assert_(connection2 is not connection3)
def testbadargs(self):
manager = pool.manage(mock_dbapi)
try:
connection = manager.connect(None)
except:
pass
def testnonthreadlocalmanager(self):
manager = pool.manage(mock_dbapi, use_threadlocal = False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p.recreate()
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.throw_error = True
p.dispose()
p.recreate()
def testthreadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def testthreadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
for p in pool.QueuePool(creator=mock_dbapi.connect,
                                pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=mock_dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_properties(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
pool_size=1, max_overflow=0, use_threadlocal=False)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
self.assert_(c.connection is not c2.connection)
self.assert_(not c2.info)
self.assert_('foo2' in c.info)
def test_listeners(self):
dbapi = MockDBAPI()
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print "connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print "first_connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print "checkout(%s, %s, %s)" % (con, record, proxy)
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print "checkin(%s, %s)" % (con, record)
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interf
|
calandryll/transcriptome
|
scripts/old/quality_stats.py
|
Python
|
gpl-2.0
| 685
| 0.00438
|
#!/usr/bin/python -tt
# Quality scores from fastx
# Website: http://hannonlab.cshl.edu/fastx_toolkit/
# Import OS features to run external programs
import os
import glob
v = "Version 0.1"
# Versions:
# 0.1 - Simple script to run fastx_quality_stats on all of the files
fastq_indir = "/home/chris/transcriptome/fastq/trimmed/"
fastq_outdir = "/home/chris/transcriptome/fastq/reports/quality stats"
# Sample 1
print "Analyzing Sample 1..."
os.system('fastx_quality_stats -i "%s/Sample_1_L001_trimmed.fastq" -o "%s/Sample_1_L001_trimmed.txt"' % (fastq_indir, fastq_outdir))
os.system("fastx
|
_quality_stats -i %s/Sample_1_L002_trimmed.fastq %s/Sample_1_L002_trimmed.txt" % (fastq_indir, fastq_outdir))
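# Hedged sketch: glob is imported above but never used; a loop such as this
# one (filename layout assumed from the paths above) would process every
# trimmed fastq file without hard-coding each sample.
def run_all_quality_stats(indir=fastq_indir, outdir=fastq_outdir):
    for fq in glob.glob(os.path.join(indir, "*_trimmed.fastq")):
        base = os.path.splitext(os.path.basename(fq))[0]
        out = os.path.join(outdir, base + ".txt")
        os.system('fastx_quality_stats -i "%s" -o "%s"' % (fq, out))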
|
baidubce/bce-sdk-python
|
baidubce/services/tsdb/tsdb_handler.py
|
Python
|
apache-2.0
| 1,744
| 0.006881
|
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides general http handler functions for processing http responses from TSDB services.
"""
import http.client
import json
from baidubce import utils
from baidubce.exception import BceClientError
from baidubce.exception import BceServerError
from baidubce.utils import Expando
def parse_json(http_response, response):
"""If the body is not empty, convert it to a python object and set as the value of
response.body. http_response is always closed if no error occurs.
:param http_response: the http_response object returned by HTTPConnection.getresponse()
:type http_response: httplib.HTTPResponse
:param response: general response object which will be returned to the caller
:type response: baidubce.BceResponse
:return: always true
:rtype bool
"""
body = http_response.read()
if body:
response.__dict__.update(json.loads(body, object_hook=dict_to_python_object).__dict__)
http_response.close()
return True
def dict_to_python_object(d):
"""
:param d:
:return:
"""
attr = {}
for k, v in list(d.items()):
k = str(k)
attr[k] = v
return Expando(attr)
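# Hedged usage sketch (the payload below is illustrative, not a real TSDB
# response): json.loads with dict_to_python_object as object_hook yields
# attribute-accessible Expando objects, which is how parse_json fills in
# the response object above.
def _demo_dict_to_python_object():
    sample = '{"results": [{"metric": "cpu_idle", "sum": 42}]}'
    obj = json.loads(sample, object_hook=dict_to_python_object)
    return obj.results[0].metric  # -> 'cpu_idle'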
|
sbmlteam/deviser
|
deviser/code_files/cpp_functions/Constructors.py
|
Python
|
lgpl-2.1
| 42,243
| 0.001349
|
#!/usr/bin/env python
#
# @file Constructors.py
# @brief class for constructors for c++ and c
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from ...util import strFunctions, query, global_variables
class Constructors():
"""Class for all constructors"""
def __init__(self, language, is_cpp_api, class_object):
self.language = language
self.cap_language = language.upper()
self.package = class_object['package']
self.class_name = class_object['name']
self.is_cpp_api = is_cpp_api
if is_cpp_api:
self.object_name = class_object['name']
else:
self.object_name = class_object['name'] + '_t'
self.concretes = class_object['concretes']
self.base_class = class_object['baseClass']
self.attributes = query.get_unique_attributes(class_object['attribs'])
self.is_list_of = False
if class_object['name'].startswith('ListOf'):
self.is_list_of = True
self.has_children = class_object['has_children']
self.child_elements = []
if 'child_elements' in class_object:
self.child_elements = class_object['child_elements']
self.child_lo_elements = []
if 'child_lo_elements' in class_object:
self.child_lo_elements = class_object['child_lo_elements']
self.overwrites_children = class_object['overwrites_children']
if 'elementName' in class_object and class_object['elementName'] != '':
self.xml_name = \
strFunctions.lower_first(class_object['elementName'])
else:
self.xml_name = strFunctions.lower_first(class_object['name'])
self.is_plugin = False
if 'is_plugin' in class_object:
self.is_plugin = class_object['is_plugin']
self.is_doc_plugin = False
if 'is_doc_plugin' in class_object:
self.is_doc_plugin = class_object['is_doc_plugin']
self.document = False
if 'document' in class_object:
self.document = class_object['document']
# we do overwrite if we have concrete
if not self.overwrites_children and 'concretes' in class_object:
if len(class_object['concretes']) > 0:
self.overwrites_children = True
########################################################################
# Functions for writing constructors
# function to write level version constructor
def write_level_version_constructor(self, index=0):
# I want to write the equivalent constructor but without level and version
if not global_variables.has_level_version:
return self.write_class_constructor(index)
if (len(self.concretes) == 0 and index == 0) or index == -1:
ob_name = self.object_name
create = 'create'
elif self.is_cpp_api:
ob_name = self.object_name
create = 'create'
else:
if index == 0:
return
else:
i = index - 1
ob_name = '{0}'.format(self.concretes[i]['element'],
self.object_name)
create = 'create{0}'.format(strFunctions.remove_prefix(self.concretes[i]['element']))
# create doc string header
title_line = 'Creates a new {0} using the given {1} Level' \
.format(ob_name, self.cap_language)
if global_variables.is_package:
title_line += ', Version and “{0}” package ' \
'version.'.format(strFunctions.
lower_first(self.package))
else:
            title_line += ' and @p version values.'
params = ['@param level an unsigned int, the {0} Level to '
'assign to this {1}.'.format(self.cap_language,
self.object_name),
'@param version an unsigned int, the {0} Version to '
'assign to this {1}.'.format(self.cap_language,
self.object_name)]
if global_variables.is_package:
params.append('@param pkgVersion an unsigned int, the {0} {1} '
'Version to assign to this {2}.'
.format(self.cap_language, self.package,
self.object_name))
        # changed following current documentation practice
return_lines = ['@copydetails doc_note_setting_lv_pkg']
# return_lines = ['@throws {0}Constructor'
# 'Exception'.format(self.cap_language),
# 'Thrown if the given @p level and @p version '
# 'combination, or this kind of {0} object, are either '
# 'invalid or mismatched with respect to the parent '
        #                 '{1} object.'.format(self.cap_language,
# global_variables.document_class),
# '@copydetails doc_note_setting_lv']
additional = []
if not self.is_cpp_api:
additional.append('@copydetails doc_returned_owned_pointer')
# create the function declaration
if self.is_cpp_api:
function = self.class_name
return_type = ''
else:
function = '{0}_{1}'.format(self.class_name, create)
if not ob_name.endswith('_t'):
return_type = '{0}_t *'.format(ob_name)
else:
return_type = '{0} *'.format(ob_name)
if global_variables.is_package:
arguments = [
'unsigned int level = '
'{0}Extension::getDefaultLevel()'.format(self.package),
'unsigned int version = '
'{0}Extension::getDefaultVersion()'.format(self.package),
'unsigned int pkgVersion = '
'{0}Extension::getDefaultPackageVersion()'.format(self.package)]
arguments_no_defaults = ['unsigned int level',
'unsigned int version',
'unsigned int pkgVersion']
else:
if self.is_cpp_api:
arguments = ['unsigned int level = {0}_D
|
FloBay/PyOmics
|
setup.py
|
Python
|
bsd-3-clause
| 1,788
| 0.003356
|
from setuptools import setup
def setup_package():
    # PyPI doesn't accept Markdown as HTML output for long_description
    # Pypandoc is only required for uploading the metadata to PyPI, not for installing the package
    # Try to convert the Markdown file to RST for long_description
try:
import pypandoc
long_description = pypandoc.convert_file('README.md', 'rst')
    # On ImportError, fall back to reading the Markdown file for long_description
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
with open('README.md') as f:
long_description = f.read()
    # Define the metadata as a dictionary
metadata = dict(
name='PyOmics',
version='0.0.1.dev8',
description='A library for dealing with omic-data in the life sciences',
long_description=long_description,
url='https://github.com/FloBay/PyOmics.git',
author='Florian P. Bayer',
author_email='f.bayer@tum.de',
license='BSD',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='PyOmics bioinformatics omics science data analysis easy',
packages=['PyOmics'],
install_requires = ['ipython', 'numpy', 'matplotlib'],
)
# Bundle metadata up
setup(**metadata)
if __name__ == '__main__':
setup_package()
|
smartbgp/libbgp
|
libbgp/bmp/termination.py
|
Python
|
apache-2.0
| 1,910
| 0.001571
|
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
from .message import Message
@Message.register
class Termination(Message):
TYPE = Message.TERMINATION
TYPE_STR = 'termination'
reason_codict = {
0: "Session administratively closed. The session might be re-initiated.",
1: "Unspecified reason.",
2: "Out of resources. The router has exhausted resources available for the BMP session.",
3: "Redundant connection. The router has determined\
that this connection is redundant with another one.",
4: "Session permanently administratively closed,\
will not be re-initiated. Monitoring station should reduce\
(potentially to 0) the rate at which it attempts\
reconnection to the monitored router."
}
@classmethod
def unpack(cls, data):
infor_tlv = dict()
while data:
info_type, info_len = struct.unpack('!HH', data[0:4])
info_value = data[4: 4 + info_len]
if info_type == 0:
infor_tlv['string'] = info_value.decode('ascii')
elif info_type == 1:
infor_tlv['reason'] = cls.reason_codict[struct.unpack('!H', info_value)[0]]
data = data[4 + info_len:]
return cls(value=infor_tlv)
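# Hedged demo (not part of the original module): the body unpack() walks is
# a sequence of (type, length, value) TLVs, and info type 1 carries a 2-byte
# reason code. The sample below encodes reason 0; reading ``.value`` assumes
# the Message base class stores the keyword passed by unpack() above.
def _demo_termination_reason():
    body = struct.pack('!HH', 1, 2) + struct.pack('!H', 0)
    return Termination.unpack(body).value['reason']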
|
mhils/pytest
|
testing/python/metafunc.py
|
Python
|
mit
| 36,868
| 0.001302
|
import re
import pytest, py
from _pytest import python as funcargs
class TestMetafunc:
def Metafunc(self, func):
# the unit tests of this class check if things work correctly
# on the funcarg level, so we don't need a full blown
        # initialization
class FixtureInfo:
name2fixturedefs = None
def __init__(self, names):
self.names_closure = names
names = funcargs.getfuncargnames(func)
fixtureinfo = FixtureInfo(names)
return funcargs.Metafunc(func, fixtureinfo, None)
def test_no_funcargs(self, testdir):
def function(): pass
metafunc = self.Metafunc(function)
assert not metafunc.fixturenames
repr(metafunc._calls)
def test_function_basic(self):
def func(arg1, arg2="qwe"): pass
metafunc = self.Metafunc(func)
assert len(metafunc.fixturenames) == 1
assert 'arg1' in metafunc.fixturenames
assert metafunc.function is func
assert metafunc.cls is None
def test_addcall_no_args(self):
def func(arg1): pass
metafunc = self.Metafunc(func)
metafunc.addcall()
assert len(metafunc._calls) == 1
call = metafunc._calls[0]
assert call.id == "0"
assert not hasattr(call, 'param')
def test_addcall_id(self):
def func(arg1): pass
metafunc = self.Metafunc(func)
pytest.raises(ValueError, "metafunc.addcall(id=None)")
metafunc.addcall(id=1)
pytest.raises(ValueError, "metafunc.addcall(id=1)")
pytest.raises(ValueError, "metafunc.addcall(id='1')")
metafunc.addcall(id=2)
assert len(metafunc._calls) == 2
assert metafunc._calls[0].id == "1"
assert metafunc._calls[1].id == "2"
def test_addcall_param(self):
def func(arg1): pass
metafunc = self.Metafunc(func)
class obj: pass
metafunc.addcall(param=obj)
metafunc.addcall(param=obj)
metafunc.addcall(param=1)
assert len(metafunc._calls) == 3
assert metafunc._calls[0].getparam("arg1") == obj
assert metafunc._calls[1].getparam("arg1") == obj
assert metafunc._calls[2].getparam("arg1") == 1
def test_addcall_funcargs(self):
def func(x): pass
metafunc = self.Metafunc(func)
class obj: pass
metafunc.addcall(funcargs={"x": 2})
metafunc.addcall(funcargs={"x": 3})
pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})")
assert len(metafunc._calls) == 2
assert metafunc._calls[0].funcargs == {'x': 2}
assert metafunc._calls[1].funcargs == {'x': 3}
assert not hasattr(metafunc._calls[1], 'param')
def test_parametrize_error(self):
def func(x, y): pass
metafunc = self.Metafunc(func)
metafunc.parametrize("x", [1,2])
pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
metafunc.parametrize("y", [1,2])
pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5
|
,6]))
def test_parametrize_and_id(self):
        def func(x, y): pass
metafunc = self.Metafunc(func)
metafunc.parametrize("x", [1,2], ids=['basic', 'advanced'])
metafunc.parametrize("y", ["abc", "def"])
ids = [x.id for x in metafunc._calls]
assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"]
def test_parametrize_with_wrong_number_of_ids(self, testdir):
def func(x, y): pass
metafunc = self.Metafunc(func)
pytest.raises(ValueError, lambda:
metafunc.parametrize("x", [1,2], ids=['basic']))
pytest.raises(ValueError, lambda:
metafunc.parametrize(("x","y"), [("abc", "def"),
("ghi", "jkl")], ids=["one"]))
def test_parametrize_with_userobjects(self):
def func(x, y): pass
metafunc = self.Metafunc(func)
class A:
pass
metafunc.parametrize("x", [A(), A()])
metafunc.parametrize("y", list("ab"))
assert metafunc._calls[0].id == "x0-a"
assert metafunc._calls[1].id == "x0-b"
assert metafunc._calls[2].id == "x1-a"
assert metafunc._calls[3].id == "x1-b"
@pytest.mark.issue250
def test_idmaker_autoname(self):
from _pytest.python import idmaker
result = idmaker(("a", "b"), [("string", 1.0),
("st-ring", 2.0)])
assert result == ["string-1.0", "st-ring-2.0"]
result = idmaker(("a", "b"), [(object(), 1.0),
(object(), object())])
assert result == ["a0-1.0", "a1-b1"]
# unicode mixing, issue250
result = idmaker((py.builtin._totext("a"), "b"), [({}, '\xc3\xb4')])
assert result == ['a0-\xc3\xb4']
def test_idmaker_native_strings(self):
from _pytest.python import idmaker
result = idmaker(("a", "b"), [(1.0, -1.1),
(2, -202),
("three", "three hundred"),
(True, False),
(None, None),
(re.compile('foo'), re.compile('bar')),
(str, int),
(list("six"), [66, 66]),
(set([7]), set("seven")),
(tuple("eight"), (8, -8, 8))
])
assert result == ["1.0--1.1",
"2--202",
"three-three hundred",
"True-False",
"None-None",
"foo-bar",
"str-int",
"a7-b7",
"a8-b8",
"a9-b9"]
def test_idmaker_enum(self):
from _pytest.python import idmaker
enum = pytest.importorskip("enum")
e = enum.Enum("Foo", "one, two")
result = idmaker(("a", "b"), [(e.one, e.two)])
assert result == ["Foo.one-Foo.two"]
@pytest.mark.issue351
def test_idmaker_idfn(self):
from _pytest.python import idmaker
def ids(val):
if isinstance(val, Exception):
return repr(val)
result = idmaker(("a", "b"), [(10.0, IndexError()),
(20, KeyError()),
("three", [1, 2, 3]),
], idfn=ids)
assert result == ["10.0-IndexError()",
"20-KeyError()",
"three-b2",
]
@pytest.mark.issue351
def test_idmaker_idfn_unique_names(self):
from _pytest.python import idmaker
def ids(val):
return 'a'
result = idmaker(("a", "b"), [(10.0, IndexError()),
(20, KeyError()),
("three", [1, 2, 3]),
], idfn=ids)
assert result == ["0a-a",
"1a-a",
"2a-a",
]
@pytest.mark.issue351
def test_idmaker_idfn_exception(self):
from _pytest.python import idmaker
def ids(val):
raise Exception("bad code")
result = idmaker(("a", "b"), [(10.0, IndexError()),
(20, KeyError()),
("three", [1, 2, 3]),
], idfn=ids)
assert result == ["10.0-b0",
"20-b1",
"three-b2",
]
def test_addcall_and_parametrize(self):
def func(x, y): pass
metafunc = self.Metafunc(func)
metafunc.addcall({'x': 1})
metafunc.parametrize('y', [2,3])
assert len(metafunc._calls) == 2
assert metafunc._calls[0].func
|
uannight/reposan
|
plugin.video.tvalacarta/channels/ecuadortv.py
|
Python
|
gpl-2.0
| 5,395
| 0.007987
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para Ecuador TV
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse,re
import urllib
import os
from core import logger
from core import scrapertools
from core.item import Item
DEBUG = False
CHANNELNAME = "ecuadortv"
def isGeneric():
return True
def mainlist(item):
logger.info("tvalacarta.channels.ecuadortv mainlist")
return programas(item)
def programas(item):
logger.info("tvalacarta.channels.ecuadortv canal")
item.url = "http://www.ecuadortv.ec/television"
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
'''
<div class="field field-name-field-icon field-type-image field-label-hidden">
<div class="field-items">
<div class="field-item even">
<a href="/programas/al-medios-dia">
<img typeof="foaf:Image" src="http://www.ecuadortv.ec/sites/default/files/styles/program_menu_item/public/program/almediodiaweb_0.png?itok=wv9Isyhi" width="155" height="105" alt="" />
</a>
</div>
</div>
</div>
<div class="field field-name-title field-type-ds field-label-hidden">
<div class="field-items">
<div class="field-item even" property="dc:title"
><h2>Al Medio Día </h2></div></div></div></div>
'''
    # Extract the program sections
patron = '<div class="field-item even"[^<]+'
patron += '<a href="([^"]+)"[^<]+'
patron += '<img typeof="foaf.Image" src="([^"]+)".*?'
patron += '<h2>([^<]+)</h2>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url,scrapedurl)
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=CHANNELNAME, title=title , url=url, thumbnail=thumbnail, plot=plot, action="episodios", show=title, folder=True) )
return itemlist
def episodios(item):
logger.info("tvalacarta.channels.ecuadortv episodios")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
'''
<div class="item-thumbnail">
<div class="field field-name-rtv-video-thumb field-type-ds field-label-hidden">
<div class="field-items">
<div class="field-item even">
<a href="/rtv/streaming/vod/46056" class="play-trigger use-ajax" data-video-id="FTb5jhfjJ-Y">
<span class="img">
<img src="http://img.youtube.com/vi/FTb5jhfjJ-Y/mqdefault.jpg" alt="" width="278" height="190" />
</span>
<span class="play-button play_big"> </span></a></div></div></div> </div>
<div class="slider_caption display_none">
<div class="field field-name-title field-type-ds field-label-hidden">
<div class="field-items">
<div class="field-item even" property="dc:title">
<h2>Palabra Amazónica - cap. 08
</h2>
</div>
</div>
</div>
<div class="field field-name-field-chapter field-type-taxonomy-term-reference field-label-above">
<div class="field-label">Capítulo:
</div>
<div class="field-items"><div class="field-item even"><span c
|
lass="lineage-item lineage-item-level-0">8</span></div></div></div> </div>
'''
'''
<div class="slider_caption display_none">
<div class="field field-name-title field-type-ds field-label-hidden">
<div class="field-items">
<div class="field-item even" property="dc:title">
<h2>Ecuador Multicolor
</h2>
</div>
</div>
</div>
<div class="field field-name-rtv-description field-type-ds field-label-hidden">
<div class="field-items">
<div class="field-item even">
<p>
<span style="font-size:16px;">Cantón Pillaro - II parte</span></p>
'''
    # Extract the video sections
patron = '<div class="item-thumbnail"[^<]+'
patron += '<div class="field[^<]+'
patron += '<div class="field[^<]+'
patron += '<div class="field[^<]+'
patron += '<a href="[^"]+" class="[^"]+" data-video-id="([^"]+)"[^<]+'
patron += '<span class="img"[^<]+'
patron += '<img src="([^"]+)"[^<]+'
patron += '</span[^<]+'
patron += '<span[^<]+</span[^<]+</a[^<]+</div[^<]+</div[^<]+</div[^<]+</div[^<]+'
patron += '<div class="slider_caption[^<]+'
patron += '<div class="field[^<]+'
patron += '<div class="field[^<]+'
patron += '<div class="field[^<]+'
patron += '<h2>([^<]+)</h2'
matches = re.compile(patron,re.DOTALL).findall(data)
for youtube_id,scrapedthumbnail,scrapedtitle in matches:
title = scrapedtitle
url = "https://www.youtube.com/watch?v="+youtube_id
thumbnail = scrapedthumbnail
plot = ""
itemlist.append( Item(channel=CHANNELNAME, title=title , url=url, thumbnail=thumbnail, plot=plot, action="play", server="youtube", show=item.show, folder=False) )
return itemlist
# Automatic channel check: this function must return "True" if everything in the channel is OK.
def test():
    # Check that the first option returns something
categorias_items = mainlist(Item())
programas_items = programas(categorias_items[0])
episodios_items = episodios(programas_items[0])
if len(episodios_items)>0:
return True
return False
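# Hedged demo (not part of the original channel): the 'programas' pattern
# applied to a reduced version of the markup quoted in the docstring above.
def _demo_programas_patron():
    sample = (
        '<div class="field-item even">'
        '<a href="/programas/al-medios-dia">'
        '<img typeof="foaf:Image" src="http://example.org/almediodia.png" '
        'width="155" height="105" alt="" /></a></div>'
        '<h2>Al Medio Dia</h2>'
    )
    patron = '<div class="field-item even"[^<]+'
    patron += '<a href="([^"]+)"[^<]+'
    patron += '<img typeof="foaf.Image" src="([^"]+)".*?'
    patron += '<h2>([^<]+)</h2>'
    # -> [('/programas/al-medios-dia', 'http://example.org/almediodia.png', 'Al Medio Dia')]
    return re.compile(patron, re.DOTALL).findall(sample)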
|