repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
pombredanne/straight.plugin | test-packages/package-test-plugins/testplugin/foo/__init__.py | Python | mit | 25 | 0.04 | def do(i):
| return i+2 | |
riklaunim/django-examples | suqashexamples/manage.py | Python | mit | 257 | 0 | #!/ | usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suqashexamples.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv) | |
botswana-harvard/edc-visit-schedule | edc_visit_schedule/fieldsets.py | Python | gpl-2.0 | 430 | 0 | visit_schedule_fields = ('visit_schedule_name', 'schedule_name | ', 'visit_code')
visit_schedule_fieldset_tuple = (
'Visit Schedule', {
'classes': ('collapse',),
'fields': visit_schedule_fields})
visit_schedule_only_fields = ('visit_schedule_name', 'schedule_name')
visit_schedule_only_fieldset_tuple = (
'Visit Schedule', | {
'classes': ('collapse',),
'fields': visit_schedule_only_fields})
|
rchakra3/x9115rc3 | hw/code/8/optimizer/de2.py | Python | gpl-2.0 | 6,313 | 0.000634 | from __future__ import division
import random
import math
from common import prerun_each_obj
from model.helpers.candidate import Candidate
from helpers.a12 import a12
""" This contains the optimizers """
def de(model, frontier_size=10, cop=0.4, ea=0.5, max_tries=100, threshold=0.01, era_size=10, era0=None, lives=5):
# normalizers = prerun_each_obj(model, runs=10000)
out = []
repeat = int(max_tries / era_size)
print "Repeat:" + str(repeat)
frontier_size = era_size
energy = model.aggregate
# def energy(candidate, eval_func=model.eval, normalizers=normalizers):
# # This evaluates the objs and stores them candidate.scores
# eval_func(candidate)
# # Just for fun
# normalized_scores = [normalize(x) for normalize, x in zip(normalizers, candidate.scores)]
# # The distance of score of each objective from hell
# hell_dist = [(1 - x) for x in normalized_scores]
# sum_of_squares = sum([x ** 2 for x in hell_dist])
# energy = 1 - (math.sqrt(sum_of_squares) / math.sqrt(len(hell_dist)))
# return energy
def type1(can1, can2):
return (energy(can1) < energy(can2))
def type2(era1, era2):
# a12 returns times that lst1 is greater than lst2
# total = 0
# n = 0
# for obj_scores1, obj_scores2 in zip(era1, era2):
# # If this is 1, that means era1 is greater more often
# # If minimizing, this means era1 is worse
# total += a12(obj_scores1, obj_scores2)
# n += 1
# return (total / n >= 0.5)
# Currently returns true if even one of the objectives have improved
# print "here:" + str(len(era2))
# print "*****#############*************"
for index, objective in enumerate(era2):
# print "comparing:\n" + str(era1[index])
# print "and\n"
# print str(objective)
# print "******"
a12_score = a12(era1[index], objective)
# print "######"
# print a12_score
# print "######"
if (a12_score >= 0.56):
# print "######"
# print objective
# print era1[index]
# print a12_score
# print "######"
return True
# print "######"
# print a12_score
# print "######"
return False
frontier = []
total = 0
n = 0
if not era0:
for i in range(frontier_size):
can = model.gen_candidate()
while can is None:
can = model.gen_candidate()
frontier += [can]
total += energy(can)
n += 1
else:
for can in era0:
p = Candidate(dec_vals=can.dec_vals, scores=can.scores)
frontier += [p]
total = sum([energy(can) for can in frontier])
n = len(frontier)
curr_era = [[] for _ in model.objectives()]
# print "model_objectives_len:" + str(len(curr_era))
for can in frontier:
model.eval(can)
obj_scores = [x for x in can.scores]
for index, score in enumerate(obj_scores):
curr_era[index] += [score]
# Currently treating candidates as having only one objective i.e. energy
# which we're minimizing
eras = [curr_era]
curr_era = [[] for _ in model.objectives()]
best_score = total / n
curr_lives = lives
early_end = False
for j in range(repeat):
# if j % era_size == 0:
out += ["\n" + str(best_score) + " "]
total, n = de_update(frontier, cop, ea, energy, out, model.decisions())
if total / n < threshold:
best_score = total / n
out += ["!"]
out += ["\nScore satisfies Threshold"]
break
elif total / n < best_score:
best_score = total / n
out += ["!"]
for can in frontier:
model.eval(can)
obj_scores = [x for x in can.scores]
# print "obj_scores_len:" + str(len(obj_scores))
for index, score in enumerate(obj_s | cores):
curr_era[index] += [score]
eras += [curr_era]
curr_era = [[] for _ in model.objectives()]
if len(eras) > 1:
if type2(eras[len(eras) - 2], eras[len(eras) - 1]):
curr_lives += lives
else:
curr_lives -= 1
| if curr_lives == 0:
# print "No more"
out += ["\nNo more Lives"]
break
# print ''.join(out)
# print "\nNumber of repeats:" + str(j + 1)
# print "Best Score:" + str(best_score)
return _, best_score, eras[len(eras) - 1]
def de_update(frontier, cop, ea, energy_func, out, decision_objs):
total, n = (0, 0)
for i, can in enumerate(frontier):
score = energy_func(can)
new_can = de_extrapolate(frontier, i, cop, ea, decision_objs)
new_score = energy_func(new_can)
if new_score < score:
frontier[i] = new_can
score = new_score
out += ["+"]
else:
out += ["."]
total += score
n += 1
return total, n
def de_extrapolate(frontier, can_index, cop, ea, decision_objs):
can = frontier[can_index]
new_can = Candidate(dec_vals=list(can.dec_vals))
two, three, four = get_any_other_three(frontier, can_index)
changed = False
for d in range(len(can.dec_vals)):
x, y, z = two.dec_vals[d], three.dec_vals[d], four.dec_vals[d]
if random.random() < cop:
changed = True
new_can.dec_vals[d] = decision_objs[d].wrap(x + ea * (y - z))
if not changed:
d = random.randint(0, len(can.dec_vals) - 1)
new_can.dec_vals[d] = two.dec_vals[d]
if(new_can.dec_vals[d]<=0 or new_can.dec_vals[d]>=1):
print "###########3"
print "x,y,z :" + str(x)+","+str(y)+","+str(z)
print new_can.dec_vals[d]
print "##########3"
return new_can
def get_any_other_three(frontier, ig_index):
lst = []
while len(lst) < 3:
i = random.randint(0, len(frontier) - 1)
if i is not ig_index:
lst += [frontier[i]]
return tuple(lst)
|
akretion/python-cfonb | cfonb/__init__.py | Python | lgpl-3.0 | 48 | 0 | #
| from .parser.statement import StatementReader
| |
jantman/gw2copilot | gw2copilot/wine_mumble_reader.py | Python | agpl-3.0 | 9,880 | 0.000405 | """
gw2copilot/wine_mumble_reader.py
The latest version of this package is available at:
<https://github.com/jantman/gw2copilot>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of gw2copilot.
gw2copilot is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gw2copilot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with gw2copilot. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/gw2copilot> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
###################################################################### | ##########
"""
import logging
import os
import json
import psutil
import pkg_resources
from twisted.internet import protocol
from twisted.internet.task import LoopingCall
logger = logging.getLogger(__name__)
class WineMumbleLinkReader(object):
"""
Class to handle reading MumbleLink via wine.
"""
def __init__(self, parent_server, poll_interval):
| """
Initialize the class.
:param parent_server: the TwistedServer instance that started this
:type parent_server: :py:class:`~.TwistedServer`
:param poll_interval: interval in seconds to poll MumbleLink
:type poll_interval: float
"""
logger.debug("Instantiating WineMumbleLinkReader")
self.server = parent_server
self._poll_interval = poll_interval
self._wine_protocol = None
self._wine_process = None
self._looping_deferred = None
self._setup_process()
self._add_update_loop()
def _add_update_loop(self):
"""
Setup the LoopingCall to poll MumbleLink every ``self.poll_interval``;
helper for testing.
"""
logger.debug("Creating LoopingCall")
l = LoopingCall(self._wine_protocol.ask_for_output)
l.clock = self.server.reactor
logger.info('Setting poll interval to %s seconds',
self._poll_interval)
self._looping_deferred = l.start(self._poll_interval)
self._looping_deferred.addErrback(logger.error)
def _setup_process(self):
"""
Setup and spawn the process to read MumbleLink.
"""
logger.debug("Creating WineProcessProtocol")
self._wine_protocol = WineProcessProtocol(self.server)
logger.debug("Finding process executable, args and environ")
executable, args, env = self._gw2_wine_spawn_info
# this seems to cause problems
if 'WINESERVERSOCKET' in env:
del env['WINESERVERSOCKET']
logger.debug(
"Creating spawned process; executable=%s args=%s len(env)=%d",
executable, args, len(env)
)
logger.debug("Process environment:")
for k in sorted(env.keys()):
logger.debug('%s=%s' % (k, env[k]))
self._wine_process = self.server.reactor.spawnProcess(
self._wine_protocol, executable, args, env)
@property
def _gw2_wine_spawn_info(self):
"""
Return the information required to spawn :py:mod:`~.read_mumble_link`
as a Python script running under GW2's wine install.
:return: return a 3-tuple of wine executable path (str), args to pass
to wine (list, wine python binary path and ``read_mumble_link.py``
module path), wine process environment (dict)
:rtype: tuple
"""
gw2_ps = self._gw2_process
env = gw2_ps.environ()
wine_path = os.path.join(os.path.dirname(gw2_ps.exe()), 'wine')
logger.debug("Gw2.exe executable: %s; inferred wine binary as: %s",
gw2_ps.exe(), wine_path)
wine_args = [
wine_path,
self._wine_python_path(env['WINEPREFIX']),
self._read_mumble_path,
'-i'
]
return wine_path, wine_args, env
@property
def _read_mumble_path(self):
"""
Return the absolute path to :py:mod:`~.read_mumble_link` on disk.
:return: absolute path to :py:mod:`~.read_mumble_link`
:rtype: str
"""
p = pkg_resources.resource_filename('gw2copilot', 'read_mumble_link.py')
p = os.path.abspath(os.path.realpath(p))
logger.debug('Found path to read_mumble_link as: %s', p)
return p
def _wine_python_path(self, wineprefix):
"""
Given a specified ``WINEPREFIX``, return the path to the Python binary
in it.
:param wineprefix: ``WINEPREFIX`` env var
:type wineprefix: str
:return: absolute path to wine's Python binary
:rtype: str
"""
p = os.path.join(wineprefix, 'drive_c', 'Python27', 'python.exe')
if not os.path.exists(p):
raise Exception("Unable to find wine Python at: %s", p)
logger.debug('Found wine Python binary at: %s', p)
return p
@property
def _gw2_process(self):
"""
Find the Gw2.exe process; return the Process object.
:return: Gw2.exe process
:rtype: psutil.Process
"""
gw2_p = None
for p in psutil.process_iter():
if p.name() != 'Gw2.exe':
continue
if gw2_p is not None:
raise Exception("Error: more than one Gw2.exe process found")
gw2_p = p
if gw2_p is None:
raise Exception("Error: could not find a running Gw2.exe process")
logger.debug("Found Gw2.exe process, PID %d", gw2_p.pid)
return gw2_p
class WineProcessProtocol(protocol.ProcessProtocol):
"""
An implementation of :py:class:`twisted.internet.protocol.ProcessProtocol`
to communicate with :py:mod:`~.read_mumble_link` when it is executed
as a command-line script under wine. This handles reading data from the
process and requesting more.
"""
def __init__(self, parent_server):
"""
Initialize; save an instance variable pointing to our
:py:class:`~.TwistedServer`
:param parent_server: the TwistedServer instance that started this
:type parent_server: :py:class:`~.TwistedServer`
"""
logger.debug("Initializing WineProcessProtocol")
self.parent_server = parent_server
self.have_data = False
def connectionMade(self):
"""Triggered when the process starts; just logs a debug message"""
logger.debug("Connection made")
def ask_for_output(self):
"""
Write a newline to the process' STDIN, prompting it to re-read the map
and write the results to STDOUT, which will be received by
:py:meth:`~.outReceived`.
"""
logger.debug("asking for output")
self.transport.write("\n")
def outReceived(self, data):
"""
Called when output is received from the process; attempts to deserialize
JSON and on success passes it back to ``self.parent_server`` via
:py:meth:`~.TwistedServer.update_mum |
looker/sentry | src/sentry/db/models/base.py | Python | bsd-3-clause | 4,686 | 0.00064 | """
sentry.db.models
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from copy import copy
import logging
import six
from bitfield.types import BitHandler
from django.db import models
from django.db.models import signals
from django.db.models.query_utils import DeferredAttribute
from django.utils import timezone
from .fields.bounded import BoundedBigAutoField
from .manager import BaseManager
from .query import update
__all__ = ('BaseModel', 'Model', 'sane_repr')
UNSAVED = object()
DEFERRED = object()
def sane_repr(*attrs):
if 'id' not in attrs and 'pk' not in attrs:
attrs = ('id', ) + attrs
def _repr(self):
cls = type(self).__name__
pairs = ('%s=%s' % (a, repr(getattr(self, a, None))) for a in attrs)
return u'<%s at 0x%x: %s>' % (cls, id(self), ', '.join(pairs))
return _repr
class BaseModel(models.Model):
class Meta:
abstract = True
objects = BaseManager()
update = update
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__(*args, **kwargs)
self._update_tracked_data()
def __getstate__(self):
d = self.__dict__.copy()
# we cant serialize weakrefs
d.pop('_Model__data', None)
return d
def __hash__(self):
# Django decided that it shouldnt let us hash objects even though they have
# memory addresses. We need that behavior, so let's revert.
if self.pk:
return models.Model.__hash__(self)
return id(self)
def __reduce__(self):
(model_unpickle, stuff, _) = super(BaseModel, self).__reduce__()
return (model_unpickle, stuff, self.__getstate__())
def __setstate__(self, state):
self.__dict__.update(state)
self._update_tracked_data()
def __get_field_value(self, field):
if isinstance(type(field).__dict__.get(field.attname), DeferredAttribute):
return DEFERRED
if isinstance(field, models.ForeignKey):
return getattr(self, field.column, None)
return getattr(self, field.attname, None)
def _update_tracked_data(self):
"Updates a local copy of attributes values"
if self.id:
data = {}
for f in self._meta.fields:
# XXX(dcramer): this is how Django determines this (copypasta from Model)
if isinstance(type(f).__dict__.get(f.attname),
DeferredAttribute) or f.column is None:
continue
try:
v = self.__get_field_value(f)
except AttributeError as e:
# this case can come up from pickling
logging.exception(six.text_type(e))
else:
if isinstance(v, BitHandler):
v = copy(v)
data[f.column] = v
self.__data = data
else:
self.__data = UNSAVED
def _update_timestamps(self):
if hasattr(self, 'date_updated'):
self.date_updated = timezone.now()
def has_changed(self, field_name):
| "Returns ``True`` if ``field`` has changed since initialization."
if self.__data is UNSAVED:
return False
field = self._meta.get_field(field_name)
value = self.__get_field_value(field)
if value is DEFERRED:
return False
return self.__data.get(field_name) != value
def old_value(self, field_name):
"Returns the previous value of ``field``"
if self.__data is UNSAVE | D:
return None
value = self.__data.get(field_name)
if value is DEFERRED:
return None
return self.__data.get(field_name)
class Model(BaseModel):
id = BoundedBigAutoField(primary_key=True)
class Meta:
abstract = True
__repr__ = sane_repr('id')
def __model_post_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_tracked_data()
def __model_pre_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_timestamps()
def __model_class_prepared(sender, **kwargs):
if not issubclass(sender, BaseModel):
return
if not hasattr(sender, '__core__'):
raise ValueError('{!r} model has not defined __core__'.format(sender))
signals.pre_save.connect(__model_pre_save)
signals.post_save.connect(__model_post_save)
signals.class_prepared.connect(__model_class_prepared)
|
kkozarev/mwacme | casa_commands_instructions/plot_max_spectra_calibrated.py | Python | gpl-2.0 | 12,151 | 0.026088 | import glob, os, sys,fnmatch
import matplotlib.pyplot as plt
from astropy.io import ascii
import numpy as np
def match_list_values(ls1,ls2):
#Return lists of the indices where the values in two lists match
#It will return only the first index of occurrence of repeating values in the lists
#Written by Kamen Kozarev, with help from stackoverflow:
#http://stackoverflow.com/questions/1388818/how-can-i-compare-two-lists-in-python-and-return-matches
#http://stackoverflow.com/quest | ions/480214/how-do-you-remove-duplicates-fro | m-a-list-in-whilst-preserving-order
#list1=['03:39:04','03:40:04','03:41:15','03:43:20','03:45:39']
#list2=['03:39:04','03:41:15','03:43:20','03:45:39','03:40:04','03:40:04']
list1=list(ls1)
list2=list(ls2)
matches=set(list1).intersection(list2)
indlist1=[i for i,item in enumerate(list1) if item in matches]
indlist2=[i for i,item in enumerate(list2) if item in matches]
matchlist1=np.array(list1)[indlist1]
matchlist2=np.array(list2)[indlist2]
seen=set()
seen_add = seen.add
damn=[i for i,item in enumerate([x for x in matchlist1 if not
(x in seen or seen_add(x))]) if item in matchlist1]
findlist1=list(np.array(indlist1)[damn])
seen=set()
seen_add = seen.add
damn=[i for i,item in enumerate([x for x in matchlist2 if not
(x in seen or seen_add(x))]) if item in matchlist2]
findlist2=list(np.array(indlist2)[damn])
return findlist1,findlist2
def read_maxfile(maxfile):
print 'Reading file '+maxfile
if not os.path.exists(maxfile):
print "#### No Max file found: "+maxfile+" ####"
return -1
maxdata=ascii.read(maxfile)
return maxdata
#The new data location
if sys.platform == 'darwin': BASEDIR='/Volumes/Transcend/MWA_DATA/'
if sys.platform == 'linux2': BASEDIR='/mnt/MWA_DATA/'
CHANNELS = ['062-063','069-070','076-077','084-085','093-094','113-114','139-140','153-154','169-170','187-188','125-126']
CHANNELS = ['093-094','084-085','103-104','113-114','125-126','139-140','153-154','187-188']
#CHANNELS = ['093-094','084-085']
polarizations = ['XX','YY']
polarizations = ['XX']
OBSIDS=['1130643536']
maxindices=['1','2']
maxindices=['1']
reference_channel=CHANNELS[0]
date='2015/11/04 '
force=0 #Overwrite files if present
finchan=[8,14] #the fine channel indices
avgperiod=60. #seconds over which to average
plotbtemp=0
plotintflux=0
plotmaxflux=1
outdir=BASEDIR
for OBSID in OBSIDS:
for polarization in polarizations:
for maxindex in maxindices:
maxinfo={}
for CHANNEL in CHANNELS:
datadir=BASEDIR+CHANNEL+'/'+OBSID+'/'
#GET the maximum information for the image.
maxfile='Max'+maxindex+'_info_'+CHANNEL+'_'+OBSID+'_'+polarization+'_SFU.txt'
timestrings=[]
if not os.path.exists(datadir+maxfile):
print '### Missing Max'+maxindex+' file for channel '+CHANNEL+', obsid '+OBSID+', pol '+polarization+' ###'
continue
maxdata=read_maxfile(datadir+maxfile)
maxintens=maxdata['maxintens']
maxlocx_px=maxdata['maxlocx_px']
maxlocy_px=maxdata['maxlocy_px']
times=maxdata['times']
#datetimes=[]
#for time in times: datetimes.append(datetime.strptime(time,"%Y/%m/%d %H:%M:%S"))
#alldatetimes[CHANNEL]=datetimes
#Calculate the frequencies
tmp=CHANNEL.split('-')
basefreq=int(tmp[0])*1.28 #base frequency in MHz
startfreq=basefreq+finchan[0]*0.04 #Starting frequency
endfreq=basefreq+finchan[1]*0.04 #Starting frequency
midfreq=np.mean([startfreq,endfreq])
#Populate the info dictionary
maxinfo[CHANNEL]={'times':times,'timestrings':timestrings,'startfreq':startfreq,'endfreq':endfreq,'midfreq':midfreq,'maxintens':maxintens,'maxlocx_px':maxlocx_px,'maxlocy_px':maxlocy_px} #,'fnames':img_list,'integrated_flux':intintens,
reference_times=maxinfo[reference_channel]['times']
frequencies=[info['midfreq'] for info in maxinfo.values()]
freqerror=[(info['endfreq']-info['startfreq']) for info in maxinfo.values()]
frequencies.sort()
if plotbtemp > 0:
#Brightness temperature defined as Tb = (I/nu^2)*(c^2)/(2*k)
#weighting=(c^2)/(2*k) = 3.26e39
kb=1.38064852e-23
c2=9.e16
JANSKY2SI=1.e-26
weighting=3.26e39*JANSKY2SI
weighting=np.divide(weighting,midfreq*midfreq*1.e12)
else:
#Plot the Flux density in SFU = 1.e-4 Jy
#weighting=1.e-4
weighting=1.
#allmaxima=[info['maxintens'] for info in maxinfo.values()]
#datadir=BASEDIR+'subset/'
cc=0
totmax=[]
allfluxes=[]
allintfluxes=[]
#Start writing the spectra to a file
specfile='max'+maxindex+'_'+OBSID+'_'+polarization+'_spectra_SFU.txt'
outf=open(outdir+specfile,'w')
tmp=''
for CHANNEL in CHANNELS:
if not CHANNEL in maxinfo: continue
tmp=tmp+' '+str(maxinfo[CHANNEL]['midfreq'])+' 000'
outf.write("Date Time" + tmp+'\n')
outf.close()
#Do the same but for the integrated flux spectra
#intspecfile='max'+maxindex+'_'+OBSID+'_'+polarization+'_spectra_integrated_SFU.txt'
#outf=open(outdir+intspecfile,'w')
#tmp=''
#for CHANNEL in CHANNELS: tmp=tmp+' '+str(maxinfo[CHANNEL]['midfreq'])+' 000'
#outf.write("Date Time" + tmp+'\n')
#outf.close()
timeindices={}
for CHANNEL in CHANNELS:
if not CHANNEL in maxinfo: continue
if CHANNEL == reference_channel: continue
refind,chanind=match_list_values(reference_times,maxinfo[CHANNEL]['times'])
timeindices[CHANNEL]=chanind
for ind,time in enumerate(reference_times):
maxima=[]
intfluxes=[]
rmses=[]
timestring=''.join(time.split(' ')[1].split(':'))
for CHANNEL in CHANNELS:
if not CHANNEL in maxinfo: continue
if time in maxinfo[CHANNEL]['times']:
chantimind=maxinfo[CHANNEL]['times'].tolist().index(time)
#intfluxes.append(maxinfo[CHANNEL]['integrated_flux'][chantimind])
maxima.append(maxinfo[CHANNEL]['maxintens'][chantimind])
else:
#intfluxes.append(0.)
maxima.append(0.)
if ind == 0:
#totintflux=intfluxes
totmax=maxima
else:
#totintflux=np.add(totintflux,intfluxes)
totmax=np.add(totmax,maxima)
cc=cc+1
if cc == avgperiod:
#totintflux=np.divide(totintflux,1.*avgperiod)
totmax=np.divide(totmax,1.*avgperiod)
brighttemp=np.multiply(totmax,weighting)
#brighttemp_error=np.multiply(totrms,weighting)
fluxdens=np.multiply(totmax,weighting)
#fluxdens_error=np.multiply(totrms,weighting)
#intfluxdens=np.multiply(totintflux,weighting)
allfluxes.append(fluxdens)
#allintfluxes.append(intfluxdens)
#Save the peak fluxes
outf=open(outdir+specfile,'a')
fluxdstring=''
for ii,dd in enumerate(fluxdens):
fluxdstring=fluxdstring + ' {:e}'.forma |
aadarshkarumathil/Webserver | webserver.py | Python | gpl-3.0 | 10,173 | 0.021921 | #!/usr/bin/python2
from conf import *
import socket
import os
from threading import Thread
import time
def get_cookie(request_lines):
#print("cookie data is: " + request_lines[-3])
data = request_lines[-3].split(":")[-1]
return (data.split("=")[-1])
def error_404(addr,request_words):
print("File not Found request")
logging(addr,request_words[1][1:],"error","404")
csock.sendall(error_handle(404,"text/html",False))
response = """<html><head><body>file not found</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_403(addr,request_words):
print("Forbidden")
logging(addr,request_words[1][1:],"error","403")
csock.sendall(error_handle(403,"text/html",False))
response = """<html><head><body>Forbidden</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_400(addr,request_words):
print("Bad request")
logging(addr,request_words[1][1:],"error","400")
csock.sendall(error_handle(400,"text/html",False))
response = """<html><head><body>file not found</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
| #f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_501(addr,request_words):
print("NOT Implemented")
logging(addr,reques | t_words,"error","501")
csock.sendall(error_handle(501,"text/html",False))
response = """<html><head><body>Not Implemented </body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_401(addr,request_words):
print("Unauthorized")
logging(addr,request_words,"error","401")
csock.sendall(error_handle(401,"text/html",False))
response = """<html><head><body>Unauthorized</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_500(e,file_name,addr):
print("Internal Server Error")
logging(addr,file_name,"error","501")
csock.sendall(error_handle(501,"text/html",False))
response = """<html><head><body>Internal Server Error </body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
def error_411(addr,request_words):
print("Length Required")
logging(addr,request_words,"error","411")
csock.sendall(error_handle(411,"text/html",False))
response = """<html><head><body>Length Required</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def error_505(addr,request_words):
print("Trailing whitespaces")
logging(addr,request_words,"error","505")
csock.sendall(error_handle(505,"text/html",False))
response = """<html><head><body>Trailing white spaces</body></head></html>"""
#f = open("404.html","r")
#response = f.read()
#f.close()
csock.sendall(response)
csock.close()
#print(file_name)
def page_handle(method,request_lines,file_name,addr,request_words):
print(method)
data = request_lines[-1]
#print("get data is :".format(data))
#print(file_name.split(".")[-1])
if(file_name.split(".")[-1]=="php"):
isphp = True
else:
isphp = False
print(isphp)
session_id= get_cookie(request_lines)
#file_name = root_dir + file_name
print(file_name)
if(root_dir not in file_name):
error_401(addr,file_name)
file_name = serverside(file_name,data,method,session_id)
mime_type = mime_type_handler(file_name.split(".")[-1],addr)
response_file = open(file_name,"r")
response = response_file.read()
response_file.close()
logging(addr,request_words[1][1:],"OK","200")
avoid_response = ["image/x-icon","image/gif","image/jpeg","image/png"]
#if(mime_type not in avoid_response):
#print(response)
# print("response from error handle\n\n\n")
header = error_handle(200,mime_type,isphp)
#print(header)
csock.sendall(header)
csock.sendall(response)
csock.close()
def serverside(file_name,data,method,session_id):
ext = file_name.split(".")[-1]
path_split = file_name.split("/")
if(ext in lang):
if(ext=="php"):
os.environ["_{}".format(method)]= data
os.environ["SESSION_ID"]=session_id
print(os.environ["_{}".format(method)])
os.system("php-cgi {} > output.html".format(file_name))
file_name = "output.html"
#print("file is returned")
return file_name
else:
#print(dat)
try:
if("nodefiles" in path_split):
resp = os.system("node {} > output.html".format(file_name))
filename="output.html"
return file_name
resp = os.system("{} {} > output.html".format(lang[ext],file_name))
file_name = "output.html"
return file_name
except Exception as e:
error_500(e,file_name,addr)
else :
if(ext in mime_switcher):
print("file is returned")
return file_name
else:
error_501(addr,file_name)
def error_handle(errornum,mime_type,isphp):
if(isphp):
response = """HTTP/1.1 {} {}\r\n""".format(errornum,errorname[errornum])
else:
response = """HTTP/1.1 {} {}\r\nContent-type:{}\r\n\r\n""".format(errornum,errorname[errornum],mime_type)
print(response)
return response
def connhandler(csock,addr):
request = csock.recv(1024)
#print(addr)
#sock.sendall(index.read())
request_lines = request.split("\n")
request_words = request_lines[0].split(" ")
print("\r\n\r\n\r\n")
if(len(request_words)!=3):
error_505(addr,request_words)
#print(request)
#print(root_dir)
if(request_words[0] == "GET"):
if(get_enable):
if(request_words[1] == "/"):
file_name = root_dir+root_file
else:
file_name = root_dir+request_words[1][1:]
print(file_name)
if(os.path.isfile(file_name)):
method="GET"
page_handle(method,request_lines,file_name,addr,request_words)
else:
error_404(addr,request_words)
else:
error_403(addr,request_words)
elif(request_words[0]=="POST"):
if(post_enable):
if(request_words[1] == "/"):
file_name = root_dir+root_file
else:
file_name = root_dir+request_words[1][1:]
print(file_name)
if(request_lines[3].split(":")[-1]== 0):
error_411(addr,request_words)
if(os.path.isfile(file_name)):
method="POST"
page_handle(method,request_lines,file_name,addr,request_words)
else:
error_404(addr,request_words)
else:
error_403(addr,request_words)
elif(request_words[0]=="PUT"):
if(put_enable):
data = request_lines[-1]
#if(data!=""):
file_name = request_words[1][1:]
f = open(filename,"a+")
f.write(data)
f.close()
header = error_handle(200,"text/html",False)
csock.sendall(header)
csock.close()
else:
error_403(addr,request_words)
elif(request_words[0]=="DELETE"):
if(delete_enable):
file_name = request_words[1][1:]
os.system("rm -rf {}".file_name)
header = error_handle(200,"text/html",False)
csock.sendall(header)
csock.sendall("FILE DELETED")
csock.close()
else:
error_403(addr,request_words)
elif(request_words[0]=="CONNECT"):
|
michaelBenin/autopep8 | test/suite/out/long_lines.py | Python | mit | 300 | 0 | if True:
| if True:
if True:
self.__heap.sort(
) # pylint: builtin sort probably faster than O(n)-time heapify
if True:
foo = '( | ' + \
array[0] + ' '
|
lancifollia/laon_crf | feature.py | Python | mit | 8,052 | 0.001987 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import Counter
import numpy as np
STARTING_LABEL = '*' # Label of t=-1
STARTING_LABEL_INDEX = 0
def default_feature_func(_, X, t):
"""
Returns a list of feature strings.
(Default feature function)
:param X: An observation vector
:param t: time
:return: A list of feature strings
"""
length = len(X)
features = list()
features.append('U[0]:%s' % X[t][0])
features.append('POS_U[0]:%s' % X[t][1])
if t < length-1:
features.append('U[+1]:%s' % (X[t+1][0]))
features.append('B[0]:%s %s' % (X[t][0], X[t+1][0]))
features.append('POS_U[1]:%s' % X[t+1][1])
features.append('POS_B[0]:%s %s' % (X[t][1], X[t+1][1]))
if t < length-2:
features.append('U[+2]:%s' % (X[t+2][0]))
features.append('POS_U[+2]:%s' % (X[t+2][1]))
features.append('POS_B[+1]:%s %s' % (X[t+1][1], X[t+2][1]))
features.append('POS_T[0]:%s %s %s' % (X[t][1], X[t+1][1], X[t+2][1]))
if t > 0:
features.append('U[-1]:%s' % (X[t-1][0]))
features.append('B[-1]:%s %s' % (X[t-1][0], X[t][0]))
features.append('POS_U[-1]:%s' % (X[t-1][1]))
features.append('POS_B[-1]:%s %s' % (X[t-1][1], X[t][1]))
if t < length-1:
features.append('POS_T[-1]:%s %s %s' % (X[t-1][1], X[t][1], X[t+1][1]))
if t > 1:
features.append('U[-2]:%s' % (X[t-2][0]))
features.append('POS_U[-2]:%s' % (X[t-2][1]))
features.append('POS_B[-2]:%s %s' % (X[t-2][1], X[t-1][1]))
features.append('POS_T[-2]:%s %s %s' % (X[t-2][1], X[t-1][1], X[t][1]))
return features
class FeatureSet():
feature_dic = dict()
observation_set = set()
empirical_counts = Counter()
num_features = 0
label_dic = {STARTING_LABEL: STARTING_LABEL_INDEX}
label_array = [STARTING_LABEL]
feature_func = default_feature_func
def __init__(self, feature_func=None):
# Sets a custom feature function.
if feature_func is not None:
self.feature_func = feature_func
def scan(self, data):
"""
Constructs a feature set, a label set,
and a counter of empirical counts of each feature from the input data.
:param data: A list of (X, Y) pairs. (X: observation vector , Y: label vector)
"""
# Constructs a feature set, and counts empirical counts.
for X, Y in data:
prev_y = STARTING_LABEL_INDEX
for t in range(len(X)):
# Gets a label id
try:
y = self.label_dic[Y[t]]
except KeyError:
y = len(self.label_dic)
self.label_dic[Y[t]] = y
self.label_array.append(Y[t])
# Adds features
self._add(prev_y, y, X, t)
prev_y = y
def load(self, feature_dic, num_features, label_array):
self.num_features = num_features
self.label_array = label_array
self.label_dic = {label: i for label, i in enumerate(label_array)}
self.feature_dic = self.deserialize_feature_dic(feature_dic)
def __len__(self):
ret | urn self.num_features
def _add(self, prev_y, y, X, t):
"""
Generates features, constructs feature_dic.
:param prev_y: previous label
:param | y: present label
:param X: observation vector
:param t: time
"""
for feature_string in self.feature_func(X, t):
if feature_string in self.feature_dic.keys():
if (prev_y, y) in self.feature_dic[feature_string].keys():
self.empirical_counts[self.feature_dic[feature_string][(prev_y, y)]] += 1
else:
feature_id = self.num_features
self.feature_dic[feature_string][(prev_y, y)] = feature_id
self.empirical_counts[feature_id] += 1
self.num_features += 1
if (-1, y) in self.feature_dic[feature_string].keys():
self.empirical_counts[self.feature_dic[feature_string][(-1, y)]] += 1
else:
feature_id = self.num_features
self.feature_dic[feature_string][(-1, y)] = feature_id
self.empirical_counts[feature_id] += 1
self.num_features += 1
else:
self.feature_dic[feature_string] = dict()
# Bigram feature
feature_id = self.num_features
self.feature_dic[feature_string][(prev_y, y)] = feature_id
self.empirical_counts[feature_id] += 1
self.num_features += 1
# Unigram feature
feature_id = self.num_features
self.feature_dic[feature_string][(-1, y)] = feature_id
self.empirical_counts[feature_id] += 1
self.num_features += 1
def get_feature_vector(self, prev_y, y, X, t):
"""
Returns a list of feature ids of given observation and transition.
:param prev_y: previous label
:param y: present label
:param X: observation vector
:param t: time
:return: A list of feature ids
"""
feature_ids = list()
for feature_string in self.feature_func(X, t):
try:
feature_ids.append(self.feature_dic[feature_string][(prev_y, y)])
except KeyError:
pass
return feature_ids
def get_labels(self):
"""
Returns a label dictionary and array.
"""
return self.label_dic, self.label_array
def calc_inner_products(self, params, X, t):
"""
Calculates inner products of the given parameters and feature vectors of the given observations at time t.
:param params: parameter vector
:param X: observation vector
:param t: time
:return:
"""
inner_products = Counter()
for feature_string in self.feature_func(X, t):
try:
for (prev_y, y), feature_id in self.feature_dic[feature_string].items():
inner_products[(prev_y, y)] += params[feature_id]
except KeyError:
pass
return [((prev_y, y), score) for (prev_y, y), score in inner_products.items()]
def get_empirical_counts(self):
empirical_counts = np.ndarray((self.num_features,))
for feature_id, counts in self.empirical_counts.items():
empirical_counts[feature_id] = counts
return empirical_counts
def get_feature_list(self, X, t):
feature_list_dic = dict()
for feature_string in self.feature_func(X, t):
for (prev_y, y), feature_id in self.feature_dic[feature_string].items():
if (prev_y, y) in feature_list_dic.keys():
feature_list_dic[(prev_y, y)].add(feature_id)
else:
feature_list_dic[(prev_y, y)] = {feature_id}
return [((prev_y, y), feature_ids) for (prev_y, y), feature_ids in feature_list_dic.items()]
def serialize_feature_dic(self):
serialized = dict()
for feature_string in self.feature_dic.keys():
serialized[feature_string] = dict()
for (prev_y, y), feature_id in self.feature_dic[feature_string].items():
serialized[feature_string]['%d_%d' % (prev_y, y)] = feature_id
return serialized
def deserialize_feature_dic(self, serialized):
feature_dic = dict()
for feature_string in serialized.keys():
feature_dic[feature_string] = dict()
for transition_string, feature_id in serialized[feature_string].items():
prev_y, y = transition_string.split('_')
feature_dic[feature_string][(int(prev_y), int(y))] = feature_id
return feature_dic |
rartino/ENVISIoN | envisionpy/hdf5parser/vasp/check_for_parse.py | Python | bsd-2-clause | 3,028 | 0.012884 | ## ENVISIoN
##
## Copyright (c) 2021 Gabriel Anderberg, Didrik Axén, Adam Engman,
## Kristoffer Gubberud Maras, Joakim Stenborg
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above co | pyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRI | GHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## ##############################################################################################
import os, sys
import inspect
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")
def has_been_parsed(parse_name, h5file, vasp_dir):
    """Return True if *h5file* and *vasp_dir* match the recorded prior parse.

    priorparses.txt stores, for each parser name, the h5 path on the next
    line and the vasp directory on the line after that.  This function
    updates those two lines to the current values and reports whether both
    already matched.  If only the h5 path matched (the vasp dir changed),
    the now-stale h5 file is deleted so it gets regenerated.

    :param parse_name: parser identifier line to look for in the record file
    :param h5file: path of the target hdf5 output file
    :param vasp_dir: path of the VASP output directory being parsed
    :return: True when both values match the prior record, False otherwise
    """
    prior_h5 = True
    prior_vasp = True
    record_path = path_to_current_folder + "/../priorparses.txt"
    # Use context managers so the record file is always closed (the
    # original left the read handle open).
    with open(record_path, 'r') as parse_file:
        lines = parse_file.readlines()
    for index, line in enumerate(lines):
        if line != parse_name + "\n":
            continue
        # Line after the parser name holds the previously used h5 path.
        if h5file in lines[index + 1]:
            prior_h5 = True
        elif lines[index + 1] == "\n":
            lines[index + 1] = h5file + "\n"
        else:
            prior_h5 = False
            lines[index + 1] = h5file + "\n"
        # Two lines below holds the previously used vasp directory.
        if vasp_dir in lines[index + 2]:
            prior_vasp = True
        elif lines[index + 2] == "\n":
            lines[index + 2] = vasp_dir + "\n"
        else:
            prior_vasp = False
            lines[index + 2] = vasp_dir + "\n"
    with open(record_path, 'w') as record_file:
        record_file.writelines(lines)
    if prior_h5 and prior_vasp:
        return True
    if prior_h5 and not prior_vasp:
        # Same h5 path but a different vasp dir: the existing h5 is stale.
        # Best-effort delete; a failure here must not abort the caller
        # (and, unlike the original, we still return False in that case
        # instead of falling through and returning None).
        try:
            os.remove(h5file)
        except OSError:
            pass
    return False
|
daviddoria/itkHoughTransform | Wrapping/WrapITK/Languages/SwigInterface/pygccxml-1.0.0/pygccxml/utils/__init__.py | Python | apache-2.0 | 5,211 | 0.014585 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
defines logger classes and few convinience methods, not related to the declarations
tree
"""
import os
import sys
import logging
import tempfile
from fs_utils import files_walker
from fs_utils import directories_walker
def _create_logger_( name ):
"""implementation details"""
logger = logging.getLogger(name)
handler = logging.StreamHandler()
#handler.setFormatter( logging.Formatter( os.linesep + '%(levelname)s %(message)s' ) )
handler.setFormatter( logging.Formatter( '%(levelname)s %(message)s' ) )
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
return logger
class loggers:
"""class-namespace, defines few loggers classes, used in the project"""
cxx_parser = _create_logger_( 'pygccxml.cxx_parser' )
"""logger for C++ parser functionality
If you set this logger level to DEBUG, you will be able to see the exact
command line, used to invoke GCC-XML and errors that occures during XML parsing
"""
gccxml = cxx_parser #backward compatability
pdb_reader = _create_logger_( 'pygccxml.pdb_reader' )
"""logger for MS .pdb file reader functionality
"""
queries_engine = _create_logger_( 'pygccxml.queries_engine' )
"""logger for query engine functionality.
If you set this logger level to DEBUG, you will be able to see what queries
you do against declarations tree, measure performance and may be even to improve it.
Query engine reports queries and whether they are optimized or not.
"""
declarations_cache = _create_logger_( 'pygccxml.declarations_cache' )
"""logger for declarations tree cache functionality
If you set this logger level to DEBUG, you will be able to see what is exactly
happens, when you read the declarations from cache file. You will be able to
decide, whether it worse for you to use this or that cache strategy.
"""
root = logging.getLogger( 'pygccxml' )
"""root logger exists for your convinience only"""
all = [ root, cxx_parser, queries_engine, declarations_cache, pdb_reader ]
"""contains all logger classes, defined by the class"""
def remove_file_no_raise(file_name ):
    """Remove *file_name* from disk; any failure is logged and swallowed.

    Intended for temporary files whose removal must never abort the caller.
    """
    try:
        if os.path.exists(file_name):
            os.remove(file_name)
    except Exception as error:
        # Deliberately best-effort: a leftover temporary file is not fatal.
        # Note: the original used the Python-2-only "except Exception, e"
        # syntax; the "as" form works on Python 2.6+ and Python 3.
        loggers.root.error("Error occurred while removing temporary created file('%s'): %s"
                           % ( file_name, str( error ) ) )
def create_temp_file_name(suffix, prefix=None, dir=None):
    """Create an empty temporary file and return its path.

    Small convenience wrapper around :func:`tempfile.mkstemp` that closes
    the low-level file descriptor for the caller.  The file itself is left
    on disk.
    """
    if not prefix:
        prefix = tempfile.template
    handle, file_path = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=dir )
    os.fdopen(handle).close()
    return file_path
def normalize_path( some_path ):
    """Return *some_path* with case and separators normalised for the host OS."""
    case_folded = os.path.normcase(some_path)
    return os.path.normpath(case_folded)
def get_architecture():
    """Return the bit width of the running interpreter: 32 or 64.

    The guess is based on the platform's largest native integer, as in the
    original implementation.  ``sys.maxint`` was removed in Python 3, so
    ``sys.maxsize`` is used as a backward-compatible fallback.

    :raises RuntimeError: if the value matches neither a 32- nor a 64-bit
        platform.
    """
    max_int = getattr(sys, 'maxint', sys.maxsize)
    if max_int == 2147483647:
        return 32
    elif max_int == 9223372036854775807:
        return 64
    else:
        raise RuntimeError( "Unknown architecture" )
#The following code is cut-and-paste from this post:
#http://groups.google.com/group/comp.lang.python/browse_thread/thread/5b71896c06bd0f76/
#Thanks to Michele Simionato, for it
class cached(property):
    """Descriptor that converts a method into a lazily computed, cached attribute.

    The first read calls the wrapped method and stores the result on the
    instance under a ``_``-prefixed name; later reads return the stored
    value.  ``del obj.attr`` clears the cache so the next read recomputes.
    """
    def __init__(self, method):
        # Instance attribute name used to store the computed value.
        private = '_' + method.__name__
        def fget(s):
            try:
                # Fast path: value already computed and cached.
                return getattr(s, private)
            except AttributeError:
                # First access: compute, remember on the instance, return.
                value = method(s)
                setattr(s, private, value)
                return value
        def fdel(s):
            # Drop the cached value; the next read recomputes it.
            del s.__dict__[private]
        super(cached, self).__init__(fget, fdel=fdel)
    @staticmethod
    def reset(self):
        # NOTE(review): declared @staticmethod yet takes the instance
        # explicitly -- call as cached.reset(obj) to clear every cached
        # attribute defined on obj's class.
        cls = self.__class__
        for name in dir(cls):
            attr = getattr(cls, name)
            if isinstance(attr, cached):
                delattr(self, name)
delattr(self, name)
class enum( object ):
    """Minimal enumeration base class (Python 2 era, predates ``enum`` module).

    Subclass and define numeric class attributes; query them by value.

    Usage example:
    class fruits(enum):
        apple = 0
        orange = 1
    fruits.has_value( 1 )
    fruits.name_of( 1 )
    """
    @classmethod
    def has_value( cls, enum_numeric_value ):
        """Return True if some attribute of *cls* equals the given value."""
        # Scan the class dictionary for a matching numeric value.
        for name, value in cls.__dict__.iteritems():
            if enum_numeric_value == value:
                return True
        else:
            # for/else: loop completed without an early return -> not found.
            return False
    @classmethod
    def name_of( cls, enum_numeric_value ):
        """Return the attribute name bound to *enum_numeric_value*.

        Raises RuntimeError when no attribute holds that value.
        """
        for name, value in cls.__dict__.iteritems():
            if enum_numeric_value == value:
                return name
        else:
            raise RuntimeError( 'Unable to find name for value(%d) in enumeration "%s"'
                                % ( enum_numeric_value, cls.__name__ ) )
% ( enum_numeric_value, cls.__name__ ) )
|
tkarabela/pysubs2 | tests/test_parse_tags.py | Python | mit | 1,695 | 0.00413 | from pysubs2 import SSAStyle
from pysubs2.substation | import parse_tags
def test_no_tags():
    """Plain text yields a single fragment carrying the default style."""
    plain = "Hello, world!"
    assert parse_tags(plain) == [(plain, SSAStyle())]
def test_i_tag():
text = "Hello, {\\i1}world{\\i0}!"
| assert parse_tags(text) == [("Hello, ", SSAStyle()),
("world", SSAStyle(italic=True)),
("!", SSAStyle())]
def test_r_tag():
text = "{\\i1}Hello, {\\r}world!"
assert parse_tags(text) == [("", SSAStyle()),
("Hello, ", SSAStyle(italic=True)),
("world!", SSAStyle())]
def test_r_named_tag():
styles = {"other style": SSAStyle(bold=True)}
text = "Hello, {\\rother style\\i1}world!"
assert parse_tags(text, styles=styles) == \
[("Hello, ", SSAStyle()),
("world!", SSAStyle(italic=True, bold=True))]
def test_drawing_tag():
text = r"{\p1}m 0 0 l 100 0 100 100 0 100{\p0}test"
fragments = parse_tags(text)
assert len(fragments) == 3
drawing_text, drawing_style = fragments[0]
assert drawing_text == ""
assert drawing_style.drawing is False
drawing_text, drawing_style = fragments[1]
assert drawing_text == "m 0 0 l 100 0 100 100 0 100"
assert drawing_style.drawing is True
drawing_text, drawing_style = fragments[2]
assert drawing_text == "test"
assert drawing_style.drawing is False
def test_no_drawing_tag():
    """A \\p tag followed by non-digits (here ``aws``) must not enable drawing mode."""
    fragments = parse_tags(r"test{\paws}test")
    assert len(fragments) == 2
    for text_part, style_part in fragments:
        assert text_part == "test"
        assert style_part.drawing is False
|
zepto/musio | examples/musioencode.py | Python | gpl-3.0 | 11,944 | 0.000084 | #!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# Test the vorbis encoder.
# Copyright (C) 2013 Josiah Gordon <josiahg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test the vorbis encoder."""
def main(args: dict) -> bool:
"""Encode args['filename'] times."""
from os.path import basename, isfile, splitext
from select import select
from sys import stdin
from termios import (ECHO, ICANON, TCSANOW, VMIN, VTIME, tcgetattr,
tcsetattr)
from musio import open_file
if args['debug']:
from musio import io_util
io_util.DEBUG = True
filename = args['filename']
output = splitext(basename(filename))[0] + '.' + args['filetype']
output_bytes = output.encode('utf-8', 'surrogateescape')
output_printable = output_bytes.decode('utf-8', 'ignore')
if isfile(output):
overwrite = input(f"Overwrite {output_printable} (y/n): ").lower()
if overwrite.startswith("n"):
return False
# Save the current terminal state.
normal = tcgetattr(stdin)
quiet = tcgetattr(stdin)
# Do not wait for key press and don't echo.
quiet[3] &= ~(ECHO | ICANON)
quiet[6][VMIN] = 0
quiet[6][VTIME] = 0
# Set the new terminal state.
tcsetattr(stdin, TCSANOW, quiet)
# Value returned to tell the calling function whether to quit or
# not.
quit_val = True
if args['filetype'].lower() == 'ogg':
quality = args['quality'] / 10 if args['quality'] in range(-1, 11) else 0.5
elif args['filetype'].lower() == 'mp3':
quality = args['quality'] if args['quality'] in range(0, 10) else 2
else:
quality = 5
try:
with open_file(blacklist=args['input_blacklist'], **args) as in_file:
in_file_title = in_file._info_dict.get('title',
in_file._info_dict['name'])
comment_dict = {'title': in_file_title}
comment_dict.update(in_file._info_dict)
for i in ['title', 'artist', 'album', 'year', 'comment',
'track', 'genre']:
if args.get(i, ''):
comment_dict[i] = args[i]
with open_file(output, 'w', depth=in_file.depth,
rate=in_file.rate, channels=in_file.channels,
quality=quality, floatp=in_file._floatp,
unsigned=in_file._unsigned,
comment_dict=comment_dict,
bit_rate=args['bit_rate'],
blacklist=args['output_blacklist']
) as out_file:
in_file.loops = 0
if args['debug']:
print(repr(in_file))
print(repr(out_file))
if args['show_position']:
filename_bytes = filename.encode('utf-8',
'surrogateescape')
filename_printable = filename_bytes.decode('utf-8',
'ignore')
print(f"Encoding: {filename_printable} to "
f"{output_printable}")
print(in_file)
for data in in_file:
if args['show_position']:
if in_file.length > 0:
# Calculate the percentage played.
pos = (in_file.position * 100) / in_file.length
# Make the string.
pos_str = f"Position: {pos:.2f}%"
# Find the length of the string.
format_len = len(pos_str) + 2
# Print the string and after erasing the old
# one using ansi escapes.
print(f"\033[{format_len}D\033[K{pos_str}", end='',
flush=True)
out_file.write(data)
# Check for input.
r, _, _ = select([stdin], [], [], 0)
# Get input if there was any otherwise continue.
if r:
command = r[0].readline().lower()
# Handle input commands.
if command.startswith('q'):
| quit_val = False
break
elif command == '\n':
break
except Exception as err:
print("Error: %s" % err, flush=True)
raise(err)
finally:
# Re-set the terminal state.
tcsetattr(stdin, TCSANOW, norm | al)
if args['show_position']:
print("\nDone.")
return quit_val
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Musio encoder")
parser.add_argument('-e', '--quality', action='store', default=-10,
type=int, help='Encoding quality (1-10)',
dest='quality')
parser.add_argument('-t', '--track', action='store', default=0, type=int,
help='Track to play', dest='track')
parser.add_argument('-tt', '--title', action='store', default='',
help='id3 Title tag', dest='title')
parser.add_argument('-ta', '--artist', action='store', default='',
help='id3 Artist tag', dest='artist')
parser.add_argument('-tl', '--album', action='store', default='',
help='id3 Album tag', dest='album')
parser.add_argument('-ty', '--year', action='store', default='',
help='id3 Year tag', dest='year')
parser.add_argument('-tc', '--comment', action='store', default='',
help='id3 Comment tag', dest='comment')
parser.add_argument('-tr', '--id3track', action='store', default='',
help='id3 Track tag', dest='track')
parser.add_argument('-tg', '--genre', action='store', default=0,
help='id3 Genre tag', dest='genre')
parser.add_argument('-p', '--path', action='store', default=[],
type=lambda a: a.split(','), help='Codec path',
dest='mod_path')
parser.add_argument('-ib', '--input-blacklist', action='extend',
default=['dummy'],
type=lambda a: a.split(','),
help='Blacklist an input Codec',
dest='input_blacklist')
parser.add_argument('-ob', '--output-blacklist', action='extend',
default=['dummy'],
type=lambda a: a.split(','),
help='Blacklist an output Codec',
dest='output_blacklist')
parser.add_argument('-s', '--soundfont', action='store',
default='/usr/share/soundfonts/FluidR3_GM.sf2',
help='Soundfont to use when playing midis',
dest='soundfont')
parser.add_argument('-ab', '--bank', action='store', type=str,
default='-1',
help='Bank used by adlmidi.',
dest='bank')
parser.add_argument('-av', '--volume-model', action='store', type=int,
default=0,
help=('Set the volume range m |
nxvl/critsend_test | test_proyect/urls.py | Python | mit | 482 | 0 | # coding=utf-8
"""
CritSend test proyect urls.
Copyright (C) 2013 Nicolas Valcárcel Scerpella
Authors:
Nicolas Valcárcel Scerpella <nvalcarcel@gmail.com>
"""
# Standard library imports
# Framework imports
from dj | ango.conf.urls import patterns, include, url
from django.contrib import admin
# 3rd party imports
# Local imports
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^', include('upload.urls')) | ,
url(r'^admin/', include(admin.site.urls)),
)
|
jawilson/home-assistant | homeassistant/components/freedompro/binary_sensor.py | Python | apache-2.0 | 2,578 | 0.000388 | """Support for Freedompro binary_sensor."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
DEVICE_CLASS_MAP = {
"smokeSensor": DEVICE_CLASS_SMOKE,
"occupancySensor": DEVICE_CLASS_OCCUPANCY,
"motionSensor": DEVICE_CLASS_MOTION,
"contactSensor": DEVICE_CLASS_OPENING,
}
DEVICE_KEY_MAP = {
"smokeSensor": "smokeDetected",
"occupancySensor": "occupancyDetected",
"motionSensor": " | motionDetected",
"contactSensor": "contactSensorState",
}
SUPPORTED_SENSORS = {"smokeSensor", "occupancySensor", "motionSensor", "contactSensor"}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Freedompro binary_sensor."""
coordi | nator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
Device(device, coordinator)
for device in coordinator.data
if device["type"] in SUPPORTED_SENSORS
)
class Device(CoordinatorEntity, BinarySensorEntity):
    """Representation of a Freedompro binary_sensor."""
    def __init__(self, device, coordinator):
        """Initialize the Freedompro binary_sensor.

        *device* is one entry of the coordinator's device list; its
        ``name``/``uid``/``type`` keys seed the entity attributes.
        """
        super().__init__(coordinator)
        self._attr_name = device["name"]
        self._attr_unique_id = device["uid"]
        # Freedompro device type (e.g. "smokeSensor"); selects the state
        # key via DEVICE_KEY_MAP on every coordinator update.
        self._type = device["type"]
        self._attr_device_info = DeviceInfo(
            identifiers={
                (DOMAIN, self.unique_id),
            },
            manufacturer="Freedompro",
            model=device["type"],
            name=self.name,
        )
        self._attr_device_class = DEVICE_CLASS_MAP[device["type"]]
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        # Find this entity's device by uid; None if it vanished from the feed.
        device = next(
            (
                device
                for device in self.coordinator.data
                if device["uid"] == self.unique_id
            ),
            None,
        )
        if device is not None and "state" in device:
            state = device["state"]
            # Map the per-type state key (e.g. "smokeDetected") onto is_on.
            self._attr_is_on = state[DEVICE_KEY_MAP[self._type]]
        super()._handle_coordinator_update()
    async def async_added_to_hass(self) -> None:
        """When entity is added to hass, pull the current state once."""
        await super().async_added_to_hass()
        self._handle_coordinator_update()
|
b12io/orchestra | orchestra/admin.py | Python | apache-2.0 | 13,202 | 0 | from ajax_select import make_ajax_form
from ajax_select.admin import AjaxSelectAdmin
from bitfield import BitField
from bitfield.admin import BitFieldListFilter
from bitfield.forms import BitFieldCheckboxSelectMultiple
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.html import format_html
from django_object_actions import DjangoObjectActions
from phonenumber_field.modelfields import PhoneNumberField
from phonenumber_field.widgets import PhoneNumberPrefixWidget
from related_admin import RelatedFieldAdmin
from orchestra.communication.slack import get_slack_user_id
from orchestra.models import Certification
from orchestra.models import CommunicationPreference
from orchestra.models import Iteration
from orchestra.models import PayRate
from orchestra.models import Project
from orchestra.models import SanityCheck
from orchestra.models import StaffBotRequest
from orchestra.models import StaffingRequestInquiry
from orchestra.models import StaffingResponse
from orchestra.models import Step
from orchestra.models import Task
from orchestra.models import TaskAssignment
from orchestra.models import TimeEntry
from orchestra.models import Todo
from orchestra.models import TodoQA
from orchestra.models import TodoListTemplate
from orchestra.models import TodoListTemplateImportRecord
from orchestra.models import Worker
from orchestra.models import WorkerCertification
from orchestra.models import WorkerAvailability
from orchestra.models import Workflow
from orchestra.models import WorkflowVersion
from orchestra.todos.import_export import export_to_spreadsheet
admin.site.site_header = 'Orchestra'
admin.site.site_title = 'Orchestra'
admin.site.index_title = 'Orchestra'
@admin.register(Certification)
class CertificationAdmin(admin.ModelAdmin):
    """Django admin for Certification: searchable by slug/name/description, filtered by workflow."""
    list_display = ('id', 'slug', 'workflow', 'name')
    ordering = ('slug',)
    search_fields = ('slug', 'description', 'name')
    list_filter = ('workflow',)
@admin.register(Iteration)
class IterationAdmin(AjaxSelectAdmin):
form = make_ajax_form(Iteration, {
'assignment': 'task_assignments'
})
list_display = (
'id', 'edit_assignment', 'start_datetime', 'end_datetime',
'status')
search_fields = (
'assignment__task__step__name',
'assignment__task__project__short_description',
'assignment__worker__user__username')
ordering = ('assignment__worker__user__username',)
list_filter = ('status', 'assignment__worker__user__username')
def edit_assignment(self, obj):
return edit_link(obj.assignment)
@admin.register(PayRate)
class PayRateAdmin(AjaxSelectAdmin):
form = make_ajax_form(PayRate, {
'worker': 'workers'
})
list_display = (
'id', 'edit_worker', 'hourly_rate', 'hourly_multiplier', 'start_date',
'end_date')
search_fields = ('worker__user__username',)
ordering = ('worker__user__username',)
list_filter = ('worker',)
def edit_worker(self, obj):
return edit_link(obj.worker)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    """Django admin for Project, filterable by its workflow version."""
    list_display = (
        'id', 'short_description', 'workflow_version', 'start_datetime')
    search_fields = ('short_description',
                     'workflow_version__slug',
                     'workflow_version__workflow__slug',)
    list_filter = ('workflow_version',)
@admin.register(SanityCheck)
class SanityCheckAdmin(AjaxSelectAdmin):
form = make_ajax_form(SanityCheck, {
'project': 'projects',
})
list_display = ('id', 'created_at', 'project', 'check_slug', 'handled_at')
ordering = ('-created_at',)
search_fields = (
'project__short_description', 'check_slug')
list_filter = ('project__workflow_version',)
@admin.register(Step)
class StepAdmin(admin.ModelAdmin):
    """Django admin for workflow Step records."""
    list_display = (
        'id', 'slug', 'workflow_version', 'name', 'description', 'is_human')
    ordering = ('slug',)
    search_fields = ('slug', 'name', 'description',)
    list_filter = ('workflow_version', 'is_human')
@admin.register(Task)
class TaskAdmin(AjaxSelectAdmin):
form = make_ajax_form(Task, {
'project': 'projects',
})
list_display = (
'id', 'edit_project', 'step_name', 'workflow_version',
'start_datetime')
ordering = ('-project', 'start_datetime',)
search_fields = ('project__short_description', 'step__name',)
list_filter = ('step__is_human', 'project__workflow_version')
def step_name(self, obj):
return obj.step.name
def workflow_version(self, obj):
return obj.project.workflow_version
def edit_project(self, obj):
return edit_link(obj.project, obj.project.short_description)
@admin.register(TaskAssignment)
class TaskAssignmentAdmin(AjaxSelectAdmin):
form = make_ajax_form(TaskAssignment, {
'worker': 'workers',
'task': 'tasks',
})
list_display = (
'id', 'edit_project', 'edit_task', 'assignment_counter', 'edit_worker',
'workflow_version', 'start_datetime')
ordering = ('-task__project', 'task__start_datetime', 'assignment_counter')
search_fields = (
'task__project__short_description', 'task__step__name',
'worker__user__username')
list_filter = ('task__step__is_human', 'task__project__workflow_version')
def workflow_version(self, obj):
return obj.task.project.workflow_version
def edit_task(self, obj):
return edit_link(obj.task, obj.task.step.name)
def edit_project(self, obj):
return edit_link(obj.task.project, obj.task.project.short_description)
def edit_worker(self, obj):
return edit_link(obj.worker)
@admin.register(TimeEntry)
class TimeEntryAdmin(AjaxSelectAdmin):
form = make_ajax_form(TimeEntry, {
'worker': 'workers',
'assignment': 'task_assignments',
})
list_display = ('id', 'date', 'worker', 'time_worked', 'assignment')
search_fields = (
'id', 'worker__user__username', 'assignment__task__step__name',
'assignment__task__project__short_description')
list_filter = ('worker',)
@admin.register(Todo)
class TodoAdmin(admin.ModelAdmin):
# TODO(murat): remove `task` with its removal from the model
autocomplete_fields = ('task', 'project', 'step', 'parent_todo')
list_display = ('id', 'created_at', 'task', 'title', 'completed')
ordering = ('-created_at',)
search_fields = (
'project__short_description', 'step__name',
'title')
list_filter = ('project__workflow_version',)
@admin.register(TodoQA)
class TodoQAAdmin(AjaxSelectAdmin):
list_display = ('id', 'created_at', 'todo', 'comment', 'approved')
ordering = ('-cre | ated_at',)
search_fields = ('todo__title', 'comment',)
@admin.register(TodoListTemplateImportRecord)
class TodoListTemplateImportRecordAdmin(AjaxSelectAdmin):
list_display = ('id', 'created_at', 'todo_list_template', 'importer')
list_filter = ('todo_list_template',)
search_fields = (
'todo_list_template__slug',
'todo_list_template__name',
'todo_list_templa | te__description',
'import_url'
)
ordering = ('-created_at',)
@admin.register(TodoListTemplate)
class TodoListTemplateAdmin(DjangoObjectActions, AjaxSelectAdmin):
change_actions = ('export_spreadsheet', 'import_spreadsheet')
form = make_ajax_form(TodoListTemplate, {
'creator': 'workers',
})
list_display = ('id', 'created_at', 'slug', 'name')
ordering = ('-created_at',)
search_fields = (
'slug', 'name', 'todos',
'description')
list_filter = ('creator__user__username',)
def export_spreadsheet(self, request, todo_list_template):
return HttpResponseRedirect(export_to_spreadsheet(todo_list_template))
export_spreadsheet.attrs = {'target': '_blank'}
export_spreadsheet.short_description = 'Export to spreadsheet'
export_spreadsheet.label = 'Export to spreadsheet'
def import_spreadsheet(self, request, todo_list_template):
return redirect(
'orchestra:todos:import_todo_list_temp |
gocardless/gocardless-pro-python | gocardless_pro/services/mandate_pdfs_service.py | Python | mit | 3,470 | 0.007205 | # WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
from . import base_service
from .. import resources
from ..paginator import Paginator
from .. import errors
class MandatePdfsService(base_service.BaseService):
    """Service class that provides access to the mandate_pdfs
    endpoints of the GoCardless Pro API.
    """
    RESOURCE_CLASS = resources.MandatePdf
    RESOURCE_NAME = 'mandate_pdfs'
    def create(self,params=None, headers=None):
        """Create a mandate PDF.

        Generates a PDF mandate and returns its temporary URL.

        Customer and bank account details can be left blank (for a blank
        mandate), provided manually, or inferred from the ID of an existing
        [mandate](#core-endpoints-mandates).

        By default PDF mandates are generated in English; set the
        `Accept-Language` header to a supported ISO 639-1 code to request
        another language.  Supported languages per scheme: ACH, Bacs, BECS,
        BECS NZ and PAD are English only; Autogiro adds Swedish;
        Betalingsservice adds Danish; SEPA Core supports Danish, Dutch,
        English, French, German, Italian, Portuguese, Spanish and Swedish.

        Args:
              params (dict, optional): Request body.

        Returns:
              MandatePdf
        """
        endpoint = '/mandate_pdfs'
        body = params
        if body is not None:
            # Wrap the caller-supplied attributes in the API envelope.
            body = {self._envelope_key(): body}
        response = self._perform_request('POST', endpoint, body, headers,
                                         retry_failures=True)
        return self._resource_for(response)
|
ya790206/call_seq | setup.py | Python | apache-2.0 | 446 | 0.044843 | #!/usr/bin/en | v python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import call_seq
setup(
name = 'call_seq',
version = '0.0.2',
description = 'call sequence visualization',
author = 'ya790206',
url = 'https://github.com/ya790206/call_seq',
license = 'Apache License Version 2.0',
platforms = 'any',
classifiers = [
],
| packages = find_packages(),
entry_points = {
}
)
|
open-homeautomation/home-assistant | homeassistant/components/sensor/ups.py | Python | apache-2.0 | 3,298 | 0 | """
Sensor for UPS packages.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ups/
"""
from collections import defaultdict
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
ATTR_ATTRIBUTION)
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from homeassistant.util import Throttle
from homeassistant.util.dt import now, parse_date
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['upsmychoice==1.0.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'ups'
COOKIE = 'upsmychoice_cookies.pickle'
CONF_UPDATE_INTERVAL = 'update_interval'
ICON = 'mdi:package-variant-closed'
STATUS_DELIVERED = 'delivered'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_UPDATE_INTERVAL, default=timedelta(seconds=1800)): (
vol.All(cv.time_period, cv.positive_timedelta)),
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the UPS platform."""
import upsmychoice
try:
cookie = hass.config.path(COOKIE)
session = upsmychoice.get_session(config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
cookie_path=cookie)
except upsmychoice.UPSError:
_LOGGER.exception('Could not connect to UPS My Choice')
return False
add_devices([UPSSensor(session, config.get(CONF_NAME),
config.get(CONF_UPDATE_INTERVAL))])
class UPSSensor(Entity):
"""UPS Sensor."""
def __init__(self, session, name, interval):
"""Initialize the sensor."""
self._session = session
self._name = name
self._attributes = None
self._state = None
self.update = T | hrottle(interval)(self._update)
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._na | me or DOMAIN
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def _update(self):
"""Update device state."""
import upsmychoice
status_counts = defaultdict(int)
for package in upsmychoice.get_packages(self._session):
status = slugify(package['status'])
skip = status == STATUS_DELIVERED and \
parse_date(package['delivery_date']) < now().date()
if skip:
continue
status_counts[status] += 1
self._attributes = {
ATTR_ATTRIBUTION: upsmychoice.ATTRIBUTION
}
self._attributes.update(status_counts)
self._state = sum(status_counts.values())
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
|
landscape-test/all-messages | messages/pep8/E128.py | Python | unlicense | 65 | 0 | """
E128
|
Continuation line under-indented for visu | al indent
"""
|
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/pylint/test/input/func_unused_overridden_argument.py | Python | agpl-3.0 | 913 | 0.003286 | # pylint: disable=R0903, print-statement
"""for Sub.inherited, only the warning for "aay" is desired.
The warnings for "aab" and "aac" are most likely false positives though,
because there could be another subclass that overrides the same method and does
use the arguments (eg Sub2)
"""
__revision__ = 'thx to Maarten ter Huurne'
class Base(object):
"parent"
def inherited(self, aaa, aab, aac):
| "abstract method"
raise NotImplementedError
class Sub(Base):
"child 1"
def in | herited(self, aaa, aab, aac):
"overridden method, though don't use every argument"
return aaa
def newmethod(self, aax, aay):
"another method, warning for aay desired"
print(self, aax)
class Sub2(Base):
"child 1"
def inherited(self, aaa, aab, aac):
"overridden method, use every argument"
return aaa + aab + aac
|
scieloorg/opac | opac/webapp/models.py | Python | bsd-2-clause | 5,095 | 0.001185 | # coding: utf-8
"""
Conjunto de modelos relacionais para o controle da app (Usuarios, auditorias, logs, etc)
Os modelos do catálogo do OPAC (periódicos, números, artigos) estão definidos na
lib: opac_schema (ver requirements.txt)
"""
import os
from sqlalchemy.event import listens_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils.types.choice import ChoiceType
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from flask import current_app
from webapp.utils import thumbgen_filename
from . import dbsql as db
from . import login_manager
from . import notifications
LANGUAGES_CHOICES = [
('pt', 'Português'),
('en', 'English'),
('es', 'Español'),
]
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), nullable=False, unique=True)
_password = db.Column(db.String(128), nullable=True) # deve ser possível add novo user sem setar senha
email_confirmed = db.Column(db.Boolean, nullable=False, default=False)
@hybrid_property
def password(self):
return self._password
@password.setter
def _set_password(self, plaintext):
self._password = generate_password_hash(plaintext)
def define_password(self, plaintext):
self._password = generate_password_hash(plaintext)
def is_correct_password(self, plaintext):
"""
Compara a string ``plaintext`` com a senha "hasheada" armazenada para este usuário.
"""
if not self._password:
return False
else:
return check_password_hash(self._password, plaintext)
def send_confirmation_email(self):
if not self._check_valid_email():
raise ValueError('Usuário deve ter email válido para realizar o envío')
else:
return notifications.send_confirmation_email(self.email)
def send_reset_password_email(self):
if not self._check_valid_email():
raise ValueError('Usuário deve ter email válido para realizar o envío')
else:
return notifications.send_reset_password_email(self.email)
def _check_valid_email(self):
"""
retorna True quando a instânc | ia (self) do usuário, tem um email válido.
retorna False em outro caso.
"""
from webapp.admin.forms import EmailForm
if not self.email or self.email == '' or self.email == '':
return False
else:
form = Em | ailForm(data={'email': self.email})
return form.validate()
# Required for administrative interface
def __unicode__(self):
return self.email
@login_manager.user_loader
def load_user(user_id):
"""
Retora usuário pelo id.
Necessário para o login manager.
"""
return User.query.get(int(user_id))
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64), nullable=False)
path = db.Column(db.Unicode(256), nullable=False)
language = db.Column(ChoiceType(LANGUAGES_CHOICES), nullable=True)
def __unicode__(self):
return self.name
@property
def get_absolute_url(self):
media_url = current_app.config['MEDIA_URL']
return '%s/%s' % (media_url, self.path)
# Delete hooks: remove arquivos quando o modelo é apagado
@listens_for(File, 'after_delete')
def delelte_file_hook(mapper, connection, target):
if target.path:
media_root = current_app.config['MEDIA_ROOT']
try:
os.remove(os.path.join(media_root, target.path))
except OSError:
pass # Se der erro não importa, o arquivo já não existe
class Image(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64), nullable=False)
path = db.Column(db.Unicode(256), nullable=False)
language = db.Column(ChoiceType(LANGUAGES_CHOICES), nullable=True)
def __unicode__(self):
return self.name
@property
def get_absolute_url(self):
media_url = current_app.config['MEDIA_URL']
return '%s/%s' % (media_url, self.path)
@property
def get_thumbnail_absolute_url(self):
media_url = current_app.config['MEDIA_URL']
thumb_path = thumbgen_filename(self.path)
return '%s/%s' % (media_url, thumb_path)
# Delete hooks: remove arquivos quando o modelo é apagado
@listens_for(Image, 'after_delete')
def delelte_image_hook(mapper, connection, target):
if target.path:
media_root = current_app.config['MEDIA_ROOT']
# Remover a imagem
try:
os.remove(os.path.join(media_root, target.path))
except OSError:
pass # Se der erro não importa, o arquivo já não existe
# Remover o thumbnail
try:
thumb_path = thumbgen_filename(target.path)
os.remove(os.path.join(media_root, thumb_path))
except OSError:
pass # Se der erro não importa, o arquivo já não existe
|
endlessm/chromium-browser | third_party/catapult/third_party/gsutil/third_party/gcs-oauth2-boto-plugin/setup.py | Python | bsd-3-clause | 3,026 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing pe | rmissions and
# limitations under the License.
"""Setup installation module for gcs-oauth2-boto-plugin."""
from setuptools import find_packages
from setuptools import setup
long_desc = """
gcs-oauth2-boto-plugin is a Python application whose purpose is to behave as an
auth plugin for the boto auth plugin framework for use with OAuth 2.0
credentials for the Google Cloud Platform. This plugin is compatible with both
us | er accounts and service accounts, and its functionality is essentially a
wrapper around oauth2client with the addition of automatically caching tokens
for the machine in a thread- and process-safe fashion.
"""
requires = [
'boto>=2.29.1',
'google-reauth>=0.1.0',
'httplib2>=0.8',
'oauth2client>=2.2.0',
'pyOpenSSL>=0.13',
# Not using 1.02 because of:
# https://code.google.com/p/socksipy-branch/issues/detail?id=3
'SocksiPy-branch==1.01',
'retry_decorator>=1.0.0',
'six>=1.12.0'
]
extras_require = {
'dev': [
'freezegun',
'mock',
],
}
setup(
name='gcs-oauth2-boto-plugin',
version='2.5',
url='https://developers.google.com/storage/docs/gspythonlibrary',
download_url=('https://github.com/GoogleCloudPlatform'
'/gcs-oauth2-boto-plugin'),
license='Apache 2.0',
author='Google Inc.',
author_email='gs-team@google.com',
description=('Auth plugin allowing use the use of OAuth 2.0 credentials '
'for Google Cloud Storage in the Boto library.'),
long_description=long_desc,
zip_safe=True,
platforms='any',
packages=find_packages(exclude=['third_party']),
include_package_data=True,
install_requires=requires,
extras_require=extras_require,
tests_require=extras_require['dev'],
test_suite='gcs_oauth2_boto_plugin.test_oauth2_client',
classifiers=[
'Development Status :: 7 - Inactive',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
xStream-Kodi/plugin.video.xstream | resources/lib/handler/requestHandler.py | Python | gpl-3.0 | 11,867 | 0.002949 | #!/usr/bin/env python2.7
import hashlib
import httplib
import os
import socket
import sys
import time
import urllib
import mechanize
import xbmcgui
from resources.lib import common
from resources.lib import logger, cookie_helper
from resources.lib.cBFScrape import cBFScrape
from resources.lib.cCFScrape import cCFScrape
from resources.lib.config import cConfig
class cRequestHandler:
def __init__(self, sUrl, caching=True, ignoreErrors=False, compression=True, formIndex=0):
self.__sUrl = sUrl
self.__sRealUrl = ''
self.__cType = 0
self.__aParameters = {}
self.__aResponses = {}
self.__headerEntries = {}
self.__cachePath = ''
self.__formIndex = formIndex
self._cookiePath = ''
self.ignoreDiscard(False)
self.ignoreExpired(False)
self.caching = caching
self.ignoreErrors = ignoreErrors
self.compression = compression
self.cacheTime = int(cConfig().getSetting('cacheTime', 600))
self.requestTimeout = int(cConfig().getSetting('requestTimeout', 60))
self.removeBreakLines(True)
self.removeNewLines(True)
self.__setDefaultHeader()
self.setCachePath()
self.__setCookiePath()
self.__sResponseHeader = ''
if self.requestTimeout >= 60 or self.requestTimeout <= 10:
self.requestTimeout = 60
def removeNewLines(self, bRemoveNewLines):
self.__bRemoveNewLines = bRemoveNewLines
def removeBreakLines(self, bRemoveBreakLines):
self.__bRemoveBreakLines = bRemoveBreakLines
def setRequestType(self, cType):
self.__cType = cType
def addHeaderEntry(self, sHeaderKey, sHeaderValue):
self.__headerEntries[sHeaderKey] = sHeaderValue
def getHeaderEntry(self, sHeaderKey):
if sHeaderKey in self.__headerEntries:
return self.__headerEntries[sHeaderKey]
def addParameters(self, key, value, quote=False):
if not quote:
self.__aParameters[key] = value
else:
self.__aParameters[key] = urllib.quote(str(value))
def addResponse(self, key, value):
self.__aResponses[key] = value
def setFormIndex(self, index):
self.__formIndex = index
def getResponseHeader(self):
return self.__sResponseHeader
# url after redirects
def getRealUrl(self):
return self.__sRealUrl
def request(self):
self.__sUrl = self.__sUrl.replace(' ', '+')
return self.__callRequest()
def getRequestUri(self):
return self.__sUrl + '?' + urllib.urlencode(self.__aParameters)
def __setDefaultHeader(self):
self.addHeaderEntry('User-Agent', common.FF_USER_AGENT)
self.addHeaderEntry('Accept-Language', 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3')
self.addHeaderEntry('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
if self.compression:
self.addHeaderEntry('Accept-Encoding', 'gzip')
def __callRequest(self):
if self.caching and self.cacheTime > 0:
sContent = self.readCache(self.getRequestUri())
if sContent:
return sContent
cookieJar = mechanize.LWPCookieJar(filename=self._cookiePath)
try: # TODO ohne try evtl.
cookieJar.load(ignore_discard=self.__bIgnoreDiscard, ignore_expires=self.__bIgnoreExpired)
except Exception as e:
logger.info(e)
sParameters = urllib.urlencode(self.__aParameters, True)
handlers = [mechanize.HTTPCookieProcessor(cookiejar=cookieJar),
mechanize.HTTPEquivProcessor,
mechanize.HTTPRefreshProcessor]
if sys.version_info >= (2, 7, 9) and sys.version_info < (2, 7, 11):
handlers.append(newHTTPSHandler)
opener = mechanize.build_opener(*handlers)
if (len(sParameters) > 0):
oRequest = mechanize.Request(self.__sUrl, sParameters)
else:
oRequest = mechanize.Request(self.__sUrl)
for key, value in self.__headerEntries.items():
oRequest.add_header(key, value)
cookieJar.add_cookie_header(oRequest)
user_agent = self.__headerEntries.get('User-Agent', common.FF_USER_AGENT)
try:
oResponse = opener.open(oRequest, timeout=self.requestTimeout)
except mechanize.HTTPError, e:
if e.code == 503 and e.headers.get("Server") == 'cloudflare-nginx':
html = e.read()
oResponse = self.__check_protection(html, user_agent, cookieJar)
if not oResponse:
logger.error("Failed to get CF-Cookie for Url: " + self.__sUrl)
return ''
elif not self.ignoreErrors:
xbmcgui.Dialog().ok('xStream', 'Fehler beim Abrufen der Url:', self.__sUrl, str(e))
logger.error("HTTPError " + str(e) + " Url: " + self.__sUrl)
return ''
else:
oResponse = e
except mechanize.URLError, e:
if not self.ignoreErrors:
if hasattr(e.reason, 'args') and e.reason.args[0] == 1 and sys.version_info < (2, 7, 9):
xbmcgui.Dialog().ok('xStream', str(e.reason), '','For this request is Python v2.7.9 or higher required.')
else:
xbmcgui.Dialog().ok('xStream', str(e.reason))
logger.error("URLError " + str(e.reason) + " Url: " + self.__sUrl)
return ''
except httplib.HTTPException, e:
if not self.ignoreErrors:
xbmcgui.Dialog().ok('xStream', str(e))
logger.error("HTTPException " + str(e) + " Url: " + self.__sUrl)
return ''
self.__sResponseHeader = oResponse.info()
# handle gzipped content
if self.__sResponseHeader.get('Content-Encoding') == 'gzip':
import gzip
import StringIO
data = StringIO.StringIO(oResponse.read())
gzipper = gzip.GzipFile(fileobj=data, mode='rb')
try:
oResponse.set_data(gzipper.read())
except:
oResponse.set_data(gzipper.extrabuf)
if self.__aResponses:
forms = mechanize.ParseResponse(oResponse, backwards_compat=False)
form = forms[self.__formIndex]
for field in self.__aResponses:
#logger.info("Field: " + str(not field in form))
try: form.find_control(name=field)
except:
form.new_control("text", field, {"value":""})
form.fixup()
form[field] = self.__aResponses[field]
o = mechanize.build_opener(mechanize.HTTPCookieProcessor(cookieJar))
oResponse = o.open(form.click(), timeout=self.requestTimeout)
sContent = oResponse.read()
checked_response = self.__check_protection(sContent, user_agent, cookieJar)
if checked_response:
oResponse = checked_response
sContent = oResponse.read()
cookie_helper.check_cookies(cookieJar)
cookieJar.save(ignore_discard=self.__bIgnoreDiscard, ignore_expires=self.__bIgnoreExpired)
if (self.__bRemoveNewLines == True):
sContent = sContent.replace("\n", "")
sContent = sContent.replace("\r\t", "") |
if (self.__bRemoveBreakLines == True):
sContent = sContent.replace(" ", "")
self.__sRealUrl = oResponse.geturl()
oResponse.close()
if self.caching and self.cacheTime > 0:
self.writeCache(self.getRequestUri(), sContent)
return sContent
def __check_protection(self, html, user_agent, cookie_jar):
oResponse = None
if 'cf-browser-v | erification' in html:
oResponse = cCFScrape().resolve(self.__sUrl, cookie_jar, user_agent)
elif 'Blazingfast.io' in html:
oResponse = cBFScrape().resolve(self.__sUrl, cookie_jar, user_agent)
return oResponse
def getHeaderLocationUrl(self):
opened = mechanize.urlopen(self.__sUrl)
|
caronc/nzb-subliminal | Subliminal/subliminal/providers/thesubdb.py | Python | gpl-3.0 | 3,140 | 0.003503 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import babelfish
import requests
from . import Provider
from .. import __version__
from ..exceptions import InvalidSubtitle, ProviderNotAvailable, ProviderError
from ..subtitle import Subtitle, is_valid_subtitle, detect
logger = logging.getLogger(__name__)
class TheSubDBSubtitle(Subtitle):
provider_name = 'thesubdb'
def __init__(self, language, hash): # @ReservedAssignment
super(TheSubDBSubtitle, self).__init__(language)
self.hash = hash
def compute_matches(self, video):
matches = set()
# hash
if 'thesubdb' in video.hashes and video.hashes['thesubdb'] == self.hash:
matches.add('hash')
return matches
class TheSubDBProvider(Provider):
languages = set([babelfish.Language.fromalpha2(l) for l in ['en', 'es', 'fr', 'it', 'nl', 'pl', 'pt', 'ro', 'sv', 'tr']])
required_hash = 'thesubdb'
def initialize(self):
self.session = requests.Session()
self.session.headers = {'User-Agent': 'SubDB/1.0 (subliminal/%s; https://github.com/Diaoul/subliminal)' %
__version__}
def terminate(self):
self.session.close()
def get(self, params):
"""Make a GET request on the server with the given parameters
:param params: params of the request
:return: the response
:rtype: :class:`requests.Response`
:raise: :class:`~subliminal.exceptions.ProviderNotAvailable`
"""
try:
r = self.session.get('http://api.thesubdb.com', params=params, timeout=10)
except requests.Timeout:
raise ProviderNotAvailable('Timeout after 10 seconds')
return r
def query(self, hash): # @ReservedAssignment
params = {'action': 'search', 'hash': hash}
logger.debug('Searching subtitles %r', params)
r = self.get(params)
if r.status_code == 404:
logger.debug('No subtitle found')
return []
elif r.status_code != 200:
raise ProviderError('Request failed with status code %d' % r.status_code)
return [TheSubDBSubtitle(language, hash) for language in
set([babelfish.Language.fromalpha2(l) for l in r.content. | split(',')])]
def list_subtitles(self, video, languages):
return [s for s in self.query(video.hashes['thesubdb']) if s.language in languages]
def download_subtitle(self, subtitle):
params = {'action': | 'download', 'hash': subtitle.hash, 'language': subtitle.language.alpha2}
r = self.get(params)
if r.status_code != 200:
raise ProviderError('Request failed with status code %d' % r.status_code)
logger.debug('Download URL: %s {hash=%s, lang=%s}' % (
'http://api.thesubdb.com', subtitle.hash, subtitle.language.alpha2,
))
subtitle_text = r.content.decode(
detect(r.content, subtitle.language.alpha2)['encoding'], 'replace')
if not is_valid_subtitle(subtitle_text):
raise InvalidSubtitle
return subtitle_text
|
koduj-z-klasa/python101 | docs/conf.py | Python | mit | 9,897 | 0.00475 | # -*- coding: utf-8 -*-
#
# Python 101 documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 16 15:47:34 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# the path to the latex executable
pngmath_latex = "/usr/bin/latex"
# the path to the dvipng executable
pngmath_dvipng = "/usr/bin/dvipng"
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python 101'
copyright = u'2014, Centrum Edukacji Obywatelskiej'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# bu | ilt documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-f | alse value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Python101doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Python101.tex', u'Python 101 Documentation',
u'Centrum Edukacji Obywatelskiej', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python101', u'Python 101 Documentation',
[u'Centrum Edukacji Obywatelskiej'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author |
CingHu/neutron-ustack | neutron/tests/unit/services/loadbalancer/drivers/haproxy/test_namespace_driver.py | Python | apache-2.0 | 25,764 | 0.000116 | # Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from neutron.common import exceptions
from neutron.services.loadbalancer.drivers.haproxy import namespace_driver
from neutron.tests import base
class TestHaproxyNSDriver(base.BaseTestCase):
def setUp(self):
super(TestHaproxyNSDriver, self).setUp()
conf = mock.Mock()
conf.haproxy.loadbalancer_state_path = '/the/path'
conf.interface_driver = 'intdriver'
conf.haproxy.user_group = 'test_group'
conf.haproxy.send_gratuitous_arp = 3
conf.AGENT.root_helper = 'sudo_test'
self.conf = conf
self.mock_importer = mock.patch.object(namespace_driver,
'importutils').start()
self.rpc_mock = mock.Mock()
self.driver = namespace_driver.HaproxyNSDriver(
conf,
self.rpc_mock
)
self.vif_driver = mock.Mock()
self.driver.vif_driver = self.vif_driver
self.fake_config = {
'pool': {'id': 'pool_id', 'status': 'ACTIVE',
'admin_state_up': True},
'vip': {'id': 'vip_id', 'port': {'id': 'port_id'},
'status': 'ACTIVE', 'admin_state_up': True}
}
def test_get_name(self):
self.assertEqual(self.driver.get_name(), namespace_driver.DRIVER_NAME)
def test_create(self):
with mock.patch.object(self.driver, '_plug') as plug:
with mock.patch.object(self.driver, '_spawn') as spawn:
self.driver.create(self.fake_config)
plug.assert_called_once_with(
'qlbaas-pool_id', {'id': 'port_id'}
)
spawn.assert_called_once_with(self.fake_config)
def test_update(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(self.driver, '_spawn'),
mock.patch('__builtin__.open')
) as (gsp, spawn, mock_open):
mock_open.return_value = ['5']
self.driver.update(self.fake_config)
mock_open.assert_called_once_with(gsp.return_value, 'r')
spawn.assert_called_once_with(self.fake_config, ['-sf', '5'])
def test_spawn(self):
with contextlib.nested(
mock.patch.object(namespace_driver.hacfg, 'save_config'),
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
) as (mock_save, gsp, ip_wrap):
gsp.side_effect = lambda x, y: y
self.driver._spawn(self.fake_config)
mock_save.assert_called_once_with('conf', self.fake_config,
'sock', 'test_group')
cmd = ['haproxy', '-f', 'conf', '-p', 'pid']
ip_wrap.assert_has_calls([
mock.call('sudo_test', 'qlbaas-pool_id'),
mock.call().netns.execute(cmd)
])
def test_undeploy_instance(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(namespace_driver, 'kill_pids | _in_file'),
mock.patch.object(self.driver, | '_unplug'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('os.path.isdir'),
mock.patch('shutil.rmtree')
) as (gsp, kill, unplug, ip_wrap, isdir, rmtree):
gsp.side_effect = lambda x, y: '/pool/' + y
self.driver.pool_to_port_id['pool_id'] = 'port_id'
isdir.return_value = True
self.driver.undeploy_instance('pool_id')
kill.assert_called_once_with('sudo_test', '/pool/pid')
unplug.assert_called_once_with('qlbaas-pool_id', 'port_id')
isdir.assert_called_once_with('/pool')
rmtree.assert_called_once_with('/pool')
ip_wrap.assert_has_calls([
mock.call('sudo_test', 'qlbaas-pool_id'),
mock.call().garbage_collect_namespace()
])
def test_undeploy_instance_with_ns_cleanup(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch.object(self.driver, 'vif_driver'),
mock.patch.object(namespace_driver, 'kill_pids_in_file'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('os.path.isdir'),
mock.patch('shutil.rmtree')
) as (gsp, vif, kill, ip_wrap, isdir, rmtree):
device = mock.Mock()
device_name = 'port_device'
device.name = device_name
ip_wrap.return_value.get_devices.return_value = [device]
self.driver.undeploy_instance('pool_id', cleanup_namespace=True)
vif.unplug.assert_called_once_with(device_name,
namespace='qlbaas-pool_id')
def test_remove_orphans(self):
with contextlib.nested(
mock.patch.object(self.driver, 'exists'),
mock.patch.object(self.driver, 'undeploy_instance'),
mock.patch('os.listdir'),
mock.patch('os.path.exists')
) as (exists, undeploy, listdir, path_exists):
known = ['known1', 'known2']
unknown = ['unknown1', 'unknown2']
listdir.return_value = known + unknown
exists.side_effect = lambda x: x == 'unknown2'
self.driver.remove_orphans(known)
undeploy.assert_called_once_with('unknown2',
cleanup_namespace=True)
def test_exists(self):
with contextlib.nested(
mock.patch.object(self.driver, '_get_state_file_path'),
mock.patch('neutron.agent.linux.ip_lib.IPWrapper'),
mock.patch('socket.socket'),
mock.patch('os.path.exists'),
) as (gsp, ip_wrap, socket, path_exists):
gsp.side_effect = lambda x, y, z: '/pool/' + y
ip_wrap.return_value.netns.exists.return_value = True
path_exists.return_value = True
self.driver.exists('pool_id')
ip_wrap.assert_has_calls([
mock.call('sudo_test'),
mock.call().netns.exists('qlbaas-pool_id')
])
self.assertTrue(self.driver.exists('pool_id'))
def test_get_stats(self):
raw_stats = ('# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,'
'dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,'
'act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,'
'sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,'
'check_status,check_code,check_duration,hrsp_1xx,'
'hrsp_2xx,hrsp_3xx,hrsp_4xx,hrsp_5xx,hrsp_other,hanafail,'
'req_rate,req_rate_max,req_tot,cli_abrt,srv_abrt,\n'
'8e271901-69ed-403e-a59b-f53cf77ef208,BACKEND,1,2,3,4,0,'
'10,7764,2365,0,0,,0,0,0,0,UP,1,1,0,,0,103780,0,,1,2,0,,0'
',,1,0,,0,,,,0,0,0,0,0,0,,,,,0,0,\n\n'
'a557019b-dc07-4688-9af4-f5cf02bb6d4b,'
'32a6c2a3-420a-44c3-955d-86bd2fc6871e,0,0,0,1,,7,1120,'
'224,,0,,0,0,0,0,UP,1,1,0,0,1,2623,303,,1,2,1,,7,,2,0,,'
'1,L7OK,200,98,0,7,0,0,0,0,0,,,,0,0,\n'
'a557019b-dc07-4688-9af4-f5cf02 |
MartinSilvert/overlappingGenesCreator | src/modifDoublon2.py | Python | apache-2.0 | 1,846 | 0.022814 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 09:30:47 2015
@author: martin
"""
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.SubsMat.MatrixInfo import blosum62
#remarque : Ceci n'est pas une maélioration de modifDoublon,
#c'est une version alternative qui ne s'applique pas aux même doublons.
def score_match(a,b, matrix):
if b == '*' :
return -500000000000000000000000000
elif a == '-' :
return 0
elif (a,b) not in matrix:
return matrix[(tuple(reversed((a,b))))]
else:
| return matrix[(a,b)]
def modifDoublon2(x,y,z,ldna,offset,doublon,i,q,patternX,patternY):
ATCG = ["A","T","C","G"]
# on | commence par déterminer quels acides aminés de x et y sont "ciblés"
aaX = Seq("", IUPAC.protein)
aaY = Seq("", IUPAC.protein)
q_bis = 0.
q_bis += q
if(z<=0):
aaX += x[i]
q_bis /= patternX[i]
else:
aaX += x[i+1+z//3]
q_bis /=patternX[i+1+z//3]
if(z>0):
aaY += y[-i]
q_bis*=patternY[-i]
else:
aaY +=y[-i + z//3]
q_bis*=patternY[-i + z//3]
scores = []
for a in ATCG:
for b in ATCG:
currentDNAx = Seq("", IUPAC.unambiguous_dna)
currentDNAy = Seq("", IUPAC.unambiguous_dna)
currentDNAx += a + b + ldna[doublon+2]
currentaaX = currentDNAx.translate()
currentDNAy += ldna[doublon-1] +a + b
currentaaY = currentDNAy.reverse_complement().translate()
score = score_match(aaX[0].upper(),currentaaX[0].upper(),blosum62)
score += q_bis*score_match(aaY[0].upper(),currentaaY[0].upper(),blosum62)
scores.append(score)
result = scores.index(max(scores))
ldna[doublon] = ATCG[result//4]
ldna[doublon+1]= ATCG[result%4]
|
mikemaccana/resilience | __init__.py | Python | mit | 2,874 | 0.012178 | import sys
import traceback
import logging
import time
import inspect
def run_resilient(function, function_args=[], function_kwargs={}, tolerated_errors=(Exception,), log_prefix='Something failed, tolerating error and retrying: ', retries=5, delay=True, critical=False, initial_delay_time=0.1, delay_multiplier = 2.0):
"""Run the function with function_args and function_kwargs. Warn if it excepts, and retry. If retries are exhausted,
log that, and if it's critical, properly throw the exception """
def show_exception_info(log_prefix):
"""Warn about an exception with a lower priority message, with a text prefix and the error type"""
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
traceback_text = lines[2]
logging.info(log_prefix + traceback_text)
return
delay_time = initial_delay_time
while retries:
retries -= 1
try:
return function(*function_args, **function_kwargs)
except tolerated_errors, error: #IGNORE:W0703
# One of our anticipated errors happened.
if retries:
# We've got more retries left. Log the error, and continue.
show_exception_info(log_prefix)
if delay:
time.sleep(delay_time)
delay_time = delay_time * delay_multiplier
else:
delay_time = 0
logging.info('We have %d tries left. Delaying for %.2f seconds and trying again.', retries, delay_time)
else:
logging.warn('Could not complete action after %d retries.', retries)
if critical:
logging.error('Critical action failed.')
raise error
except Exception:
# We've recieved an error we didn't anticipate. This is bad.
# Depending on the error we the developers | should either fix something, or, if we want to tolerate it,
# add it to our tolerated_errors.
# Those things require human judgement, so we'll raise the exception.
logging.exception('Unanticipated error recieved!') #Log the exception
raise #Re-raise
except:
typ, value, unused = sys.exc_info()
| # We've received an exception that isn't even an Exception subclass!
# This is bad manners - see http://docs.python.org/tutorial/errors.html:
# "Exceptions should typically be derived from the Exception class, either directly or indirectly."
logging.exception("Bad mannered exception. Class was: %s Value was: %s Source file: %s", typ.__name__, str(value), inspect.getsourcefile(typ))
raise
|
eammx/proyectosWeb | proyectoPython/app/__init__.py | Python | mit | 269 | 0.011152 | from flask import F | lask
from flask_bootstrap import Bootstrap
app = Flask(__name__)
bootstrap = Bootstrap()
from .views import page
def create_app(config):
app.config.from_object(config)
bootstrap.init_app(app)
app.register_blueprint(page)
return | app |
NeCTAR-RC/nectar-images | community_image_tests/community_image_tests_tempest_plugin/services/v2/community_image_client.py | Python | apache-2.0 | 1,416 | 0 | # Copyright (c) 2016, Monash e-Research Centre
# (Monash University, Australia)
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from community_image_tests_tempest_plugin.\
api_schema.response.nectar_community_image.v2\
import image as schema
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.image.v2 impo | rt images_client
class CommunityImagesClient(images_client.ImagesClient):
def show_image(self, image_id):
url = 'images/%s' % image_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
self. | validate_response(schema.community_image_schema, resp, body)
return rest_client.ResponseBody(resp, body)
|
arfathpasha/kb_cufflinks | lib/kb_cufflinks/core/script_utils.py | Python | mit | 5,654 | 0.000707 | import logging
import os
import subprocess
import traceback
from zipfile import ZipFile
from os import listdir
from os.path import isfile, join
'''
A utility python module containing a set of methods necessary for this kbase
module.
'''
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def create_logger(log_dir, name):
"""Create a logger
args: name (str): name of logger
returns: logger (obj): logging.Logger instance
"""
logger = logging.getLogger(name)
fmt = logging.Formatter('%(asctime)s - %(process)d - %(name)s - '
' %(levelname)s -%(message)s')
hdl = logging.FileHandler(os.path.join(log_dir, name + '.log'))
hdl.setFormatter(fmt)
logger.addHandler(hdl)
return logger
def if_obj_exists(logger, ws_client, ws_id, o_type, obj | _l):
obj_list = ws_client.list_objects({"workspaces": [ws_id], "type": o_type, 'showHidden': 1})
obj_names = [i[1] for i in obj_list]
existing_names = [i for i in obj_l if i in obj_names]
obj_ids = | None
if len(existing_names) != 0:
e_queries = [{'name': j, 'workspace': ws_id} for j in existing_names]
e_infos = ws_client.get_object_info_new({"objects": e_queries})
obj_ids = [(str(k[1]), (str(k[6]) + '/' + str(k[0]) + '/' + str(k[4]))) for k in e_infos]
return obj_ids
def log(message, level=logging.INFO, logger=None):
if logger is None:
if level == logging.DEBUG:
print('\nDEBUG: ' + message + '\n')
elif level == logging.INFO:
print('\nINFO: ' + message + '\n')
elif level == logging.WARNING:
print('\nWARNING: ' + message + '\n')
elif level == logging.ERROR:
print('\nERROR: ' + message + '\n')
elif level == logging.CRITICAL:
print('\nCRITICAL: ' + message + '\n')
else:
logger.log(level, '\n' + message + '\n')
def zip_files(logger, src_path, output_fn):
"""
Compress all index files (not directory) into an output zip file on disk.
"""
files = [f for f in listdir(src_path) if isfile(join(src_path, f))]
with ZipFile(output_fn, 'w', allowZip64=True) as izip:
for f in files:
izip.write(join(src_path, f), f)
def unzip_files(logger, src_fn, dst_path):
"""
Extract all index files into an output zip file on disk.
"""
with ZipFile(src_fn, 'r') as ozip:
ozip.extractall(dst_path)
def whereis(program):
"""
returns path of program if it exists in your ``$PATH`` variable or `
`None`` otherwise
"""
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and not os.path.isdir(
os.path.join(path, program)):
return os.path.join(path, program)
return None
def runProgram(logger=None,
progName=None,
argStr=None,
script_dir=None,
working_dir=None):
"""
Convenience func to handle calling and monitoring output of external programs.
:param progName: name of system program command
:param argStr: string containing command line options for ``progName``
:returns: subprocess.communicate object
"""
# Ensure program is callable.
if script_dir is not None:
progPath = os.path.join(script_dir, progName)
else:
progPath = progName
progPath = whereis(progName)
if not progPath:
raise RuntimeError(
None,
'{0} command not found in your PATH environmental variable. {1}'.format(
progName,
os.environ.get(
'PATH',
'')))
# Construct shell command
cmdStr = "%s %s" % (progPath, argStr)
print "Executing : " + cmdStr
if logger is not None:
logger.info("Executing : " + cmdStr)
# if working_dir is None:
logger.info("Executing: " + cmdStr + " on cwd")
else:
logger.info("Executing: " + cmdStr + " on " + working_dir)
# Set up process obj
process = subprocess.Popen(cmdStr,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir)
# Get results
result, stderr = process.communicate()
# print result
# print stderr
# keep this until your code is stable for easier debugging
if logger is not None and result is not None and len(result) > 0:
logger.info(result)
else:
print result
if logger is not None and stderr is not None and len(stderr) > 0:
logger.info(stderr)
else:
print stderr
# Check returncode for success/failure
if process.returncode != 0:
raise Exception("Command execution failed {0}".format(
"".join(traceback.format_exc())))
raise RuntimeError(
'Return Code : {0} , result {1} , progName {2}'.format(
process.returncode, result, progName))
# Return result
return {"result": result, "stderr": stderr}
def check_sys_stat(logger):
check_disk_space(logger)
check_memory_usage(logger)
check_cpu_usage(logger)
def check_disk_space(logger):
runProgram(logger=logger, progName="df", argStr="-h")
def check_memory_usage(logger):
runProgram(logger=logger, progName="vmstat", argStr="-s")
def check_cpu_usage(logger):
runProgram(logger=logger, progName="mpstat", argStr="-P ALL")
|
dkamotsky/program-y | src/programy/utils/classes/loader.py | Python | mit | 1,706 | 0.005862 | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import importlib
class ClassLoader(object):
@staticmethod
def instantiate_class(class_string):
processor_path = class_string.strip()
logging.debug("Processor path [%s]", processor_path)
last_dot = processor_path.rfind(".")
module_path = processor_path[:last_dot]
class_name = processor_path[last_dot+1:]
logging.debug("Importing module [%s]", module_path)
imported_module = importlib.import_module(module_path)
logging.debug("Instantiating class [%s]", class_name)
new_class = getattr(imported_modul | e, class_name)
| return new_class
|
Cyber-Neuron/inception_v3 | inception/inception/data/build_image_test_data.py | Python | apache-2.0 | 15,171 | 0.007251 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If you data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('test_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('test_shards', 1,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 1,
'Number of threads to preprocess the images.')
# The labels file contains a list of valid labels ar | e held in th | is file.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'r') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data |
Vettejeep/Boulder_County_Home_Prices | plots.py | Python | gpl-3.0 | 5,261 | 0.00114 | # Plotting routines in Python
# requires data from Assemble_Data.py
# Copyright (C) 2017 Kevin Maher
# This progra | m is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR | PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Data for this project may be the property of the Boulder County Assessor's office,
# they gave me free access as a student but were not clear about any restrictions regarding
# sharing the URL from which the data was downloaded.
# The data has been pre-processed from xlsx to csv files because OpenOffice had
# problems with the xlsx files.
# Data was pre-processed by a data setup script, Assemble_Data.py which produced the
# file '$working_data_5c.csv'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
working_df = pd.read_csv('Data\\$working_data_5c.csv')
working_df = working_df[working_df['Age_Yrs'] > 0]
working_df = working_df[working_df['totalActualVal'] <= 2000000]
# y = working_df['price']
# X = working_df.drop(labels=['price'], axis=1)
# 70/30 split of data into training and test sets
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=245)
# finished sq ft vs price
# x_val = X_test['TotalFinishedSF']
# y_price = y_test
#
# print 'Max, X axis: %.2f' % np.max(x_val)
# print 'Min, X axis: %.2f' % np.min(x_val)
#
# plot home size vs price
# plt.figure(0)
# plt.plot(x_val, y_price, ".")
# plt.xlim(0, 7000)
# plt.ylim(0, 3500000)
# plt.xlabel("Home Sq. Ft.")
# plt.ylabel("Sales Price")
# plt.title("Finished Sq. Ft. to Sales Price")
# plt.show()
# plt.close()
# lot size vs price
# x_val = X_test['GIS_sqft']
# y_price = y_test
#
# print 'Max, X axis: %.2f' % np.max(x_val)
# print 'Min, X axis: %.2f' % np.min(x_val)
#
# plot lot vs. price
# plt.figure(0)
# plt.plot(x_val, y_price, ".")
# plt.xlim(0, 100000)
# plt.ylim(0, 3500000)
# plt.xlabel("Lot Size, Sq. Ft.")
# plt.ylabel("Sales Price")
# plt.title("Lot Size vs. Sales Price")
# plt.show()
# plt.close()
# home median age vs price
Age_Median_Price = working_df['price'].groupby(working_df['Age_Yrs']).median()
Age_Median_Price = Age_Median_Price[:60]
print Age_Median_Price
age = [x for x in range(1, 61)]
# plot age vs. price
# plt.figure(0)
# plt.plot(age, Age_Median_Price, "-")
# plt.xlim(0, 60)
# plt.ylim(0, 800000)
# plt.xlabel("Age, years")
# plt.ylabel("Median Sales Price")
# plt.title("Age vs. Median Sales Price")
# plt.show()
# plt.close()
Age_Count = working_df['price'].groupby(working_df['Age_Yrs']).count()
print Age_Count
Age_Count = Age_Count[:60]
# plt.figure(1)
# plt.plot(age, Age_Count, "-")
# plt.xlim(0, 60)
# plt.ylim(0, 500)
# plt.xlabel("Age, years")
# plt.ylabel("Count of Sales")
# plt.title("Age vs. Count of Sales")
# plt.show()
# plt.close()
median_home_size_by_year = working_df['TotalFinishedSF'].groupby(working_df['Age_Yrs']).median()
median_home_size_by_year = median_home_size_by_year[:60]
# check median size by year
# plt.figure(2)
# plt.plot(age, median_home_size_by_year, "-")
# plt.xlim(0, 60)
# plt.ylim(0, 3000)
# plt.xlabel("Age, years")
# plt.ylabel("Median Home SF")
# plt.title("Age vs. Median Home SF")
# plt.show()
# plt.close()
#
print np.median(working_df['TotalFinishedSF'])
print min(working_df['TotalFinishedSF'])
# effective age in years vs price
Age_Median_Price = working_df['price'].groupby(working_df['Effective_Age_Yrs']).median()
Age_Median_Price = Age_Median_Price[:60]
print Age_Median_Price
age = [x for x in range(1, 61)]
# plot effective age vs. price
# effective age is age since last remodel
# plt.figure(3)
# plt.plot(age, Age_Median_Price, "-")
# plt.xlim(0, 60)
# plt.ylim(0, 800000)
# plt.xlabel("Effective Age, years")
# plt.ylabel("Median Sales Price")
# plt.title("Effective Age vs. Median Sales Price")
# plt.show()
# plt.close()
# main floor sf vs price
main_floor_sf = working_df['mainfloorSF']
price = working_df['price']
# plot main floor size vs price
# plt.figure(4)
# plt.plot(main_floor_sf, price, ".")
# plt.xlim(0, 6000)
# plt.ylim(0, 3500000)
# plt.xlabel("Main Floor, Sq. Ft.")
# plt.ylabel("Sales Price")
# plt.title("Main Floor Sq. Ft. vs. Sales Price")
# plt.show()
# plt.close()
# basement sf vs price
# basement_sf = working_df['bsmtSF']
# price = working_df['price']
#
# plot basement size vs. price
# plt.figure(5)
# plt.plot(basement_sf, price, ".")
# plt.xlim(0, 5000)
# plt.ylim(0, 3500000)
# plt.xlabel("Basement, Sq. Ft.")
# plt.ylabel("Sales Price")
# plt.title("Basement Sq. Ft. vs. Sales Price")
# plt.show()
# plt.close()
print 'done'
|
dhermes/project-euler | python/complete/no071.py | Python | apache-2.0 | 971 | 0.00103 | #!/usr/bin/env python
# neighbors
# a/b < c/d
# need bc - ad = 1
# The converse is also true. If
# bc - ad = 1
# for positive integers | a,b,c and d with a < b and c < d then a/b and c/d
# will be neighbours in the Farey sequence of order max(b,d).
# By listing the set of reduced proper fractions for D <= 1,000,000 in
# ascending order of size, find the numerator of the fraction immediately
# to the left of 3/7.
#########################################################
# c = 3, | d = 7, 3b - 7a = 1
# 0 + 2a == 1 mod 3, a == 2 mod 3
# a = 3k + 2, b = 7k + 5
# a < b <==> 3k + 2 < 7k + 5, -3 < 4k, -0.75 < k, k >= 0
# a/b < 3/7 <==> 7a < 3b <==> 0 < 3b - 7a <==> ALWAYS
# gcd(a,b) = (3k+2,7k+5) = (3k+2,k+1) = (k,k+1) = 1
# b <= D
# 7k + 5 <= D
# k <= floor((D-5)/7)
from python.decorators import euler_timer
def main(verbose=False):
D = 10 ** 6
return 3 * int((D - 5) / 7.0) + 2
if __name__ == '__main__':
print euler_timer(71)(main)(verbose=True)
|
jjd27/xapi-storage-datapath-plugins | src/tapdisk/plugin.py | Python | lgpl-2.1 | 1,061 | 0.000943 | #!/usr/bin/env python
import os
import sys
import xapi
import xapi.storage.api.plugin
from xapi.storage import log
class Implementation(xapi.storage.api.plugin.Plugin_skeleton):
def query(self, dbg):
return {
"plugin": "tapdisk",
"name": "The tapdisk user-space datapath plugin",
"description": ("This plugin manages and configures tapdisk"
| " instances backend by either raw or vhd"
" format files"),
"vendor": "Citrix",
"copyrig | ht": "(C) 2015 Citrix Inc",
"version": "3.0",
"required_api_version": "3.0",
"features": [
],
"configuration": {},
"required_cluster_stack": []}
if __name__ == "__main__":
log.log_call_argv()
cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation())
base = os.path.basename(sys.argv[0])
if base == "Plugin.Query":
cmd.query()
else:
raise xapi.storage.api.plugin.Unimplemented(base)
|
annarev/tensorflow | tensorflow/python/autograph/pyct/testing/basic_definitions.py | Python | apache-2.0 | 1,533 | 0.009132 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with basic entity definitions for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import with_statement # An extra future import for testing.
def simple_function(x):
"""Docstring."""
return x # comment
def nested_functions(x):
"""Docstring."""
def inner_fn(y):
return y
return inner_fn(x)
def function_with_print():
print('foo')
simple_lambda = lambda: None
class SimpleClass(object):
def simple_method(self):
return self
def method_with_print(self):
print('foo')
def function_with_multiline_call(x):
"""Docstring."""
return range(
x, |
x + 1,
)
def basic_decorator(f):
return f
# Fixture: a doubly-decorated function with a conditional return.
# (Fixed: a stray "|" artifact had corrupted the "return 1" line.)
@basic_decorator
@basic_decorator
def decorated_function(x):
  if x > 0:
    return 1
  return 2
|
gwtsa/gwtsa | examples/reads/test_knmidata.py | Python | mit | 1,974 | 0.00304 | """
@author: ruben
"""
import matplotlib.pyplot as plt
from pastas.read.knmi import KnmiStation
import numpy as np

# How to use it?
# data from a meteorological station
# (Fixed: corruption had split "plt" in the import line above.)
download = True
meteo = True
hourly = False

if hourly and not meteo:
    # Hourly series exist for meteorological stations only.
    raise ValueError('Hourly data is only available in meteorological stations')

if download:
    # download the data directly from the site of the KNMI
    if meteo:
        if hourly:
            knmi = KnmiStation.download(stns=260, start='2017', end='2018',
                                        interval='hourly')  # de bilt
        else:
            # https://www.knmi.nl/nederland-nu/klimatologie/daggegevens
            knmi = KnmiStation.download(stns=260, start='1970', end='1971')
            # de bilt
    else:
        # from a rainfall-station
        knmi = KnmiStation.download(stns=550, start='2018', end='2019',
                                    vars='RD')
else:
    # import the data from files
    if meteo:
        if hourly:
            # hourly data without locations
            knmi = KnmiStation.fromfile('../data/KNMI_Hourly.txt')
        else:
            # without locations, that was downloaded from the knmi-site
            knmi = KnmiStation.fromfile('../data/KNMI_NoLocation.txt')
            # use a file with locations:
            knmi = KnmiStation.fromfile('../data/KNMI_Bilt.txt')
    else:
        knmi = KnmiStation.fromfile('../data/KNMI_Akkrum.txt')

# plot rainfall (RD/RH) on the top axis and evaporation (EV24) below;
# skip series that are absent or entirely NaN.
f1, axarr = plt.subplots(2, sharex=True)
if 'RD' in knmi.data.columns and not np.all(np.isnan(knmi.data['RD'])):
    knmi.data['RD'].plot(ax=axarr[0])
    axarr[0].set_title(knmi.variables['RD'])
if 'RH' in knmi.data.columns and not np.all(np.isnan(knmi.data['RH'])):
    knmi.data['RH'].plot(ax=axarr[0])
    axarr[0].set_title(knmi.variables['RH'])
if 'EV24' in knmi.data.columns and not np.all(np.isnan(knmi.data['EV24'])):
    knmi.data['EV24'].plot(ax=axarr[1])
    axarr[1].set_title(knmi.variables['EV24'])
plt.show()
|
cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/setuptools/svn_utils.py | Python | mit | 18,879 | 0.002013 | import os
import re
import sys
from distutils import log
import xml.dom.pulldom
import shlex
import locale
import codecs
import unicodedata
import warnings
from setuptools.compat import unicode
from setuptools.py31compat import TemporaryDirectory
from xml.sax.saxutils import unescape
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from subprocess import Popen as _Popen, PIPE as _PIPE
#NOTE: Use of the command line options require SVN 1.3 or newer (December 2005)
# and SVN 1.3 hasn't been supported by the developers since mid 2008.
#subprocess is called several times with shell=(sys.platform=='win32')
#see the follow for more information:
# http://bugs.python.org/issue8557
# http://stackoverflow.com/questions/5658622/
# python-subprocess-popen-environment-path
def _run_command(args, stdout=_PIPE, stderr=_PIPE, encoding=None, stream=0):
    """Run *args* as a subprocess; return ``(returncode, decoded_output)``.

    *stream* selects which communicate() channel to return (0 = stdout,
    1 = stderr).  If the process cannot be launched (OSError), returns
    ``(1, '')`` instead of raising.
    """
    #regarding the shell argument, see: http://bugs.python.org/issue8557
    try:
        proc = _Popen(args, stdout=stdout, stderr=stderr,
                      shell=(sys.platform == 'win32'))
        data = proc.communicate()[stream]
    except OSError:
        return 1, ''
    # decode to text using the console encoding unless one was given
    data = decode_as_string(data, encoding)
    # communicate() already called wait(), so returncode is populated
    return proc.returncode, data
def _get_entry_schedule(entry):
    """Return the text content of an <entry>'s first <schedule> child."""
    node = entry.getElementsByTagName('schedule')[0]
    pieces = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE:
            pieces.append(child.nodeValue)
    return "".join(pieces)
def _get_target_property(target):
    """Return the text content of a <target>'s first <property> child."""
    prop = target.getElementsByTagName('property')[0]
    text_nodes = (n for n in prop.childNodes if n.nodeType == n.TEXT_NODE)
    return "".join(n.nodeValue for n in text_nodes)
def _get_xml_data(decoded_str):
    """Return *decoded_str* in the form the XML parser expects.

    Python 2's pulldom wants an encoded byte string; Python 3 takes text.
    """
    if sys.version_info < (3, 0):
        #old versions want an encoded string
        data = decoded_str.encode('utf-8')
    else:
        data = decoded_str
    return data
def joinpath(prefix, *suffix):
    """Join path parts, treating an empty or '.' prefix as "no prefix"."""
    if not prefix or prefix == '.':
        parts = suffix
    else:
        parts = (prefix,) + suffix
    return os.path.join(*parts)
def determine_console_encoding():
    """Best-effort guess of the console encoding.

    Tries the locale's preferred encoding, falls back to the default
    locale, and validates the result with codecs.lookup.  If nothing
    usable is found, returns "utf-8" on OS X and "US-ASCII" elsewhere;
    legacy OS X "mac-*" codecs are mapped to "utf-8".
    """
    encoding = None
    try:
        encoding = locale.getpreferredencoding()
        # some python versions / platforms report US-ASCII when they
        # cannot actually determine an encoding
        if not encoding or encoding == "US-ASCII":
            encoding = locale.getdefaultlocale()[1]
        if encoding:
            codecs.lookup(encoding)  # raises LookupError for bogus names
    except (locale.Error, LookupError):
        encoding = None
    is_osx = sys.platform == "darwin"
    if not encoding:
        return "utf-8" if is_osx else "US-ASCII"
    if encoding.startswith("mac-") and is_osx:
        # certain python versions returned mac-roman as a left-over of
        # earlier mac releases; modern OS X consoles are UTF-8
        return "utf-8"
    return encoding
# Computed once at import time; used as the default for decode_as_string.
_console_encoding = determine_console_encoding()
def decode_as_string(text, encoding=None):
    """
    Decode console or file output explicitly.

    The *text* parameter should be an encoded byte string; if it is
    already unicode no decode occurs.  If no *encoding* is given the
    console encoding determined above is used; pass an explicit encoding
    for SVN ``--xml`` output, which should be UTF-8 (SVN Issue 2938; the
    2007 Subversion DEV list discussion indicates the same).  The result
    is normalized to composed NFC form.
    """
    # text should be a byte string unless it is already unicode
    if encoding is None:
        encoding = _console_encoding
    if not isinstance(text, unicode):
        text = text.decode(encoding)
    text = unicodedata.normalize('NFC', text)
    return text
def parse_dir_entries(decoded_str):
    """Parse (path, kind) entries from a recursive ``svn info --xml``."""
    events = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
    found = []
    for event, node in events:
        if event != 'START_ELEMENT' or node.nodeName != 'entry':
            continue
        events.expandNode(node)
        # skip anything scheduled for deletion
        if _get_entry_schedule(node).startswith('delete'):
            continue
        found.append((node.getAttribute('path'), node.getAttribute('kind')))
    # the first entry is the root directory itself; callers do not want it
    return found[1:]
def parse_externals_xml(decoded_str, prefix=''):
    '''Parse a ``propget svn:externals --xml`` result.

    Returns the externals as paths joined onto each target's path, with
    *prefix* stripped from targets that live underneath it.
    '''
    # normalize/normcase once so the startswith comparison below is
    # case-consistent on case-insensitive filesystems
    prefix = os.path.normpath(prefix)
    prefix = os.path.normcase(prefix)
    doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str))
    externals = list()
    for event, node in doc:
        if event == 'START_ELEMENT' and node.nodeName == 'target':
            doc.expandNode(node)
            path = os.path.normpath(node.getAttribute('path'))
            if os.path.normcase(path).startswith(prefix):
                # drop "<prefix>/" so paths are relative to the prefix
                path = path[len(prefix)+1:]
            data = _get_target_property(node)
            #data should be decoded already
            for external in parse_external_prop(data):
                externals.append(joinpath(path, external))
    return externals  # externals relative to prefix (root stripped above)
def parse_external_prop(lines):
    """
    Parse the value of a retrieved svn:externals entry and return the
    external folder names as normalized paths.

    Possible token layouts (with quoting and backslash-escaping in later
    SVN versions):
        URL[@#] EXT_FOLDERNAME
        [-r#] URL EXT_FOLDERNAME
        EXT_FOLDERNAME [-r#] URL
    """
    externals = []
    for line in lines.splitlines():
        line = line.lstrip()  # there might be a "\ "
        if not line:
            continue
        if sys.version_info < (3, 0):
            #shlex handles NULLs just fine and shlex in 2.7 tries to encode
            #as ascii automatically
            line = line.encode('utf-8')
        line = shlex.split(line)
        if sys.version_info < (3, 0):
            line = [x.decode('utf-8') for x in line]
        # EXT_FOLDERNAME is either the first or last token depending on
        # where the URL falls: a last token with a URL scheme means the
        # folder name came first.
        if urlparse.urlsplit(line[-1])[0]:
            external = line[0]
        else:
            external = line[-1]
        external = decode_as_string(external, encoding="utf-8")
        externals.append(os.path.normpath(external))
    return externals
def parse_prop_file(filename, key):
    """Scan an svn K/V property file and return the value stored for *key*.

    The format alternates "K <len>" / "V <len>" header lines with payload
    blocks of exactly <len> bytes.  Returns the payload read when the "V"
    block following the matching "K" block is reached (or the last payload
    read when the key is absent, matching the historical behavior).
    """
    found = False
    data = ''
    with open(filename, 'rt') as f:
        # cannot iterate the file object directly: f.read() below moves
        # the position, so pull lines explicitly with readline()
        line = f.readline()
        while line != '':
            parts = line.split()
            if len(parts) == 2:
                kind, length = parts
                data = f.read(int(length))
                if kind == 'K' and data == key:
                    found = True
                elif kind == 'V' and found:
                    break
            line = f.readline()
    return data
class SvnInfo(object):
'''
Generic svn_info object. No has little knowledge of how to extract
information. Use cls.load to instatiate according svn version.
Paths are not filesystem encoded.
'''
@ | staticmethod
def get_svn_version():
# Temp config directory should be enough to check for repository
# This is needed because .svn always creates .subversion and
# some operating systems do not handle dot directory correctly.
# Real queries i | n real svn repos with be concerned with it creation
with TemporaryDirectory() as tempdir:
code, data = _run_command(['svn',
'--config-dir', tempdir,
'--version',
'--quiet'])
if code == 0 and data:
return data.strip()
else:
return ''
#svnversion return values (previous implementations return max revision)
# 4123:4168 mixed revision working copy
# 4168M |
tapo-it/odoo-addons-worktrail | addons_worktrail/tapoit_hr_project/wizard/tapoit_hr_project_sign_in_out.py | Python | agpl-3.0 | 10,543 | 0.00332 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 TaPo-IT (http://tapo-it.at) All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
#     You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
import logging
import time
# Module-level logger for this addon (fixed: stray "|" artifact in the line).
_logger = logging.getLogger(__name__)
class hr_so_project(orm.TransientModel):
    """Transient wizard backing the "sign out by task work" dialog.

    Collects a work description for the attendance interval being closed
    and logs it as a project.task.work line.
    """
    _name = 'hr.sign.out.task.work'
    _description = 'Sign Out By Task Work'
    _columns = {
        'task_id': fields.many2one('project.task', 'Task', required=True),
        'action_desc': fields.many2one('hr.action.reason', 'Action description'),
        'info': fields.char('Work Description', size=256, required=True),
        'date_start': fields.datetime('Starting Date', readonly=True),
        'date': fields.datetime('Closing Date', help="Keep empty for current time"),
        'name': fields.char('Employees name', size=32, required=True, readonly=True),
        'state': fields.related('emp_id', 'state', string='Current state', type='char', required=True, readonly=True),
        'pause': fields.boolean('Break'),
        'server_date': fields.datetime('Current Date', required=True, help="Local time on the server side", readonly=True),
        'emp_id': fields.many2one('hr.employee', 'Employee ID'),
        'start_attendance': fields.integer('Related Start Attendance ID'),
        'end_attendance': fields.integer('Related End Attendance ID'),
        'workcontext': fields.many2one('tapoit.hr.project.workcontext', 'Work Context'),
    }

    def _get_empid(self, cr, uid, context=None):
        """Defaults helper: data of the employee linked to the current user."""
        emp_obj = self.pool.get('hr.employee')
        emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if emp_ids:
            for employee in emp_obj.browse(cr, uid, emp_ids, context=context):
                return {'name': employee.name,
                        'state': employee.state,
                        'emp_id': emp_ids[0],
                        'server_date': time.strftime('%Y-%m-%d %H:%M:%S')}

    def _get_empid2(self, cr, uid, context=None):
        """Like _get_empid, but also fetch the latest attendance timestamp."""
        res = self._get_empid(cr, uid, context=context)
        cr.execute('SELECT name,action FROM hr_attendance WHERE employee_id=%s ORDER BY name DESC LIMIT 1', (res['emp_id'],))
        res['server_date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        date_start = cr.fetchone()
        if date_start:
            res['date_start'] = date_start[0]
        return res

    def default_get(self, cr, uid, fields_list, context=None):
        res = super(hr_so_project, self).default_get(cr, uid, fields_list, context=context)
        res.update(self._get_empid2(cr, uid, context=context))
        return res

    def _write(self, cr, uid, data, emp_id, context=None):
        """Create the project.task.work line for the closed interval."""
        context = dict(context or {})
        # Worked hours = (closing date, or "now" if empty) - starting date.
        hour = (time.mktime(time.strptime(data['date'] or time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')) -
                time.mktime(time.strptime(data['date_start'], '%Y-%m-%d %H:%M:%S'))) / 3600.0
        task_obj = self.pool.get('project.task.work')
        res = {
            'hours': hour,
            'date': str(data['date_start']),
            'user_id': uid,
            'name': data['info'],
            'task_id': data['task_id'].id,
            'start_attendance': data.start_attendance,
            'end_attendance': data.end_attendance,
            'workcontext': data.workcontext.id,
        }
        return task_obj.create(cr, uid, res, type=context['type'], context=context)

    def sign_out_break(self, cr, uid, ids, context=None):
        """Sign out, flagging the attendance as a break."""
        context = dict(context or {})
        context['pause'] = True
        return self.sign_out_result_end(cr, uid, ids, context)

    def sign_out_result_end(self, cr, uid, ids, context=None):
        context = dict(context or {})
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            cr.execute('SELECT id FROM hr_attendance WHERE employee_id=%s ORDER BY name DESC LIMIT 1', (emp_id,))
            data.start_attendance = (cr.fetchone() or (False,))[0]
            if 'pause' in context:
                data.pause = True
            context['data'] = data
            data.end_attendance = emp_obj.attendance_action_change(cr, uid, [emp_id], action_type='sign_out', dt=data.date, context=context)
            context['type'] = 'sign_out'
            self._write(cr, uid, data, emp_id, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def sign_out_result(self, cr, uid, ids, context=None):
        # BUG FIX: normalize context like the sibling methods do; the
        # original crashed on context['type'] below when context was None.
        context = dict(context or {})
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            cr.execute('SELECT id FROM hr_attendance WHERE employee_id=%s ORDER BY name DESC LIMIT 1', (emp_id,))
            data.start_attendance = (cr.fetchone() or (False,))[0]
            # NOTE(review): passing context=data (not context) mirrors the
            # original code; looks suspicious -- confirm before changing.
            data.end_attendance = emp_obj.attendance_action_change(cr, uid, [emp_id], action_type='action', dt=data.date, context=data)
            context['type'] = 'action'
            self._write(cr, uid, data, emp_id, context=context)
        return {'type': 'ir.actions.act_window_close'}
hr_so_project()
class hr_si_project(orm.TransientModel):
_name = 'hr.sign.in.task.work'
_description = 'Sign In By Task Work'
_columns = {
'name': fields.char('Employees name', size=32, readonly=True),
'state': fields.related('emp_id', 'state', string='Current state', type='char', required=True, readonly=True),
'date': fields.datetime('Starting Date', help="Keep empty for current time"),
'server_date': fields.datetime('Current Date', readonly=True, help="Local time on the server side"),
'pause': fields.boolean('Break'),
'action_desc': fields.many2one('hr.action.reason', 'Action description'),
'workcontext': fields.many2one('tapoit.hr.project.workcontext', 'Work Context'),
'emp_id': fields.many2one('hr.employee', 'Employee ID')
}
    def get_emp_id(self, cr, uid, context=None):
        """Return the hr.employee ids linked to *uid*; raise unless exactly one."""
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if not emp_id:
            raise orm.except_orm(_('UserError'), _('No employee defined for your user !'))
        if len(emp_id) > 1:
            raise orm.except_orm(_('UserError'), _('More than one employee defined for this user! Please correct this issue'))
        return emp_id
def _get_pause_state(self, cr, uid, context={}):
emp_id = self.get_emp_id(cr, uid)
pause = False
res = cr.execute('SELECT pause FROM hr_attendance WHERE employee_id=%s AND action IN (\'sign_in\',\'sign_out\') ORDER BY name DESC LIMIT 1', emp_id)
result = cr.fetchone()
if result is not None:
pause = result[0]
return pause
_defaults = {
'emp_id': get_emp_id,
'pause': _get_pause_state,
}
def check_state(self, cr, uid, field_list, context=None):
context = dict(context or {})
# _logger.info("Fields: %s | Context: %s", field_list, context)
res = self.default_get(cr, uid, field_list, context)
emp_id = res['emp_id']
in_out = (res['state'] == 'absent') and 'out' or 'in'
# get the latest action (sign_in or out) for this employee
cr.execute( |
hendrikjeb/Euler | 07.py | Python | mit | 364 | 0.021978 | # -*- | coding: utf-8 -*-
"""
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13,
we can see that the 6th prime is 13.
What is the 10 001st prime number?
"""
from time import time
s = time()
x = 3
priem = [2, 3]
while len(priem) < 10001:
x += 2
for y in | priem:
if x % y == 0:
break
else:
priem.append(x)
print priem[-1]
print time() - s |
ktan2020/legacy-automation | win/Lib/site-packages/docutils/writers/manpage.py | Python | mit | 35,646 | 0.001964 | # -*- coding: utf-8 -*-
# $Id: manpage.py 7485 2012-07-06 08:17:28Z grubert $
# Author: Engelbert Gruber <grubert@users.sourceforge.net>
# Copyright: This module is put into the public domain.
"""
Simple man page writer for reStructuredText.
Man pages (short for "manual pages") contain system documentation on unix-like
systems. The pages are grouped in numbered sections:
1 executable programs and shell commands
2 system calls
3 library functions
4 special files
5 file formats
6 games
7 miscellaneous
8 system administration
Man pages are written *troff*, a text file formatting system.
See http://www.tldp.org/HOWTO/Man-Page for a start.
Man pages have no subsection only parts.
Standard parts
NAME ,
SYNOPSIS ,
DESCRIPTION ,
OPTIONS ,
FILES ,
SEE ALSO ,
BUGS ,
and
AUTHOR .
A unix-like system keeps an index of the DESCRIPTIONs, which is accesable
by the command whatis or apropos.
"""
__docformat__ = 'reStructuredText'
import re
import docutils
from docutils import nodes, writers, languages
try:
import roman
except ImportError:
import docutils.utils.roman as roman
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
OPTION_LIST_INDENT = 7
BLOCKQOUTE_INDENT = 3.5
LITERAL_BLOCK_INDENT = 3.5
# Define two macros so man/roff can calculate the
# indent/unindent margins by itself
MACRO_DEF = (r""".
.nr rst2man-indent-level 0
.
.de1 rstReportMargin
\\$1 \\n[an-margin]
level \\n[rst2man-indent-level]
level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
-
\\n[rst2man-indent0]
\\n[rst2man-indent1]
\\n[rst2man-indent2]
..
.de1 INDENT
.\" .rstReportMargin pre:
. RS \\$1
. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin]
. nr rst2man-indent-level +1
.\" .rstReportMargin post:
..
.de UNINDENT
. RE
.\" indent \\n[an-margin]
.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]]
.nr rst2man-indent-level -1
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
""")
class Writer(writers.Writer):
    """docutils Writer that renders a document as a man page via Translator."""
    supported = ('manpage',)
    """Formats this writer supports."""
    output = None
    """Final translated form of `document`."""
    def __init__(self):
        writers.Writer.__init__(self)
        # Overridable hook: which NodeVisitor does the actual translation.
        self.translator_class = Translator
    def translate(self):
        # Walk the document tree once; the visitor accumulates roff output.
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
class Table(object):
    """Accumulates rows and cells, then renders a tbl(1) ``.TS``/``.TE`` block."""

    def __init__(self):
        self._rows = []
        self._options = ['center']
        self._tab_char = '\t'
        self._coldefs = []

    def new_row(self):
        """Start a new, initially empty row."""
        self._rows.append([])

    def append_separator(self, separator):
        """Append the separator for table head."""
        self._rows.append([separator])

    def append_cell(self, cell_lines):
        """Append one cell (a list of text lines) to the current row."""
        # drop a leading vertical-space request so it does not render a gap
        start = 1 if cell_lines and cell_lines[0] == '.sp\n' else 0
        self._rows[-1].append(cell_lines[start:])
        if len(self._coldefs) < len(self._rows[-1]):
            self._coldefs.append('l')

    def _minimize_cell(self, cell_lines):
        """Remove leading and trailing blank and ``.sp`` lines (in place)."""
        while cell_lines and cell_lines[0] in ('\n', '.sp\n'):
            del cell_lines[0]
        while cell_lines and cell_lines[-1] in ('\n', '.sp\n'):
            del cell_lines[-1]

    def as_list(self):
        """Render the collected table as a list of roff source lines."""
        out = ['.TS\n',
               ' '.join(self._options) + ';\n',
               '|%s|.\n' % ('|'.join(self._coldefs))]
        for row in self._rows:
            # each row = list of cells; each cell = list of lines
            out.append('_\n')  # rule above the row
            out.append('T{\n')
            last = len(row) - 1
            for idx, cell in enumerate(row):
                self._minimize_cell(cell)
                out.extend(cell)
                if not out[-1].endswith('\n'):
                    out[-1] += '\n'
                if idx < last:
                    out.append('T}' + self._tab_char + 'T{\n')
                else:
                    out.append('T}\n')
            out.append('_\n')  # rule below the row
        out.append('.TE\n')
        return out
class Translator(nodes.NodeVisitor):
""""""
words_and_spaces = re.compile(r'\S+| +|\n')
possibly_a_roff_command = re.compile(r'\.\w')
document_start = """Man page generated from reStructuredText."""
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode, document.reporter)
self.head = []
self.body = []
self.foot = []
self.section_level = 0
self.context = []
self.topic_class = ''
self.colspecs = []
self.compact_p = 1
self.compact_simple = None
# the list style "*" bullet or "#" numbered
self._list_char = []
# writing the header .TH and .SH NAME is postboned after
# docinfo.
self._docinfo = {
"title" : "", "title_upper": "",
"subtitle" : "",
"manual_section" : "", "manual_group" : "",
"author" : [],
"date" : "",
"copyright" : "",
"version" : "",
}
self._docinfo_keys = [] # a list to keep the sequence as in source.
self._docinfo_names = {} # to get name from text not normalized.
self._in_docinfo = None
self._active_table = None
self._in_literal = False
self.header_written = 0
self._line_block = 0
self.authors = []
self.section_level = 0
self._indent = [0]
# central definition of simple processing rules
# what to output on : visit, depart
# Do not use paragraph requests ``.PP`` because these set indentation.
# use ``.sp``. Remove superfluous ``.sp`` in ``astext``.
#
# Fonts are put on a stack, the top one is used.
# ``.ft P`` or ``\\fP`` pop from stack.
# ``B`` bold, ``I`` italic, ``R`` roman should be available.
# Hopefully ``C`` courier too.
self.defs = {
'indent' : ('.INDENT %.1f\n', '.UNINDENT\n'),
'definition_list_item' : ('.TP', ''),
'field_name' : ('.TP\n.B ', '\n'),
'literal' : ('\\fB', '\\fP'),
'literal_block' : ('.sp\n.nf\n.ft C\n', '\n.ft P\n.fi\n'),
'option_list_item' : ('.TP\n', ''),
'reference' : (r'\fI\%', r'\fP'),
'emphasis': ('\\fI', '\\fP'),
'strong' : ('\\fB', '\\fP'),
'term' : ('\n.B ', '\n'),
'title_reference' : ('\\fI', '\\fP'),
'topic-title' : ('.SS ',),
'sidebar-title' : ('.SS ',),
'problematic' : ('\n.nf\n', '\n.fi\n'),
}
# NOTE do not specify the newline before a dot-command, but ensure
# it is there.
def comment_begin(self, text):
"""Return commented version of the passed text WITHOUT end of
line/comment."""
prefix = '.\\" '
out_text = ''.join(
[(prefix + in_line + '\n')
for in_line in text.split('\n')])
return out_text
def comment(self, text):
"""Return commented version of the passed text."""
return self.comment_begin(text)+'.\n'
def ensure_eol(self):
"""Ensure the last line in body is terminated by new line."""
if self.body[-1][-1] != '\n':
self.body.append('\n')
def astext(self):
"""Return the final formatted document as a string."""
if not self.header_written:
# ensure we get a ".TH" as viewers require it.
self.head.append(self.header())
# filter body
for i in xrange(len(self.body)-1, 0, -1):
# remove superfluous vertical gaps.
if self.body[i] == '.sp\n':
if self.body[i - 1][:4] in ('.BI ','.IP '):
|
ivanxalie/learning_log | learning_logs/forms.py | Python | unlicense | 394 | 0.005076 | from django import forms
from .models import Topic, Entry
class TopicForm(forms.ModelForm):
    """Form for creating/editing a Topic; exposes only its text field.

    Fixed: stray "|" artifacts had corrupted the class name and model ref.
    """
    class Meta:
        model = Topic
        fields = ['text']
        labels = {'text': ''}  # suppress the auto-generated field label
class EntryForm(forms.ModelForm):
    """Form for a topic Entry; renders the text area 80 columns wide."""
    class Meta:
        model = Entry
        fields = ['text']
        labels = {'text': ''}  # suppress the auto-generated field label
        widgets = {'text': forms.Textarea(attrs={'cols': 80})}
|
m3zbaul/ai-search | simulated_annealing.py | Python | mit | 668 | 0.001497 | # coding=utf-8
import random
import math
import n_queen
def simulated_annealing_search(problem):
    """Simulated annealing over n-queens states; returns the best state found.

    Fixed: stray "|" artifacts had corrupted the successor call and a comment.
    """
    # annealing parameters
    alpha = 0.99    # geometric cooling factor
    T = 10000.0     # initial temperature
    T_min = 3.95    # stop once the temperature has cooled below this
    current = problem
    while T > T_min:
        T = T * alpha
        successor = n_queen.NQueenState.random_successor(current)
        E = current.heuristic_value - successor.heuristic_value
        if E > 0:
            # (old - new) > 0 is a ``good`` trade for n-queen (fewer conflicts)
            current = successor
        elif math.exp(E / T) > random.random():
            # occasionally accept a worse state to escape local minima
            current = successor
        if current.heuristic_value == 0:
            return current  # solved: no conflicts remain
    return current
|
vincentbetro/NACA-SIM | scripts/adaptationruns1.py | Python | gpl-2.0 | 9,065 | 0.046884 | #!/usr/bin/env python
#import sys and global var libraries, as well as option parser to make command line args
import os,sys
import glob
from optparse import OptionParser
#first, define system call with default retval for debugging
def mysystem(s, defaultretval=0):
    """Echo *s* and run it via os.system; in dry-run mode skip execution.

    The module-level ``dryrun`` flag acts as the effective debug switch;
    *defaultretval* is what a skipped command pretends to have returned.
    """
    global dryrun
    print(s)  # always show the command, even when not executing it
    if dryrun:
        return defaultretval
    return os.system(s)
#allows us to set up file names with all requisite info, including a default refinelevel of 0 and blank pre and post ext
#+03 gives signed three digit, auto filled integer
def format_name(case, order, mach, alpha, refinelevel=0, postext="", preext=""):
    """Build the canonical run-file name for a case/order/mach/alpha combo.

    ``%+03d`` yields a signed, zero-filled alpha (e.g. -03); *preext* is
    placed before *postext* at the end of the stem.
    """
    stem = "%s_%d_%0.2f_%+03d_%02d" % (case, order, mach, alpha, refinelevel)
    return stem + preext + postext
#writes file to rotate grids with multiline string
#writes file to rotate grids with multiline string
def write_smooth_file(fname, case, alpha):
    """Write the grid-rotation input deck for *case* at angle *alpha*.

    Fixed: the original leaked the file handle (never closed/flushed),
    which can leave a partially written deck for the external tool.
    """
    s = """%s_%+03d
0
1
0.25
100
1
1
%d
2 3
1.0
1.0e-14
""" % (case, alpha, alpha)
    with open(fname, "w") as f:
        f.write(s)
#writes opt smooth file
def write_smooth1_file(fname,case,order,mach,alpha,refinelevel):
f = open(fname,"w")
#allows us to concatenate carriage return
f.write(format_name(case,order,mach,alpha,refinelevel)+"\n")
s = """0
2
500
1
1
0
2 3
1.0e-6
"""
f.write(s)
#writes subdiv file, always sets output one level higher
def write_subdiv_file(fname,case,order,mach,alpha,refinelevel):
f = open(fname,"w")
f.write(format_name(case,order,mach,alpha,refinelevel+1)+"\n")
s = """1
3
5
0
1.0
1.0
1.0
5.0
5.0
5.0
2.0
2.0
2.0
1
"""
f.write(s)
#writes euler file
def write_euler_file(fname,case,alpha,mach,order,cfl,ptiter,refinelevel,extension="",path=""):
f = open(fname,"w")
s = """%d
%0.2f
1.4
"""%(alpha,mach)
f.write(s)
f.write("%s%s"%(path,format_name(case,order,mach,alpha,refinelevel,".mesh",extension)) + "\n")
s = """2 3
1
1.0
%d
100
%d
%d
10000
1.0
1.0e-15
"""%(cfl,order,ptiter)
f.write(s)
def main():
global dryrun
parser = OptionParser()
parser.add_option("--grids",action="store_true",dest="grids",default=False,help="Generates only initial grids at all alphas. Parallel must be set to 0.")
parser.add_option("--dryrun",action="store_true",dest="dryrun",default=False,help="Just print the commands; do not execute them.")
parser.add_option("--case",dest="case",default="naca0012",help="Original meshfile name, without extension.")
parser.add_option("--parallel",dest="parallel",default="0",help="Splits job into 21 separate jobs. Each must be given proc number from 1 to 21. Zero may only be used for generating grids.")
(options,args) = parser.parse_args()
#sets global variable to allow retvals to reflect debug and not execute
dryrun = options.dryrun
#if we set parallel to 0, runs all on one
#else, we need to split up parallel artifically (which could be done more automatically, but it is simple to do it this way too)
if options.parallel == "0":
alphas = range(-10,11)
if options.parallel == "3":
alpha = -8
if options.parallel == "4":
alpha = -7
if options.parallel == "5":
alpha = -6
if options.parallel == "6":
alpha = -5
if options.parallel == "7":
alpha = -4
if options.parallel == "8":
alpha = -3
if options.parallel == "9":
alpha = -2
if options.parallel == "10":
alpha = -1
if options.parallel == "11":
alpha = 0
if options.parallel == "12":
alpha = 1
if options.parallel == "13":
alpha = 2
if options.parallel == "14":
alpha = 3
if options.parallel == "15":
alpha = 4
if options.parallel == "16":
alpha = 5
if options.parallel == "17":
alpha = 6
if options.parallel == "18":
alpha = 7
if options.parallel == "19":
alpha = 8
orders = [2]
machs = [0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25]
#allows us to get whole range, excluding last number, and inc by third value
cfls = range(50,550,50)
ptiters = range(20,220,20)
#always do grid run separate
if options.grids:
for alpha in alphas:
write_smooth_file("MYSMOOTH",options.case,alpha)
mysystem("./SMOOTH %s.mesh %s.mesh < MYSMOOTH > stdo.out"%(options.case,options.case))
for order in orders:
for mach in machs:
f1 = "%s_%+03d_01.dat"%(options.case,alpha)
f2 = "/ibrix-scr/vbetro/meshes/%s"%format_name(options.case,order,mach,alpha,0,".dat")
mysystem("cp %s %s"%(f1,f2))
f1 = "%s_%+03d_01.mesh"%(options.case,alpha)
f2 = "/ibrix-scr/vbetro/meshes/%s"%format_name(options.case,order,mach,alpha,0,".mesh")
mysystem("cp %s %s"%(f1,f2))
#now, remove all .dat and deprecated mesh files
mysystem("rm -f *.dat *_01.mesh")
sys.exit(1)
#need to artifically set refinelevel
refinelevel = 1
#now, loop over all parameters and do all three adaptation runs for each
for order in orders:
for mach in machs:
for cfl in cfls:
for ptiter in ptiters:
write_euler_file("MYSOLVER%s"%options.parallel,options.case,alpha,mach,order,cfl,ptiter,refinelevel,"","/ibrix-scr/vbetro/meshes/")
result = mysystem("./EULER < MYSOLVER%s > stdo.out"%options.parallel)
#need to signify went fine without 1st 2nd switch
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_00.dat")
mysystem("mv %s %s"%(f,newf))
#if we did not get results 2nd order, we do first then second and reappend name
if result==0 and order==2:
mysystem("rm -f *_%d_%+03d_%0.2f_%03d_%03d_%02d_00.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
write_euler_file("MYSOLVER%s"%options.parallel,options.case,alpha,mach,1,cfl,ptiter,refinelevel,"","/ibrix-scr/vbetro/meshes/")
mysys | tem("./EULER < MYSO | LVER%s > stdo.out"%options.parallel)
mysystem("rm -f *_%d_%+03d_%0.2f_%03d_%03d_%02d.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
write_euler_file("MYSOLVER%s"%options.parallel,options.case,alpha,mach,order,cfl,ptiter,refinelevel,"_out")
result = mysystem("./EULER < MYSOLVER%s > stdo.out"%options.parallel)
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_12.dat")
mysystem("mv %s %s"%(f,newf))
if result==0:
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d*.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_nan.dat")
mysystem("mv %s %s"%(f,newf))
if result==-1:
files = glob.glob("*_%d_%+03d_%0.2f_%03d_%03d_%02d*.dat"%(order,alpha,mach,ptiter,cfl,refinelevel))
for f in files:
newf = f.replace(".dat","_uncvg.dat")
mysystem("mv %s %s"%(f,newf))
#d = "/tmp/vbetro/order%d/mach%0.2f/alpha%+03d"%(order,mach,alpha)
#mysystem("mkdir -p " + d)
#mysystem("mv *_%d_%+03d_%0.2f_%03d_%03d_%02d*.dat"%(order,alpha,mach,ptiter,cfl,refinelevel) + d)
if result==1 and refinelevel < 2:
write_subdiv_file("MYSUBDIV%s"%options.parallel,options.case,order,mach,alpha,refinelevel)
fname = format_name(options.case,order,mach,alpha,refinelevel,".mesh","_out")
|
iCarto/siga | extScripting/scripts/jython/Lib/xml/dom/html/HTMLDocument.py | Python | gpl-3.0 | 11,605 | 0.004222 | ########################################################################
#
# File Name: HTMLDocument.py
#
# Documentation: http://docs.4suite.com/4DOM/HTMLDocument.py.html
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import Node
from xml.dom import NotSupportedErr
from xml.dom.Document import Document
from xml.dom import implementation
from xml.dom import ext
import string, sys
from xml.dom.html import HTML_DTD
class HTMLDocument(Document):
    def __init__(self):
        """Initialize an HTML document with browser-only fields fixed empty."""
        Document.__init__(self, None)
        # These only make sense in a browser environment, therefore
        # they never change.  Writing through __dict__ deliberately
        # bypasses name mangling and any property machinery.
        self.__dict__['__referrer'] = ''
        self.__dict__['__domain'] = None
        self.__dict__['__URL'] = ''
        self.__dict__['__cookie'] = ''
        self.__dict__['__writable'] = 0
        self.__dict__['_html'] = vars(sys.modules['xml.dom.html'])
### Attribute Methods ###
    def _get_URL(self):
        # Read-only in this non-browser context; set once in __init__.
        return self.__dict__['__URL']
def _get_anchors(self):
anchors = self.getElementsByTagName('A');
anchors = filter(lambda x: x._get_name(), anchors)
return implementation._4dom_createHTMLCollection(anchors)
def _get_applets(self):
al = self.getElementsByTagName('APPLET')
ol = self.getElementsByTagName('OBJECT')
ol = filter(lambda x: x._get_code(), ol)
return implementation._4dom_createHTMLCollection(al+ol)
def _get_body(self):
body = ''
#Try to find the body or FRAMESET
elements = self.getElementsByTagName('FRAMESET')
if not elements:
elements = self.getElementsByTagName('BODY')
if elements:
body = elements[0]
else:
#Create a body
body = self.createElement('BODY')
self.documentElement.appendChild(body)
return body
def _set_body(self, newBody):
elements = self.getElementsByTagName('FRAMESET')
if not elements:
elements = self.getElementsByTagName('BODY')
if elements:
# Replace the existing one
elements[0].parentNode.replaceChild(newBody, elements[0])
else:
# Add it
self.documentElement.appendChild(newBody)
def _get_cookie(self):
return self.__dict__['__cookie']
def _set_cookie(self, cookie):
self.__dict__['__cookie'] = cookie
def _get_domain(self):
return self.__dict__['__domain']
def _get_forms(self):
forms = self.getElementsByTagName('FORM')
return implementation._4dom_createHTMLCollection(forms)
def _get_images(self):
images = self.getElementsByTagName('IMG')
return implementation._4dom_createHTMLCollection(images)
def _get_links(self):
areas = self.getElementsByTagName('AREA')
anchors = self.getElementsByTagName('A')
links = filter(lambda x: x._get_href(), areas+anchors)
return implementation._4dom_createHTMLCollection(links)
def _get_referrer(self):
return self.__dict__['__referrer']
def _get_title(self):
elements = self.getElementsByTagName('TITLE')
if elements:
#Take the first
title = elements[0]
title.normalize()
if title.firstChild:
return title.firstChild.data
return ''
def _set_title(self, title):
# See if we can find the title
title_nodes = self.getElementsByTagName('TITLE')
if title_nodes:
title_node = title_nodes[0]
title_node.normalize()
if title_node.firstChild:
title_node.firstChild.data = title
return
else:
title_node = self.createElement('TITLE')
self._4dom_getHead().appendChild(title_node)
text = self.createTextNode(title)
title_node.appendChild(text)
### Methods ###
def close(self):
self.__dict__['__writable'] = 0
def getElementsByName(self, elementName):
return self._4dom_getElementsByAttribute('*', 'NAME', elementName)
def open(self):
#Clear out the doc
self.__dict__['__referrer'] = ''
self.__dict__['__domain'] = None
self.__dict__['__url'] = ''
self.__dict__['__cookie'] = ''
self.__dict__['__writable'] = 1
def write(self, st):
if not self.__dict__['__writable']:
return
#We need to parse the string here
from xml.dom.ext.reader.HtmlLib import FromHTML
d = FromHtml(st, self)
if d != self:
self.appendChild(d)
def writeln(self, st):
st = st + '\n'
self.write(st)
def getElementByID(self, ID):
hc = self._4dom_getElementsByAttribute('*','ID',ID)
if hc.length != 0:
return hc[0]
return None
### Overridden Methods ###
def createElement(self, tagName):
return self._4dom_createHTMLElement(tagName)
def createAttribute(self, name):
return Document.createAttribute(self, string.upper(name))
def createCDATASection(*args, **kw):
raise NotSupportedErr()
def createEntityReference(*args, **kw):
raise NotSupportedErr()
def createProcessingInstruction(*args, **kw):
raise NotSupportedErr()
def _4dom_createEntity(*args, **kw):
raise NotSupportedErr()
def _4dom_createNotation(*args, **kw):
raise NotSupportedErr()
### Internal Methods ###
def _4dom_getElementsByAttribute(self, tagName, attribute, attrValue=None):
nl = self.getElementsByTagName(tagName)
hc = implementation._4dom_createHTMLCollection()
for elem in nl:
attr = elem.getAttribute(attribute)
if attrValue == None and attr != '':
hc.append(elem)
elif attr == attrValue:
hc.append(elem)
return hc
def _4dom_getHead(self):
nl = self.getElementsByTagName('HEAD')
if not nl:
head = self.createElement('HEAD')
#The head goes in front of the body
body = self._get_body()
self.documentElement.insertBefore(head, body)
else:
head = nl[0]
return head
    def _4dom_createHTMLElement(self, tagName):
        """Instantiate the concrete HTML*Element class for *tagName*.

        Raises TypeError for tags not present in HTML_DTD.  Tags listed
        in NoClassTags (presumably those with no specialized class --
        confirm in xml.dom.html) fall back to the generic HTMLElement.
        """
        lowered = string.lower(tagName)
        if not HTML_DTD.has_key(lowered):
            raise TypeError('Unknown HTML Element: %s' % tagName)
        if lowered in NoClassTags:
            from HTMLElement import HTMLElement
            return HTMLElement(self, tagName)
        #FIXME: capitalize() broken with unicode in Python 2.0
        #normTagName = string.capitalize(tagName)
        capitalized = string.upper(tagName[0]) + lowered[1:]
        # HTMLTagMap maps tag spellings to class-name stems where the two
        # differ; otherwise the capitalized tag name is used directly.
        element = HTMLTagMap.get(capitalized, capitalized)
        module = 'HTML%sElement' % element
        if not self._html.has_key(module):
            #Try to import it (should never fail)
            __import__('xml.dom.html.%s' % module)
        # Class and module have the same name
        klass = getattr(self._html[module], module)
        return klass(self, tagName)
    def cloneNode(self, deep):
        """Return a copy of this document.

        Browser-state attributes are always copied.  When *deep* is
        true, the doctype and the document element (with its entire
        subtree) are cloned into the new document as well.
        """
        clone = HTMLDocument()
        clone.__dict__['__referrer'] = self._get_referrer()
        clone.__dict__['__domain'] = self._get_domain()
        clone.__dict__['__URL'] = self._get_URL()
        clone.__dict__['__cookie'] = self._get_cookie()
        if deep:
            if self.doctype is not None:
                # Cannot have any children, no deep needed
                dt = self.doctype.cloneNode(0)
                clone._4dom_setDocumentType(dt)
            if self.documentElement is not None:
                # The root element can have children, duh
                root = self.documentElement.cloneNode(1, newOwner=clone)
                clone.appendChild(root)
        return clone
def isXml(self):
return 0
def isHtml(self):
return 1
### Att |
chubbymaggie/simuvex | simuvex/procedures/cgc/receive.py | Python | bsd-2-clause | 3,563 | 0.003929 | import simuvex
from itertools import count
fastpath_data_counter = count()
class receive(simuvex.SimProcedure):
    """SimProcedure model of the DECREE/CGC ``receive`` syscall.

    Reads up to ``count`` bytes from file descriptor ``fd`` into memory
    at ``buf`` and, when ``rx_bytes`` is non-null, stores the number of
    bytes actually read there.  Behavior branches on the analysis mode
    ('fastpath') and on state options (CGC_NO_SYMBOLIC_RECEIVE_LENGTH,
    ABSTRACT_MEMORY).
    """
    #pylint:disable=arguments-differ,attribute-defined-outside-init,redefined-outer-name
    IS_SYSCALL = True
    def run(self, fd, buf, count, rx_bytes):
        # NOTE: the ``count`` parameter shadows the module-level
        # ``itertools.count`` import (hence the pylint disable above).
        if simuvex.options.CGC_ENFORCE_FD in self.state.options:
            # Force every receive onto fd 0 (stdin).
            fd = 0
        if self.state.mode == 'fastpath':
            # Special case for CFG generation
            if not self.state.se.symbolic(count):
                actual_size = count
                # Fill the buffer with fresh unconstrained data of the
                # requested size in bits.  ``.next()`` is Python 2 syntax.
                data = self.state.se.Unconstrained(
                    'receive_data_%d' % fastpath_data_counter.next(),
                    self.state.se.exactly_int(actual_size) * 8
                )
                self.state.memory.store(buf, data)
            else:
                actual_size = self.state.se.Unconstrained('receive_length', self.state.arch.bits)
            self.state.memory.store(rx_bytes, actual_size, endness='Iend_LE')
            return self.state.se.BVV(0, self.state.arch.bits)
        if CGC_NO_SYMBOLIC_RECEIVE_LENGTH in self.state.options:
            # rules for invalid
            # greater than 0xc0 or wraps around
            if self.state.se.max_int(buf + count) > 0xc0000000 or \
                    self.state.se.min_int(buf + count) < self.state.se.min_int(buf):
                return 2
            try:
                # Bit 1 of the page permissions is the write bit here.
                writable = self.state.se.any_int(self.state.memory.permissions(self.state.se.any_int(buf))) & 2 != 0
            except simuvex.SimMemoryError:
                # Unmapped address: treat as not writable.
                writable = False
            if not writable:
                # Error code 2 (invalid address per the CGC ABI -- confirm).
                return 2
            read_length = self.state.posix.read(fd, buf, count)
            self.state.memory.store(rx_bytes, read_length, condition=rx_bytes != 0, endness='Iend_LE')
            self.size = read_length
            return self.state.se.BVV(0, self.state.arch.bits)
        else:
            if ABSTRACT_MEMORY in self.state.options:
                actual_size = count
            else:
                # Symbolic read length, constrained to at most ``count``.
                actual_size = self.state.se.Unconstrained('receive_length', self.state.arch.bits)
                self.state.add_constraints(self.state.se.ULE(actual_size, count), action=True)
            if self.state.se.solution(count != 0, True):
                read_length = self.state.posix.read(fd, buf, actual_size)
                action_list = list(self.state.log.actions)
                try:
                    # get and fix up the memory write
                    action = next(
                        a for a in reversed(action_list) if
                        isinstance(a, SimActionData) and a.action == 'write' and a.type == 'mem'
                    )
                    action.size.ast = actual_size
                    action.data.ast = action.actual_value.ast
                    self.data = self.state.memory.load(buf, read_length)
                except StopIteration:
                    # the write didn't occur (i.e., size of 0)
                    self.data = None
            else:
                self.data = None
            self.size = actual_size
            self.state.memory.store(rx_bytes, actual_size, condition=rx_bytes != 0, endness='Iend_LE')
            # return values
            # 0 on success; 0xffffffff when nothing was available.
            return self.state.se.If(
                actual_size == 0,
                self.state.se.BVV(0xffffffff, self.state.arch.bits),
                self.state.se.BVV(0, self.state.arch.bits)
            )
from ...s_options import ABSTRACT_MEMORY, CGC_NO_SYMBOLIC_RECEIVE_LENGTH
from ...s_action import SimActionData
|
pyQode/pyqode-uic | setup.py | Python | mit | 958 | 0.001044 | from setuptools import setup
# Distribution metadata for the pyqode-uic package, which exposes the
# ``pyqode-uic`` and ``pyqode-rcc`` console-script entry points.
setup(
    name='pyqode-uic',
    version='0.1.1',
    py_modules=['pyqode_uic'],
    url='https://github.com/pyQode/pyqode-pyuic',
    license='MIT',
    author='Colin Duquesnoy',
    # NOTE(review): this looks like a truncated e-mail address (no
    # @domain part) -- confirm before releasing.
    author_email='colin.duquesnoy',
    description='pyQode Qt ui compiler',
    entry_points={
        'console_scripts': [
            'pyqode-uic = pyqode_uic:main_uic',
            'pyqode-rcc = pyqode_uic:main_rcc',
        ],
    },
    classifiers=[
        'Environment :: X11 Applications :: Qt',
        'Environment :: Win32 (MS Windows)',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ]
)
|
halfak/Difference-Engine | diffengine/synchronizers/xml_dump.py | Python | mit | 5,555 | 0.008461 | """
Assumptions:
* Revisions appear ordered by page ASC, timestamp ASC, rev_id ASC
* The max(rev_id) and max(timestamp) of revisions represents the last revision
chronologically captured by the dump
"""
import logging
import traceback
from mw.xml_dump import Iterator, map, open_file
from ..errors import RevisionOrderError
from ..types import ProcessorStatus, Revision, Timestamp, User
from .synchronizer import Synchronizer
logger = logging.getLogger("diffengine.synchronizers.xml_dump")
class XMLDump(Synchronizer):
    """Synchronizer that replays revisions from MediaWiki XML dump files.

    Relies on the ordering assumptions stated in the module docstring:
    revisions appear ordered by page, then timestamp, then rev_id.
    """
    def __init__(self, engine, store, paths, force_config=False, **map_kwargs):
        # *paths* may contain Path-like objects; normalize to strings for
        # mw.xml_dump's map()/open_file().  **map_kwargs are forwarded to
        # xml_dump.map() when several dumps are processed.
        super().__init__(engine, store, force_config=force_config)
        self.paths = [str(path) for path in paths]
        self.map_kwargs = map_kwargs
    def run(self):
        """Process all configured dumps, persisting revisions and statuses."""
        def _process_dump(dump, path):
            # Per-dump generator (possibly executed in a worker process by
            # xml_dump.map).  Yields three kinds of tuples, discriminated
            # by type in the consumer loop below:
            #   (Revision, None)         -- one processed revision
            #   (ProcessorStatus, title) -- a page finished processing
            #   (path:str, None)         -- the whole dump finished
            try:
                for page in dump:
                    logger.debug("Constructing new processor for {0}:{1}"\
                                 .format(page.namespace, page.title))
                    processor_status = self.store.processor_status.get(page.id,
                            type=self.engine.Processor.Status)
                    if processor_status is None:
                        processor_status = self.engine.Processor.Status(page.id)
                    processor = self.engine.processor(processor_status)
                    for rev in page:
                        # Revisions at or below last_rev_id were handled in
                        # a previous run; skip them for idempotent restarts.
                        if rev.id <= processor_status.last_rev_id:
                            logger.debug(
                                    "Skipping revision (already processed) " +\
                                    "{0}:{1}".format(rev.id, rev.timestamp))
                            continue
                        try:
                            user = User(rev.contributor.id,
                                        rev.contributor.user_text)
                            delta = processor.process(rev.id, rev.timestamp,
                                                      rev.text)
                            revision = Revision(rev.id, rev.timestamp, page.id,
                                                user, delta)
                            yield (revision, None)
                        except RevisionOrderError as e:
                            # Out-of-order revision: log and continue with
                            # the rest of the page rather than aborting.
                            logger.error(traceback.format_exc())
                            logger.info("Skipping revision (out of order) " + \
                                        "{0}:{1}".format(rev.id, rev.timestamp))
                    logger.debug("Finished processing page {0}:{1}"\
                                 .format(page.namespace, page.title))
                    yield (processor.status, page.title)
                logger.debug("Finished processing dump at {0}".format(path))
                yield (path, None)
            except Exception as e:
                logger.error(traceback.format_exc())
                raise
        engine_status = self.store.engine_status.get(type=self.engine.Status)
        if engine_status is None:
            logger.info("Starting {0} from scratch.".format(self.engine.info()))
            engine_status = self.engine.Status(self.engine.info())
        max_rev_id = 0
        max_timestamp = Timestamp(0)
        # Single dump: iterate inline; several dumps: fan out via map().
        if len(self.paths) == 1:
            dump = Iterator.from_file(open_file(self.paths[0]))
            rev_proc_or_paths = _process_dump(dump, self.paths[0])
        else:
            rev_proc_or_paths = map(self.paths, _process_dump,
                                    **self.map_kwargs)
        try:
            for rev_proc_or_path, meta in rev_proc_or_paths:
                if isinstance(rev_proc_or_path, Revision):
                    revision = rev_proc_or_path
                    self.store.revisions.store(revision)
                    # NOTE(review): stats/update go through self.status
                    # while the object persisted below is the local
                    # engine_status -- confirm the Synchronizer base
                    # class aliases self.status to it.
                    self.status.stats['revisions_processed'] += 1
                    max_rev_id = max(revision.rev_id, max_rev_id)
                    max_timestamp = max(revision.timestamp, max_timestamp)
                elif isinstance(rev_proc_or_path, ProcessorStatus):
                    processor_status = rev_proc_or_path
                    page_title = meta
                    logger.debug("Completed processing page " + \
                                 "{0}. {1}".format(
                                        page_title,
                                        processor_status.stats))
                    self.store.processor_status.store(processor_status)
                elif isinstance(rev_proc_or_path, str):
                    path = rev_proc_or_path
                    logger.info("Completed processing dump {0}".format(path))
                else:
                    raise RuntimeError(
                            "Did not expect a " + \
                            "{0}".format(type(rev_proc_or_path)))
            self.status.update(max_rev_id, max_timestamp)
            self.store.engine_status.store(engine_status)
        except Exception as e:
            logger.error(traceback.format_exc())
            raise
|
FreshetDMS/FDCapacityPlanner | tests/test_generator.py | Python | gpl-3.0 | 941 | 0.03932 | import unittest
from vsvbp.container import Item, Bin, Instance
from vsvbp.generator import generator
class ItemBinTestCase(unittest.TestCase):
    """Construction sanity checks for vsvbp Item/Bin/Instance objects."""
    def setUp(self):
        # Four items with 3-dimensional resource requirements and explicit
        # sizes; self.items deliberately lists them largest-size first.
        self.i1 = Item([1,2,9]); self.i2 = Item([4,5,3])
        self.i3 = Item([0,1,0]); self.i4 = Item([9,8,7])
        self.i1.size = 1; self.i2.size = 2; self.i3.size = 3; self.i4.size = 0;
        self.items = [self.i4, self.i3, self.i2, self.i1]
        # Three bins with matching 3-dimensional capacities and sizes.
        self.b1=Bin([5,8,4]); self.b2=Bin([100,0,100]); self.b3=Bin([1,2,9]);
        self.b1.size=1; self.b2.size=2; self.b3.size=3;
        self.bins = [self.b1,self.b2,self.b3]
        self.ins = Instance(self.items, self.bins)
    def testInstance(self):
        # Instance.__str__ is expected to list items, then bins, verbatim.
        assert str(self.ins)=="Items:\n"+str(self.items)+"\nBins:\n"+str(self.bins)
    # def testGenerator(self):
    #     iss=generator(2,2,.5,seed=0)
    #     assert iss.items[1].requirements==[356, 197]
    #     assert iss.bins[1].capacities == [516,411]
Spiderlover/Toontown | otp/friends/PlayerFriendsManager.py | Python | mit | 9,425 | 0.001167 | from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.otpbase import OTPGlobals
from otp.avatar.Avatar import teleportNotify
from otp.friends import FriendResponseCodes
class PlayerFriendsManager(DistributedObjectGlobal):
notify = directNotify.newCategory('PlayerFriendsManager')
def __init__(self, cr):
DistributedObjectGlobal.__init__(self, cr)
self.playerFriendsList = set()
self.playerId2Info = {}
self.playerAvId2avInfo = {}
self.accept('gotExtraFriendHandles', self.__handleFriendHandles)
def delete(self):
self.ignoreAll()
def sendRequestInvite(self, playerId):
print 'PFM sendRequestInvite id:%s' % playerId
self.sendUpdate('requestInvite', [0, playerId, True])
def sendRequestDecline(self, playerId):
self.sendUpdate('requestDecline', [0, playerId])
def sendRequestRemove(self, playerId):
self.sendUpdate('requestRemove', [0, playerId])
def sendRequestUnlimitedSecret(self):
self.sendUpdate('requestUnlimitedSecret', [0])
def sendRequestLimitedSecret(self, username, password):
self.sendUpdate('requestLimitedSecret', [0, username, password])
def sendRequestUseUnlimitedSecret(self, secret):
pass
def sendRequestUseLimitedSecret(self, secret, username, password):
pass
def sendSCWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCTo', [0, recipientId, msgId])
def sendSCCustomWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCCustomTo', [0, recipientId, msgId])
def sendSCEmoteWhisper(self, recipientId, msgId):
self.sendUpdate('whisperSCEmoteTo', [0, recipientId, msgId])
def setTalkAccount(self, toAc, fromAc, fromName, message, mods, flags):
localAvatar.displayTalkAccount(fromAc, fromName, message, mods)
toName = None
friendInfo = self.getFriendInfo(toAc)
if friendInfo:
toName = friendInfo.playerName
elif toAc == localAvatar.DISLid:
toName = localAvatar.getName()
base.talkAssistant.receiveAccountTalk(None, None, fromAc, fromName, toAc, toName, message)
return
def invitationFrom(self, playerId, avatarName):
messenger.send(OTPGlobals.PlayerFriendInvitationEvent, [playerId, avatarName])
def retractInvite(self, playerId):
messenger.send(OTPGlobals.PlayerFriendRetractInviteEvent, [playerId])
def rejectInvite(self, playerId, reason):
messenger.send(OTPGlobals.PlayerFriendRejectInviteEvent, [playerId, reason])
def rejectRemove(self, playerId, reason):
messenger.send(OTPGlobals.PlayerFriendRejectRemoveEvent, [playerId, reason])
def secretResponse(self, secret):
print 'secretResponse %s' % secret
messenger.send(OTPGlobals.PlayerFriendNewSecretEvent, [secret])
def rejectSecret(self, reason):
print 'rejectSecret %s' % reason
messenger.send(OTPGlobals.PlayerFriendRejectNewSecretEvent, [reason])
def rejectUseSecret(self, reason):
print 'rejectUseSecret %s' % reason
messenger.send(OTPGlobals.PlayerFriendRejectUseSecretEvent, [reason])
def invitationResponse(self, playerId, respCode, context):
if respCode == FriendResponseCodes.INVITATION_RESP_DECLINE:
messenger.send(OTPGlobals.PlayerFriendRejectInviteEvent, [playerId, respCode])
elif respCode == FriendResponseCodes.INVITATION_RESP_NEW_FRIENDS:
pass
    def updatePlayerFriend(self, id, info, isNewFriend):
        """Add or refresh the locally cached record for player friend *id*.

        New friends are added to playerFriendsList/playerId2Info and a
        PlayerFriendAddEvent is broadcast.  Known friends get their info
        replaced; online/offline transitions additionally broadcast
        'playerOnline'/'playerOffline' and notify the talk assistant.
        """
        self.notify.warning('updatePlayerFriend: %s, %s, %s' % (id, info, isNewFriend))
        info.calcUnderstandableYesNo()
        # Normalize guest names from 'GuestNNN' to 'Guest NNN'.
        if info.playerName[0:5] == 'Guest':
            info.playerName = 'Guest ' + info.playerName[5:]
        if id not in self.playerFriendsList:
            self.playerFriendsList.add(id)
            self.playerId2Info[id] = info
            messenger.send(OTPGlobals.PlayerFriendAddEvent, [id, info, isNewFriend])
        elif id in self.playerId2Info:
            # Detect offline->online and online->offline transitions.
            if not self.playerId2Info[id].onlineYesNo and info.onlineYesNo:
                self.playerId2Info[id] = info
                messenger.send('playerOnline', [id])
                base.talkAssistant.receiveFriendAccountUpdate(id, info.playerName, info.onlineYesNo)
            elif self.playerId2Info[id].onlineYesNo and not info.onlineYesNo:
                self.playerId2Info[id] = info
                messenger.send('playerOffline', [id])
                base.talkAssistant.receiveFriendAccountUpdate(id, info.playerName, info.onlineYesNo)
            # Make sure we have avatar details for this friend's avatar.
            if not self.askAvatarKnownHere(info.avatarId):
                self.requestAvatarInfo(info.avatarId)
            self.playerId2Info[id] = info
            av = base.cr.doId2do.get(info.avatarId, None)
            if av is not None:
                av.considerUnderstandable()
            messenger.send(OTPGlobals.PlayerFriendUpdateEvent, [id, info])
        return
def removePlayerFriend(self, id):
if id not in self.playerFriendsList:
return
self.playerFriendsList.remove(id)
info = self.playerId2Info.pop(id, None)
if info is not None:
av = base.cr.doId2do.get(info.avatarId, None)
if av is not None:
av.considerUnderstandable()
messenger.send(OTPGlobals.PlayerFriendRemoveEvent, [id])
return
def whisperSCFrom(self, playerId, msg):
base.talkAssistant.receivePlayerWhisperSpeedChat(msg, playerId)
def isFriend(self, pId):
return self.isPlayerFriend(pId)
def isPlayerFriend(self, pId):
if not pId:
return 0
return pId in self.playerFriendsList
def isAvatarOwnerPlayerFriend(self, avId):
pId = self.findPlayerIdFromAvId(avId)
if pId and self.isPlayerFriend(pId):
return True
else:
return False
def getFriendInfo(self, pId):
return self.playerId2Info.get(pId)
def findPlayerIdFromAvId(self, avId):
for playerId in self.playerId2Info:
if self.playerId2Info[playerId].avatarId == avId:
if self.playerId2Info[playerId].onlineYesNo:
return playerId
return None
def findAvIdFromPlayerId(self, pId):
pInfo = self.playerId2Info.get(pId)
if pInfo:
retur | n pInfo.avatarId
else:
return None
return None
def findPlayerInfoFromAvId(self, avId):
playerId = self.findPlayerIdFromAvId(avId)
if playerId:
return self.getFriendInfo(playerId)
else:
return None
return None
def askAvatarOnline(self, avId):
returnValue = 0
| if avId in self.cr.doId2do:
returnValue = 1
if avId in self.playerAvId2avInfo:
playerId = self.findPlayerIdFromAvId(avId)
if playerId in self.playerId2Info:
playerInfo = self.playerId2Info[playerId]
if playerInfo.onlineYesNo:
returnValue = 1
return returnValue
def countTrueFriends(self):
count = 0
for id in self.playerId2Info:
if self.playerId2Info[id].openChatFriendshipYesNo:
count += 1
return count
def askTransientFriend(self, avId):
if (avId in self.playerAvId2avInfo) and (not base.cr.isAvatarFriend(avId)):
return 1
else:
return 0
def askAvatarKnown(self, avId):
if self.askAvatarKnownElseWhere(avId) or self.askAvatarKnownHere(avId):
return 1
else:
return 0
def askAvatarKnownElseWhere(self, avId):
if hasattr(base, 'cr'):
if base.cr.askAvatarKnown(avId):
return 1
return 0
def askAvatarKnownHere(self, avId):
if avId in self.playerAvId2avInfo:
return 1
else:
return 0
def requestAvatarInfo(self, avId):
if hasattr(base, 'cr'):
base.cr.queueRequestAvatarInfo(avId)
def __han |
khchine5/atelier | atelier/invlib/tasks.py | Python | bsd-2-clause | 23,664 | 0.001225 | # -*- coding: UTF-8 -*-
# Copyright 2013-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
This is the module that defines the invoke namespace.
It is imported by :func:`atelier.invlib.setup_from_tasks` which passes
it to :func:`invoke.Collection.from_module`.
"""
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import glob
import time
import datetime
import six
from datetime import timedelta
from atelier.utils import i2d
from babel.dates import format_date
from atelier import rstgen
from atelier.projects import load_projects
from unipath import Path
try:
from invoke import ctask as task #, tasks
# before version 0.13 (see http://www.pyinvoke.org/changelog.html)
except ImportError:
from invoke import task #, tasks
from invoke.exceptions import Exit
from invoke import run
import atelier
from atelier.utils import confirm, cd
from .utils import must_confirm
LASTREL_INFO = "Last PyPI release was %(filename)s \
(%(upload_time)s, %(downloads)d downloads)."
RELEASE_CONFIRM = """
This is going to officially release %(name)s %(version)s to PyPI.
It will fail if version %(version)s of %(name)s has previously been released.
Your `docs/changes.rst` should have a section about this version.
Your working directory should be clean (otherwise answer 'n' and run `inv ci`).
Are you sure?"""
def local(*args, **kwargs):  # probably no longer used
    """Call :func:`invoke.run` with `pty=True
    <http://www.pyinvoke.org/faq.html#why-is-my-command-behaving-differently-under-invoke-versus-being-run-by-hand>`_.

    This is useful e.g. to get colors in a terminal.
    """
    kwargs.update(pty=True)
    # kwargs.update(encoding='utf-8')
    run(*args, **kwargs)
def get_current_date(today=None):
    """Return *today* as a :class:`datetime.date`.

    When *today* is None, the current local date is returned.  Otherwise
    the value is converted via :func:`atelier.utils.i2d` (presumably an
    int-to-date conversion, e.g. ``20201231`` -- confirm in atelier.utils).
    """
    if today is None:
        # return datetime.datetime.utcnow()
        return datetime.date.today()
    return i2d(today)
def rmtree_after_confirm(p, batch=False):
    """Recursively delete directory *p*, asking first unless *batch* is true.

    Does nothing when *p* does not exist.
    """
    if p.exists():
        if batch or confirm(
                "OK to remove %s and everything under it?" % p.absolute()):
            p.rmtree()
def cleanup_pyc(p, batch=False): # no longer used
    """Remove ``.pyc`` files under *p* that have no matching ``.py`` source.

    Asks for confirmation per file unless *batch* is true.
    Thanks to oddthinking on http://stackoverflow.com/questions/2528283
    """
    for root, dirs, files in os.walk(p):
        pyc_files = [filename for filename in files if filename.endswith(".pyc")]
        py_files = set([filename for filename in files if filename.endswith(".py")])
        # A .pyc is "excess" when stripping the trailing 'c' does not yield
        # the name of any .py file in the same directory.
        excess_pyc_files = [pyc_filename for pyc_filename in pyc_files if pyc_filename[:-1] not in py_files]
        for excess_pyc_file in excess_pyc_files:
            full_path = os.path.join(root, excess_pyc_file)
            if batch or confirm("Remove excess file %s:" % full_path):
                os.remove(full_path)
def sphinx_clean(ctx, batch=False):
    """Delete all generated Sphinx files.

    Removes the output directory of every doc tree of the current
    project, asking for confirmation unless *batch* is true.
    """
    for b in atelier.current_project.get_doc_trees():
        rmtree_after_confirm(b.out_path, batch)
def py_clean(ctx, batch=False):
    """
    Delete :xfile:`.pyc` files, :xfile:`.eggs` and :xfile:`__cache__`
    directories under the project's root directory, plus any files
    matching ``ctx.cleanable_files`` patterns.  Asks for confirmation
    unless *batch* is true.
    """
    for root, dirs, files in os.walk(ctx.root_dir):
        for fn in files:
            if fn.endswith(".pyc"):
                full_path = os.path.join(root, fn)
                if batch or confirm("Remove file %s:" % full_path):
                    os.remove(full_path)
    # cleanup_pyc(ctx.root_dir, batch)
    # if atelier.current_project.main_package is not None:
    #     try:
    #         p = Path(atelier.current_project.main_package.__file__).parent
    #         cleanup_pyc(atelier.current_project.root_dir, batch)
    #     except AttributeError:
    #         # happened 20170310 in namespace package:
    #         # $ pywhich commondata
    #         # Traceback (most recent call last):
    #         #   File "<string>", line 1, in <module>
    #         # AttributeError: 'module' object has no attribute '__file__'
    #         pass
    # Remove every __pycache__ directory in the tree.
    for root, dirs, files in os.walk(ctx.root_dir):
        p = Path(root).child('__pycache__')
        rmtree_after_confirm(p, batch)
    # p = ctx.root_dir.child('tests')
    # if p.exists():
    #     cleanup_pyc(p, batch)
    p = ctx.root_dir.child('.eggs')
    if p.exists():
        rmtree_after_confirm(p, batch)
    # Collect files matching the project's cleanable_files glob patterns
    # and remove them in one confirmed batch.
    files = []
    for pat in ctx.cleanable_files:
        for p in glob.glob(os.path.join(ctx.root_dir, pat)):
            files.append(p)
    if len(files):
        if batch or confirm(
                "Remove {0} cleanable files".format(len(files))):
            for p in files:
                os.remove(p)
class RstFile(object):
    """Pair a local ``.rst`` source path with its published ``.html`` URL.

    *local_root* must support ``child(*parts)`` (a unipath Path);
    *parts* is the path of the document relative to both roots.
    """
    def __init__(self, local_root, url_root, parts):
        relative = "/".join(parts)
        self.path = local_root.child(*parts) + '.rst'
        self.url = "{0}/{1}.html".format(url_root, relative)
class MissingConfig(Exception):
    """Raised when a required ``config`` setting is absent from ``tasks.py``."""
    def __init__(self, name):
        message = "Must set `config.{0}` in `tasks.py`!".format(name)
        super(MissingConfig, self).__init__(message)
@task(name='test')
def run_tests(ctx):
    """Run the test suite of this project.

    Does nothing when the project has no :xfile:`setup.py`; otherwise
    delegates to ``setup.py test`` with a pty so test output is colored.
    """
    # assert os.environ['COVERAGE_PROCESS_START']
    if not ctx.root_dir.child('setup.py').exists():
        return
    # if ctx.root_dir.child('pytest.ini').exists():
    #     ctx.run('py.test', pty=True)
    # else:
    #     ctx.run(sys.executable + ' setup.py -q test', pty=True)
    ctx.run(sys.executable + ' setup.py -q test', pty=True)
@task(name='readme')
def write_readme(ctx):
    """Generate or update `README.txt` or `README.rst` file from `SETUP_INFO`. """
    if not atelier.current_project.main_package:
        return
    atelier.current_project.load_info()
    info = atelier.current_project.SETUP_INFO
    if not info.get('long_description'):
        return
    # if len(ctx.doc_trees) == 0:
    #     # when there are no docs, then the README file is manually maintained
    #     return
    # Git projects use reST READMEs; other VCSs get plain .txt.
    if ctx.revision_control_system == 'git':
        readme = ctx.root_dir.child('README.rst')
    else:
        readme = ctx.root_dir.child('README.txt')
    # for k in ('name', 'description', 'long_description', 'url'):
    #     if k not in env.current_project.SETUP_INFO:
    #         msg = "SETUP_INFO for {0} has no key '{1}'"
    #         raise Exception(msg.format(env.current_project, k))
    title = rstgen.header(1, "The ``{}`` package".format(info['name']))
    txt = """\
{title}
{long_description}
""".format(title=title, **info)
    if six.PY2:
        txt = txt.encode('utf-8')
    # Skip the write (and the confirmation prompt) when nothing changed.
    if readme.exists() and readme.read_file() == txt:
        return
    must_confirm("Overwrite %s" % readme.absolute())
    readme.write_file(txt)
    # Touch docs/index.rst -- presumably so the next doc build picks up
    # the regenerated README (confirm against the doc tree setup).
    docs_index = ctx.root_dir.child('docs', 'index.rst')
    if docs_index.exists():
        docs_index.set_times()
@task(write_readme, name='bd')
def build_docs(ctx, *cmdline_args):
    """Build docs. Build all Sphinx HTML doctrees for this project.

    Runs after :func:`write_readme` (declared as a pre-task) and
    forwards *cmdline_args* to each doc tree's build.
    """
    # print("Build docs for {}".format(atelier.current_project))
    for tree in atelier.current_project.get_doc_trees():
        tree.build_docs(ctx, *cmdline_args)
@task(name='clean')
def clean(ctx, batch=False):
    """Remove temporary and generated files.

    Combines :func:`sphinx_clean` and :func:`py_clean`; pass
    ``batch=True`` to skip the per-item confirmation prompts.
    """
    # def clean(ctx, *cmdline_args):
    sphinx_clean(ctx, batch)
    py_clean(ctx, batch)
    # clean_demo_caches()
@task(name='sdist')
def setup_sdist(ctx):
    """Create a source distribution.

    Skips projects without a name or version in SETUP_INFO.  The
    tarball is written to ``ctx.sdist_dir`` (a format string taking the
    project name as ``prj``).
    """
    atelier.current_project.load_info()
    info = atelier.current_project.SETUP_INFO
    if not info.get('version'):
        return
    if not info.get('name'):
        return
    show_pypi_status(ctx, False)
    dist_dir = ctx.sdist_dir.format(prj=info.get('name'))
    args = [sys.executable, "setup.py"]
    args += ["sdist", "--formats=gztar"]
    args += ["--dist-dir", dist_dir]
    ctx.run(' '.join(args), pty=True)
@task(name='release')
def pypi_release(ctx, notag=False):
"""
Publish a new version |
erochest/threepress-rdfa | bookworm/api/forms.py | Python | bsd-3-clause | 139 | 0.007194 | from dj | ango import forms
class APIUploadForm(forms.Form):
    """Form used by the API to accept an EPUB upload plus the caller's key."""
    # Uploaded EPUB file contents.
    epub_data = forms.FileField()
    # API key identifying the calling client.
    api_key = forms.CharField(max_length=255)
|
plotly/python-api | packages/python/plotly/plotly/validators/volume/surface/_pattern.py | Python | mit | 582 | 0.001718 | import _plotly_utils.basevalidators
class PatternValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for the ``volume.surface.pattern`` property.

    Accepts combinations of the flags A-E or the extras
    'all'/'odd'/'even'; defaults are supplied here but can be
    overridden through **kwargs.
    """
    def __init__(self, plotly_name="pattern", parent_name="volume.surface", **kwargs):
        super(PatternValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            extras=kwargs.pop("extras", ["all", "odd", "even"]),
            flags=kwargs.pop("flags", ["A", "B", "C", "D", "E"]),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
sunfishcode/cretonne | lib/cretonne/meta/isa/arm32/defs.py | Python | apache-2.0 | 417 | 0 | """
ARM 32-bit definitions.
Commonly used definitions.
"""
from __future__ import absolute_import
from cdsl.isa import TargetISA, CP | UMode
import base.instructions
from base.legalize import narrow
# The 32-bit ARM target ISA, built on the shared base instruction group.
ISA = TargetISA('arm32', [base.instructions.GROUP])
# CPU modes for 32-bit ARM and Thumb2.
A32 = CPUMode('A32', ISA)
T32 = CPUMode('T32', ISA)
# TODO: Refine these.
# Both modes currently legalize unsupported types via the `narrow` action.
A32.legalize_type(narrow)
T32.legalize_type(narrow)
|
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/nmt/utils/iterator_utils.py | Python | apache-2.0 | 10,400 | 0.006923 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "Lic | ense");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing perm | issions and
# limitations under the License.
# ==============================================================================
"""For loading data into NMT models."""
from __future__ import print_function
import tensorflow as tf
from mlperf_compliance import mlperf_log
from utils import vocab_utils
__all__ = ["get_iterator", "get_infer_iterator"]
# pylint: disable=g-long-lambda,line-too-long
def get_iterator(src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size,
sos,
eos,
random_seed,
num_buckets,
src_max_len=None,
tgt_max_len=None,
num_parallel_calls=4,
output_buffer_size=None,
skip_count=None,
num_shards=1,
shard_index=0,
reshuffle_each_iteration=True,
use_char_encode=False,
filter_oversized_sequences=False):
"""Function that returns input dataset."""
# Total number of examples in src_dataset/tgt_dataset
mlperf_log.gnmt_print(key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES,
value=4068191)
if not output_buffer_size:
output_buffer_size = batch_size * 100
if use_char_encode:
src_eos_id = vocab_utils.EOS_CHAR_ID
else:
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
mlperf_log.gnmt_print(key=mlperf_log.INPUT_SHARD, value=num_shards)
src_tgt_dataset = src_tgt_dataset.shard(num_shards, shard_index)
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.string_split([src]).values, tf.string_split([tgt]).values),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Filter zero length input sequences.
src_tgt_dataset = src_tgt_dataset.filter(
lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
# Filter oversized input sequences (542 examples are filtered).
if filter_oversized_sequences:
src_tgt_dataset = src_tgt_dataset.filter(
lambda src, tgt: tf.logical_and(tf.size(src) < src_max_len,
tf.size(tgt) < tgt_max_len))
if src_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src[:src_max_len], tgt),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
if tgt_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src, tgt[:tgt_max_len]),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Convert the word strings to ids. Word strings that are not in the
# vocab get the lookup table's default_value integer.
mlperf_log.gnmt_print(key=mlperf_log.PREPROC_TOKENIZE_TRAINING)
if use_char_encode:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.reshape(vocab_utils.tokens_to_bytes(src), [-1]),
tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),
num_parallel_calls=num_parallel_calls)
else:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.cast(src_vocab_table.lookup(src), tf.int32),
tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),
num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src,
tf.concat(([tgt_sos_id], tgt), 0),
tf.concat((tgt, [tgt_eos_id]), 0)),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Add in sequence lengths.
if use_char_encode:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt_in, tgt_out: (
src, tgt_in, tgt_out,
tf.to_int32(tf.size(src) / vocab_utils.DEFAULT_CHAR_MAXLEN),
tf.size(tgt_in)),
num_parallel_calls=num_parallel_calls)
else:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt_in, tgt_out: (
src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in)),
num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.cache()
# TODO(saeta): investigate shuffle_and_repeat.
src_tgt_dataset = src_tgt_dataset.shuffle(
output_buffer_size, random_seed,
reshuffle_each_iteration).repeat()
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(
tf.TensorShape([src_max_len]), # src
tf.TensorShape([tgt_max_len]), # tgt_input
tf.TensorShape([tgt_max_len]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.
padding_values=(
src_eos_id, # src
tgt_eos_id, # tgt_input
tgt_eos_id, # tgt_output
0, # src_len -- unused
0),
# For TPU, must set drop_remainder to True or batch size will be None
drop_remainder=True) # tgt_len -- unused
if num_buckets > 1:
def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):
"""Calculate bucket_width by maximum source sequence length."""
# Pairs with length [0, bucket_width) go to bucket 0, length
# [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length
# over ((num_bucket-1) * bucket_width) words all go into the last bucket.
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
# Bucket sentence pairs by the length of their source sentence and target
# sentence.
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(num_buckets, bucket_id))
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
batched_dataset = src_tgt_dataset.apply(
tf.contrib.data.group_by_window(
key_func=key_func, reduce_func=reduce_func, window_size=batch_size))
else:
batched_dataset = batching_func(src_tgt_dataset)
# Make_one_shot_iterator is not applicable here since we have lookup table.
# Instead return a tf.data.dataset and let TpuEstimator to initialize and make
# iterator out of it.
batched_dataset = batched_dataset.map(
lambda src, tgt_in, tgt_out, source_size, tgt_in_size: (
{"source": src,
"target_input": tgt_in,
"target_output": tgt_out,
"source_sequence_length": source_size,
"target_sequence_length": tgt_in_size}))
return batched_dataset
def get_infer_iterator(src_dataset,
|
darvid/reqwire | src/reqwire/__init__.py | Python | mit | 264 | 0 | """reqwire: wire up Python requirements with p | ip-tools."""
from __future__ import absolute_import
import pkg_resources
try: # pragma: no cover
__version__ = pkg_resources.get_distribution(__name__).version
except: # noqa: B901
__version__ = 'unkno | wn'
|
priyesingh/rijenpy | libs/robotremoteserver.py | Python | gpl-3.0 | 21,857 | 0.000229 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import Mapping
import inspect
import os
import re
import select
import signal
import sys
import threading
import traceback
if sys.version_info < (3,):
from SimpleXMLRPCServer import SimpleXMLRPCServer
from StringIO import StringIO
from xmlrpclib import Binary, ServerProxy
PY2, PY3 = True, False
else:
from io import StringIO
from xmlrpc.client import Binary, ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
PY2, PY3 = False, True
unicode = str
long = int
__all__ = ['RobotRemoteServer', 'stop_remote_server', 'test_remote_server']
__version__ = 'devel'
BINARY = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F]')
NON_ASCII = re.compile('[\x80-\xff]')
class RobotRemoteServer(object):
def __init__(self, library, host='127.0.0.1', port=8270, port_file=None,
allow_stop= | 'DEPRECATED', serve=True, allow_remote_stop=True):
"""Configure and start-up remote server.
:param library: Test library instance or module to host.
:param host: | Address to listen. Use ``'0.0.0.0'`` to listen
to all available interfaces.
:param port: Port to listen. Use ``0`` to select a free port
automatically. Can be given as an integer or as
a string.
:param port_file: File to write the port that is used. ``None`` means
no such file is written. Port file is created after
the server is started and removed automatically
after it has stopped.
:param allow_stop: DEPRECATED since version 1.1. Use
``allow_remote_stop`` instead.
:param serve: If ``True``, start the server automatically and
wait for it to be stopped.
:param allow_remote_stop: Allow/disallow stopping the server using
``Stop Remote Server`` keyword and
``stop_remote_server`` XML-RPC method.
"""
self._library = RemoteLibraryFactory(library)
self._server = StoppableXMLRPCServer(host, int(port))
self._register_functions(self._server)
self._port_file = port_file
self._allow_remote_stop = allow_remote_stop \
if allow_stop == 'DEPRECATED' else allow_stop
if serve:
self.serve()
def _register_functions(self, server):
server.register_function(self.get_keyword_names)
server.register_function(self.run_keyword)
server.register_function(self.get_keyword_arguments)
server.register_function(self.get_keyword_documentation)
server.register_function(self.stop_remote_server)
@property
def server_address(self):
"""Server address as a tuple ``(host, port)``."""
return self._server.server_address
@property
def server_port(self):
"""Server port as an integer.
If the initial given port is 0, also this property returns 0 until
the server is activated.
"""
return self._server.server_address[1]
def activate(self):
"""Bind port and activate the server but do not yet start serving.
:return Port number that the server is going to use. This is the
actual port to use, even if the initially given port is 0.
"""
return self._server.activate()
def serve(self, log=True):
"""Start the server and wait for it to be stopped.
:param log: When ``True``, print messages about start and stop to
the console.
Automatically activates the server if it is not activated already.
If this method is executed in the main thread, automatically registers
signals SIGINT, SIGTERM and SIGHUP to stop the server.
Using this method requires using ``serve=False`` when initializing the
server. Using ``serve=True`` is equal to first using ``serve=False``
and then calling this method.
In addition to signals, the server can be stopped with the ``Stop
Remote Server`` keyword and the ``stop_remote_serve`` XML-RPC method,
unless they are disabled when the server is initialized. If this method
is executed in a thread, then it is also possible to stop the server
using the :meth:`stop` method.
"""
self._server.activate()
self._announce_start(log, self._port_file)
with SignalHandler(self.stop):
self._server.serve()
self._announce_stop(log, self._port_file)
def _announce_start(self, log, port_file):
self._log('started', log)
if port_file:
with open(port_file, 'w') as pf:
pf.write(str(self.server_port))
def _announce_stop(self, log, port_file):
self._log('stopped', log)
if port_file and os.path.exists(port_file):
os.remove(port_file)
def _log(self, action, log=True, warn=False):
if log:
address = '%s:%s' % self.server_address
if warn:
print('*WARN*', end=' ')
print('Robot Framework remote server at %s %s.' % (address, action))
def stop(self):
"""Stop server."""
self._server.stop()
# Exposed XML-RPC methods. Should they be moved to own class?
def stop_remote_server(self, log=True):
if not self._allow_remote_stop:
self._log('does not allow stopping', log, warn=True)
return False
self.stop()
return True
def get_keyword_names(self):
return self._library.get_keyword_names() + ['stop_remote_server']
def run_keyword(self, name, args, kwargs=None):
if name == 'stop_remote_server':
return KeywordRunner(self.stop_remote_server).run_keyword(args, kwargs)
return self._library.run_keyword(name, args, kwargs)
def get_keyword_arguments(self, name):
if name == 'stop_remote_server':
return []
return self._library.get_keyword_arguments(name)
def get_keyword_documentation(self, name):
if name == 'stop_remote_server':
return ('Stop the remote server unless stopping is disabled.\n\n'
'Return ``True/False`` depending was server stopped or not.')
return self._library.get_keyword_documentation(name)
def get_keyword_tags(self, name):
if name == 'stop_remote_server':
return []
return self._library.get_keyword_tags(name)
class StoppableXMLRPCServer(SimpleXMLRPCServer):
allow_reuse_address = True
def __init__(self, host, port):
SimpleXMLRPCServer.__init__(self, (host, port), logRequests=False,
bind_and_activate=False)
self._activated = False
self._stopper_thread = None
def activate(self):
if not self._activated:
self.server_bind()
self.server_activate()
self._activated = True
return self.server_address[1]
def serve(self):
self.activate()
try:
self.serve_forever()
except select.error:
# Signals seem to cause this error with Python 2.6.
if sys.version_info[:2] > (2, 6):
raise
self.server_close()
if self._stopper_thread:
self._stopper_thread.join |
himanshu-dixit/oppia | core/domain/question_services_test.py | Python | apache-2.0 | 4,658 | 0.000859 | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import exp_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
(question_models,) = models.Registry.import_models([models.NAMES.question])
memcache_services = models.Registry.import_memcache_services()
class QuestionServicesUnitTest(test_utils.GenericTestBase):
"""Test the question services module."""
def setUp(self):
"""Before each individual test, create dummy user."""
super(QuestionServicesUnitTest, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
def test_add_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_id = 'dummy'
title = 'A Question'
question_data_schema_version = 1
collection_id = 'col1'
language_code = 'en'
question = question_domain.Question(
question_id, title, question_data, question_da | ta_schema_version,
collection_id, language_code)
question.validate()
question_model = question_services.add_question(self.owner_id, quest | ion)
model = question_models.QuestionModel.get(question_model.id)
self.assertEqual(model.title, title)
self.assertEqual(model.question_data, question_data)
self.assertEqual(model.question_data_schema_version,
question_data_schema_version)
self.assertEqual(model.collection_id, collection_id)
self.assertEqual(model.language_code, language_code)
def test_delete_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_id = 'dummy'
title = 'A Question'
question_data_schema_version = 1
collection_id = 'col1'
language_code = 'en'
question = question_domain.Question(
question_id, title, question_data, question_data_schema_version,
collection_id, language_code)
question.validate()
question_model = question_services.add_question(self.owner_id, question)
question_services.delete_question(self.owner_id, question_model.id)
with self.assertRaisesRegexp(Exception, (
'Entity for class QuestionModel with id %s not found' %(
question_model.id))):
question_models.QuestionModel.get(question_model.id)
def test_update_question(self):
state = exp_domain.State.create_default_state('ABC')
question_data = state.to_dict()
question_id = 'dummy'
title = 'A Question'
question_data_schema_version = 1
collection_id = 'col1'
language_code = 'en'
question = question_domain.Question(
question_id, title, question_data, question_data_schema_version,
collection_id, language_code)
question.validate()
question_model = question_services.add_question(self.owner_id, question)
change_dict = {'cmd': 'update_question_property',
'property_name': 'title',
'new_value': 'ABC',
'old_value': 'A Question'}
change_list = [question_domain.QuestionChange(change_dict)]
question_services.update_question(
self.owner_id, question_model.id, change_list, 'updated title')
model = question_models.QuestionModel.get(question_model.id)
self.assertEqual(model.title, 'ABC')
self.assertEqual(model.question_data, question_data)
self.assertEqual(model.question_data_schema_version,
question_data_schema_version)
self.assertEqual(model.collection_id, collection_id)
self.assertEqual(model.language_code, language_code)
|
dmccue/ansible | lib/ansible/plugins/lookup/file.py | Python | gpl-3.0 | 2,091 | 0.002869 | # (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with | Ansible. If n | ot, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if not isinstance(terms, list):
terms = [ terms ]
ret = []
if 'role_path' in variables:
basedir = variables['role_path']
else:
basedir = self._loader.get_basedir()
for term in terms:
# Special handling of the file lookup, used primarily when the
# lookup is done from a role. If the file isn't found in the
# basedir of the current file, use dwim_relative to look in the
# role/files/ directory, and finally the playbook directory
# itself (which will be relative to the current working dir)
lookupfile = self._loader.path_dwim_relative(basedir, 'files', term)
try:
if lookupfile:
contents, show_data = self._loader._get_file_contents(lookupfile)
ret.append(contents.rstrip())
else:
raise AnsibleParserError()
except AnsibleParserError:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
|
patilsangram/erpnext | erpnext/shopping_cart/product_info.py | Python | gpl-3.0 | 2,040 | 0.022549 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.shopping_cart.cart import _get_cart_quotation
from erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings \
import is_cart_enabled, get_shopping_cart_settings, show_quantity_in_website
from erpnext.utilities.product import get_price, get_qty_in_stock
@frappe.whitelist(allow_guest=True)
def get_product_info_for_website(item_code):
"""get product price / stock info for website"""
cart_quotation = _get_cart_quotation()
cart_settings = get_shopping_cart_settings()
price = get_price(
item_code,
cart_quotation.selling_price_list,
cart_settings.default_customer_group,
cart_settings.company
)
stock_status = get_qty_in_stock(item_code, "website_warehouse")
product_info = {
"price": price,
"stock_qty": stock_status.stock_qty,
"in_stock": stock_status.in_stock if stock_status.is_stock_item else 1,
"qty": 0,
"uom": frappe.db.get_value("Item", item_code, "stock_uom"),
"show_sto | ck_qty": show_quantity_in_website(),
"sales_uom": frappe.db.get_value("Item", item_code, "sales_uom")
}
if product_info["price"]:
if frappe.session.user != "Guest":
item = cart_quotation.get({"item_code": item_code})
| if item:
product_info["qty"] = item[0].qty
return {
"product_info": product_info,
"cart_settings": cart_settings
}
def set_product_info_for_website(item):
"""set product price uom for website"""
product_info = get_product_info_for_website(item.item_code)
if product_info:
item.update(product_info)
item["stock_uom"] = product_info.get("uom")
item["sales_uom"] = product_info.get("sales_uom")
if product_info.get("price"):
item["price_stock_uom"] = product_info.get("price").get("formatted_price")
item["price_sales_uom"] = product_info.get("price").get("formatted_price_sales_uom")
else:
item["price_stock_uom"] = ""
item["price_sales_uom"] = "" |
detuxsandbox/detux | core/sandbox.py | Python | mit | 8,692 | 0.009894 | # Copyright (c) 2015 Vikas Iyengar, iyengar.vikas@gmail.com (http://garage4hackers.com)
# Copyright (c) 2016 Detux Sandbox, http://detux.org
# See the file 'COPYING' for copying permission.
import pexpect
import paramiko
import time
from ConfigParser import ConfigParser
from hashlib import sha256
from magic import Magic
import os
import random
class Sandbox:
def __init__(self, config_path):
self.config = ConfigParser()
self.config.read(config_path)
self.default_cpu = self.config.get("detux", "default_cpu")
def execute(self, binary_filepath, platform, sandbox_id, interpreter = None):
sandbox_starttime = time.time()
sandbox_endtime = sandbox_starttime
vm_exec_time = self.config.getint("detux", "vm_exec_time")
qemu_command = self.qemu_commands(platform, sandbox_id)
pcap_folder = self.config.get("detux", "pcap_folder")
if not os.path.isdir(pcap_folder):
os.mkdir(pcap_folder)
ssh_host = self.config.get(platform+"-"+sandbox_id, "ip")
ssh_user = self.config.get(platform+"-"+sandbox_id, "user")
macaddr = self.config.get(platform+"-"+sandbox_id, "macaddr")
ssh_password = self.config.get(platform+"-"+sandbox_id, "password")
ssh_port = self.config.getint(platform+"-"+sandbox_id, "port")
pcap_command = "/usr/bin/dumpcap -i %s -P -w %s -f 'not ((tcp dst port %d and ip dst host %s) or (tcp src port %d and ip src host %s))'"
# A randomly generated sandbox filename
dst_binary_filepath = "/tmp/" + ("".join(chr(random.choice(xrange(97,123))) for _ in range(random.choice(range(6 | ,12)))))
sha256hash = sha256(open(binary_filepath, "rb").read()).hexdigest()
interpreter_path = { "python" : "/usr/bin/python", "perl" : "/usr/bin/perl", "sh" : "/bin/sh", "bash" : "/bin/bash" }
if qemu_command == None :
return {}
qemu_command += " -net nic,macaddr=%s -net tap -monitor stdio" % (macaddr,)
print qemu_command
qemu = pexpect.spawn(qemu_command)
| try:
qemu.expect("(qemu).*")
qemu.sendline("info network")
qemu.expect("(qemu).*")
ifname = qemu.before.split("ifname=", 1)[1].split(",", 1)[0]
qemu.sendline("loadvm init")
qemu.expect("(qemu).*")
pre_exec = {}
post_exec = {}
#pre_exec = self.ssh_execute(ssh_host, ssh_port, ssh_user, ssh_password, ["netstat -an", "ps aux"])
# Wait for the snapshot to be restored and then transfer the binary
time.sleep(5)
self.scp(ssh_host, ssh_port, ssh_user, ssh_password, binary_filepath, dst_binary_filepath)
print "[+] Binary transferred"
# Pre binary execution commands
pre_exec = self.ssh_execute(ssh_host, ssh_port, ssh_user, ssh_password, ["chmod +x %s" % (dst_binary_filepath,)])
# Start Packet Capture
pcap_filepath = os.path.join(pcap_folder, "%s_%d.cap" %(sha256hash,time.time(),))
pcapture = pexpect.spawn(pcap_command % (ifname, pcap_filepath, ssh_port, ssh_host, ssh_port, ssh_host))
print "[+] Packet Capture started"
# Wait for pcapture to start and then Execute the binary
time.sleep(5)
command_to_exec = dst_binary_filepath if interpreter == None else "%s %s" % (interpreter_path[interpreter], dst_binary_filepath,)
print "[+] Executing %s" % (command_to_exec,)
exec_ssh = self.ssh_execute(ssh_host, ssh_port, ssh_user, ssh_password, command_to_exec, True, False )
starttime = time.time()
while time.time() < starttime + vm_exec_time:
if not qemu.isalive():
vm_exec_time = 0
if qemu.isalive():
# Post binary execution commands
post_exec = self.ssh_execute(ssh_host, ssh_port, ssh_user, ssh_password, ["ps aux"])
try:
if exec_ssh <> None:
exec_ssh.close()
except Exception as e:
print "[+] Error while logging out exec_ssh: %s" % (e,)
qemu.sendline("q")
# Stop Packet Capture
if pcapture.isalive():
pcapture.close()
sandbox_endtime = time.time()
result = {'start_time' : sandbox_starttime, 'end_time' : sandbox_endtime, 'pcap_filepath' : pcap_filepath}
result['post_exec_result'] = post_exec
result['cpu_arch'] = platform
result['interpreter'] = interpreter
except Exception as e:
print "[-] Error:", e
if qemu.isalive():
qemu.close()
return {}
return result
def identify_platform(self, filepath):
filemagic = Magic()
filetype = ""
try:
filetype = filemagic.id_filename(filepath)
except Exception as e:
# certain version of libmagic throws error while parsing file, the CPU information is however included in the error in somecases
filetype = str(e)
# filemagic.close()
if "ELF 32-bit" in filetype:
if "ARM" in filetype:
return "ELF", "arm"
if "80386" in filetype:
return "ELF", "x86"
if ("MIPS" in filetype) and ("MSB" in filetype):
return "ELF", "mips"
if "MIPS" in filetype:
return "ELF", "mipsel"
if "PowerPC" in filetype:
return "ELF", "powerpc"
if "ELF 64-bit" in filetype:
if "x86-64" in filetype:
return "ELF", "x86-64"
return filetype, self.default_cpu
def qemu_commands(self, platform, sandbox_id):
if platform == "x86":
return "sudo qemu-system-i386 -hda qemu/x86/%s/debian_wheezy_i386_standard.qcow2 -vnc 127.0.0.1:1%s" % (sandbox_id, sandbox_id, )
if platform == "x86-64":
return "sudo qemu-system-x86_64 -hda qemu/x86-64/%s/debian_wheezy_amd64_standard.qcow2 -vnc 127.0.0.1:2%s" % (sandbox_id, sandbox_id,)
if platform == "mips":
return 'sudo qemu-system-mips -M malta -kernel qemu/mips/%s/vmlinux-3.2.0-4-4kc-malta -hda qemu/mips/%s/debian_wheezy_mips_standard.qcow2 -append "root=/dev/sda1 console=tty0" -vnc 127.0.0.1:3%s' % (sandbox_id, sandbox_id, sandbox_id,)
if platform == "mipsel":
return 'sudo qemu-system-mipsel -M malta -kernel qemu/mipsel/%s/vmlinux-3.2.0-4-4kc-malta -hda qemu/mipsel/%s/debian_wheezy_mipsel_standard.qcow2 -append "root=/dev/sda1 console=tty0" -vnc 127.0.0.1:4%s' % (sandbox_id, sandbox_id, sandbox_id, )
if platform == "arm":
return 'sudo qemu-system-arm -M versatilepb -kernel qemu/arm/%s/vmlinuz-3.2.0-4-versatile -initrd qemu/arm/%s/initrd.img-3.2.0-4-versatile -hda qemu/arm/%s/debian_wheezy_armel_standard.qcow2 -append "root=/dev/sda1" -vnc 127.0.0.1:5%s' % (sandbox_id, sandbox_id, sandbox_id, sandbox_id,)
return None
def ssh_execute(self, host, port, user, password, commands, noprompt = False, logout = True):
result = None
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(host, port=port, username=user, password=password)
if type(commands) == type(str()):
stdin, stdout, stderr = ssh.exec_command(commands, timeout=10)
if noprompt == False:
result = "".join(stdout.readlines())
if type(commands) == type(list()):
result = {}
for command in commands:
stdin, stdout, stderr = ssh.exec_command(command, timeout=10)
result[command] = "".join(stdout.readlines())
if logout:
ssh.close()
else:
return ssh # Return SSH object to logout later
except Exception as |
noironetworks/horizon | openstack_dashboard/dashboards/identity/users/forms.py | Python | apache-2.0 | 14,486 | 0 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from django.conf import settings
from django.forms import ValidationError
from django import http
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions as utils
from horizon.utils import validators
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
PROJECT_REQUIRED = api.keystone.VERSIONS.active < 3
class PasswordMixin(forms.SelfHandlingForm):
password = forms.RegexField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_password = forms.CharField(
label=_("Confirm Password"),
widget=forms.PasswordInput(render_value=False))
no_autocomplete = True
def clean(self):
'''Check to make sure password fields match.'''
data = super(forms.Form, self).clean()
if 'password' in data and 'confirm_password' in data:
if data['password'] != data['confirm_password']:
raise ValidationError(_('Passwords do not match.'))
return data
class BaseUserForm(forms.SelfHandlingForm):
def __init__(self, request, *args, **kwargs):
super(BaseUserForm, self).__init__(request, *args, **kwargs)
# Populate project choices
project_choices = []
# If the user is already set (update action), list only projects which
# the user has access to.
user_id = kwargs['initial'].get('id', None)
domain_id = kwargs['initial'].get('domain_id', None)
default_project_id = kwargs['initial'].get('project', None)
try:
if api.keystone.VERSIONS.active >= 3:
projects, has_more = api.keystone.tenant_list(
request, domain=domain_id)
else:
projects, has_more = api.keystone | .tenant_list(
request, user=user_id)
for project in sorted(projects, key=lambda p: p.name.lower()):
if project.enabled:
project_choices.append((project.id, project.name))
if not project_choices:
project_choices.insert(0, ('', _("No available projects")))
# TODO(david-lyle): if k | eystoneclient is fixed to allow unsetting
# the default project, then this condition should be removed.
elif default_project_id is None:
project_choices.insert(0, ('', _("Select a project")))
self.fields['project'].choices = project_choices
except Exception:
LOG.debug("User: %s has no projects", user_id)
class AddExtraColumnMixIn(object):
def add_extra_fields(self, ordering=None):
if api.keystone.VERSIONS.active >= 3:
# add extra column defined by setting
EXTRA_INFO = getattr(settings, 'USER_TABLE_EXTRA_INFO', {})
for key, value in EXTRA_INFO.items():
self.fields[key] = forms.CharField(label=value,
required=False)
if ordering:
ordering.append(key)
ADD_PROJECT_URL = "horizon:identity:projects:create"
class CreateUserForm(PasswordMixin, BaseUserForm, AddExtraColumnMixIn):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("User Name"))
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
email = forms.EmailField(
label=_("Email"),
required=False)
project = forms.ThemableDynamicChoiceField(label=_("Primary Project"),
required=PROJECT_REQUIRED,
add_item_link=ADD_PROJECT_URL)
role_id = forms.ThemableChoiceField(label=_("Role"),
required=PROJECT_REQUIRED)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, *args, **kwargs):
roles = kwargs.pop('roles')
super(CreateUserForm, self).__init__(*args, **kwargs)
# Reorder form fields from multiple inheritance
ordering = ["domain_id", "domain_name", "name",
"description", "email", "password",
"confirm_password", "project", "role_id",
"enabled"]
self.add_extra_fields(ordering)
self.fields = collections.OrderedDict(
(key, self.fields[key]) for key in ordering)
role_choices = [
(role.id, role.name) for role in
sorted(roles, key=lambda r: r.name.lower())
]
self.fields['role_id'].choices = role_choices
# For keystone V3, display the two fields in read-only
if api.keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
# For keystone V2.0, hide description field
else:
self.fields["description"].widget = forms.HiddenInput()
# We have to protect the entire "data" dict because it contains the
# password and confirm_password strings.
@sensitive_variables('data')
def handle(self, request, data):
domain = api.keystone.get_default_domain(self.request, False)
try:
LOG.info('Creating user with name "%s"', data['name'])
desc = data["description"]
if "email" in data:
data['email'] = data['email'] or None
# add extra information
if api.keystone.VERSIONS.active >= 3:
EXTRA_INFO = getattr(settings, 'USER_TABLE_EXTRA_INFO', {})
kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
else:
kwargs = {}
new_user = \
api.keystone.user_create(request,
name=data['name'],
email=data['email'],
description=desc or None,
password=data['password'],
project=data['project'] or None,
enabled=data['enabled'],
domain=domain.id,
**kwargs)
messages.success(request,
_('User "%s" was successfully created.')
% data['name'])
|
tschalch/pyTray | src/lib/reportlab/pdfgen/pycanvas.py | Python | bsd-3-clause | 12,353 | 0.006152 | # a Pythonesque Canvas v0.8
# Author : Jerome Alet - <alet@librelogiciel.com>
# License : ReportLab's license
#
# $Id: pycanvas.py,v 1.1 2006/05/26 19:19:48 thomas Exp $
#
__doc__ = """pycanvas.Canvas : a Canvas class which can also output Python source code.
pycanvas.Canvas class works exactly like canvas.Canvas, but you can
call str() on pycanvas.Canvas instances. Doing so will return the
Python source code equivalent to your own program, which would, when
run, produce the same PDF document as your original program.
Generated Python source code defines a doIt() function which accepts
a filename or file-like object as its first parameter, and an
optional boolean parameter named "regenerate".
The doIt() function will generate a PDF document and save it in the
file you specified in this argument. If the regenerate parameter is
set then it will also return an automatically generated equivalent
Python source code as a string of text, which you can run again to
produce the very same PDF document and the Python source code, which
you can run again... ad nauseam ! If the regenerate parameter is
unset or not used at all (it then defaults to being unset) then None
is returned and the doIt() function is much much faster, it is also
much faster than the original non-serialized program.
the reportlab/test/test_pdfgen_pycanvas.py program is the test suite
for pycanvas, you can do the following to run it :
First set verbose=1 in reportlab/rl_config.py
then from the command interpreter :
$ cd reportlab/test
$ python test_pdfgen_pycanvas.py >n1.py
this will produce both n1.py and test_pdfgen_pycanvas.pdf
then :
$ python n1.py n1.pdf >n2.py
$ python n2.py n2.pdf >n3.py
$ ...
n1.py, n2.py, n3.py and so on will be identical files.
they eventually may end being a bit different because of
rounding problems, mostly in the comments, but this
doesn't matter since the values really are the same
(e.g. 0 instead of 0.0, or .53 instead of 0.53)
n1.pdf, n2.pdf, n3.pdf and so on will be PDF files
similar to test_pdfgen_pycanvas.pdf.
Alternatively you can import n1.py (or n3.py, or n16384.py if you prefer)
in your own program, and then call its doIt function :
import n1
pythonsource = n1.doIt("myfile.pdf", regenerate=1)
Or if you don't need the python source code and want a faster result :
import n1
n1.doIt("myfile.pdf")
When the generated source code is run directly as an independant program,
then the equivalent python source code is printed to stdout, e.g. :
python n1.py
will print the python source code equivalent to n1.py
Why would you want to use such a beast ?
- To linearize (serialize?) a program : optimizing some complex
parts for example.
- To debug : reading the generated Python source code may help you or
the ReportLab team to diagnose problems. The generated code is now
clearly commented and shows nesting levels, page numbers, and so
on. You can use the generated script when asking for support : we
can see the results you obtain without needing your datas or complete
application.
- To create standalone scripts : say your program uses a high level
environment to generate its output (databases, RML, etc...), using
this class would give you an equivalent program but with complete
independance from the high level environment (e.g. if you don't
have Oracle).
- To contribute some nice looking PDF documents to the ReportLab website
without having to send a complete application you don't want to
distribute.
- ... Insert your own ideas here ...
- For fun because you can do it !
"""
import cStringIO
from reportlab.pdfgen import canvas
from reportlab.pdfgen import pathobject
from reportlab.pdfgen import textobject
PyHeader = '''#! /usr/bin/env python
#
# This code was entirely generated by ReportLab (http://www.reportlab.com)
#
import sys
from reportlab.pdfgen import pat | hobject
from reportlab.pdfgen import textobject
from reportlab.lib.colors import Color
def doIt(file, regenerate=0) :
"""Generates a PDF document, save it into file.
file : either a filename or a file-like | object.
regenerate : if set then this function returns the Python source
code which when run will produce the same result.
if unset then this function returns None, and is
much faster.
"""
if regenerate :
from reportlab.pdfgen.pycanvas import Canvas
else :
from reportlab.pdfgen.canvas import Canvas
'''
PyFooter = '''
# if we want the equivalent Python source code, then send it back
if regenerate :
return str(c)
if __name__ == "__main__" :
if len(sys.argv) != 2 :
# second argument must be the name of the PDF file to create
sys.stderr.write("%s needs one and only one argument\\n" % sys.argv[0])
sys.exit(-1)
else :
# we've got a filename, we can proceed.
print doIt(sys.argv[1], regenerate=1)
sys.exit(0)'''
def buildargs(*args, **kwargs) :
"""Constructs a printable list of arguments suitable for use in source function calls."""
arguments = ""
for arg in args :
arguments = arguments + ("%s, " % repr(arg))
for (kw, val) in kwargs.items() :
arguments = arguments+ ("%s=%s, " % (kw, repr(val)))
if arguments[-2:] == ", " :
arguments = arguments[:-2]
return arguments
class PDFAction :
"""Base class to fake method calls or attributes on PDF objects (Canvas, PDFPathObject, PDFTextObject)."""
def __init__(self, parent, action) :
"""Saves a pointer to the parent object, and the method name."""
self._parent = parent
self._action = action
def __getattr__(self, name) :
"""Probably a method call on an attribute, returns the real one."""
return getattr(getattr(self._parent._object, self._action), name)
def __call__(self, *args, **kwargs) :
"""The fake method is called, print it then call the real one."""
if not self._parent._parent._in :
self._precomment()
self._parent._parent._PyWrite(" %s.%s(%s)" % (self._parent._name, self._action, apply(buildargs, args, kwargs)))
self._postcomment()
self._parent._parent._in = self._parent._parent._in + 1
retcode = apply(getattr(self._parent._object, self._action), args, kwargs)
self._parent._parent._in = self._parent._parent._in - 1
return retcode
def __hash__(self) :
return hash(getattr(self._parent._object, self._action))
def __coerce__(self, other) :
"""Needed."""
return coerce(getattr(self._parent._object, self._action), other)
def _precomment(self) :
"""To be overriden."""
pass
def _postcomment(self) :
"""To be overriden."""
pass
class PDFObject :
"""Base class for PDF objects like PDFPathObject and PDFTextObject."""
_number = 0
def __init__(self, parent) :
"""Saves a pointer to the parent Canvas."""
self._parent = parent
self._initdone = 0
def __getattr__(self, name) :
"""The user's programs wants to call one of our methods or get an attribute, fake it."""
return PDFAction(self, name)
def __repr__(self) :
"""Returns the name used in the generated source code (e.g. 'p' or 't')."""
return self._name
def __call__(self, *args, **kwargs) :
"""Real object initialisation is made here, because now we've got the arguments."""
if not self._initdone :
self.__class__._number = self.__class__._number + 1
methodname = apply(self._postinit, args, kwargs)
self._parent._PyWrite("\n # create PDF%sObject number %i\n %s = %s.%s(%s)" % (methodname[5:], self.__class__._number, s |
fantasyy8/pythonintask | BITs/2014/KOSTAREV_A_I/task_10_13.py | Python | apache-2.0 | 3,987 | 0.016903 | #Задача №10, Вариант 13
#Напишите программу "Генератор персонажей" для игры.
#Пользователю должно быть предоставлено 30 пунктов,
#которые можно распределить между четырьмя
#характеристиками: Сила, Здоровье, Мудрость и Ловкость.
#Надо сделать так, чтобы пользователь мог не только брать
#эти пункты из общего "пула", но и возвращать их туда из
#характеристик, которым он решил присвоить другие значения.
#Костарев Андрей
#16.05.2016
POINT = 30
ochki = 30
person = {"Сила":"0","Здоровье":"0","Ум":"0","Ловкость":"0"}
points = 0
choice = None
while choice != 0:
print("""
0 - Выход
1 - Добавить пункты к характеристике
2 - Уменьшить пункты характеристики
3 - Просмотр характеристик
""")
choice = int(input("Choose option: "))
if choice == 1:
print("Пожалуйста, введите характеристику для добавления пунктов. Для изменения доступны", len(person), "характеристики:")
for item in person:
print(item)
char = str(input("\n:"))
char = char.title()
while char not in person:
print("Нет такой характеристики, проверьте введенные данные: ")
char = str(input("\n:"))
char = char.title()
else:
print("\nВведите количество пунктов для данной характеристики. У вас", ochki, "свободных пунктов")
points = int(input("\n:"))
while points > ochki or points < 0:
print("Вы не можете назначить такое количество пунктов", "Доступно", ochki, "свободных пунктов")
points = int(input("\n:"))
person[char] = points
print(points, "пунктов было добавлено к", char)
ochki -= points
elif choice == 2:
print("Пожалуйста, введите имя характеристики для снятия пунктов.", "Доступно изменение для: ")
for item in person:
if int(person[item]) > 0:
print(item)
char = str(input("\n:"))
char = char.title()
while char not in person:
print("Нет тако | й характеристики, проверьте введенные данные: ")
char = str(input("\n:"))
ch | ar = char.title()
else:
print("\nВведите количество пунктов для характеристики. Доступно", person[char], "пунктов:")
points = int(input("\n:"))
while points > int(person[char]) or points < 0:
print("Невозможно удалить такое количество пунктов. Доступно", person[char], "пунктов")
points = int(input("\n:"))
person[char] = points
print(points, "пунктов было удалено")
ochki += points
elif choice == 3:
print("\nХарактеристики героя")
for item in person:
print(item, "\t\t", person[item])
elif choice == 0:
print("Пока!")
else:
print("В меню нет такого пункта")
input("\n\nНажмите Enter, чтобы выйти.") |
sargas/scipy | scipy/sparse/setupscons.py | Python | bsd-3-clause | 656 | 0.010671 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
import sys
def configuration(parent_package='',top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('spa | rse',parent_package,top_path,
setup_name = 'setupscons.py')
config.add_data_dir('tests')
config.add_subpackage('linalg')
config.add_subpackage('sparsetools')
config.add_subpackage('csgraph')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setu | p(**configuration(top_path='').todict())
|
ModestoCabrera/is210-week-05-warmup | tests/test_task_02.py | Python | mpl-2.0 | 689 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests Task 02."""
# Import Python libs
import unittest
import hamlet
import task_02
class Task02TestCase(unittest.TestCase):
"""
Test cases for Task 02.
"""
def test_positional_value(self):
"""
Tests that the POSITIONAL constant has the expected value.
"""
monkeys = 4
hours = 100000
bananas = 98
banana_effect = bananas * hamlet.BANANA_MULTIPLIER
chance = (hours * | ((monkeys / hamlet.SHIFTS) + banana_effect))
chance /= hamlet.HAMLET_HOURS
self.assertEqual(task_02.POSITIONAL, chance)
if __name__ == '__ma | in__':
unittest.main()
|
GaryKriebel/osf.io | website/addons/dropbox/__init__.py | Python | apache-2.0 | 961 | 0.002081 | import os
from website.addons.dropbox import model, routes, views
MODELS = [model.DropboxUserSettings, model.DropboxNodeSettings, model.DropboxFile]
USER_SETTINGS_MODEL = model.DropboxUserSettings
NODE_SETTINGS_MODEL = model.DropboxNodeSettings
ROUTES = [routes.auth_routes, routes.api_routes]
SHORT_NAME = 'dropbox'
FULL_NAME = 'Dropbox'
OWNERS = ['user', 'node']
ADDED_DEFAULT = []
ADDED_MANDATORY = []
VIEWS = []
CONFIGS = ['user', 'node']
CATEGORIES = ['storage']
# TODO: Deprecate in favor of webpack/CommonJS bundles
INCLUDE_JS = {
'widget': [ | ],
'page': [],
'files': []
}
INCLUDE_CSS = {
'widget': [],
'page': [],
}
| HAS_HGRID_FILES = True
GET_HGRID_DATA = views.hgrid.dropbox_addon_folder
# MAX_FILE_SIZE = 5 # MB
HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = None # use default node settings template
USER_SETTINGS_TEMPLATE = os.path.join(HERE, 'templates', 'dropbox_user_settings.mako')
|
CivilHub/CivilHub | blog/forms.py | Python | gpl-3.0 | 1,065 | 0.003759 | # -*- coding: utf-8 -*-
from django import forms
from djan | go.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from | places_core.forms import BootstrapBaseForm
from .models import Category, News
class NewsForm(forms.ModelForm, BootstrapBaseForm):
""" Edit/update/create blog entry. """
title = forms.CharField(
label=_(u"Da tytuł"),
max_length=64,
widget=forms.TextInput(attrs={
'class': 'form-control',
'maxlength': '64',}))
tags = TagField(required=False, label= _(u"Tags"))
def clean_title(self):
title = self.cleaned_data['title']
return title
class Meta:
model = News
exclude = ('edited', 'slug', 'creator',)
widgets = {
'content': forms.Textarea(attrs={'class': 'form-control custom-wysiwyg'}),
'category': forms.Select(attrs={'class': 'form-control'}),
'location': forms.HiddenInput(),
'image': forms.ClearableFileInput(attrs={'class': 'civ-img-input', }),
}
|
patdaburu/mothergeo-py | mothergeo/db/postgis/__init__.py | Python | gpl-2.0 | 175 | 0.011429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
. | . currentmodule:: __ini | t__.py
.. moduleauthor:: Pat Daburu <pat@daburu.net>
Provide a brief description of the module.
""" |
chaosking121/mal | plaintext.py | Python | mit | 1,309 | 0.004584 | def interpret(slackMessage):
import random
import tools.settings
if ("@mal" in slackMessage.message.lower()):
return ".1.2.1."
elif (slackMessage.message.lower() == '{}, what is the meaning of life?'.format(tools.settings.getName()).lower()):
from tools.file_handling import randomMessage
return randomMessage('life')
elif (slackMessage.message.lower() == '{}, are you sentient?'.format(tools.settings.getName()).lower()):
return "...next question."
| elif (slackMessage.message.lower() == '{}, how are you?'.format(tools.settings.getName()).lower()):
from tools.classes import Conversation
return Conversation(slackMessage.user, slackMessage.channel, "I'm fine. How about you?", [('tools. | arshad', 'greeting')])
elif ('*her*' in slackMessage.message):
return "*her*"
elif ('*she*' in slackMessage.message):
return"*she*"
elif ('superhot' in slackMessage.message.lower()):
return "SUPERHOT IS THE MOST INNOVATIVE SHOOTER I'VE PLAYED IN YEARS!"
elif ((('thank you') in slackMessage.message.lower()) and (tools.settings.getName().lower() in slackMessage.message.lower()) and (slackMessage.user == tools.settings.getOwner())):
return ":heart:"
else:
return "" |
PGower/Unsync | unsync_timetabler/unsync_timetabler/dof9/emergency_teacher_import.py | Python | apache-2.0 | 2,918 | 0.007882 | """Timetabler DOF9 import functions."""
import unsync
import petl
@unsync.command()
@unsync.option('--input-file', '-i', type=unsync.Path(exists=True, dir_okay=False, readable=True, resolve_path=True), help='Timetabler DOF9 file to extract data from.', required=True)
@unsync.option('--destination', '-d', required=True, help='The destination table that these courses will be stored in.')
def dof9_emergency_teacher_import(data, input_file, destination):
"""Import the emergency teaher information from a Timetabler DOF9 file."""
emergency_teachers = petl.fromxml(input_file, '{http://www.timetabling.com.au/DOV9}EmergencyTeachers/{http://www.timetabling.com.au/DOV9}EmergencyTeacher', {
'EmergencyTeacherID': '{http://www.timetabling.com.au/DOV9}EmergencyTeacherID',
'Code': '{http://www.timetabling.com.au/DOV9}Code',
'FirstName': '{http://www.timetabling.com.au/DOV9}FirstName',
'MiddleName': '{http://www.timetabling.com.au/DOV9}MiddleName',
'LastName': '{http://www.timetabling.com.au/DOV9}LastName',
'Salutation': '{http://www.timetabling.com.au/DOV9}Salutation',
'Email': '{http://www.timetabling.com.au/DOV9}Email',
'Address': '{http://www.timetabling.com.au/DOV9}Address',
'Suburb': '{http://www.timetabling.com.au/DOV9}Suburb',
'State': '{http://www.timetabling.com.au/DOV9}State',
'Postcode': '{http://www.timetabling.com.au/DOV9}Postcode',
'Phone': '{http://www.timetabling.com.au/DOV9}Phone',
'Mobile': '{http: | //www.timetabling.com.au/DOV9}Mobile',
'OtherPhone': '{http://w | ww.timetabling.com.au/DOV9}OtherPhone',
'Priority': '{http://www.timetabling.com.au/DOV9}Priority',
'Notes': '{http://www.timetabling.com.au/DOV9}Notes',
'SpareField1': '{http://www.timetabling.com.au/DOV9}SpareField1',
'SpareField2': '{http://www.timetabling.com.au/DOV9}SpareField2',
'SpareField3': '{http://www.timetabling.com.au/DOV9}SpareField3'})
data.set(destination, emergency_teachers)
dof9_emergency_teacher_import.display_name = 'emergency_teacher_import'
|
leighpauls/k2cro4 | third_party/python_26/Lib/json/tests/test_indent.py | Python | bsd-3-clause | 906 | 0.004415 | from unittest import TestCase
import json
import textwrap
class TestIndent(TestCase):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
| {'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
[
"blorpie"
],
[
"whoops"
],
[],
"d-shta | eou",
"d-nthiouh",
"i-vhbjkhnth",
{
"nifty": 87
},
{
"field": "yes",
"morefield": false
}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = json.loads(d1)
h2 = json.loads(d2)
self.assertEquals(h1, h)
self.assertEquals(h2, h)
self.assertEquals(d2, expect)
|
CSC522-Data-mining-NCSU/reputation-hpc | calc_repu/getMovieGrade.py | Python | mit | 1,211 | 0.050372 | import csv
import pickle
from WeightFinder import Weigh | tFinder
import time
from mpi4py import MPI
import pickle
from demo | s import cmd
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
finder = WeightFinder()
def get_movie_grade(movie_id):
try:
f = open('../training_set/mv_'+str(movie_id).zfill(7)+'.txt','r')
except: return 0
reader = csv.reader(f)
reader.next()
score = 0
sum_w = 0
for row in reader:
a = int(row[0])
rate = float(row[1])
foo, weight = finder.get_user_weight(a)
score = score + weight * rate
sum_w += weight
#ground = pickle.load(open('movie_ground'))
#ground[movie_id] = score/sum_w
#pickle.dump(ground,open('movie_ground','w'))
f.close()
return score/sum_w
def run(q):
processors = 4
era = 0
r = {}
while True:
k = era * processors + rank
if k >= 1000: break
if k%100 == 0: print k
#if rank==0: print k
k = k + int(q)*1000
if k > 17770: break
r[str(k)] = get_movie_grade(k)
era += 1
if rank == 0:
for i in range(1,processors):
temp = comm.recv(source=i)
r.update(temp)
with open('temgrade/'+str(q)+'_tem_grade','wb') as teg:
pickle.dump(r, teg)
else:
comm.send(r,dest=0)
if __name__ == '__main__':
eval(cmd())
#run(7)
|
arturh85/projecteuler | python/src/problem002.py | Python | mit | 1,592 | 0.011935 | '''
Problem 2
19 October 2001
Each new term in the Fibonacci sequence is generated by adding the
previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not
e | xceed four million, find the sum of the even-valued terms.
----------------------------------------------------------
Created on 25.01.2012
@author: ahallmann
'''
import unittest
import timeit
def generate_fibonacci_sequence(limit=0):
| a = 1
yield a
b = 1
yield b
i = 0
while(limit == 0 or a + b < limit):
i = a + b
a = b
b = i
yield i
'''
By considering the terms in the Fibonacci sequence whose values do not
exceed four million, find the sum of the even-valued terms.
'''
def solve(limit=4000000):
msum = 0
for i in generate_fibonacci_sequence(limit):
if i % 2 == 0:
msum += i
return msum
class Test(unittest.TestCase):
def test_sample(self):
self.assertEqual(44, solve(100))
def test_answer(self):
self.assertEqual(4613732, solve())
# -----------------------------------------
def run():
return solve()
if __name__ == '__main__':
unittest.main()
if __name__ == '__main__':
t = timeit.Timer("run()", "from __main__ import run")
count = 10000
print str(t.timeit(count)) + " seconds for " + str(count) + " runs"
|
bernardopires/django-tenant-schemas | examples/tenant_tutorial/customers/views.py | Python | mit | 1,718 | 0 | from django.contrib.auth.models import User
from django.db.utils import DatabaseError
from django.views.generic import FormView
from customers.forms import GenerateUsersForm
from customers.models import Client
from random import choice
class TenantView(FormView):
form_class = GenerateUsersForm
template_name = "index_tenant.html"
success_url = "/"
def get_context_data(self, **kwargs):
context = super(TenantView, self).get_context_data(**kwargs)
context['tenants_list'] = Client.objects.all()
context['users'] = User.objects.all()
return context
def form_valid(self, form):
User.objects.all().delete() # clean current users
# generate five random users
USERS_TO_GENERATE = 5
first_names = ["Aiden", "Jackson", "Ethan", "Liam", "Mason", "Noah",
"Lucas", "Jacob", "Jayden", "Jack", "Sophia", "Emma",
"Olivia", "Isabella", "Ava", "Lily", "Zoe", "Chloe",
"Mia", "Madison"]
last_names = ["Smith", "Brown", "Lee ", "Wilson", "Martin", "Patel",
"Taylor", "Wong", "Campbell", "Williams"]
while User.objects.count() != USERS_TO_GENERATE:
first_name = choice(first_names)
last_name = choice(last_names)
try:
user = User(username=(first_name + last_name).lower(),
| email="%s | @%s.com" % (first_name, last_name),
first_name=first_name,
last_name=last_name)
user.save()
except DatabaseError:
pass
return super(TenantView, self).form_valid(form)
|
nthall/pip | tests/unit/test_index.py | Python | mit | 4,551 | 0 | import os.path
import pytest
from pip.download import PipSession
from pip.index import HTMLPage
from pip.index import PackageFinder, Link
def test_sort_locations_file_expand_dir(data):
"""
Test that a file:// dir gets listdir run with expand_dir
"""
finder = PackageFinder([data.find_links], [], session=PipSession())
files, urls = finder._sort_locations([data.find_links], expand_dir=True)
assert files and not urls, (
"files and not urls should have been found at find-links url: %s" %
data.find_links
)
def test_sort_locations_file_not_find_link(data):
"""
Test that a file:// url dir that's not a find-link, doesn't get a listdir
run
"""
finder = PackageFinder([], [], session=PipSession())
files, urls = finder._sort_locations([data.index_url("empty_with_pkg")])
assert urls and not files, "urls, but not files should have been found"
def test_sort_locat | ions_non_existing_path():
"""
Test that a non-existing path is ignored.
"""
finder = PackageFinder([], [], session=PipSession())
files, urls = finder._sort_locations(
[os.path.join('this', 'doesnt', 'exist')])
assert not urls and not files, "nothing should have been found"
class TestLink(object):
def test_splitext(self):
assert ('wheel', '.whl') == Link('http://yo/wheel.whl').splitext()
@pytest.mark.parametrize(
| ("url", "expected"),
[
("http://yo/wheel.whl", "wheel.whl"),
("http://yo/wheel", "wheel"),
(
"http://yo/myproject-1.0%2Bfoobar.0-py2.py3-none-any.whl",
"myproject-1.0+foobar.0-py2.py3-none-any.whl",
),
],
)
def test_filename(self, url, expected):
assert Link(url).filename == expected
def test_no_ext(self):
assert '' == Link('http://yo/wheel').ext
def test_ext(self):
assert '.whl' == Link('http://yo/wheel.whl').ext
def test_ext_fragment(self):
assert '.whl' == Link('http://yo/wheel.whl#frag').ext
def test_ext_query(self):
assert '.whl' == Link('http://yo/wheel.whl?a=b').ext
def test_is_wheel(self):
assert Link('http://yo/wheel.whl').is_wheel
def test_is_wheel_false(self):
assert not Link('http://yo/not_a_wheel').is_wheel
def test_fragments(self):
url = 'git+https://example.com/package#egg=eggname'
assert 'eggname' == Link(url).egg_fragment
assert None is Link(url).subdirectory_fragment
url = 'git+https://example.com/package#egg=eggname&subdirectory=subdir'
assert 'eggname' == Link(url).egg_fragment
assert 'subdir' == Link(url).subdirectory_fragment
url = 'git+https://example.com/package#subdirectory=subdir&egg=eggname'
assert 'eggname' == Link(url).egg_fragment
assert 'subdir' == Link(url).subdirectory_fragment
@pytest.mark.parametrize(
("html", "url", "expected"),
[
("<html></html>", "https://example.com/", "https://example.com/"),
(
"<html><head>"
"<base href=\"https://foo.example.com/\">"
"</head></html>",
"https://example.com/",
"https://foo.example.com/",
),
(
"<html><head>"
"<base><base href=\"https://foo.example.com/\">"
"</head></html>",
"https://example.com/",
"https://foo.example.com/",
),
],
)
def test_base_url(html, url, expected):
assert HTMLPage(html, url).base_url == expected
class MockLogger(object):
def __init__(self):
self.called = False
def warning(self, *args, **kwargs):
self.called = True
@pytest.mark.parametrize(
("location", "trusted", "expected"),
[
("http://pypi.python.org/something", [], True),
("https://pypi.python.org/something", [], False),
("git+http://pypi.python.org/something", [], True),
("git+https://pypi.python.org/something", [], False),
("git+ssh://git@pypi.python.org/something", [], False),
("http://localhost", [], False),
("http://127.0.0.1", [], False),
("http://example.com/something/", [], True),
("http://example.com/something/", ["example.com"], False),
],
)
def test_secure_origin(location, trusted, expected):
finder = PackageFinder([], [], session=[], trusted_hosts=trusted)
logger = MockLogger()
finder._validate_secure_origin(logger, location)
assert logger.called == expected
|
googleapis/python-datacatalog | samples/generated_samples/datacatalog_generated_datacatalog_v1beta1_data_catalog_create_tag_template_sync.py | Python | apache-2.0 | 1,586 | 0.001892 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License | .
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific languag | e governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTagTemplate
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_generated_datacatalog_v1beta1_DataCatalog_CreateTagTemplate_sync]
from google.cloud import datacatalog_v1beta1
def sample_create_tag_template():
# Create a client
client = datacatalog_v1beta1.DataCatalogClient()
# Initialize request argument(s)
request = datacatalog_v1beta1.CreateTagTemplateRequest(
parent="parent_value",
tag_template_id="tag_template_id_value",
)
# Make the request
response = client.create_tag_template(request=request)
# Handle the response
print(response)
# [END datacatalog_generated_datacatalog_v1beta1_DataCatalog_CreateTagTemplate_sync]
|
blablacar/exabgp | lib/exabgp/bgp/message/update/nlri/ipvpn.py | Python | bsd-3-clause | 3,348 | 0.028674 | # encoding: utf-8
"""
bgp.py
Created by Thomas Mangin on 2012-07-08.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message import OUT
from exabgp.bgp.message.update.nlri.nlri import NLRI
from exabgp.bgp.message.update.nlri.cidr import CIDR
from exabgp.bgp.message.update.nlri.labelled import Labelled
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
from exabgp.bgp.message.update.nlri.qualifier import PathInfo
from exabgp.protocol.ip import IP
from exabgp.protocol.ip import NoNextHop
# ====================================================== IPVPN
# RFC 4364
@NLRI.register(AFI.ipv4,SAFI.mpls_vpn)
@NLRI.register(AFI.ipv6,SAFI.mpls_vpn)
class IPVPN (Labelled):
__slots__ = ['rd']
def __init__ (self, afi, safi, action=OU | T.UNSET):
Labelled.__init__(self, afi, safi, action)
self.rd = RouteDistinguisher.NORD
@classmethod
def new (cls, afi, safi, packed, mask, labels, rd, nexthop=None, action=OUT.UNSET):
instance = cls(afi,safi,action)
instance.cidr = CIDR(packed, mask)
instance.labels = labels
instance.rd = rd
instance.nexthop = IP.create(nexthop) if nexthop else NoNextHop
instance.action = action
return instance
def extensive (sel | f):
return "%s%s%s%s%s" % (self.prefix(),str(self.labels),str(self.rd),str(self.path_info),str(self.rd))
def __len__ (self):
return Labelled.__len__(self) + len(self.rd)
def __repr__ (self):
nexthop = ' next-hop %s' % self.nexthop if self.nexthop else ''
return "%s%s" % (self.extensive(),nexthop)
def __eq__ (self, other):
return \
Labelled.__eq__(self, other) and \
self.rd == other.rd
@classmethod
def has_rd (cls):
return True
def pack (self, negotiated=None):
addpath = self.path_info.pack() if negotiated and negotiated.addpath.send(self.afi,self.safi) else ''
mask = chr(len(self.labels)*8 + len(self.rd)*8 + self.cidr.mask)
return addpath + mask + self.labels.pack() + self.rd.pack() + self.cidr.pack_ip()
def index (self, negotiated=None):
addpath = 'no-pi' if self.path_info is PathInfo.NOPATH else self.path_info.pack()
mask = chr(len(self.labels)*8 + len(self.rd)*8 + self.cidr.mask)
return addpath + mask + self.labels.pack() + self.rd.pack() + self.cidr.pack_ip()
def _internal (self, announced=True):
r = Labelled._internal(self,announced)
if announced and self.rd:
r.append(self.rd.json())
return r
# @classmethod
# def _rd (cls, data, mask):
# mask -= 8*8 # the 8 bytes of the route distinguisher
# rd = data[:8]
# data = data[8:]
#
# if mask < 0:
# raise Notify(3,10,'invalid length in NLRI prefix')
#
# if not data and mask:
# raise Notify(3,10,'not enough data for the mask provided to decode the NLRI')
#
# return RouteDistinguisher(rd), mask, data
#
# @classmethod
# def unpack_mpls (cls, afi, safi, data, action, addpath):
# pathinfo, data = cls._pathinfo(data,addpath)
# mask, labels, data = cls._labels(data,action)
# rd, mask, data = cls._rd(data,mask)
# nlri, data = cls.unpack_cidr(afi,safi,mask,data,action)
# nlri.path_info = pathinfo
# nlri.labels = labels
# nlri.rd = rd
# return nlri,data
#
# @classmethod
# def unpack_nlri (cls, afi, safi, data, addpath):
# return cls.unpack_mpls(afi,safi,data,addpath)
|
fad4470/pastagram | meatballify.py | Python | gpl-3.0 | 2,227 | 0.035474 | import numpy as np
import cv2
from PIL import Image, ImageDraw
import sys
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread(sys.argv[1])
h, w = img.shape[:2]
png = Image.new('RGBA',(w,h))
png.save('meatball-' + sys.argv[1], 'PNG')
meatball = Image.open('meatball.png')
meatballCV = cv2.imread('meatball.png')
#meatballGray = cv2.cvtColor(meatballCV, cv2.COLOR_BGR2GRAY)
#ret, orig_mask = cv2.threshold(meatballGray, 10, 255, cv2.THRESH_BINARY)
#orig_mask_inv = cv2.bitwise_not(orig_mask)
#meatballCV = meat | ballCV[:,:,0:3]
#origMeatballHeight, origMeatballWidth = meatballCV.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for(x, y, w, h) in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+ | h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for(ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0,255,0), 2)
for(ex, ey, ew, eh) in eyes:
meatballWidth = ew
meatballHeight = eh
x1 = ex
x2 = ex + ew
y1 = ey
y2 = ey + eh
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 > w:
x2 = w
if y2 > h:
y2 = h
meatballWidth = x2 - x1
meatballHeight = y2 - y1
meatball = meatball.resize((ew, eh), Image.ANTIALIAS)
#newMeatball = cv2.resize(meatballCV, (meatballWidth, meatballHeight), interpolation = cv2.INTER_AREA)
offset = (x1 + x, y1 + y)
meatballEyes = Image.open("eye.png")
png.paste(meatball, offset)
png.save('meatball-' + sys.argv[1])
#mask = cv2.resize(orig_mask, (meatballWidth, meatballHeight), interpolation = cv2.INTER_AREA)
#mask_inv = cv2.resize(orig_mask_inv, (meatballWidth, meatballHeight), interpolation = cv2.INTER_AREA)
# roi = roi_color[y1:y2, x1:x2]
# roi_bg = cv2.bitwise_and(roi, roi, mask = mask_inv)
# roi_fg = cv2.bitwise_and(newMeatball, newMeatball, mask = mask)
# dst = cv2.add(roi_bg, roi_fg)
# roi_color[y1:y2, x1:x2] = dst
#cv2.imshow('img', img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
|
stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/unit/modules/linux_lvm_test.py | Python | apache-2.0 | 11,687 | 0.000941 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
import os.path
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import linux_lvm
from salt.exceptions import CommandExecutionError
# Globals
linux_lvm.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LinuxLVMTestCase(TestCase):
'''
TestCase for the salt.modules.linux_lvm module
'''
def test_version(self):
'''
Tests LVM version info from lvm version
'''
mock = MagicMock(return_value='Library version : 1')
with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
self.assertEqual(linux_lvm.version(), '1')
def test_fullversion(self):
'''
Tests all version info from lvm version
'''
mock = MagicMock(return_value='Library version : 1')
with patch.dict(linux_lvm.__salt__, {'cmd.run': mock}):
self.assertDictEqual(linux_lvm.fullversion(),
{'Library version': '1'})
def test_pvdisplay(self):
'''
Tests information about the physical volume(s)
'''
moc | k = MagicMock(return_value={'retcode': 1})
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(linux_lvm.pvdisplay(), {})
mock = MagicMock(return_value={'retcode': 0,
'stdout': 'A:B:C:D:E:F:G:H:I:J:K'})
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
| self.assertDictEqual(linux_lvm.pvdisplay(),
{'A': {'Allocated Physical Extents': 'K',
'Current Logical Volumes Here': 'G',
'Free Physical Extents': 'J',
'Internal Physical Volume Number': 'D',
'Physical Extent Size (kB)': 'H',
'Physical Volume (not) Allocatable': 'F',
'Physical Volume Device': 'A',
'Physical Volume Size (kB)': 'C',
'Physical Volume Status': 'E',
'Total Physical Extents': 'I',
'Volume Group Name': 'B'}})
mockpath = MagicMock(return_value='Z')
with patch.object(os.path, 'realpath', mockpath):
self.assertDictEqual(linux_lvm.pvdisplay(real=True),
{'Z': {'Allocated Physical Extents': 'K',
'Current Logical Volumes Here': 'G',
'Free Physical Extents': 'J',
'Internal Physical Volume Number': 'D',
'Physical Extent Size (kB)': 'H',
'Physical Volume (not) Allocatable': 'F',
'Physical Volume Device': 'A',
'Physical Volume Size (kB)': 'C',
'Physical Volume Status': 'E',
'Real Physical Volume Device': 'Z',
'Total Physical Extents': 'I',
'Volume Group Name': 'B'}})
def test_vgdisplay(self):
'''
Tests information about the volume group(s)
'''
mock = MagicMock(return_value={'retcode': 1})
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(linux_lvm.vgdisplay(), {})
mock = MagicMock(return_value={'retcode': 0,
'stdout': 'A:B:C:D:E:F:G:H:I:J:K:L:M:N:O:P:Q'})
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(linux_lvm.vgdisplay(),
{'A': {'Actual Physical Volumes': 'K',
'Allocated Physical Extents': 'O',
'Current Logical Volumes': 'F',
'Current Physical Volumes': 'J',
'Free Physical Extents': 'P',
'Internal Volume Group Number': 'D',
'Maximum Logical Volume Size': 'H',
'Maximum Logical Volumes': 'E',
'Maximum Physical Volumes': 'I',
'Open Logical Volumes': 'G',
'Physical Extent Size (kB)': 'M',
'Total Physical Extents': 'N',
'UUID': 'Q',
'Volume Group Access': 'B',
'Volume Group Name': 'A',
'Volume Group Size (kB)': 'L',
'Volume Group Status': 'C'}})
def test__lvdisplay(self):
'''
Return information about the logical volume(s)
'''
mock = MagicMock(return_value={'retcode': 1})
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(linux_lvm.lvdisplay(), {})
mock = MagicMock(return_value={'retcode': 0,
'stdout': 'A:B:C:D:E:F:G:H:I:J:K:L:M'})
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(linux_lvm.lvdisplay(),
{'A': {'Allocated Logical Extents': 'I',
'Allocation Policy': 'J',
'Current Logical Extents Associated': 'H',
'Internal Logical Volume Number': 'E',
'Logical Volume Access': 'C',
'Logical Volume Name': 'A',
'Logical Volume Size': 'G',
'Logical Volume Status': 'D',
'Major Device Number': 'L',
'Minor Device Number': 'M',
'Open Logical Volumes': 'F',
'Read Ahead Sectors': 'K',
'Volume Group Name': 'B'}})
def test_pvcreate(self):
'''
Tests for set a physical device to be used as an LVM physical volume
'''
self.assertEqual(linux_lvm.pvcreate(''),
'Error: at least one device is required')
self.assertRaises(CommandExecutionError, linux_lvm.pvcreate, 'A')
pvdisplay = MagicMock(return_value=True)
with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
with patch.object(os.path, 'exists', return_value=True):
ret = {'stdout': 'saltines', 'stderr': 'cheese', 'retcode': 0, 'pid': '1337'}
mock = MagicMock(return_value=ret)
with patch.dict(linux_lvm.__salt__, {'cmd.run_all': mock}):
self.assertEqual(linux_lvm.pvcreate('A', metadatasize=1000), True)
def test_pvremove(self):
'''
Tests for remove a physical device being used as an LVM physical volume
'''
pvdisplay = MagicMock(return_value=False)
with patch('salt.modules.linux_lvm.pvdisplay', pvdisplay):
self.assertRaises(CommandExecutionError, linux_lvm.pvremove, 'A', override=False)
pvdisplay = MagicMock(return_value=False)
with patch('salt.modules |
ivorbosloper/pestileaks | fabfile.py | Python | gpl-2.0 | 973 | 0.006166 | from __future__ import with_statement
from fabric.api import local, abort, run, cd, env
from fabric.context_managers import prefix
env.directory = '/home/pestileaks/pestileaks'
env.activate = 'source /home/pestileaks/env/bin/activat | e'
env.user = 'pestileaks'
env.hosts = ['pestileaks.nl']
env.restart = 'killall -HUP gunicorn'
#Show current status versus current github master state
def status():
with cd(env.directory):
run('git status')
def dep | loy():
with cd(env.directory):
run("git pull")
#run("rm -rf /home/pestileaks/run/static")
run("mkdir -p /home/pestileaks/run/static")
with prefix(env.activate):
run("if [ doc/requirements.txt -nt doc/requirements.pyc ]; then pip install -r doc/requirements.txt; touch doc/requirements.pyc; fi")
run('./manage.py syncdb')
run('./manage.py migrate --noinput')
run('./manage.py collectstatic --noinput')
run(env.restart)
|
AlanBell/ExceptionalEmails | mongoreceiver.py | Python | agpl-3.0 | 7,066 | 0.020662 | #!/usr/bin/python
import smtpd
import asyncore
import email
import re
from pymongo import MongoClient
from bson.dbref import DBRef
from datetime import datetime
from pytz import timezone
import pytz
from sendfail import sendfail
#this is the daemon part of exceptional emails
#it recieves emails and if they are to a valid user they get put in the database
#if the user profile limits from addresses then it checks the from addresses
#anything else gets rejected
class CustomSMTPServer(smtpd.SMTPServer):
def payloadtoarray(self,msg):
payload=msg.get_payload()
if msg.is_multipart():
result=[]
for part in payload:
result.append(self.payloadtoarray(part))
return result
else:
return {msg.get_content_type():payload}
def process_message(self, peer, mailfrom, rcpttos, data):
#print ('Receiving message from:', peer)
#print ('Message addressed from:', mailfrom)
#print ('Message addressed to :', rcpttos)
#print ('Message length :', len(data))
#print (data)
#we check in the database that the rcpttos is one valid user
#otherwise fail with return "550 No such user"
try:
msg = email.message_from_string(data)#parse the headers
print msg['subject'],rcpttos[0]
sendto=rcpttos[0]
#username is stuff to the left of the +
if not "@" in sendto:
email_id = exceptionalemails.erroremails.insert(post)
return "550 Invalid email address"
#we should accept postmaster address, or forward it somewhere else to be compliant
if not "+" in sendto:
email_id = exceptionalemails.erroremails.insert(post)
return "550 Invalid address"
username=sendto[:sendto.find('+')]#user is the bit up to the +
alert=sendto[sendto.find('+')+1:sendto.find('@')]#alert is from the + to the @
print username
print alert
post={"mailfrom":mailfrom, "mailto":rcpttos[0], "subject":msg['subject'], "data":data ,"headers":msg.items(),"received":datetime.now()}
post['payload']=self.payloadtoarray(msg)#this stores the parsed payload for PHP to render
client = MongoClient()
exceptionalemails = client.exceptionalemails
emails=exceptionalemails.emails
#lets see if we can add a bit to the post
post['timestamp']=datetime.now()
#can we find this user in the database?
user=exceptionalemails.users.find_one({"username":username})
if user:
userref=DBRef("users",user['_id'])
post['user']=userref
else:
#the email is not for any recognised user, record an error
print "ERROR user not found"
email_id = exceptionalemails.erroremails.insert(post)
return "550 Invalid address, user not found"
#is the alert found for that user?
alertobj=exceptionalemails.alerts.find_one({"user":userref,"emailslug":alert,"pause":{'$ne':"1"}})
if alertobj:
alertref=DBRef("alerts",alertobj['_id'])
post['alert']=alertref
else:
#the email is not for any recognised user, record an error
email_id = exceptionalemails.erroremails.insert(post)
print email_id
print "ERROR alert not found"
return "550 Invalid alert, alert not found"
#now link it to an event today and see if it meets the criteria
#we need to know what date it is now in the user's timezone
userzone=timezone(user['timezone'])
now=datetime.now(userzone)#arguably this should use now + 1 hour, or we should do two loops, now and now plus a bit
userdate=now.strftime('%Y-%m-%d')#just the date part in our standard setup, not the | user preference
eventobj=exceptionalemails.events.find_one({"user":userref,"alert":alertref,"date":userdate})
#this really should find the event for the day
if eventobj:
#we can tie this email to an expectation, lets check it for good words and badwords
#we do both checks, even if the first fails so we can do better error reporting
#try:
| goodre=re.compile(alertobj['goodregex'])
print alertobj['goodregex'],post['subject']
if goodre.search(post['subject'] or goodre.search(post['data'])):
eventobj['goodregex']=True
#print "good match"
else:
eventobj['goodregex']=False
#print "bad match"
#except:
#compile failed, lets complain about it a bit
# print "goodwords compile failed"
try:
badre=re.compile(alertobj['badregex'])
if (alertobj['badregex']!='' and (badre.search(post['subject']) or badre.search(post['data']))):
eventobj['badregex']=True
#print "good bad match"
else:
eventobj['badregex']=False
#print "bad bad match"
except:
#compile failed, lets complain about it a bit
#lets see if the mail matches, first subject then body
print "badwords compile failed "
raise
#should we unset the body before saving it?
eventobj['complete']=True
exceptionalemails.events.save(eventobj)
email_id = emails.insert(post)
emailref=DBRef("emails",email_id)
eventobj['email']=emailref
exceptionalemails.events.save(eventobj)
if (eventobj['badregex'] or not eventobj['goodregex']):
sendfail(eventobj,"Bad email")
if ('Options' in alertobj and "deleteonfail" in alertobj['Options']):
post['data']=None
post['payload']=None
else:
if ('Options' in alertobj and "deleteonsuccess" in alertobj['Options']):
post['data']=None
post['payload']=None
else:
#we don't have an expectation created for today, but the email did arrive and we know what alert it belongs to.
#it is possible that today isn't a day we were expecting this event to fire - or something is wrong with the cronjob that creates expectations
#we could save it, but lets put it in a separate collection
email_id = exceptionalemails.erroremails.insert(post)
print email_id
#depending on the alert preferences we might have to unset the body before saving it
return
except:
#something utterly unexpected happened, lets put the email in our exception bucket for later analysis
email_id = exceptionalemails.erroremails.insert(post)
print "ERROR processing %s saved to errormails " % email_id
return
server = CustomSMTPServer(('0.0.0.0', 1025), None)
asyncore.loop()
|
skitazaki/django-access-dashboard | src/accesslog/serializers.py | Python | apache-2.0 | 1,315 | 0 | # -*- coding: utf-8 -*-
from rest_framework import serializers
from accesslog.models import AccessLog, DaySummary, MonthSummary
class SourceSerializer(serializers.Serializer):
source = serializers.CharField(max_length=200)
total = serializers.IntegerField()
time_min = serializers.DateTimeField()
time_max = serializers.DateTimeField()
class AccessLogSerializer(serializers. | HyperlinkedModelSerializer):
class Meta:
model = AccessLog
fields = ('id', 'time', 'host', 'path', 'query', 'method', 'protocol',
'status', 'size', 'referer', 'ua', 'trailing', 'source')
class DaySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DaySummary
fields = ('id', 'day', 'host_kind', 'path_kind', 'protocol', 'method',
'status', 'size_min', 'size_max', 'size_a | vg', 'referer_kind',
'ua_kind', 'total', 'source')
class MonthSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = MonthSummary
fields = ('id', 'year', 'month', 'host_kind', 'path_kind', 'protocol',
'method', 'status', 'size_min', 'size_max', 'size_avg',
'referer_kind', 'ua_kind', 'total', 'source')
# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :
|
Akagi201/learning-python | pyramid/misc/MyProject/myproject/views.py | Python | mit | 1,090 | 0.001835 | from pyramid.response import Response
from pyramid.view import view_config
from sqlalchemy.exc import DBAPIError
from .models import (
DBSession,
MyModel,
)
@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
try:
one = DBSession.query(MyModel).filter(MyModel.name == 'one').first()
except DBAPIError:
return Response(conn_err_msg, content_type='text/p | lain', status_int=500)
return {'one': one, 'project': 'MyProject'}
conn_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to run the "initialize_MyProject_db" script
to initialize your database tables. Check your virtual
environment's "bin" directory for this sc | ript and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
|
nkoech/csacompendium | csacompendium/countries/models.py | Python | mit | 1,969 | 0.001016 | from __future__ import unicode_literals
from csacompendium.utils.abstractmodels import (
AuthUserDetail,
CreateUpdateTime,
)
from csacompendium.locations.models import | Location
from cs | acompendium.utils.createslug import create_slug
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
class Country(AuthUserDetail, CreateUpdateTime):
"""
Country model. Creates country entity.
"""
country_code = models.CharField(max_length=3, unique=True, help_text='Country abbreviated name')
country_name = models.CharField(max_length=50, unique=True)
slug = models.SlugField(unique=True, blank=True)
def __unicode__(self):
return self.country_name
def __str__(self):
return self.country_name
def get_api_url(self):
"""
Get country URL as a reverse from another app
:return: URL
:rtype: String
"""
return reverse('country_api:detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'Countries'
@property
def locations(self):
"""
Get related location object/record
:return: Query result from the location model
:rtye: object/record
"""
instance = self
qs = Location.objects.filter_by_instance(instance)
return qs
@receiver(pre_save, sender=Country)
def pre_save_country_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending objec
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, Country, instance.country_name)
|
omelkonian/cds | cds/modules/records/api.py | Python | gpl-2.0 | 2,845 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the | terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if n | ot, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record API."""
from __future__ import absolute_import, print_function
import uuid
import os
from invenio_records.api import Record
from invenio_jsonschemas import current_jsonschemas
from .minters import kwid_minter
class Keyword(Record):
"""Define API for a keywords."""
_schema = 'keywords/keyword-v1.0.0.json'
@classmethod
def create(cls, data, id_=None, **kwargs):
"""Create a keyword."""
data['$schema'] = current_jsonschemas.path_to_url(cls._schema)
key_id = data.get('key_id', None)
name = data.get('name', None)
data.setdefault('deleted', False)
if not id_:
id_ = uuid.uuid4()
kwid_minter(id_, data)
data['suggest_name'] = {
'input': name,
'payload': {
'key_id': key_id,
'name': name
},
}
return super(Keyword, cls).create(data=data, id_=id_, **kwargs)
@property
def ref(self):
"""Get the url."""
return Keyword.get_ref(self['key_id'])
@classmethod
def get_id(cls, ref):
"""Get the ID from the reference."""
return os.path.basename(ref)
@classmethod
def get_ref(cls, id_):
"""Get reference from an ID."""
return 'https://cds.cern.ch/api/keywords/{0}'.format(str(id_))
class Category(Record):
"""Define API for a category."""
_schema = 'categories/category-v1.0.0.json'
@classmethod
def create(cls, data, id_=None, **kwargs):
"""Create a category."""
data['$schema'] = current_jsonschemas.path_to_url(cls._schema)
data['suggest_name'] = {
'input': data.get('name', None),
'payload': {
'types': data.get('types', [])
}
}
return super(Category, cls).create(data=data, id_=id_, **kwargs)
|
sejust/pykit | humannum/__init__.py | Python | mit | 341 | 0 | from .humannum import (
K,
M,
G,
T,
P,
E,
Z,
Y,
humannum,
parsenum,
parseint,
value_to_unit,
unit_to_value,
| )
__all__ = [
'K',
'M',
'G',
'T',
'P',
'E',
'Z',
'Y',
'humannum',
'parsenum',
' | parseint',
'value_to_unit',
'unit_to_value',
]
|
getnamo/UnrealEnginePython | tutorials/FaceRecognitionWithOpenCVAndUnrealEnginePython_Assets/eyes_fourth.py | Python | mit | 1,501 | 0.004664 | import unreal_engine as ue
from unreal_engine.classes import SceneCaptureComponent2D, PyHUD
from unreal_engine.enums import ESceneCaptureSource
class Sight:
def __init__(self):
self.what_i_am_seeing = ue.create_transient_texture_render_target2d(512, 512)
def pre_initialize_components(self):
# add a new root component (a SceneCaptureComponent2D one)
self.scene_capturer = self.uobject.add_actor_root_component(SceneCaptureComponent2D, 'Scene Capture')
# use the previously created texture as the render target
self.scene_capturer.TextureTarget = self.what_i_am_seeing
# store pixels as linear colors (non HDR)
self.scene_capturer.CaptureSource = ESceneCaptureSource.SCS_FinalColorLDR
def begin_play(self):
# get a reference to the pawn currently controlled by the player
mannequin = self.uobject.get_player_pawn()
# attach myself to the 'head' bone of the | mannequin Mesh component
self.uobject.attach_to_component(mannequin.Mesh, 'head')
# spawn a new HUD (well, a PyHUD)
hud = self.uobject.actor_spawn(PyHUD, PythonModule='hud_first', P | ythonClass='FacesDetector')
# get a reference to its proxy class
self.py_hud = hud.get_py_proxy()
# set the texture to draw
self.py_hud.texture_to_draw = self.what_i_am_seeing
# use this new HUD as the player default one (so the engine will start drawing it)
self.uobject.set_player_hud(hud) |
alex8866/cinder | cinder/tests/api/v1/test_snapshot_metadata.py | Python | apache-2.0 | 21,261 | 0.000141 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v1 import snapshot_metadata
from cinder.api.v1 import snapshots
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
CONF = cfg.CONF
def return_create_snapshot_metadata_max(context,
snapshot_id,
metadata,
delete):
return stub_max_snapshot_metadata()
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensit | ive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
| return stub_new_snapshot_metadata()
def return_snapshot_metadata(context, snapshot_id):
if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36:
msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
raise Exception(msg)
return stub_snapshot_metadata()
def return_empty_snapshot_metadata(context, snapshot_id):
return {}
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def delete_snapshot_metadata(context, snapshot_id, key):
pass
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_max_snapshot_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {},
'project_id': context.project_id}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound('bogus test message')
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v1/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v1/snapshots')
self.snapshot_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_delete',
delete_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
|
DonaldWhyte/machine-learning-experiments | scripts/combine_dataset.py | Python | bsd-2-clause | 4,113 | 0.004863 | #-------------------------------------------------------------------------------
# Name: combine_dataset.py
# Author: Donald Whyte
# Date last modified: 29/06/12
# Description:
# Reads the directory containing a shellcode training/test dataset and combines
# ALL of the data items (individual files) into a single file. The dataset
# directory must have two directories - 'shellcode' and 'non-shellcode'.
# Which directory a data item is in determines the class assigned to it.
#-------------------------------------------------------------------------------
import sys
import os
import struct
# Constants, used to assign classes to data items in the combined file
CLASS_NON_SHELLCODE = 0
CLASS_SHELLCODE = 1
def Int2Bytes(integer):
"""Converts an Int32 into a 4 byte 'bytes' object."""
return struct.pack('i', integer)
def read_binary_file(filename):
"""Reads the entire contents of the binary file called 'filename' and
returns a bytes object."""
with open(filename, 'rb') as f:
data = f.read()
return data
def get_num_data_items(dataset_directory):
"""Returns the number of identified data items inside the given directory.
A data item is defined as a file that has the 'bin' extension."""
num_data_items = 0
for filename in os.listdir(os.path.join(dataset_directory, "non-shellcode")):
name, extension = os.path.splitext(filename)
if extension == ".bin":
num_data_items += 1
for filename in os.listdir(os.path.join(dataset_directory, "shellcode")):
name, extension = os.path.splitext(filename)
if extension == ".bin":
num_data_items += 1
return num_data_items
def process_directory(file, directory, class_to_assign):
"""TODO"""
for filename in os.listdir(directory):
name, extension = os.path.splitext(filename)
if extension == ".bin":
# Read the file's data
data = read_binary_file(os.path.join(directory, filename))
# Write class of the data item to the file
file.write(Int2Bytes(class_to_assign))
# Write the LENGTH of the file
length = len(data)
file.write(Int2Bytes(length))
# NOW write the actual binary data
file.write(data)
def combine_dataitems(filename, dataset_directory):
"""Searches through all the files in 'dataset_directory' RECURSIVELY,
adding all the ones that end with .bin to the final archive, which is
saved at 'filename'."""
# Check if the two required directories are present
non_shellcode_directory = os.path.join(dataset_directory, "non-shellcode")
shellcode_directory = os.path.join(dataset_directory, "shellcode")
if not os.path.isdir(non_shellcode_directory):
return False
if not os.path.isdir(shellcode_directory):
return False
with open(filename, 'wb') as f:
# First write how many data items have been added to the archive
num_data_items = get_num_data_items(dataset_directory)
f.write(Int2Bytes(num_data_items))
# Add any files with the '.bin' extension if they're in the shellcode
# or non-shellcode directories. The class assigned to each data item
# is determined by what directory they're in
process_directory(f, non_shellcode_directory, CLASS_NON_SHELLCODE)
process_directory(f, shellcode_directory, CLASS_SHELLCODE)
return True
def parse_commandline_arguments():
"""Parses the command line arguments and returns a dictionary containing
the arguments' values."""
if len(sys.argv) < 3:
sys.exit("Usage: python {} <archive_filename> <da | taset_directory>".format(sys.argv[0]))
args = { 'archive_filename' : sys.argv[1], 'dataset_directory' : sys.argv[2] }
return args
if __name__ == "__main__":
args = parse_commandline_arguments()
if not combine_dataitems(args['archive_filename'], args['dataset_directory']):
print("Attempt to combine directory {} into archive file {} failed.".
format(args['archive_filename'], args['dataset_d | irectory'])) |
googleapis/python-redis | samples/generated_samples/redis_v1beta1_generated_cloud_redis_reschedule_maintenance_async.py | Python | apache-2.0 | 1,639 | 0.00061 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apach | e License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License i | s distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for RescheduleMaintenance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-redis
# [START redis_v1beta1_generated_CloudRedis_RescheduleMaintenance_async]
from google.cloud import redis_v1beta1
async def sample_reschedule_maintenance():
# Create a client
client = redis_v1beta1.CloudRedisAsyncClient()
# Initialize request argument(s)
request = redis_v1beta1.RescheduleMaintenanceRequest(
name="name_value",
reschedule_type="SPECIFIC_TIME",
)
# Make the request
operation = client.reschedule_maintenance(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END redis_v1beta1_generated_CloudRedis_RescheduleMaintenance_async]
|
pmelchior/hydra | push_jobs.py | Python | mit | 1,520 | 0.003289 | #!/bin/env python
from os import system
from sys import argv
import json
from time import sleep
if len(argv) < 2:
print "usage: " + argv[0] + " <job file>"
exit(1)
def push_to_hosts(config):
counter = 0
counting = False
sleep_time = 1
try:
counting = config['counting']
counter = config['min_count']
max_count = config['max_count']
sleep_time = config['sleep_time']
except KeyError:
pass
for host in config['hosts']:
if counting:
if counter > max_count:
break
for i in range(host['cpus']):
command = 'ssh ' + host['name'] + ' \''
if counting:
command += config['command'] % counter
else:
command += config['command']
| if config['listen']:
command += '\''
else:
command += ' < /dev/null >& /dev/null &\''
if counting:
if counter > max_count:
break
print "starting job %d on %s" % (counter, host['name'])
else:
print "starting job %d on %s" % (i, host['name'])
system(command)
sleep(sleep_time)
| counter += 1
if counting:
print "submitted jobs %d/%d" % (counter-1, max_count)
else:
print "submitted %d jobs" % (counter-1)
# read the job file
fp = open(argv[1])
config = json.load(fp)
push_to_hosts(config)
fp.close()
|
ostree/plaso | plaso/lib/eventdata.py | Python | apache-2.0 | 1,819 | 0.020891 | # -*- coding: utf-8 -*-
"""A place to store information about events, such as format strings, etc."""
# TODO: move this class to events/definitions.py or equiv.
class EventTimestamp(object):
"""Class to manage event data."""
# The timestamp_desc values.
ACCESS_TIME = u'Last Access Time'
CHANGE_TIME = u'Metadata Modification Time'
CREATION_TIME = u'Creation Time'
MODIFICATION_TIME = u'Content Modification Time'
ENTRY_MODIFICATION_TIME = u'Metadata Modification Time'
# Added time and Creation time are considered the same.
ADDED_TIME = u'Creation Time'
# Written time and Modification time are considered the same.
WRITTEN_TIME = u'Content Modification Time'
EXIT_TIME = u'Exit Time'
LAST_RUNTIME = u'Last Time Executed'
DELETED_TIME = u'Content Deletion Time'
INSTALLATION_TIME = u'Installation Time'
FILE_DOWNLOADED = u'File Downloaded'
PAGE_VISITED = u'Page Visited'
# TODO: change page visited into last visited time.
LAST_VISITED_TIME = u'Last Visited Time'
LAST_CHECKED_TIME = u'Last Checked Time'
EXPIRATION_TIME = u'Expiration Time'
START_TIME = u'Start Time'
END_TIME = u'End Time'
UPDATE_TIME = u'Update Time'
LAST_SHUTDOWN = u'Last Shutdown Time'
ACCOUNT_CREATED = u'Account Created'
LAST_LOGIN_TIME = u'Last Login Time'
LAST_PASSWORD_RESET = u'Last Password Reset'
FIRST_CONNECTED = u'First Connection Time'
LAST_CONNECTED = u'Last Connection Time'
LAST_PRINTED = u'Last Printed Time'
LAST_RESUME_TIME = u'Last Re | sume Time'
# The timestamp does not represent a date and time value.
NOT_A_TIME = u'Not a time'
# Note that the unknown time is used for date and time values
| # of which the exact meaning is unknown and being researched.
# For most cases do not use this timestamp description.
UNKNOWN = u'Unknown Time'
|
DTOcean/dtocean-core | tests/test_data_definitions_xgrid2d.py | Python | gpl-3.0 | 4,013 | 0.009469 | import pytest
import numpy as np
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import XGrid2D
def test_XGrid2D_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "XGrid2D" in all_objs.keys()
def test_XGrid2D():
raw = { | "values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
| "units": [None, 'm', 'POWER!']})
test = XGrid2D()
a = test.get_data(raw, meta)
b = test.get_value(a)
assert b.values.shape == (2,3)
assert b.units == 'POWER!'
assert b.y.units == 'm'
def test_get_None():
test = XGrid2D()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".nc"])
def test_XGrid2D_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": [None, 'm', 'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin.meta.result = meta
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert result.values.shape == (2,3)
assert result.units == 'POWER!'
assert result.y.units == 'm'
def test_XGrid2D_auto_plot(tmpdir):
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": ['\sum_{n=1}^{\infty} 2^{-n} = 1',
'm',
'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
def test_XGrid2D_auto_plot_reverse(tmpdir):
raw = {"values": np.random.randn(3, 2),
"coords": [[-2, 0, 2], ['a', 'b']]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": ['\sum_{n=1}^{\infty} 2^{-n} = 1',
'm',
'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
|
jacenkow/inspire-next | tests/integration/test_orcid.py | Python | gpl-2.0 | 5,384 | 0.000743 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from inspirehep.modules.orcid.models import InspireOrcidRecords
from inspirehep.utils.record_getter import get_db_record
from invenio_accounts.models import User
from invenio_db import db
from invenio_oauthclient.models import RemoteAccount, RemoteToken, UserIdentity
from invenio_search import current_search_client as es
import pytest
class OrcidApiMock(object):
def __init__(self, put_code):
self.put_code = put_code
def add_record(self, author_orcid, token, category, orcid_json):
return self.put_code
def update_record(self, author_orcid, token, category, orcid_json, put_code):
pass
def remove_record(self, author_orcid, token, category, put_code):
pass
class MockUser:
def __init__(self, app):
self.app = app
@pytest.fixture(scope="function")
def mock_user(app, request):
def teardown(app):
with app.app_context():
user = User.query.filter_by(id=2).first()
token = RemoteToken.query.filter_by(access_token='123').first()
user_identity = UserIdentity.query.filter_by(
id='0000-0001-9412-8627', method='orcid').first()
remote_account = RemoteAccount.query.filter_by(user_id=2).first()
with db.session.begin_nested():
db.session.delete(token)
db.session.delete(user_identity)
db.session.delete(remote_account)
db.session.delete(user)
db.session.commit()
request.addfinalizer(lambda: teardown(app))
user = User(
id=2,
)
token = RemoteToken(
id_remote_account=1,
access_token='123'
)
user_identity = UserIdentity(
id='0000-0001-9412-8627',
id_user='2',
method='orcid')
remote_account = RemoteAccount(
id=1,
user_id=2,
extra_data={},
client_id=1,
user=user)
with app.app_context():
with db.session.begin_nested():
db.session.add(user)
db.session.add(user_identity)
db.session.add(remote_account)
db.session.add(token)
db.session.commit()
return MockUser(app)
@pytest.fixture(scope='function')
def orcid_tes | t(mock_user, request):
"""Orcid test fixture."""
app = mock_user.app
def teardown(app):
with app.app_context():
es.delete(index='records-authors', doc_type='authors', id=10)
record = {
"name": {
"status": "ACTIVE",
"preferred_name": "Full Name",
"value": "Full Name"
},
"$schema": "http://localhost:5000/ | schemas/records/authors.json",
"control_number": "10",
"self": {"$ref": "http://localhost:5000/api/authors/10"},
"ids": [{
"type": "INSPIRE",
"value": "INSPIRE-0000000"
},
{
"type": "ORCID",
"value": "0000-0001-9412-8627"
}],
"self_recid": 10,
"earliest_date": "2015-09-23"
}
request.addfinalizer(lambda: teardown(app))
with app.app_context():
es.index(index='records-authors',
doc_type='authors', id=10, body=record)
es.indices.refresh('records-authors')
record = get_db_record('literature', 782466)
record['authors'].append({u'affiliations': [{u'value': u'St. Petersburg, INP'}], u'curated_relation': True, u'full_name': u'Full, Name', u'profile': {
u'__url__': u'http://inspirehep.net/record/00000000'}, u'record': {u'$ref': u'http://localhost:5000/api/authors/10'}})
mock_orcid_api = OrcidApiMock(1)
return mock_orcid_api, record
def test_record_is_sent_to_orcid(app, orcid_test):
mock_orcid_api, record = orcid_test
with app.app_context():
from inspirehep.modules.orcid.tasks import send_to_orcid
send_to_orcid(record, api=mock_orcid_api)
expected = 1
result = len(InspireOrcidRecords.query.all())
assert result == expected
def test_record_is_deleted_from_orcid(app, orcid_test):
mock_orcid_api, record = orcid_test
with app.app_context():
from inspirehep.modules.orcid.tasks import delete_from_orcid, send_to_orcid
send_to_orcid(record, api=mock_orcid_api)
delete_from_orcid(record, api=mock_orcid_api)
expected = 0
result = len(InspireOrcidRecords.query.all())
assert result == expected
|
PhoenixRacing/PhoenixRacingWebApp-noregrets | application/controllers/index.py | Python | bsd-3-clause | 156 | 0.019231 | from flask import request, | render_template
from flask.ext.login import current_user
d | ef index():
return render_template('index.html', active_page='index') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.