repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
websterkgd/PeasantMath | Roots/code/Roots.py | Python | cc0-1.0 | 3,247 | 0.009547 | from __future__ import division
import sys
import os
import matplotlib.pyplot as plt
import scipy
from scipy import special
mydir = os.path.expanduser("~/")
sys.path.append(mydir + "/GitHub/PeasantMath/Roots/code")
import functions as fxn
ks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5]
for i, k in enumerate(ks):
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
x = 2.0
xs = []
rts = []
WLs = []
for j in range(100):
xs.append(x)
y = x**(1.0/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
WLs.append(a)
x += 1
plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+ | str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9, label='WHL rule')
#plt.yscale('log')
#plt.xscale('log')
plt.xlabel('x', fontsize=8)
plt.ylabel('y', fontsize=8)
plt.xlim(min(xs), max(xs))
plt.yl | im(min(WLs), max(rts))
plt.legend(bbox_to_anchor=(-0.04, 1.08, 2.59, .2), loc=10, ncol=2,
mode="expand",prop={'size':16})
#leg = plt.legend(loc=4,prop={'size':12})
#leg.draw_frame(False)
#plt.text(-50, 14, "How well does the WHL Rule approximate square roots?", fontsize=16)
ax = fig.add_subplot(2,2,2)
x = 2.0
xs = []
rts = []
WLs = []
for j in range(100):
xs.append(x)
y = x**(1/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
WLs.append(a)
x += 10
plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9, label='WHL rule')
plt.yscale('log')
plt.xscale('log')
plt.xlabel('x', fontsize=8)
plt.ylabel('y', fontsize=8)
plt.xlim(min(xs)*0.5, max(xs)*1.5)
plt.ylim(min(WLs)*0.5, max(rts)*1.5)
ax = fig.add_subplot(2,2,3)
x = 2.0
xs = []
rts = []
WLs = []
for j in range(30):
xs.append(x)
y = x**(1/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
WLs.append(a)
x = x*1.5
plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9, label='WHL rule')
plt.yscale('log')
plt.xscale('log')
plt.xlabel('x', fontsize=8)
plt.ylabel('y', fontsize=8)
plt.xlim(min(xs)*0.5, max(xs)*1.5)
plt.ylim(min(WLs)*0.5, max(rts)*1.5)
ax = fig.add_subplot(2,2,4)
x = 2.0
xs = []
rts = []
WLs = []
for j in range(30):
xs.append(x)
y = x**(1/k)
rts.append(y)
a = fxn.WHL_kth(x, k)
WLs.append(a)
x = x*2
plt.scatter(xs, rts, s=50, color='m', facecolors='none', label='root'+str(k))
plt.scatter(xs, WLs, color='c', alpha=0.9, label='WHL rule')
plt.yscale('log')
plt.xscale('log')
plt.xlabel('x', fontsize=8)
plt.ylabel('y', fontsize=8)
plt.xlim(min(xs)*0.5, max(xs)*1.5)
plt.ylim(min(WLs)*0.5, max(rts)*1.5)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.subplots_adjust(wspace=0.5, hspace=0.3)
plt.savefig(mydir+'/GitHub/PeasantMath/Roots/figs/WHL_root'+str(k)+'.png', dpi=600)#, bbox_inches = 'tight')#, pad_inches=0)
print 'finished root',k
#plt.show()
|
Mat-Frayne/HsBot | cogs/Admin.py | Python | mit | 7,956 | 0.001131 | #!/usr/bin/env python
"""."""
import asyncio
import inspect
import io
import subprocess
import textwrap
import traceback
from contextlib import redirect_stdout
import discord
from discord.ext import commands
from sys import platform
class Admin:
"""."""
def __init__(self, bot):
"""."""
self.bot = bot
self.extensions = ["Hearthstone", "Runescape", "Commands"]
self._last_result = None
self.sessions = set()
for extension in self.extensions:
try:
self.bot.load_extension('cogs.' + extension)
except Exception as exc:
print('Failed to load extension {}\n{}: {}'
.format(extension, type(exc).__name__, exc))
@staticmethod
def cleanup_code(content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
@staticmethod
def get_syntax_error(err):
if err.text is None:
return f'```py\n{err.__class__.__name__}: {err}\n```'
return f'```py\n{err.text}{"^":>{err.offset}}\n{err.__class__.__name__}: {err}```'
@commands.command(name='reload', aliases=["r"], hidden=True)
@commands.is_owner()
async def _reload(self, ctx, *, module: str = "all"):
"""Reload a module."""
out = ""
if "all" in module.lower():
for extension in self.extensions + ["Admin"]:
try:
self.bot.unload_extension('cogs.' + extension)
self.bot.load_extension('cogs.' + extension)
out = "{}\nReloaded {}".format(out, 'cogs.' + extension)
except Exception as exc:
out = "{}\nFailed to reload {}\n\t{}: {}"\
.format(out, extension, type(exc).__name__, exc)
else:
try:
self.bot.unload_extension(module)
self.bot.load_extension(mo | dule)
out = "Reloaded {}".format(module)
except Exception as exc:
out = '{}: {}'.format(type(exc).__name__, exc)
await ctx.send(out)
@commands.is_owner()
@commands.command(pass_context=True, hidden=True, name='eval')
async def _eval(self, ctx, *, body: str):
"""Evaluates a code"""
|
env = {
'bot': self.bot,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'db': self.bot.models,
'_': self._last_result
}
env.update(globals())
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as exc:
return await ctx.send(f'```py\n{exc.__class__.__name__}: {exc}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
try:
await ctx.message.add_reaction('\u2705')
except Exception:
pass
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
self._last_result = ret
await ctx.send(f'```py\n{value}{ret}\n```')
@commands.is_owner()
@commands.command(pass_context=True, hidden=True)
async def repl(self, ctx):
"""Launches an interactive REPL session."""
variables = {
'ctx': ctx,
'bot': self.bot,
'message': ctx.message,
'guild': ctx.guild,
'channel': ctx.channel,
'author': ctx.author,
'_': None,
}
if ctx.channel.id in self.sessions:
await ctx.send('Already running a REPL session in this channel. Exit it with `quit`.')
return
self.sessions.add(ctx.channel.id)
await ctx.send('Enter code to execute or evaluate. `exit()` or `quit` to exit.')
def check(chk):
"""."""
return chk.author.id == ctx.author.id and \
chk.channel.id == ctx.channel.id and \
chk.content.startswith('`')
while True:
try:
response = await self.bot.wait_for('message', check=check, timeout=10.0 * 60.0)
except asyncio.TimeoutError:
await ctx.send('Exiting REPL session.')
self.sessions.remove(ctx.channel.id)
break
cleaned = self.cleanup_code(response.content)
if cleaned in ('quit', 'exit', 'exit()'):
await ctx.send('Exiting.')
self.sessions.remove(ctx.channel.id)
return
executor = exec
if cleaned.count('\n') == 0:
# single statement, potentially 'eval'
try:
code = compile(cleaned, '<repl session>', 'eval')
except SyntaxError:
pass
else:
executor = eval
if executor is exec:
try:
code = compile(cleaned, '<repl session>', 'exec')
except SyntaxError as err:
await ctx.send(self.get_syntax_error(err))
continue
variables['message'] = response
fmt = None
stdout = io.StringIO()
try:
with redirect_stdout(stdout):
result = executor(code, variables)
if inspect.isawaitable(result):
result = await result
except Exception:
value = stdout.getvalue()
fmt = f'```py\n{value}{traceback.format_exc()}\n```'
else:
value = stdout.getvalue()
if result is not None:
fmt = f'```py\n{value}{result}\n```'
variables['_'] = result
elif value:
fmt = f'```py\n{value}\n```'
try:
if fmt is not None:
if len(fmt) > 2000:
await ctx.send('Content too big to be printed.')
else:
await ctx.send(fmt)
except discord.Forbidden:
pass
except discord.HTTPException as err:
await ctx.send(f'Unexpected error: `{err}`')
@commands.is_owner()
@commands.command()
async def ping(self, ctx):
"""Ping to sydney77's ip."""
await ctx.message.add_reaction('\u2705')
if platform == "linux":
mycmd = subprocess.getoutput("ping -c 10 168.1.24.83")
else:
mycmd=subprocess.getoutput("ping 168.1.24.83")
resp = mycmd.decode("utf-8")
await ctx.send("```prolog\n{}```".format(resp))
@commands.is_owner()
@commands.command()
async def trace(self, ctx, server):
"""."""
await ctx.message.add_reaction('\u2705')
if not server.endswith(".discord.gg"):
server = server + ".discord.gg"
p = subprocess.Popen(
["tracert.exe", server], stdout=subprocess.PIPE)
resp = p.communicate()[0].decode("utf-8")
await ctx.send("```prolog\n" + resp + "```")
# git clone -b rewrite https://github.com/Mat-Frayne/HsBot.git
def setup(bot):
"""."""
bot.add_cog(Admin(bot))
|
Johnzero/erp | openerp/addons/auction/report/photo_shadow.py | Python | agpl-3.0 | 2,248 | 0.010231 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def convert_catalog(from_file, to_file, size=220) :
return __convert(from_file, to_file, size)
def convert(from_file, to_file):
size = 95
__convert(from_file, to_file, size=95)
def __convert(from_file, to_file, size=95):
from PIL import Image, ImageDraw, ImageFilter
im = Image.open(from_file)
if float(im.size[1]/im.size[0])>2:
im = im.resize((im.size[0]*size/im.size[1], size))
else:
im = im.resize((size,im.size[1]*size/im.size[0]))
newimg = Image.new('RGB', (im.size[0]+8,im.size[1]+8), (255,255,255) ) |
draw = ImageDraw.Draw(newimg)
draw.rectangle((6, im.size[1]-5, im.size[0], im.size[1]+5), fill=(90,90,90))
draw.rectangle((im.size[0]-5, 6, im.size[0]+5, im.size[1]), fill=(90,90,90))
del draw
newimg = newimg.filter(ImageFilter.BLUR)
newimg = newimg.filter(ImageFilt | er.BLUR)
newimg = newimg.filter(ImageFilter.BLUR)
newimg.paste(im, (0,0))
draw = ImageDraw.Draw(newimg)
draw.rectangle((0, 0, im.size[0], im.size[1]), outline=(0,0,0))
del draw
to_fp = file(to_file, 'wb')
newimg.save(to_fp, "JPEG")
to_fp.close()
res = newimg.size
del im
del newimg
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
radiosilence/wire | wire/frontend/views.py | Python | mit | 20,501 | 0.002975 | import json
import redis
import uuid
import subprocess
from flask import Blueprint, request, session, g, redirect, url_for, abort, \
render_template, flash, current_app
from flaskext.uploads import (UploadSet, configure_uploads, IMAGES,
UploadNotAllowed)
from wire.frontend import frontend
from wire.models import User, UserValidationError, \
Update, UpdateError, UserNotFoundError
from wire.models import Message, \
MessageValidationError
from wire.models import Inbox
from wire.models import Thread, \
ThreadError, InvalidRecipients
from wire.models import Contacts, \
ContactExistsError, ContactInvalidError
from wire.models import Event, EventValidationError,\
EventNotFoundError, EventCommentError
from wire.utils import Auth, AuthError
from wire import uploaded_images, uploaded_avatars
@frontend.route('/timeline')
def timeline():
try:
g.user.username
except AttributeError:
abort(401)
timeline = g.user.timeline
return render_template('timeline.html',
timeline=timeline.updates,
title='Timeline')
@frontend.route('/mentions')
def mentions():
try:
g.user.username
except AttributeError:
abort(401)
timeline = g.user.mentions
g.user.reset_mentions()
return render_template('timeline.html',
timeline=timeline.updates,
title='Mentions')
@frontend.route('/user/<string:username>')
def user_updates(username):
u = User(redis=g.r)
try:
u.load_by_username(username)
except UserNotFoundError:
abort(404)
if u.username in g.user.contacts:
state = 'contact'
else:
state = 'nocontact'
return render_template('timeline.html',
timeline=u.posted.updates,
user=u,
state=state,
title='%s' % username,
disable_input=True)
@frontend.route('/conversation/<int:conversation_id>')
def conversation(conversation_id):
updates = []
for key in g.r.lrange('conversation:%s' % conversation_id, 0, -1):
u = Update(redis=g.r, user=g.user)
u.load(key)
updates.append(u)
return render_template('timeline.html',
timeline=updates,
title='Conversation #%s' % conversation_id,
disable_input=True,
disable_userbox=True)
@frontend.route('/post-update', methods=['POST'])
def post_update():
try:
g.user.username
except AttributeError:
abort(401)
u = Update(text=request.form['text'], user=g.user, redis=g.r,
respond=request.form['respond'])
try:
u.save()
flash("Update posted.", 'success')
except UpdateError:
pass
return redirect(url_for('frontend.timeline'))
@frontend.route('/respond/<int:update_id>')
def respond_update(update_id):
u = Update(redis=g.r, user=g.user)
u.load(update_id)
return render_template('respond.html',
update=u)
@frontend.route('/delete-update/<int:update_id>')
def delete_update(update_id):
u = Update(redis=g.r, user=g.user)
u.load(update_id)
if u.user.key != g.user.key:
abort(401)
u.delete()
flash("Update deleted.", 'success')
return redirect(url_for('frontend.timeline'))
@frontend.route('/')
def intro():
return render_template('intro.html')
@frontend.route('/developers')
def developers():
return render_template('developers.html')
@frontend.route('/inbox')
def inbox():
i = g.inbox
i.load_messages()
if len(i.threads) == 0:
empty = True
else:
empty = False
return render_template('inbox.html',
threads=i.threads,
empty=empty)
@frontend.route('/inbox/mark-all-read')
def mark_all_read():
try:
g.user.username
except AttributeError:
abort(401)
i = g.inbox
i.load_messages()
for thread in i.threads:
thread.reset_unread_count()
flash('All messages marked read.', 'success')
return redirect(url_for('frontend.inbox'))
@frontend.route('/thread/<int:thread_id>', methods=['POST', 'GET'])
def view_thread(thread_id):
if str(thread_id) not in g.user.get_threads():
abort(401)
t = Thread(redis=g.r, user=g.user)
try:
t.load(thread_id)
if request.method == "POST":
if request.form['action'] == 'reply':
m = Message(redis=g.r, key=False, user=g.user)
m.update(request.form)
t.save()
t.add_message(m)
m.send()
t.load(thread_id)
flash("Reply has been sent.", 'success')
return redirect(url_for('frontend.view_thread', thread_id=t.key))
return render_template('thread.html',
messages=t.messages,
thread=t,
subject=t.subject)
except ThreadError:
abort(404)
@frontend.route('/send/<string:recipient>')
def send_message_recipient(recipient):
return send_message(recipient=recipient)
@frontend.route('/send', methods=['POST', 'GET'])
def send_message(recipient=False):
try:
g.user.username
except AttributeError:
abort(401)
t = Thread(redis=g.r, user=g.user)
m = Message(redis=g.r, key=False, user=g.user)
if(recipient):
try:
t.parse_recipients(recipient)
except InvalidRecipients:
pass
if request.method == 'POST':
try:
t.subject = request.form['subject']
m.update(request.form)
t.parse_recipients(request.form['recipients'])
t.encryption = request.form['encryption']
t.save()
t.add_message(m)
m.send()
flash('Your message has been successfully wired, \
and should arrive shortly.', 'success')
return redirect(url_for('frontend.view_thread', thread_id=t.key))
except MessageValidationError:
for error in m.validation_errors:
flash(error, 'error')
except InvalidRecipients:
for recipient in t.invalid_recipients:
flash('%s is not a valid recipient' % recipient, 'error')
return render_template('forms/message.html',
new=True,
message=m,
thread=t,
recipients=t.get_form_recipients())
@frontend.route('/delete-message/<int:message_id>/<int:thread_id>',
methods=['POST', 'GET'])
def delete_message(message_id, thread_id):
if request.method == 'POST':
t = Thread(redis=g.r, user=g.user)
t.load(thread_id)
m = Message(redis=g.r, user=g.user, key=message_id)
m.load()
if g.r.get('username:%s' % m.sender.username) != g.user.key:
abort(401)
t.delete_message(m)
flash(u'Message deleted', 'success')
return redirect(url_for('frontend.vi | ew_thread', thread_ | id=thread_id))
else:
return render_template('confirm.html',
_message='Are you sure you want to delete this message?',
_ok=url_for('frontend.delete_message', thread_id=thread_id,
message_id=message_id),
_cancel=url_for('frontend.view_thread', thread_id=thread_id)
)
@frontend.route('/thread/<int:thread_id>/mark-read')
def mark_thread_read(thread_id):
try:
if str(thread_id) not in g.user.get_threads():
abort(401)
except AttributeError:
abort(401)
t = Thread(redis=g.r, user=g.user)
t.load(thread_id)
t.reset_unread_count()
abort(200)
@frontend.route('/unsubscribe-thread/<int:thread_id>', methods=['POST', 'GET'])
def unsubscribe_thread(thread_id):
try:
g.user.username
except AttributeError:
abort(401)
if request.method == "POST":
t = Thread(redis=g.r, user=g.user)
t.load(thread_id)
t.unsubscribe()
flash(u'Unsubscribed from thread.', 'success')
return redirect(url_for('frontend.inbox'))
else:
return render_template('confirm.html',
_message='Are you sure you wish to unsubscribe from this thread?',
_ok=url_for('frontend.unsubscribe_thread', thread_id=thread_id),
|
chreman/isis-praktikum | sustainabilitylsa.py | Python | mit | 20,232 | 0.007266 | #
import string
import glob
import time
import xml.etree.ElementTree as ET
from itertools import chain
# Import reader
import xlrd
import csv
import requests
# Import data handlers
import collections
# Import Network Analysis Tools
import networkx as nx
import igraph as ig
# Import language processing tools
from gensim import corpora, models
from nltk.corpus import stopwords
from nltk.stem.snowball import EnglishStemmer as ES
def main():
"""
Runs a standard analysis.
Put pdf files in an 'files' subfolder in the working
directory, and run the script.
"""
depth = "paragraph"
convert_pdfs()
xmls = get_filelist("files", "xml")
docs = []
for xml in xmls:
try:
docs.append(ET.ElementTree(file=xml))
except Exception, e:
print e, xml
continue
print "%s documents are in the corpus." %str(len(docs))
#docs = [ET.ElementTree(file=xml) for xml in xmls]
texts = [[p.text for p in doc.getroot().findall(".//*[@class='DoCO:TextChunk']")
if p.text != None]
for doc in docs]
perform_analysis("isis", content = texts,
model="lsa", depth=depth, num_topics=110,
show_topics = 20, num_words=20, threshold=0)
perform_analysis("isis", content = texts,
model="lda", depth=depth, num_topics = 20,
show_topics = 20, num_words=10)
def convert_pdfs():
"""
Converts pdfs to xml via
https://gist.github.com/yoavram/4351598
and http://pdfx.cs.man.ac.uk
It looks for unconverted pdfs.
"""
pdfs = get_filelist("files", "pdf")
pdfs = set([f.rstrip(".pdf").replace(" ", "") for f in pdfs])
xmls = get_filelist("files", "xml")
xmls = set([f.rstrip(".xml") for f in xmls])
filelist = pdfs - xmls
for pdf in filelist:
pypdfx(pdf)
def perform_analysis(keyword, content=None, testdata = None,
model="lsa", depth="document", num_topics = 20,
show_topics = 20, num_words = 20, threshold=0):
"""
Workflow for topic analysis.
Looks for earlier dicionary and corpus, if not creates them
from provided documents.
Creates either LSA or LDA model and evaluates it.
Output: nodes and edges csv for gephi, a topic csv and
a network visualization.
"""
try:
dictionary, corpus = load_dictionary(keyword, depth)
except Exception, e:
dictionary, corpus = preprocess_content(content, keyword, depth)
print "\nBeginning with analysis at %s." % time.ctime()
if model is "lsa":
_model = create_lsi_model(dictionary, corpus, num_topics)
if model is "lda":
_model = create_lda_model(dictionary, corpus, num_topics)
testdata = load_reference_texts(model)
evaluate_model(keyword, testdata, model, _model, num_words, threshold, depth) |
#test_for_topic_convergence(keyword, testdata, model, _model, num_topics, threshold, depth)
export_matrix(keyword, dictionary, model, _model, show_topics, num_words, depth)
export_topic_list(keyword, dictionary, model, _model, show_topics, num_words, depth)
export_word_graph(keyword, dictionary, model, _model, show_topics, num_words, threshold | , depth)
def get_filelist(path, extension):
"""
Creates a list of files in a folder with a given extension.
Navigate to this folder first.
"""
return [f for f in glob.glob("{0}/*.{1}".format(path, extension))]
def preprocess_content(content, keyword, depth="document"):
"""
Takes a list of documents, removes non-alphabetical characters,
removes a list of stopwords, performs stemming and creates
a dictionary and a corpus for this set of documents for re-use.
"""
print "\nBeginning with preprocessing at %s." % time.ctime()
if depth is "document":
if type(content[0]) is list:
documents = [" ".join(text) for text in content]
else:
documents = content
if depth is "paragraph":
documents = list(chain.from_iterable(content))
if depth is "sentence":
documents = list(chain.from_iterable(["".join(text).split(". ") for text in content]))
#filter out digits and special characters
delete_table = string.maketrans(string.ascii_lowercase,
' ' * len(string.ascii_lowercase))
# remove common words and tokenize
stope = stopwords.words("english")
#stoplist can be extended like this:
# stope.extend(["worda","wordb",...])
with open("stopwords.csv") as stopcsv:
reader = csv.reader(stopcsv)
for row in reader:
stope.extend(row)
print "\nThis is a raw input document:"
print documents[0]
#texts are cleaned (characters only), filtered (stopwords removed) and stemmed (reduced to word stem)
texts = [[ES().stem(str(word.encode("utf8")).translate(None, delete_table))
for word in document.lower().split()
if str(word.encode("utf8")).translate(None, delete_table) not in stope]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens)
if all_tokens.count(word) == 1)
texts = [[word for word in text
if word not in tokens_once and len(word) > 1]
for text in texts]
print "\nThis is the raw document after cleaning, filtering, stemming and removal of unique words."
print texts[0]
#create dictionary and save for later use
dictionary = corpora.Dictionary(texts)
dictionary.save('{0}_{1}.dict'.format(keyword, depth))
#create corpus and save for later use
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('{0}_{1}_corpus.mm'.format(keyword, depth), corpus)
return dictionary, corpus
def preprocess_query(query):
"""
Performs preprocessing steps for a query string.
Removing stopword, filtering for alphabet character only,
and stemming.
"""
try:
if type(query[0]) is list:
query = [" ".join(text) for text in query]
except Exception, e:
pass
if type(query) is list:
query = " ".join(query)
#filter out digits and special characters
delete_table = string.maketrans(string.ascii_lowercase,
' ' * len(string.ascii_lowercase))
# remove common words and tokenize
stope = stopwords.words("english")
#stoplist can be extended like this:
with open("stopwords.csv") as stopcsv:
reader = csv.reader(stopcsv)
for row in reader:
stope.extend(row)
query = [ES().stem(str(word.encode("utf8")).translate(None, delete_table))
for word in query.lower().split()
if str(word.encode("utf8")).translate(None, delete_table) not in stope]
return query
def load_dictionary(keyword, depth):
"""
Load dictionary and corpus from disk.
"""
dictionary = corpora.Dictionary.load('{0}_{1}.dict'.format(keyword, depth))
corpus = corpora.MmCorpus('{0}_{1}_corpus.mm'.format(keyword, depth))
return dictionary, corpus
def create_lsi_model(dictionary, corpus, num_topics):
"""
Perform an analysis with an LSI-Model.
"""
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
return models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=num_topics)
def create_lda_model(dictionary, corpus, num_topics):
"""
Perform an analysis with an LDA-Model.
"""
return models.LdaModel(corpus, id2word=dictionary, num_topics=num_topics)
def load_reference_texts(model):
"""
Loads reference texts from disk.
Reference texts should be placed in a folder in the scripts
directory and have to be direct output from MaxQDA.
"""
with open("testdata/{0}_codes.csv".format(model)) as codes:
reader = csv.reader(codes)
translation = {row[0]:int(row[1]) for row in re |
billlai95/bilimining | biligrab/SentenceSimilarityEvaluation.py | Python | apache-2.0 | 2,527 | 0.001187 | # @see http://www.idayer.com/sentences-similarity.html
# kindled by https://github.com/ineo6/chinese-segmentation
import sys
import math
from pinyin.pinyin import Pinyin
import biligrab.Functions
from importlib import reload
reload(sys)
class Evaluator:
d = {}
@staticmethod
def log(x):
if not x:
return float('-inf')
else:
return math.log(x)
@staticmethod
def prob(x):
if x in Evaluator.d:
return Evaluator.d[x]
elif len(x) > 1:
return 0
else:
return 1
first_run = True
@staticmethod
def init(filename='dic_utf8.dic'):
Evaluator.d['_t_'] = 0.0
with open(filename, 'r', encoding="utf-8") as handle:
for line in handle:
word, freq = line.split('\t')[0:2]
Evaluator.d['_t_'] += int(freq) + 1
try:
Evaluator.d[word.decode('utf-8')] = int(freq) + 1
except:
Evaluator.d[word] = int(freq) + 1
@staticmethod
def __solve(s):
l = len(s)
p = [0] * (l+1)
t = [0] * l
for i in range(l - 1, -1, -1):
p[i], t[i] = max((Evaluator.log(Evaluator.prob(s[i:i + k]) / Evaluator.d['_t_']) + p[i + k], k)
for k in range(1, l - i + 1))
while p[l] < l:
yield s[p[l]:p[l] + t[p[l]]]
p[l] += t[p[l]]
@staticmethod
def __cos_dist(a, b):
if len(a) != len(b):
return None
part_up = | 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a, b):
part_up += a1 * b1
a_sq += a1 ** 2
b_sq += b1 ** 2
part_down = math.sqrt(a_sq * b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
@staticmethod
def lingo_dist(sentence1, sentence2):
if Evaluator.fi | rst_run:
Evaluator.init()
Evaluator.first_run = False
s1 = list(Evaluator.__solve(sentence1))
s2 = list(Evaluator.__solve(sentence2))
key = list(set(s1 + s2))
keylenth = len(key)
keyvalue = 0
sk1 = [keyvalue] * keylenth
sk2 = [keyvalue] * keylenth
for index, keyElement in enumerate(key):
if keyElement in s1:
sk1[index] += 1
if keyElement in s2:
sk2[index] += 1
return Evaluator.__cos_dist(sk1, sk2)
|
Transkribus/TranskribusDU | TranskribusDU/tasks/TablePrototypes/DU_ABPTableRCut1SIO.py | Python | bsd-3-clause | 34,506 | 0.015218 | # -*- coding: utf-8 -*-
"""
DU task for ABP Table: doing jointly row BIESO and horizontal cuts
block2line edges do not cross another block.
The cut are based on baselines of text blocks.
- the labels of horizontal cuts are SIO (instead of SO in previous version)
Copy | right Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant | agreement No 674943.
"""
import sys, os
import math
from lxml import etree
import collections
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks import _checkFindColDir, _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from xml_formats.PageXml import MultiPageXml
import graph.GraphModel
from crf.Edge import Edge, SamePageEdge
from crf.Graph_MultiPageXml import Graph_MultiPageXml
from crf.NodeType_PageXml import NodeType_PageXml_type_woText
#from crf.FeatureDefinition_PageXml_std_noText import FeatureDefinition_PageXml_StandardOnes_noText
from crf.FeatureDefinition import FeatureDefinition
from crf.Transformer import Transformer, TransformerListByType
from crf.Transformer import EmptySafe_QuantileTransformer as QuantileTransformer
from crf.Transformer_PageXml import NodeTransformerXYWH_v2, NodeTransformerNeighbors, Node1HotFeatures
from crf.Transformer_PageXml import Edge1HotFeatures, EdgeBooleanFeatures_v2, EdgeNumericalSelector
from crf.PageNumberSimpleSequenciality import PageNumberSimpleSequenciality
from tasks.DU_ABPTableCutAnnotator import BaselineCutAnnotator
class GraphCut(Graph_MultiPageXml):
"""
We specialize the class of graph because the computation of edges is quite specific
"""
#Cut stuff
#iModulo = 1 # map the coordinate to this modulo
fMinPageCoverage = 0.5 # minimal coverage to consider a GT table separator
iLineVisibility = 5 * 11 # a cut line sees other cut line up to N pixels downward
iBlockVisibility = 3*7*13 # a block sees neighbouring cut lines at N pixels
_lClassicNodeType = None
@classmethod
def setClassicNodeTypeList(cls, lNodeType):
"""
determine which type of node goes thru the classical way for determining
the edges (vertical or horizontal overlap, with occlusion, etc.)
"""
cls._lClassicNodeType = lNodeType
def parseDocFile(self, sFilename, iVerbose=0):
"""
Load that document as a CRF Graph.
Also set the self.doc variable!
Return a CRF Graph object
"""
self.doc = etree.parse(sFilename)
self.lNode, self.lEdge = list(), list()
self.lNodeBlock = [] # text node
self.lNodeCutLine = [] # cut line node
root = self.doc.getroot()
doer = BaselineCutAnnotator()
doer.setLabelScheme_SIO() #use SIO instead of SO labels!
#doer.setModulo(self.iModulo) # this is optional
#load the groundtruth table separators, if any, per page (1 in tABP)
ltlYlX = doer.get_separator_YX_from_DOM(root, self.fMinPageCoverage)
for (lHi, lVi) in ltlYlX:
traceln(" - found %d horizontal, %d vertical GT separators" % (len(lHi), len(lVi)))
#create DOM node reflecting the cuts
#first clean (just in case!)
n = doer.remove_cuts_from_dom(root)
if n > 0:
traceln(" - removed %d pre-existing cut lines" % n)
# if GT, then we have labelled cut lines in DOM
_ltlYCutXCut = doer.add_cut_to_DOM(root, ltlYlX=ltlYlX)
lClassicType = [nt for nt in self.getNodeTypeList() if nt in self._lClassicNodeType]
lSpecialType = [nt for nt in self.getNodeTypeList() if nt not in self._lClassicNodeType]
for (pnum, page, domNdPage) in self._iter_Page_DocNode(self.doc):
#now that we have the page, let's create the node for each type!
lClassicPageNode = [nd for nodeType in lClassicType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]
lSpecialPageNode = [nd for nodeType in lSpecialType for nd in nodeType._iter_GraphNode(self.doc, domNdPage, page) ]
self.lNode.extend(lClassicPageNode) # e.g. the TextLine objects
self.lNodeBlock.extend(lClassicPageNode)
self.lNode.extend(lSpecialPageNode) # e.g. the cut lines!
self.lNodeCutLine.extend(lSpecialPageNode)
#no previous page to consider (for cross-page links...) => None
lClassicPageEdge = Edge.computeEdges(None, lClassicPageNode)
self.lEdge.extend(lClassicPageEdge)
# Now, compute edges between special and classic objects...
lSpecialPageEdge = self.computeSpecialEdges(lClassicPageNode,
lSpecialPageNode,
doer.bCutIsBeforeText)
self.lEdge.extend(lSpecialPageEdge)
#if iVerbose>=2: traceln("\tPage %5d %6d nodes %7d edges"%(pnum, len(lPageNode), len(lPageEdge)))
if iVerbose>=2:
traceln("\tPage %5d"%(pnum))
traceln("\t block: %6d nodes %7d edges (to block)" %(pnum, len(lClassicPageNode), len(lClassicPageEdge)))
traceln("\t line: %6d nodes %7d edges (from block)"%(pnum, len(lSpecialPageNode), len(lSpecialPageEdge)))
if iVerbose: traceln("\t\t (%d nodes, %d edges)"%(len(self.lNode), len(self.lEdge)) )
return self
@classmethod
def computeSpecialEdges(cls, lClassicPageNode, lSpecialPageNode):
"""
return a list of edges
"""
raise Exception("Specialize this method")
class Edge_BL(Edge):
    """Edge block-to-Line: an edge whose endpoints are a (classic) block node
    and a cut-line node."""
    pass
class Edge_LL(Edge):
    """Edge line-to-Line: an edge between two cut-line nodes."""
    pass
class GraphCut_H(GraphCut):
"""
Only horizontal cut lines
"""
    def __init__(self):
        # NOTE(review): the GraphCut base-class __init__ is not invoked here —
        # confirm this is intentional.
        # showClassParam only traces the parameters on its first successful call.
        self.showClassParam()
@classmethod
def showClassParam(cls):
try:
cls.bParamShownOnce
assert cls.bParamShownOnce == True
except:
#traceln(" - iModulo : " , cls.iModulo)
traceln(" - block_see_line : " , cls.iBlockVisibility)
traceln(" - line_see_line : " , cls.iLineVisibility)
traceln(" - fMinPageCoverage : " , cls.fMinPageCoverage)
cls.bParamShownOnce = True
def getNodeListByType(self, iTyp):
if iTyp == 0:
return self.lNodeBlock
else:
return self.lNodeCutLine
def getEdgeListByType(self, typA, typB):
if typA == 0:
if typB == 0:
return (e for e in self.lEdge if isinstance(e, SamePageEdge))
else:
return (e for e in self.lEdge if isinstance(e, Edge_BL))
else:
if typB == 0:
return []
else:
return (e for e in self.lEdge if isinstance(e, Edge_LL))
@classmethod
def computeSpecialEdges(self, lClassicPageNode, lSpecialPageNode,
bCutIsBeforeText):
"""
Compute:
- edges between each block and the cut line above/across/below the block
- edges between cut lines
return a list of edges
"""
#augment the block with the coordinate of its baseline central point
for blk in lClassicPageNode:
try:
x,y = BaselineCutAnnotator.getDomBaselineXY(blk.node)
blk.x_bslne = x
blk.y_bslne = y
except IndexError:
|
kareemallen/beets | beetsplug/types.py | Python | mit | 1,775 | 0 | # -*- coding: utf-8 -*-
# This file is | part of beets.
# Copyright 2015, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following con | ditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets.dbcore import types
from beets.util.confit import ConfigValueError
from beets import library
class TypesPlugin(BeetsPlugin):
    """Expose user-configured types for flexible item and album fields."""

    @property
    def item_types(self):
        return self._types()

    @property
    def album_types(self):
        return self._types()

    def _types(self):
        """Map each configured field name to its beets type.

        Raises ConfigValueError when the configuration names an unknown type.
        """
        if not self.config.exists():
            return {}
        field_types = {}
        for key, value in self.config.items():
            type_name = value.get()
            if type_name == 'int':
                field_types[key] = types.INTEGER
            elif type_name == 'float':
                field_types[key] = types.FLOAT
            elif type_name == 'bool':
                field_types[key] = types.BOOLEAN
            elif type_name == 'date':
                field_types[key] = library.DateType()
            else:
                raise ConfigValueError(
                    u"unknown type '{0}' for the '{1}' field"
                    .format(value, key))
        return field_types
|
nickchen-mitac/fork | src/ava/core/defines.py | Python | apache-2.0 | 1,103 | 0.001813 | # -*- coding: utf-8 -*-
"""
Various definitions used across different packages.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from .. import VERSION_STRING
# return as the root resource.
AGENT_INFO = {
    "EAvatar": "A versatile agent.",
    "version": VERSION_STRING,
    "vendor": {
        "name": "EAvatar Technology Ltd.",
        "version": VERSION_STRING
    },
}
# activated engines
# Engine entry points, each given as a "module.path:ClassName" spec.
INSTALLED_ENGINES = [
    "ava.log.engine:LogEngine",
    "ava.data.engine:DataEngine",
    "ava.task.engine:TaskEngine",
    "ava.mod.engine:ModuleEngine",
    "ava.user.engine:UserEngine",
    "ava.job.engine:JobEngine",
    "ava.web.webfront:WebfrontEngine",
]
##### Environment variable ####
AVA_POD_FOLDER = 'AVA_POD' # where the working directory.
AVA_AGENT_SECRET = 'AVA_AGENT_SECRET' # the agent's own secret key.
AVA_SWARM_SECRET = 'AVA_SWARM_SECRET' # the swarm's secret key.
AVA_USER_XID = 'AVA_USER_XID' # the user's XID.
# tries to import definitions from the global settings.
try:
from ava_settings import *
except ImportError:
pass
|
CroceRossaItaliana/jorvik | autenticazione/viste.py | Python | gpl-3.0 | 986 | 0.002028 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import redirect
from django.contrib.auth.views import logout as original_logout
from loginas import settings as la_settings
from loginas.utils import restore_original_login
def logout(request, next_page=None, template_name='registration/logged_out.html',
           redirect_field_name=REDIRECT_FIELD_NAME, extra_context=None):
    """
    Log out, restoring the original login when the current session was
    started via django-loginas impersonation.

    This can replace your default logout view. In your settings, do:

    from django.core.urlresolvers import reverse_lazy
    LOGOUT_URL = reverse_lazy('logout')
    """
    original_session = request.session.get(la_settings.USER_SESSION_FLAG)
    if original_session:
        # The request was impersonating another user: restore the original
        # login instead of performing a full logout.
        restore_original_login(request)
        return redirect(la_settings.LOGOUT_REDIRECT)
    else:
        return original_logout(request, next_page, template_name,
                               redirect_field_name, extra_context)
|
kasper190/SPAforum | forum/api/permissions.py | Python | mit | 441 | 0.006803 | fr | om rest_framework.permissions import BasePermission, SAFE_METHODS
class IsAdminOrModeratorOrReadOnly(BasePermission):
    """Allow writes only to staff or to moderators of the object's subforum.

    Safe (read-only) requests are always permitted.
    """

    def has_object_permission(self, request, view, obj):
        user = request.user
        # Safe methods are open to everyone; staff may always write.
        if request.method in SAFE_METHODS or (user and user.is_staff):
            return True
        # Otherwise the user must moderate this object's subforum.
        return obj.subforum.moderators.filter(id=user.id).exists()
leiyue/microblog | app/forms.py | Python | mit | 1,399 | 0.00143 | # -*- coding: utf-8 -*-
# @Date : 2016-01-21 13:15
# @Author : le | iyue (mr.leiyue@gmail.com)
# @Link : https://leiyue.wordpress.com/
from flask_wtf import Form
from wtforms import StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, length
from .models import User
class LoginForm(Form):
    """OpenID login form with a remember-me option."""
    openid = StringField('openid', validators=[DataRequired()])
    remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
    """Profile edit form with a uniqueness check on the nickname."""

    nickname = StringField('nickname', validators=[DataRequired()])
    about_me = TextAreaField('about_me', validators=[length(min=0, max=140)])

    def __init__(self, original_nickname, *args, **kwargs):
        Form.__init__(self, *args, **kwargs)
        # Remember the current nickname so an unchanged value always validates.
        self.original_nickname = original_nickname

    def validate(self):
        """Run the standard validators, then reject already-taken nicknames."""
        if not Form.validate(self):
            return False
        new_nickname = self.nickname.data
        if new_nickname == self.original_nickname:
            return True
        taken = User.query.filter_by(nickname=new_nickname).first() is not None
        if taken:
            self.nickname.errors.append('This nickname is already in use, Please choose another one.')
            return False
        return True
class PostForm(Form):
    """Form for submitting a post: required, at most 140 characters."""
    post = TextAreaField('post', validators=[DataRequired(), length(min=0, max=140)])
class SearchForm(Form):
    """Single-field search form; the query text is required."""
    search = StringField('search', validators=[DataRequired()])
|
abertschi/postcards | postcards/plugin_pexels/postcards_pexels.py | Python | mit | 618 | 0.001618 | #!/usr/bin/env python
# encoding: utf-8
from postcards.postcards import Postcards
from postcards.plugin_pexels.util.pexels import get_random_image_url, read_from_url
import sys
class PostcardsPexel(Postcards):
    """
    Send postcards with random images from pexels.com
    """
    def get_img_and_text(self, plugin_config, cli_args):
        """Pick a random pexels.com image and return it with an empty text."""
        url = get_random_image_url()
        self.logger.info('using pexels picture: ' + url)
        return {
            'img': read_from_url(url),
            'text': ''
        }
def main():
    # CLI entry point: forward the command-line arguments (minus argv[0]).
    PostcardsPexel().main(sys.argv[1:])
if __name__ == '__main__':
main()
|
qianwenming/mapnik | bindings/python/mapnik/__init__.py | Python | lgpl-2.1 | 35,583 | 0.005115 | #
# This file is part of Mapnik (C++/Python mapping toolkit)
# Copyright (C) 2009 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""Mapnik Python module.
Boost Python bindings to the Mapnik C++ shared library.
Several things happen when you do:
>>> import mapnik
1) Mapnik C++ objects are imported via the '__init__.py' from the '_mapnik.so' shared object
(_mapnik.pyd on win) which references libmapnik.so (linux), libmapnik.dylib (mac), or
mapnik.dll (win32).
2) The paths to the input plugins and font directories are imported from the 'paths.py'
file which was constructed and installed during SCons installation.
3) All available input plugins and TrueType fonts are automatically registered.
4) Boost Python metaclass injectors are used in the '__init__.py' to extend several
objects adding extra convenience when accessed via Python.
"""
import itertools
import os
import sys
import warnings
try:
import json
except ImportError:
import simplejson as json
def bootstrap_env():
    """
    If an optional settings file exists, inherit its
    environment settings before loading the mapnik library.

    This feature is intended for customized packages of mapnik.

    The settings file should be a python file with an 'env' variable
    that declares a dictionary of key:value pairs to push into the
    global process environment, if not already set, like:

        env = {'ICU_DATA':'/usr/local/share/icu/'}
    """
    settings_path = os.path.join(os.path.dirname(__file__), 'mapnik_settings.py')
    if not os.path.exists(settings_path):
        return
    from mapnik_settings import env
    for key, value in env.items():
        # setdefault only writes the variable when it is not already present,
        # which matches the original "if not already set" semantics without
        # snapshotting os.environ.keys() first.
        os.environ.setdefault(key, value)
bootstrap_env()
from _mapnik import *
import printing
printing.renderer = render
# The base Boost.Python class
BoostPythonMetaclass = Coord.__class__
class _MapnikMetaclass(BoostPythonMetaclass):
    """Metaclass that injects the new class' attributes into its base classes.

    Used to extend Boost.Python-generated classes: every attribute defined on
    an injector subclass is copied onto each eligible base class, and any
    pre-existing base attribute of the same name is preserved under a '_c_'
    prefix.
    """
    def __init__(self, name, bases, dict):
        # NOTE: 'dict' shadows the builtin; kept unchanged for compatibility.
        for b in bases:
            # only patch bases whose metaclass is neither this class nor
            # plain 'type' (i.e. the Boost.Python-generated bases).
            if type(b) not in (self, type):
                for k,v in list(dict.items()):
                    if hasattr(b, k):
                        # keep the original binding reachable as '_c_<name>'
                        setattr(b, '_c_'+k, getattr(b, k))
                    setattr(b,k,v)
        return type.__init__(self, name, bases, dict)
# metaclass injector compatible with both python 2 and 3
# http://mikewatkins.ca/2008/11/29/python-2-and-3-metaclasses/
_injector = _MapnikMetaclass('_injector', (object, ), {})
def Filter(*args,**kwargs):
    """Deprecated alias of Expression, kept for backward compatibility."""
    warnings.warn("'Filter' is deprecated and will be removed in Mapnik 3.x, use 'Expression' instead",
        DeprecationWarning, 2)
    return Expression(*args, **kwargs)
class Envelope(Box2d):
    """Deprecated alias of Box2d; emits a DeprecationWarning on construction."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "'Envelope' is deprecated and will be removed in Mapnik 3.x, use 'Box2d' instead",
            DeprecationWarning, 2)
        Box2d.__init__(self, *args, **kwargs)
class _Coord(Coord,_injector):
    """
    Represents a point with two coordinates (either lon/lat or x/y).

    Following operators are defined for Coord:

    Addition and subtraction of Coord objects:
    >>> Coord(10, 10) + Coord(20, 20)
    Coord(30.0, 30.0)
    >>> Coord(10, 10) - Coord(20, 20)
    Coord(-10.0, -10.0)

    Addition, subtraction, multiplication and division between
    a Coord and a float:
    >>> Coord(10, 10) + 1
    Coord(11.0, 11.0)
    >>> Coord(10, 10) - 1
    Coord(-9.0, -9.0)
    >>> Coord(10, 10) * 2
    Coord(20.0, 20.0)
    >>> Coord(10, 10) / 2
    Coord(5.0, 5.0)

    Equality of coords (as pairwise equality of components):
    >>> Coord(10, 10) is Coord(10, 10)
    False
    >>> Coord(10, 10) == Coord(10, 10)
    True
    """
    def __repr__(self):
        return 'Coord(%s,%s)' % (self.x, self.y)
    def forward(self, projection):
        """
        Projects the point from the geographic coordinate
        space into the cartesian space. The x component is
        considered to be longitude, the y component the
        latitude.

        Returns the easting (x) and northing (y) as a
        coordinate pair.

        Example: Project the geographic coordinates of the
                 city center of Stuttgart into the local
                 map projection (GK Zone 3/DHDN, EPSG 31467)
        >>> p = Projection('+init=epsg:31467')
        >>> Coord(9.1, 48.7).forward(p)
        Coord(3507360.12813,5395719.2749)
        """
        return forward_(self, projection)
    def inverse(self, projection):
        """
        Projects the point from the cartesian space
        into the geographic space. The x component is
        considered to be the easting, the y component
        to be the northing.

        Returns the longitude (x) and latitude (y) as a
        coordinate pair.

        Example: Project the cartesian coordinates of the
                 city center of Stuttgart in the local
                 map projection (GK Zone 3/DHDN, EPSG 31467)
                 into geographic coordinates:
        >>> p = Projection('+init=epsg:31467')
        >>> Coord(3507360.12813,5395719.2749).inverse(p)
        Coord(9.1, 48.7)
        """
        return inverse_(self, projection)
class _Box2d(Box2d,_injector):
    """
    Represents a spatial envelope (i.e. bounding box).

    Following operators are defined for Box2d:

    Addition:
    e1 + e2 is equvalent to e1.expand_to_include(e2) but yields
    a new envelope instead of modifying e1

    Subtraction:
    Currently e1 - e2 returns e1.

    Multiplication and division with floats:
    Multiplication and division change the width and height of the envelope
    by the given factor without modifying its center..
    That is, e1 * x is equivalent to:
           e1.width(x * e1.width())
           e1.height(x * e1.height()),
    except that a new envelope is created instead of modifying e1.

    e1 / x is equivalent to e1 * (1.0/x).

    Equality: two envelopes are equal if their corner points are equal.
    """
    def __repr__(self):
        return 'Box2d(%s,%s,%s,%s)' % \
            (self.minx,self.miny,self.maxx,self.maxy)
    def forward(self, projection):
        """
        Projects the envelope from the geographic space
        into the cartesian space by projecting its corner
        points.

        See also:
           Coord.forward(self, projection)
        """
        # forward_/inverse_ are provided by the _mapnik extension module
        # (star-imported at the top of this file).
        return forward_(self, projection)
    def inverse(self, projection):
        """
        Projects the envelope from the cartesian space
        into the geographic space by projecting its corner
        points.

        See also:
           Coord.inverse(self, projection).
        """
        return inverse_(self, projection)
class _Projection(Projection,_injector):
    """Convenience methods injected into the Boost.Python Projection class."""
    def __repr__(self):
        return "Projection('%s')" % self.params()
    def forward(self,obj):
        """
        Projects the given object (Box2d or Coord)
        from the geographic space into the cartesian space.

        See also:
          Box2d.forward(self, projection),
          Coord.forward(self, projection).
        """
        return forward_(obj,self)
    def inverse(self,obj):
        """
        Projects the given object (Box2d or Coord)
        from the cartesian space into the geographic space.

        See also:
          Box2d.inverse(self, projection),
          Coord.inverse(self, projection).
        """
        return inverse_(obj,self)
class _Feature(Feature,_injector):
    # Expose the feature as a GeoJSON-style mapping (parsed from
    # self.to_geojson()), enabling interop with libraries that consume
    # the __geo_interface__ protocol.
    __geo_interface__ = property(lambda self: json.loads(self.to_geojson()))
class _Path(Pa |
twpDone/SimpleAsIRC | Core.py | Python | mit | 5,635 | 0.017746 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# Core of the application, implement a part of the RFC 1459: Internet Relay Chat ProtocolA (Client side).
from Message import Message
from Action import Action
import socket
import string
import re
import time
#ssl support
from ssl import SSLSocket
##
# Core of the application, implement a part of the RFC 1459: Internet Relay Chat ProtocolA (Client side).
class Core:
##
# Constructor.
# @param self Self.
# @param channel Channel to connect.
# @param name Nickname for the IRC user.
# @param port Port to use.
# @param host IRC Server host.
    def __init__(self,channel="#dut.info",name="twp_bot",port=6667,host='irc.freenode.net'):
        ## @var m_channel
        # Channel to connect.
        self.m_channel=channel
        ## @var m_host
        # IRC Server host.
        self.m_host=host
        ## @var m_port
        # Port to use.
        self.m_port=port
        ## @var m_name
        # Nickname for the IRC user.
        self.m_name=name
        ## @var m_socket
        # Socket to use for I/O.
        # Created unconnected here; start() performs the actual connect.
        self.m_socket=self.createSocket();
##
# Start the communication
# @note connect the .m_socket
# @param self Self.
    def start(self):
        # Blocks until the TCP connection is established, then performs
        # the IRC handshake (PASS/NICK/USER/JOIN).
        self.m_socket.connect((self.m_host, self.m_port)) # connect the socket
        self.helloIRC() # start the IRC protocol
##
# Start the IRC protocol.
# @param self Self.
    def helloIRC(self):
        # NOTE(review): the connection password is hard-coded to 'irc' —
        # confirm this is intended for the targeted servers.
        self.m_socket.sendall('PASS irc\r\n') #send password
        self.m_socket.sendall('NICK '+self.m_name+'\r\n') # define nickname
        self.m_socket.sendall('USER '+self.m_name+' 127.0.0.1 '+self.m_host+' '+self.m_name+'\r\n') # define user
        self.m_socket.sendall('JOIN '+self.m_channel+'\r\n') # Join channel
##
# Create a new TCP socket.
# @param self Self.
def createSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Create a new TCP socket.
##
# Send a Message to a channel or destination user
# @param self Self.
# @param Message Message object to send.
    def send2Chan(self,Message):
        # NOTE: the parameter named 'Message' shadows the imported Message class.
        time.sleep(1) # wait 1 second, avoid the flood kick
        # Send the Message to a channel or destination user
        self.m_socket.sendall("PRIVMSG "+Message.getDest()+" : "+Message.getText()+" \r\n")
##
# Send a Message as an IRC Action to a channel or destination user.
# @param self Self.
# @param Message Message object to send.
def sendAction2Chan(self,Message):
time.sleep(1) # wait 1 second, avoid the flood kick
# Send the Message as an IRC Action to a channel or destination user.
self.m_socket.sendall("PRIVMSG "+Message.getDest()+" :"+chr(1)+"ACTION "+Message.getText()+chr(1)+" \r\n")
##
# Read from the m_socket, if the message dest is the joined chan or the current nickname.
# @note Define how the application react for the "nickname already in use" warning
# @note Send Pong (Ping back) to avoid IRC Time out.
# @return Return a Message Object if the read data match .
# @param self Self.
def read(self):
try:
data = self.m_socket.recv(1024) # read from socket
# delete \r and \n line's ending
data=data.replace("\r","")
data=data.replace("\n","")
#get data for constructing the Message objet.
tabData=data.split(":")
pseudo=data.split("!")[0] # get source's nickname
prompt="<"+"".join(pseudo)+"> : " # def prompt
# pop useless infos
tabData.pop(0)
tabData.pop(0)
msg=":".join(tabData) # re-join text
# if "nickname already in use" warning
if re.match(".*already in use.*".lower(),data.lower())!=None:
self.m_name+="_" # append an underscore to the current nickname
self.m_socket.sendall('NICK '+self.m_name+'\r\n') # define the new nickname
self.m_socket.sendall('JOIN '+self.m_channel+'\r\n') # join the channel with the new nickname
#manque recuperation du destinataire
regName = ".*(PRIVMSG|NOTICE) ("+self.m_name+"|\*) :.*" # define the regex wich must match
# if the regex matches
if re.match(regName.lower(),data.lower())!=None:
# return a new Message object.
return Message(self.m_name,pseudo,msg)
# define the regex wich must match
regChan = ".*PRIVMSG "+self.m_channel+" :.*"
if re.match(regChan.lower(),data.lower())!=None:
# return a new Message object.
return Message(self.m_channel,pseudo,msg)
# if the server sends a ping, ping back => send pong
| if data.upper().__contains__("PING"):
self.m_socket.sendall('PONG\r\n')
except Exception as ex:
print("Erreur de reception" | )
print(ex)
##
# Ends the IRC protocol
# @note Close the socket
# @param self Self.
    def quit(self):
        # Just closes the socket; no IRC QUIT command is sent first.
        self.m_socket.close();
##
# Overload the Core class to use a SSL Socket.
class secureCore(Core):
    """SSL variant of Core (note the default port 6697, the TLS IRC port)."""
    def __init__(self,channel="#dut.info",name="twp_bot",port=6697,host='irc.freenode.net'):
        Core.__init__(self,channel,name,port,host)
        # NOTE(review): Core.__init__ already called self.createSocket() (which
        # dispatches to the SSL override below), so this creates a second
        # socket and discards the first unclosed — confirm intended.
        self.m_socket=self.createSocket()
    ##
    # Overload the Core.createSocket to use a SSL Socket.
    def createSocket(self):
        # NOTE(review): instantiating SSLSocket directly is not the supported
        # ssl-module API (ssl.wrap_socket / SSLContext.wrap_socket is) —
        # verify this works on the targeted Python version.
        return SSLSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
|
beslave/auto-collector | auto/server/__init__.py | Python | mit | 678 | 0.001475 | import aiohttp_jinja2
import asyncio
import jinja2
from aiohttp import web
import setti | ngs
from auto.server.middlewares import middlewares
from auto.server.urls import url_patterns
loop = asyncio.get_event_loop()
app = web.Application(middlewares=middlewares)
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(settings.TEMPLATE_DIR))
for url in url_patterns:
app.router.add_route(url.method, url.pattern, url.view)
async def serve_task():
    # Bind the aiohttp request handler to the configured address/port and
    # return the created asyncio server object.
    return await loop.create_server(
        app.make_handler(),
        host=settings.SITE_ADDR,
        port=settings.SITE_PORT,
        # reuse_address=False,
        # reuse_port=False,
    )
tasks = [serve_task]
|
e2crawfo/dps | dps/tf/updater.py | Python | apache-2.0 | 21,553 | 0.002969 | import abc
import json
from future.utils import with_metaclass
from collections import defaultdict
import numpy as np
import tensorflow as tf
from dps import cfg
from dps.utils import Parameterized, Param
from dps.utils.tf import build_gradient_train_op, trainable_variables, get_scheduled_values, ScopedFunction
from dps.datasets.base import Dataset
class Updater(with_metaclass(abc.ABCMeta, Parameterized)):
    """Abstract base for objects that build a TF graph and run train/eval steps.

    Subclasses implement _build_graph, _update and _evaluate; this base adds
    global-step bookkeeping and (optionally) a tf.train.Saver over the
    updater's trainable variables.
    """
    build_saver = True
    def __init__(self, env, scope=None, mpi_context=None, **kwargs):
        self.scope = scope
        self.env = env
        self.mpi_context = mpi_context
        # running count of training samples consumed so far
        self._n_experiences = 0
        self.step = 0
        self._saver = None
    @property
    def n_experiences(self):
        # Total number of experiences (samples) processed so far.
        return self._n_experiences
    def build_graph(self):
        """Build the subclass graph, then global-step ops and the saver."""
        # with tf.name_scope(self.scope or self.__class__.__name__) as scope:
        #     self._scope = scope
        self._build_graph()
        global_step = tf.train.get_or_create_global_step()
        self.inc_global_step_op = tf.assign_add(global_step, 1)
        global_step_input = tf.placeholder(tf.int64, ())
        assign_global_step = tf.assign(global_step, global_step_input)
        # reset the global step to 0 in the current default session
        tf.get_default_session().run(assign_global_step, feed_dict={global_step_input: 0})
        if self.build_saver:
            updater_variables = {v.name: v for v in self.trainable_variables(for_opt=False)}
            self.saver = tf.train.Saver(updater_variables)
    @abc.abstractmethod
    def _build_graph(self):
        raise Exception("NotImplemented")
    def update(self, batch_size, step):
        """Run one training update, advance the global step and the sample count."""
        update_result = self._update(batch_size)
        sess = tf.get_default_session()
        sess.run(self.inc_global_step_op)
        self._n_experiences += batch_size
        return update_result
    @abc.abstractmethod
    def _update(self, batch_size):
        raise Exception("NotImplemented")
    def evaluate(self, batch_size, step, mode="val"):
        """Evaluate on the validation ("val") or test ("test") split."""
        assert mode in "val test".split()
        return self._evaluate(batch_size, mode)
    @abc.abstractmethod
    def _evaluate(self, batch_size, mode):
        raise Exception("NotImplemented")
    def trainable_variables(self, for_opt):
        raise Exception("AbstractMethod")
    def save(self, filename):
        """Save the updater's variables; returns the checkpoint path."""
        path = self.saver.save(tf.get_default_session(), filename)
        return path
    def restore(self, path):
        """Restore the updater's variables from a checkpoint path."""
        self.saver.restore(tf.get_default_session(), path)
class DummyUpdater(Updater):
    """ For when you just want to build datasets. Much faster than most normal updaters. """
    build_saver = False
    def trainable_variables(self, for_opt):
        return []
    def _build_graph(self):
        pass
    def _update(self, batch_size):
        return dict()
    def _evaluate(self, batch_size, mode):
        return dict()
    # NOTE(review): this signature differs from Updater.save(self, filename)
    # (extra 'session' argument) — confirm how callers invoke save here.
    def save(self, session, filename):
        return ''
    def restore(self, path):
        pass
class DifferentiableUpdater(Updater):
    """ Update parameters of a differentiable function `f` using gradient-based algorithm.

    Must be used in context of a default graph, session and config.

    Parameters
    ----------
    env: gym Env
        The environment we're trying to learn about.
    f: An instance of ScopedFunction
        Accepts a tensor (input), returns a tensor (inference).

    """
    optimizer_spec = Param()
    lr_schedule = Param()
    noise_schedule = Param()
    max_grad_norm = Param()
    l2_weight = Param(None)
    stopping_criteria = "loss,min"
    def __init__(self, env, f, **kwargs):
        assert hasattr(env, 'build'), (
            "Environments used with DifferentiableUpdater must possess "
            "a method called `build` which builds returns a dictionary of scalar tensors."
        )
        self.f = f
        super(DifferentiableUpdater, self).__init__(env, **kwargs)
    def trainable_variables(self, for_opt):
        return trainable_variables(self.f.scope, for_opt=for_opt)
    def _build_graph(self):
        """Build loss (env-provided plus optional L2 term) and the train op."""
        self.recorded_tensors = self.env.build(self.f)
        self.loss = self.recorded_tensors['loss']
        tvars = self.trainable_variables(for_opt=True)
        if self.l2_weight is not None:
            # regularize only variables whose name contains 'weights'
            self.loss += self.l2_weight * sum(tf.nn.l2_loss(v) for v in tvars if 'weights' in v.name)
        self.train_op, self.train_recorded_tensors = build_gradient_train_op(
            self.loss, tvars, self.optimizer_spec, self.lr_schedule,
            self.max_grad_norm, self.noise_schedule)
        self.recorded_tensors.update(get_scheduled_values())
    def _update(self, batch_size):
        """Run one gradient step on a training batch; returns recorded values."""
        feed_dict = self.env.data_manager.do_train()
        sess = tf.get_default_session()
        _, record, train_record = sess.run(
            [self.train_op, self.recorded_tensors, self.train_recorded_tensors], feed_dict=feed_dict)
        record.update(train_record)
        return record
    def _evaluate(self, batch_size, mode):
        """Compute the recorded tensors on the val or test split."""
        if mode == "val":
            feed_dict = self.env.data_manager.do_val()
        elif mode == "test":
            feed_dict = self.env.data_manager.do_test()
        else:
            raise Exception("Unknown evaluation mode: {}".format(mode))
        sess = tf.get_default_session()
        return sess.run(self.recorded_tensors, feed_dict=feed_dict)
class VideoUpdater(Updater):
optimizer_spec = Param()
lr_schedule = Param()
noise_schedule = Param()
max_grad_norm = Param()
grad_n_record_groups = Param(None)
    def __init__(self, env, scope=None, **kwargs):
        self.obs_shape = env.obs_shape
        # the last three entries of obs_shape are height/width/depth; any
        # leading entry is taken as the number of frames
        *other, self.image_height, self.image_width, self.image_depth = self.obs_shape
        self.n_frames = other[0] if other else 0
        self.network = cfg.build_network(env, self, scope="network")
        super(VideoUpdater, self).__init__(env, scope=scope, **kwargs)
    def trainable_variables(self, for_opt):
        # Delegate to the wrapped network.
        return self.network.trainable_variables(for_opt)
    def _update(self, batch_size):
        # 'no_gradient' turns training into a no-op (the graph is still built)
        if cfg.get('no_gradient', False):
            return dict()
        feed_dict = self.data_manager.do_train()
        sess = tf.get_default_session()
        _, record, train_record = sess.run(
            [self.train_op, self.recorded_tensors, self.train_records], feed_dict=feed_dict)
        record.update(train_record)
        return record
    def _evaluate(self, _batch_size, mode):
        # NOTE(review): self.evaluator is not assigned in the code visible
        # here — presumably set up during graph construction; confirm.
        return self.evaluator.eval(self.recorded_tensors, self.data_manager, mode)
def _build_graph(self):
self.data_manager = DataManager(datasets=self.env.datasets)
self.data_manager.build_graph()
data = self.data_manager.iterator.get_next()
self.inp = data["image"]
network_outputs = self.network(data, self.data_manager.is_training)
network_tensors = network_outputs["tensors"]
network_recorded_tensors = network_outputs["recorded_tensors"]
network_losses = network_outputs["losses"]
self.tensors = network_tensors
self.recorded_tensors = recorded_tensors = dict(global_step=tf.train.get_or_create_global_step())
# --- loss ---
self.loss = tf.constant(0., tf.float32)
for name, tensor in network_losses.items():
self.loss += tensor
recorded_tensors['loss_' + name] = tensor
recorded_tensors['loss'] = self.loss
# - | -- train op ---
if cfg.do_train and not c | fg.get('no_gradient', False):
tvars = self.trainable_variables(for_opt=True)
self.train_op, self.train_records = build_gradient_train_op(
self.loss, tvars, self.optimizer_spec, self.lr_schedule,
self.max_grad_norm, self.noise_schedule, grad_n_record_groups=self.grad_n_record_groups)
sess = tf.get_default_session()
for k, v in getattr(sess, 'scheduled_values', None).items():
if k in recorded_tensors:
recorded_tensors['scheduled_' + k] = v
else:
recorded_tensors[k] = v
# --- recorded values ---
intersection = recorded_tensors.keys() & network_recorded_tensors.keys()
assert not intersection, "Key sets have non-ze |
soumide1102/pycbc | pycbc/transforms.py | Python | gpl-3.0 | 57,402 | 0.001115 | # Copyright (C) 2017 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have re | ceived a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides classes and functions for transforming parameters.
"""
import copy
import logging
import | numpy
from six import string_types
from pycbc import conversions
from pycbc import coordinates
from pycbc import cosmology
from pycbc.io import record
from pycbc.waveform import parameters
from pycbc.boundaries import Bounds
from pycbc import VARARGS_DELIM
class BaseTransform(object):
    """A base class for transforming between two sets of parameters.
    """
    name = None
    inverse = None
    _inputs = []
    _outputs = []
    def __init__(self):
        self.inputs = set(self._inputs)
        self.outputs = set(self._outputs)
    def __call__(self, maps):
        return self.transform(maps)
    def transform(self, maps):
        """ This function transforms from inputs to outputs.
        """
        raise NotImplementedError("Not added.")
    def inverse_transform(self, maps):
        """ The inverse conversions of transform. This function transforms from
        outputs to inputs.
        """
        raise NotImplementedError("Not added.")
    def jacobian(self, maps):
        """ The Jacobian for the inputs to outputs transformation.
        """
        raise NotImplementedError("Jacobian transform not implemented.")
    def inverse_jacobian(self, maps):
        """ The Jacobian for the outputs to inputs transformation.
        """
        raise NotImplementedError("Jacobian transform not implemented.")
    @staticmethod
    def format_output(old_maps, new_maps):
        """ This function takes the returned dict from `transform` and converts
        it to the same datatype as the input.

        Parameters
        ----------
        old_maps : {FieldArray, dict}
            The mapping object to add new maps to.
        new_maps : dict
            A dict with key as parameter name and value is numpy.array.

        Returns
        -------
        {FieldArray, dict}
            The old_maps object with new keys from new_maps.
        """
        # if input is FieldArray then return FieldArray
        if isinstance(old_maps, record.FieldArray):
            # iterate items directly rather than zipping separate key/value
            # lists (equivalent, but simpler)
            for key, vals in new_maps.items():
                try:
                    old_maps = old_maps.add_fields([vals], [key])
                except ValueError:
                    # field already exists: overwrite in place
                    old_maps[key] = vals
            return old_maps
        # if input is dict then return dict
        elif isinstance(old_maps, dict):
            out = old_maps.copy()
            out.update(new_maps)
            return out
        # else error
        else:
            raise TypeError("Input type must be FieldArray or dict.")
    @classmethod
    def from_config(cls, cp, section, outputs, skip_opts=None,
                    additional_opts=None):
        """Initializes a transform from the given section.

        Parameters
        ----------
        cp : pycbc.workflow.WorkflowConfigParser
            A parsed configuration file that contains the transform options.
        section : str
            Name of the section in the configuration file.
        outputs : str
            The names of the parameters that are output by this transformation,
            separated by `VARARGS_DELIM`. These must appear in the "tag" part
            of the section header.
        skip_opts : list, optional
            Do not read options in the given list.
        additional_opts : dict, optional
            Any additional arguments to pass to the class. If an option is
            provided that also exists in the config file, the value provided
            will be used instead of being read from the file.

        Returns
        -------
        cls
            An instance of the class.
        """
        tag = outputs
        if skip_opts is None:
            skip_opts = []
        if additional_opts is None:
            additional_opts = {}
        else:
            additional_opts = additional_opts.copy()
        outputs = set(outputs.split(VARARGS_DELIM))
        # list(...) is required: under python 3, dict.keys() returns a view
        # that cannot be concatenated to a list (the original raised TypeError)
        special_args = ['name'] + skip_opts + list(additional_opts)
        # get any extra arguments to pass to init
        extra_args = {}
        for opt in cp.options("-".join([section, tag])):
            if opt in special_args:
                continue
            # check if option can be cast as a float
            val = cp.get_opt_tag(section, opt, tag)
            try:
                val = float(val)
            except ValueError:
                pass
            # add option
            extra_args[opt] = val
        extra_args.update(additional_opts)
        out = cls(**extra_args)
        # check that the outputs match exactly what was specified
        if outputs != out.outputs:
            raise ValueError("outputs of class do not match outputs specified "
                             "in section")
        return out
class CustomTransform(BaseTransform):
"""Allows for any transform to be defined.
Parameters
----------
input_args : (list of) str
The names of the input parameters.
output_args : (list of) str
The names of the output parameters.
transform_functions : dict
Dictionary mapping input args to a string giving a function call;
e.g., ``{'q': 'q_from_mass1_mass2(mass1, mass2)'}``.
jacobian : str, optional
String giving a jacobian function. The function must be in terms of
the input arguments.
Examples
--------
Create a custom transform that converts mass1, mass2 to mtotal, q:
>>> t = transforms.CustomTransform(['mass1', 'mass2'], ['mtotal', 'q'], {'mtotal': 'mass1+mass2', 'q': 'mass1/mass2'}, '(mass1 + mass2) / mass2**2')
Evaluate a pair of masses:
>>> t.transform({'mass1': 10., 'mass2': 5.})
{'mass1': 10.0, 'mass2': 5.0, 'mtotal': 15.0, 'q': 2.0}
The Jacobian for the same pair of masses:
>>> t.jacobian({'mass1': 10., 'mass2': 5.})
0.59999999999999998
"""
name = "custom"
    def __init__(self, input_args, output_args, transform_functions,
                 jacobian=None):
        # accept a single parameter name as a convenience; normalize to lists
        if isinstance(input_args, string_types):
            input_args = [input_args]
        if isinstance(output_args, string_types):
            output_args = [output_args]
        self.inputs = set(input_args)
        self.outputs = set(output_args)
        # mapping of output parameter name -> string expression to evaluate
        self.transform_functions = transform_functions
        # string expression giving the jacobian in terms of the input args
        self._jacobian = jacobian
        # we'll create a scratch FieldArray space to do transforms on
        # we'll default to length 1; this will be changed if a map is passed
        # with more than one value in it
        self._createscratch()
    def _createscratch(self, shape=1):
        """Creates a scratch FieldArray to use for transforms."""
        # one float field per input parameter; callers re-create with a larger
        # shape when the incoming maps hold more than one value
        self._scratch = record.FieldArray(shape, dtype=[(p, float)
                                                        for p in self.inputs])
def _copytoscratch(self, maps):
"""Copies the data in maps to the scratch space.
If the maps contain arrays that are not the same shape as the scratch
space, a new scratch space will be created.
"""
try:
for p in self.inputs:
self._scratch[p][:] = maps[p]
except ValueError:
# we'll get a ValueError if the scratch space isn't the same size
# as the maps; in that case, re-create the scratch space with the
|
ppizarror/Hero-of-Antair | bin/simplejson/scanner.py | Python | gpl-2.0 | 4,693 | 0.000213 | # coding=utf-8
"""
JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner', 'JSONDecodeError']
# JSON number grammar: integer part (no leading zeros), optional fraction,
# optional exponent; groups are (int, frac, exp)
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:
    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    # Note that this exception is used from _speedups
    def __init__(self, msg, doc, pos, end=None):
        # format the human-readable message first, then record raw fields
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        # derive 1-based line/column positions from the raw character indices
        self.lineno, self.colno = linecol(doc, pos)
        if end is not None:
            self.endlineno, self.endcolno = linecol(doc, end)
        else:
            self.endlineno, self.endcolno = None, None
    def __reduce__(self):
        # pickle support: rebuild from the original constructor arguments
        return self.__class__, (self.msg, self.doc, self.pos, self.end)
def linecol(doc, pos):
    """Map character index ``pos`` in ``doc`` to a 1-based (line, column)."""
    newlines_before = doc.count('\n', 0, pos)
    if newlines_before:
        # distance from the last newline before pos gives the column
        column = pos - doc.rindex('\n', 0, pos)
    else:
        column = pos + 1
    return newlines_before + 1, column
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column context.

    Any literal ``%r`` in ``msg`` is replaced with the repr of the character
    at ``pos``.  When ``end`` is given, the message covers the span pos-end.
    """
    start_line, start_col = linecol(doc, pos)
    detail = msg.replace('%r', repr(doc[pos:pos + 1]))
    if end is None:
        return '%s: line %d column %d (char %d)' % (
            detail, start_line, start_col, pos)
    end_line, end_col = linecol(doc, end)
    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
        detail, start_line, start_col, end_line, end_col, pos, end)
def py_make_scanner(context):
    """Build a pure-Python ``scan_once`` callable for the given decoder context."""
    # bind context attributes to locals: faster lookup in the hot scan loop
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo
    def _scan_once(string, idx):
        # Dispatch on the first character of the JSON value starting at idx;
        # returns (value, index-past-the-value).
        errmsg = 'Expecting value'
        try:
            nextchar = string[idx]
        except IndexError:
            raise JSONDecodeError(errmsg, string, idx)
        if nextchar == '"':
            return parse_string(string, idx + 1, encoding, strict)
        elif nextchar == '{':
            return parse_object((string, idx + 1), encoding, strict,
                                _scan_once, object_hook, object_pairs_hook, memo)
        elif nextchar == '[':
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        elif nextchar == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5
        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            # a fraction or exponent forces a float; otherwise keep an int
            if frac or exp:
                res = parse_float(integer + (frac or '') + (exp or ''))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        else:
            raise JSONDecodeError(errmsg, string, idx)
    def scan_once(string, idx):
        if idx < 0:
            # Ensure the same behavior as the C speedup, otherwise
            # this would work for *some* negative string indices due
            # to the behavior of __getitem__ for strings. #98
            raise JSONDecodeError('Expecting value', string, idx)
        try:
            return _scan_once(string, idx)
        finally:
            # memo caches object keys during one scan; reset between scans
            memo.clear()
    return scan_once
# prefer the C implementation when the extension module is available
make_scanner = c_make_scanner or py_make_scanner
|
Lyleo/OmniMarkupPreviewer | OmniMarkupLib/Renderers/base_renderer.py | Python | mit | 4,974 | 0.000402 | """
Copyright (c) 2013 Timon Wong
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import locale
import os
import subprocess
import sys
import tempfile
PY3K = sys.version_info >= (3, 0, 0)
class MarkupRenderer(object):
    """Abstract base class for markup renderers.

    Subclasses override :meth:`is_enabled` to claim files they can handle
    and :meth:`render` to produce the rendered output.
    """
    def __init__(self):
        # renderer-specific options; replaced by load_settings()
        self.renderer_options = {}
    def load_settings(self, global_setting, renderer_options):
        """Store renderer-specific options (the global settings are unused here)."""
        self.renderer_options = renderer_options
    @classmethod
    def is_enabled(cls, filename, syntax):
        """Return True when this renderer can handle the given file/syntax."""
        return False
    def render(self, text, **kwargs):
        """Render markup ``text``; must be implemented by subclasses."""
        raise NotImplementedError()
class InputMethod(object):
    """Enumeration of the ways input text is handed to an external command."""
    STDIN = 1     # pipe the text through the process's stdin
    TEMPFILE = 2  # write the text to a temporary file and pass its path
    FILE = 3      # pass the original file's path directly
class CommandlineRenderer(MarkupRenderer):
    """Renderer that shells out to an external command-line program.

    The input text is delivered to the program according to ``input_method``
    (stdin, a temporary file, or the original file's path), and the program's
    stdout becomes the rendered output.
    """
    def __init__(self, input_method=InputMethod.STDIN, executable=None, args=[]):
        # note: the shared default list is never mutated by this class
        super(CommandlineRenderer, self).__init__()
        self.input_method = input_method
        self.executable = executable
        self.args = args
    def pre_process_encoding(self, text, **kwargs):
        """Encode the unicode input to the bytes the external tool expects."""
        return text.encode('utf-8')
    def pre_process(self, text, **kwargs):
        """Hook for subclasses to massage the input before rendering."""
        return text
    def post_process(self, rendered_text, **kwargs):
        """Hook for subclasses to massage the output after rendering."""
        return rendered_text
    def post_process_encoding(self, rendered_text, **kwargs):
        """Decode the external tool's byte output back to unicode."""
        return rendered_text.decode('utf-8')
    def render(self, text, **kwargs):
        """Run the full pipeline: encode, pre-process, execute, decode, post-process.

        Requires ``kwargs['filename']`` for the tempfile/file input methods.
        """
        text = self.pre_process_encoding(text, **kwargs)
        text = self.pre_process(text, **kwargs)
        text = self.executable_check(text, kwargs['filename'])
        text = self.post_process_encoding(text, **kwargs)
        return self.post_process(text, **kwargs)
    def executable_check(self, text, filename):
        """Run the external executable and return its stripped stdout bytes."""
        tempfile_ = None
        result = ''
        try:
            args = [self.get_executable()]
            if self.input_method == InputMethod.STDIN:
                args.extend(self.get_args())
            elif self.input_method == InputMethod.TEMPFILE:
                # keep the original extension so the tool can detect the format
                _, ext = os.path.splitext(filename)
                tempfile_ = tempfile.NamedTemporaryFile(suffix=ext)
                tempfile_.write(text)
                tempfile_.flush()
                # BUG FIX: NamedTemporaryFile.name is a string attribute, not a
                # method; calling it raised TypeError and broke TEMPFILE mode
                args.extend(self.get_args(filename=tempfile_.name))
                text = None
            elif self.input_method == InputMethod.FILE:
                args.extend(self.get_args(filename=filename))
                text = None
            else:
                return u''
            proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    startupinfo=self.get_startupinfo())
            result, errdata = proc.communicate(text)
            if len(errdata) > 0:
                print(errdata)
        finally:
            if tempfile_ is not None:
                tempfile_.close()  # Also delete file
        return result.strip()
    def get_executable(self):
        if not PY3K and os.name == 'nt':
            # [PY2K] On Windows, popen won't support unicode args
            if isinstance(self.executable, unicode):
                encoding = locale.getpreferredencoding()
                return self.executable.encode(encoding)
        return self.executable
    def get_args(self, filename=None):
        if not PY3K and os.name == 'nt':
            # [PY2K] On Windows, popen won't support unicode args
            encoding = locale.getpreferredencoding()
            args = [arg if isinstance(arg, str) else arg.encode(encoding)
                    for arg in self.args]
        else:
            args = self.args
        # substitute the {filename} placeholder in each configured argument
        return [arg.format(filename=filename) for arg in args]
    def get_startupinfo(self):
        # On Windows, keep the child process from flashing up a console window
        if os.name != 'nt':
            return None
        info = subprocess.STARTUPINFO()
        info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        info.wShowWindow = subprocess.SW_HIDE
        return info
def renderer(renderer_type):
    """Class decorator marking ``renderer_type`` as a discoverable renderer."""
    setattr(renderer_type, 'IS_VALID_RENDERER__', True)
    return renderer_type
|
Uli1/mapnik | scons/scons-local-2.4.0/SCons/Tool/__init__.py | Python | lgpl-2.1 | 34,247 | 0.008234 | """SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/__init__.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import imp
import sys
import re
import os
import shutil
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
# Extra directories to search for tool modules, shared by all Tool instances
DefaultToolpath=[]
# Shared scanner singletons used when wiring up builders below
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
# Source suffixes routed to each language's scanner via SourceFileScanner
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
             ".h", ".H", ".hxx", ".hpp", ".hh",
             ".F", ".fpp", ".FPP",
             ".m", ".mm",
             ".S", ".spp", ".SPP", ".sx"]
DSuffixes = ['.d']
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
for suffix in CSuffixes:
    SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
    SourceFileScanner.add_scanner(suffix, DScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
    SourceFileScanner.add_scanner(suffix, LaTeXScanner)
    SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool(object):
    """Locates a tool module by name and applies it to construction environments."""
    def __init__(self, name, toolpath=[], **kw):
        self.name = name
        self.toolpath = toolpath + DefaultToolpath
        # remember these so we can merge them into the call
        self.init_kw = kw
        module = self._tool_module()
        self.generate = module.generate
        self.exists = module.exists
        if hasattr(module, 'options'):
            self.options = module.options
    def _tool_module(self):
        """Locate and import this tool's implementation module.
        Search order: the explicit toolpath (filesystem first, then zip
        archives), then the bundled SCons.Tool package (filesystem, then zip).
        """
        # TODO: Interchange zipimport with normal initilization for better error reporting
        oldpythonpath = sys.path
        sys.path = self.toolpath + sys.path
        try:
            try:
                file, path, desc = imp.find_module(self.name, self.toolpath)
                try:
                    return imp.load_module(self.name, file, path, desc)
                finally:
                    if file:
                        file.close()
            except ImportError, e:
                if str(e)!="No module named %s"%self.name:
                    raise SCons.Errors.EnvironmentError(e)
                # not found as a plain module: fall back to zip archives on
                # the toolpath
                try:
                    import zipimport
                except ImportError:
                    pass
                else:
                    for aPath in self.toolpath:
                        try:
                            importer = zipimport.zipimporter(aPath)
                            return importer.load_module(self.name)
                        except ImportError, e:
                            pass
        finally:
            # always restore the import path we temporarily extended
            sys.path = oldpythonpath
        full_name = 'SCons.Tool.' + self.name
        try:
            return sys.modules[full_name]
        except KeyError:
            try:
                smpath = sys.modules['SCons.Tool'].__path__
                try:
                    file, path, desc = imp.find_module(self.name, smpath)
                    module = imp.load_module(full_name, file, path, desc)
                    setattr(SCons.Tool, self.name, module)
                    if file:
                        file.close()
                    return module
                except ImportError, e:
                    if str(e)!="No module named %s"%self.name:
                        raise SCons.Errors.EnvironmentError(e)
                    # last resort: the SCons.Tool package itself may live in
                    # a zip archive
                    try:
                        import zipimport
                        importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
                        module = importer.load_module(full_name)
                        setattr(SCons.Tool, self.name, module)
                        return module
                    except ImportError, e:
                        m = "No tool named '%s': %s" % (self.name, e)
                        raise SCons.Errors.EnvironmentError(m)
            except ImportError, e:
                m = "No tool named '%s': %s" % (self.name, e)
                raise SCons.Errors.EnvironmentError(m)
    def __call__(self, env, *args, **kw):
        """Apply the tool to ``env``, merging call-time kwargs over init kwargs."""
        if self.init_kw is not None:
            # Merge call kws into init kws;
            # but don't bash self.init_kw.
            if kw is not None:
                call_kw = kw
                kw = self.init_kw.copy()
                kw.update(call_kw)
            else:
                kw = self.init_kw
        env.Append(TOOLS = [ self.name ])
        if hasattr(self, 'options'):
            import SCons.Variables
            if 'options' not in env:
                from SCons.Script import ARGUMENTS
                env['options']=SCons.Variables.Variables(args=ARGUMENTS)
            opts=env['options']
            self.options(opts)
            opts.Update(env)
        self.generate(env, *args, **kw)
    def __str__(self):
        return self.name
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
    """Return the Program builder for ``env``, creating and caching it on
    first use.

    If the environment already has a Program builder, that existing builder
    is returned unchanged.
    """
    try:
        return env['BUILDERS']['Program']
    except KeyError:
        import SCons.Defaults
        program = SCons.Builder.Builder(
            action=SCons.Defaults.LinkAction,
            emitter='$PROGEMITTER',
            prefix='$PROGPREFIX',
            suffix='$PROGSUFFIX',
            src_suffix='$OBJSUFFIX',
            src_builder='Object',
            target_scanner=ProgramScanner)
        env['BUILDERS']['Program'] = program
        return program
def createStaticLibBuilder(env):
"""This is a utility function that creates the StaticLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
static_lib = env['BUILDERS']['StaticLibrary']
except KeyError:
action_list = [ |
ctmil/bc_website_purchase | wizards/request_relevant_suppliers.py | Python | agpl-3.0 | 7,875 | 0.007238 | __author__ = 'tbri'
from openerp import fields, models, api, _
import logging
_logger = logging.getLogger(__name__)
class request_relevant_suppliers(models.TransientModel):
    """Wizard that collects the known suppliers of a tender's products and
    creates one RFQ (purchase order) per selected supplier."""
    _name = "requisition_suppliers"
    _description = "Purchase Requisition Suppliers"
    def _get_active_id(self):
        # default for the 'tender' field: the purchase.requisition this wizard
        # was launched from arrives in the context as 'active_id'
        print "GETTING ACTIVE_ID", self.env.context
        return self.env.context.get('active_id', False)
    name = fields.Char('What?')
    tender = fields.Many2one('purchase.requisition', 'CallForBitds', default=lambda self: self._get_active_id())
    numsupp = fields.Integer('Number of suppliers', compute='_get_numsupp')
    product_suppliers = fields.One2many('relevant_supplierinfo', 'relevant_suppliers',
                                       string='Suppliers')
    """
    @api.depends('tender')
    def onchange_tender(self):
        print "ONCHANGE_TENDER"
        if self.tender:
            sellers = [{'supplier': 1290, 'leadtime': 12}, {'supplier': 579, 'leadtime': 27}]
            self.product_suppliers = [(0, 0, v) for v in sellers]
            _logger.info('product_suppliers onchange %s', self.product_suppliers)
    """
    @api.one
    def _get_numsupp(self):
        # compute method for the 'numsupp' field
        self.numsupp = len(self.product_suppliers)
    @api.one
    def _comp_product_suppliers(self):
        # NOTE(review): hard-coded partner ids; looks like leftover test data
        sellers = [{'supplier': 1290, 'leadtime': 12}, {'supplier': 579, 'leadtime': 27}]
        self.product_suppliers = [(0, 0, v) for v in sellers]
        return sellers
    @api.v7
    def Xdefault_get(self, cr, uid, fields_list=None, context=None):
        # disabled (X-prefixed) old-API variant kept for reference
        dg = self.xxdefault_get(cr, uid)
        print "DEFAULT GET V7", dg
        return dg[0]
    @api.one
    def xxdefault_get(self):
        # disabled new-API experiment; default_get below is the live version
        vals = {}
        _logger.info(' CONTEXT %s', self.env.context)
        active_id = self.env.context['active_id']
        tender = self.env['purchase.requisition'].browse(active_id)
        products = [line.product_id for line in tender.line_ids]
        sellers = []
        for product in products:
            for seller in product.seller_ids:
                _logger.info('CREATING SELLER %s',seller.name.name)
                #supp = self.env['bc_website_purchase.relevant.supplierinfo'].create(
                #    {'supplier' : seller.name.id,
                #     'leadtime' : seller.delay,
                #     'relevant_suppliers' : self.id
                #     }
                #)
                info = {'supplier' : seller.name.id,
                        'leadtime' : seller.delay,
                        #'relevant_suppliers' : self.id
                        }
                _logger.info('About to create %s', info)
                #self.product_suppliers = [(0,0, info)]
                sellers.append(info)
        _logger.info('SUPP %s', sellers)
        #self.product_suppliers = [(0, 0, v) for v in sellers]
        vals['product_suppliers'] = [(0, 0, v) for v in sellers]
        return vals
    def default_get(self, cr, uid, fields_list=None, context=None):
        # pre-populates product_suppliers with every known seller of every
        # product on the tender, de-duplicated
        val = {}
        active_id = context['active_id']
        tender = self.pool('purchase.requisition').browse(cr, uid, [active_id])
        products = [line.product_id for line in tender.line_ids]
        sellers = []
        for product in products:
            for seller in product.seller_ids:
                info = {'name' : seller.name.id,
                        'leadtime' : seller.delay,
                        #'relevant_suppliers' : self.id
                        }
                _logger.info('About to create %s', info)
                # do not add existing sellers
                if info not in sellers:
                    sellers.append(info)
        #sellers = [{'supplier': 1290, 'leadtime': 12}, {'supplier': 579, 'leadtime': 27}]
        val['product_suppliers'] = [(0, 0, v) for v in sellers]
        return val
    def Xview_init(self, cr, uid, fields, context=None):
        # disabled (X-prefixed) view_init experiment kept for reference
        res = super(request_relevant_suppliers, self).view_init(cr, uid, fields, context)
        _logger.info(' %s VIEW INIT CONTEXT %s', res, context)
        active_id = context['active_id']
        tender = self.pool('purchase.requisition').browse(cr, uid, [active_id])
        products = [line.product_id for line in tender.line_ids]
        for product in products:
            for seller in product.seller_ids:
                _logger.info('%s CREATING SELLER %s',fields, seller.name.name)
                #supp = self.env['bc_website_purchase.relevant.supplierinfo'].create(
                #    {'supplier' : seller.name.id,
                #     'leadtime' : seller.delay,
                #     'relevant_suppliers' : self.id
                #     }
                #)
                info = {'name' : seller.name.id,
                        'leadtime' : seller.delay,
                        'relevant_suppliers' : self.id
                        }
                _logger.info('About to create %s', info)
                self.product_suppliers = [(0,0, info)]
                _logger.info('SUPP %s', self.product_suppliers)
        if not tender.line_ids:
            raise Warning(_('Error'), _('Define product(s) you want to include in the call for bids.'))
        return res
    @api.one
    def create_order(self):
        # creates one RFQ per supplier row and copies the tender's template
        # and the supplier's leadtime onto the generated order (lines)
        _logger.info('create_order in request_relevant_suppliers')
        active_id = self.env.context['active_id']
        _logger.info('create_order active_id %s' % active_id)
        tender = self.env['purchase.requisition'].browse(active_id)
        prods = self.env['relevant_supplierinfo'].search([('relevant_suppliers','=',self.id)])
        _logger.info('create_order %s %s', self.id, prods)
        for si in prods:
            supplierinfo = si.read(['name', 'leadtime'])[0]
            _logger.info('create_order %s %s', tender, supplierinfo['name'])
            leadtime = supplierinfo['leadtime']
            rfq_id = tender.make_purchase_order(supplierinfo['name'][0])
            _logger.info('create_order rfq %s', rfq_id)
            for rfq in rfq_id.values():
                _logger.info('searching')
                # not great but
                po = self.env['purchase.order'].search([('id', '=', rfq)])
                po.write({'template_id': tender.template_id.id})
                lines = self.env['purchase.order.line'].search([('order_id','=',rfq)])
                _logger.info('Lines found %s', lines)
                lines.write({'leadtime' : leadtime})
        return {'type': 'ir.actions.act_window_close'}
    """
    def view_init(self, cr, uid, fields_list, context=None):
        if context is None:
            context = {}
        res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
        record_id = context and context.get('active_id', False) or False
        tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
        if not tender.line_ids:
            raise osv.except_osv(_('Error!'), _('Define product(s) you want to include in the call for bids.'))
        return res
    def create_order(self, cr, uid, ids, context=None):
        active_ids = context and context.get('active_ids', [])
        data = self.browse(cr, uid, ids, context=context)[0]
        self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
        return {'type': 'ir.actions.act_window_close'}
    """
class relevant_supplierinfo(models.TransientModel):
    """One wizard row per candidate supplier for the tender's products."""
    _name = 'relevant_supplierinfo'
    # Basically a transient version of product.supplierinfo
    relevant_suppliers = fields.Many2one('requisition_suppliers')
    name = fields.Many2one('res.partner', 'Supplier')
    leadtime = fields.Integer('Leadtime', help='Time from confirmed order to receipt of goods.')
cmyr/poetryutils2 | tests/realness_tests.py | Python | mit | 1,555 | 0.003215 | # coding: utf-8
from __future__ import print_function
from __future__ import unicode_literals
import re
import poetryutils2
# this is all for use within ipython
def sample_words():
    """Collect a lowercased, punctuation-stripped word sample from debug lines."""
    words = []
    for line in poetryutils2.utils.debug_lines():
        words.extend(token.lower() for token in line.split())
    # keep only ASCII letters and apostrophes, then drop anything left empty
    cleaned = (re.sub(r'[^a-zA-Z\']', '', w) for w in words)
    return [w for w in cleaned if w]
def realness(sample):
    """Return the words from ``sample`` that the dictionary check rejects."""
    return [word for word in sample
            if not poetryutils2.utils.is_real_word(word)]
def main():
    """Report words from the sample that repeatedly fail the realness check."""
    # print(len(sample_words()))
    sample = sample_words()
    print(len(sample))
    fails = realness(sample)
    # for f in fails:
    #     print(f)
    from collections import Counter
    counter = Counter(fails)
    # only report words that failed the dictionary check more than once
    for word, count in counter.most_common():
        if count > 1:
            print(word, count)
    # import argparse
    # parser = argparse.ArgumentParser()
    # parser.add_argument('arg1', type=str, help="required argument")
    # parser.add_argument('arg2', '--argument-2', help='optional boolean argument', action="store_true")
    # args = parser.parse_args()
"""
okay so what we're having trouble with:
-est
-ies
-py
"""
if __name__ == "__main__":
    main()
PersonalGenomesOrg/open-humans | private_sharing/api_permissions.py | Python | mit | 254 | 0 | from rest_framework.per | missions import BasePermission
class HasValidProjectToken(BasePermission):
"""
Return True if the request has a valid project token.
"""
def has_permission(self, request, view):
return bo | ol(request.auth)
|
bennuttall/chef-hat | chef_hat/chef_hat.py | Python | bsd-3-clause | 12,834 | 0.000156 | from RPi import GPIO
from w1thermsensor import W1ThermSensor
import energenie
from datetime import datetime, timedelta
from time import sleep
# Use Broadcom (BCM) pin numbering and silence re-run channel warnings
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Chef(object):
"""
Provides an implementation of temperature moderation for use in sous vide
cooking with the Chef HAT add-on for Raspberry Pi. The API allows user
control in Python, but can also be used with a minimal startup script and
provides control through use of the HAT's buttons and LCD screen.
"""
DEFAULT_TARGET_TEMPERATURE = 55
DEFAULT_DURATION = 120
TEMPERATURE_MARGIN = 1
TEMPERATURE_INCREMENT = 1
DURATION_INCREMENT = 5
LED = 15
BUTTON_UP = 2
BUTTON_DOWN = 3
BUTTON_ENTER = 4
BUTTON_BACK = 10
BUTTONS = [BUTTON_UP, BUTTON_DOWN, BUTTON_ENTER, BUTTON_BACK]
BOUNCETIME = 100
PULL = GPIO.PUD_UP
EDGE = GPIO.FALLING
STATE_SETUP = 0
STATE_PREPARING = 1
STATE_READY = 2
STATE_FOOD_IN = 3
STATE_COOKING = 4
STATE_COOKED = 5
STATE_FINISHED = 6
    def __init__(self, temperature=None, duration=None, start=True):
        # state machine begins in SETUP until start() advances it
        self.state = self.STATE_SETUP
        self.sensor = W1ThermSensor()
        self.end_time = None
        self._setup_gpio()
        # a None temperature/duration triggers interactive entry via buttons
        self._set_target_temperature(temperature)
        self._set_duration(duration)
        self.display_initial_info()
        if start:
            self.start()
    def start(self):
        """
        Progress the object's state to *preparing* and start the cooking
        process in the loop of temperature moderation and progressive state
        evolution.
        """
        self.state = self.STATE_PREPARING
        self.write("Preparing")
        # poll every 5s: advance the state machine, then keep the water
        # temperature inside the target band
        while self.STATE_PREPARING <= self.state < self.STATE_FINISHED:
            temperature = self.get_temperature()
            self.state_machine(temperature)
            self.moderate_temperature(temperature)
            sleep(5)
        # State has reached "finished" so run the terminate function to clean up
        self.terminate(None)
    def _setup_gpio(self):
        """
        Setup the GPIO pins used in the Chef HAT (input buttons and status LED)
        and create an event on the back button to terminate the process.
        """
        GPIO.setup(self.LED, GPIO.OUT)
        self.turn_led_off()
        for button in self.BUTTONS:
            GPIO.setup(button, GPIO.IN, self.PULL)
            # clear any stale handler left over from a previous run
            self.remove_button_event(button)
        # the back button aborts cooking at any point
        self.add_button_event(self.BUTTON_BACK, self.terminate)
    def terminate(self, pin):
        """
        Moves the object status to *finished* in order to end the cooking
        process.
        """
        # `pin` is supplied by the GPIO event callback and is unused here
        self.remove_button_event(self.BUTTON_BACK)
        self.state = self.STATE_FINISHED
    def write(self, text, line=1):
        """
        Prints `text`
        TODO: writes to the LCD
        """
        # `line` selects the LCD row; currently unused by the print fallback
        print(text)
    def turn_led_on(self):
        """
        Turns the status LED on
        """
        # LED pin is active-high
        GPIO.output(self.LED, True)
    def turn_led_off(self):
        """
        Turns the status LED off
        """
        # LED pin is active-high
        GPIO.output(self.LED, False)
    def turn_cooker_on(self):
        """
        Uses energenie to switch the cooker on. Also turns on the status LED.
        """
        # the LED mirrors the cooker's power state
        energenie.switch_on()
        self.turn_led_on()
    def turn_cooker_off(self):
        """
        Uses energenie to switch the cooker off. Also turns off the status LED.
        """
        # the LED mirrors the cooker's power state
        energenie.switch_off()
        self.turn_led_off()
    def add_button_event(self, button, callback):
        """
        Adds a GPIO event to run a callback function when a particular button
        is pressed.
        """
        # debounced via BOUNCETIME; buttons are wired active-low (EDGE falling)
        GPIO.add_event_detect(button, self.EDGE, callback, self.BOUNCETIME)
    def remove_button_event(self, button):
        """
        Removes a GPIO event for a particular button.
        """
        GPIO.remove_event_detect(button)
    def _wait_for_button_press(self, button):
        """
        Halts the program until a paticular button is pressed, then continues.
        """
        # blocking wait on the falling edge of the given button pin
        GPIO.wait_for_edge(button, self.EDGE)
    def _setup_up_down_buttons(self, increase_function, decrease_function):
        """
        Configures the *up* and *down* buttons on the Chef HAT to run
        particular increase and decrease functions accordingly. When the
        *enter* button is pressed removes the up/down button events and
        continues.
        """
        self.add_button_event(self.BUTTON_UP, increase_function)
        self.add_button_event(self.BUTTON_DOWN, decrease_function)
        # block here until the user confirms with the enter button
        self._wait_for_button_press(self.BUTTON_ENTER)
        self.remove_button_event(self.BUTTON_UP)
        self.remove_button_event(self.BUTTON_DOWN)
def _set_target_temperature(self, temperature):
"""
Sets the object's `target_temperature` property. If `temperature` is
passed, sets it to the *float* of that value, otherwise sets it to its
configured default value and provides the means to change the value
using the up/down buttons and the LCD.
"""
if temperature is not None:
self.target_temperature = float(temperature)
else:
self.target_temperature = float(self.DEFAULT_TARGET_TEMPERATURE)
self.write("Set")
self.write("temp", 2)
sleep(1)
self.write("Temp:")
self.write("%7dC" % self.target_temperature, 2)
self._setup_up_down_buttons(
self.increase_target_temperature,
self.decrease_target_temperature
)
self.target_temperature_margin_lower = self.target_temperature - 1
self.target_temperature_margin_upper = self.target_temperature + 1
    def _set_duration(self, duration):
        """
        Sets the object's `duration` property. If `duration` is
        passed, sets it to the *int* of that value, otherwise sets it to its
        configured default value and provides the means to change the value
        using the up/down buttons and the LCD.
        """
        # duration is measured in minutes
        if duration is not None:
            self.duration = int(duration)
        else:
            self.duration = int(self.DEFAULT_DURATION)
            self.write("Set")
            self.write("timer", 2)
            sleep(1)
            self.write("Timer:")
            self.write("%2d mins" % self.duration, 2)
            self._setup_up_down_buttons(
                self.increase_duration,
                self.decrease_duration
            )
    def display_initial_info(self):
        """
        Displays temperature and duration values as previously configured.
        """
        self.write("%dC" % self.target_temperature)
        self.write("%d mins" % self.duration, 2)
    def increase_target_temperature(self, pin):
        """
        Increases the target temperature by the temperature increment and
        displays the new value on the LCD.
        """
        # `pin` is supplied by the GPIO event callback and is unused here
        self.target_temperature += self.TEMPERATURE_INCREMENT
        self.write("Temp:")
        self.write("%7dC" % self.target_temperature, 2)
    def decrease_target_temperature(self, pin):
        """
        Decreases the target temperature by the temperature increment and
        displays the new value on the LCD.
        """
        # `pin` is supplied by the GPIO event callback and is unused here
        self.target_temperature -= self.TEMPERATURE_INCREMENT
        self.write("Temp:")
        self.write("%7dC" % self.target_temperature, 2)
    def increase_duration(self, pin):
        """
        Increases the duration by the duration increment and displays the new
        value on the LCD.
        """
        # `pin` is supplied by the GPIO event callback and is unused here
        self.duration += self.DURATION_INCREMENT
        self.write("Timer:")
        self.write("%3d mins" % self.duration, 2)
    def decrease_duration(self, pin):
        """
        Decreases the duration by the duration increment and displays the new
        value on the LCD.
        """
        # `pin` is supplied by the GPIO event callback and is unused here
        self.duration -= self.DURATION_INCREMENT
        self.write("Timer:")
        self.write("%3d mins" % self.duration, 2)
def update_status_to_ready(self):
"""
Updates the object state |
zhangtuoparis13/Vintageous | vi/variables.py | Python | mit | 1,741 | 0.000574 | import sublime
import collections
VAR_MAP_LEADER = 'mapleader'
VAR_MAP_LOCAL_LEADER = 'maplocalleader'
# well-known variables
_SPECIAL_STRINGS = {
'<leader>': VAR_MAP_LEADER,
'<localleader>': VAR_MAP_LOCAL_LEADER,
}
_DEFAULTS = {
VAR_MAP_LEADER: '\\',
VAR_MAP_LOCAL_LEADER: '\\'
}
_VARIABLES = {
}
def expand_keys(seq):
'''Replaces well-known variables in key names with their corresponding
values.
'''
leader = var_name = None
# TODO(guillermooo): Can these variables appear in the middle of a
# sequence instead of at the beginning only?
if seq.lower().startswith('<leader>'):
var_name = '<leader>'
leader = _VARIABLES.get('mapleader', _DEFAULTS.get('mapleader'))
if seq.lower().startswith('<localleader>'):
var = '<localleader>'
local_leader = _VARIABLES.get('maplocalleader',
_DEFAULTS.get('maplocalleader'))
try:
return le | ader + seq[len(var_na | me):]
except TypeError:
return seq
def is_key_name(name):
return name.lower() in _SPECIAL_STRINGS
def get(name):
name = name.lower()
name = _SPECIAL_STRINGS.get(name, name)
return _VARIABLES.get(name, _DEFAULTS.get(name))
def set_(name, value):
# TODO(guillermooo): Set vars in settings.
_VARIABLES[name] = value
class Variables(object):
'''Stores variables during the current Sublime Text session.
Meant to be used as a descriptor with `State`.
'''
def __get__(self, instance, owner):
self.view = instance.view
self.settings = instance.settings
return self
def get(self, name):
return get(name)
def set(self, name, value):
return set_(name, value)
|
Reading-eScience-Centre/pycovjson | pycovjson/writeNetCDF.py | Python | bsd-3-clause | 524 | 0 | from netCDF4 import Dataset
from nump | y import arange, dtype
nx = 4
ny = 4
nz = 4
ncfile = Dataset('test_xy.nc', 'w')
# create the output data.
data_out = arange(nx * ny)
print(data_out)
data_out.shape = (nx, ny) # reshape to 3d array
# create the x and y dimensions.
ncfile.createDimension('x', nx)
ncfile.createDimension('y', ny)
| # ncfile.createDimension('z', nz)
data = ncfile.createVariable('data', dtype('float32').char, ('x', 'y'))
data[:] = data_out
# close the file.
print(ncfile.variables)
print("Wrote file!")
|
slank/awsmeta | awsmeta/metadata.py | Python | mit | 1,422 | 0.000703 | from urllib2 import (
urlopen,
HTTPError, |
URLError,
)
BASEURL = 'http://169.254.169.254/'
DEFAULT_TIMEOUT = 2
DEFAULT_API_VERSION = 'latest'
class MetadataError(Exception):
pass
def path(path=None, api_version=DEFAULT_API_VERSION, timeout=DEFAULT_TIMEOUT):
if not api_version:
api_version = 'latest'
md_path = api_version
if path:
md_path = md_path + "/" + path
try:
u = urlopen(BASEURL + md_path, timeout=t | imeout)
except HTTPError as e:
if e.code == 404:
raise MetadataError("Path not found: /%s" % path)
else:
raise MetadataError(e)
except URLError as e:
raise MetadataError(e)
if not path:
return "\n".join(map(lambda p: p.strip() + "/", u.readlines()))
return u.read()
class ShortNames(object):
'''Provide commonly-used metadata values by name'''
names = {
'az': '/meta-data/placement/availability-zone',
'instance-id': '/meta-data/instance-id',
}
def __init__(self, api_version=None, timeout=DEFAULT_TIMEOUT):
self.api_version = api_version
self.timeout = timeout
def list(self):
return self.names.keys()
def get(self, name):
if name not in self.names:
raise MetadataError('The shortname "{}" is not defined'.format(name))
return path(self.names[name], self.api_version, self.timeout)
|
shanbay/sea | sea/config.py | Python | mit | 1,363 | 0 | from sea.datatypes import ConstantsObject
class ConfigAttribute:
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
def __init__(self, root_path, defaults=None):
supe | r().__init__(defaults | or {})
self.root_path = root_path
def from_object(self, obj):
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace):]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return ConstantsObject(rv)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
|
turbokongen/home-assistant | tests/components/smartthings/conftest.py | Python | apache-2.0 | 10,388 | 0.00077 | """Test configuration and mocks for the SmartThings component."""
import secrets
from unittest.mock import Mock, patch
from uuid import uuid4
from pysmartthings import (
CLASSIFICATION_AUTOMATION,
AppEntity,
AppOAuthClient,
AppSettings,
DeviceEntity,
DeviceStatus,
InstalledApp,
InstalledAppStatus,
InstalledAppType,
Location,
SceneEntity,
SmartThings,
Subscription,
)
from pysmartthings.api import Api
import pytest
from homeassistant.components import webhook
from homeassistant.components.smartthings import DeviceBroker
from homeassistant.components.smartthings.const import (
APP_NAME_PREFIX,
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_INSTANCE_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN, |
DATA_BROKERS,
DOMAIN,
SETTINGS_INSTANCE_ID,
STORAGE_KEY,
STORAGE_VERSION,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.config_entries import CONN_CLASS_CLOUD_PUSH, SOURCE_USER, ConfigEntry
from homeassistant.con | st import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_WEBHOOK_ID,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa
COMPONENT_PREFIX = "homeassistant.components.smartthings."
async def setup_platform(hass, platform: str, *, devices=None, scenes=None):
"""Set up the SmartThings platform and prerequisites."""
hass.config.components.add(DOMAIN)
config_entry = ConfigEntry(
2,
DOMAIN,
"Test",
{CONF_INSTALLED_APP_ID: str(uuid4())},
SOURCE_USER,
CONN_CLASS_CLOUD_PUSH,
system_options={},
)
broker = DeviceBroker(
hass, config_entry, Mock(), Mock(), devices or [], scenes or []
)
hass.data[DOMAIN] = {DATA_BROKERS: {config_entry.entry_id: broker}}
await hass.config_entries.async_forward_entry_setup(config_entry, platform)
await hass.async_block_till_done()
return config_entry
@pytest.fixture(autouse=True)
async def setup_component(hass, config_file, hass_storage):
"""Load the SmartThing component."""
hass_storage[STORAGE_KEY] = {"data": config_file, "version": STORAGE_VERSION}
await async_process_ha_core_config(
hass,
{"external_url": "https://test.local"},
)
await async_setup_component(hass, "smartthings", {})
def _create_location():
loc = Mock(Location)
loc.name = "Test Location"
loc.location_id = str(uuid4())
return loc
@pytest.fixture(name="location")
def location_fixture():
"""Fixture for a single location."""
return _create_location()
@pytest.fixture(name="locations")
def locations_fixture(location):
"""Fixture for 2 locations."""
return [location, _create_location()]
@pytest.fixture(name="app")
async def app_fixture(hass, config_file):
"""Fixture for a single app."""
app = Mock(AppEntity)
app.app_name = APP_NAME_PREFIX + str(uuid4())
app.app_id = str(uuid4())
app.app_type = "WEBHOOK_SMART_APP"
app.classifications = [CLASSIFICATION_AUTOMATION]
app.display_name = "Home Assistant"
app.description = f"{hass.config.location_name} at https://test.local"
app.single_instance = True
app.webhook_target_url = webhook.async_generate_url(
hass, hass.data[DOMAIN][CONF_WEBHOOK_ID]
)
settings = Mock(AppSettings)
settings.app_id = app.app_id
settings.settings = {SETTINGS_INSTANCE_ID: config_file[CONF_INSTANCE_ID]}
app.settings.return_value = settings
return app
@pytest.fixture(name="app_oauth_client")
def app_oauth_client_fixture():
"""Fixture for a single app's oauth."""
client = Mock(AppOAuthClient)
client.client_id = str(uuid4())
client.client_secret = str(uuid4())
return client
@pytest.fixture(name="app_settings")
def app_settings_fixture(app, config_file):
"""Fixture for an app settings."""
settings = Mock(AppSettings)
settings.app_id = app.app_id
settings.settings = {SETTINGS_INSTANCE_ID: config_file[CONF_INSTANCE_ID]}
return settings
def _create_installed_app(location_id, app_id):
item = Mock(InstalledApp)
item.installed_app_id = str(uuid4())
item.installed_app_status = InstalledAppStatus.AUTHORIZED
item.installed_app_type = InstalledAppType.WEBHOOK_SMART_APP
item.app_id = app_id
item.location_id = location_id
return item
@pytest.fixture(name="installed_app")
def installed_app_fixture(location, app):
"""Fixture for a single installed app."""
return _create_installed_app(location.location_id, app.app_id)
@pytest.fixture(name="installed_apps")
def installed_apps_fixture(installed_app, locations, app):
"""Fixture for 2 installed apps."""
return [installed_app, _create_installed_app(locations[1].location_id, app.app_id)]
@pytest.fixture(name="config_file")
def config_file_fixture():
"""Fixture representing the local config file contents."""
return {CONF_INSTANCE_ID: str(uuid4()), CONF_WEBHOOK_ID: secrets.token_hex()}
@pytest.fixture(name="smartthings_mock")
def smartthings_mock_fixture(locations):
"""Fixture to mock smartthings API calls."""
async def _location(location_id):
return next(
location for location in locations if location.location_id == location_id
)
smartthings_mock = Mock(SmartThings)
smartthings_mock.location.side_effect = _location
mock = Mock(return_value=smartthings_mock)
with patch(COMPONENT_PREFIX + "SmartThings", new=mock), patch(
COMPONENT_PREFIX + "config_flow.SmartThings", new=mock
), patch(COMPONENT_PREFIX + "smartapp.SmartThings", new=mock):
yield smartthings_mock
@pytest.fixture(name="device")
def device_fixture(location):
"""Fixture representing devices loaded."""
item = Mock(DeviceEntity)
item.device_id = "743de49f-036f-4e9c-839a-2f89d57607db"
item.name = "GE In-Wall Smart Dimmer"
item.label = "Front Porch Lights"
item.location_id = location.location_id
item.capabilities = [
"switch",
"switchLevel",
"refresh",
"indicator",
"sensor",
"actuator",
"healthCheck",
"light",
]
item.components = {"main": item.capabilities}
item.status = Mock(DeviceStatus)
return item
@pytest.fixture(name="config_entry")
def config_entry_fixture(hass, installed_app, location):
"""Fixture representing a config entry."""
data = {
CONF_ACCESS_TOKEN: str(uuid4()),
CONF_INSTALLED_APP_ID: installed_app.installed_app_id,
CONF_APP_ID: installed_app.app_id,
CONF_LOCATION_ID: location.location_id,
CONF_REFRESH_TOKEN: str(uuid4()),
CONF_CLIENT_ID: str(uuid4()),
CONF_CLIENT_SECRET: str(uuid4()),
}
return MockConfigEntry(
domain=DOMAIN,
data=data,
title=location.name,
version=2,
source=SOURCE_USER,
connection_class=CONN_CLASS_CLOUD_PUSH,
)
@pytest.fixture(name="subscription_factory")
def subscription_factory_fixture():
"""Fixture for creating mock subscriptions."""
def _factory(capability):
sub = Subscription()
sub.capability = capability
return sub
return _factory
@pytest.fixture(name="device_factory")
def device_factory_fixture():
"""Fixture for creating mock devices."""
api = Mock(Api)
api.post_device_command.return_value = {"results": [{"status": "ACCEPTED"}]}
def _factory(label, capabilities, status: dict = None):
device_data = {
"deviceId": str(uuid4()),
"name": "Device Type Handler Name",
"label": label,
"deviceManufacturerCode": "9135fc86-0929-4436-bf73-5d75f523d9db",
"locationId": "fcd829e9-82f4-45b9-acfd-62fda029af80",
"components": [
{
"id": "main",
"capabilities": [
{"id": capability, "version": 1} for capability in capabilities
],
}
],
" |
beagles/neutron_hacking | neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py | Python | apache-2.0 | 4,002 | 0.00025 | # Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import messaging
from neutron.common import constants
from neutron.common import rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class MeteringAgentNotifyAPI(object):
"""API for plugin to notify L3 metering agent."""
def __init__(self, topic=topics.METERING_AGENT):
super(MeteringAgentNotifyAPI, self).__init__()
target = messaging.Target(topic=topic, version='1.0')
self.client = rpc.get_client(target)
def _agent_notification(self, context, method, routers):
"""Notify l3 metering agents hosted by l3 agent hosts."""
adminContext = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_plugin()
l3_routers = {}
for router in routers:
l3_agents = plugin.get_l3_agents_hosting_routers(
adminContext, [router['id']],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.d | ebug(_('Notify metering agent at %(topic)s.%(host)s '
'the message %(method)s'),
{'topic': self.client.target.topic,
'host': l3_agent.host,
'method': method})
l3_router = l3_routers.get(l3 | _agent.host, [])
l3_router.append(router)
l3_routers[l3_agent.host] = l3_router
for host, routers in l3_routers.iteritems():
topic = '%s.%s' % (self.client.target.topic, host)
cctxt = self.client.prepare(topic=topic)
cctxt.cast(context, method, routers=routers)
def _notification_fanout(self, context, method, router_id):
LOG.debug(_('Fanout notify metering agent at %(topic)s the message '
'%(method)s on router %(router_id)s'),
{'topic': self.client.target.topic,
'method': method,
'router_id': router_id})
cctxt = self.client.prepare(fanout=True)
cctxt.cast(context, method, router_id=router_id)
def _notification(self, context, method, routers):
"""Notify all the agents that are hosting the routers."""
plugin = manager.NeutronManager.get_plugin()
if utils.is_extension_supported(
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
self._agent_notification(context, method, routers)
else:
cctxt = self.client.prepare(fanout=True)
cctxt.cast(context, method, routers=routers)
def router_deleted(self, context, router_id):
self._notification_fanout(context, 'router_deleted', router_id)
def routers_updated(self, context, routers):
if routers:
self._notification(context, 'routers_updated', routers)
def update_metering_label_rules(self, context, routers):
self._notification(context, 'update_metering_label_rules', routers)
def add_metering_label(self, context, routers):
self._notification(context, 'add_metering_label', routers)
def remove_metering_label(self, context, routers):
self._notification(context, 'remove_metering_label', routers)
|
meriembendris/ADNVideo | src/annotation/grammar/label-stats.py | Python | lgpl-3.0 | 2,190 | 0.005023 | import json, sys
from collections import defaultdict
annotations = json.loads(sys.stdin.read())
stats = defaultdict(lambda: defaultdict(float))
fo | r annotation in annotations:
show = '_'.join(annotation['name'].split('_')[:2])
stats['show'][show] += 1
stats['shots']['num'] += 1
stats['split'][annotation['split']] += 1
for subshot in annotation['subshots']:
stats['subshots']['num'] += 1
stats['subshots'][subshot['type']] += 1
stats['persons']['num'] += len(subshot['persons'])
for person in subshot['persons']:
stats['role'][person['role']] += 1
if person['pose'] | != None:
stats['pose'][person['pose']] += 1
stats['pose']['all'] += 1
if person['location'] != None:
stats['location'][person['location']] += 1
stats['location']['all'] += 1
for show in sorted(stats['show'], key=lambda x: -stats['show'][x]):
print('%s: %.2f%%' % (show, 100.0 * stats['show'][show] / stats['shots']['num']))
print()
for split in sorted(stats['split'], key=lambda x: -stats['split'][x]):
print('%s: %.2f%%' % (split, 100.0 * stats['split'][split] / stats['shots']['num']))
print()
print('avg num subshots: %.4f' % (stats['subshots']['num'] / stats['shots']['num']))
print('avg num persons: %.4f' % (stats['persons']['num'] / stats['shots']['num']))
print()
for subshot in sorted(stats['subshots'], key=lambda x: -stats['subshots'][x]):
if subshot != 'num':
print('%s: %.2f%%' % (subshot, 100.0 * stats['subshots'][subshot] / stats['shots']['num']))
print()
for role in sorted(stats['role'], key=lambda x: -stats['role'][x]):
print('%s: %.2f%%' % (role, 100.0 * stats['role'][role] / stats['persons']['num']))
print()
for pose in sorted(stats['pose'], key=lambda x: -stats['pose'][x]):
if pose != 'all':
print('%s: %.2f%%' % (pose, 100.0 * stats['pose'][pose] / stats['pose']['all']))
print()
for location in sorted(stats['location'], key=lambda x: -stats['location'][x]):
if location != 'all':
print('%s: %.2f%%' % (location, 100.0 * stats['location'][location] / stats['location']['all']))
print()
|
Kloenk/GarrysModserver | GModServer/StartServer.py | Python | apache-2.0 | 1,006 | 0.011928 | #!/usr/bin/env python3
from GModServer import Variables
import os
def StartGarrysModServer(steanApiAuthKey=Variables.SteamApiAuthKey, steamWorkShopID=Variables.SteamWorkShopId,
serverGamemode=Variables.ServerGamemode, serverDefaultMap=Variables.ServerDefaultMap,
serverPort=Variables.ServerPort, serverMaxPlayer=Variables.ServerMaxPlayer,
serverRunFile=Variables.ServerRunFile, debug=False):
Command="%s -game garrysmod +maxplayers %s -authkey %s +host_workshop_collection %s +map %s +gamemode %s +po | rt " \
"%s" % (serverRunFile, serverMaxPl | ayer, steanApiAuthKey,
steamWorkShopID, serverDefaultMap, serverGamemode, serverPort)
if(debug==True):
print(Command)
os.system(Command) #start gMod server
if __name__ == '__main__':
from PythonServerKernel.Exceptions import RunnedFromFalseFile
raise RunnedFromFalseFile('GModServer_StartServer_py') |
dlcs/starsky | app/tests/test_metadata_width_height.py | Python | mit | 1,410 | 0.002128 | import unittest
import starsky_ingest
class TextMetadataWidthHeight(unittest.TestCase):
def test_hocr(self):
hocr = open('app/tests/fixtures/vet1.html').read()
width, height, canvas_width, canvas_height = starsky_ingest.Starsky.get_width_height(hocr, 'hocr', "https://dlcs.io/iiif-img/50/1/000214ef-74f3-4ec2-9a5f-3b79f50fc500")
self.assertEqual(1205, width)
self.assertEqual(2000, height)
self.assertEqual(1929, canvas_width)
self.as | sertEqual(2849, canvas_height)
def test_hocr_nosize(self):
hocr = open('app/tests/fixtures/ocropus_trained.html').read()
width, height, canvas_width, canvas_height = starsky_ingest.Starsky.get_width_height(hocr, 'hocr', "https://dlcs.io/iiif-img/50/1/000214ef-74f3-4ec2-9a5f-3b79f50fc500")
self.assertEqual(1929, width)
self.assertEqual(2849, height)
self.assertEqual(19 | 29, canvas_width)
self.assertEqual(2849, canvas_height)
def test_alto(self):
alto = open('app/tests/fixtures/b20402533_0010.xml').read()
width, height, canvas_width, canvas_height = starsky_ingest.Starsky.get_width_height(alto, 'alto', "https://dlcs.io/iiif-img/50/1/000214ef-74f3-4ec2-9a5f-3b79f50fc500")
self.assertEqual(2319, width)
self.assertEqual(3243, height)
self.assertEqual(1929, canvas_width)
self.assertEqual(2849, canvas_height)
|
ENCODE-DCC/snovault | src/snowflakes/audit/__init__.py | Python | mit | 41 | 0 | def includeme(config):
| config.scan() | |
joefutrelle/pocean-core | pocean/__init__.py | Python | mit | 164 | 0 | #!python |
# coding=utf-8
# Package level logger
import logging
logger = logging.getLogger("pocean")
logger.addHandler(logging.NullHandler())
__version__ | = "1.0.0"
|
CooperLuan/devops.notes | taobao/top/api/rest/ItempropsGetRequest.py | Python | mit | 606 | 0.034653 | '''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.bas | e import RestApi
class ItempropsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.attr_keys = None
self.child_path = None
self.cid = None
self.fields = None
self.is_color_prop = None
self.is_enum_prop = None
self.is_input_prop = None
self.is_item_prop = None
self.is_key_prop = None
self.is_sale_prop = None
self.parent_pid = None
self. | pid = None
self.type = None
def getapiname(self):
return 'taobao.itemprops.get'
|
arviz-devs/arviz | doc/logo/generate_logo.py | Python | apache-2.0 | 1,282 | 0.00078 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from scipy import stats
x = np.linspace(0, 1, 200)
pd | fx = stats.beta(2, 5).pdf(x)
path = Path(np.array([x, pdfx]).transpose())
patch = PathPatch(path, facecolor="none", alpha=0)
plt.gca().add_patch(patch)
cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["#00bfbf", "#00bfbf", "#126a8a"])
im = plt.imshow(
np.array([[1, 0, 0], [1, 1, 0]]),
cmap=cmap,
| interpolation="bicubic",
origin="lower",
extent=[0, 1, 0.0, 5],
aspect="auto",
clip_path=patch,
clip_on=True,
)
plt.axis("off")
plt.ylim(0, 5.5)
plt.xlim(0, 0.9)
bbox = Bbox([[0.75, 0.5], [5.4, 2.2]])
# plt.savefig('logo_00.png', dpi=300, bbox_inches=bbox, transparent=True)
plt.text(
x=0.04,
y=-0.01,
s="ArviZ",
clip_on=True,
fontdict={"name": "ubuntu mono", "fontsize": 62},
color="w",
)
plt.savefig("ArviZ.png", dpi=300, bbox_inches=bbox, transparent=True)
plt.savefig("ArviZ.pdf", dpi=300, bbox_inches=bbox, transparent=True)
plt.savefig("ArviZ.svg", dpi=300, bbox_inches=bbox, transparent=True)
plt.savefig("ArviZ.jpg", dpi=300, bbox_inches=bbox, transparent=True)
|
yCanta/yCanta | windows-build.py | Python | unlicense | 2,677 | 0.010086 | #!/usr/env python
import os
import sys
import shutil
import tempfile
if os.name != 'nt':
print 'Windows only!'
sys.exit(1)
if not len(sys.argv) == 3:
print 'USAGE: %s PortablePythonDir output-dir' % sys.argv[0]
print ' Example: D:\yCanta>..\PortablePython\Python-Portable.exe windows-build.py d:\PortablePython d:\output'
sys.exit(1)
ppydir = sys.argv[1]
workdir = os.path.abspath(sys.argv[2])
requirements = os.path.abspath('requirements.txt')
if not os.path.exists(workdir):
os.mkdir(workdir)
exclude = [
'song.db',
'songs',
'songbooks',
'songbook_backup',
'webapp\\static\\songs',
'webapp\\static\\songbooks',
'.git*',
'.hg*']
print 'EXCLUDE:', exclude
print 'Copying to working dir:', workdir
shutil.copytree('.', os.path.join(workdir, 'yCanta'), ignore=shutil.ignore_patte | rns(*exclude))
shutil.copytree(ppy | dir, os.path.join(workdir, 'PortablePython'))
print 'Creating launcher script'
launcher = open(os.path.join(workdir, 'yCanta.bat'), 'w')
launcher.write(r'''cd yCanta
..\PortablePython\Python-Portable.exe start-webapp.py --start-browser
'''.rstrip())
launcher.close()
print 'Installing packages into portable python environment'
easy_install = os.path.join(workdir, 'PortablePython', 'App', 'Scripts', 'easy_install.exe')
print 'EASY_INSTALL:', easy_install
for line in open(requirements):
if '#' in line:
continue
os.system(easy_install + ' ' + line.strip())
os.system(easy_install + ' pip')
# run install via pip too cause of weird portable python bug ... if I do it both ways (easy_install and pip) it works, else it doesn't.
os.system(os.path.join(workdir, 'PortablePython', 'Python-Portable.exe') + ' -m pip install -r ' + requirements)
print 'Creating zip archive: yCanta.zip'
shutil.make_archive('yCanta', 'zip', workdir)
print 'DONE'
#print 'Cleaning up working dir ...'
#shutil.rmtree(workdir)
#exclude = [ os.path.abspath(line) for line in open('.gitignore') if '#' not in line ]
#print 'EXCLUDE:', exclude
#
#for root, dirs, files in os.walk('.'):
# for i in reversed(range(len(dirs))): # go through indexes backwords because we're doing deletions
# path = os.path.abspath(os.path.join(root, dirs[i]))
# if path in exclude:
# print 'EXCLUDE:', path
# del dirs[i]
# else:
# print 'INCLUDE:', path
# os.mkdir(os.path.join(workdir, root, dirs[i]))
#
# for i in reversed(range(len(files))): # go through indexes backwords because we're doing deletions
# path = os.path.abspath(os.path.join(root, files[i]))
# if path in exclude:
# print 'EXCLUDE:', path
# else:
# print 'INCLUDE:', path
# os.mkdir(os.path.join(workdir, root, files[i]))
|
smutt/WRL | topThick.py | Python | gpl-3.0 | 2,656 | 0.016943 | #!/usr/bin/python
# The file is part of the WRL Project.
#
# The WRL Project is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The WRL Project is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2017, Andrew McConachie, <andrew.mcconachie@icann.org>
import os
import sys
import random
import dns.resolver
numTestDomains = 100
numTopTLDs = 100
ignoreDomains = ['com', 'net', 'jobs', 'cat', 'mil', 'edu', 'gov', 'int', 'arpa']
serverZone = '.ws.sp.am' # DNS Zone containing CNAME records pointing to whois FQDNs
def dbg(s):
# print s
pass
random.seed()
zFiles = os.listdir('zonefiles/')
#dbgFiles = 10 # How many files to read while developing this, remove when finished coding
tlds = []
for zf in zFiles:
# if len(tlds) >= dbgFiles: # For developing, remove when finished coding
# break
dbg(zf)
tld = {}
if zf.find(".txt") == -1:
dbg("This should not happen")
continue
zfh = open('zonefiles/' + zf, 'r')
lines = zfh.read().splitlines()
zfh.close()
dbg("after file read")
tld['name'] = lines[0].split(".")[0].strip()
if tld['name'] in ignoreDomains:
dbg("Ignoring:" + tld['name'])
continue
dbg("after name split")
rrs = []
for line in lines:
rr = line.split("\t")
rrs.append(rr)
dbg("after rr split")
ns = []
for rr in rrs:
if rr[3].lower() == 'ns':
ns.append(rr[0].split(".")[0])
dbg("after counting NS records")
if len(ns) < numTestDomains:
continue
else:
tld['size'] = len(ns)
tld['domains'] = random.sample(ns, numTestDomains)
for d i | n tld['domains']:
dbg(d + "." + tld['name'])
dbg(tld['name'] + ": " + str(tld['size']))
tlds.append(tld)
tlds.sort(key=lambda tld: tld['size'], reverse=True)
for ii in xrange(numTopTLDs):
# Find FQDN of whois server
| d = dns.resolver.Resolver()
try:
resp = d.query(tlds[ii]['name'] + serverZone, 'CNAME')
if len(resp.rrset) < 1:
whois = 'UNKNOWN'
else:
whois = str(resp.rrset[0]).strip('.')
except:
whois = 'UNKNOWN'
s = whois + ','
for dom in tlds[ii]['domains']:
s += dom + '.' + tlds[ii]['name'] + ','
print s.strip(',')
|
tinkererr/projecteulerpython | 3.py | Python | unlicense | 136 | 0.022059 | factors = lambda x: [y for y in reversed(range(2,round(x/2)+1)) if x % y == 0 and len(factors(y)) == 0]
print(factors(600851475143)[0] | )
| |
Tong-Chen/scikit-learn | sklearn/metrics/scorer.py | Python | bsd-3-clause | 10,407 | 0.000096 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test dat | a and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck | @uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from warnings import warn
import numpy as np
from . import (r2_score, mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score, precision_score,
recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
return self._sign * self._score_func(y_true, y_pred, **self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def _deprecate_loss_and_score_funcs(
loss_func=None, score_func=None, scoring=None,
score_overrides_loss=False):
scorer = None
if loss_func is not None or score_func is not None:
if loss_func is not None:
warn("Passing a loss function is "
"deprecated and will be removed in 0.15. "
"Either use strings or score objects. "
"The relevant new parameter is called ''scoring''. ",
category=DeprecationWarning, stacklevel=2)
scorer = make_scorer(loss_func, greater_is_better=False)
if score_func is not None:
warn("Passing function as ``score_func`` is "
"deprecated and will be removed in 0.15. "
"Either use strings or score objects. "
"The relevant new parameter is called ''scoring''.",
category=DeprecationWarning, stacklevel=2)
if loss_func is None or score_overrides_loss:
scorer = make_scorer(score_func)
elif isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s' % (scoring,
sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
|
hmvp/python-tdbus | lib/tdbus/gevent.py | Python | mit | 4,066 | 0.000492 | #
# This file is part of python-tdbus. Python-tdbus is free software
# available under the terms of the MIT license. See the file "LICENSE" that
# was provided together with this source file for the licensing terms.
#
# Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
# complete list.
from __future__ import division, absolute_import
from gevent import core, local
import gevent
from gevent.hub import get_hub, Waiter
from tdbus import _tdbus
from tdbus.connection import DBusConnection, DBusError
from tdbus.loop import EventLoop
if not hasattr(gevent, 'wait'):
raise ImportError("Must use gevent 1.0 or greater")
class GEventLoop(EventLoop):
"""Integration with the GEvent event loop."""
def __init__(self, connection):
self._connection = connection
self._hub = get_hub()
def add_watch(self, watch):
fd = watch.get_fd()
flags = watch.get_flags()
evtype = 0
if flags & _tdbus.DBUS_WATCH_READABLE:
evtype |= core.READ
if flags & _tdbus.DBUS_WATCH_WRITABLE:
evtype |= core.WRITE
event = get_hub().loop.io(fd, evtype)
if watch.get_enabled():
event.start(self._handle_watch, watch, pass_events=True)
watch.set_data(event)
def remove_watch(self, watch):
event = watch.get_data()
event.stop()
watch.set_data(None)
def watch_toggled(self, watch):
event = watch.get_data()
if watch.get_enabled():
event.start(self._handle_watch, watch, pass_events=True)
else:
event.stop()
def _handle_watch(self, evtype, watch):
flags = 0
if evtype & core.READ:
flags |= _tdbus.DBUS_WATCH_READABLE
if evtype & core.WRITE:
flags |= _tdbus.DBUS_WATCH_WRITABLE
wa | tch.handle(flags)
self._hub.loop.run_callback(self._handle_dispatch, self._connection)
def add_timeout(self, timeout):
interval = timeout.get_interval()
event = get_hub().loop.timer(interval / 1000, interval / 1000)
if timeout.get_enabled():
event.start(self._handle_timeout, timeout) |
# Currently (June 2012) gevent does not support reading or changing
# the interval of a timer. Libdbus however expects it an change the
# interval, so we store it separately outside the event.
timeout.set_data((interval, event))
def remove_timeout(self, timeout):
interval, event = timeout.get_data()
event.stop()
timeout.set_data(None)
def timeout_toggled(self, timeout):
interval, event = timeout.get_data()
if timeout.get_enabled():
if interval != timeout.get_interval():
# Change interval => create new timer
event.stop()
event = get_hub().loop.timer(interval / 1000, interval / 1000)
timeout.set_data(event)
event.start(self._handle_timeout, timeout)
else:
event.stop()
def _handle_timeout(self, timeout):
timeout.handle()
self._hub.loop.run_callback(self._handle_dispatch, self._connection)
def _handle_dispatch(self, connection):
while connection.get_dispatch_status() == _tdbus.DBUS_DISPATCH_DATA_REMAINS:
connection.dispatch()
class GEventDBusConnection(DBusConnection):
Loop = GEventLoop
Local = local.local
def call_method(self, *args, **kwargs):
"""Call a method."""
callback = kwargs.get('callback')
if callback is not None:
super(GEventDBusConnection, self).call_method(*args, **kwargs)
return
waiter = Waiter()
def _gevent_callback(message):
waiter.switch(message)
kwargs['callback'] = _gevent_callback
super(GEventDBusConnection, self).call_method(*args, **kwargs)
reply = waiter.get()
self._handle_errors(reply)
return reply
def spawn(self, handler, *args):
gevent.spawn(handler, *args)
|
openqt/algorithms | leetcode/python/lc414-third-maximum-number.py | Python | gpl-3.0 | 1,193 | 0.015926 | # coding=utf-8
import unittest
"""414. Third Maximum Number
https://leetcode.com/problems/third-maximum-number/description/ |
Given a **non-empty** array of integers, return the **third** maximum number
in this array. If it does not exist, return the maximum number. The time
complexity must be in O(n).
**Example 1:**
**Input:** [3, 2, 1]
**Output:** 1
|
**Explanation:** The third maximum is 1.
**Example 2:**
**Input:** [1, 2]
**Output:** 2
**Explanation:** The third maximum does not exist, so the maximum (2) is returned instead.
**Example 3:**
**Input:** [2, 2, 3, 1]
**Output:** 1
**Explanation:** Note that the third maximum here means the third maximum distinct number.
Both numbers with value 2 are both considered as second maximum.
Similar Questions:
Kth Largest Element in an Array (kth-largest-element-in-an-array)
"""
class Solution(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
espressif/esp-idf | examples/system/esp_event/user_event_loops/example_test.py | Python | apache-2.0 | 1,311 | 0.003051 | from __future__ import print_function
import ttfw_idf
TASK_ITERATION_LIMIT = 10
TASK_ITERATION_POSTING = 'posting TASK_EVENTS:TASK_ITERATION_EVENT to {}, iteration {} out of ' + str(TASK_ITERATION_LIMIT)
TASK_ITERATION_HANDLING = 'handling TASK_EVENTS:TASK_ITERATION_EVENT from {}, iteration {}'
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32', 'esp32c3'])
def test_user_event_loops_example(env, extra_data):
dut = env.get_dut('user_event_loops', 'examples/system/esp_event/user_event_loops')
dut.start_app()
dut.expect('setting up')
dut.expect('starting event source')
dut.expect('starting application task')
print('Finished setup')
for iteration in range(1, TASK_ITERATION_LIMIT + 1):
loop = None
if (iteration % 2 == 0):
loop = 'loop_with_task'
else:
loop = 'loop_without_task'
dut.expect(TASK_ITERATION_POSTING.format(loop, iterati | on))
print('Posted iteration {} to {}'.format(iteratio | n, loop))
dut.expect(TASK_ITERATION_HANDLING.format(loop, iteration))
print('Handled iteration {} from {}'.format(iteration, loop))
dut.expect('deleting task event source')
print('Deleted task event source')
if __name__ == '__main__':
test_user_event_loops_example()
|
Arkhash/arkhash | contrib/wallettools/walletchangepass.py | Python | mit | 219 | 0.004566 | from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:2979")
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassph | rasechan | ge(pwd, pwd2) |
thammegowda/incubator-joshua | scripts/training/run_tuner.py | Python | apache-2.0 | 18,921 | 0.004334 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Runs the Z-MERT and PRO tuners.
"""
from __future__ import print_function
import argparse
from collections import namedtuple
import logging
import os
import shutil
import signal
import stat
from subprocess import CalledProcessError, Popen, PIPE, check_output, call
import sys
import re
JOSHUA = os.environ.get('JOSHUA')
EXAMPLE = r"""
Example invocation:
$JOSHUA/scripts/support/run_zmert.py \
/path/to/source.txt \
/path/to/reference.en \
--tuner zmert \
--tunedir working-dir \
--decoder /path/to/decoder/command \
--decoder-output /path/to/decoder/nbest/output \
--decoder-config /path/to/joshua.config
--tuner can be one of zmert or pro. If the path to the reference is a prefix
with ".0", ".1", etc extensions, they are treated as multiple references
(extensions "0", "1", etc also works --- i.e., the path to the reference can
have a trailing period). The decoder command should decode your source file and
produce output at the --decoder-output location in the Joshua n-best format, e.g.,
0 ||| example candidate translation ||| tm_pt_0=1 lm_0=17 ||| -34.2
"""
ZMERT_CONFIG_TEMPLATE = """### MERT parameters
# target sentences file name (in this case, file name prefix)
-r <REF>
-rps <NUMREFS> # references per sentence
-p <TUNEDIR>/params.txt # parameter file
-m <METRIC> # evaluation metric and its options
-maxIt <ITERATIONS> # maximum MERT iterations
-ipi 20 # number of intermediate initial points per iteration
-cmd <DECODER_COMMAND> # file containing commands to run decoder
-decOut <DECODER_OUTPUT> # file produced by decoder
-dcfg <DECODER_CONFIG> # decoder config file
-N 300 # size of N-best list
-v 1 # verbosity level (0-2; higher value => more verbose)
"""
PRO_CONFIG_TEMPLATE = """### Part 1: parameters similar to Z-MERT
# target sentences file name (in this case, file name prefix)
-r <REF>
# references per sentence
-rps <NUMREFS>
# parameter file
-p <TUNEDIR>/params.txt
#metric setting:
-m <METRIC>
#-m TER nocase punc 5 5 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1
#-m TER-BLEU nocase punc 20 50 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1 4 closest
#-m METEOR en norm_yes keepPunc 2 #old meteor interface #Z-MERT Meteor interface | (not working)
#-m Meteor en lowercase '0.5 1.0 0.5 0.5' 'exact stem synonym paraphrase' '1.0 0.5 0.5 0.5' #CMU meteor interface
# maximum PRO iterations
-maxIt <ITERATIONS>
# file containing commands to run decoder
-cmd <DECODER_COMMAN | D>
# file prodcued by decoder
-decOut <DECODER_OUTPUT>
# decoder config file
-dcfg <DECODER_CONFIG>
# size of N-best list
-N 300
# verbosity level (0-2; higher value => more verbose)
-v 1
#use one of the classifiers(and the corresponding parameter setting) below:
#1.perceptron paramters
-classifierClass joshua.pro.ClassifierPerceptron
-classifierParams '30 0.5 0.0'
#2.MegaM parameters
#-classifierClass joshua.pro.ClassifierMegaM
#-classifierParams './megam_command ./megam_train.data ./megam_weights'
#3.Stanford Max-Ent parameters
#-classifierClass joshua.pro.ClassifierMaxEnt
#-classifierParams './maxent_prop_file'
#4.LibSVM parameters
#-classifierClass joshua.pro.ClassifierSVM
#-classifierParams './libsvm_command ./libsvm_train.data ./libsvm_train.data.model'
# num of candidate samples
-Tau 8000
# num of top candidates
-Xi 50
# linear interpolation coef. range:[0,1]. 1=using new weights only; 0=using previous weights only
-interCoef 0.5
# threshold for sample selection
-metricDiff 0.05
"""
MIRA_CONFIG_TEMPLATE = """### Part 1: parameters similar to Z-MERT
# target sentences file name (in this case, file name prefix)
-r <REF>
# references per sentence
-rps <NUMREFS>
# parameter file
-p <TUNEDIR>/params.txt
#metric setting:
-m <METRIC>
#-m TER nocase punc 5 5 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1
#-m TER-BLEU nocase punc 20 50 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1 4 closest
#-m METEOR en norm_yes keepPunc 2 #old meteor interface #Z-MERT Meteor interface(not working)
#-m Meteor en lowercase '0.5 1.0 0.5 0.5' 'exact stem synonym paraphrase' '1.0 0.5 0.5 0.5' #CMU meteor interface
# maximum MIRA iterations
-maxIt <ITERATIONS>
# file containing commands to run decoder
-cmd <DECODER_COMMAND>
# file prodcued by decoder
-decOut <DECODER_OUTPUT>
# decoder config file
-dcfg <DECODER_CONFIG>
# size of N-best list
-N 300
# verbosity level (0-2; higher value => more verbose)
-v 1
### PART 2: MIRA parameters
#oracle selection method:
#1: "hope"(default)
#2: best metric score(ex: max BLEU)
-oracleSelection 1
#prediction selection method:
#1: "fear"(default)
#2: max model score
#3: worst metric score(ex: min BLEU)
-predictionSelection 1
#shuffle the training samples? (default:1)
-needShuffle 1
#average the weights after each epoch? (default:1)
-needAvg 1
#when use BLEU/TER-BLEU as metric, use the pseudo corpus to compute BLEU? (default:1)
-usePseudoCorpus 1
#corpus decay coefficient (only valid when pseudo corpus is used for BLEU, default:0.99)
-corpusDecay 0.99
#scale the model score(in order to make it comparable to the metric score)?(default:1)
-needScaling 1
#options for scaling (only valid when -needScaling=1)
-scoreRatio 5 #scale the model score so that abs(model_score/metric_score) \approx scoreRatio (default:5)
#MIRA internal iterations (default:1)
#-miraIter 1
#regularization parameter (default:0.01)
-C 0.01
#run perceptron mode? (default:0)
-runPercep 0
"""
ADAGRAD_CONFIG_TEMPLATE = """### Part 1: parameters similar to Z-MERT
# target sentences file name (in this case, file name prefix)
-r <REF>
# references per sentence
-rps <NUMREFS>
# parameter file
-p <TUNEDIR>/params.txt
#metric setting:
-m <METRIC>
#-m TER nocase punc 5 5 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1
#-m TER-BLEU nocase punc 20 50 joshua/zmert/tercom-0.7.25/tercom.7.25.jar 1 4 closest
#-m METEOR en norm_yes keepPunc 2 #old meteor interface #Z-MERT Meteor interface(not working)
#-m Meteor en lowercase '0.5 1.0 0.5 0.5' 'exact stem synonym paraphrase' '1.0 0.5 0.5 0.5' #CMU meteor interface
# maximum iterations
-maxIt <ITERATIONS>
# file containing commands to run decoder
-cmd <DECODER_COMMAND>
# file prodcued by decoder
-decOut <DECODER_OUTPUT>
# decoder config file
-dcfg <DECODER_CONFIG>
# size of N-best list
-N 300
# verbosity level (0-2; higher value => more verbose)
-v 1
### PART 2: AdaGrad parameters
#oracle selection method:
#1: "hope"(default)
#2: best metric score(ex: max BLEU)
-oracleSelection 1
#prediction selection method:
#1: "fear"(default)
#2: max model score
#3: worst metric score(ex: min BLEU)
-predictionSelection 1
#shuffle the training samples? (default:1)
-needShuffle 1
#average the weights after each epoch? (default:1)
-needAvg 1
#return the best weights during tuning? (default:1)
-returnBest 1
#when use BLEU/TER-BLEU as metric, use the pseudo corpus to compute BLEU? (default:1)
-usePseudoCorpus 1
#corpus decay coefficient (only valid when pseudo corpus is used for BLEU, default:0.99)
-corpusDecay 0.99
#scale the model score(in order to make it comparable to the metric score)?( |
phil-mansfield/gotetra | render/scripts/add_densities.py | Python | mit | 206 | 0 | import numpy as np
import sys
width = int(sys.argv[1])
out = sys.argv[2]
inputs = sys.argv[3:]
grid = np.zeros(width | * width * width)
for fname in inputs:
grid | += np.fromfile(fname)
grid.tofile(out)
|
evernym/zeno | plenum/test/node_request/test_propagate/helper.py | Python | apache-2.0 | 377 | 0.005305 | from plenum.test.spy_helpers import get_coun | t
def sum_of_request_propagates(node):
return get_count(node. | replicas[0]._ordering_service,
node.replicas[0]._ordering_service._request_propagates_if_needed) + \
get_count(node.replicas[1]._ordering_service,
node.replicas[1]._ordering_service._request_propagates_if_needed)
|
metno/EVA | eva/rest/__init__.py | Python | gpl-2.0 | 5,891 | 0.003395 | """
RESTful API for controlling and monitoring EVA.
"""
import eva
import eva.globe
import eva.gpg
import eva.rest.resources
import falcon
import json
import re
import wsgiref.simple_server
class RequireJSON(object):
def process_request(self, req, resp):
if not req.client_accepts_json:
raise falcon.HTTPNotAcceptable('This API only supports responses encoded as JSON.')
if req.method in ('POST', 'PUT') and req.content_length not in (None, 0):
if 'application/json' not in req.content_type:
raise falcon.HTTPUnsupportedMediaType('This API only supports requests encoded as JSON.')
class RequireGPGSignedRequests(eva.globe.GlobalMixin):
TIME_DIFF_THRESHOLD = 2.0
def __init__(self, gpg_key_ids):
self.gpg_key_ids = gpg_key_ids
self.header_regex = re.compile(r'^X-EVA-Request-Signature-\d+$', re.IGNORECASE)
def _gpg_signature_from_headers(self, headers):
signature = []
keys = sorted(headers.keys())
for key in keys:
if not self.header_regex.match(key):
continue
signature += [headers[key]]
return | signature
def _check_signature(self, payload, signature):
checker = eva.gpg.GPGSignatureChecker(payload, signature)
result = checker.verify()
if result.exit_code != 0:
self.logger.warning('GPG verification of request failed: %s', result.stderr[0])
for line in result.stderr:
self.logger.warning(line) |
raise falcon.HTTPUnauthorized('GPG verification of request failed.')
if result.key_id is None:
self.logger.warning('GPG key ID not parsed correctly from GPG output, dropping request.')
raise falcon.HTTPUnauthorized('GPG verification of request failed.')
self.logger.info('Request is signed by %s with %s key %s at %s', result.signer, result.key_type, result.key_id, eva.strftime_iso8601(result.timestamp))
if result.key_id not in self.gpg_key_ids:
self.logger.warning("GPG key ID '%s' is not in whitelist, dropping request.", result.key_id)
raise falcon.HTTPUnauthorized('Only few of mere mortals may try to enter the twilight zone.')
time_diff = eva.now_with_timezone() - result.timestamp
time_diff_secs = abs(time_diff.total_seconds())
if time_diff_secs > self.TIME_DIFF_THRESHOLD:
self.logger.warning("GPG signature differs from local time with %.1f seconds, over threshold of %.1f seconds, dropping request.", time_diff_secs, self.TIME_DIFF_THRESHOLD)
raise falcon.HTTPUnauthorized('Too high time difference between server and client; is your clock correct?')
self.logger.info('Permitting access to %s with %s key %s', result.signer, result.key_type, result.key_id)
def process_request(self, req, resp):
if req.method == 'GET':
return
signature = self._gpg_signature_from_headers(req.headers)
self.logger.info('Verifying request signature:')
[self.logger.info(s) for s in signature]
self._check_signature(req.context['body'], signature)
class JSONTranslator(object):
def process_request(self, req, resp):
req.context['body'] = ''
if req.content_length in (None, 0):
return
body = req.stream.read()
if not body:
raise falcon.HTTPBadRequest('Empty request body', 'A valid JSON document is required.')
try:
req.context['body'] = body.decode('utf-8')
req.context['doc'] = json.loads(req.context['body'])
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPError(
falcon.HTTP_753,
'Malformed JSON', 'Could not decode the request body. The JSON was incorrect or not encoded as UTF-8.',
)
def process_response(self, req, resp, resource):
if 'result' not in req.context:
return
resp.body = json.dumps(req.context['result'])
class Server(eva.config.ConfigurableObject, eva.globe.GlobalMixin):
"""
Run a HTTP REST API based on Falcon web framework.
"""
CONFIG = {
'gpg_key_ids': {
'type': 'list_string',
'default': '',
}
}
OPTIONAL_CONFIG = [
'gpg_key_ids',
]
def init(self):
gpg_middleware = RequireGPGSignedRequests(self.env['gpg_key_ids'])
gpg_middleware.set_globe(self.globe)
self.app = falcon.API(middleware=[
RequireJSON(),
JSONTranslator(),
gpg_middleware,
])
self._resources = []
self._setup_resources()
self.server = None
def start(self, host, port):
self.server = wsgiref.simple_server.make_server(host, port, self.app)
self.server.timeout = 0.001
def _setup_resources(self):
self._add_resource('control', '/control/{method}', eva.rest.resources.ControlResource())
self._add_resource('health', '/health', eva.rest.resources.HealthResource())
self._add_resource('job', '/jobs/{job_id}', eva.rest.resources.JobResource())
self._add_resource('jobs', '/jobs', eva.rest.resources.JobsResource())
self._add_resource('process', '/process/{method}', eva.rest.resources.ProcessResource())
def _add_resource(self, name, path, resource):
self._resources += [name]
setattr(self, name, resource)
resource.set_globe(self.globe)
self.app.add_route(path, resource)
def set_eventloop_instance(self, eventloop):
for resource in self._resources:
instance = getattr(self, resource)
instance.set_eventloop_instance(eventloop)
def respond_to_next_request(self):
if not self.server:
return
self.server.handle_request()
|
billiob/papyon | papyon/msnp2p/session.py | Python | gpl-2.0 | 11,748 | 0.005958 | # -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2007 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2008 Richard Spiers <richard.spiers@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from papyon.event import EventsDispatcher
from papyon.msnp2p.constants import *
from papyon.msnp2p.SLP import *
from papyon.msnp2p.transport import *
from papyon.util.parsing import build_account
from papyon.util.timer import Timer
import papyon.util.element_tree as ElementTree
import gobject
import base64
import logging
import random
import uuid
import os
__all__ = ['P2PSession']
logger = logging.getLogger('papyon.msnp2p.session')
MAX_INT32 = 0x7fffffff
MAX_INT16 = 0x7fff
class P2PSession(gobject.GObject, EventsDispatcher, Timer):
__gsignals__ = {
"accepted" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
"rejected" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
"completed" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"progressed" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"canceled" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
"disposed" : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
())
}
def __init__(self, session_manager, peer, peer_guid=None, euf_guid="",
application_id=0, message=None):
gobject.GObject.__init__(self)
EventsDispatcher.__init__(self)
Timer.__init__(self)
self._session_manager = session_manager
self._transport_manager = session_manager._transport_manager
self._client = session_manager._client
self._peer = peer
self._peer_guid = peer_guid
self._euf_guid = euf_guid
self._application_id = application_id
self._completed = False
self._version = 1
if self._client.profile.client_id.supports_p2pv2 and \
peer.client_capabilities.supports_p2pv2:
self._version = 2
if message is not None:
self._id = message.body.session_id
self._call_id = message.call_id
self._cseq = message.cseq
self._branch = message.branch
self._incoming = True
else:
self._id = self._generate_id()
self._call_id = "{%s}" % uuid.uuid4()
self._cseq = 0
self._branch = "{%s}" % uuid.uuid4()
self._incoming = False
self._session_manager._register_session(self)
def _generate_id(self, max=MAX_INT32):
"""
Returns a random ID.
@return: a random integer between 1000 and sys.maxint
@rtype: integer
"""
return random.randint(1000, max)
@property
def id(self):
return self._id
@property
def incoming(self):
return self._incoming
@property
def completed(self):
return self._completed
@property
def call_id(self):
return self._call_id
@property
def peer(self):
return self._peer
@property
def peer_guid(self):
return self._peer_guid
@property
def local_id(self):
if self._version >= 2:
return build_account(self._client.profile.account,
self._client.machine_guid)
return self._client.profile.account
@property
def remote_id(self):
if self._version >= 2:
return build_account(self._peer.account, self._peer_guid)
return self._peer.account
def set_receive_data_buffer(self, buffer, size):
self._transport_manager.register_data_buffer(self.peer,
self.peer_guid, self.id, buffer, size)
def _invite(self, context):
body = SLPSessionRequestBody(self._euf_guid, self._application_id,
context, self._id)
message = SLPRequestMessage(SLPRequestMethod.INVITE,
"MSNMSGR:" + self.remote_id,
to=self.remote_id,
frm=self.local_id,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id)
message.body = body
self._send_slp_message(message)
self.start_timeout("response", 60)
def _transreq(self):
self._cseq = 0
body = SLPTransportRequestBody(self._id, 0, 1)
message = SLPRequestMessage(SLPRequestMethod.INVITE,
"MSNMSGR:" + self.remote_id,
to=self.remote_id,
frm=self.local_id,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id)
message.body = body
self._send_slp_message(message)
def _respond(self, status_code):
body = SLPSessionRequestBody(session_id=self._id, capabilities_flags=None,
s_channel_state=None)
self._cseq += 1
response = SLPResponseMessage(status_code,
to=self.remote_id,
frm=self.local_id,
cseq=self._cseq,
branch=self._branch,
call_id=self._call_id)
response.body = body
self._send_slp_message(response)
# close other end points so we are the only one answering
self._close_end_points(status_code)
def _accept(self):
self._respond(200)
def _decline(self, status_code):
self._respond(status_code)
self._dispose()
def _respond_transreq(self, transreq, status, body):
self._cseq += 1
response = SLPResponseMessage(status,
to=self.remote_id,
frm=self.local_id,
cseq=self._cseq,
branch=transreq.branch,
call_id=self._call_id)
response.body = body
self._send_slp_message(response)
def _accept_transreq(self, transreq, bridge, listening, nonce, local_ip,
local_port, extern_ip, extern_port):
body = SLPTransportResponseBody(bridge, listening, nonce, [local_ip],
local_port, [extern_ip], extern_port, self._id, 0, 1)
self._respond_transreq(transreq, 200, body)
def _decline_transreq(self, transreq):
body = SLPTransportResponseBody(session_id= | self._id)
self._respond_transreq(transreq, 603, body)
self._dispose()
def _close(self, context=None, reason=None):
body = SLPSessionCloseBody(context=context, session_id=self._id,
reason=reason, s_channel_state=0)
self._cseq = 0
self._branch = "{%s}" % uuid.uuid4()
message = SLPRequestMessage(SLPRequestMethod.BYE,
"MSNMSGR:" + self.remote_id,
to=self.remote_id,
frm=self.local_id | ,
branch=self._branch,
cseq=self._cseq,
call_id=self._call_id)
message.body = body
self._send_slp_message(message)
self._dispose()
def _close_end_points(self, status):
"""Send BYE to other end points; this client already answered.
@param status: response we sent to the peer"""
if len(self._peer.end_points) > 0:
return # if the peer supports MPOP, let him do the work
for end_point in self._client.profile.end_points.values():
if end_point.id == self._client.machine_ |
opennode/nodeconductor-assembly-waldur | src/waldur_geo_ip/views.py | Python | mit | 706 | 0 | fro | m rest_framework import status, views
from rest_framework.response import Response
from waldur_core.core.utils import get_lat_lon_from_address
from . import serializers
class GeocodeViewSet(views.APIView):
def get(self, request): |
serializer = serializers.GeoCodeSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
address = serializer.validated_data['address']
lat_lon = get_lat_lon_from_address(address)
if lat_lon:
return Response(
{'latitude': lat_lon[0], 'longitude': lat_lon[1]},
status=status.HTTP_200_OK,
)
return Response(None, status=status.HTTP_200_OK)
|
arielisidro/myprograms | python/practice/ExcelColumns.py | Python | gpl-2.0 | 2,439 | 0.02624 | import string
def fillChars(chars):
for i in string.ascii_lowercase:
chars.append(i)
def getColumnNumber():
columnNumber=int(raw_input("Please input cell number: "))
return columnNumber
def printColumns(chars):
printBlank=True
counter=1
for c0 in chars:
for c1 in chars:
if printBlank:
start2=0
printBlank=False
else:
start2=1
for c2 in chars[start2:]:
print counter,':',
for c3 in chars[1:]:
print c0+c1+c2+c3,
counter += 1
print
raw_input("pause for a while")
def printColumns2(columnNumber):
counter=1
aColumn=1
while aColumn<=columnNumber:
if aColumn % 26 ==1:
print
print counter,':',
print getEquivalentColumn(computeColumnRecursive(aColumn,[])),
counter+=1
aColumn+=1
def computeColumn(columnNumber):
base=26
column=[]
while columnNumber>0:
col=columnNumber % base
columnNumber/=base
if col==0:
col=base
columnNumber=columnNumber-1
column.insert(0,col)
return column
def computeColumn(columnNumber,column):
if columnNumber==0:
return column
print 'overloading'
base=26
col=columnNumber % base
columnNumber/=base
if col==0:
col=base
columnNumber=columnNumber-1
column.insert(0,col)
return computeColumn(columnNumber,column)
def computeColumnRecursive(columnNumber,column):
if columnNumber==0:
return column
base=26
col=columnNumber % base
columnNumber/=base
if col==0:
col=base
columnNumber=columnNumber-1
column.insert(0,col)
return computeColumnRecursive(columnNumber,column)
def getEquivalentColumn(column):
columnLetter=''
for i in column:
columnLetter+=string.ascii_uppercase[i-1]
return columnLetter
def printEquivalentColumn(column):
for i in column:
print string.ascii_uppercase[i-1],
if __name__ == '__main__':
#chars=['',]
#fillChars(chars)
#printColumns2(getColumnNumber())
printEquival | entColumn(computeColumn(getCo | lumnNumber()))
printEquivalentColumn(computeColumn(getColumnNumber(),[]))
|
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/configobj/validate.py | Python | gpl-2.0 | 46,768 | 0.003507 | # validate.py
# A Validator object
# Copyright (C) 2005 Michael Foord, Mark Andrews, Nicola Larosa
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mark AT la-la DOT com
# nico AT tekNico DOT net
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# For information about bugfixes, updates and support, please join the
# ConfigObj mailing list:
# http://lists.sourceforge.net/lists/listinfo/configobj-develop
# Comments, suggestions and bug reports welcome.
"""
The Validator object is used to check that supplied values
conform to a specification.
The value can be supplied as a string - e.g. from a config file.
In this case the check will also *convert* the value to
the required type. This allows you to add validation
as a transparent layer to access data stored as strings.
The validation checks that the data is correct *and*
converts it to the expected type.
Some standard checks are provided for basic data types.
Additional checks are easy to write. They can be
provided when the ``Validator`` is instantiated or
added afterwards.
The standard functions work with the following basic data types :
* integers
* floats
* booleans
* strings
* ip_addr
plus lists of these datatypes
Adding additional checks is done through coding simple functions.
The full set of standard checks are :
* 'integer': matches integer values (including negative)
Takes optional 'min' and 'max' arguments : ::
integer()
integer(3, 9) # any value from 3 to 9
integer(min=0) # any positive value
integer(max=9)
* 'float': matches float values
Has the same parameters as the integer check.
* 'boolean': matches boolean values - ``True`` or ``False``
Acceptable string values for True are :
true, on, yes, 1
Acceptable string values for False are :
false, off, no, 0
Any other value raises an error.
* 'ip_addr': matches an Internet Protocol address, v.4, re | presented
by a dotted-quad string, i.e. '1.2.3.4'.
* 'string': matches any string.
Takes optional keyword args 'min' and 'max'
to specify | min and max lengths of the string.
* 'list': matches any list.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the list. (Always returns a list.)
* 'tuple': matches any tuple.
Takes optional keyword args 'min', and 'max' to specify min and
max sizes of the tuple. (Always returns a tuple.)
* 'int_list': Matches a list of integers.
Takes the same arguments as list.
* 'float_list': Matches a list of floats.
Takes the same arguments as list.
* 'bool_list': Matches a list of boolean values.
Takes the same arguments as list.
* 'ip_addr_list': Matches a list of IP addresses.
Takes the same arguments as list.
* 'string_list': Matches a list of strings.
Takes the same arguments as list.
* 'mixed_list': Matches a list with different types in
specific positions. List size must match
the number of arguments.
Each position can be one of :
'integer', 'float', 'ip_addr', 'string', 'boolean'
So to specify a list with two strings followed
by two integers, you write the check as : ::
mixed_list('string', 'string', 'integer', 'integer')
* 'pass': This check matches everything ! It never fails
and the value is unchanged.
It is also the default if no check is specified.
* 'option': This check matches any from a list of options.
You specify this check with : ::
option('option 1', 'option 2', 'option 3')
You can supply a default value (returned if no value is supplied)
using the default keyword argument.
You specify a list argument for default using a list constructor syntax in
the check : ::
checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
A badly formatted set of arguments will raise a ``VdtParamError``.
"""
__docformat__ = "restructuredtext en"
__version__ = '1.0.0'
__revision__ = '$Id: validate.py 123 2005-09-08 08:54:28Z fuzzyman $'
__all__ = (
'__version__',
'dottedQuadToNum',
'numToDottedQuad',
'ValidateError',
'VdtUnknownCheckError',
'VdtParamError',
'VdtTypeError',
'VdtValueError',
'VdtValueTooSmallError',
'VdtValueTooBigError',
'VdtValueTooShortError',
'VdtValueTooLongError',
'VdtMissingValue',
'Validator',
'is_integer',
'is_float',
'is_boolean',
'is_list',
'is_tuple',
'is_ip_addr',
'is_string',
'is_int_list',
'is_bool_list',
'is_float_list',
'is_string_list',
'is_ip_addr_list',
'is_mixed_list',
'is_option',
'__docformat__',
)
import re
_list_arg = re.compile(r'''
(?:
([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
)
\)
)
''', re.VERBOSE | re.DOTALL) # two groups
_list_members = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
''', re.VERBOSE | re.DOTALL) # one group
_paramstring = r'''
(?:
(
(?:
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
(?:
\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)
\s*,\s*
)*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s\)][^,\)]*?) # unquoted
)? # last one
\)
)|
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?)| # unquoted
(?: # keyword argument
[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\s=][^,=]*?) # unquoted
)
)
)
)
(?:
(?:\s*,\s*)|(?:\s*$) # comma
)
)
'''
_matchstring = '^%s*' % _paramstring
# Python pre 2.2.1 doesn't ha |
burakbayramli/classnotes | sk/2019/07/test_rocket1.py | Python | gpl-3.0 | 1,843 | 0.006511 | from rocketlander import RocketLander
from constants import LEFT_GROUND_CONTACT, RIGHT_GROUND_CONTACT
import numpy as np
import pyglet
if __name__ == "__main__":
# Settings holds all the settings for the rocket lander environment.
settings = {'Side Engines': True,
'Clouds': True,
'Vectorized Nozzle': True,
'Starting Y-Pos Constant': 1,
'Initial Force': 'random'} # (6000, -10000)}
env = RocketLander(settings)
s = env.reset()
left_or_right_barge_movement = np.random.randint(0, 2)
for i in range(50):
a = [10.0, 1.0, 1.0]
s, r, done, info = env.step(a)
# -------------------------------------
# Optional render
env.render()
# Draw the target
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
image_data = buffer.get_image_data()
if i % 5 == 0:
image_data.save(filename='frames/rocket-%04d.png' % i)
env.dr | aw_marker(env.landing_coordinates[0], env.landing_coordinates[1])
| # Refresh render
env.refresh(render=False)
# When should the barge move? Water movement, dynamics etc can be simulated here.
if s[LEFT_GROUND_CONTACT] == 0 and s[RIGHT_GROUND_CONTACT] == 0:
env.move_barge_randomly(0.05, left_or_right_barge_movement)
# Random Force on rocket to simulate wind.
env.apply_random_x_disturbance \
(epsilon=0.005, \
left_or_right=left_or_right_barge_movement)
env.apply_random_y_disturbance(epsilon=0.005)
# Touch down or pass abs(THETA_LIMIT)
if done: break
|
ATIX-AG/foreman-ansible-modules | plugins/modules/architecture.py | Python | gpl-3.0 | 3,245 | 0.000924 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019 Manisha Singhal (ATIX AG)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: architecture
version_added: 1.0.0
short_description: Manage Architectures
description:
- Create, update, and delete Architectures
author:
- "Manisha Singhal (@Manisha15) ATIX AG"
options:
name:
description: Name of architecture
required: true
type: str
updated_name:
description: New architecture name. When this parameter is set, the module will not be idempotent.
type: str
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.entity_state
- theforeman.foreman.foreman.operatingsystems
'''
EXAMPLES = '''
- name: "Create an Architecture"
theforeman.foreman.architecture:
name: "i386"
operatingsystems:
- "TestOS1"
- "TestOS2"
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: "Update an Architecture"
theforeman.foreman.architecture:
name: "i386"
operatingsystems:
- "TestOS3"
- "TestOS4"
server_url: "https://foreman.example.com"
username: "ad | min"
password: "changeme"
state: present
- name: "Delete an Architecture"
theforeman.foreman.architecture:
name: "i386"
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: absent
'''
RETURN = '''
entity:
description: Final state of the affected entities grouped by their type.
returned: success
type: dict
contains:
architectures:
description: List of archi | tectures.
type: list
elements: dict
contains:
id:
description: Database id of the architecture.
type: int
name:
description: Name of the architecture.
type: str
operatinsystem_ids:
description: Database ids of associated operatingsystems.
type: list
elements: int
'''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import ForemanEntityAnsibleModule
class ForemanArchitectureModule(ForemanEntityAnsibleModule):
pass
def main():
    """Entry point: build the architecture module and run the Foreman workflow."""
    module = ForemanArchitectureModule(
        argument_spec=dict(
            # Optional rename target; setting it makes the task non-idempotent
            # (documented in the module DOCUMENTATION above).
            updated_name=dict(),
        ),
        foreman_spec=dict(
            name=dict(required=True),
            # List of operating system names to associate with the architecture.
            operatingsystems=dict(type='entity_list'),
        ),
    )
    # api_connection opens the Foreman API session and handles ensure/teardown.
    with module.api_connection():
        module.run()
if __name__ == '__main__':
main()
|
foreveremain/common-workflow-language | reference/cwltool/expression.py | Python | apache-2.0 | 2,977 | 0.004703 | import docker
import subprocess
import json
from aslist import aslist
import logging
import os
from process import WorkflowException
import process
import yaml
import avro_ld.validate as validate
import avro_ld.ref_resolver
_logger = logging.getLogger("cwltool")
def exeval(ex, jobinput, requirements, outdir, tmpdir, context, pull_image):
if ex["engine"] == "cwl:JsonPointer":
try:
obj = {"job": jobinput, "context": context, "outdir": outdir, "tmpdir": tmpdir}
return avro_ld.ref_resolver.resolve_json_pointer(obj, ex["script"])
except ValueError as v:
raise WorkflowException("%s in %s" % (v, obj))
for r in reversed(requirements):
if r["class"] == "ExpressionEngineRequirement" and r["id"] == ex["engine"]:
runtime = []
class DR(object):
pass
dr = DR()
dr.requirements = r.get("requirements", [])
dr.hints = r.get("hints", [])
(docker_req, docker_is_req) = process.get_feature(dr, "DockerRequirement")
if docker_req:
img_id = docker.get_from_requirements(docker_req, docker_is_req, pull_image)
if img_id:
runtime | = ["docker", "run", "-i", "--rm", img_id]
exdefs = []
for exdef in r.get("engineConfig", []):
if isinstance(exdef, dict) and "ref" in exdef:
with open(exdef["ref"][7:]) as f:
exdefs.append(f.read())
elif isinstance(exdef, basestring):
exdefs.append(exdef)
inp = {
"script": ex["script"],
"engineConfig": exdefs,
"job": jo | binput,
"context": context,
"outdir": outdir,
"tmpdir": tmpdir,
}
_logger.debug("Invoking expression engine %s with %s",
runtime + aslist(r["engineCommand"]),
json.dumps(inp, indent=4))
sp = subprocess.Popen(runtime + aslist(r["engineCommand"]),
shell=False,
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
(stdoutdata, stderrdata) = sp.communicate(json.dumps(inp) + "\n\n")
if sp.returncode != 0:
raise WorkflowException("Expression engine returned non-zero exit code on evaluation of\n%s" % json.dumps(inp, indent=4))
return json.loads(stdoutdata)
raise WorkflowException("Unknown expression engine '%s'" % ex["engine"])
def do_eval(ex, jobinput, requirements, outdir, tmpdir, context=None, pull_image=True):
    """Evaluate *ex* when it is an expression object, otherwise return it as-is.

    An expression object is a dict carrying both an "engine" and a "script"
    key; any other value is treated as a literal and passed through.
    """
    is_expression = isinstance(ex, dict) and "engine" in ex and "script" in ex
    if not is_expression:
        return ex
    return exeval(ex, jobinput, requirements, outdir, tmpdir, context, pull_image)
|
jovanpacheco/todo-eureka | eureka/list/urls.py | Python | gpl-3.0 | 2,662 | 0.034936 | from django.conf.urls import url,include
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from .views import (
ListRetriveView,ListRegisterView,ListUpdateView,ListDetailView,ListDeleteView,
ItemListView,ItemRegisterView,ItemUpdateView,ItemDeleteView,ItemCompleted
)
from django.contrib.auth.decorators import login_required
from .forms import ItemForm,ItemListForm
from rest_framework import routers
from .api import viewsets
urlpatterns = [
url(r'^$',TemplateView.as_view(template_name='index.html') , name="index"),
url(r'^list/$',ListRetriveView.as_view() , name="list_list"),
url(r'^list/register/$',ListRegisterView.as_view() , name="list_register"),
url(r'^list/update/(?P<pk>\d+)/$', ListUpdateView.as_view(), name='list_update'),
url(r'^list/detail/(?P<pk>\d+)/$', ListDetailView.as_view(), name='list_detail'),
url(r'^list/delete/(?P<pk>\d+)/$', ListDeleteView.as_view(), name='list_delete'),
url(r'^item/list/(?P<pk>\d+)/$',ItemListView.as_view() , name="item_list"),
url(r'^item/list/register/(?P<pk>\d+)/$',ItemRegisterView.as_view(form_class=ItemListForm),
name="item_register_list"),
url(r'^item/register/$',ItemRegisterView.as_view(form_class=ItemForm) , name="item_register_new"),
url(r'^item/list/(?P<pk_list>\d+)/update/(?P<pk>\d+)/$', ItemUpdateView.as_view(), name='item_update'),
url(r'^item/list/(?P<pk_list>\d+)/delete/(?P<pk>\d+)/$', ItemDeleteView.as_view(), name='item_delete'),
url(r'^item/list/(?P<pk_list>\d+)/completed/(?P<pk>\d+)/$', ItemCompleted.as_view(), name='item_completed'),
## APis for list
url(r'^api/(?P<version>[v1.]+)/list/$',viewsets.AllListViewSet.as_view(),name='all_list'),
url(r'^api/(?P<version>[v1.]+)/list/(?P<uuid>[-\w]+)/$',viewsets.ObjectListViewSet.as_view(),name='uuid_list'),
url(r'^api/(?P<version>[v1.]+)/author_list/$',viewsets.AuthorListViewSet.as_view(),name='author_list'),
url(r'^api/(?P<version>[v1.]+)/author_list/(?P<uuid>[-\w]+)/$',viewsets.ObjectAuthorListViewSet.as_view(),
name='author_list'),
## Apis for item
url(r'^api/(?P<version>[v1.]+)/item/$',viewsets.AllItemViewSet.as_view(),name='all_item'),
url(r'^api/(?P<version>[v1.]+ | )/item/(?P<uuid>[-\w]+)/completed$',viewsets.CompletedItemViewSet.as_view(),
name='completed_item'),
url(r'^api/(?P<version>[v1.]+)/item/(?P<uuid>[-\w]+)/$',view | sets.ObjectItemViewSet.as_view(),name='uuid_item'),
url(r'^api/(?P<version>[v1.]+)/item/list/(?P<uuid>[-\w]+)/$',viewsets.AllItemForListViewSet.as_view(),
name='items_by_list'),
# register a new user by api
url(r'^api/(?P<version>[v1.]+)/user/$',viewsets.RegistrationView.as_view(),name='new_user'),
] |
ngageoint/scale | scale/messaging/backends/backend.py | Python | apache-2.0 | 2,030 | 0.005419 | from abc import ABCMeta, abstractmethod
from django.conf import settings
from util.broker import BrokerDetails
class MessagingBackend(object):
    """Abstract base class for broker-backed messaging transports.

    Concrete subclasses implement send/receive against a specific broker;
    this base captures the shared broker and queue configuration from
    Django settings at construction time.
    """
    __metaclass__ = ABCMeta  # Python 2-style ABC declaration

    def __init__(self, backend_type):
        """Instantiates backend specific settings
        """
        # Unique type of MessagingBackend, each type must be registered in apps.py
        self.type = backend_type
        # Connection string pulled from configuration
        self._broker_url = settings.BROKER_URL
        self._broker = BrokerDetails.from_broker_url(settings.BROKER_URL)
        # TODO: Transition to more advanced message routing per command message type
        self._queue_name = settings.QUEUE_NAME

    @abstractmethod
    def send_messages(self, messages):
        """Send a collection of messages to the backend

        Connections are not persisted across send_messages calls. It is recommended that if a large
        number of messages are to be sent it be done directly in a single function call.

        :param messages: JSON payload of messages
        :type messages: [dict]
        """

    @abstractmethod
    def receive_messages(self, batch_size):
        """Receive a batch of messages from the backend

        Connections are not persisted across receive_messages calls. It is recommended that if a large
        number of messages are to be retrieved it be done directly in a single function call.

        Implementing function must yield messages from backend. Messages must be
        in dict form. It is also the responsibility of the function to handle a boolean response
        and appropriately acknowledge / delete message on True

        :param batch_size: Number of messages to be processed
        :type batch_size: int

        :return: Yielded list of messages
        :rtype: Generator[dict]
        """

    @abstractmethod
    def get_queue_size(self):
        """Gets the current length of the queue

        :return: number of messages in the queue
        :rtype: int
        """
alex/django-paging | paging/helpers.py | Python | bsd-3-clause | 652 | 0.006135 | from paging.paginators import *
def paginate(request, queryset_or_list, per_page=25, endless=True):
    """Build the pagination context for *request*.

    Returns a dict with 'query_string' (the GET parameters minus 'p',
    urlencoded) and 'paginator' (the selected paginator's page context).
    A missing or malformed 'p' parameter falls back to page 1, as does any
    value below 1.
    """
    paginator_cls = EndlessPaginator if endless else BetterPaginator
    paginator = paginator_cls(queryset_or_list, per_page)

    # Strip the page parameter so templates can append their own.
    params = request.GET.copy()
    if 'p' in params:
        del params['p']

    try:
        page = int(request.GET.get('p', 1))
    except (ValueError, TypeError):
        page = 1
    page = max(page, 1)

    return {
        'query_string': params.urlencode(),
        'paginator': paginator.get_context(page),
    }
return context |
jawilson/home-assistant | homeassistant/components/wled/models.py | Python | apache-2.0 | 875 | 0 | """Models for WLED."""
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import WLEDDataUpdateCoordinator
class WLEDEntity(CoordinatorEntity):
    """Base class shared by all WLED entities."""

    coordinator: WLEDDataUpdateCoordinator

    @property
    def device_info(self) -> DeviceInfo:
        """Return device information about this WLED device."""
        info = self.coordinator.data.info
        return DeviceInfo(
            identifiers={(DOMAIN, info.mac_address)},
            name=info.name,
            manufacturer=info.brand,
            model=info.product,
            sw_version=str(info.version),
            configuration_url=f"http://{self.coordinator.wled.host}",
        )
|
tiagocoutinho/bliss | bliss/controllers/motors/slitbox.py | Python | lgpl-3.0 | 1,530 | 0.000654 | """
One calculation and two real motors.
The calculation motor has the position of the motor tagged as first.
The real motor tagged as second differs from the first by a fraction.
orientation: label (horizontal | vertical) of the orientation of the motors.
fraction: the difference [mm] between the first and the second motor.
Example yml file:
.. code-block:: yaml
-
controller:
class: Slitbox
orientation: vertical
fraction: 0.01
axes:
-
name: s1v
tags: real first
-
name: s2v
tags: real second
-
name: sV
tags: vertical
-
controller:
class: Slitbox
orientation: horizontal
fraction: 0.01
axes:
-
name: $s1h
tags: real first
-
name: $s2h
tags: real second
-
name: sH
tags: horizontal
"""
from bliss.controllers.motor import CalcController
class Slitbox(CalcController):
def __init__(self, *args, **kwargs):
C | alcController.__init__(self, *args, **kwargs)
self.orientation = str(self.config.get("orientation"))
def calc_from_real(self, positions_dict):
return {self.orientation: positions_dict["first"]}
def calc_to_real(self, positions_dict):
fraction = float(self.config.get("fraction"))
pos = positions_dict[self.orientation]
| return {"first": pos, "second": pos + fraction}
|
NirBenTalLab/proorigami-cde-package | cde-root/usr/local/apps/inkscape/share/inkscape/extensions/export_gimp_palette.py | Python | mit | 1,346 | 0.021545 | #!/usr/bin/env python
'''
Author: Jos Hirth, kaioa.com
License: GNU General Public License - http://www.gnu.or | g/licenses/gpl.html
Warranty: see above
'''
DOCNAME='sodipodi:docname'
import sys, simplestyle
try:
from xml.dom.minidom import parse
except:
sys.exit('The export_gpl.py modu | le requires PyXML. Please download the latest version from <http://pyxml.sourceforge.net/>.')
colortags=(u'fill',u'stroke',u'stop-color',u'flood-color',u'lighting-color')
colors={}
def walk(node):
    # Depth-first traversal of the DOM: inspect this node's style attribute
    # for colours, then recurse into every child element.
    checkStyle(node)
    if node.hasChildNodes():
        childs=node.childNodes
        for child in childs:
            walk(child)
def checkStyle(node):
if node.hasAttributes():
sa=node.getAttribute('style')
if sa!='':
styles=simplestyle.parseStyle(sa)
for c in range(len(colortags)):
if colortags[c] in styles.keys():
addColor(styles[colortags[c]])
def addColor(col):
if simplestyle.isColor(col):
c=simplestyle.parseColor(col)
colors['%3i %3i %3i ' % (c[0],c[1],c[2])]=simplestyle.formatColoria(c).upper()
stream = open(sys.argv[-1:][0],'r')
dom = parse(stream)
stream.close()
walk(dom)
print 'GIMP Palette\nName: %s\n#' % (dom.getElementsByTagName('svg')[0].getAttribute(DOCNAME).split('.')[0])
for k,v in sorted(colors.items()):
print k+v |
5StevenWu/Coursepy | L08/网络编程3.2粘包/client端.py | Python | apache-2.0 | 1,009 | 0.024277 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
''''''
'''
此处接收分段 先接收大小 然后分段接收数据
'''
import socket,struct,json
phone=socket.socket(socket.AF_INET,socket.SOCK_STREAM) #等同于服务端
phone.connect((' | 127.0.0.1',8080)) #拨通电话 注意此处是一个元组的形式
while True:
cmd=input('>>:').strip()
if not cmd:continue
phone.send(cmd.encode('utf-8')) #转为二进制发出去
print('ready to recv message')
'''先收报头的长度'''
head_struct = phone.recv(4)
head_len = struct.unpack('i',head_struct)[0]
head_bytes = phone.recv(head_len)
head_json = head_bytes.decode('utf-8')
head_dic = json.loads(head_json)
'''根据报头里的详细信息,取真实的数据'''
| total_size = head_dic['total_size']
recv_size=0
data=b''
while recv_size < total_size:
recv_data=phone.recv(1024)
data+=recv_data
recv_size+=len(recv_data)
print(data.decode('gbk'))
phone.close() |
pferreir/indico | indico/modules/admin/views.py | Python | mit | 1,087 | 0.00092 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE fil | e for more details.
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.flask.util import url_for
from indico.web.menu import get_menu_item
from indico.web.views import WPDecorat | ed, WPJinjaMixin
class WPAdmin(WPJinjaMixin, WPDecorated):
"""Base class for admin pages."""
def __init__(self, rh, active_menu_item=None, **kwargs):
kwargs['active_menu_item'] = active_menu_item or self.sidemenu_option
WPDecorated.__init__(self, rh, **kwargs)
def _get_breadcrumbs(self):
menu_item = get_menu_item('admin-sidemenu', self._kwargs['active_menu_item'])
items = [(_('Administration'), url_for('core.admin_dashboard'))]
if menu_item:
items.append(menu_item.title)
return render_breadcrumbs(*items)
def _get_body(self, params):
return self._get_page_content(params)
|
Infinidat/infi.pyutils | infi/pyutils/functors/pass_.py | Python | bsd-3-clause | 198 | 0.015152 | from .functor import Functor
class _ | PASS(Functor):
def __call__(self, *_, **__):
pass
__enter__ = __exit__ = __call__
def __repr__(self):
| return '<PASS>'
PASS = _PASS()
|
Asparagirl/ArchiveBot | pipeline/archivebot/seesaw/wpullargs_test.py | Python | mit | 2,884 | 0.003814 | from os import environ as env
import unittest
from .wpull import WpullArgs
from seesaw.item import Item
# taken form pipeline/pipeline.py
if 'WARC_MAX_SIZE' in env:
WARC_MAX_SIZE = env['WARC_MAX_SIZE']
else:
WARC_MAX_SIZE = '5368709120'
def joined(args):
return str.join(' ', args)
class TestWpullArgs(unittest.TestCase):
def setUp(self):
self.item = {
'cookie_jar': '/foobar/cookies.txt',
'ident': 'abc123',
'item_dir': '/foobar',
'url': 'http://www.example.com',
'warc_file_base': '/foobar/warc'
}
self.args = WpullArgs(default_user_agent='Default/1',
wpull_exe='/bin/wpull',
youtube_dl_exe='/usr/bin/youtube-dl',
phantomjs_exe='/usr/bin/phantomjs',
| finished_warcs_dir='/lost+found/',
warc_max_size=WARC_MAX_SIZE
)
def test_user_agent_can_be_set(self):
self.item['user_agent'] = 'Frobinator/20.1'
self.assertIn('-U Frobinator/20.1', joined(self.args.realize(self.item)))
def test_youtube_dl_activation(self):
self.item['youtube_dl'] = True
self.ass | ertIn('--youtube-dl', joined(self.args.realize(self.item)))
def test_uses_default_user_agent(self):
self.assertIn('-U Default/1', joined(self.args.realize(self.item)))
def test_recursive_fetch_settings(self):
self.item['recursive'] = True
self.item['depth'] = 'inf'
cmdline = joined(self.args.realize(self.item))
self.assertIn('--recursive', cmdline)
self.assertIn('--level inf', cmdline)
def test_nonrecursive_fetch_settings(self):
self.item['recursive'] = False
cmdline = joined(self.args.realize(self.item))
self.assertNotIn('--recursive', cmdline)
self.assertNotIn('--level inf', cmdline)
def test_recursive_fetch_enables_linked_pages_and_requisites(self):
self.item['recursive'] = True
self.item['depth'] = 'inf'
cmdline = joined(self.args.realize(self.item))
self.assertIn('--span-hosts-allow page-requisites,linked-pages',
cmdline)
def test_recursive_fetch_with_no_offsite_links_enables_requisites(self):
self.item['recursive'] = True
self.item['depth'] = 'inf'
self.item['no_offsite_links'] = True
cmdline = joined(self.args.realize(self.item))
self.assertIn('--span-hosts-allow page-requisites', cmdline)
self.assertNotIn('linked-pages', cmdline)
def test_nonrecursive_fetch_enables_requisites(self):
self.item['recursive'] = False
cmdline = joined(self.args.realize(self.item))
self.assertIn('--span-hosts-allow page-requisites', cmdline)
self.assertNotIn('linked-pages', cmdline)
# vim:ts=4:sw=4:et:tw=78
|
ClydeSpace-GroundStation/GroundStation | GNURadio/OOT_Modules/gr-ax25/python/__init__.py | Python | mit | 1,133 | 0.002648 | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at y | our option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of | the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio AX25 module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the ax25 namespace
try:
# this might fail if the module is python-only
from ax25_swig import *
except ImportError:
pass
# import any pure python here
#
|
pyrrho314/recipesystem | trunk/dontload-astrodata_Gemini/ADCONFIG_Gemini/lookups/NIRI/NIRISpecDict.py | Python | mpl-2.0 | 1,538 | 0.028609 | niriSpecDict = {
# Database for nprepare.cl
# Date: 2004 July 6
# Author: Joe Jensen, Gemini Observatory
# The long 6-pix and 4-pix centered slits are currently installed
#
# Array characteristics
"readnoise" : 70, # electrons (1 read pair, 1 digital av.)
"medreadnoise" : 35., # electrons (1 read pair, 16 dig av.)
"lowreadnoise" : 12.3, # electrons (16 read pairs, 16 dig av.)
"gain" : 12.3, # electrons/ADU
"shallowwell" : 200000., # electrons full-well
"deepwell" : 280000., # electrons full-well
"s | hallowbias" : -0.6, # detector bias (V)
"deepbias" : -0.87, # detector bias (V)
"linearlimit" : 0.7, # non-linear regime (fraction of saturation)
#
# Camera+FPmask SPECSEC1 SPECSEC2 SPECSEC3
#
"f6f6-2pix_G5211" : ( "[1:1024,276:700]" , "none", "none" ),
"f6f6-4pix_G5212" : ( "[1:1024,1:1024]" , "none", "none" ),
"f6f6-6pix_G521 | 3" : ( "[1:1024,1:1024]" , "none", "none" ),
"f6f6-2pixBl_G5214" : ( "[1:1024,276:700]" , "none", "none" ),
"f6f6-4pixBl_G5215" : ( "[1:1024,276:700]" , "none", "none" ),
"f6f6-6pixBl_G5216" : ( "[1:1024,276:700]" , "none", "none" ),
"f6f6-4pix_G5222" : ( "[1:1024,276:700]" , "none", "none" ),
"f6f6-6pix_G5223" : ( "[1:1024,276:700]" , "none", "none" )
}
|
samuelmaudo/yepes | yepes/test/plugins/base.py | Python | bsd-3-clause | 8,371 | 0.000717 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from argparse import ArgumentError
import textwrap
from warnings import warn
class Plugin(object):
"""
Base class for test plugins. It's recommended but not *necessary* to
subclass this class to create a plugin, but all plugins *must* implement
`addArguments(self, parser)` and `configure(self, options, stream)`, and
must have the attributes `enabled` and `name`. The `name` attribute may
contain hyphens ('-').
Plugins should not be enabled by default.
Subclassing Plugin (and calling the superclass methods in `__init__`,
`configure`, and `options`, if you override them) will give your plugin
some friendly default behavior:
* A --with-$name option will be added to the command line interface to
enable the plugin, and a corresponding environment variable will be used
as the default value. The plugin class's docstring will be used as the
help for this option.
* The plugin will not be enabled unless this option is selected by the user.
"""
canConfigure = False
enabled = False
enableOpt = None
name = None
    def __init__(self):
        # Derive per-subclass defaults: the plugin name falls back to the
        # lowercased class name, and the enabling attribute/option name is
        # 'enable_plugin_<name>' with hyphens normalised to underscores
        # (option strings may contain '-', attribute names may not).
        if self.name is None:
            self.name = self.__class__.__name__.lower()
        if self.enableOpt is None:
            self.enableOpt = 'enable_plugin_{0}'.format(
                self.name.replace('-', '_'))
def addArguments(self, parser):
"""
Add command-line options for this plugin.
The base plugin class adds --with-$name by default, used to enable the
plugin.
.. warning :: Don't implement `addArguments` unless you want to override
all default option handling behavior, including warnings
for conflicting options. Implement :meth:`options
<yepes.test.plugins.base.PluginInterface.options>`
instead.
"""
try:
self.arguments(parser)
except ArgumentError as e:
msg = ("Plugin '{0}' has conflicting option string: '{1}'"
" and will be disabled")
warn(msg.format(self.name, e.option_id), RuntimeWarning)
self.enabled = False
self.canConfigure = False
else:
self.canConfigure = True
def arguments(self, parser):
"""
Register commandline options.
Implement this method for normal options behavior with protection from
ArgumentErrors. If you override this method and want the default
--with-$name option to be registered, be sure to call super().
"""
parser.add_argument(
'--with-{0}'.format(self.name),
action='store_true',
dest=self.enableOpt,
default=False,
help="Enables plugin '{0}'. {1}".format(
self.name,
self.help(),
),
)
def configure(self, options, stream):
"""
Configure the plugin and system, based on selected options.
The base plugin class sets the plugin to enabled if the enable option
for the plugin (self.enableOpt) is true.
"""
if self.canConfigure:
self.enabled = getattr(options, self.enableOpt, self.enabled)
self.stream = stream
def help( | self):
"""
Return help for this plugin. This will be output as the help
section of the --with-$name option that enables the plugin.
"""
docs = self.__class__.__doc__
if docs:
# doc sections are often indented; compress the spaces
return textwrap.dedent(docs.splitlines()[0])
else:
return '(no help avail | able)'
class PluginInterface(object):
    """
    PluginInterface describes the plugin API. Do not subclass or use this class
    directly.

    Every method below is a documented no-op hook; concrete plugins implement
    only the subset they care about. ``__new__`` raises so the class can never
    be instantiated -- it exists purely as API documentation.
    """

    def __new__(cls, *arg, **kw):
        # Instantiation is forbidden on purpose; see the class docstring.
        raise TypeError('PluginInterface class is for documentation only')

    def addArguments(self, parser):
        """
        Called to allow plugin to register command-line options with the parser.

        .. warning :: Don't implement `addArguments` unless you want to override
                      all default option handling behavior, including warnings
                      for conflicting options. Implement :meth:`options
                      <yepes.test.plugins.base.PluginInterface.options>`
                      instead.
        """
        pass

    def addError(self, test, err):
        """
        Called when a test raises an uncaught exception.

        :param test: The test case that was errored.
        :type test: :class:`unittest.case.TestCase`
        :param err: A tuple of the form returned by :func:`sys.exc_info`:
                    ``(type, value, traceback)``.
        :type err: tuple
        """
        pass

    def addExpectedFailure(self, test, err):
        """
        Called when the test case test fails, but was marked with the
        :func:`unittest.case.expectedFailure()` decorator.

        :param test: The test case that was failed (as was expected).
        :type test: :class:`unittest.case.TestCase`
        :param err: A tuple of the form returned by :func:`sys.exc_info`:
                    ``(type, value, traceback)``.
        :type err: tuple
        """
        pass

    def addFailure(self, test, err):
        """
        Called when a test fails.

        :param test: The test case that was failed.
        :type test: :class:`unittest.case.TestCase`
        :param err: A tuple of the form returned by :func:`sys.exc_info`:
                    ``(type, value, traceback)``.
        :type err: tuple
        """
        pass

    def addSkip(self, test, reason):
        """
        Called when a test is skipped.

        :param test: The test case that was skipped.
        :type test: :class:`unittest.case.TestCase`
        :param reason: The reason for skipping the test.
        :type reason: str
        """
        pass

    def addSuccess(self, test):
        """
        Called when a test passes.

        :param test: The test case that was successful.
        :type test: :class:`unittest.case.TestCase`
        """
        pass

    def addUnexpectedSuccess(self, test):
        """
        Called when the test case test was marked with the
        :func:`unittest.case.expectedFailure` decorator, but succeeded.

        :param test: The test case that was surprisingly successful.
        :type test: :class:`unittest.case.TestCase`
        """
        pass

    def configure(self, options, stream):
        """
        Called after the command line has been parsed, with the parsed options
        and the output stream. Here, implement any config storage or changes
        to state or operation that are set by command line options.

        :param options: An object that stores all plugin options.
        :type options: :class:`argparse.Values`
        :param stream: Stream object, send your output here.
        :type stream: file-like object
        """
        pass

    def arguments(self, parser):
        """
        Called to allow plugin to register command line options with the parser.

        :param parser: Options parser instance.
        :type parser: :class:`argparse.ArgumentParser`
        """
        pass

    def report(self):
        """
        Called after all tests are run. Use this to print your plugin's report.
        """
        pass

    def startTest(self, test):
        """
        Called before each test is run.

        :param test: The test case.
        :type test: :class:`unittest.case.TestCase`
        """
        pass

    def startTestRun(self):
        """
        Called before any tests are run. Use this to perform any setup needed
        before testing begins.
        """
        pass
def stopTest(self, test):
"""
Called after each test is run.
:param test: The test case.
:type test: :class:` |
0xSteve/detection_learning | distest/helpers.py | Python | apache-2.0 | 1,230 | 0 | '''Some helper functions.'''
def make_p(count):
    '''Build a uniform probability vector over `count` actions.

    Every entry is 1/count; an empty vector is returned when count is 0.
    '''
    return [1.0 / count for _ in range(count)]
def subtract_nonzero(array, amount):
    '''Subtract `amount` from every strictly positive entry, in place.

    Non-positive entries are left untouched; the mutated list is returned.
    '''
    for idx, value in enumerate(array):
        if value > 0:
            array[idx] = value - amount
    return array
def make_dp(count):
    '''Build the discretized probability vector used by the dlri automata.

    Each of the `count` entries starts at the value `count`.
    '''
    return [count] * count
def cdf(p_vector):
    '''Return the cumulative distribution vector for the given vector.

    Element i of the result is the running sum of p_vector[0..i]; an empty
    input yields an empty list.
    '''
    totals = []
    running_sum = 0
    for probability in p_vector:
        running_sum += probability
        totals.append(running_sum)
    return totals
def get_index(desired_action, cdf_array):
    '''Map `desired_action` onto an action index through the CDF.

    Returns the first position whose cumulative value strictly exceeds
    `desired_action`; falls back to action 0 when no entry does (including
    an empty CDF).
    '''
    for position, threshold in enumerate(cdf_array):
        if desired_action < threshold:
            return position
    return 0
|
aboyett/blockdiag | src/blockdiag/tests/test_builder_node.py | Python | apache-2.0 | 6,607 | 0 | # -*- coding: utf-8 -*-
from collections import defaultdict
from blockdiag.tests.utils import BuilderTestCase
class TestBuilderNode(BuilderTestCase):
    """Diagram-builder regression tests for node parsing and layout.

    Each test feeds a ``*.diag`` fixture to ``self.build()`` (provided by
    BuilderTestCase) and checks resulting node attributes against
    hand-computed expectations.  NOTE(review): the ``xy`` values appear to
    be layout grid coordinates (not pixels) and colors are RGB tuples --
    confirm against BuilderTestCase's assertion helpers.
    """

    def test_single_node_diagram(self):
        diagram = self.build('single_node.diag')
        self.assertEqual(1, len(diagram.nodes))
        self.assertEqual(0, len(diagram.edges))
        self.assertEqual('A', diagram.nodes[0].label)
        self.assertEqual((0, 0), diagram.nodes[0].xy)

    def test_node_shape_diagram(self):
        # One node per supported shape keyword; 'Z' checks the default.
        expected = {'A': 'box', 'B': 'roundedbox', 'C': 'diamond',
                    'D': 'ellipse', 'E': 'note', 'F': 'cloud',
                    'G': 'mail', 'H': 'beginpoint', 'I': 'endpoint',
                    'J': 'minidiamond', 'K': 'flowchart.condition',
                    'L': 'flowchart.database', 'M': 'flowchart.input',
                    'N': 'flowchart.loopin', 'O': 'flowchart.loopout',
                    'P': 'actor', 'Q': 'flowchart.terminator', 'R': 'textbox',
                    'S': 'dots', 'T': 'none', 'U': 'square', 'V': 'circle',
                    'Z': 'box'}
        diagram = self.build('node_shape.diag')
        self.assertNodeShape(diagram, expected)

    def test_node_shape_namespace_diagram(self):
        diagram = self.build('node_shape_namespace.diag')
        self.assertNodeShape(diagram, {'A': 'flowchart.condition',
                                       'B': 'condition',
                                       'Z': 'box'})

    def test_node_has_multilined_label_diagram(self):
        diagram = self.build('node_has_multilined_label.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'Z': (0, 1)})
        self.assertNodeLabel(diagram, {'A': "foo\nbar", 'Z': 'Z'})

    def test_quoted_node_id_diagram(self):
        diagram = self.build('quoted_node_id.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), "'A'": (1, 0),
                                    'B': (2, 0), 'Z': (0, 1)})

    def test_node_id_includes_dot_diagram(self):
        diagram = self.build('node_id_includes_dot.diag')
        self.assertNodeXY(diagram, {'A.B': (0, 0), 'C.D': (1, 0),
                                    'Z': (0, 1)})

    def test_multiple_nodes_definition_diagram(self):
        diagram = self.build('multiple_nodes_definition.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (0, 1),
                                    'Z': (0, 2)})
        self.assertNodeColor(diagram, {'A': (255, 0, 0), 'B': (255, 0, 0),
                                       'Z': (255, 255, 255)})

    def test_multiple_node_relation_diagram(self):
        diagram = self.build('multiple_node_relation.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (1, 1), 'D': (2, 0),
                                    'Z': (0, 2)})

    def test_node_attribute(self):
        # Expected per-node attributes; defaultdicts express "everything
        # else uses the default value".
        labels = {'A': 'B', 'B': 'double quoted', 'C': 'single quoted',
                  'D': '\'"double" quoted\'', 'E': '"\'single\' quoted"',
                  'F': 'F', 'G': 'G', 'H': 'H', 'I': 'I', 'J': 'Hello'}
        colors = {'A': (255, 0, 0), 'B': (255, 255, 255), 'C': (255, 0, 0),
                  'D': (255, 0, 0), 'E': (255, 0, 0), 'F': (255, 255, 255),
                  'G': (255, 255, 255), 'H': (255, 255, 255),
                  'I': (255, 255, 255), 'J': (255, 255, 255)}
        textcolors = defaultdict(lambda: (0, 0, 0))
        textcolors['F'] = (255, 0, 0)
        linecolors = defaultdict(lambda: (0, 0, 0))
        linecolors['I'] = (255, 0, 0)
        numbered = defaultdict(lambda: None)
        numbered['E'] = '1'
        stacked = defaultdict(lambda: False)
        stacked['G'] = True
        fontsize = defaultdict(lambda: None)
        fontsize['H'] = 16
        orientations = defaultdict(lambda: 'horizontal')
        orientations['J'] = 'vertical'
        diagram = self.build('node_attribute.diag')
        self.assertNodeLabel(diagram, labels)
        self.assertNodeColor(diagram, colors)
        self.assertNodeTextColor(diagram, textcolors)
        self.assertNodeLineColor(diagram, linecolors)
        self.assertNodeNumbered(diagram, numbered)
        self.assertNodeStacked(diagram, stacked)
        self.assertNodeFontsize(diagram, fontsize)
        self.assertNodeLabel_Orientation(diagram, orientations)

    def test_node_height_diagram(self):
        diagram = self.build('node_height.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'D': (2, 1),
                                    'E': (1, 1), 'Z': (0, 2)})

    def test_branched_diagram(self):
        diagram = self.build('branched.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'D': (1, 1),
                                    'E': (2, 1), 'Z': (0, 2)})

    def test_multiple_parent_node_diagram(self):
        diagram = self.build('multiple_parent_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (0, 2), 'D': (1, 2),
                                    'E': (0, 1), 'Z': (0, 3)})

    def test_twin_multiple_parent_node_diagram(self):
        diagram = self.build('twin_multiple_parent_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (0, 1), 'D': (1, 1),
                                    'E': (0, 2), 'Z': (0, 3)})

    def test_flowable_node_diagram(self):
        diagram = self.build('flowable_node.diag')
        self.assertNodeXY(diagram, {'A': (0, 0), 'B': (1, 0),
                                    'C': (2, 0), 'Z': (0, 1)})

    def test_plugin_autoclass_diagram(self):
        # The autoclass plugin derives style/color from the node-id suffix.
        diagram = self.build('plugin_autoclass.diag')
        self.assertNodeXY(diagram, {'A_emphasis': (0, 0),
                                    'B_emphasis': (1, 0),
                                    'C': (1, 1)})
        self.assertNodeStyle(diagram, {'A_emphasis': 'dashed',
                                       'B_emphasis': 'dashed',
                                       'C': None})
        self.assertNodeColor(diagram, {'A_emphasis': (255, 0, 0),
                                       'B_emphasis': (255, 0, 0),
                                       'C': (255, 255, 255)})

    def test_plugin_attributes_diagram(self):
        diagram = self.build('plugin_attributes.diag')
        self.assertNodeTest_Attr1(diagram, {'A': "1", 'B': None})
        self.assertNodeTest_Attr2(diagram, {'A': "2", 'B': None})
        self.assertNodeTest_Attr3(diagram, {'A': "3", 'B': None})
|
tonyduckles/svn2svn | svnreplay.py | Python | gpl-3.0 | 95 | 0 | #!/usr/bin/env python
impo | rt sys
from svn2svn.run.svnreplay import main
sys.exit(main | () or 0)
|
dmitriyminer/webgpio | webgpio/auth/views.py | Python | mit | 2,048 | 0 | import logging
import aiohttp_jinja2
from aiohttp import web
from aiohttp_session import get_session
from .sa import check_password, user_exist, create_user
# Shared logger for all authentication views in this module.
auth_logger = logging.getLogger('auth.logger')
@aiohttp_jinja2.template('auth/login.html')
async def login(request):
    """Render the login form and authenticate POSTed credentials.

    On success the user id is stored in the session and the client is
    redirected to the 'devices' route; otherwise the template context is
    returned, with an ``errors`` list when credentials were rejected.
    """
    session = await get_session(request)
    data = await request.post()
    context = {}
    if data and data.get('email') and data.get('passwd'):
        db = request.app['db']
        config = request.app['config']
        # check_password yields the user id on success (falsy on failure).
        user_id = await check_password(db, config, **data)
        if user_id:
            session['user_id'] = user_id
            auth_logger.info('Login user: %s' % data.get('email'))
            url = request.app.router['devices'].url()
            return web.HTTPFound(url)
        else:
            auth_logger.info('Login error: %s' % data.get('email'))
            context['errors'] = ['Wrong email or password']
    return context
@aiohttp_jinja2.template('auth/register.html')
async def register(request):
    """Render the registration form and create a new account on POST.

    Requires a non-empty email and password with ``passwd`` equal to
    ``passwd1``; duplicate emails repopulate the form with an error.  A
    successful registration logs the user in (session) and redirects to
    the 'devices' route.
    """
    data = await request.post()
    context = dict()
    if all([data, data.get('email'), data.get('passwd'),
            data.get('passwd') == data.get('passwd1')]):
        already_exist = await user_exist(request.app['db'], **data)
        if already_exist:
            context['errors'] = ['User with this email already exists']
            # Echo the submitted values back into the form fields.
            context['username'] = data.get('username', '')
            context['email'] = data.get('email')
        else:
            session = await get_session(request)
            user_id = await create_user(request.app['db'],
                                        request.app['config'], **data)
            session['user_id'] = user_id
            auth_logger.info('Register user: %s' % data.get('email'))
            url = request.app.router['devices'].url()
            return web.HTTPFound(url)
    return context
async def logout(request):
    """Drop the authenticated user from the session and redirect home."""
    session = await get_session(request)
    session.pop('user_id', None)
    return web.HTTPFound(request.app.router['home'].url())
|
dyrock/trafficserver | tests/gold_tests/headers/hsts.test.py | Python | apache-2.0 | 2,907 | 0.002408 | '''
Test the hsts response header.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

# Fix: summary string previously read "heck hsts header..." (typo).
Test.Summary = '''
Check hsts header is set correctly
'''
Test.ContinueOnFail = True

# Define default ATS process (with TLS enabled) and a mock origin server.
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server")

# **testname is required**
testName = ""
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)

# ATS Configuration: install the test certificate/key and enable HSTS with a
# 300-second max-age so responses should carry Strict-Transport-Security.
ts.addSSLfile("../remap/ssl/server.pem")
ts.addSSLfile("../remap/ssl/server.key")
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'ssl',
    'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
    'proxy.config.ssl.hsts_max_age': 300,
})
ts.Disk.remap_config.AddLine(
    'map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)

# Test 1 - 200 Response: mapped host must return the origin's response with
# the HSTS header (verified against the gold file).
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = (
    'curl -s -D - --verbose --ipv4 --http1.1 --insecure --header "Host: {0}" https://localhost:{1}'
    .format('www.example.com', ts.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "hsts.200.gold"
tr.StillRunningAfter = ts

# Test 2 - 404 Not Found on Accelerator: unmapped host must still behave per
# the gold file (HSTS handling on error responses).
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
    'curl -s -D - --verbose --ipv4 --http1.1 --insecure --header "Host: {0}" https://localhost:{1}'
    .format('bad_host', ts.Variables.ssl_port)
)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = "hsts.404.gold"
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
|
cloudmesh/management | tests/test_02_user_list.py | Python | apache-2.0 | 967 | 0.003102 | from cloudmesh_database.dbconn import get_mongo_db, get_mongo_dbname_from_collection, DBConnFactory
from cloudmesh_base.util import HEADING
from cloudmesh_management.user import Users
from cloudmesh_management.mongo import Mongo
class TestListUsers:
    """Integration checks for ``cloudmesh_management.user.Users`` listing.

    NOTE(review): Python 2 code (``print`` statements) and requires a
    reachable MongoDB instance -- this is an integration test, not a unit
    test.
    """
    # Location of the cloudmesh YAML configuration.  NOTE(review): unused in
    # this class body; presumably consumed elsewhere -- verify before removing.
    yaml_dir = "~/.cloudmesh_yaml"
    def setup(self):
        """Connect mongoengine to the 'manage' collection before each test."""
        # HEADING()
        db_name = get_mongo_dbname_from_collection("manage")
        if db_name:
            meta = {'db_alias': db_name}  # NOTE(review): assigned but unused
            obj = Mongo()
            obj.check_mongo()
            get_mongo_db("manage", DBConnFactory.TYPE_MONGOENGINE)
        pass
    def teardown(self):
        """No per-test cleanup required."""
        # HEADING()
        pass
    def test_listusers(self):
        HEADING()
        """
        Test to list users in default format followed by JSON format
        """
        user = Users()
        print "Listing users in default format"
        user.list_users()
        print "Listing users in JSON format"
        user.list_users(display_fmt='json')
|
nylas/sync-engine | migrations/versions/018_message_contact_association.py | Python | agpl-3.0 | 1,215 | 0.001646 | """message contact association
Revision ID: 223041bb858b
Revises: 2c9f3a06de09
Create Date: 2014-04-28 23:52:05.449401
"""
# Revision identifiers used by Alembic to order migrations in the chain.
revision = '223041bb858b'
down_revision = '2c9f3a06de09'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the message<->contact association table and backfill it.

    The composite primary key (id, contact_id, message_id) allows one
    contact to appear on a message in multiple fields.
    """
    op.create_table(
        'messagecontactassociation',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('contact_id', sa.Integer(), nullable=False),
        sa.Column('message_id', sa.Integer(), nullable=False),
        sa.Column('field',
                  sa.Enum('from_addr', 'to_addr', 'cc_addr', 'bcc_addr'),
                  nullable=True),
        sa.ForeignKeyConstraint(['contact_id'], ['contact.id'], ),
        sa.ForeignKeyConstraint(['message_id'], ['message.id'], ),
        sa.PrimaryKeyConstraint('id', 'contact_id', 'message_id')
    )
    # Yes, this is a terrible hack. But tools/rerank_contacts.py already
    # contains a script to process contacts from messages, so it's very
    # expedient.
    import sys
    sys.path.append('./tools')
    from rerank_contacts import rerank_contacts
    rerank_contacts()
def downgrade():
    """Drop the table created by upgrade()."""
    op.drop_table('messagecontactassociation')
|
micromagnetics/magnum.fe | tests/cache_test.py | Python | lgpl-3.0 | 1,521 | 0.015779 | import unittest
from magnumfe import *
# Silence solver logging during the tests.  NOTE(review): set_log_active
# comes from magnumfe's star import -- presumably dolfin's logger; confirm.
set_log_active(False)
class CacheTest(unittest.TestCase):
    """Behavioral tests for magnumfe's ``Cache`` invalidation helper."""
    def test_initial_update(self):
        # A fresh cache must report that the first state seen needs work.
        mesh = UnitCubeMesh(1,1,1)
        state = State(mesh)
        cache = Cache()
        self.assertTrue(cache.requires_update(state))
    def test_change_state(self):
        # Switching to a different State object invalidates the cache once.
        mesh = UnitCubeMesh(1,1,1)
        state1 = State(mesh)
        state2 = State(mesh)
        cache = Cache()
        count = 0
        if cache.requires_update(state1): count += 1
        if cache.requires_update(state1): count += 1
        self.assertEqual(1, count)
        if cache.requires_update(state2): count += 1
        self.assertEqual(2, count)
    def test_update_required(self):
        # A cache keyed on ("m", "t") must invalidate when m or t change
        # and must ignore changes to unrelated attributes such as j.
        mesh = UnitCubeMesh(2, 2, 2)
        state = State(mesh, m = Constant((1.0, 0.0, 0.0)), j = Constant((0.0, 0.0, 0.0)))
        cache = Cache("m", "t")
        count = 0
        if cache.requires_update(state): count += 1
        self.assertEqual(1, count)
        if cache.requires_update(state): count += 1
        self.assertEqual(1, count)
        state.t = 1.0
        if cache.requires_update(state): count += 1
        self.assertEqual(2, count)
        if cache.requires_update(state): count += 1
        self.assertEqual(2, count)
        state.m = Constant((0.0, 1.0, 0.0))
        if cache.requires_update(state): count += 1
        self.assertEqual(3, count)
        if cache.requires_update(state): count += 1
        self.assertEqual(3, count)
        state.j = Constant((1.0, 0.0, 0.0))
        if cache.requires_update(state): count += 1
        self.assertEqual(3, count)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
KaranToor/MA450 | google-cloud-sdk/lib/surface/container/node_pools/describe.py | Python | apache-2.0 | 1,997 | 0.003005 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe node pool command."""
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.container import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.container import flags
class Describe(base.DescribeCommand):
    """Describe an existing node pool for a cluster.

    *{command}* displays all data associated with the node pool in the
    Google Container Engine cluster.
    """

    @staticmethod
    def Args(parser):
        """Register flags for this command.

        Args:
          parser: An argparse.ArgumentParser-like object. It is mocked out in
            order to capture some information, but behaves like an
            ArgumentParser.
        """
        flags.AddNodePoolNameArg(parser, 'The name of the node pool.')
        flags.AddNodePoolClusterFlag(parser, 'The name of the cluster.')

    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to
            this command invocation.

        Returns:
          The node pool resource fetched from the API, printed later by the
          framework.
        """
        adapter = self.context['api_adapter']
        try:
            return adapter.GetNodePool(adapter.ParseNodePool(args.name))
        except apitools_exceptions.HttpError as error:
            # Convert raw apitools HTTP failures into the gcloud-friendly
            # exception type so the CLI prints a readable message.
            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
|
easmetz/inasafe | safe/impact_functions/ash/ash_raster_population/impact_function.py | Python | gpl-3.0 | 7,502 | 0 | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Ash Raster on Population
Impact Function
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import numpy
from safe.common.exceptions import ZeroImpactException
from safe.impact_functions.bases.continuous_rh_continuous_re import \
ContinuousRHContinuousRE
from safe.impact_functions.core import (
has_no_data,
no_population_impact_message
)
from safe.storage.raster import Raster
from safe.utilities.i18n import tr
from safe.common.utilities import (
humanize_class,
create_classes,
create_label)
from safe.gui.tools.minimum_needs.needs_profile import add_needs_parameters, \
filter_needs_parameters
from safe.impact_reports.population_exposure_report_mixin import \
PopulationExposureReportMixin
from safe.impact_functions.ash.ash_raster_population.metadata_definitions \
import AshRasterHazardPopulationFunctionMetadata
# Module attribution metadata.
__author__ = 'ismailsunni'
__project_name__ = 'inasafe-dev'
__filename__ = 'impact_function.py'
__date__ = '7/13/16'
__copyright__ = 'imajimatika@gmail.com'
class AshRasterPopulationFunction(
        ContinuousRHContinuousRE,
        PopulationExposureReportMixin):
    # noinspection PyUnresolvedReferences
    """Simple impact function for ash raster on population.

    Classifies each raster cell of ash thickness into hazard bands using
    the thresholds from ``self.parameters['group_threshold']`` and sums the
    exposed population per band.
    """

    _metadata = AshRasterHazardPopulationFunctionMetadata()

    def __init__(self):
        """Constructor.

        Sets up report-mixin state, the translated hazard class names, and
        the minimum-needs parameters.
        """
        super(AshRasterPopulationFunction, self).__init__()
        PopulationExposureReportMixin.__init__(self)
        self.hazard_classes = [
            tr('Very Low'),
            tr('Low'),
            tr('Moderate'),
            tr('High'),
            tr('Very High'),
        ]
        self.parameters = add_needs_parameters(self.parameters)
        self.no_data_warning = False

    def run(self):
        """Run the impact function.

        Returns a styled impact ``Raster`` of affected population per cell
        and populates the report-mixin counters as a side effect.  Raises
        ZeroImpactException when nobody is affected.
        """
        # Band boundaries (ash thickness) for the hazard classification.
        group_parameters = self.parameters['group_threshold']
        unaffected_max = group_parameters.value_map[
            'unaffected_threshold'].value
        very_low_max = group_parameters.value_map['very_low_threshold'].value
        low_max = group_parameters.value_map['low_threshold'].value
        medium_max = group_parameters.value_map['moderate_threshold'].value
        high_max = group_parameters.value_map['high_threshold'].value
        # Extract hazard data as numeric arrays
        ash = self.hazard.layer.get_data(nan=True)  # Thickness
        if has_no_data(ash):
            self.no_data_warning = True
        # Extract exposure data as numeric arrays
        population = self.exposure.layer.get_data(nan=True, scaling=True)
        if has_no_data(population):
            self.no_data_warning = True
        # Build one masked array per hazard band: the population value when
        # the cell falls inside the band, else 0.
        unaffected_exposure = numpy.where(ash < unaffected_max, population, 0)
        very_low_exposure = numpy.where(
            (ash >= unaffected_max) & (ash < very_low_max), population, 0)
        low_exposure = numpy.where(
            (ash >= very_low_max) & (ash < low_max), population, 0)
        medium_exposure = numpy.where(
            (ash >= low_max) & (ash < medium_max), population, 0)
        high_exposure = numpy.where(
            (ash >= medium_max) & (ash < high_max), population, 0)
        very_high_exposure = numpy.where(ash >= high_max, population, 0)
        impacted_exposure = (
            very_low_exposure +
            low_exposure +
            medium_exposure +
            high_exposure +
            very_high_exposure
        )
        # Count totals
        self.total_population = int(numpy.nansum(population))
        self.affected_population[
            tr('Population in very low hazard zone')] = int(
            numpy.nansum(very_low_exposure))
        self.affected_population[
            tr('Population in low hazard zone')] = int(
            numpy.nansum(low_exposure))
        self.affected_population[
            tr('Population in medium hazard zone')] = int(
            numpy.nansum(medium_exposure))
        self.affected_population[
            tr('Population in high hazard zone')] = int(
            numpy.nansum(high_exposure))
        self.affected_population[
            tr('Population in very high hazard zone')] = int(
            numpy.nansum(very_high_exposure))
        self.unaffected_population = int(
            numpy.nansum(unaffected_exposure))
        # check for zero impact
        if self.total_affected_population == 0:
            message = no_population_impact_message(self.question)
            raise ZeroImpactException(message)
        # Don't show digits less than a 1000
        self.minimum_needs = [
            parameter.serialize() for parameter in
            filter_needs_parameters(self.parameters['minimum needs'])
        ]
        total_needs = self.total_needs
        # Style for impact layer: white through greens/yellows to dark red.
        colours = [
            '#FFFFFF', '#38A800', '#79C900', '#CEED00',
            '#FFCC00', '#FF6600', '#FF0000', '#7A0000']
        classes = create_classes(impacted_exposure.flat[:], len(colours))
        interval_classes = humanize_class(classes)
        style_classes = []
        for i in xrange(len(colours)):
            style_class = dict()
            # Indices 1/4/7 get descriptive Low/Medium/High labels; the
            # rest are labelled with the interval bounds only.
            if i == 1:
                label = create_label(
                    interval_classes[i],
                    tr('Low Population [%i people/cell]' % classes[i]))
            elif i == 4:
                label = create_label(
                    interval_classes[i],
                    tr('Medium Population [%i people/cell]' % classes[i]))
            elif i == 7:
                label = create_label(
                    interval_classes[i],
                    tr('High Population [%i people/cell]' % classes[i]))
            else:
                label = create_label(interval_classes[i])
            style_class['label'] = label
            style_class['quantity'] = classes[i]
            style_class['transparency'] = 0
            style_class['colour'] = colours[i]
            style_classes.append(style_class)
        style_info = dict(
            target_field=None,
            style_classes=style_classes,
            style_type='rasterStyle')
        impact_data = self.generate_data()
        extra_keywords = {
            'map_title': self.metadata().key('map_title'),
            'legend_notes': self.metadata().key('legend_notes'),
            'legend_units': self.metadata().key('legend_units'),
            'legend_title': self.metadata().key('legend_title'),
            'total_needs': total_needs
        }
        impact_layer_keywords = self.generate_impact_keywords(extra_keywords)
        # Create raster object and return
        impact_layer = Raster(
            data=impacted_exposure,
            projection=self.hazard.layer.get_projection(),
            geotransform=self.hazard.layer.get_geotransform(),
            name=self.metadata().key('layer_name'),
            keywords=impact_layer_keywords,
            style_info=style_info)
        impact_layer.impact_data = impact_data
        self._impact = impact_layer
        return impact_layer
|
abhijithanilkumar/CollegeSeatAllocation | src/CollegeSeatAllocation/settings/base.py | Python | mit | 4,017 | 0.001494 | """
Django settings for CollegeSeatAllocation project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists

# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"

# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            join(BASE_DIR, 'templates'),
            # insert more TEMPLATE_DIRS here
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()

# Ideally the env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
    environ.Env.read_env(str(env_file))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')

# Fix: ALLOWED_HOSTS was previously assigned twice (once here and once after
# STATIC_URL); keep the single authoritative definition here.  Populate with
# real host names before deploying.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'authtools',
    'crispy_forms',
    'easy_thumbnails',
    'profiles',
    'accounts',
    'seats',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'CollegeSeatAllocation.urls'
WSGI_APPLICATION = 'CollegeSeatAllocation.wsgi.application'

# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in
    # os.environ
    'default': env.db(),
}

# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'

# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'

# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")

THUMBNAIL_EXTENSION = 'png'  # Or any extn for your thumbnails
|
googleapis/python-compute | google/cloud/compute_v1/services/service_attachments/client.py | Python | apache-2.0 | 59,785 | 0.001321 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
    # Newer google-api-core releases expose a sentinel meaning "no retry
    # specified"; include it in the alias when available.
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    # Older google-api-core versions lack _MethodDefault; fall back to a
    # looser alias.
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.compute_v1.services.service_attachments import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import ServiceAttachmentsTransport, DEFAULT_CLIENT_INFO
from .transports.rest import ServiceAttachmentsRestTransport
class ServiceAttachmentsClientMeta(type):
    """Metaclass for the ServiceAttachments client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Maps transport label -> transport class.  An OrderedDict so the first
    # registered entry doubles as the default transport.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[ServiceAttachmentsTransport]]
    _transport_registry["rest"] = ServiceAttachmentsRestTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[ServiceAttachmentsTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class ServiceAttachmentsClient(metaclass=ServiceAttachmentsClientMeta):
"""The ServiceAttachments API."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        # Empty/None endpoints are returned unchanged.
        if not api_endpoint:
            return api_endpoint
        # Named groups split the hostname; everything after ``name`` is
        # optional, so endpoints not beginning with a dot always match.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a *.googleapis.com host: keep as-is.
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ServiceAttachmentsClient: The constructed client.
        """
        # Build credentials from the in-memory key material, then delegate
        # to the normal constructor with them injected.
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            ServiceAttachmentsClient: The constructed client.
        """
        # Same as from_service_account_info, but reading the key from disk.
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    # Backwards-compatible alias kept for existing callers.
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> ServiceAttachmentsTransport:
        """Returns the transport used by the client instance.
        Returns:
            ServiceAttachmentsTransport: The transport used by the client
                instance.
        """
        # _transport is presumably assigned in __init__ (not visible in this
        # chunk) — confirm before relying on it pre-construction.
        return self._transport
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Returns a fully-qualified billing_account string."""
        # Pattern: ``billingAccounts/{billing_account}``.
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        # Returns an empty dict when *path* does not match the pattern.
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Returns a fully-qualified folder string."""
        # Pattern: ``folders/{folder}``.
        return "folders/{folder}".format(folder=folder,)
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        # Returns an empty dict when *path* does not match the pattern.
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Returns a fully-qualified organization string."""
        # Pattern: ``organizations/{organization}``.
        return "organizations/{organization}".format(organization=organization,)
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        # Returns an empty dict when *path* does not match the pattern.
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str,) -> str:
        """Returns a fully-qualified project string."""
        # Pattern: ``projects/{project}``.
        return "projects/{project}".format(project=project,)
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        # Returns an empty dict when *path* does not match the pattern.
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Returns a fully-qualified location string."""
        # Pattern: ``projects/{project}/locations/{location}``.
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
|
kakunbsc/enigma2.4 | lib/python/Components/Network.py | Python | gpl-2.0 | 20,526 | 0.034883 | from os import system, popen, path as os_path, listdir
from re import compile as re_compile, search as re_search
from socket import *
from enigma import eConsoleAppContainer
from Components.Console import Console
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
class Network:
    def __init__(self):
        """Set up per-task console helpers and start interface discovery."""
        # iface name -> data dict (up/dhcp/ip/netmask/gateway/mac/...)
        self.ifaces = {}
        self.configuredInterfaces = []
        self.configuredNetworkAdapters = []
        self.NetworkState = 0
        self.DnsState = 0
        self.nameservers = []
        self.ethtool_bin = "/usr/sbin/ethtool"
        self.container = eConsoleAppContainer()
        # Dedicated Console instances so concurrent shell commands for
        # different tasks do not interfere with each other.
        self.Console = Console()
        self.LinkConsole = Console()
        self.restartConsole = Console()
        self.deactivateConsole = Console()
        self.deactivateInterfaceConsole = Console()
        self.activateConsole = Console()
        self.resetNetworkConsole = Console()
        self.DnsConsole = Console()
        self.config_ready = None
        # Kick off asynchronous detection of the available interfaces.
        self.getInterfaces()
    def onRemoteRootFS(self):
        """Return True when the root filesystem is mounted over the network.

        Used by callers to avoid restarting networking while / itself lives
        on NFS or SMB (which would cut the box off from its root).
        """
        fp = file('/proc/mounts', 'r')
        mounts = fp.readlines()
        fp.close()
        for line in mounts:
            # /proc/mounts format: device mountpoint fstype options ...
            parts = line.strip().split(' ')
            if parts[1] == '/' and (parts[2] == 'nfs' or parts[2] == 'smbfs'):
                return True
        return False
    def getInterfaces(self, callback = None):
        """Scan /proc/net/dev and collect data for every real interface.

        *callback* is forwarded to getDataForInterface and invoked once per
        interface when its asynchronous probing finishes.
        """
        # Device names look like eth0, wlan1, ... (letters followed by digits).
        devicesPattern = re_compile('[a-z]+[0-9]+')
        self.configuredInterfaces = []
        fp = file('/proc/net/dev', 'r')
        result = fp.readlines()
        fp.close()
        for line in result:
            try:
                device = devicesPattern.search(line).group()
                # Skip wifi helper devices that are not usable interfaces.
                if device in ('wifi0', 'wmaster0'):
                    continue
                self.getDataForInterface(device, callback)
            except AttributeError:
                # Header lines of /proc/net/dev contain no device name.
                pass
        #print "self.ifaces:", self.ifaces
        #self.writeNetworkConfig()
        #print ord(' ')
        #for line in result:
        #	print ord(line[0])
# helper function
def regExpMatch(self, pattern, string):
if string is None:
return None
try:
return pattern.search(string).group()
except AttributeError:
None
# helper function to convert ips from a sring to a list of ints
def convertIP(self, ip):
strIP = ip.split('.')
ip = []
for x in strIP:
ip.append(int(x))
return ip
    def getDataForInterface(self, iface,callback):
        """Asynchronously query addresses for *iface* via ``ip -o addr``.

        The console output is handled by IPaddrFinished, which receives
        (iface, callback) as extra arguments.
        """
        #get ip out of ip addr, as avahi sometimes overrides it in ifconfig.
        if not self.Console:
            self.Console = Console()
        cmd = "ip -o addr"
        self.Console.ePopen(cmd, self.IPaddrFinished, [iface,callback])
def IPaddrFinished(self, result, retval, extra_args):
(iface, callback ) = extra_args
data = { 'up': False, 'dhcp': False, 'preup' : False, 'postdown' : False }
globalIPpattern = re_compile("scope global")
ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
netRegexp = '[0-9]{1,2}'
macRegexp = '[0-9]{2}\:[0-9]{2}\:[0-9]{2}\:[a-z0-9]{2}\:[a-z0-9]{2}\:[a-z0-9]{2}'
ipLinePattern = re_compile('inet ' + ipRegexp + '/')
ipPattern = re_compile(ipRegexp)
netmaskLinePattern = re_compile('/' + netRegexp)
netmaskPattern = re_compile(netRegexp)
bcastLinePattern = re_compile(' brd ' + ipRegexp)
upPattern = re_compile('UP')
macPattern = re_compile('[0-9]{2}\:[0-9]{2}\:[0-9]{2}\:[a-z0-9]{2}\:[a-z0-9]{2}\:[a-z0-9]{2}')
macLinePattern = re_compile('link/ether ' + macRegexp)
for line in result.splitlines():
split = line.strip().split(' ',2)
if (split[1][:-1] == iface):
up = self.regExpMatch(upPattern, split[2])
mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2]))
if up is not None:
data['up'] = True
if iface is not 'lo':
self.configuredInterfaces.append(iface)
if mac is not None:
data['mac'] = mac
if (split[1] == iface):
if re_search(globalIPpattern, split[2]):
ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
netmask = self.calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2])))
bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2]))
if ip is not None:
data['ip'] = self.convertIP(ip)
if netmask is not None:
data['netmask'] = self.convertIP(netmask)
if bcast is not None:
data['bcast'] = self.convertIP(bcast)
if not data.has_key('ip'):
data['dhcp'] = True
data['ip'] = [0, 0, 0, 0]
data['netmask'] = [0, 0, 0, 0]
data['gateway'] = [0, 0, 0, 0]
cmd = "route -n | grep " + iface
self.Console.ePopen(cmd,self.routeFinished, [iface, data, callback])
    def routeFinished(self, result, retval, extra_args):
        """Console callback: extract the default gateway from ``route -n``.

        Stores the completed data dict in self.ifaces and continues the
        pipeline with loadNetworkConfig.
        """
        (iface, data, callback) = extra_args
        ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
        ipPattern = re_compile(ipRegexp)
        ipLinePattern = re_compile(ipRegexp)
        for line in result.splitlines():
            print line[0:7]
            # The default route row starts with destination 0.0.0.0; the
            # gateway sits in the fixed column range 16-31 of route output.
            if line[0:7] == "0.0.0.0":
                gateway = self.regExpMatch(ipPattern, line[16:31])
                if gateway is not None:
                    data['gateway'] = self.convertIP(gateway)
        self.ifaces[iface] = data
        self.loadNetworkConfig(iface,callback)
    def writeNetworkConfig(self):
        """Regenerate /etc/network/interfaces from self.ifaces, then write
        the nameserver config.

        The file is fully rewritten; manual edits are lost by design (see
        the generated header).
        """
        self.configuredInterfaces = []
        fp = file('/etc/network/interfaces', 'w')
        fp.write("# automatically generated by enigma 2\n# do NOT change manually!\n\n")
        fp.write("auto lo\n")
        fp.write("iface lo inet loopback\n\n")
        for ifacename, iface in self.ifaces.items():
            if iface['up'] == True:
                fp.write("auto " + ifacename + "\n")
                self.configuredInterfaces.append(ifacename)
            if iface['dhcp'] == True:
                fp.write("iface "+ ifacename +" inet dhcp\n")
            if iface['dhcp'] == False:
                fp.write("iface "+ ifacename +" inet static\n")
                if iface.has_key('ip'):
                    # NOTE(review): debug print left in production path.
                    print tuple(iface['ip'])
                    fp.write("	address %d.%d.%d.%d\n" % tuple(iface['ip']))
                    fp.write("	netmask %d.%d.%d.%d\n" % tuple(iface['netmask']))
                    if iface.has_key('gateway'):
                        fp.write("	gateway %d.%d.%d.%d\n" % tuple(iface['gateway']))
            # Plugin-provided config (e.g. wlan credentials) replaces the
            # raw pre-up/post-down lines when present.
            if iface.has_key("configStrings"):
                fp.write("\n" + iface["configStrings"] + "\n")
            if iface["preup"] is not False and not iface.has_key("configStrings"):
                fp.write(iface["preup"])
                fp.write(iface["postdown"])
            fp.write("\n")
        fp.close()
        self.writeNameserverConfig()
    def writeNameserverConfig(self):
        """Rewrite /etc/resolv.conf with one line per configured nameserver."""
        fp = file('/etc/resolv.conf', 'w')
        for nameserver in self.nameservers:
            # nameserver entries are stored as [a, b, c, d] int lists.
            fp.write("nameserver %d.%d.%d.%d\n" % tuple(nameserver))
        fp.close()
def loadNetworkConfig(self,iface,callback = None):
interfaces = []
# parse the interfaces-file
try:
fp = file('/etc/network/interfaces', 'r')
interfaces = fp.readlines()
fp.close()
except:
print "[Network.py] interfaces - opening failed"
ifaces = {}
currif = ""
for i in interfaces:
split = i.strip().split(' ')
if (split[0] == "iface"):
currif = split[1]
ifaces[currif] = {}
if (len(split) == 4 and split[3] == "dhcp"):
ifaces[currif]["dhcp"] = True
else:
ifaces[currif]["dhcp"] = False
if (currif == iface): #read information only for available interfaces
if (split[0] == "address"):
ifaces[currif]["address"] = map(int, split[1].split('.'))
if self.ifaces[currif].has_key("ip"):
if self.ifaces[currif]["ip"] != ifaces[currif]["address"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["ip"] = map(int, split[1].split('.'))
if (split[0] == "netmask"):
ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if self.ifaces[currif].has_key("netmask"):
if self.ifaces[currif]["netmask"] != ifaces[currif]["netmask"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["netmask"] = map(int, split[1].split('.'))
if (split[0] == "gateway"):
ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if self.ifaces[currif].has_key("gateway"):
if self.ifaces[currif]["gateway"] != ifaces[currif]["gateway"] and ifaces[currif]["dhcp"] == False:
self.ifaces[currif]["gateway"] = map(int, split[1].split('.'))
if (split[0] == "pre-up"):
if self.ifaces[currif].has_key("preup"):
self.ifaces[currif]["preup"] = i
if (split[0] == "post-down"):
if self.ifaces[currif].has_key("postdown"):
self.ifaces[currif]["postdown"] = i
for ifacename, iface in ifaces.items():
if self.ifaces.has_key(ifacename):
self.ifaces[ifacename]["dhcp"] = iface["dhcp"]
if self.Console:
|
n054/qBittorrent | src/searchengine/nova/engines/torrentz.py | Python | gpl-2.0 | 5,201 | 0.003076 | #VERSION: 2.12
#AUTHORS: Diego de las Heras (diegodelasheras@gmail.com)
# Redistribution and us | e in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# | documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from urllib import urlencode
class torrentz(object):
    """qBittorrent search plugin for the torrentz.eu meta-search engine."""
    # mandatory properties
    url = 'https://torrentz.eu'
    name = 'Torrentz'
    supported_categories = {'all': ''}
    # torrentz only indexes other sites, so magnet links are assembled
    # locally with this fixed tracker list appended.
    trackers_list = ['udp://open.demonii.com:1337/announce',
                     'udp://tracker.leechers-paradise.org:6969',
                     'udp://exodus.desync.com:6969',
                     'udp://tracker.coppersurfer.tk:6969',
                     'udp://9.rarbg.com:2710/announce']

    class MyHtmlParser(HTMLParser):
        """SAX-style parser that turns a torrentz results page into
        prettyPrinter() calls, one per result row."""
        def __init__(self, results, url, trackers):
            HTMLParser.__init__(self)
            self.results = results
            self.url = url
            self.trackers = trackers
            # Counts <span> tags inside the current result row; None means
            # "not currently inside a result row".
            self.td_counter = None
            self.current_item = None
        def handle_starttag(self, tag, attrs):
            # Each result row begins with <a href="/<infohash>">.
            if tag == 'a':
                params = dict(attrs)
                if 'href' in params:
                    self.current_item = {}
                    self.td_counter = 0
                    self.current_item['link'] = 'magnet:?xt=urn:btih:' + \
                        params['href'].strip(' /') + self.trackers
                    self.current_item['desc_link'] = self.url + params['href'].strip()
            elif tag == 'span':
                if isinstance(self.td_counter,int):
                    self.td_counter += 1
                    if self.td_counter > 6: # safety
                        self.td_counter = None
        def handle_data(self, data):
            # span index 0 = name, 4 = size, 5 = seeds, 6 = leechers.
            if self.td_counter == 0:
                if 'name' not in self.current_item:
                    self.current_item['name'] = ''
                self.current_item['name'] += data
            elif self.td_counter == 4:
                if 'size' not in self.current_item:
                    self.current_item['size'] = data.strip()
            elif self.td_counter == 5:
                if 'seeds' not in self.current_item:
                    self.current_item['seeds'] = data.strip().replace(',', '')
            elif self.td_counter == 6:
                if 'leech' not in self.current_item:
                    self.current_item['leech'] = data.strip().replace(',', '')
                # display item
                self.td_counter = None
                self.current_item['engine_url'] = self.url
                # NOTE(review): str.find() returns -1 when absent (truthy), so
                # this condition is effectively "marker not at index 0".
                # Harmless: split() on a missing separator yields the whole
                # string — but `!= -1` was probably intended.
                if self.current_item['name'].find(' \xc2'):
                    self.current_item['name'] = self.current_item['name'].split(' \xc2')[0]
                self.current_item['link'] += '&' + urlencode({'dn' : self.current_item['name']})
                if not self.current_item['seeds'].isdigit():
                    self.current_item['seeds'] = 0
                if not self.current_item['leech'].isdigit():
                    self.current_item['leech'] = 0
                prettyPrinter(self.current_item)
                self.results.append('a')

    def download_torrent(self, info):
        """Print the path of the downloaded .torrent file (engine protocol)."""
        print(download_file(info))

    def search(self, what, cat='all'):
        """Fetch up to 6 result pages for the (already urlencoded) query and
        emit each result via MyHtmlParser; stops early on an empty page."""
        # initialize trackers for magnet links
        trackers = '&' + '&'.join(urlencode({'tr' : tracker}) for tracker in self.trackers_list)
        i = 0
        while i < 6:
            results_list = []
            # "what" is already urlencoded
            html = retrieve_url(self.url + '/search?f=%s&p=%d' % (what, i))
            parser = self.MyHtmlParser(results_list, self.url, trackers)
            parser.feed(html)
            parser.close()
            if len(results_list) < 1:
                break
            i += 1
|
ChantyTaguan/zds-site | zds/forum/admin.py | Python | gpl-3.0 | 1,098 | 0 | from django.contrib import admin
from zds.forum.models import ForumCategory, Forum, Post, Topic, TopicRead
class TopicAdmin(admin.ModelAdmin):
    """Admin configuration for forum topics."""
    list_display = ("title", "author", "forum", "pubdate")
    list_filter = ("is_locked", "is_sticky")
    # raw_id_fields avoids rendering huge <select> widgets for relations.
    raw_id_fields = ("forum", "author", "last_message", "tags", "solved_by")
    ordering = ("-pubdate",)
    search_fields = ("author__username", "title", "subtitle", "github_issue")
class TopicReadAdmin(admin.ModelAdmin):
    """Admin configuration for topic read-markers (user read a topic)."""
    list_display = ("topic", "user")
    raw_id_fields = ("topic", "post", "user")
    search_fields = ("topic__title", "user__username")
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for forum posts, newest first."""
    list_display = ("topic", "author", "ip_address", "pubdate", "is_visible")
    list_filter = ("is_visible",)
    raw_id_fields = ("author", "editor")
    ordering = ("-pubdate",)
    search_fields = ("author__username", "text", "text_hidden", "ip_address")
admin.site.register(ForumCategory)
admin.site.reg | ister(Forum)
admin.site.register(Post, PostAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(TopicRead, TopicReadAdmin)
|
wogsland/QSTK | Homework/hw3.py | Python | bsd-3-clause | 3,364 | 0.0217 | '''
Example call:
python marketsim.py 1000000 orders.csv values.csv
python hw3.py 1000000 orders.csv values.csv
Example orders.csv:
2008, 12, 3, AAPL, BUY, 130
2008, 12, 8, AAPL, SELL, 130
2008, 12, 5, IBM, BUY, 50
Example values.csv:
2008, 12, 3, 1000000
2008, 12, 4, 1000010
2008, 12, 5, 1000250
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu |
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import csv
import sys
import copy
import string
if __name__ == '__main__':
    # 1. Read the dates and symbols
    # CLI: marketsim.py <starting_cash> <orders.csv> <values.csv>
    startcash = int(sys.argv[1])
    filename = sys.argv[2]
    outfile = sys.argv[3]
    reader = csv.reader(open(filename, 'rU'), delimiter=',')
    dt_array = []
    symb_array = []
    trade_array = []
    for row in reader:
        # Order rows: year, month, day, symbol, BUY/SELL, shares.
        symb_array.append(row[3].strip())
        # 16:00 == market close.
        the_date = dt.datetime(int(row[0]), int(row[1]), int(row[2]), 16)
        dt_array.append(the_date)
        #the_date = row[0]+"-"+row[1]+"-"+row[2]+" 16:00:00"
        trade_array.append([row[3].strip(), row[4].strip(), row[5].strip(), the_date])
    print dt_array
    # De-duplicate and sort trade dates / symbols.
    dt_array = sorted(list(set(dt_array)))
    print dt_array
    symb_array = list(set(symb_array))
    print symb_array
    print trade_array
    # 2. Read the data
    # Shift each date back one day for price lookup.
    read_dt_array = []
    for row in dt_array:
        read_dt_array.append(row - dt.timedelta(days=1))
    print read_dt_array
    dataobj = da.DataAccess('Yahoo')
    #ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ls_keys = ['actual_close']
    ldf_data = dataobj.get_data(read_dt_array, symb_array, ls_keys)
    print ldf_data
    d_data = dict(zip(ls_keys, ldf_data))
    print d_data
    # problem with this fill is it only grabs the previous day in the dataset, not last trading day
    for s_key in ls_keys:
        d_data[s_key] = d_data[s_key].fillna(method='ffill')
        d_data[s_key] = d_data[s_key].fillna(method='bfill')
        d_data[s_key] = d_data[s_key].fillna(1.0)
    print d_data
    # 3. Create the matrix of shares
    # Same shape as the price frame, zeroed out.
    trade_matrix = copy.deepcopy(d_data)
    for s_key in ls_keys:
        trade_matrix[s_key] = trade_matrix[s_key] * 0.0
    print trade_matrix
    holdings_matrix = copy.deepcopy(trade_matrix)
    for row in trade_array:
        # SELLs are recorded as negative share counts.
        if 'BUY' == row[1]:
            shares = int(row[2])
        else:
            shares = int(row[2]) * (-1)
        traded = row[0]
        day = string.replace((row[3]-dt.timedelta(days=1)).isoformat(),"T"," ")
        print traded + " " + str(shares)
        #trade = d_data['actual_close'][traded] * shares
        #print trade
        multi = d_data['actual_close'][traded].mul(shares)
        print multi[day]
        # Dollar value of the trade on that day.
        trade_matrix['actual_close'][traded][day] = multi[day]
    print trade_matrix
    # 4. Calculate the cash timeseries
    #cash = copy.deepcopy(read_dt_array)
    first_date = string.replace((read_dt_array[0]-dt.timedelta(days=1)).isoformat(),"T"," ")
    cash = []
    cash.append([first_date,startcash])
    i=0
    for row in read_dt_array:
        last_cash = cash[i][1]
        this_date = string.replace(row.isoformat(),"T"," ")
        for symbol in symb_array:
            # NOTE(review): the next line is a bare expression with no
            # effect — TODO confirm whether it was meant to be removed.
            trade_matrix['actual_close'][symbol][this_date]
            last_cash = round(last_cash - trade_matrix['actual_close'][symbol][this_date], 2)
        cash.append([this_date,last_cash])
        i = i+1
    print cash
    # 5. Calculate the fund timeseries
    # 6. Write to CSV
|
SINGROUP/pycp2k | pycp2k/classes/_bse1.py | Python | lgpl-3.0 | 682 | 0.002933 | from pycp2k.inputsection import InputSection
class _bse1(InputSection):
    """Input-section object for a CP2K ``&BSE`` block.

    Attributes left as None are treated as "not set" by the InputSection
    machinery when the input file is generated.
    """
    def __init__(self):
        InputSection.__init__(self)
        # Keyword values; None means "use the CP2K default".
        self.Num_z_vectors = None
        self.Threshold_min_trans = None
        self.Max_iter = None
        self._name = "BSE"
        # Maps Python attribute names to CP2K keyword names.
        self._keywords = {'Num_z_vectors': 'NUM_Z_VECTORS', 'Threshold_min_trans': 'THRESHOLD_MIN_TRANS', 'Max_iter': 'MAX_ITER'}
        # CP2K alias: EPS == THRESHOLD_MIN_TRANS.
        self._aliases = {'Eps': 'Threshold_min_trans'}

    @property
    def Eps(self):
        """
        See documentation for Threshold_min_trans
        """
        return self.Threshold_min_trans

    @Eps.setter
    def Eps(self, value):
        # Alias setter: writes through to Threshold_min_trans.
        self.Threshold_min_trans = value
|
Akagi201/learning-python | json/json_mixed_data.py | Python | mit | 762 | 0.001312 | #!/usr/bin/env python
# encoding: utf-8
import json
decoder = json.JSONDecoder()
def get_decoded_and_remainder(input_data):
    """Decode the leading JSON document in *input_data*.

    Returns a 3-tuple ``(decoded_object, end_index, remaining_text)``
    where ``end_index`` is the offset just past the parsed JSON and
    ``remaining_text`` is the unparsed tail of the input.
    """
    parsed, end_index = decoder.raw_decode(input_data)
    return (parsed, end_index, input_data[end_index:])
# Demo: raw_decode succeeds when the JSON document comes first, but fails
# when arbitrary text precedes it (it only skips leading whitespace).
encoded_object = '[{"a": "A", "c": 3.0, "b": [2, 4]}]'
extra_text = 'This text is not JSON.'
print 'JSON first:'
obj, end, remaining = get_decoded_and_remainder(' '.join([encoded_object, extra_text]))
print 'Object              :', obj
print 'End of parsed input :', end
print 'Remaining text      :', repr(remaining)
print
print 'JSON embedded:'
try:
    obj, end, remaining = get_decoded_and_remainder(
        ' '.join([extra_text, encoded_object, extra_text])
    )
except ValueError, err:
    # raw_decode raises ValueError when the input does not start with JSON.
    print 'ERROR:', err
|
C-o-r-E/OctoPrint | src/octoprint/server/api/printer.py | Python | agpl-3.0 | 9,354 | 0.027053 | # coding=utf-8
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
from flask import request, jsonify, make_response
import re
from octoprint.settings import settings, valid_boolean_trues
from octoprint.server import printer, restricted_access, NO_CONTENT
from octoprint.server.api import api
import octoprint.util as util
#~~ Printer
@api.route("/printer", methods=["GET"])
def printerState():
    """GET /printer: combined temperature/sd/state snapshot.

    The ``exclude`` query parameter takes a comma-separated subset of
    {temperature, sd, state} to omit. Responds 409 while disconnected.
    """
    if not printer.isOperational():
        return make_response("Printer is not operational", 409)

    # process excludes
    excludes = []
    if "exclude" in request.values:
        excludeStr = request.values["exclude"]
        if len(excludeStr.strip()) > 0:
            # Unknown exclude tokens are silently dropped.
            excludes = filter(lambda x: x in ["temperature", "sd", "state"], map(lambda x: x.strip(), excludeStr.split(",")))

    result = {}

    # add temperature information
    if not "temperature" in excludes:
        result.update({"temperature": _getTemperatureData(lambda x: x)})

    # add sd information
    if not "sd" in excludes and settings().getBoolean(["feature", "sdSupport"]):
        result.update({"sd": {"ready": printer.isSdReady()}})

    # add state information
    if not "state" in excludes:
        state = printer.getCurrentData()["state"]
        result.update({"state": state})

    return jsonify(result)
#~~ Tool
@api.route("/printer/tool", methods=["POST"])
@restricted_access
def printerToolCommand():
    """POST /printer/tool: select / target / offset / extrude commands.

    Validates tool names against ``tool<N>`` and that all values are
    numeric; returns 400 on validation errors, 409 when the printer is
    unavailable (or printing, for extrude), 204 on success.
    """
    if not printer.isOperational():
        return make_response("Printer is not operational", 409)

    valid_commands = {
        "select": ["tool"],
        "target": ["targets"],
        "offset": ["offsets"],
        "extrude": ["amount"]
    }
    command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
    if response is not None:
        return response

    validation_regex = re.compile("tool\d+")

    ##~~ tool selection
    if command == "select":
        tool = data["tool"]
        if re.match(validation_regex, tool) is None:
            return make_response("Invalid tool: %s" % tool, 400)
        if not tool.startswith("tool"):
            return make_response("Invalid tool for selection: %s" % tool, 400)
        printer.changeTool(tool)

    ##~~ temperature
    elif command == "target":
        targets = data["targets"]
        # make sure the targets are valid and the values are numbers
        validated_values = {}
        for tool, value in targets.iteritems():
            if re.match(validation_regex, tool) is None:
                return make_response("Invalid target for setting temperature: %s" % tool, 400)
            if not isinstance(value, (int, long, float)):
                return make_response("Not a number for %s: %r" % (tool, value), 400)
            validated_values[tool] = value

        # perform the actual temperature commands
        for tool in validated_values.keys():
            printer.setTemperature(tool, validated_values[tool])

    ##~~ temperature offset
    elif command == "offset":
        offsets = data["offsets"]
        # make sure the targets are valid, the values are numbers and in the range [-50, 50]
        validated_values = {}
        for tool, value in offsets.iteritems():
            if re.match(validation_regex, tool) is None:
                return make_response("Invalid target for setting temperature: %s" % tool, 400)
            if not isinstance(value, (int, long, float)):
                return make_response("Not a number for %s: %r" % (tool, value), 400)
            if not -50 <= value <= 50:
                return make_response("Offset %s not in range [-50, 50]: %f" % (tool, value), 400)
            validated_values[tool] = value

        # set the offsets
        printer.setTemperatureOffset(validated_values)

    ##~~ extrusion
    elif command == "extrude":
        if printer.isPrinting():
            # do not extrude when a print job is running
            return make_response("Printer is currently printing", 409)

        amount = data["amount"]
        if not isinstance(amount, (int, long, float)):
            return make_response("Not a number for extrusion amount: %r" % amount, 400)
        printer.extrude(amount)

    return NO_CONTENT
@api.route("/printer/tool", methods=["GET"])
def printerToolState():
    """GET /printer/tool: temperature data for the hotends only (bed removed)."""
    def deleteBed(x):
        # Work on a copy so the shared temperature dict is not mutated.
        data = dict(x)

        if "bed" in data.keys():
            del data["bed"]
        return data

    return jsonify(_getTemperatureData(deleteBed))
##~~ Heated bed
@api.route("/printer/bed", methods=["POST"])
@restricted_access
def printerBedCommand():
    """POST /printer/bed: set bed target temperature or temperature offset.

    Returns 400 on non-numeric values or offsets outside [-50, 50],
    409 while disconnected, 204 on success.
    """
    if not printer.isOperational():
        return make_response("Printer is not operational", 409)

    valid_commands = {
        "target": ["target"],
        "offset": ["offset"]
    }
    command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
    if response is not None:
        return response

    ##~~ temperature
    if command == "target":
        target = data["target"]

        # make sure the target is a number
        if not isinstance(target, (int, long, float)):
            return make_response("Not a number: %r" % target, 400)

        # perform the actual temperature command
        printer.setTemperature("bed", target)

    ##~~ temperature offset
    elif command == "offset":
        offset = data["offset"]

        # make sure the offset is valid
        if not isinstance(offset, (int, long, float)):
            return make_response("Not a number: %r" % offset, 400)
        if not -50 <= offset <= 50:
            return make_response("Offset not in range [-50, 50]: %f" % offset, 400)

        # set the offsets
        printer.setTemperatureOffset({"bed": offset})

    return NO_CONTENT
@api.route("/printer/bed", methods=["GET"])
def printerBedState():
    """GET /printer/bed: temperature data for the bed only (tools removed)."""
    def deleteTools(x):
        # Copy, then drop every toolN entry. Iterating .keys() while
        # deleting is safe here because Python 2's keys() returns a list.
        data = dict(x)

        for k in data.keys():
            if k.startswith("tool"):
                del data[k]
        return data

    return jsonify(_getTemperatureData(deleteTools))
##~~ Print head
@api.route("/printer/printhead", methods=["POST"])
@restricted_access
def printerPrintheadCommand():
    """POST /printer/printhead: jog axes by relative amounts or home axes.

    Refused (409) while printing or disconnected; 400 on invalid axes or
    non-numeric jog values; 204 on success.
    """
    if not printer.isOperational() or printer.isPrinting():
        # do not jog when a print job is running or we don't have a connection
        return make_response("Printer is not operational or currently printing", 409)

    valid_commands = {
        "jog": [],
        "home": ["axes"]
    }
    command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
    if response is not None:
        return response

    valid_axes = ["x", "y", "z"]

    ##~~ jog command
    if command == "jog":
        # validate all jog instructions, make sure that the values are numbers
        validated_values = {}
        for axis in valid_axes:
            if axis in data:
                value = data[axis]
                if not isinstance(value, (int, long, float)):
                    return make_response("Not a number for axis %s: %r" % (axis, value), 400)
                validated_values[axis] = value

        # execute the jog commands
        for axis, value in validated_values.iteritems():
            printer.jog(axis, value)

    ##~~ home command
    elif command == "home":
        validated_values = []
        axes = data["axes"]
        for axis in axes:
            if not axis in valid_axes:
                return make_response("Invalid axis: %s" % axis, 400)
            validated_values.append(axis)

        # execute the home command
        printer.home(validated_values)

    return NO_CONTENT
##~~ SD Card
@api.route("/printer/sd", methods=["POST"])
@restricted_access
def printerSdCommand():
    """POST /printer/sd: init/refresh/release the printer's SD card.

    404 when SD support is disabled in settings, 409 while busy or
    disconnected, 204 on success.
    """
    if not settings().getBoolean(["feature", "sdSupport"]):
        return make_response("SD support is disabled", 404)

    if not printer.isOperational() or printer.isPrinting() or printer.isPaused():
        return make_response("Printer is not operational or currently busy", 409)

    valid_commands = {
        "init": [],
        "refresh": [],
        "release": []
    }
    command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
    if response is not None:
        return response

    if command == "init":
        printer.initSdCard()
    elif command == "refresh":
        printer.refreshSdFiles()
    elif command == "release":
        printer.releaseSdCard()

    return NO_CONTENT
@api.route("/printer/sd", methods=["GET"] | )
def printerSdState():
if not settings().getBoolean(["feature", "sdSupport"]):
return make_response("SD support is disabled", 404)
return jsonify(ready=printer.isSdReady())
##~~ Commands
@api.route("/printer/command", methods=["POST"])
@restricted_access
def printerCommand():
# TODO: document me
if not printer.isOperational():
return make_response("Printer is not operational", 409)
if not "application/json" in request.headers["Content-Type"]:
return make_response("Expected content type JSON", 400)
data = request.json
parameters = {}
if "parameters" in data.keys(): parameters = data["parameters"]
commands = []
if "command" in data.keys(): command |
DavidCain/mitoc-trips | ws/templatetags/medical_tags.py | Python | gpl-3.0 | 1,991 | 0.001005 | from datetime import timedelta
from django import template
import ws.utils.dates as date_utils
import ws.utils.perms as perm_utils
from ws import forms
from ws.utils.itinerary import get_cars
register = template.Library()
@register.inclusion_tag('for_templatetags/show_wimp.html')
def show_wimp(wimp):
    """Render the WIMP display template with *wimp* as its participant."""
    return {'participant': wimp}
@register.inclusion_tag('for_templatetags/trip_itinerary.html')
def trip_itinerary(trip):
    """Return a stripped form for read-only display.

    Drivers will be displayed separately, and the 'accuracy' checkbox
    isn't needed for display.
    """
    # Trips with no itinerary render an empty state in the template.
    if not trip.info:
        return {'info_form': None}
    info_form = forms.TripInfoForm(instance=trip.info)
    info_form.fields.pop('drivers')
    info_form.fields.pop('accurate')
    return {'info_form': info_form}
@register.inclusion_tag('for_templatetags/trip_info.html', takes_context=True)
def trip_info(context, trip, show_participants_if_no_itinerary=False):
    """Render the trip-info template context: participants, leaders, cars,
    and flags controlling visibility of sensitive medical information."""
    participant = context['viewing_participant']

    # After a sufficiently long waiting period, hide medical information
    # (We could need medical info a day or two after a trip was due back)
    # Some trips last for multiple days (trip date is Friday, return is Sunday)
    # Because we only record a single trip date, give a few extra days' buffer
    is_old_trip = date_utils.local_date() > (trip.trip_date + timedelta(days=5))

    return {
        'trip': trip,
        # select_related avoids N+1 queries on emergency contact data.
        'participants': (
            trip.signed_up_participants.filter(signup__on_trip=True).select_related(
                'emergency_info__emergency_contact'
            )
        ),
        'trip_leaders': (
            trip.leaders.select_related('emergency_info__emergency_contact')
        ),
        'cars': get_cars(trip),
        'show_participants_if_no_itinerary': show_participants_if_no_itinerary,
        'hide_sensitive_info': is_old_trip,
        'is_trip_leader': perm_utils.leader_on_trip(participant, trip),
    }
|
4shadoww/stabilizerbot | core/rules/ores.py | Python | mit | 2,595 | 0.003083 | from core.rule_core import *
from core import yapi
from core.config_loader import cur_conf
class YunoModule:
    """Rule module that scores revisions using the ORES web service."""

    name = "ores"
    # Config version marker; not used within the code visible here.
    cfg_ver = None
    ores_api = yapi.ORES
    # Offline default rule set.  Each rule lists probability bounds per ORES
    # model; a revision satisfying every bound is awarded `score` with a
    # validity of `expiry` (presumably hours -- confirm against callers).
    config = [
        {
            "models": {
                "damaging": {"max_false": 0.15, "min_true": 0.8},
                "goodfaith": {"min_false": 0.8, "max_true": 0.15}
            },
            "score": 1,
            "expiry": 24
        }
    ]

    def load_config(self):
        """Load the rule configuration; online mode is not implemented yet."""
        if core.config.config_mode == "online":
            pass

    def getScores(self, rev):
        """Fetch ORES model scores for the new revision id in ``rev``.

        Returns the per-model score mapping for the revision, or ``False``
        when ORES still reports an error after the retries are exhausted.
        """
        tries = 2
        revid_data = 1
        # Check result and check for errors
        # If error faced then try again once
        for i in reversed(range(tries)):
            scores = self.ores_api.getScore([rev["revision"]["new"]])[cur_conf["core"]["lang"]+"wiki"]["scores"]
            revid_data = scores[str(rev["revision"]["new"])]
            for item in revid_data:
                if "error" in revid_data[item] and "scores" not in revid_data[item]:
                    if i <= 0:
                        # Out of retries: give up and report failure.
                        logger.error("failed to fetch ores revision data: %s" % str(revid_data))
                        return False
                    else:
                        # NOTE(review): this break only leaves the inner scan;
                        # the outer loop then retries the fetch.  On a clean
                        # first response the outer loop also runs again and
                        # refetches -- confirm that is intended.
                        break
        return revid_data

    def run(self, rev):
        """Score ``rev`` against every configured rule.

        Returns ``(score, expiry)``; ``(0, None)`` when no rule matched or
        the ORES lookup failed.
        """
        score = 0
        expiry = None
        revid_data = self.getScores(rev)
        if not revid_data:
            return score, expiry
        for rule in self.config:
            failed = False
            for item in rule["models"]:
                if failed:
                    break
                # A rule fails as soon as any probability bound is violated.
                for value in rule["models"][item]:
                    if value == "max_false" and rule["models"][item][value] < revid_data[item]["score"]["probability"]["false"]:
                        failed = True
                        break
                    elif value == "min_false" and rule["models"][item][value] > revid_data[item]["score"]["probability"]["false"]:
                        failed = True
                        break
                    elif value == "max_true" and rule["models"][item][value] < revid_data[item]["score"]["probability"]["true"]:
                        failed = True
                        break
                    elif value == "min_true" and rule["models"][item][value] > revid_data[item]["score"]["probability"]["true"]:
                        failed = True
                        break
            # Keep the highest score among all matching rules.
            if not failed and rule["score"] > score:
                score = rule["score"]
                expiry = rule["expiry"]
        return score, expiry
|
ZeitOnline/zeit.content.dynamicfolder | src/zeit/content/dynamicfolder/browser/tests/test_folder.py | Python | bsd-3-clause | 873 | 0 | import zeit.cms.interfaces
import zeit.cms.testing
import zeit.content.dynamicfolder.testing
class EditDynamicFolder(zeit.cms.testing.BrowserTestCase):
    """Browser test: check out, reconfigure and check in a dynamic folder."""

    layer = zeit.content.dynamicfolder.testing.DYNAMIC_LAYER

    def test_check_out_and_edit_folder(self):
        b = self.browser
        b.open('http://localhost/++skin++vivi/repository/dynamicfolder')
        b.getLink('Checkout').click()
        b.getControl(
            'Configuration file').value = 'http://xml.zeit.de/testcontent'
        b.getControl('Apply').click()
        # The edit form confirms a successful save with "Updated on ...".
        self.assertEllipsis('...Updated on...', b.contents)
        b.getLink('Checkin').click()
        # Checking in redirects back to the repository view.
        self.assertIn('repository', b.url)
        # The checked-in folder must reference the new configuration file.
        folder = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/dynamicfolder')
        self.assertEqual(
            'http://xml.zeit.de/testcontent', folder.config_file.uniqueId)
|
texta-tk/texta | account/migrations/0001_initial.py | Python | gpl-3.0 | 909 | 0.0033 | # Generated by Django 2.0.4 on 2019-01-10 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create ``Profile``, one-to-one with the user model."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                # Tokens are short random strings; blank until generated.
                ('auth_token', models.CharField(blank=True, default='', max_length=14)),
                ('email_confirmation_token', models.CharField(blank=True, default='', max_length=14)),
                ('email_confirmed', models.BooleanField(default=False)),
                # Deleting the user deletes the profile as well.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
onysos/django-nginx-config | django_nginx/management/commands/create_nginx_config.py | Python | mit | 8,249 | 0.002669 | # -*- coding: utf-8 -*-
"""
Created on 18 juil. 2013
"""
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
import sys
import os
from django.template import loader
from django.template.context import Context
import shutil
from pprint import pprint
import codecs
import itertools
import re
logger = logging.getLogger(__name__)
from optparse import make_option
class Command(BaseCommand):
args = '<destination>'
help = 'generate a ready-to-use nginx config for the current project'
option_list = BaseCommand.option_list + (
make_option(
'--socket',
action='store',
dest='socket',
default="/var/run/django/{fqdn}.sock",
help='the socket to use to contact gunicorn. can add {fqdn} to insert FQDN. default to /var/run/django/{fqdn}.sock'),
make_option(
'--workon_home',
action='store',
dest='workon_home',
default=None,
help='the directory of the workon home (where is located all virtualenv)'),
make_option(
'--forcesetting',
action='append',
dest='extra_settings',
default=[],
help='overide a value defaulted by the settings value. ie: --forcesetting=FQDN=myproj.exemple.com'),
make_option(
'--type',
action='store',
dest='type',
default="all",
help='the type of files to create. on of [ nginx, systemd, init, all ]'),
make_option(
'--no-buildout',
action='store_true',
dest='buildout',
default=False,
help="don't set path as if project was in a buildout cookpot"),
make_option(
'--no-upload-progress',
action='store_true',
dest='no_upload',
default=False,
help="disable upload-progress directive for nginx if this one don't support upload-progress module"),
make_option(
'--log-dir',
action='store',
dest='log-dir',
default=None,
help="l'emplacement des fichiers log a génére [/var/log/nginx/{FQDN}/]"),
) #
taken_from_settings = (
# (settings name, default value),
("ADMINISTRATOR_IP", "127.0.0.1"),
("FQDN"),
("SITE_NAME"),
("DOMAIN_NAME"),
("ADMINISTRATOR_IP"),
("DJANGO_ROOT"),
("SECURE_PREFIX"),
("STATIC_ROOT"),
("MEDIA_ROOT"),
("static_domain"),
)
template_files = dict(
nginx=[
"sub.domain.ext",
"sub.domain.ext.d/dynamic.conf",
"sub.domain.ext.d/static.conf",
"sub.domain.ext.d/static_location.conf",
"sub.domain.ext.d/gunicorn.conf",
"sub.domain.ext.d/extra/di.maintenance.conf",
"sub.domain.ext.d/extra/di.xsendfile.conf",
"sub.do | main.ext.d/switch_maintenance.sh"
],
init=["django_sub.domain.ext"],
systemd=[
"sub.domain.ext.service",
"sub.domain.ext.socket"
| ],
)
def handle(self, *args, **options):
if len(args) > 0:
dest = args[0]
else:
dest = "nginx_conf"
socket = options["socket"]
buildout = not options["buildout"]
workon_home = options["workon_home"]
if workon_home is None and not buildout:
try:
workon_home = os.environ["WORKON_HOME"]
self.stderr.write("guesing workon home with environ : %s" % workon_home)
except:
raise CommandError("impossible to get workon_home. pleas set an environement or with --workon_home")
# constructing overriden settings
extra_settings = {}
for opt_settings in options["extra_settings"]:
splited = opt_settings.split("=")
extra_settings[splited[0]] = "=".join(splited[1:])
regexp_external_url = re.compile("(https?:)?//(?P<domain>[^/]+)/")
match_static = regexp_external_url.match(settings.STATIC_URL)
match_media = regexp_external_url.match(settings.MEDIA_URL)
if (match_static is not None) != (match_media is not None):
raise CommandError("impossible de détérminer le domaine pour les fichiers static. votre MEDIA_URL et STATIC_URL ne concorde pas")
if match_static is not None:
if match_static.groupdict()["domain"] != match_media.groupdict()["domain"]:
raise CommandError("impossible de détérminer le domaine pour les fichiers static. votre MEDIA_URL et STATIC_URL ne concorde pas")
static_domain = match_media.groupdict()["domain"]
else:
static_domain = False
context = {
"buildout": buildout,
"WORKON_HOME": workon_home,
"ROOT_NGINX_PATH": os.path.abspath(dest),
"static_domain": static_domain
}
errors = False
for res in self.taken_from_settings:
if isinstance(res, (tuple, list)):
if res[0] in extra_settings:
context[res[0]] = extra_settings[res[0]]
continue
try:
settingsname, default = res
context[settingsname] = getattr(settings, settingsname, default)
except ValueError:
# dont have default value
try:
context[settingsname] = getattr(settings, settingsname)
except AttributeError:
errors = True
self.stderr.write('setting {0} absent from settings. try to ovenride it with --setting={0}=FOO'.format(settingsname))
else:
if res in extra_settings:
context[res] = extra_settings[res]
continue
try:
settingsname = res
context[settingsname] = getattr(settings, settingsname)
except AttributeError:
errors = True
self.stderr.write('setting {0} absent from settings. try to ovenride it with --setting={0}=FOO'.format(settingsname))
if errors:
raise CommandError("dont continu because of previous settings missing")
socket = socket.format(fqdn=context["FQDN"])
context["socket_path"] = socket
if socket.startswith("/"):
context["socket"] = "unix:%s" % socket
else:
context["socket"] = socket
context["NGINX_LOG_DIR"] = "/var/log/nginx/{fqdn}/".format(fqdn=context["FQDN"])
if options["log-dir"]:
context["NGINX_LOG_DIR"] = options["log-dir"]
context["upload"] = not options["no_upload"]
self.stdout.write("context variable used :")
pprint(context)
tmpl_context = Context(context)
# create arbo
try:
os.chdir(dest)
except:
os.makedirs(dest)
os.chdir(dest)
try:
os.mkdir("%s.d" % context["FQDN"])
except OSError:
pass
try:
os.mkdir(os.path.join("%s.d" % context["FQDN"], "extra"))
except OSError:
pass
files = []
if options["type"] == "all":
files = itertools.chain(*self.template_files.values())
else:
try:
files = self.template_files[options["type"]]
except KeyError:
raise CommandError("ce type de fichie n'existe pas : %s n'est pas parmis %s" % (options["type"], self.template_files.keys()))
for template_file in files:
with codecs.open(template_file.replace("sub.domain.ext", context["FQDN"]), "w", encoding="utf-8") as output:
template = loader.get_template("django_nginx/%s" % template_file)
print("writing %s" % os.path.join(dest, output.name))
output.write(temp |
etamponi/emetrics | emetrics/evaluation/test_random_subsets_experiment.py | Python | gpl-2.0 | 2,356 | 0.002971 | import pickle
import unittest
import numpy
from sklearn.tree.tree import D | ecisionTreeClassifier
from emetrics.correlation_s | core import CorrelationScore
from emetrics.evaluation.random_subsets_experiment import RandomSubsetsExperiment
__author__ = 'Emanuele Tamponi'
class RandomSubsetsExperimentTest(unittest.TestCase):
    """Integration tests for RandomSubsetsExperiment: result shape and reproducibility."""

    def test_results_in_shape(self):
        # 10 runs of 5-fold CV over 2-feature subsets with one scorer and
        # one classifier.
        experiment = RandomSubsetsExperiment(
            dataset="aggregation_score_dataset",
            subset_size=2,
            scorers=[("wilks", CorrelationScore())],
            classifiers=[("dt", DecisionTreeClassifier())],
            n_folds=5,
            n_runs=10
        )
        results = experiment.run(directory="test_files/")
        # One entry per configured scorer / classifier...
        self.assertEqual(["wilks"], list(results["scores"].keys()))
        self.assertEqual(["dt"], list(results["errors"].keys()))
        # ...with one value per run.
        self.assertEqual(10, len(results["scores"]["wilks"]))
        self.assertEqual(10, len(results["errors"]["dt"]))
        # Scores and errors are proportions in (0, 1); timings are positive.
        for score in results["scores"]["wilks"]:
            self.assertTrue(0 < score < 1)
        for error in results["errors"]["dt"]:
            self.assertTrue(0 < error < 1)
        for time in results["score_times"]["wilks"]:
            self.assertTrue(time > 0)
        for time in results["classifier_times"]["dt"]:
            self.assertTrue(time > 0)
        # The experiment object is embedded in its own results.
        self.assertEqual(experiment, results["experiment"])

    def test_results_reproducible(self):
        experiment = RandomSubsetsExperiment(
            dataset="aggregation_score_dataset",
            subset_size=2,
            scorers=[("wilks", CorrelationScore())],
            classifiers=[("dt", DecisionTreeClassifier())],
            n_folds=5,
            n_runs=10
        )
        results = experiment.run(directory="test_files/")
        # Re-running the embedded experiment must reproduce identical numbers.
        results2 = results["experiment"].run(directory="test_files/")
        numpy.testing.assert_array_equal(results["scores"]["wilks"], results2["scores"]["wilks"])
        numpy.testing.assert_array_equal(results["errors"]["dt"], results2["errors"]["dt"])
        # The same guarantee must hold after a pickle round-trip.
        dump = pickle.dumps(results)
        results2 = pickle.loads(dump)["experiment"].run(directory="test_files/")
        numpy.testing.assert_array_equal(results["scores"]["wilks"], results2["scores"]["wilks"])
        numpy.testing.assert_array_equal(results["errors"]["dt"], results2["errors"]["dt"])
|
yannrouillard/weboob | modules/imdb/browser.py | Python | agpl-3.0 | 8,535 | 0.001054 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob | . If not, see <http://www.gnu.org/licenses/>.
from HTMLParser import HTMLParser
from weboob.tools.browser import BaseBrowser, BrowserHTTPNotFound
from weboob.capabilities.base import NotAvailable, NotLoaded
from weboob.capabilities.cinema import Movie, Person
from weboob.tools.json import json
from .pages import PersonPage, MovieCrewPage, BiographyPage, FilmographyPage, ReleasePage
from datetime import datetime
__all__ = ['ImdbBrowse | r']
class ImdbBrowser(BaseBrowser):
DOMAIN = 'www.imdb.com'
PROTOCOL = 'http'
ENCODING = 'utf-8'
USER_AGENT = BaseBrowser.USER_AGENTS['wget']
PAGES = {
'http://www.imdb.com/title/tt[0-9]*/fullcredits.*': MovieCrewPage,
'http://www.imdb.com/title/tt[0-9]*/releaseinfo.*': ReleasePage,
'http://www.imdb.com/name/nm[0-9]*/*': PersonPage,
'http://www.imdb.com/name/nm[0-9]*/bio.*': BiographyPage,
'http://www.imdb.com/name/nm[0-9]*/filmo.*': FilmographyPage,
}
    def iter_movies(self, pattern):
        """Yield partially-filled Movie objects whose title matches *pattern*.

        Uses IMDb's JSON suggestion endpoint; only id, title and a short
        description are populated -- everything else stays NotLoaded.
        """
        res = self.readurl('http://www.imdb.com/xml/find?json=1&nr=1&tt=on&q=%s' % pattern.encode('utf-8'))
        jres = json.loads(res)
        htmlparser = HTMLParser()
        # Categories are scanned from best match quality to worst.
        for cat in ['title_popular', 'title_exact', 'title_approx']:
            if cat in jres:
                for m in jres[cat]:
                    tdesc = unicode(m['title_description'])
                    # The description may embed an <a> link; splice the link
                    # text back into the surrounding plain text.
                    if '<a' in tdesc and '>' in tdesc:
                        short_description = u'%s %s' % (tdesc.split('<')[
                            0].strip(', '), tdesc.split('>')[1].split('<')[0])
                    else:
                        short_description = tdesc.strip(', ')
                    movie = Movie(m['id'], htmlparser.unescape(m['title']))
                    movie.other_titles = NotLoaded
                    movie.release_date = NotLoaded
                    movie.duration = NotLoaded
                    movie.short_description = htmlparser.unescape(short_description)
                    movie.pitch = NotLoaded
                    movie.country = NotLoaded
                    movie.note = NotLoaded
                    movie.roles = NotLoaded
                    movie.all_release_dates = NotLoaded
                    movie.thumbnail_url = NotLoaded
                    yield movie
    def iter_persons(self, pattern):
        """Yield partially-filled Person objects whose name matches *pattern*.

        Same JSON suggestion endpoint as iter_movies, but querying names
        (``nm=on``) instead of titles.
        """
        res = self.readurl('http://www.imdb.com/xml/find?json=1&nr=1&nm=on&q=%s' % pattern.encode('utf-8'))
        jres = json.loads(res)
        htmlparser = HTMLParser()
        # Categories are scanned from best match quality to worst.
        for cat in ['name_popular', 'name_exact', 'name_approx']:
            if cat in jres:
                for p in jres[cat]:
                    person = Person(p['id'], htmlparser.unescape(unicode(p['name'])))
                    person.real_name = NotLoaded
                    person.birth_place = NotLoaded
                    person.birth_date = NotLoaded
                    person.death_date = NotLoaded
                    person.gender = NotLoaded
                    person.nationality = NotLoaded
                    person.short_biography = NotLoaded
                    person.short_description = htmlparser.unescape(p['description'])
                    person.roles = NotLoaded
                    person.thumbnail_url = NotLoaded
                    yield person
def get_movie(self, id):
res = self.readurl(
'http://imdbapi.org/?id=%s&type=json&plot=simple&episode=1&lang=en-US&aka=full&release=simple&business=0&tech=0' % id)
if res is not None:
jres = json.loads(res)
else:
return None
htmlparser = HTMLParser()
title = NotAvailable
duration = NotAvailable
release_date = NotAvailable
pitch = NotAvailable
country = NotAvailable
note = NotAvailable
short_description = NotAvailable
thumbnail_url = NotAvailable
other_titles = []
genres = []
roles = {}
if 'title' not in jres:
return
title = htmlparser.unescape(unicode(jres['title'].strip()))
if 'poster' in jres:
thumbnail_url = unicode(jres['poster'])
if 'directors' in jres:
short_description = unicode(', '.join(jres['directors']))
if 'genres' in jres:
for g in jres['genres']:
genres.append(g)
if 'runtime' in jres:
dur_str = jres['runtime'][0].split(':')
if len(dur_str) == 1:
duration = int(dur_str[0].split()[0])
else:
duration = int(dur_str[1].split()[0])
if 'also_known_as' in jres:
for other_t in jres['also_known_as']:
if 'country' in other_t and 'title' in other_t:
other_titles.append('%s : %s' % (other_t['country'], htmlparser.unescape(other_t['title'])))
if 'release_date' in jres:
dstr = str(jres['release_date'])
year = int(dstr[:4])
if year == 0:
year = 1
month = int(dstr[4:5])
if month == 0:
month = 1
day = int(dstr[-2:])
if day == 0:
day = 1
release_date = datetime(year, month, day)
if 'country' in jres:
country = u''
for c in jres['country']:
country += '%s, ' % c
country = country[:-2]
if 'plot_simple' in jres:
pitch = unicode(jres['plot_simple'])
if 'rating' in jres and 'rating_count' in jres:
note = u'%s/10 (%s votes)' % (jres['rating'], jres['rating_count'])
for r in ['actor', 'director', 'writer']:
if '%ss' % r in jres:
roles['%s' % r] = list(jres['%ss' % r])
movie = Movie(id, title)
movie.other_titles = other_titles
movie.release_date = release_date
movie.duration = duration
movie.genres = genres
movie.pitch = pitch
movie.country = country
movie.note = note
movie.roles = roles
movie.short_description = short_description
movie.all_release_dates = NotLoaded
movie.thumbnail_url = thumbnail_url
return movie
    def get_person(self, id):
        """Return the Person for *id*, or None when the page does not exist."""
        try:
            self.location('http://www.imdb.com/name/%s' % id)
        except BrowserHTTPNotFound:
            return
        assert self.is_on_page(PersonPage)
        return self.page.get_person(id)
    def get_person_biography(self, id):
        """Return the biography scraped from the person's bio page."""
        self.location('http://www.imdb.com/name/%s/bio' % id)
        assert self.is_on_page(BiographyPage)
        return self.page.get_biography()
    def iter_movie_persons(self, movie_id, role):
        """Yield the persons credited on *movie_id*, filtered by *role*."""
        self.location('http://www.imdb.com/title/%s/fullcredits' % movie_id)
        assert self.is_on_page(MovieCrewPage)
        for p in self.page.iter_persons(role):
            yield p
    def iter_person_movies(self, person_id, role):
        """Return an iterator over *person_id*'s filmography for *role*."""
        self.location('http://www.imdb.com/name/%s/filmotype' % person_id)
        assert self.is_on_page(FilmographyPage)
        return self.page.iter_movies(role)
    def iter_person_movies_ids(self, person_id):
        """Yield the IMDb id of every movie in *person_id*'s filmography."""
        self.location('http://www.imdb.com/name/%s/filmotype' % person_id)
        assert self.is_on_page(FilmographyPage)
        for movie in self.page.iter_movies_ids():
            yield movie
def iter_movie_persons_ids(self, movie_id):
self.location('http://www.imdb.com/title/%s/fullcredi |
lvdongbing/python-bileanclient | bileanclient/tests/unit/test_http.py | Python | apache-2.0 | 16,420 | 0.000914 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import json
from keystoneclient.auth import token_endpoint
from keystoneclient import session
import mock
import requests
from requests_mock.contrib import fixture
import six
from six.moves.urllib import parse
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
import testtools
from testtools import matchers
import types
import bileanclient
from bileanclient.common import http
from bileanclient.tests.unit import utils
def original_only(f):
    """Decorator that skips a test when the client is the session flavour.

    Session clients lack the ``log_curl_request`` attribute, so tests about
    request logging only make sense for the original HTTPClient.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self.client, 'log_curl_request'):
            self.skipTest('Skip logging tests for session client')
        return f(self, *args, **kwargs)
    # BUG FIX: the decorator previously fell off the end without returning
    # `wrapper`, so every decorated test method was replaced with None.
    return wrapper
class TestClient(testtools.TestCase):
scenarios = [
('httpclient', {'create_client': '_create_http_client'}),
('session', {'create_client': '_create_session_client'})
]
    def _create_http_client(self):
        # Legacy flavour: token handed straight to HTTPClient.
        return http.HTTPClient(self.endpoint, token=self.token)
    def _create_session_client(self):
        # Keystone-session flavour: the auth plugin carries endpoint + token.
        auth = token_endpoint.Token(self.endpoint, self.token)
        sess = session.Session(auth=auth)
        return http.SessionClient(sess)
    def setUp(self):
        super(TestClient, self).setUp()
        # requests_mock fixture intercepts every HTTP call the client makes.
        self.mock = self.useFixture(fixture.Fixture())
        self.endpoint = 'http://example.com:8770'
        self.ssl_endpoint = 'https://example.com:8770'
        self.token = u'abc123'
        # `create_client` names the factory method for the current scenario
        # (httpclient vs session).
        self.client = getattr(self, self.create_client)()
def test_identity_headers_and_token(self):
identity_headers = {
'X-Auth-Token': 'auth_token',
'X-User-Id': 'user',
'X-Tenant-Id': 'tenant',
'X-Roles': 'roles',
'X-Identity-Status': 'Confirmed',
'X-Service-Catalog': 'service_catalog',
}
# with token
kwargs = {'token': u'fake-token',
'identity_headers': identity_headers}
http_client_object = http.HTTPClient(self.endpoint, **kwargs)
self.assertEqual('auth_token', http_client_object.auth_token)
self.assertTrue(http_client_object.identity_headers.
get('X-Auth-Token') is None)
def test_identity_headers_and_no_token_in_header(self):
identity_headers = {
'X-User-Id': 'user',
'X-Tenant-Id': 'tenant',
'X-Roles': 'roles',
'X-Identity-Status': 'Confirmed',
'X-Service-Catalog': 'service_catalog',
}
# without X-Auth-Token in identity headers
kwargs = {'token': u'fake-token',
'identity_headers': identity_headers}
http_client_object = http.HTTPClient(self.endpoint, **kwargs)
self.assertEqual(u'fake-token', http_client_object.auth_token)
self.assertTrue(http_client_object.identity_headers.
get('X-Auth-Token') is None)
def test_identity_headers_and_no_token_in_session_header(self):
# Tests that if token or X-Auth-Token are not provided in the kwargs
# when creating the http client, the session headers don't contain
# the X-Auth-Token key.
identity_headers = {
'X-User-Id': 'user',
'X-Tenant-Id': 'tenant',
'X-Roles': 'roles',
'X-Identity-Status': 'Confirmed',
'X-Service-Catalog': 'service_catalog',
}
kwargs = {'identity_headers': identity_headers}
http_client_object = http.HTTPClient(self.endpoint, **kwargs)
self.assertIsNone(http_client_object.auth_token)
self.assertNotIn('X-Auth-Token', http_client_object.session.headers)
def test_identity_headers_are_passed(self):
# Tests that if token or X-Auth-Token are not provided in the kwargs
# when creating the http client, the session headers don't contain
# the X-Auth-Token key.
identity_headers = {
'X-User-Id': b'user',
'X-Tenant-Id': b'tenant',
'X-Roles': b'roles',
'X-Identity-Status': b'Confirmed',
'X-Service-Catalog': b'service_catalog',
}
kwargs = {'identity_headers': identity_headers}
http_client = http.HTTPClient(self.endpoint, **kwargs)
path = '/users/user_id'
self.mock.get(self.endpoint + path)
http_client.get(path)
headers = self.mock.last_request.headers
for k, v in six.iteritems(identity_headers):
self.assertEqual(v, headers[k])
def test_language_header_passed(self):
kwargs = {'language_header': 'nb_NO'}
http_client = http.HTTPClient(self.endpoint, **kwargs)
path = '/users/user_id'
self.mock.get(self.endpoint + path)
http_client.get(path)
headers = self.mock.last_request.headers
self.assertEqual(kwargs['language_header'], headers['Accept-Language'])
def test_language_header_not_passed_no_language(self):
kwargs = {}
http_client = http.HTTPClient(self.endpoint, **kwargs)
path = '/1/users/user_id'
self.mock.get(self.endpoint + path)
http_client.get(path)
headers = self.mock.last_request.headers
self.assertTrue('Accept-Language' not in headers)
def test_connection_timeout(self):
"""Should receive an InvalidEndpoint if connection timeout."""
def cb(request, context):
raise requests.exceptions.Timeout
path = '/users'
self.mock.get(self.endpoint + path, text=cb)
comm_err = self.assertRaises(bileanclient.exc.InvalidEndpoint,
self.client.get,
'/users')
self.assertI | n(self.endpoint, comm_err.message)
def test_connection_refused(self):
"""
Should receive a CommunicationError if connection refused.
And the error should list the host and port that refused the
connection
"""
def cb(request, context):
raise requests.excep | tions.ConnectionError()
path = '/events?limit=20'
self.mock.get(self.endpoint + path, text=cb)
comm_err = self.assertRaises(bileanclient.exc.CommunicationError,
self.client.get,
'/events?limit=20')
self.assertIn(self.endpoint, comm_err.message)
def test_http_encoding(self):
path = '/users'
text = 'Ok'
self.mock.get(self.endpoint + path, text=text,
headers={"Content-Type": "text/plain"})
headers = {"test": u'ni\xf1o'}
resp, body = self.client.get(path, headers=headers)
self.assertEqual(text, resp.text)
def test_headers_encoding(self):
if not hasattr(self.client, 'encode_headers'):
self.skipTest('Cannot do header encoding check on SessionClient')
value = u'ni\xf1o'
headers = {"test": value, "none-val": None}
encoded = self.client.encode_headers(headers)
self.assertEqual(b"ni\xc3\xb1o", encoded[b"test"])
self.assertNotIn("none-val", encoded)
def test_raw_request(self):
"""Verify the path being used for HTTP requests reflects accurately."""
headers = {"Content-Type": "text/plain"}
text = 'Ok'
path = '/users'
self.mock.get(self.endpoint + path, text=text, headers=headers)
resp, body = self.client.get('/users', headers=headers)
self.assertEqual(headers, resp.headers)
|
edeposit/edeposit.amqp.rest | bin/edeposit_rest_runzeo.py | Python | mit | 785 | 0.001274 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import sys
import os.path
import subprocess
# Make the in-tree package importable when running from a source checkout.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src/edeposit/amqp"))

# Prefer the in-tree `rest` package; fall back to the installed distribution.
try:
    from rest import settings
except ImportError:
    from edeposit.amqp.rest import settings

# Variables ===================================================================
# Fail early with an explanatory message when the ZEO config path is unset.
assert settings.ZEO_SERVER_CONF_FILE, settings._format_error(
    "ZEO_SERVER_CONF_FILE",
    settings.ZEO_SERVER_CONF_FILE
)

# Main program ================================================================
if __name__ == '__main__':
    # Delegate to the `runzeo` CLI, pointing it at the configured ZEO file.
    subprocess.check_call(["runzeo", "-C", settings.ZEO_SERVER_CONF_FILE])
endlessm/chromium-browser | third_party/catapult/systrace/profile_chrome/chrome_startup_tracing_agent_unittest.py | Python | bsd-3-clause | 1,066 | 0.010319 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import j | son
from profile_chrome import chrome_startup_tracing_agent
from systrace import decorators
from systrace.tracing_agents import agents_unittest
class ChromeAgentTest(agents_unittest.BaseAgentTest):
    """Smoke test for ChromeStartupTracingAgent: start, stop, parse results."""

    # TODO(washingtonp): This test seems to fail on the version of Android
    # currently on the Trybot servers (KTU84P), although it works on Android M.
    # Either upgrade the version of Android on the Trybot servers or determine
    # if there is a way to run this agent on Android KTU84P.
    @decorators.Disabled
    def testTracing(self):
        agent = chrome_startup_tracing_agent.ChromeStartupTracingAgent(
            self.device, self.package_info,
            '',  # webapk_package
            False,  # cold
            'https://www.google.com'  # url
        )
        try:
            agent.StartAgentTracing(None)
        finally:
            # Always stop tracing, even if starting raised.
            agent.StopAgentTracing()
        result = agent.GetResults()
        # The raw trace must at least be valid JSON.
        json.loads(result.raw_data)
|
harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/swf/test_cert_verification.py | Python | gpl-3.0 | 1,553 | 0 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import u | nittest
from tests.integration import ServiceCertVerificationTe | st
import boto.swf
class SWFCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Check that SSL certificates validate on every SWF region endpoint."""

    swf = True
    regions = boto.swf.regions()

    def sample_service_call(self, conn):
        # Any lightweight authenticated call is enough to exercise the TLS
        # handshake against the region endpoint.
        conn.list_domains('REGISTERED')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.