commit stringlengths 40 40 | subject stringlengths 1 3.25k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | old_contents stringlengths 0 26.3k | lang stringclasses 3 values | proba float64 0 1 | diff stringlengths 0 7.82k |
|---|---|---|---|---|---|---|---|
d951b11e9991c021e631299f0e22da8eb4c7d850 | comment out post-checkout undo demonstration | main.py | main.py | # this is the main file that get called
import os
import sys
import gitTA as git
import colorama
from colorama import Fore, Back # add color output to terminal: we want anything printed to be VERY visible to user
colorama.init() # called so that windows colors work
'''
modify this file! When git runs certain commands, it will run THIS main.py
which will trigger the functions you've decorated here with gitta.listen('event-name')
your methods can listen for the following events:
pre-push, pre-commit, # pre-x methods can be aborted by raising an exception
post-commit, post-checkout, post-merge
'''
# pre-* events can be aborted by raising an exception ???
@git.listen('pre-push')
def prepush(*args, **kwargs):
print(Fore.GREEN) # set so that ALL next prints will be green
print(args, kwargs)
@git.listen('pre-commit')
def precommit(*args, **kwargs):
print(Fore.GREEN)
print(args, kwargs)
@git.listen('post-commit')
def postcommit(*args, **kwargs):
print(Fore.GREEN)
print(args, kwargs)
@git.listen('post-checkout')
def postcheckout(*args, **kwargs):
print(Fore.GREEN) # set so that ALL next prints will be green
print(args, kwargs)
branches = git.Branch()
branches.undo_checkout(*args, **kwargs)
@git.listen('post-merge')
def postmerge(*args, **kwargs):
print(args, kwargs)
if __name__ == '__main__':
git.trigger(45, event='post-checkout') # example of what might get passed to postcheckout
# the garbled message that appears before (45, ) is the Fore.GREEN. On normal terminals this garbled output will NOT appear
# ['.gitta/py/main.py', 'pre-push', 'origin', 'https://github.com/lancekindle/test.git']
# ['.gitta/py/main.py', 'pre-commit']
# ['.gitta/py/main.py', 'post-commit']
| Python | 0 | @@ -1170,16 +1170,17 @@
kwargs)%0A
+#
bran
@@ -1199,16 +1199,17 @@
ranch()%0A
+#
bran
|
1624504bd966eaf47698938e387a58dd14738a76 | add warnings about deprecation of compiler specific template tags | static_precompiler/templatetags/compile_static.py | static_precompiler/templatetags/compile_static.py | import six
from django.template import Library
from django.templatetags.static import static
from static_precompiler.settings import PREPEND_STATIC_URL, USE_CACHE, CACHE_TIMEOUT
from static_precompiler.utils import compile_static, get_compiler_by_name, get_cache_key, get_hexdigest, get_cache
from static_precompiler.templatetags.base import container_tag
register = Library()
@register.simple_tag(name="compile")
def compile_tag(source_path, compiler=None):
if compiler:
compiled = compiler.compile(source_path)
else:
compiled = compile_static(source_path)
if PREPEND_STATIC_URL:
compiled = static(compiled)
return compiled
@container_tag(register)
def inlinecompile(nodelist, context, compiler):
source = nodelist.render(context)
if isinstance(compiler, six.string_types):
compiler = get_compiler_by_name(compiler)
if USE_CACHE:
cache_key = get_cache_key("{0}.{1}".format(
compiler.__class__.__name__,
get_hexdigest(source)
))
cache = get_cache()
cached = cache.get(cache_key, None)
if cached is not None:
return cached
output = compiler.compile_source(source)
cache.set(cache_key, output, CACHE_TIMEOUT)
return output
return compiler.compile_source(source)
def register_compiler_tags(register, compiler):
@register.simple_tag(name=compiler.name)
def tag(source_path):
return compile_tag(source_path, compiler)
@container_tag(register, name="inline" + compiler.name)
def inline_tag(nodelist, context):
return inlinecompile(nodelist, context, compiler)
| Python | 0 | @@ -3,16 +3,32 @@
port six
+%0Aimport warnings
%0A%0Afrom d
@@ -1345,16 +1345,223 @@
urce)%0A%0A%0A
+def _warn(old, new):%0A warnings.warn(%0A %22%7B%25%25 %25s %25%25%7D tag has been deprecated, use %7B%25%25 %25s %25%25%7D %22%0A %22from %60compile_static%60 template tag library instead.%22 %25 (old, new),%0A UserWarning,%0A )%0A%0A%0A
def regi
@@ -1596,24 +1596,24 @@
compiler):%0A
-
@registe
@@ -1671,16 +1671,56 @@
_path):%0A
+ _warn(compiler.name, 'compile')%0A
@@ -1814,32 +1814,32 @@
compiler.name)%0A
-
def inline_t
@@ -1861,16 +1861,96 @@
ntext):%0A
+ _warn('inline%25s' %25 compiler.name, 'inlinecompile %22%25s%22' %25 compiler.name)%0A
|
437643d0f0680470b52ce893555df5dac17bdca1 | use selenium for loading js content | main.py | main.py | import urllib.request
result=urllib.request.urlopen("https://www.expedia.de/Flights-Search?trip=roundtrip&leg1=from:Hamburg,%20Deutschland%20(HAM-Alle%20Flugh%C3%A4fen),to:Amman,%20Jordanien%20(AMM-Queen%20Alia%20Intl.),departure:08.03.2017TANYT&leg2=from:Amman,%20Jordanien%20(AMM-Queen%20Alia%20Intl.),to:Hamburg,%20Deutschland%20(HAM-Alle%20Flugh%C3%A4fen),departure:26.03.2017TANYT&passengers=children:0,adults:2,seniors:0,infantinlap:Y&mode=search").read()
print(result)
| Python | 0 | @@ -4,53 +4,132 @@
ort
-urllib.request%0A%0Aresult=urllib.request.urlopen
+time %0Afrom bs4 import BeautifulSoup%0Afrom selenium import webdriver%0A%0A%0Abrowser = webdriver.Firefox()%0A%0AffResults = browser.get
(%22ht
@@ -146,18 +146,19 @@
expedia.
-de
+com
/Flights
@@ -197,35 +197,31 @@
:Hamburg,%2520
-Deutschland
+Germany
%2520(HAM-Alle
@@ -219,34 +219,27 @@
(HAM-All
-e
%2520
-Flugh%25C3%25A4fen
+Airports
),to:Amm
@@ -242,35 +242,32 @@
:Amman,%2520Jordan
-ien
%2520(AMM-Queen%2520
@@ -295,13 +295,13 @@
re:0
-8.03.
+3/08/
2017
@@ -335,11 +335,8 @@
rdan
-ien
%2520(
@@ -379,19 +379,15 @@
,%2520
-Deutschland
+Germany
%2520(
@@ -397,26 +397,19 @@
-All
-e
%2520
-Flugh%25C3%25A4fen
+Airports
),de
@@ -420,14 +420,14 @@
ure:
-26.03.
+03/24/
2017
@@ -443,16 +443,25 @@
sengers=
+adults:2,
children
@@ -463,25 +463,16 @@
ldren:0,
-adults:2,
seniors:
@@ -504,17 +504,306 @@
ch%22)
-.read()%0A%0A
+%0A%0Atime.sleep(15)%0A%0Afull_content = browser.execute_script(%22return document.getElementsByTagName('html')%5B0%5D.innerHTML%22)%0A%0Abrowser.quit()%0A%0Asoup = BeautifulSoup(full_content, %22lxml%22 )%0A%0Aprint(soup.find_all('span', class_='dollars'))%0A%0A#for dollar in dollars_copy:%0A# print(dollar.text)%0A#print(dollars)%0A%0A#
prin
|
1550660e39ded9cbcaf0ad429f01f2803f3c5256 | Add a register function prior to enacting reporting | main.py | main.py | #!/usr/bin/python
import os
import sys
import json
import time
import sched
import socket
import psutil
from lib import cpu, memory, disks, network, system, transport
_cache = []
_cache_timer = 0
_cache_keeper = 0
def main(scheduler, config, sock, hostname, callers):
global _cache
global _cache_timer
global _cache_keeper
payload = {
"_id": {
"time": time.time(),
"id": config['identification']['id'],
"hostname": hostname,
"type": config['identification']['type']
},
"cpu": callers['cpu'].snapshot(),
"memory": callers['memory'].snapshot(),
"disks": callers['disks'].snapshot(),
"network": callers['network'].snapshot(),
"system": callers['system'].snapshot()
}
_cache.append(payload)
if _cache_keeper < _cache_timer:
_cache_keeper += config['interval']
else:
transport.Transport({ "payload": json.dumps(_cache) }, config, sock)
_cache_keeper = 0
_cache = []
# Schedule a new run at the specified interval
scheduler.enter(config['interval'], 1, main, (scheduler, config, sock, hostname, callers))
scheduler.run()
if __name__ == '__main__':
try:
config = (json.loads(open(os.path.dirname(os.path.abspath(__file__)) + "/config.json").read()))['config']
config['identification']['type'] = config['identification'].get('type', 'false')
config['disable_cache'] = False
if config['cache'].get('enabled') is True:
_cache_timer = config['cache'].get('time_seconds_to_cache_between_sends', 60)
config['interval'] = config['cache'].get('interval_seconds_between_captures', 5)
# If the interval is higher, just exit
if config['interval'] > _cache_timer:
print >> sys.stderr, "Report interval is higher than cache timer."
sys.exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
scheduler = sched.scheduler(time.time, time.sleep)
hostname = config['identification'].get('hostname', socket.gethostname())
callers = {
"cpu": cpu.CPU(psutil),
"memory": memory.Memory(psutil),
"disks": disks.Disks(psutil),
"network": network.Network(psutil),
"system": system.System(psutil)
}
main(scheduler, config, sock, hostname, callers)
except KeyboardInterrupt:
print >> sys.stderr, '\nExiting by user request.\n'
sys.exit(0)
except Exception as e:
location = '\n' + type(e).__name__
print >> sys.stderr, location, '=>', str(e)
sys.exit(1)
| Python | 0 | @@ -8,24 +8,49 @@
bin/python%0D%0A
+from hashlib import md5%0D%0A
import os%0D%0Ai
@@ -252,16 +252,30 @@
er = 0%0D%0A
+_version = 1.0
%0D%0A%0D%0Adef
@@ -394,16 +394,37 @@
_keeper%0D
+%0A global _version%0D
%0A%0D%0A p
@@ -879,15 +879,83 @@
ot()
-%0D%0A %7D
+,%0D%0A %22version%22: _version%0D%0A %7D%0D%0A payload%5B'digest'%5D = md5(payload)
%0D%0A
@@ -1377,16 +1377,131 @@
()%0D%0A%0D%0A%0D%0A
+def register(config):%0D%0A %22%22%22%0D%0A Register this server/device with the mothership%0D%0A %22%22%22%0D%0A return True%0D%0A%0D%0A%0D%0A
if __nam
@@ -2691,24 +2691,52 @@
%0A %7D%0D%0A
+ if register():%0D%0A
main
|
1cab65aba369263904607738cd69b2ad7d6a8e63 | change web framework from wsgi to cgi | main.py | main.py | #!/usr/bin/env python
# coding=utf-8
from datetime import date
import time
from webapp.web import Application, BaseHandler
URLS = (
("/", "Index"),
("/hello/(.*)", "Hello"),
)
class Index(BaseHandler):
def get(self):
header = "Content-type:text/html\r\n\r\n"
# self.write(header+"Welcome~")
body = self.wrap_html('static/index.html')
self.write(header)
self.write(body)
class Hello(BaseHandler):
def get(self, name):
params = {'name': name, 'date': date.today(), 'time': time.time()}
header = "Content-type:text/html\r\n\r\n"
body = self.wrap_html('static/hello.html', params)
self.write(header)
self.write(body)
if __name__ == '__main__':
app = Application(globals(), URLS)
app.run()
| Python | 0.000016 | |
24d4fee92c1c2ff4bac1fe09d9b436748234a48c | Add argument for execution of defective server. | main.py | main.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
""" Main script. Executes the XML Server implementation with an HTTP
connection and default parameters.
"""
import sys
import argparse
from server import xml_server
from connection import http_connection
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=8080,
help="server's HTTP port")
parser.add_argument('--sensordata', type=str,
default='server/sensor_data.csv', help="sensor data file")
if __name__ == '__main__':
args = parser.parse_args()
server = xml_server.XMLServer(args.sensordata)
connection = http_connection.HttpConnection(server, port=args.port)
| Python | 0 | @@ -227,16 +227,35 @@
l_server
+, defective_servers
%0Afrom co
@@ -529,16 +529,73 @@
a file%22)
+%0Aparser.add_argument('--randomloss', action='store_true')
%0A%0Aif __n
@@ -646,16 +646,126 @@
_args()%0A
+ if args.randomloss:%0A server = defective_servers.RandomLossXMLServer(args.sensordata)%0A else:%0A
serv
|
52c2205804d8dc38447bca1ccbf5599e00cd1d7b | Rename user_id config key to admin_user_id | main.py | main.py | #!/usr/bin/env python3
import requests
CONFIG_DIR = "config"
class Bot:
def __init__(self):
self.config = Config(CONFIG_DIR)
self.api = TelegramBotApi(self.config.get_auth_token())
def run(self):
self.api.send_message(self.config.get_user_id(), "test")
class TelegramBotApi:
def __init__(self, auth_token):
self.base_url = "https://api.telegram.org/bot" + auth_token + "/"
def send_message(self, chat_id, text):
self.__send_request("sendMessage", chat_id=chat_id, text=text)
def __send_request(self, command, **params):
requests.get(self.base_url + command, params=params)
class Config:
def __init__(self, config_dir):
self.config_dir = config_dir + "/"
def get_auth_token(self):
return self.__get_config_value("auth_token")
def get_user_id(self):
return self.__get_config_value("user_id")
def __get_config_value(self, config_key):
return open(self.config_dir + config_key).read().strip()
if __name__ == "__main__":
Bot().run()
| Python | 0.005562 | @@ -260,24 +260,30 @@
.config.get_
+admin_
user_id(), %22
@@ -841,16 +841,22 @@
def get_
+admin_
user_id(
@@ -902,16 +902,22 @@
_value(%22
+admin_
user_id%22
|
2ffb390ae3fa31a2240d10abd4028c48602e852b | Add docstrings for main module | main.py | main.py | from random import shuffle
from res import types
from src import ai
from src import coordinate
from src import historynode
from src import interface
from settings import (DISPLAYEVALUATION,
SEARCHPLY,
STANDARD,
USERPLAYSFOX)
def aPlayerHasWon(game):
if game.geeseWinP():
print("Geese win!")
return True
elif game.foxesWinP():
print("Foxes win!")
return True
return False
def determineDraw(game, ai):
if ai.evaluationFunction(game, True) is None:
print("Game is a draw")
return True
return False
def setTwoRandomFoxCoordinatesInVictoryArea(game):
possibleCoordinates = []
for x in range(3, 6):
for y in range(1, 4):
possibleCoordinates.append(coordinate.Coordinate(x, y))
shuffle(possibleCoordinates)
game.setState(possibleCoordinates.pop(), types.FOX)
game.setState(possibleCoordinates.pop(), types.FOX)
def createStartingPosition(standard):
"""
7 G - G - G
| \ | / |
6 G - G - G
| / | \ |
5 G - G - G - G - G - G - G
| \ | / | \ | / | \ | / |
4 G - G - G - G - G - G - G
| / | \ | / | \ | / | \ |
3 G - G - . - . - . - G - G
| \ | / |
2 . - . - .
| / | \ |
1 F - . - F
1 2 3 4 5 6 7
"""
game = historynode.HistoryNode()
game.setState(coordinate.Coordinate(3, 7), types.GOOSE)
game.setState(coordinate.Coordinate(4, 7), types.GOOSE)
game.setState(coordinate.Coordinate(5, 7), types.GOOSE)
game.setState(coordinate.Coordinate(3, 6), types.GOOSE)
game.setState(coordinate.Coordinate(4, 6), types.GOOSE)
game.setState(coordinate.Coordinate(5, 6), types.GOOSE)
game.setState(coordinate.Coordinate(1, 5), types.GOOSE)
game.setState(coordinate.Coordinate(2, 5), types.GOOSE)
game.setState(coordinate.Coordinate(3, 5), types.GOOSE)
game.setState(coordinate.Coordinate(4, 5), types.GOOSE)
game.setState(coordinate.Coordinate(5, 5), types.GOOSE)
game.setState(coordinate.Coordinate(6, 5), types.GOOSE)
game.setState(coordinate.Coordinate(7, 5), types.GOOSE)
game.setState(coordinate.Coordinate(1, 4), types.GOOSE)
game.setState(coordinate.Coordinate(2, 4), types.GOOSE)
game.setState(coordinate.Coordinate(3, 4), types.GOOSE)
game.setState(coordinate.Coordinate(4, 4), types.GOOSE)
game.setState(coordinate.Coordinate(5, 4), types.GOOSE)
game.setState(coordinate.Coordinate(6, 4), types.GOOSE)
game.setState(coordinate.Coordinate(7, 4), types.GOOSE)
game.setState(coordinate.Coordinate(1, 3), types.GOOSE)
game.setState(coordinate.Coordinate(2, 3), types.GOOSE)
game.setState(coordinate.Coordinate(6, 3), types.GOOSE)
game.setState(coordinate.Coordinate(7, 3), types.GOOSE)
if standard:
game.setState(coordinate.Coordinate(3, 1), types.FOX)
game.setState(coordinate.Coordinate(5, 1), types.FOX)
else:
setTwoRandomFoxCoordinatesInVictoryArea(game)
return game
if __name__ == '__main__':
game = createStartingPosition(STANDARD)
firstTurn = True
aiObject = ai.AI()
if USERPLAYSFOX:
computersTurn = True
else:
computersTurn = False
while(True):
if not firstTurn:
game.pretty_print_board()
print("----------------------------")
elif firstTurn and USERPLAYSFOX:
game.pretty_print_board()
print("----------------------------")
if aPlayerHasWon(game):
break
elif determineDraw(game, aiObject):
break
if computersTurn:
game = aiObject.iterativeDeepeningSearch(game,
USERPLAYSFOX,
SEARCHPLY)
computersTurn = False
game.pretty_print_board()
if aPlayerHasWon(game):
break
elif determineDraw(game, aiObject):
break
if not game.score:
game.score = aiObject.evaluationFunction(game)
if DISPLAYEVALUATION:
print("Score: {0:.2f}".format(game.score))
legalMoves = aiObject.getAllMovesForPlayer(game,
not USERPLAYSFOX)
while(True):
userInput = input('Enter a move: ')
result = interface.getPositionFromListOfMoves(game,
legalMoves,
str(userInput),
not USERPLAYSFOX)
if len(result) != 1:
print("Unknown or invalid move, please try again")
continue
else:
game = result[0]
computersTurn = True
break
firstTurn = False
| Python | 0 | @@ -308,24 +308,80 @@
sWon(game):%0A
+ %22%22%22 Check game state to see if a player has won %22%22%22%0A
if game.
@@ -385,32 +385,32 @@
me.geeseWinP():%0A
-
print(%22G
@@ -563,16 +563,67 @@
e, ai):%0A
+ %22%22%22 Check game state to see if it is drawn %22%22%22%0A
if a
@@ -781,24 +781,73 @@
Area(game):%0A
+ %22%22%22 Randomize the fox starting positions %22%22%22%0A
possible
@@ -1171,24 +1171,24 @@
ndard):%0A
-
%22%22%22%0A
7
@@ -1175,24 +1175,131 @@
d):%0A %22%22%22%0A
+ Generates the starting position board. Can optionally randomize fox%0A positions if standard is true.%0A
7
@@ -3412,16 +3412,87 @@
ain__':%0A
+ %22%22%22 Main game loop. Play alternates between user and computer. %22%22%22%0A
game
|
8cbe375b478764f05e67b3d5600ca51bbd5b5c48 | enable 'inline_defnode_calls' optimisation for benchmarks (even though they don't benefit currently) | Demos/benchmarks/setup.py | Demos/benchmarks/setup.py | from distutils.core import setup
from Cython.Build import cythonize
setup(
name = 'benchmarks',
ext_modules = cythonize("*.py", language_level=3, annotate=True),
)
| Python | 0 | @@ -62,16 +62,76 @@
honize%0A%0A
+directives = %7B%0A 'optimize.inline_defnode_calls': True%0A%7D%0A%0A
setup(%0A
@@ -217,13 +217,71 @@
ate=True
+,%0A compiler_directives=directives
),%0A)%0A
|
0389759b9b300c5a0cc807e9d6d154e757abecad | make sentry optional | main.py | main.py | import logging
from time import mktime
import feedparser
import sys
import yaml
from raven import Client
from wallabag_api.wallabag import Wallabag
import github_stars
import golem_top
logger = logging.getLogger()
logger.handlers = []
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.WARNING)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler('debug.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
with open("config.yaml", 'r') as stream:
try:
config = yaml.load(stream)
except (yaml.YAMLError, FileNotFoundError) as exception:
logger.error(exception)
config = None
exit(1)
with open("sites.yaml", 'r') as stream:
try:
sites = yaml.load(stream)
except (yaml.YAMLError, FileNotFoundError) as exception:
logger.error(exception)
sites = None
exit(1)
client = Client(
dsn=config["sentry_url"],
processors=(
'raven.processors.SanitizePasswordsProcessor',
)
)
token = Wallabag.get_token(**config["wallabag"])
wall = Wallabag(host=config["wallabag"]["host"], client_secret=config["wallabag"]["client_secret"],
client_id=config["wallabag"]["client_id"], token=token)
sites = github_stars.get_starred_repos(config["github_username"], sites)
for sitetitle, site in sites.items():
logger.info(sitetitle + ": Downloading feed")
# r = requests.get(site["url"])
logger.info(sitetitle + ": Parsing feed")
f = feedparser.parse(site["url"])
logger.debug(sitetitle + ": finished parsing")
# feedtitle = f["feed"]["title"]
if "latest_article" in site:
for article in f.entries:
if article.title == site["latest_article"]:
logger.debug("already added: " + article.title)
break
logger.info(article.title + ": article found")
taglist = [sitetitle]
if site["tags"]:
taglist.extend(site["tags"])
tags = ",".join(taglist)
if "published_parsed" in article:
published = mktime(article.published_parsed)
elif "updated_parsed" in article:
published = mktime(article.updated_parsed)
else:
published = None
logger.info(article.title + ": add to wallabag")
if "github" in site and site["github"]:
title = sitetitle + ": " + article.title
else:
title = article.title
# wall.post_entries(url=article.link, title=title, tags=tags)
else:
logger.debug(sitetitle + ": no latest_article")
if f.entries:
sites[sitetitle]["latest_article"] = f.entries[0].title
# articles = golem_top.get_top_articles()
# params = {
# 'access_token': wall.token,
# "urls[]": articles
# }
# response = wall.query("/api/entries/exists.{format}".format(format=wall.format), "get", **params)
# for url, old in response.items():
# if not old:
# wall.post_entries(url=url, tags="golem,it", title=None)
# print(response)
with open("sites.yaml", 'w') as stream:
yaml.dump(sites, stream, default_flow_style=False)
| Python | 0.000001 | @@ -977,16 +977,47 @@
xit(1)%0A%0A
+if %22sentry_url%22 in config:%0A
client =
@@ -1025,16 +1025,20 @@
Client(%0A
+
dsn=
@@ -1055,24 +1055,28 @@
ntry_url%22%5D,%0A
+
processo
@@ -1088,16 +1088,20 @@
+
'raven.p
@@ -1143,18 +1143,26 @@
r',%0A
-)%0A
+ )%0A
)%0A%0Atoken
|
25d67637fafb04bae67033a4deef4bc71fd91ef2 | Fix elision of needed path joins. | main.py | main.py | from markdown import Markdown
import sys
import codecs
import os
import errno
def ensure_output_exists(dir):
if not os.path.isdir(dir):
try:
print("mkdir", dir)
os.makedirs(dir)
except OSError as e:
raise SnabbptException("Unable to create output directory") from e
class SnabbptException(Exception):
pass
class HTMLTemplate:
def __init__(self, html):
self.html = html
def from_file(filename):
with codecs.open(filename, mode="r", encoding="utf-8") as file:
return HTMLTemplate(file.read())
def render(self, file, outfile):
with codecs.open(outfile, mode="w", encoding="utf-8", errors="xmlcharrefreplace") as out:
out.write(self.html.replace("{{PAGE-TITLE}}", file.title).replace("{{PAGE-CONTENT}}", file.html))
class File:
def __init__(self, filename):
self.filename = filename
with codecs.open(self.filename, mode="r", encoding="utf-8") as input_file:
text = input_file.read()
self.md = Markdown(extensions=["markdown.extensions.meta"])
self.html = self.md.convert(text)
self.title = self.md.Meta["title"][-1]
self.template = self.md.Meta["template"][-1]
self.output_path = "{0}/{1}.html".format(os.path.dirname(self.filename), self.title)
def __str__(self):
return self.filename
class Renderer:
def __init__(self, outDir):
self.templates = {}
self.outDir = outDir
ensure_output_exists(outDir)
def renderDir(self, path):
if not os.path.isdir(path):
raise SnabbptException("{0} is not a directory".format(path))
for file in self.get_files(path):
self.renderFile(file)
def get_files(self, path):
files = []
for file in os.listdir(path):
if file.startswith('.'):
continue
if os.path.isdir(file) and file != self.outDir:
files.extend(list(map(lambda x: os.path.normpath(os.path.join(path, file, x)), self.get_files(file))))
elif file.endswith(".md"):
files.append(file)
return files
def renderFile(self, filename):
file = File(filename)
if file.template not in self.templates:
ext = file.template.split('.')[-1].upper()
if ext not in fileTypes:
raise SnabbptException("Invalid template type: {0}".format(ext))
self.templates[file.template] = fileTypes[ext].from_file(file.template)
input_file = file.filename
output_file = os.path.join(self.outDir, file.output_path)
try:
if os.stat(input_file).st_mtime < os.stat(output_file).st_mtime:
print(file, "is up to date")
return
except:
pass
ensure_output_exists(os.path.dirname(output_file))
print("{0} -> {1}".format(input_file, output_file))
self.templates[file.template].render(file, output_file)
fileTypes = {
"HTML": HTMLTemplate,
}
if __name__ == "__main__":
try:
Renderer(str(sys.argv[2])).renderDir(str(sys.argv[1]))
except SnabbptException as e:
if e.__cause__:
print("{0} ({1})".format(e, e.__cause__))
else:
print(e)
| Python | 0 | @@ -1745,21 +1745,41 @@
derFile(
+os.path.join(path,
file)
+)
%0A%0A de
@@ -2096,16 +2096,35 @@
les(
+os.path.join(path,
file))))
%0A
@@ -2119,16 +2119,17 @@
file))))
+)
%0A
|
b110bbd335b87f806e73c4dbace098acfdfc7c6a | move CLIENT_SECRETS variable outside of decorator | main.py | main.py | from flask import Flask, render_template, flash, Markup, json
#import urllib
#import webapp2
import os
from apiclient.discovery import build
#from google.appengine.ext import ext
import httplib2
import sys
import itertools
import logging
import logging.config
import time
import MySQLdb
from sheepdog import *
# from googleapiclient.discovery import build_from_document
# from oauth2client.client import flow_from_clientsecrets
# from oauth2client.file import Storage
# from oauth2client.tools import argparser, run_flow
# from apiclient.errors import HttpError
# @Author Daniel George
# Sheepdog
# @Version 0.9.1
app = Flask(__name__)
app.config['DEBUG'] = True
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
global channel_id
global channel_Id
global user
#video_list_uploads = ['Daniel','George']
channel_list = ['UCgJA3nqJEUZBkZivasUSdJg']
@app.route('/')
def hello():
video_list_uploads = []
CLIENT_SECRETS_FILE = "client_secrets.json"
YOUTUBE_READ_WRITE_SSL_SCOPE = "https://www.googleapis.com/auth/youtube.force-ssl"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# return render_template('index.html', title = "Princess Elsa" , numbers = video_list_uploads)
# storage = Storage("static/main.py-oauth2.json")
# credentials = storage.get()
#
# logging.debug(storage)
# logging.debug(credentials)
# return render_template('index.html', title = "Princess Elsa" , numbers = video_list_uploads)
# json_data = open("static/youtube-v3-discoverydocument.json")
# doc = json.load(json_data)
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey="AIzaSyBRgM5ARXMih_F9HviEUFYDpnkEmA4FPCs")
#http=credentials.authorize(httplib2.Http()))
for channelId in channel_list:
channels_response = youtube.channels().list(id=channelId, part="contentDetails").execute()
for channel in channels_response["items"]:
try:
uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["likes"]
except KeyError:
pass
try:
playlistitems_list_request = youtube.playlistItems().list(playlistId=uploads_list_id, part="snippet", maxResults=50)
except NameError:
pass
try:
while playlistitems_list_request:
try:
playlistitems_list_response = playlistitems_list_request.execute()
except HttpError:
pass
# Print information about each video.
for playlist_item in playlistitems_list_response["items"]:
#title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
video_list_uploads.append([video_id])
try:
playlistitems_list_request = youtube.playlistItems().list_next(playlistitems_list_request, playlistitems_list_response)
except HttpError:
pass
except NameError:
pass
env = os.getenv('SERVER_SOFTWARE')
if (env and env.startswith('Google App Engine/')):
# Connecting from App Engine
db = MySQLdb.connect(
unix_socket='/cloudsql/peppy-linker-102423:daniel-george',
user='root',
db='sheepdog')
else:
# You may also assign an IP Address from the access control
# page and use it to connect from an external network.
pass
cursor = db.cursor()
cursor.execute("""TRUNCATE sheepdog.videoIds_sample;""")
db.commit()
cursor.execute("""INSERT INTO sheepdog.videoIds_sample (videoId) VALUES ('TjqH3XiiUF8'),('Hc0ZPYhl_VE');""")
db.commit()
#s = cursor.fetchall()
return render_template('index.html') #, title = s, numbers = video_list_uploads
@app.route('/user')
def user():
f = frozen('Princess Elsa')
return render_template('user.html', title=f)
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
| Python | 0.000001 | @@ -921,16 +921,61 @@
SdJg'%5D%0A%0A
+CLIENT_SECRETS_FILE = %22client_secrets.json%22%0A%0A
@app.rou
@@ -1024,53 +1024,8 @@
%5B%5D%0A
-%09CLIENT_SECRETS_FILE = %22client_secrets.json%22%0A
|
8f1668e8b3e93317f10f2bf6667ec7d9faf3e134 | add prefix, globalize | main.py | main.py | #!/usr/bin/python
import re
import json
import boto
import requests
import speedparser
import yaml
import calendar
import time
import PyRSS2Gen
import StringIO
dthandler = lambda obj: calendar.timegm(obj) if isinstance(obj, time.struct_time) else json.JSONEncoder().default(obj)
def do_feed(config):
req = requests.get(config['url'])
feed = speedparser.parse(req.content, clean_html=True) #, encoding='UTF-8')
entries = feed['entries']
for filterset in config['filter']:
filter_type, filter_rules = filterset.popitem()
if filter_type == 'include':
entries = filter_include(entries, filter_rules)
elif filter_type == 'exclude':
entries = filter_exclude(entries, filter_rules)
else:
raise "can only handle include/exclude filter types. being asked to process %s" % filter_type
items = []
# convert the entries to RSSItems, build the list we'll stick in the RSS..
for entry in entries:
item = PyRSS2Gen.RSSItem(
title = entry.get('title'),
link = entry.get('link'),
description = entry.get('description'),
author = entry.get('author'),
categories = entry.get('categories'),
comments = entry.get('comments'),
enclosure = entry.get('enclosure'),
guid = entry.get('guid'),
pubDate = entry.get('pubDate'),
source = entry.get('source'),
)
items.append(item)
rss = PyRSS2Gen.RSS2(
title = feed['feed'].get('title'),
link = feed['feed'].get('link'),
description = feed['feed'].get('description'),
pubDate = feed['feed'].get('pubDate'),
lastBuildDate = feed['feed'].get('lastBuildDate'),
categories = feed['feed'].get('categories'),
ttl = feed['feed'].get('ttl'),
image = feed['feed'].get('image'),
items = items
)
rssfile = StringIO.StringIO()
rss.write_xml(rssfile)
rssfile.seek(0)
return rssfile
def stringify(blob):
retstr = ''
if isinstance(blob, list):
for e in blob:
retstr += stringify(e)
elif isinstance(blob, dict):
for k,v in blob.iteritems():
retstr += stringify(k)
retstr += stringify(v)
elif isinstance(blob, str):
retstr += blob
elif isinstance(blob, unicode):
retstr += blob.encode('utf8')
else:
raise "unknown type: %s" % type(blob)
return retstr
def rule_matches(entry, rule):
# content can be a list/dict/etc, so it needs some help.
contentstr = stringify(entry.get('content')).lower()
titlestr = entry.get('title', '').lower()
summarystr = entry.get('summary', '').lower()
linkstr = entry.get('link', '').lower()
if rule[0] == '/':
# regex. trim off leading/trailing /slash/
rex = rule.strip('/')
if re.search(rex, titlestr) or re.search(rex, summarystr) or re.search(rex, contentstr) or re.search(rex, linkstr):
return True
elif rule in titlestr or rule in summarystr or rule in contentstr or rule in linkstr:
return True
return False
def item_matches(entry, rules):
for rule in rules:
if rule_matches(entry, rule):
return True
return False
def filter_include(entries, rules):
# only include items that match. all others will be removed.
newlist = []
for entry in entries:
if item_matches(entry, rules):
newlist.append(entry)
return newlist
def filter_exclude(entries, rules):
# include all items unless they match.
newlist = []
for entry in entries:
if not item_matches(entry, rules):
newlist.append(entry)
return newlist
def do_include(includeurl):
if includeurl.startswith('http'):
read_config(s3, url=includeurl)
elif includeurl.startswith('s3'):
match = re.search('s3://([^\/]+)\/(.+)', includeurl)
bucket = match.group(0)
key = match.group(1)
read_config(s3, bucket=bucket, key=key)
else:
raise "did not recognize include url format. either http[s]:// or s3:// please."
def do_config(config):
rss_bucket = s3.get_bucket('dyn.tedder.me')
for feedcfg in config:
# pull off non-feed config entries first.
if feedcfg.get('include'):
feedcfg['include']
rssfile = do_feed(feedcfg)
dest = feedcfg['output']
rss_bucket.new_key(dest).set_contents_from_file(rssfile, reduced_redundancy=True, rewind=True, headers={'Content-Type': 'application/rss+xml', 'Cache-Control':'max-age=600,public'}, policy='public-read')
print "wrote feed to %s" % dest
def read_config(s3, bucket=None, key=None, url=None):
if bucket and key:
bucket = s3.get_bucket('tedder')
config_file = bucket.get_key('rss/main_list.yml').get_contents_as_string()
elif url:
config_file = requests.get(url).text
else:
raise "need s3 or http details for config"
config = yaml.load(config_file)
do_config(config)
s3 = boto.connect_s3()
read_config(s3, 'tedder', 'rss/main_list.yml')
| Python | 0.017851 | @@ -155,16 +155,87 @@
ringIO%0A%0A
+S3_OUTPUT_BUCKET = 'dyn.tedder.me'%0AS3_OUTPUT_PREFIX = '/rss_filter/'%0A%0A#
dthandle
@@ -3961,31 +3961,32 @@
_bucket(
-'dyn.tedder.me'
+S3_OUTPUT_BUCKET
)%0A for
@@ -4148,16 +4148,35 @@
dest =
+ S3_OUTPUT_PREFIX +
feedcfg
|
18d59a1d23cc9021fa388028ab723822e031dc07 | Add health check | main.py | main.py | # Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
# law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import json
from google.appengine.ext import vendor
vendor.add('lib')
from flask import Flask
app = Flask(__name__)
from api_key import key
@app.route('/get_author/<title>')
def get_author(title):
host = 'https://www.googleapis.com/books/v1/volume?q={}&key={}&country=US'.format(title, key)
request = urllib2.Request(host)
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, error:
contents = error.read()
print ('Received error from Books API {}'.format(contents))
return str(contents)
html = response.read()
author = json.loads(html)['items'][0]['volumeInfo']['authors'][0]
return author
if __name__ == '__main__':
app.run(debug=True)
| Python | 0.000001 | @@ -714,16 +714,89 @@
port key
+%0A %0A%0A@app.route('/_ah/health')%0Adef health_check():%0A return 'ok', 200
%0A%0A@app.r
|
788f11632ce085d82be6d90665b9b277f7a60148 | Refactor Task function to properly switch if it is a TaskTemplate, and if there is a CloudHarenssTask use CloudHarnessWorkflow. | gbdxtools/interface.py | gbdxtools/interface.py | """
Main Interface to GBDX API.
Contact: kostas.stamatiou@digitalglobe.com
"""
from __future__ import absolute_import
from builtins import object
from future import standard_library
import json
import os
import logging
from gbdx_auth import gbdx_auth
from gbdxtools.s3 import S3
from gbdxtools.ordering import Ordering
from gbdxtools.workflow import Workflow
from gbdxtools.catalog import Catalog
from gbdxtools.idaho import Idaho
import gbdxtools.simpleworkflows
from gbdxtools.cloudharness_task import CloudHarnessTask, TaskCreationError
class Interface(object):
gbdx_connection = None
def __init__(self, **kwargs):
if (kwargs.get('username') and kwargs.get('password') and
kwargs.get('client_id') and kwargs.get('client_secret')):
self.gbdx_connection = gbdx_auth.session_from_kwargs(**kwargs)
elif kwargs.get('gbdx_connection'):
# Pass in a custom gbdx connection object, for testing purposes
self.gbdx_connection = kwargs.get('gbdx_connection')
else:
# This will throw an exception if your .ini file is not set properly
self.gbdx_connection = gbdx_auth.get_session()
# create a logger
# for now, just log to the console. We'll replace all the 'print' statements
# with at least logger.info or logger.debug statements
# later, we can log to a service, file, or some other aggregator
self.logger = logging.getLogger('gbdxtools')
self.logger.setLevel(logging.ERROR)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
self.logger.addHandler(console_handler)
self.logger.info('Logger initialized')
# create and store an instance of the GBDX s3 client
self.s3 = S3(self)
# create and store an instance of the GBDX Ordering Client
self.ordering = Ordering(self)
# create and store an instance of the GBDX Catalog Client
self.catalog = Catalog(self)
# create and store an instance of the GBDX Workflow Client
self.workflow = Workflow(self)
# create and store an instance of the Idaho Client
self.idaho = Idaho(self)
def Task(self, __task_name=None, cloudharness_obj=None, **kwargs):
if __task_name is None:
# Create a cloudharness gbdxtools task object
return CloudHarnessTask(self, __task_name, **kwargs)
else:
# Create a standard gbdxtools task object.
return gbdxtools.simpleworkflows.Task(self, __task_name, **kwargs)
def Workflow(self, tasks, **kwargs):
return gbdxtools.simpleworkflows.Workflow(self, tasks, **kwargs)
| Python | 0 | @@ -488,21 +488,16 @@
dharness
-_task
import
@@ -518,25 +518,72 @@
sk,
-TaskCreationError
+CloudHarnessWorkflow%0Afrom gbdx_task_template import TaskTemplate
%0A%0A%0Ac
@@ -2456,12 +2456,8 @@
ness
-_obj
=Non
@@ -2470,24 +2470,25 @@
wargs):%0A
+%0A
if __tas
@@ -2479,16 +2479,24 @@
+# Check
if __tas
@@ -2506,329 +2506,986 @@
ame
-is None:%0A # Create a cloudharness gbdxtools task object%0A return CloudHarnessTask(self, __task_name, **kwargs)%0A else:%0A # Create a standard gbdxtools task object.%0A return gbdxtools.simpleworkflows.Task(self, __task_name, **kwargs)%0A%0A def Workflow(self, tasks, **kwargs):
+has been passed as a CloudHarnessTask object,%0A # or the keyword cloudharness has been provied.%0A task_is_subclass = False%0A if not isinstance(__task_name, str) and __task_name is not None:%0A task_is_subclass = issubclass(__task_name, TaskTemplate)%0A%0A if task_is_subclass or __task_name is None and cloudharness is not None:%0A # Create a cloudharness gbdxtools task object%0A return CloudHarnessTask(%0A self,%0A __task_name if task_is_subclass else cloudharness,%0A **kwargs%0A )%0A else:%0A # Create a standard gbdxtools task object.%0A return gbdxtools.simpleworkflows.Task(self, __task_name, **kwargs)%0A%0A def Workflow(self, tasks, **kwargs):%0A # Check if any of the tasks are CloudHarnessTasks%0A if len(%5Btask for task in tasks if isinstance(task, CloudHarnessTask)%5D) %3E 0:%0A return CloudHarnessWorkflow(self, tasks, **kwargs)%0A
%0A
|
6ddc63dcb1005ccf6d09f2577faf99566bafced7 | fix Log.add_group() use in live_plot.py example | examples/miscellaneous/live_plot.py | examples/miscellaneous/live_plot.py | from __future__ import print_function
from __future__ import absolute_import
import os
import sys
sys.path.append( '.' )
import numpy as nm
from sfepy.base.base import output, pause
from sfepy.base.log import Log
def main():
cwd = os.path.split(os.path.join(os.getcwd(), __file__))[0]
log = Log((['sin(x)', 'cos(x)'], ['exp(x)']),
yscales=['linear', 'log'],
xlabels=['angle', None], ylabels=[None, 'a function'],
log_filename=os.path.join(cwd, 'live_plot.log'))
log2 = Log([['x^3']],
yscales=['linear'],
xlabels=['x'], ylabels=['a cubic function'],
aggregate=50, sleep=0.5,
log_filename=os.path.join(cwd, 'live_plot2.log'))
added = 0
for x in nm.linspace(0, 4.0 * nm.pi, 200):
output('x: ', x)
if x < (2.0 * nm.pi):
log(nm.sin(x), nm.cos(x), nm.exp(x), x = [x, None])
else:
if added:
log(nm.sin(x), nm.cos(x), nm.exp(x), x**2,
x=[x, None, x])
else:
log.plot_vlines(color='r', linewidth=2)
log.add_group(['x^2'], 'linear', 'new x', 'square',
formats=['%+g'])
added += 1
if (added == 20) or (added == 50):
log.plot_vlines([2], color='g', linewidth=2)
log2(x*x*x, x=[x])
print(log)
print(log2)
pause()
log(finished=True)
log2(finished=True)
if __name__ == '__main__':
main()
| Python | 0.000001 | @@ -1162,16 +1162,23 @@
'x%5E2'%5D,
+yscale=
'linear'
@@ -1183,16 +1183,23 @@
r',
+xlabel=
'new x',
'sq
@@ -1198,19 +1198,10 @@
x',
- 'square',
%0A
+
@@ -1225,16 +1225,32 @@
+ylabel='square',
formats
|
43a087c69eedd26d3bab699fca08b5a01a06a6a4 | Add test to check if InvalidFrequencyException is thrown | skrf/tests/test_frequency.py | skrf/tests/test_frequency.py | import unittest
import os
import numpy as npy
import skrf as rf
class FrequencyTestCase(unittest.TestCase):
'''
'''
def setUp(self):
'''
'''
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/'
def test_create_linear_sweep(self):
freq = rf.Frequency(1,10,10,'ghz')
self.assertTrue((freq.f == npy.linspace(1,10,10)*1e9).all())
self.assertTrue((freq.f_scaled ==npy.linspace(1,10,10)).all())
self.assertTrue((freq.sweep_type == 'lin'))
def test_create_log_sweep(self):
freq = rf.Frequency(1,10,10,'ghz', sweep_type='log')
#Check end points
self.assertTrue((freq.f[0] == 1e9))
self.assertTrue((freq.f[-1] == 10e9))
spacing = [freq.f[i+1]/freq.f[i] for i in range(len(freq.f)-1)]
#Check that frequency is increasing
self.assertTrue(all(s > 1 for s in spacing))
#Check that ratio of adjacent frequency points is identical
self.assertTrue(all(abs(spacing[i] - spacing[0]) < 1e-10 for i in range(len(spacing))))
self.assertTrue((freq.sweep_type == 'log'))
def test_create_rando_sweep(self):
f = npy.array([1,5,200])
freq = rf.Frequency.from_f(f,unit='khz')
self.assertTrue((freq.f ==f*1e3).all())
self.assertTrue((freq.f_scaled== f).all())
self.assertTrue((freq.sweep_type == 'unknown'))
with self.assertRaises(ValueError):
freq.npoints = 10
def test_rando_sweep_from_touchstone(self):
'''
this also tests the ability to read a touchstone file.
'''
rando_sweep_ntwk = rf.Network(os.path.join(self.test_dir, 'ntwk_arbitrary_frequency.s2p'))
self.assertTrue((rando_sweep_ntwk.f == \
npy.array([1,4,10,20])).all())
self.assertTrue((rando_sweep_ntwk.frequency.sweep_type == 'unknown'))
def test_slicer(self):
a = rf.Frequency.from_f([1,2,4,5,6])
b = a['2-5ghz']
tinyfloat = 1e-12
self.assertTrue((abs(b.f - [2e9,4e9,5e9]) < tinyfloat).all())
suite = unittest.TestLoader().loadTestsFromTestCase(FrequencyTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| Python | 0 | @@ -1,12 +1,65 @@
+from skrf.frequency import InvalidFrequencyException%0A
import unitt
@@ -2126,16 +2126,155 @@
all())%0A%0A
+ def test_frequency_check(self):%0A with self.assertRaises(InvalidFrequencyException):%0A a = rf.Frequency.from_f(%5B2,1%5D)%0A%0A
suite =
|
e7a01079e57acfa4486fc6cf786a1012da436d0f | Revise snapshot parsing to not expect multiple samples for contrast | solar_snapshot_name_parse.py | solar_snapshot_name_parse.py | #!/usr/bin/env python3
################################################################################
# Description:
# * Parses names of files in directory containing snapshots of solar
# suitcase displays, and formats them for pasting into timestamp column of
# solar energy log spreadsheet
# * Requires that Box WebDAV mount is active
# * Expects a configuration file in home directory named
# '.solar_snapshot_name_parse_cfg.json', in the following format:
# {
# "snapshot_dir": "/mnt/box_webdav/.../Solar charge logs"
# }
#
# Arguments:
# * --help (optional)
# Displays help message
#
# Examples:
# * ./solar_snapshot_name_parse.py
# * ./solar_snapshot_name_parse.py --help
#
# Limitations:
# * Tested on only Raspbian
# * Makes no attempt to verify that Box WebDAV mount is valid
################################################################################
# Modules
import sys
import os
import time
import argparse
import json
import re
# Constants
CFG_FILE_PATH = "~/.solar_snapshot_name_parse_cfg.json"
# Main function
def main(argv):
# Configure argument parser
desc_str = "Parses names of files in directory containing snapshots of "
desc_str += "solar suitcase displays, and formats them for pasting into "
desc_str += "timestamp column of solar energy log spreadsheet"
parser = argparse.ArgumentParser(description=desc_str)
# Print current time
print(time.strftime("%a %Y-%m-%d %I:%M:%S %p"))
print("")
# Parse arguments
print("Parsing arguments...")
args = parser.parse_args()
for (arg, val) in sorted(vars(args).items()):
print(" * {}: {}".format(arg, val))
print("")
# Parse configuration file
cfg_file_path = os.path.expanduser(CFG_FILE_PATH)
cfg_file_path = os.path.expandvars(cfg_file_path)
print("Parsing configuration file '{}'...".format(cfg_file_path))
cfg = json.load(open(cfg_file_path))
check_cfg_file(cfg) # Check that file contains all required information
print("")
# Retrieve names of files in snapshot directory
print("Retrieving names of files in '{}'...".format(cfg["snapshot_dir"]))
file_names = os.listdir(cfg["snapshot_dir"])
# Format file names and print results
print("Formatting file names and print results...")
count = fmt_print_file_names(sorted(file_names))
print("")
# Exit
print("Printed {} lines.".format(count))
print("Done.")
print("")
sys.exit(0) # Success
# Checks that configuration file contained all required information
def check_cfg_file(cfg):
# Snapshot directory
if ("snapshot_dir" in cfg):
msg = "Parsed snapshot directory name from configuration file: "
msg += "{}".format(cfg["snapshot_dir"])
print(msg)
else: # No snapshot directory parsed
msg = "Configuration file does not contain 'snapshot_dir' string."
raise Exception(msg)
# Formats file names and prints results
def fmt_print_file_names(file_names):
re_file_name = re.compile(r'^(\d{4}-\d{2}-\d{2})_(\d{2})(\d{2})_c(\d{2})\.jpg$')
num_printed = 0
for file_name in file_names:
m = re_file_name.match(file_name)
if m: # Regular expression match
m_date = m.group(1)
m_hour = m.group(2)
m_minute = m.group(3)
m_contrast = m.group(4)
if (m_contrast == "00"): # Ignore duplicates
print("{} {}:{}".format(m_date, m_hour, m_minute))
num_printed += 1
return num_printed
# Execute 'main()' function
if (__name__ == "__main__"):
main(sys.argv)
| Python | 0 | @@ -3105,17 +3105,8 @@
%7B2%7D)
-_c(%5Cd%7B2%7D)
%5C.jp
@@ -3350,107 +3350,9 @@
(3)%0A
- m_contrast = m.group(4)%0A%0A if (m_contrast == %2200%22): # Ignore duplicates%0A
+%0A
@@ -3410,20 +3410,16 @@
inute))%0A
-
|
653376cf10edb42e6d5c429e61bc9ef23eb51234 | fix test for GenomicFilter | solvebio/test/test_filter.py | solvebio/test/test_filter.py | import unittest
import solvebio
from solvebio import Filter, GenomicFilter
class FilterTest(unittest.TestCase):
def test_filter_basic(self):
f = Filter()
self.assertEqual(repr(f), '<Filter []>', 'empty filter')
self.assertEqual(repr(~f), '<Filter []>', '"not" of empty filter')
# Because the order in listing keys is arbitrary, we only
# test with one entry.
f1 = Filter(price='Free')
self.assertEqual(repr(f1), "<Filter [('price', 'Free')]>")
self.assertEqual(repr(~~f1), "<Filter [('price', 'Free')]>",
'"not" of empty filter')
a = solvebio.query.Filter(chr1="3")
b = solvebio.query.Filter(chr2="4")
self.assertEqual(repr(a | b),
"<Filter [{'or': [('chr1', '3'), ('chr2', '4')]}]>",
'"or" filter')
f |= a
self.assertEqual(repr(f), "<Filter [('chr1', '3')]>",
"'or' with empty filter")
self.assertEqual(repr(a), "<Filter [('chr1', '3')]>",
"prior 'or' doesn't mung filter")
filters3 = Filter(omim_id=144650) | Filter(omim_id=144600) \
| Filter(omim_id=145300)
self.assertEqual(repr(filters3),
"<Filter [{'or': [('omim_id', 144650)," +
" ('omim_id', 144600), ('omim_id', 145300)]}]>")
def test_process_filters(self):
# FIXME: add more and put in a loop.
filters = [('omim_id', None)]
expect = filters
dataset_name = 'omim/0.0.1-1/omim'
x = solvebio.Query(dataset_name)
self.assertEqual(repr(x._process_filters(filters)), repr(expect))
class GenomicFilterTest(unittest.TestCase):
def test_single_position(self):
f = GenomicFilter('chr1', 100)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.start__lte', 100), ('genomic_coordinates.stop__gte', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
f = GenomicFilter('chr1', 100, exact=True)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.stop', 100), ('genomic_coordinates.start', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
def test_range(self):
f = GenomicFilter('chr1', 100, 200)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [{'or': [{'and': [('genomic_coordinates.start__lte', 100), ('genomic_coordinates.stop__gte', 200)]}, ('genomic_coordinates.start__range', [100, 201]), ('genomic_coordinates.stop__range', [100, 201])]}, ('genomic_coordinates.chromosome', '1')]}]>") # noqa
f = GenomicFilter('chr1', 100, 200, exact=True)
self.assertEqual(repr(f), "<GenomicFilter [{'and': [('genomic_coordinates.stop', 200), ('genomic_coordinates.start', 100), ('genomic_coordinates.chromosome', '1')]}]>") # noqa
if __name__ == "__main__":
unittest.main()
| Python | 0 | @@ -2535,17 +2535,17 @@
%5B100, 20
-1
+0
%5D), ('ge
@@ -2584,17 +2584,17 @@
%5B100, 20
-1
+0
%5D)%5D%7D, ('
|
a4ea5f9a6b6de93188a590b918aa122e4fbe437b | Fix jsbox formset usage. | go/apps/jsbox/forms.py | go/apps/jsbox/forms.py | from django import forms
from django.forms.formsets import BaseFormSet, DEFAULT_MAX_NUM
from go.base.widgets import CodeField, SourceUrlField
SOURCE_URL_HELP_TEXT = (
'HTTP Basic Authentication is supported. If using GitHub '
'please use '
'<a href="http://developer.github.com/v3/#authentication">'
'OAuth2 access tokens'
'</a>.')
class JsboxForm(forms.Form):
javascript = CodeField(required=False)
source_url = SourceUrlField(code_field='javascript',
help_text=SOURCE_URL_HELP_TEXT,
required=False)
@staticmethod
def initial_from_config(metadata):
return metadata
def to_config(self):
return {
'javascript': self.cleaned_data['javascript'],
'source_url': self.cleaned_data['source_url'],
}
class JsboxAppConfigForm(forms.Form):
key = forms.CharField()
value = CodeField(required=False)
source_url = SourceUrlField(code_field='value',
help_text=None,
required=False)
@staticmethod
def initial_from_config(metadata):
return metadata
def to_config(self):
return {
'key': self.cleaned_data['key'],
'value': self.cleaned_data['value'],
'source_url': self.cleaned_data['source_url'],
}
class JsboxAppConfigFormset(BaseFormSet):
form = JsboxAppConfigForm
absolute_max = DEFAULT_MAX_NUM
extra = 1
can_order = False
can_delete = True
max_num = None
def to_config(self):
metadata = {}
for form in self.forms:
if not form.cleaned_data or form in self.deleted_forms:
continue
submeta = form.to_config()
metadata[submeta['key']] = submeta
del submeta['key']
return metadata
@classmethod
def initial_from_config(cls, metadata):
initials = []
for key in sorted(metadata):
submeta = metadata[key].copy()
submeta['key'] = key
if hasattr(cls.form, 'initial_from_config'):
submeta = getattr(cls.form, 'initial_from_config')(submeta)
initials.append(submeta)
return initials
| Python | 0 | @@ -65,31 +65,31 @@
ormSet,
-DEFAULT_MAX_NUM
+formset_factory
%0A%0Afrom g
@@ -1386,32 +1386,36 @@
%7D%0A%0A%0Aclass
+Base
JsboxAppConfigFo
@@ -1437,150 +1437,8 @@
et):
-%0A form = JsboxAppConfigForm%0A absolute_max = DEFAULT_MAX_NUM%0A extra = 1%0A can_order = False%0A can_delete = True%0A max_num = None
%0A%0A
@@ -2139,8 +2139,139 @@
nitials%0A
+%0AJsboxAppConfigFormset = formset_factory(%0A JsboxAppConfigForm, can_delete=True, extra=1,%0A formset=BaseJsboxAppConfigFormset)%0A
|
884e17eb92e35ab5a9f4d6bc94f11f49977711a3 | Use render() so that we can pass in the request context and thus link to static files correctly (reviewed by @smn). | go/apps/jsbox/views.py | go/apps/jsbox/views.py | import requests
from urlparse import urlparse, urlunparse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from go.conversation.base import ConversationViews
from go.apps.jsbox.forms import JsboxForm, JsboxAppConfigFormset
from go.apps.jsbox.log import LogManager
from go.base.utils import conversation_or_404
class JsboxConversationViews(ConversationViews):
conversation_type = u'jsbox'
conversation_display_name = u'Javascript App'
conversation_initiator = None
edit_conversation_forms = (
('jsbox', JsboxForm),
('jsbox_app_config', JsboxAppConfigFormset),
)
@login_required
@csrf_exempt
def cross_domain_xhr(request):
url = request.POST.get('url', None)
parse_result = urlparse(url)
if parse_result.username:
auth = (parse_result.username, parse_result.password)
url = urlunparse((parse_result.scheme,
('%s:%s' % (parse_result.hostname, parse_result.port)
if parse_result.port
else parse_result.hostname),
parse_result.path,
parse_result.params,
parse_result.query,
parse_result.fragment))
else:
auth = None
url = url
r = requests.get(url, auth=auth)
return HttpResponse(r.text, status=r.status_code)
@login_required
def jsbox_logs(request, conversation_key):
campaign_key = request.user_api.user_account_key
conversation = conversation_or_404(request.user_api, conversation_key)
log_manager = LogManager(request.user_api.api.redis)
logs = log_manager.get_logs(campaign_key, conversation_key)
logs = list(reversed(logs))
return render_to_response("jsbox/jsbox_logs.html", {
"conversation": conversation,
"logs": logs,
})
| Python | 0 | @@ -182,28 +182,16 @@
t render
-_to_response
%0Afrom dj
@@ -1883,21 +1883,18 @@
nder
-_to_response(
+(request,
%22jsb
|
f10e01a180cca2185862c1f6cf926c2a197536ed | lower the name to be stripped | gozerlib/utils/name.py | gozerlib/utils/name.py | # gozerlib/utils/name.py
#
#
""" name related helper functions. """
## basic imports
import string
import os
## defines
allowednamechars = string.ascii_letters + string.digits + '!.@-+#'
## stripname function
def stripname(name, allowed=""):
""" strip all not allowed chars from name. """
res = ""
for c in name:
if ord(c) < 31: res += "-"
elif c in allowednamechars + allowed: res += c
else: res += "-"
res = res.replace(os.sep, '+')
res = res.replace("@", '+')
res = res.replace("#", '+')
return res
## testnam function
def testname(name):
""" test if name is correct. """
for c in name:
if c not in allowedchars or ord(c) < 31: return False
return True
| Python | 0.999995 | @@ -289,24 +289,48 @@
m name. %22%22%22%0A
+ name = name.lower()%0A
res = %22%22
|
cbfd5f70beffd978fd3ea22ec1146eaa3eb6c104 | fix timezone issue | noaa.py | noaa.py | import urllib2
import xml.etree.ElementTree as ET
from scipy.interpolate import interp1d
import datetime
import time
def forecast(place, forecast = True):
lat,lon = place
url = """http://graphical.weather.gov/xml/SOAP_server/ndfdXMLclient.php?whichClient=NDFDgen&lat=%s&lon=%s&Unit=e&temp=temp&wspd=wspd&sky=sky&wx=wx&rh=rh&product=time-series&Submit=Submit""" % (lat,lon)
print url
res = urllib2.urlopen(url).read()
root = ET.fromstring(res)
timeSeries = [(i.text) for i in root.findall('./data/time-layout')[0].iterfind('start-valid-time')]
#knots to mph
print res
windSpd = [eval(i.text)*1.15 for i in root.findall('./data/parameters/wind-speed')[0].iterfind('value')]
cloudCover = [eval(i.text)/100.0 for i in root.findall('./data/parameters/cloud-amount')[0].iterfind('value')]
temperature = [eval(i.text) for i in root.findall('./data/parameters/temperature')[0].iterfind('value')]
if not forecast:
return {'cloudCover':cloudCover[0],
'temperature':temperature[0],
'windSpeed':windSpd[0],
'start-valid-time':timeSeries[0]}
else:
return {'cloudCover':cloudCover,
'temperature':temperature,
'windSpeed':windSpd,
'start-valid-time':timeSeries}
def strToTime(str):
fmt='%Y-%m-%dT%H:%M:00'
return datetime.datetime.strptime(str[0:19],fmt)
def castFloat(d):
if type(d) == str:
fmt='%Y-%m-%dT%H:%M:00'
d = datetime.datetime.strptime(d[0:19],fmt)
return time.mktime(d.timetuple())
def herpDerpInterp(place):
lat,lon = place
#begin=2014-02-14T00%3A00%3A00&end=2018-02-22T00%3A00%3A00
fmt='%Y-%m-%dT00:00:00'
fmt='%Y-%m-%dT%H:%M:00'
begin=(datetime.datetime.now()-datetime.timedelta(hours=12)).strftime(fmt)
#end=(datetime.datetime.now()+datetime.timedelta(hours=48)).strftime(fmt)
url = """http://graphical.weather.gov/xml/SOAP_server/ndfdXMLclient.php?whichClient=NDFDgen&lat=%s&lon=%s&Unit=e&temp=temp&wspd=wspd&sky=sky&wx=wx&rh=rh&product=time-series&begin=%s&end=2018-02-22T00:00:00&Submit=Submit""" % (lat, lon, begin)
res = urllib2.urlopen(url).read()
root = ET.fromstring(res)
timeSeries = [castFloat(i.text) for i in root.findall('./data/time-layout')[0].iterfind('start-valid-time')]
#knots to mph
windSpd = [eval(i.text)*1.15 for i in root.findall('./data/parameters/wind-speed')[0].iterfind('value')]
cloudCover = [eval(i.text)/100.0 for i in root.findall('./data/parameters/cloud-amount')[0].iterfind('value')]
temperature = [eval(i.text) for i in root.findall('./data/parameters/temperature')[0].iterfind('value')]
ws = interp1d(timeSeries, windSpd, kind='cubic')
cc = interp1d(timeSeries, cloudCover, kind='cubic')
t = interp1d(timeSeries, temperature, kind='cubic')
startD = datetime.datetime.fromtimestamp(timeSeries[0])
series = []
for i in range(48):
try:
temp_dict = {}
b = startD + datetime.timedelta(hours=i)
temp_dict['utc_datetime'] = b + datetime.timedelta(hours=5)
temp_dict['windSpeed'] = ws(castFloat(b)).item()
temp_dict['temperature'] = t(castFloat(b)).item()
temp_dict['cloudCover'] = cc(castFloat(b)).item()
series.append(temp_dict)
except:
pass
return series
if __name__ == '__main__':
import geo
place = geo.zipToCoordinates('17603')
#print forecast(place)
print herpDerpInterp(place)
#print windSpd, len(windSpd)
#print cloudCover, len(cloudCover)
#print temperature, len(temperature)
| Python | 0.000015 | @@ -1404,16 +1404,48 @@
oat(d):%0A
+ %22%22%22returns utc timestamp%22%22%22%0A
if t
@@ -1503,50 +1503,124 @@
-d = datetime.datetime.strptime(d%5B0:19%5D,fmt
+lt= d%5B0:19%5D%0A tz = eval(d%5B19:22%5D)%0A d = datetime.datetime.strptime(lt,fmt) - datetime.timedelta(hours=tz
)%0A
@@ -1632,33 +1632,56 @@
urn
+(d - date
time.
-mk
+date
time(
-d.timetuple()
+1970,1,1)).total_seconds(
)%0A%0Ad
@@ -2981,16 +2981,19 @@
atetime.
+utc
fromtime
@@ -3192,38 +3192,8 @@
= b
- + datetime.timedelta(hours=5)
%0A
|
debeefabeb64766b380af42458433a05c2a2f04a | Add F1 score to metrics. | non_semantic_speech_benchmark/eval_embedding/metrics.py | non_semantic_speech_benchmark/eval_embedding/metrics.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Metrics for evaluation.
1) Equal Error Rate (EER) metric.
2) D-Prime.
3) AUC.
4) Balanced accuracy.
"""
import math
from typing import Any, Iterable, Tuple, Optional
import numpy as np
import scipy.stats
from sklearn import metrics as skmetrics
def calculate_eer(labels, scores):
"""Returns the equal error rate for a binary classifier.
EER is defined as the point on the DET curve where the false positive and
false negative rates are equal.
Args:
labels: Ground truth labels for each data point.
scores: Regression scores for each data point. A score of 1 indicates a
classification of label 1.
Returns:
eer: The Equal Error Rate.
"""
fpr, fnr = calculate_det_curve(labels, scores)
min_diff_idx = np.argmin(np.abs(fpr - fnr))
return np.mean((fpr[min_diff_idx], fnr[min_diff_idx]))
def calculate_det_curve(labels,
scores):
"""Calculates the false positive and negative rate at each score.
The DET curve is related to the ROC curve, except it plots false positive rate
against false negative rate.
See https://en.wikipedia.org/wiki/Detection_error_tradeoff for a full
description of the DET curve.
Args:
labels: Ground truth labels for each data point.
scores: Regression scores for each data point. A score of 1 indicates a
classification of label 1. Should be in range (0, 1).
Returns:
fpr, fnr
All returned values are numpy arrays with the same length as scores.
fpr: False positive rate at a given threshold value.
fnr: False negative rate at a given threshold value.
"""
scores = np.asarray(scores, dtype=float)
labels = np.asarray(labels, dtype=float)
indices = np.argsort(scores)
labels = labels[indices]
fnr = np.cumsum(labels) / np.sum(labels)
fnr = np.insert(fnr, 0, 0)
negative_labels = 1 - labels
fpr = np.cumsum(negative_labels[::-1])[::-1]
fpr /= np.sum(negative_labels)
fpr = np.append(fpr, 0)
return fpr, fnr
def calculate_auc(labels,
predictions,
sample_weight = None):
return skmetrics.roc_auc_score(
labels, predictions, sample_weight=sample_weight)
def dprime_from_auc(auc):
"""Returns a d-prime measure corresponding to an ROC area under the curve.
D-prime denotes the sensitivity index:
https://en.wikipedia.org/wiki/Sensitivity_index
Args:
auc: (float) Area under an ROC curve.
Returns:
Float value representing the separation of score distributions
between negative and positive scores for a labeler (an algorithm or
group of readers who assign continuous suspicion scores to a series
of cases). The AUC is given by PHI(mu / sqrt(2)), where PHI is the
cumulative distribution function of the normal distribution.
"""
return math.sqrt(2) * scipy.stats.norm.ppf(auc)
def balanced_accuracy(labels,
predictions):
return skmetrics.balanced_accuracy_score(y_true=labels, y_pred=predictions)
| Python | 0.999995 | @@ -724,16 +724,29 @@
curacy.%0A
+5) F1 score.%0A
%22%22%22%0A%0Aimp
@@ -803,16 +803,22 @@
Optional
+, Text
%0A%0Aimport
@@ -2703,16 +2703,54 @@
weight =
+ None,%0A multi_class =
None):%0A
@@ -2837,16 +2837,41 @@
e_weight
+, multi_class=multi_class
)%0A%0A%0Adef
@@ -3673,8 +3673,169 @@
ctions)%0A
+%0A%0Adef f1_score(labels,%0A predictions):%0A return skmetrics.f1_score(y_true=labels, y_pred=predictions,%0A average='weighted')%0A
|
8d2167bc3bc37f68e225ddcd86bc4114d90be87e | Update version number | local_packages.py | local_packages.py | import sublime
from .event_handler import EventHandler
from .settings import Settings
package_control_installed = False
LOCAL_PACKAGES_VERSION = "0.1.1"
evaluating = False
retry_times = 3
def plugin_loaded():
Settings.reset()
Settings.startup()
EventHandler().register_handler(
evaluate_install,
EventHandler().ON_LOAD
)
print("[Local Packages] v%s" % (LOCAL_PACKAGES_VERSION))
check_package_control()
def check_package_control():
try:
__import__("Package Control").package_control
global package_control_installed
package_control_installed = True
except:
global retry_times
if retry_times > 0:
retry_times -= 1
sublime.set_timeout(check_package_control, 3000)
else:
sublime.error_message(
"Package Control is not found.\n\n" +
"Local Packages will now disabled"
)
return
evaluate_install()
def evaluate_install(view=None):
global evaluating
if evaluating:
return
print("[Local Packages] Evaluating missing packages")
from .package_evaluator import PackageEvaluatorThread
evaluating = True
PackageEvaluatorThread(
window=sublime.active_window(),
callback=on_installed
).start()
def on_installed(failed_packages=[]):
global evaluating
evaluating = False
if len(failed_packages) > 0:
msg = "Local Packages failed to install %s missing packages...\n" % (
len(failed_packages)
)
limit = 10
for package in failed_packages:
limit -= 1
if limit < 0:
break
msg += " - %s\n" % (package)
if limit < 0:
msg += "and more..."
sublime.error_message(msg)
else:
print("[Local Packages] Dependencies already installed")
| Python | 0.000002 | @@ -144,17 +144,17 @@
= %220.1.
-1
+2
%22%0Aevalua
|
2e3d200ee0511819f3befb146a4ebed22fef80ab | reorder admin menu | local_settings.py | local_settings.py | import os
from django.utils.translation import ugettext_lazy as _
DEBUG = True if os.environ.get('DEBUG', 'True') else False
# Make these unique, and don't share it with anybody.
SECRET_KEY = "j1qa@u$5ktqr^0_kwh@-j@*-80t$)ht!4-=ybz1xc%@3+r(r&tzefoih"
NEVERCACHE_KEY = "m)u^%r@uh#r3wu0&$=#$1ogx)uy4hv93^2lt%c3@xi=^gifoj8paozijdihazefd"
# DATABASE_ROUTERS = ['eve.routers.EveRouter', 'festival.routers.FestivalRouter',]
# DATABASE_ROUTERS = ['eve.routers.EveRouter',]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': os.environ.get('DB_ENV_POSTGRES_PASSWORD'),
'HOST': 'db',
'PORT': '5432',
},
'eve': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'eve',
'USER': 'eve',
'PASSWORD': 'HmazS2frT',
'HOST': 'pgdb',
'PORT': '5432',
},
}
# DATABASE_ROUTERS = ['eve.routers.EveRouter',]

# EXTENSIONS AND FORMATS
# Allowed Extensions for File Upload. Lower case is important.
FILEBROWSER_EXTENSIONS = {
    'Folder': [''],
    'Image': ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff'],
    'Document': ['.pdf', '.doc', '.rtf', '.txt', '.xls', '.csv', '.docx'],
    'Video': ['.mov', '.wmv', '.mpeg', '.mpg', '.avi', '.rm'],
    'Audio': ['.mp3', '.mp4', '.wav', '.aiff', '.midi', '.m4p']
}

# Define different formats for allowed selections.
# This has to be a subset of EXTENSIONS.
# e.g., add ?type=image to the browse-URL ...
FILEBROWSER_SELECT_FORMATS = {
    'File': ['Folder', 'Document'],
    'Image': ['Image'],
    'Media': ['Video', 'Audio'],
    'Audio': ['Audio'],
    'Document': ['Document'],
    # for TinyMCE we can also define lower-case items
    'image': ['Image'],
    'file': ['Folder', 'Image', 'Document'],
    'media': ['Video', 'Audio'],
    'audio': ['Audio'],
}
# Outgoing mail configuration.
EMAIL_HOST = 'smtp.ircam.fr'
EMAIL_PORT = '25'
DEFAULT_FROM_EMAIL = 'www@ircam.fr'
EMAIL_SUBJECT_PREFIX = "[IRCAM WWW]"
SITE_TITLE = 'IRCAM'
SITE_TAGLINE = 'Institut de Recherche et de Coordination Acoustique et Musique'
SILENCED_SYSTEM_CHECKS = ['fields.W342',]
# Mezzanine admin menu layout: (section label, (model paths, ...)).
ADMIN_MENU_ORDER = (
    (_('Pages'), ('pages.Page', 'organization-featured.Featured',)),
    (_('Media'), ('organization-media.Video', 'organization-media.VideoCategory', 'organization-media.Audio', 'organization-media.Playlist', 'organization-media.Photo', (_('Media Library'), 'fb_browse'),)),
    (_('Events'), ('mezzanine_agenda.Event', 'mezzanine_agenda.EventLocation', 'mezzanine_agenda.EventCategory', 'mezzanine_agenda.EventPrice',)),
    (_('Magazine'), ('organization-magazine.Article', 'organization-magazine.Brief',)),
    (_('Organization'), ('organization-team.Organization', 'organization-team.OrganizationType', 'organization-team.Team', 'organization-team.Department', 'organization-team.Person', 'organization-team.Activity',)),
    (_('Projects'), ('organization-project.Project',)),
    (_('Festival'), ('organization-festival.Artist',)),
    (_('Users'), ('auth.User', 'auth.Group',)),
    (_('Site'), ('sites.Site', 'redirects.Redirect', 'conf.Setting')),
)
GRAPPELLI_ADMIN_TITLE = 'IRCAM Admin'
SEARCH_MODEL_CHOICES = ()
RATINGS_ACCOUNT_REQUIRED = True
# Silence Django's naive-datetime warning noise from legacy data.
import warnings
warnings.filterwarnings(
    'ignore', r"DateTimeField .* received a naive datetime",
    RuntimeWarning, r'django\.db\.models\.fields')
# mezzanine-agenda event settings.
EVENT_SLUG = 'events'
EVENT_GOOGLE_MAPS_DOMAIN = 'maps.google.fr'
EVENT_PER_PAGE = 50
EVENT_USE_FEATURED_IMAGE = True
EVENT_SHOP_URL = 'http://eve.ircam.fr/manifeste.php/manifestation/'
EVENT_PASS_URL = 'http://eve.ircam.fr/manifeste.php/pass/'
# Serve TinyMCE config from static files in DEBUG, from disk in production.
if DEBUG:
    TINYMCE_SETUP_JS = "/static/js/tinymce_setup.js"
else:
    TINYMCE_SETUP_JS = "/srv/app/organization/core/static/js/tinymce_setup.js"
SLUGIFY = 'django.template.defaultfilters.slugify'
HOME_FEATURED_ID = 1
BREAKING_NEWS_FEATURED_ID = 4
BLOG_POST_PER_PAGE = 200
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o664
FILE_UPLOAD_TEMP_DIR = '/srv/media/uploads/tmp/'
FILEBROWSER_MAX_UPLOAD_SIZE = 512000000
# django-debug-toolbar, only wired up in DEBUG mode.
if DEBUG:
    DEBUG_TOOLBAR_CONFIG = {
        'SHOW_TOOLBAR_CALLBACK': lambda x : True
    }
    DEBUG_TOOLBAR_PATCH_SETTINGS = False
    DEBUG_TOOLBAR_PANELS = [
        'debug_toolbar.panels.versions.VersionsPanel',
        'debug_toolbar.panels.timer.TimerPanel',
        'debug_toolbar.panels.settings.SettingsPanel',
        'debug_toolbar.panels.headers.HeadersPanel',
        'debug_toolbar.panels.request.RequestPanel',
        'debug_toolbar.panels.sql.SQLPanel',
        'debug_toolbar.panels.staticfiles.StaticFilesPanel',
        'debug_toolbar.panels.templates.TemplatesPanel',
        'debug_toolbar.panels.cache.CachePanel',
        'debug_toolbar.panels.signals.SignalsPanel',
        'debug_toolbar.panels.logging.LoggingPanel',
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ]
| Python | 0.000001 | @@ -2690,16 +2690,46 @@
.Brief',
+ 'organization-magazine.Topic'
)),%0A
@@ -2806,24 +2806,18 @@
eam.
-OrganizationType
+Department
', '
@@ -2856,34 +2856,30 @@
zation-team.
-Department
+Person
', 'organiza
@@ -2880,38 +2880,40 @@
ganization-team.
-Person
+Activity
', 'organization
@@ -2918,24 +2918,32 @@
on-team.
-Activity
+OrganizationType
',)),%0A
|
b67642ce07631ffe621dc94207524c8049141987 | calculate vcirc | galpy/potential_src/plotRotcurve.py | galpy/potential_src/plotRotcurve.py | import numpy as nu
import galpy.util.bovy_plot as plot
def plotRotcurve(Pot,*args,**kwargs):
    """
    NAME:

       plotRotcurve

    PURPOSE:

       plot the rotation curve for this potential (in the z=0 plane for
       non-spherical potentials)

    INPUT:

       Pot - Potential or list of Potential instances

       Rrange - range of radii to plot over

       grid - grid in R

       savefilename - save to or restore from this savefile (pickle)

       +bovy_plot.bovy_plot args and kwargs

    OUTPUT:

       plot to output device

    HISTORY:

       2010-07-10 - Written - Bovy (NYU)

    """
    # `os` and `pickle` were used below but never imported anywhere in this
    # module, so the save/restore path raised NameError at runtime.
    import os
    import pickle
    # kwargs.pop with a default replaces the has_key()/pop() dance.
    Rrange = kwargs.pop('Rrange', [0.01, 5.])
    grid = kwargs.pop('grid', 1001)
    savefilename = kwargs.pop('savefilename', None)
    if savefilename is not None and os.path.exists(savefilename):
        print("Restoring savefile "+savefilename+" ...")
        savefile = open(savefilename, 'rb')
        rotcurve = pickle.load(savefile)
        Rs = pickle.load(savefile)
        savefile.close()
    else:
        Rs = nu.linspace(Rrange[0], Rrange[1], grid)
        rotcurve = calcRotcurve(Pot, Rs)
        if savefilename is not None:
            print("Writing savefile "+savefilename+" ...")
            savefile = open(savefilename, 'wb')
            pickle.dump(rotcurve, savefile)
            pickle.dump(Rs, savefile)
            savefile.close()
    kwargs.setdefault('xlabel', r"$R/R_0$")
    kwargs.setdefault('ylabel', r"$v_c(R)/v_c(R_0)$")
    kwargs['xrange'] = Rrange
    return plot.bovy_plot(Rs, rotcurve, *args,
                          **kwargs)
def calcRotcurve(Pot,Rs):
    """
    NAME:

       calcRotcurve

    PURPOSE:

       calculate the rotation curve for this potential (in the z=0 plane for
       non-spherical potentials)

    INPUT:

       Pot - Potential or list of Potential instances

       Rs - (array of) radius(i)

    OUTPUT:

       array of vc

    HISTORY:

       2011-04-13 - Written - Bovy (NYU)

    """
    isList= isinstance(Pot,list)
    isNonAxi= ((isList and Pot[0].isNonAxi) or (not isList and Pot.isNonAxi))
    if isNonAxi:
        raise AttributeError("Rotation curve plotting for non-axisymmetric potentials is not currently supported")
    try:
        grid= len(Rs)
    except TypeError:
        # Scalar radius: promote to a length-1 array.
        grid=1
        Rs= nu.array([Rs])
    rotcurve= nu.zeros(grid)
    from planarPotential import evaluateplanarRforces
    for ii in range(grid):
        try:
            # v_c = sqrt(R * -F_R) in the z=0 plane.
            rotcurve[ii]= nu.sqrt(Rs[ii]*-evaluateplanarRforces(Rs[ii],Pot))
        except TypeError:
            # Presumably Pot is an RZ potential; convert and retry this
            # radius (subsequent iterations use the converted Pot).
            from planarPotential import RZToplanarPotential
            Pot= RZToplanarPotential(Pot)
            rotcurve[ii]= nu.sqrt(Rs[ii]*-evaluateplanarRforces(Rs[ii],Pot))
    return rotcurve
| Python | 0.998805 | @@ -2994,8 +2994,405 @@
tcurve%0A%0A
+def vcirc(Pot,R):%0A %22%22%22%0A%0A NAME:%0A%0A vcirc%0A%0A PURPOSE:%0A%0A calculate the circular velocity at R in potential Pot%0A%0A INPUT:%0A%0A Pot - Potential instance or list of such instances%0A%0A R - Galactocentric radius%0A%0A OUTPUT:%0A%0A circular rotation velocity%0A%0A HISTORY:%0A%0A 2011-10-09 - Written - Bovy (IAS)%0A%0A %22%22%22%0A return nu.sqrt(R*-evaluateplanarRforces(R,Pot))%0A
|
d04143f1e3defd27f9c9a9dbf90d3b6c5af44aec | Add exception call to AsyncResult after task failing | huey/contrib/mini.py | huey/contrib/mini.py | #
# Minimal huey-like API using gevent and running within the parent process.
#
import datetime
import heapq
import logging
import time
from functools import wraps
import gevent
from gevent.event import AsyncResult
from gevent.event import Event
from gevent.pool import Pool
from huey.api import crontab
# Module-level logger for the mini runner.
logger = logging.getLogger('huey.mini')
class MiniHueyResult(AsyncResult):
    # Calling the result object blocks and returns the task's value,
    # mirroring huey's AsyncResult API.
    __call__ = AsyncResult.get
class MiniHuey(object):
    """In-process, gevent-based task runner exposing a huey-like API.

    Tasks run in a greenlet pool; a scheduler greenlet fires periodic and
    delayed tasks every `interval` seconds.
    """

    def __init__(self, name='huey', interval=1, pool_size=None):
        self.name = name
        self._interval = interval  # seconds between scheduler wake-ups
        self._last_check = datetime.datetime.now()
        self._periodic_interval = datetime.timedelta(seconds=60)
        self._periodic_tasks = []
        self._scheduled_tasks = []  # min-heap of (eta, fn, args, kwargs, result)
        self._shutdown = Event()
        self._pool = Pool(pool_size)
        self._run_t = None

    def task(self, validate_func=None):
        """Decorator registering a task.

        With `validate_func` (e.g. `crontab(...)`) the function becomes a
        periodic task.  Otherwise calling the decorated function enqueues it
        immediately and returns a MiniHueyResult; `.schedule()` enqueues it
        for a later eta.
        """
        if validate_func is not None:
            def periodic_task_wrapper(fn):
                self._periodic_tasks.append((validate_func, fn))
                return fn
            return periodic_task_wrapper

        def decorator(fn):
            @wraps(fn)
            def _inner(*args, **kwargs):
                async_result = MiniHueyResult()
                self._enqueue(fn, args, kwargs, async_result)
                return async_result

            def _schedule(args=None, kwargs=None, delay=None, eta=None):
                if delay is not None:
                    eta = (datetime.datetime.now() +
                           datetime.timedelta(seconds=delay))
                if eta is None:
                    raise ValueError('Either a delay (in seconds) or an '
                                     'eta (datetime) must be specified.')
                async_result = MiniHueyResult()
                heapq.heappush(self._scheduled_tasks,
                               (eta, fn, args, kwargs, async_result))
                return async_result

            _inner.schedule = _schedule
            return _inner
        return decorator

    def start(self):
        """Start the scheduler greenlet (raises if already started)."""
        if self._run_t is not None:
            raise Exception('Task runner is already running.')
        self._run_t = gevent.spawn(self._run)

    def stop(self):
        """Signal shutdown and wait for the scheduler greenlet to exit."""
        if self._run_t is None:
            raise Exception('Task runner does not appear to have started.')
        self._shutdown.set()
        logger.info('shutdown requested.')
        self._run_t.join()
        self._run_t = None

    def _enqueue(self, fn, args=None, kwargs=None, async_result=None):
        logger.info('enqueueing %s' % fn.__name__)
        self._pool.spawn(self._execute, fn, args, kwargs, async_result)

    def _execute(self, fn, args, kwargs, async_result):
        args = args or ()
        kwargs = kwargs or {}
        start = time.time()
        try:
            ret = fn(*args, **kwargs)
        except Exception as exc:
            logger.exception('task %s failed' % fn.__name__)
            if async_result is not None:
                # Previously the result was never resolved on failure, so a
                # caller blocked in result.get() would hang forever.  Now the
                # exception propagates to the waiter.
                async_result.set_exception(exc)
            raise
        else:
            duration = time.time() - start
            if async_result is not None:
                async_result.set(ret)
            logger.info('executed %s in %0.3fs', fn.__name__, duration)

    def _run(self):
        """Scheduler loop: fire periodic and scheduled tasks until shutdown."""
        logger.info('task runner started.')
        while not self._shutdown.is_set():
            start = time.time()
            now = datetime.datetime.now()
            if self._last_check + self._periodic_interval <= now:
                logger.debug('checking periodic task schedule')
                self._last_check = now
                for validate_func, fn in self._periodic_tasks:
                    if validate_func(now):
                        self._enqueue(fn)

            if self._scheduled_tasks:
                logger.debug('checking scheduled tasks')
                # The 0-th item of a heap is always the smallest.
                while self._scheduled_tasks and \
                      self._scheduled_tasks[0][0] <= now:
                    eta, fn, args, kwargs, async_result = (
                        heapq.heappop(self._scheduled_tasks))
                    self._enqueue(fn, args, kwargs, async_result)

            # Sleep out the remainder of the interval, waking early if a
            # shutdown is requested.
            remaining = self._interval - (time.time() - start)
            if remaining > 0:
                if not self._shutdown.wait(remaining * 0.9):
                    gevent.sleep(self._interval - (time.time() - start))

        logger.info('exiting task runner')
| Python | 0.000004 | @@ -2936,32 +2936,76 @@
%25 fn.__name__)%0A
+ async_result.set_exception(exc)%0A
rais
|
4ca8889396595f9da99becbb88fb7e38ab0ed560 | Raise exception if connection not succeed and customize error message | hunter/reviewsapi.py | hunter/reviewsapi.py | import requests
import os
from .endpoints import *
class UnauthorizedToken(Exception):
    """Raised when the Udacity API rejects the configured auth token."""
    pass
class ReviewsAPI:
    """Minimal client for the Udacity reviews API.

    Reads the auth token from the UDACITY_AUTH_TOKEN environment variable.
    """

    def __init__(self):
        token = os.environ.get('UDACITY_AUTH_TOKEN')
        self.headers = {'Authorization': token, 'Content-Length': '0'}

    def certifications(self):
        """Return the project ids this reviewer is certified for.

        Raises UnauthorizedToken when the API answers with an HTTP error
        status (typically 401/403 for a stale token).
        """
        try:
            raw_response = requests.get(CERTIFICATIONS_URL,
                                        headers=self.headers)
            # requests does not raise on 4xx/5xx by itself, so without this
            # call the except clause below could never fire.
            raw_response.raise_for_status()
            response = raw_response.json()
            certifications_list = [item['project_id'] for item in response
                                   if item['status'] == 'certified']
            return certifications_list
        except requests.exceptions.HTTPError:
            raise UnauthorizedToken("Maybe it's time to change your token!")

    def request_reviews(self, certifications_list):
        """Queue review requests for every certified project."""
        projects = self.__projects(certifications_list)
        return requests.post(SUBMISSION_REQUESTS, json=projects,
                             headers=self.headers)

    # TODO Add support to multi language
    def __projects(self, certifications_list):
        # Build the payload expected by the submission-requests endpoint.
        projects_list = []
        for certification in certifications_list:
            projects_list.append({'project_id': certification,
                                  'language': 'pt-br'})
        return {'projects': projects_list}
| Python | 0 | @@ -429,16 +429,74 @@
.json()%0A
+ %0A raw_response.raise_for_status()%0A%0A
@@ -716,16 +716,57 @@
zedToken
+('Maybe it%5C's time to change you token!')
%0A%0A de
|
2ba350d71e8a24471ea80fafa75803eb439c4ea6 | add require(internet) | i3pystatus/parcel.py | i3pystatus/parcel.py |
from urllib.request import urlopen
import webbrowser
import lxml.html
from lxml.cssselect import CSSSelector
from i3pystatus import IntervalModule
class TrackerAPI:
    """Base interface for a parcel-tracking backend."""
    def __init__(self, idcode):
        pass

    def status(self):
        # Subclasses return a mapping with "progress" and "status" keys.
        return {}
class DHL(TrackerAPI):
    """Scrape shipment status from the public DHL tracking page."""
    URL = "http://nolp.dhl.de/nextt-online-public/set_identcodes.do?lang=en&idc={idcode}"

    def __init__(self, idcode):
        self.idcode = idcode
        self.url = self.URL.format(idcode=self.idcode)

        error_selector = CSSSelector("#set_identcodes .error")
        # True when the page shows an error box (e.g. unknown tracking code).
        self.error = lambda page: len(error_selector(page)) >= 1

        self.progress_selector = CSSSelector(
            ".greyprogressbar > span, .greenprogressbar > span")
        self.last_status_selector = CSSSelector(".events .eventList tr")
        self.intrarow_status_selector = CSSSelector("td.status div")

    def status(self):
        # Fetch and parse the tracking page; network access required.
        ret = {}
        with urlopen(self.url) as page:
            page = lxml.html.fromstring(page.read())
            if self.error(page):
                ret["progress"] = ret["status"] = "n/a"
            else:
                ret["progress"] = self.progress_selector(page)[0].text.strip()
                # Last event row holds the most recent status text.
                last_row = self.last_status_selector(page)[-1]
                ret["status"] = self.intrarow_status_selector(
                    last_row)[0].text.strip()
        return ret

    def get_url(self):
        return self.url
class UPS(TrackerAPI):
    """Scrape shipment status from the public UPS tracking page."""
    URL = "http://wwwapps.ups.com/WebTracking/processRequest?HTMLVersion=5.0&Requester=NES&AgreeToTermsAndConditions=yes&loc=en_US&tracknum={idcode}"

    def __init__(self, idcode):
        self.idcode = idcode
        self.url = self.URL.format(idcode=self.idcode)

        error_selector = CSSSelector(".secBody .error")
        # True when the page shows an error box (e.g. unknown tracking code).
        self.error = lambda page: len(error_selector(page)) >= 1

        self.status_selector = CSSSelector("#tt_spStatus")
        self.progress_selector = CSSSelector(".pkgProgress div")

    def status(self):
        # Fetch and parse the tracking page; network access required.
        ret = {}
        with urlopen(self.url) as page:
            page = lxml.html.fromstring(page.read())
            if self.error(page):
                ret["progress"] = ret["status"] = "n/a"
            else:
                ret["status"] = self.status_selector(page)[0].text.strip()
                # The class attribute encodes progress as "statusN"; strip the
                # letters s/t/a/u and scale the step (out of 5) to a percent.
                progress_cls = int(
                    int(self.progress_selector(page)[0].get("class").strip("staus")) / 5 * 100)
                ret["progress"] = progress_cls
        return ret

    def get_url(self):
        return self.url
class ParcelTracker(IntervalModule):
    """i3pystatus module showing parcel progress via a TrackerAPI instance."""
    interval = 20

    settings = (
        ("instance", "Tracker instance"),
        "format",
        "name",
    )
    required = ("instance",)

    format = "{name}:{progress}"

    def run(self):
        # Merge the module name with whatever the tracker backend reports
        # (expected keys: "progress", "status").
        fdict = {
            "name": self.name,
        }
        fdict.update(self.instance.status())

        self.output = {
            "full_text": self.format.format(**fdict).strip(),
            "instance": self.name,
        }

    def on_leftclick(self):
        # Open the carrier's tracking page in the default browser.
        webbrowser.open_new_tab(self.instance.get_url())
| Python | 0.000005 | @@ -143,16 +143,67 @@
lModule%0A
+from i3pystatus.core.util import internet, require%0A
%0A%0Aclass
@@ -2789,24 +2789,47 @@
progress%7D%22%0A%0A
+ @require(internet)%0A
def run(
|
d12cfe8125f9f20023493e4be4cce673cd6c207d | Update package in database | iatidq/dqpackages.py | iatidq/dqpackages.py |
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from iatidq import db
import models
def addPackage(data):
    """Create a Package from *data* unless one with that name already exists.

    Returns the new Package, or False when the name is taken.
    """
    existing = models.Package.query.filter_by(
        package_name=data['package_name']).first()
    if existing:
        return False
    with db.session.begin():
        package = models.Package()
        package.package_name = data['package_name']
        package.package_title = data['package_title']
        package.source_url = data['source_url']
        package.man_auto = data['man_auto']
        package.active = data['active']
        db.session.add(package)
    return package
def package_status(package_id):
    # Most recent status row for the given package.
    return models.PackageStatus.query.filter_by(
        package_id=package_id).order_by("runtime_datetime desc").first()

def packages(package_id=None):
    # One Package by id, or all packages ordered by name when id is None.
    if package_id is not None:
        return models.Package.query.filter_by(
            id=package_id).order_by(models.Package.package_name).first()
    else:
        return models.Package.query.order_by(models.Package.package_name).all()

def packages_by_name(package_name):
    # Single Package looked up by its unique name (or None).
    return models.Package.query.filter_by(
        package_name=package_name).order_by(models.Package.package_name).first()

def packages_by_packagegroup(packagegroup_id=None):
    # All packages belonging to the package group with this id.
    return models.Package.query.filter(
        models.PackageGroup.id==packagegroup_id
        ).join(models.PackageGroup
        ).order_by(models.Package.package_name
        ).all()

def packages_by_packagegroup_name(packagegroup_name=None):
    # All packages belonging to the package group with this name.
    return models.Package.query.filter(
        models.PackageGroup.name==packagegroup_name
        ).join(models.PackageGroup
        ).order_by(models.Package.package_name
        ).all()

def packageGroups():
    # All package groups, ordered by name.
    return models.PackageGroup.query.order_by(models.PackageGroup.name).all()
def packageOrganisations(package_id):
    # (Organisation, OrganisationPackage) tuples for a package; False when
    # no package_id is supplied.
    if package_id is not None:
        packageorganisations = db.session.query(models.Organisation,
            models.OrganisationPackage
            ).filter(models.Package.id==package_id
            ).join(models.OrganisationPackage
            ).join(models.Package
            ).all()
        return packageorganisations
    else:
        return False

def packageGroupOrganisations(packagegroup_name):
    # Organisations attached to a package group; False when no name given.
    if packagegroup_name is not None:
        packagegrouporganisations = db.session.query(models.Organisation
            ).filter(models.PackageGroup.name==packagegroup_name
            ).join(models.OrganisationPackageGroup
            ).join(models.PackageGroup
            ).all()
        return packagegrouporganisations
    else:
        return False
def get_organisations_for_testing(package_id):
    """Build XPath test scopes for each organisation in a package.

    Returns a list of dicts with 'organisation_id' and 'activities_xpath';
    falls back to a single catch-all entry when the package has no
    organisations.  Conditions are bracketed into XPath predicates, and a
    remainder entry matches activities satisfying none of them.
    """
    organisations = []
    conditions = []
    conditions_unbracketed = []
    packageorganisations = packageOrganisations(package_id)
    # Catch-all scope used when there is nothing organisation-specific.
    dummy = [{
        'organisation_id': None,
        'activities_xpath': "//iati-activity"
    }]
    if not packageorganisations:
        return dummy
    for packageorganisation in packageorganisations:
        # add organisations to be tested;
        organisation_id = packageorganisation.Organisation.id
        condition = packageorganisation.OrganisationPackage.condition
        if condition == '':
            condition = None
        if condition is not None:
            # unicode-escape is necessary to deal with an umlaut in a condition.
            # NOTE(review): str.decode is Python-2 only; this call breaks on
            # Python 3 strings — confirm the runtime before porting.
            condition_unbracketed = condition.decode('unicode-escape').strip()
            condition = u"[" + condition_unbracketed + u"]"
            conditions.append(condition)
            conditions_unbracketed.append(condition_unbracketed)
        else:
            condition_unbracketed = u""
            condition = u""
        organisations.append({
            'organisation_id': organisation_id,
            'activities_xpath': u"//iati-activity%s" % condition
        })
    conditions_str = " or ".join(conditions_unbracketed)
    remainder_xpath = u"//iati-activity[not(%s)]" % conditions_str
    if conditions:
        # Everything not claimed by an organisation-specific condition.
        organisations.append({
            'organisation_id': None,
            'activities_xpath': remainder_xpath
        })
    if organisations:
        return organisations
    return dummy
| Python | 0 | @@ -858,32 +858,680 @@
return False%0A%0A
+def updatePackage(data):%0A checkP = models.Package.query.filter_by(%0A id=data%5B'package_id'%5D).first()%0A checkOK = models.Package.query.filter_by(package_name=data%5B'package_name'%5D).first()%0A if checkP:%0A if (checkOK and checkOK.id!=checkP.id):%0A return False%0A with db.session.begin():%0A checkP.package_name = data%5B'package_name'%5D%0A checkP.package_title = data%5B'package_title'%5D%0A checkP.source_url = data%5B'source_url'%5D%0A checkP.man_auto = data%5B'man_auto'%5D%0A checkP.active=data%5B'active'%5D%0A db.session.add(checkP)%0A return checkP%0A return False%0A%0A
def package_stat
|
792e46bcd01d2718215a3cb324b8deca5e4e1a7e | bump 1.3.10 release (#160) | icontrol/__init__.py | icontrol/__init__.py | __version__ = "1.3.9"
| Python | 0 | @@ -16,7 +16,8 @@
1.3.
-9
+10
%22%0A
|
5dde537bec1a4d4ddd89999b0a15d9a90fa2e41d | add NOARP attribute | ifparser/ifconfig.py | ifparser/ifconfig.py | from __future__ import unicode_literals, print_function
from .re_scan import ScanEnd, Scanner
class Interface(object):
    """Holds the parsed attributes and state flags of one network interface.

    Unset known attributes read as None and unset known flags read as False
    (via __getattr__); assigning an unknown name raises ValueError, and
    falsy values are silently ignored.
    """

    # Value-style attributes parsed out of ifconfig output.
    _attrs = frozenset([
        'interface', 'itype', 'mtu', 'ip', 'bcast', 'mask', 'hwaddr',
        'txbytes', 'rxbytes', 'rxpkts', 'txpkts'
    ])
    # Boolean state flags reported on the interface line.  NOARP added:
    # interfaces advertising it previously crashed __setattr__ with
    # "Invalid attribute mentioned".
    _flags = frozenset(
        ['BROADCAST', 'MULTICAST', 'UP', 'RUNNING', 'LOOPBACK', 'DYNAMIC',
         'PROMISC', 'NOARP'])

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __getattr__(self, name):
        """
        Return None for unset attributes, False for unset flags.
        """
        if name in Interface._attrs:
            return None
        if name in Interface._flags:
            return False

    def __setattr__(self, name, value):
        # Only known attrs/flags may be assigned; falsy values are dropped
        # so the __getattr__ defaults remain in effect.
        if name in Interface._attrs or name in Interface._flags:
            if value:
                super(Interface, self).__setattr__(name, value)
        else:
            raise ValueError("Invalid attribute mentioned name=%s value=%s" %
                             (name, value))

    def __str__(self):
        return "%s-%s" % ("obj", self.interface)

    def __repr__(self):
        return self.__str__()

    def get_values(self):
        """Return a dict of every known attribute and flag (set or default)."""
        value_dict = {}
        for attr in Interface._attrs:
            value_dict[attr] = getattr(self, attr)
        for attr in Interface._flags:
            value_dict[attr] = getattr(self, attr)
        return value_dict
class ParseError(Exception):
    """Raised when a line of ifconfig output matches no known pattern."""
    pass

class InterfaceNotFound(Exception):
    """Raised when a requested interface name was not parsed."""
    pass
class Ifcfg(object):
    """Parses raw `ifconfig` output into Interface objects."""

    # Ordered (handler-name, regex) pairs; the scanner tries them in order
    # for every input line and dispatches to the matching process_* method.
    scanner = Scanner([
        ('process_interface', r"(?P<interface>^[a-zA-Z0-9:-]+)\s+"
         "Link encap\:(?P<itype>[A-Za-z]+)\s+"
         "((?:Loopback)|(?:HWaddr\s(?P<hwaddr>[0-9A-Fa-f:]+))).*"),
        ('process_any', r"\s+ether\s(?P<hwaddr>[0-9A-Fa-f:]+).*"),
        ('process_ip', r"\s+inet[\s:].*"),
        ('process_mtu', r"\s+(?P<states>[A-Z\s]+\s*)+MTU:(?P<mtu>[0-9]+).*"),
        ('process_any', r"\s+RX bytes:(?P<rxbytes>\d+).*?"
         "TX bytes:(?P<txbytes>\d+).*"),
        ('process_any', r"\s+RX packets[:\s](?P<rxpkts>\d+).*"),
        ('process_any', r"\s+TX packets[:\s](?P<txpkts>\d+).*"),
        ('process_interface2',
         r"(?P<interface>^[a-zA-Z0-9-]+).*?<(?P<states>[A-Z,]+\s*)>"
         ".*?mtu (?P<mtu>[0-9]+).*"),
        ('process_ignore', r"(Ifconfig|Infiniband|Because)\s.*"),
        ('process_ignore', r"\s+.*"),
    ])

    def __init__(self, raw_text, debug=False):
        self.debug = debug
        self._interfaces = {}
        self.curr_interface = None
        self._process(raw_text)

    def _process(self, raw_text):
        # Scan line by line; ScanEnd means no pattern matched at all.
        for line in raw_text.splitlines():
            try:
                for token, match in Ifcfg.scanner.scan(line):
                    process_func = getattr(self, token)
                    process_func(match.groups(),
                                 match.groupdict(), match.group())
            except ScanEnd:
                raise ParseError(repr(line))

    def set_curr_interface_attr(self, kwargs):
        # Apply each captured group to the current Interface object.
        for k, v in kwargs.items():
            setattr(self._interfaces[self.curr_interface], k, v)

    def process_interface(self, group, groupdict, matched_str):
        # Classic (net-tools) interface header line.
        self.curr_interface = groupdict['interface']
        self._interfaces[self.curr_interface] = Interface()
        self.set_curr_interface_attr(groupdict)

    def process_interface2(self, group, groupdict, matched_str):
        # Newer-style header: "eth0: flags=…<UP,BROADCAST,…> mtu 1500".
        self.curr_interface = groupdict['interface']
        self._interfaces[self.curr_interface] = Interface()
        states = groupdict.pop('states').strip().split(',')
        for st in states:
            groupdict[st] = True
        self.set_curr_interface_attr(groupdict)

    def process_ip(self, group, groupdict, matched_str):
        if ':' in matched_str:
            # Old format: "inet addr:1.2.3.4  Bcast:…  Mask:…".
            for attr in matched_str.strip().lower().replace('inet addr',
                                                            'ip').split():
                name, value = attr.split(':')
                setattr(self._interfaces[self.curr_interface], name, value)
        else:
            # New format: "inet 1.2.3.4 netmask … broadcast …" — pairwise
            # iterate (keyword, value) tokens.
            map_dict = {'inet': 'ip', 'netmask': 'mask', 'broadcast': 'bcast'}
            kv = iter(matched_str.split())
            for k, v in zip(kv, kv):
                groupdict[map_dict[k]] = v
            self.set_curr_interface_attr(groupdict)

    def process_any(self, group, groupdict, matched_str):
        self.set_curr_interface_attr(groupdict)

    def process_mtu(self, group, groupdict, matched_str):
        # Old-format state line: "UP BROADCAST RUNNING  MTU:1500 …".
        states = groupdict.pop('states').strip().split()
        for st in states:
            groupdict[st] = True
        self.set_curr_interface_attr(groupdict)

    def process_ignore(self, group, groupdict, matched_str):
        if self.debug:
            print("{0} {1} {2}".format(group, groupdict, matched_str))

    @property
    def interfaces(self):
        # Sorted list of parsed interface names.
        return sorted(self._interfaces.keys())

    def get_interface(self, interface):
        if interface in self._interfaces:
            return self._interfaces[interface]
        raise InterfaceNotFound("No such interface {0} found.".format(
            interface))

    def get(self, **kwargs):
        """Return interfaces whose attrs/flags equal all given kwargs."""
        for key in kwargs.keys():
            key_check = key in Interface._attrs or key in Interface._flags
            if not key_check:
                raise ValueError("Invalid argument: %s" % key)
        eligible = []
        for name, interface in self._interfaces.items():
            inc_check = True
            for key in kwargs.keys():
                if not inc_check:
                    continue
                inc_check = getattr(interface, key) == kwargs[key]
            if inc_check:
                eligible.append(interface)
        return eligible
| Python | 0.000001 | @@ -373,16 +373,25 @@
PROMISC'
+, 'NOARP'
%5D)%0A%0A
|
627d5a0a7f8f8886b362ececa5b4bb8029ad7e34 | add client and params arguments | library/module_utils/network/f5/common.py | library/module_utils/network/f5/common.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
f5_provider_spec = {
'server': dict(fallback=(env_fallback, ['F5_SERVER'])),
'server_port': dict(type='int', default=443, fallback=(env_fallback, ['F5_SERVER_PORT'])),
'user': dict(fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])),
'password': dict(no_log=True, fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'validate_certs': dict(type='bool', fallback=(env_fallback, ['F5_VALIDATE_CERTS'])),
'transport': dict(default='rest', choices=['cli', 'rest'])
}
f5_argument_spec = {
'provider': dict(type='dict', options=f5_provider_spec),
}
f5_top_spec = {
'server': dict(removed_in_version=2.9, fallback=(env_fallback, ['F5_SERVER'])),
'user': dict(removed_in_version=2.9, fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])),
'password': dict(removed_in_version=2.9, no_log=True, fallback=(env_fallback, ['F5_PASSWORD'])),
'validate_certs': dict(removed_in_version=2.9, type='bool', fallback=(env_fallback, ['F5_VALIDATE_CERTS'])),
'server_port': dict(removed_in_version=2.9, type='int', default=443, fallback=(env_fallback, ['F5_SERVER_PORT'])),
'transport': dict(removed_in_version=2.9, choices=['cli', 'rest'])
}
f5_argument_spec.update(f5_top_spec)
def get_provider_argspec():
return f5_provider_spec
# Fully Qualified name (with the partition)
def fqdn_name(partition, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(partition, value)
return value
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fqdn_name(partition, x), list_names)
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
def cleanup_tokens(client):
try:
resource = client.api.shared.authz.tokens_s.token.load(
name=client.api.icrs.token
)
resource.delete()
except Exception:
pass
class Noop(object):
"""Represent no-operation required
This class is used in the Difference engine to specify when an attribute
has not changed. Difference attributes may return an instance of this
class as a means to indicate when the attribute has not changed.
The Noop object allows attributes to be set to None when sending updates
to the API. `None` is technically a valid value in some cases (it indicates
that the attribute should be removed from the resource).
"""
pass
class F5BaseClient(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
@property
def api(self):
raise F5ModuleError("Management root must be used from the concrete product classes.")
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
module.
This method can be used to reconnect to a remote device without
having to re-instantiate the ArgumentSpec and AnsibleF5Client classes
it will use the same values that were initially provided to those
classes
:return:
:raises iControlUnexpectedHTTPError
"""
self.api = self.mgmt
class AnsibleF5Parameters(object):
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
pass
| Python | 0.000001 | @@ -4752,35 +4752,39 @@
nit__(self,
-params=None
+*args, **kwargs
):%0A s
@@ -4819,24 +4819,24 @@
mbda: None)%0A
-
self
@@ -4863,16 +4863,109 @@
'%5D = %5B%5D%0A
+ self.client = kwargs.pop('client', None)%0A params = kwargs.pop('params', None)%0A
|
3ed9e04b0b3db8c75a030369da56251d258d92db | Refactor lddb_json_shape.py to handle e.g. framed data | librisxl-tools/scripts/lddb_json_shape.py | librisxl-tools/scripts/lddb_json_shape.py | import json
# Per-key histogram size limits; beyond the limit a histogram collapses
# into a plain occurrence counter.
MAX_STATS = 512
HARD_MAX_STATS = 8192

# Keys allowed to grow up to the hard limit before collapsing.
STATS_FOR_ALL = {
    # from auth 008
    "marc:subdivision",
    "marc:romanization",
    "marc:languageOfCatalog",
    "marc:kindOfRecord",
    "descriptionConventions",
    "marc:subjectHeading",
    "marc:typeOfSeries",
    "marc:numberedSeries",
    "marc:headingSeries",
    "marc:subjectSubdivision",
    "marc:govtAgency",
    "marc:reference",
    "marc:recordUpdate",
    "marc:personalName",
    "marc:level",
    "marc:modifiedRecord",
    "marc:catalogingSource",
    "marc:headingMain",
    "marc:headingSubject",
    # "shouldn't" be too many...
    "marc:displayText",
    "part",
}


def compute_shape(node, index, type_key=None):
    """Fold the structure of *node* into *index*, keyed by record type."""
    # A bare reference ({"@id": ...}) is counted as a value, not descended.
    if len(node) == 1 and '@id' in node:
        count_value('@id', node['@id'], index)
        return

    rtype = type_key if type_key else node.get('@type')
    if isinstance(rtype, list):
        rtype = '+'.join(rtype)

    shape = index.setdefault(rtype, {})
    for key, value in node.items():
        # Treat single values and lists uniformly.
        values = value if isinstance(value, list) else [value]
        for item in values:
            if isinstance(item, dict):
                compute_shape(item, shape.setdefault(key, {}))
            else:
                count_value(key, item, shape)


def count_value(key, value, shape):
    """Count one occurrence of *value* under *key* in *shape*."""
    stats = shape.setdefault(key, {})
    if not isinstance(stats, dict):
        # Histogram was already collapsed into a plain counter.
        shape[key] = stats + 1
        return
    limit = HARD_MAX_STATS if key in STATS_FOR_ALL else MAX_STATS
    if len(stats) < limit:
        # Literal (non-keyword) scalars are tagged so they cannot collide
        # with structural keys.
        if not key.startswith('@') and isinstance(value, (str, bool, int, float)):
            value = f'@value {value}'
        stats[value] = stats.get(value, 0) + 1
    else:
        # Too many distinct values: collapse into a total occurrence count.
        shape[key] = sum(stats.values()) + 1
if __name__ == '__main__':
from time import time
import sys
from pathlib import Path
args = sys.argv[:]
cmd = args.pop(0)
if not args:
print(f'USAGE: {cmd} OUT_DIR', file=sys.stderr)
sys.exit(1)
outpath = Path(args.pop(0))
SUFFIX = '.json'
if outpath.suffix == SUFFIX:
outdir = outpath.parent
else:
outdir = outpath
outpath = None
if not outdir.is_dir():
outdir.mkdir(parents=True, exist_ok=True)
index = {}
work_by_type_index = {}
instance_index = {}
work_index = {}
t_last = 0
cr = '\r'
for i, l in enumerate(sys.stdin):
if not l.rstrip():
continue
if isinstance(l, bytes):
l = l.decode('utf-8')
t_now = time()
if t_now - t_last > 2:
t_last = t_now
print(f'{cr}At: {i + 1:,}', end='', file=sys.stderr)
try:
data = json.loads(l)
graph = data['@graph']
thing =graph[1]
thing['meta'] =graph[0]
if len(graph) > 2 and 'instanceOf' in thing:
work = graph[2]
assert thing['instanceOf']['@id'] == work['@id']
thing['instanceOf'] = work
else:
work = None
compute_shape(thing, index)
if work:
compute_shape(thing, instance_index, type_key='Instance')
compute_shape(work, work_by_type_index)
compute_shape(work, work_index, type_key='Work')
except (ValueError, AttributeError) as e:
print(f'ERROR at: {i} in data:', file=sys.stderr)
print(l, file=sys.stderr)
print(e, file=sys.stderr)
print(f'{cr}Total: {i + 1:,}', file=sys.stderr)
def output(index, fpath):
with fpath.open('w') as f:
json.dump(index, f, indent=2, ensure_ascii=False)
print(f'Wrote: {fpath}', file=sys.stderr)
if outpath:
output(index, outpath)
else:
to_outfile = lambda name: (outdir / name).with_suffix(SUFFIX)
output(index, to_outfile('instance_shapes_by_type'))
output(instance_index, to_outfile('instance_shapes'))
output(work_by_type_index, to_outfile('work_shapes_by_type'))
output(work_index, to_outfile('work_shapes'))
| Python | 0 | @@ -738,16 +738,440 @@
t%22,%0A%7D%0A%0A%0A
+def reshape(data):%0A if '@graph' in data:%0A graph = data%5B'@graph'%5D%0A thing =graph%5B1%5D%0A thing%5B'meta'%5D = graph%5B0%5D%0A%0A if len(graph) %3E 2 and 'instanceOf' in thing:%0A work = graph%5B2%5D%0A assert thing%5B'instanceOf'%5D%5B'@id'%5D == work%5B'@id'%5D%0A thing%5B'instanceOf'%5D = work%0A else:%0A work = None%0A%0A return thing, work%0A%0A return data, data.get('instanceOf')%0A%0A%0A
def comp
@@ -3247,340 +3247,35 @@
-graph = data%5B'@graph'%5D%0A thing =graph%5B1%5D%0A thing%5B'meta'%5D =graph%5B0%5D%0A%0A if len(graph) %3E 2 and 'instanceOf' in thing:%0A work = graph%5B2%5D%0A assert thing%5B'instanceOf'%5D%5B'@id'%5D == work%5B'@id'%5D%0A thing%5B'instanceOf'%5D = work%0A else:%0A work = None%0A
+thing, work = reshape(data)
%0A
|
a557e46e64fcd1992988fee4cd7a4cfa2da2eb61 | add state deregistering | xoinvader/application/__init__.py | xoinvader/application/__init__.py | """Base class for game application."""
from tornado import ioloop
from xoinvader.constants import DEFAULT_FPS, DRIVER_NCURSES, DRIVER_SDL
from xoinvader.common import Settings
_CURRENT_APPLICATION = None
"""Current application instance."""
class ApplicationNotInitializedError(Exception):
"""Raise when try to get not initialized application."""
def __init__(self):
super(ApplicationNotInitializedError, self).__init__(
"Application not initialized.")
def get_current():
"""Current application getter.
:return: current application object
"""
if _CURRENT_APPLICATION is not None:
return _CURRENT_APPLICATION
else:
raise ApplicationNotInitializedError()
# TODO: implement proper choosing by env
def get_application():
"""Application class getter.
:return: application class based on environment
"""
driver_map = {
DRIVER_NCURSES: get_ncurses_application,
DRIVER_SDL: get_pygame_application,
}
return driver_map[Settings.system.video_driver]()
def get_ncurses_application():
"""Incapsulate ncurses-related stuff.
:return: CursesApplication class
"""
from .ncurses_app import CursesApplication
return CursesApplication
def get_pygame_application():
"""Incapsulate pygame-related stuff.
:return: PygameApplication class
"""
from .pygame_app import PygameApplication
return PygameApplication
def trigger_state(state, **kwargs):
"""Change current state and pass to it data via kwargs.
:param str state: state name
"""
app = get_current()
app.state = state
app.state.trigger(**kwargs)
class Application(object):
"""Base application class for backend-specific application classes.
Provides state primitive mechanism and some useful getters/setters.
"""
def __init__(self):
global _CURRENT_APPLICATION
_CURRENT_APPLICATION = self
self._state = None
self._states = {}
self._screen = None
self._fps = DEFAULT_FPS
self._ioloop = ioloop.IOLoop.instance()
self._pc = ioloop.PeriodicCallback(self.tick, 30, self._ioloop)
self._pc.start()
def tick(self):
"""Callback to execute at every frame."""
self._state.events()
self._state.update()
self._state.render()
@property
def state(self):
"""Current state.
:getter: Return current state
:setter: Set current state
:type: :class:`xoinvader.application.Application`
"""
if self._state:
return self._state
else:
raise AttributeError("There is no available state.")
@state.setter
def state(self, name):
"""Setter."""
if name in self._states:
self._state = self._states[name]
else:
raise KeyError("No such state: '{0}'.".format(name))
@property
def states(self):
"""State names to State classes mapping.
:getter: yes
:setter: no
:type: dict
"""
return self._states
def register_state(self, state):
"""Add new state and initiate it with owner.
:param state: state class to register
:type state: :class:`xoinvader.state.State`
"""
name = state.__name__
state_object = state(self)
self._states[name] = state_object
if len(self._states) == 1:
self._state = self._states[name]
# NOTE: State cannot instantiate in State.__init__ objects that
# want access to state because there is no instance at creation
# moment. For such objects state can declare it's 'postinit'
# method.
state_object.postinit()
@property
def fps(self):
"""Frames per second.
:getter: yes
:setter: yes
:type: int
"""
return self._fps
@fps.setter
def fps(self, val):
"""Setter."""
self._fps = int(val)
@property
def screen(self):
"""Application's screen Surface.
:getter: yes
:setter: no
:type: class::`curses.Window` or class::`pygame.display.Surface`
"""
return self._screen
def start(self):
"""Start main application loop."""
if not self._state:
raise AttributeError("There is no available state.")
self._ioloop.start()
def stop(self):
"""Stop application."""
self._pc.stop()
self._ioloop.add_callback(self._ioloop.stop)
| Python | 0 | @@ -3787,24 +3787,207 @@
postinit()%0A%0A
+ def deregister_state(self, name):%0A %22%22%22Remove existing state.%0A%0A :param str name: name of state%0A %22%22%22%0A%0A state = self._states.pop(name)%0A del state%0A%0A
@propert
|
9d20717b39154252109153a6c5936922d28c6511 | mark unicode context values as safe | mailviews/utils.py | mailviews/utils.py | import textwrap
from collections import namedtuple
from django.utils.safestring import mark_safe
Docstring = namedtuple('Docstring', ('summary', 'body'))
def split_docstring(value):
"""
Splits the docstring of the given value into it's summary and body.
:returns: a 2-tuple of the format ``(summary, body)``
"""
docstring = getattr(value, '__doc__', '') or ''
docstring = textwrap.dedent(docstring)
if not docstring:
return None
pieces = docstring.strip().split('\n\n', 1)
try:
body = pieces[1]
except IndexError:
body = None
return Docstring(pieces[0], body)
def unimplemented(*args, **kwargs):
raise NotImplementedError
def unescape(context):
"""
Accepts a context object, returning a new context with autoescape off.
Useful for rendering plain-text templates without having to wrap the entire
template in an `{% autoescape off %}` tag.
"""
for key in context:
if type(context[key]) is str:
context[key] = mark_safe(context[key])
return context
| Python | 0.999996 | @@ -1000,13 +1000,24 @@
%5D) i
-s str
+n %5Bstr, unicode%5D
:%0A
|
8aa9f5c86a507b238620792686afe30303536bc1 | Change benchmark score output to (?) for likely bogus data. | benchmark/scripts/compare_perf_tests.py | benchmark/scripts/compare_perf_tests.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ===--- compare_perf_tests.py --------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===----------------------------------------------------------------------===//
# e.g.
# repeat.sh 3 tot/bin/Benchmark_Driver run -o -O > tot.O.times
# repeat.sh 3 mypatch/bin/Benchmark_Driver run -o -O > mypatch.O.times
# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | column -s, -t
import sys
import re
VERBOSE = 0
# #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
KEYGROUP = 2
VALGROUP = 4
NUMGROUP = 1
IsTime = 1
ShowSpeedup = 1
PrintAllScores = 0
def parseInt(word):
try:
return int(word)
except:
raise Exception("Expected integer value, not " + word)
def getScores(fname):
scores = {}
nums = {}
runs = 0
f = open(fname)
try:
for line in f:
if VERBOSE:
print "Parsing", line,
m = SCORERE.match(line)
is_total = False
if not m:
is_total = True
m = TOTALRE.match(line)
if not m:
continue
if VERBOSE:
print " match", m.group(KEYGROUP), m.group(VALGROUP)
if not m.group(KEYGROUP) in scores:
scores[m.group(KEYGROUP)] = []
scores[m.group(KEYGROUP)].append(parseInt(m.group(VALGROUP)))
if is_total:
nums[m.group(KEYGROUP)] = ""
else:
nums[m.group(KEYGROUP)] = m.group(NUMGROUP)
if len(scores[m.group(KEYGROUP)]) > runs:
runs = len(scores[m.group(KEYGROUP)])
finally:
f.close()
return scores, runs, nums
def isMaxScore(newscore, maxscore, invert):
return not maxscore or (newscore > maxscore if not invert else newscore < maxscore)
def compareScores(key, score1, score2, runs, num):
print num.rjust(3),
print key.ljust(25),
bestscore1 = None
bestscore2 = None
worstscore1 = None
worstscore2 = None
minbest = IsTime
minworst = not minbest
r = 0
for score in score1:
if isMaxScore(newscore=score, maxscore=bestscore1, invert=minbest):
bestscore1 = score
if isMaxScore(newscore=score, maxscore=worstscore1, invert=minworst):
worstscore1 = score
if PrintAllScores:
print ("%d" % score).rjust(16),
for score in score2:
if isMaxScore(newscore=score, maxscore=bestscore2, invert=minbest):
bestscore2 = score
if isMaxScore(newscore=score, maxscore=worstscore2, invert=minworst):
worstscore2 = score
if PrintAllScores:
print ("%d" % score).rjust(16),
r += 1
while r < runs:
if PrintAllScores:
print ("0").rjust(9),
r += 1
if not PrintAllScores:
print ("%d" % bestscore1).rjust(16),
print ("%d" % bestscore2).rjust(16),
print ("%+d" % (bestscore2 - bestscore1)).rjust(9),
if bestscore1 != 0 and bestscore2 != 0:
print ("%+.1f%%" % (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9),
if ShowSpeedup:
Num, Den = float(bestscore2), float(bestscore1)
if IsTime:
Num, Den = Den, Num
print ("%.2fx" % (Num / Den)).rjust(9),
else:
print "*".rjust(9),
if ShowSpeedup:
print "*".rjust(9),
# if the interval endpoints have inverse relationship, then they overlap
if minbest:
if bestscore1 < worstscore2:
print "(!)",
else:
if bestscore1 > worstscore2:
print "(!)",
print
def printBestScores(key, scores):
print key,
bestscore = None
minbest = IsTime
for score in scores:
if isMaxScore(newscore=score, maxscore=bestscore, invert=minbest):
bestscore = score
print ", %d" % bestscore
def usage():
print "repeat.sh <n> Benchmark_O[none|unchecked] > file.times"
print "compare_perf_tests.py <file.times> [<file2.times>]"
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
sys.exit(1)
file1 = sys.argv[1]
if len(sys.argv) < 3:
scores, runs, nums = getScores(file1)
keys = list(set(scores.keys()))
keys.sort()
for key in keys:
printBestScores(key, scores[key])
sys.exit(0)
file2 = sys.argv[2]
if len(sys.argv) > 3:
SCORERE = re.compile(sys.argv[3])
scores1, runs1, nums = getScores(file1)
scores2, runs2, nums = getScores(file2)
runs = runs1
if runs2 > runs:
runs = runs2
if VERBOSE:
print scores1
print scores2
keys = list(set(scores1.keys() + scores2.keys()))
keys.sort()
if VERBOSE:
print "comparing ", file1, "vs", file2, "=",
if IsTime:
print file1, "/", file2
else:
print file2, "/", file1
print "#".rjust(3),
print "TEST".ljust(25),
if PrintAllScores:
for i in range(0, runs):
print ("OLD_RUN%d" % i).rjust(9),
for i in range(0, runs):
print ("NEW_RUN%d" % i).rjust(9),
else:
print "BEST_OLD_MIN(μs)".rjust(17),
print "BEST_NEW_MIN(μs)".rjust(17),
print 'DELTA'.rjust(9), '%DELTA'.rjust(9), 'SPEEDUP'.rjust(9)
for key in keys:
if key not in scores1:
print key, "not in", file1
continue
if key not in scores2:
print key, "not in", file2
continue
compareScores(key, scores1[key], scores2[key], runs, nums[key])
| Python | 0.000003 | @@ -4053,33 +4053,33 @@
print %22(
-!
+?
)%22,%0A else:%0A
@@ -4133,17 +4133,17 @@
print %22(
-!
+?
)%22,%0A
|
bdca4889442e7d84f8c4e68ecdbee676d46ff264 | Fix data provider example file. | examples/test_with_data_provider.py | examples/test_with_data_provider.py | from pytf.dataprovider import DataProvider
try:
from unittest.mock import call
except ImportError:
from mock import call
@DataProvider([call(max=5), call(max=10), call(max=15)])
class TestCase(object):
def __init__(self, max):
self.max = max
@DataProvider([call(n=3), call(n=7), call(n=12), call(n=20)])
def test_test(self, n):
assert n < self.max
| Python | 0 | @@ -39,90 +39,9 @@
ider
-%0A%0Atry:%0A from unittest.mock import call%0Aexcept ImportError:%0A from mock import
+,
cal
@@ -54,25 +54,30 @@
ataProvider(
-%5B
+max_5=
call(max=5),
@@ -77,16 +77,23 @@
max=5),
+max_10=
call(max
@@ -98,16 +98,23 @@
ax=10),
+max_15=
call(max
@@ -117,17 +117,16 @@
(max=15)
-%5D
)%0Aclass
@@ -218,9 +218,12 @@
der(
-%5B
+n_3=
call
@@ -229,16 +229,20 @@
l(n=3),
+n_7=
call(n=7
@@ -244,16 +244,21 @@
l(n=7),
+n_12=
call(n=1
@@ -261,16 +261,21 @@
(n=12),
+n_20=
call(n=2
@@ -280,9 +280,8 @@
=20)
-%5D
)%0A
|
dafb1fb624e4d816a3c8c504da15469365443473 | test assertion fixed | cloudify_cli/tests/test_utils.py | cloudify_cli/tests/test_utils.py | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import unittest
from cloudify_cli import utils
from cloudify_cli import constants
from cloudify_cli.exceptions import CloudifyCliError
from cloudify_cli.utils import CloudifyWorkingDirectorySettings
TEST_DIR = '/tmp/cloudify-cli-unit-tests'
TEST_WORK_DIR = TEST_DIR + '/cloudify'
class CliUtilsUnitTests(unittest.TestCase):
"""
Unit tests for methods in utils.py
"""
@classmethod
def setUpClass(cls):
if os.path.exists(TEST_DIR):
shutil.rmtree(TEST_DIR)
os.mkdir(TEST_DIR)
@classmethod
def tearDownClass(cls):
shutil.rmtree(TEST_DIR)
def setUp(self):
utils.get_cwd = lambda: TEST_WORK_DIR
os.mkdir(TEST_WORK_DIR)
os.chdir(TEST_WORK_DIR)
def tearDown(self):
shutil.rmtree(TEST_WORK_DIR)
def test_get_existing_init_path_from_inner_dir(self):
# first create the init
init_path = os.path.join(utils.get_cwd(),
constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME)
os.mkdir(init_path)
# switch working directory to inner one
new_cwd = os.path.join(utils.get_cwd(),
'test_get_existing_init_path')
os.mkdir(new_cwd)
utils.get_cwd = lambda: new_cwd
self.assertEqual(utils.get_init_path(), init_path)
def test_get_existing_init_path_from_init_dir(self):
# first create the init
init_path = os.path.join(utils.get_cwd(),
constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME)
os.mkdir(init_path)
self.assertEqual(utils.get_init_path(), init_path)
def test_get_init_path_from_outside_dir(self):
# first create the init
init_path = os.path.join(utils.get_cwd(),
constants.CLOUDIFY_WD_SETTINGS_DIRECTORY_NAME)
os.mkdir(init_path)
# switch working directory to outer one
new_cwd = os.path.dirname(os.path.dirname(init_path))
utils.get_cwd = lambda: new_cwd
self.assertRaises(CloudifyCliError, utils.get_context_path)
def test_dump_cosmo_working_dir_settings_update(self):
self.assertRaises(CloudifyCliError,
utils.dump_cloudify_working_dir_settings,
cosmo_wd_settings=CloudifyWorkingDirectorySettings(),
update=True)
def test_dump_cosmo_working_dir_settings_create(self):
directory_settings = CloudifyWorkingDirectorySettings()
utils.dump_cloudify_working_dir_settings(
cosmo_wd_settings=directory_settings,
update=False)
utils.load_cloudify_working_dir_settings()
def test_parsing_input_as_string(self):
self.assertEqual(utils.plain_string_to_dict(""), {})
self.assertEqual(utils.plain_string_to_dict(" "), {})
self.assertEqual(utils.plain_string_to_dict(";"), {})
self.assertEqual(utils.plain_string_to_dict(" ; "), {})
expected_dict = dict(my_key1="my_value1", my_key2="my_value2")
parsed_dict = utils.plain_string_to_dict(
"my_key1=my_value1;my_key2=my_value2")
self.assertEqual(parsed_dict, expected_dict)
parsed_dict = utils.plain_string_to_dict(
" my_key1 = my_value1 ;my_key2=my_value2; ")
self.assertEqual(parsed_dict, expected_dict)
parsed_dict = utils.plain_string_to_dict(
" my_key1 = my_value1 ;my_key2=my_value2; ")
self.assertEqual(parsed_dict, expected_dict)
expected_dict = dict(my_key1="")
parsed_dict = utils.plain_string_to_dict(" my_key1=")
self.assertEqual(parsed_dict, expected_dict)
parsed_dict = utils.plain_string_to_dict(" my_key1=;")
self.assertEqual(parsed_dict, expected_dict)
expected_dict = dict(my_key1="my_value1",
my_key2="my_value2,my_other_value2")
parsed_dict = utils.plain_string_to_dict(
" my_key1 = my_value1 ;my_key2=my_value2,my_other_value2; ")
self.assertEqual(parsed_dict, expected_dict)
expected_err_msg = "Invalid input format: {0}, the expected " \
"format is: key1=value1;key2=value2"
input_str = "my_key1"
self.assertRaisesRegexp(CloudifyCliError,
expected_err_msg.format(input_str),
utils.plain_string_to_dict, input_str)
input_str = "my_key1;"
self.assertRaisesRegexp(CloudifyCliError,
expected_err_msg.format(input_str),
utils.plain_string_to_dict, input_str)
input_str = "my_key1=my_value1;myvalue2;"
self.assertRaisesRegexp(CloudifyCliError,
expected_err_msg.format(input_str),
utils.plain_string_to_dict,
input_str)
input_str = "my_key1=my_value1;my_key2=myvalue2;my_other_value2;"
self.assertRaisesRegexp(CloudifyCliError,
expected_err_msg.format(input_str),
utils.plain_string_to_dict,
input_str)
input_str = "my_key1=my_value1;my_key2=myvalue2;my_other_value2;"
self.assertRaisesRegexp(CloudifyCliError,
expected_err_msg.format(input_str),
utils.plain_string_to_dict,
input_str)
input_str = "my_key1:my_value1;my_key2:my_value2"
self.assertRaisesRegexp(CloudifyCliError,
expected_err_msg.format(input_str),
utils.plain_string_to_dict,
input_str)
def test_inputs_to_dict(self):
input_str = "my_key1=my_value1;my_key2"
resource_name = "my_resource_name"
expected_err_msg = "Invalid input: {0}. {1} can be either be a path " \
"to a valid YAML file, a string formatted as a " \
"valid YAML or a string formatted as a dictionary" \
" \(key1=value1;key2=value2\)"
self.assertRaisesRegexp(
CloudifyCliError,
expected_err_msg.format(input_str, resource_name),
utils.inputs_to_dict,
input_str,
resource_name)
| Python | 0.000002 | @@ -6682,11 +6682,12 @@
%7B1%7D
-can
+must
be
|
1b893f2283ee69c6da64c1cfe6f11f932155b5e8 | Remove legacy PRF test due to memory usage | photutils/psf/tests/test_sandbox.py | photutils/psf/tests/test_sandbox.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the sandbox module.
"""
from astropy.convolution.utils import discretize_model
from astropy.modeling.models import Gaussian2D
from astropy.table import Table
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ..sandbox import DiscretePRF
PSF_SIZE = 11
GAUSSIAN_WIDTH = 1.
IMAGE_SIZE = 101
# Position and FLUXES of test sources
INTAB = Table([[50., 23, 12, 86], [50., 83, 80, 84],
[np.pi * 10, 3.654, 20., 80 / np.sqrt(3)]],
names=['x_0', 'y_0', 'flux_0'])
# Create test psf
psf_model = Gaussian2D(1. / (2 * np.pi * GAUSSIAN_WIDTH ** 2), PSF_SIZE // 2,
PSF_SIZE // 2, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH)
test_psf = discretize_model(psf_model, (0, PSF_SIZE), (0, PSF_SIZE),
mode='oversample')
# Set up grid for test image
image = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
# Add sources to test image
for x, y, flux in INTAB:
model = Gaussian2D(flux / (2 * np.pi * GAUSSIAN_WIDTH ** 2),
x, y, GAUSSIAN_WIDTH, GAUSSIAN_WIDTH)
image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE),
mode='oversample')
# Some tests require an image with wider sources.
WIDE_GAUSSIAN_WIDTH = 3.
WIDE_INTAB = Table([[50, 23.2], [50.5, 1], [10, 20]],
names=['x_0', 'y_0', 'flux_0'])
wide_image = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
# Add sources to test image
for x, y, flux in WIDE_INTAB:
model = Gaussian2D(flux / (2 * np.pi * WIDE_GAUSSIAN_WIDTH ** 2),
x, y, WIDE_GAUSSIAN_WIDTH, WIDE_GAUSSIAN_WIDTH)
wide_image += discretize_model(model, (0, IMAGE_SIZE), (0, IMAGE_SIZE),
mode='oversample')
def test_create_prf_mean():
"""
Check if create_prf works correctly on simulated data.
Position input format: list
"""
prf = DiscretePRF.create_from_image(image,
list(INTAB['x_0', 'y_0'].as_array()),
PSF_SIZE, subsampling=1, mode='mean')
assert_allclose(prf._prf_array[0, 0], test_psf, atol=1E-8)
def test_create_prf_median():
"""
Check if create_prf works correctly on simulated data.
Position input format: astropy.table.Table
"""
prf = DiscretePRF.create_from_image(image, np.array(INTAB['x_0', 'y_0']),
PSF_SIZE, subsampling=1,
mode='median')
assert_allclose(prf._prf_array[0, 0], test_psf, atol=1E-8)
def test_create_prf_nan():
"""
Check if create_prf deals correctly with nan values.
"""
image_nan = image.copy()
image_nan[52, 52] = np.nan
image_nan[52, 48] = np.nan
prf = DiscretePRF.create_from_image(image, np.array(INTAB['x_0', 'y_0']),
PSF_SIZE, subsampling=1, fix_nan=True)
assert not np.isnan(prf._prf_array[0, 0]).any()
def test_create_prf_flux():
"""
Check if create_prf works correctly when FLUXES are specified.
"""
prf = DiscretePRF.create_from_image(image, np.array(INTAB['x_0', 'y_0']),
PSF_SIZE, subsampling=1,
mode='median', fluxes=INTAB['flux_0'])
assert_allclose(prf._prf_array[0, 0].sum(), 1)
assert_allclose(prf._prf_array[0, 0], test_psf, atol=1E-8)
def test_create_prf_excessive_subsampling():
"""
Check if a helpful error is raised if the subsampling parameter is
too high.
"""
with pytest.raises(ValueError) as exc:
DiscretePRF.create_from_image(image,
list(INTAB['x_0', 'y_0'].as_array()),
PSF_SIZE, subsampling=999)
assert('subsampling' in exc.value.args[0])
| Python | 0 | @@ -294,22 +294,8 @@
lose
-%0Aimport pytest
%0A%0Afr
@@ -3468,429 +3468,4 @@
-8)%0A
-%0A%0Adef test_create_prf_excessive_subsampling():%0A %22%22%22%0A Check if a helpful error is raised if the subsampling parameter is%0A too high.%0A %22%22%22%0A%0A with pytest.raises(ValueError) as exc:%0A DiscretePRF.create_from_image(image,%0A list(INTAB%5B'x_0', 'y_0'%5D.as_array()),%0A PSF_SIZE, subsampling=999)%0A assert('subsampling' in exc.value.args%5B0%5D)%0A
|
97e2e80b43ba3639e5af9deb6485c28da1a5e7af | change path | make_submission.py | make_submission.py | """
Ensemble by columnwise weighted sum.
The weights are determined by scipy.optimize.minimize using validation set predictions.
LB Private: 0.40076
LB Public: 0.39773
"""
import numpy as np
import pandas as pd
import sklearn.preprocessing as pp
path = '~/'
# Neural Networks
pred = [np.load(path + 'pred_TRI_kmax_' + str(k_max) + '.npy') for k_max in [4,5]]
pred.append(np.load(path + 'pred_Sparse_RI.npy'))
pred_NN = (pred[0] + pred[1] + pred[2]) / 3
# XGBoost
pred_XGB = (np.load(path + 'pred_RI.npy') + np.load(path + 'pred_CF.npy')) / 2
# Ensemble weights
w = np.array([1.,0.95657896,0.52392701,0.75156431,1.,0.77871818,0.81764163,0.9541003,0.82863579])
pr005 = pp.normalize(pred_NN * w + pred_XGB * (1 - w), norm = 'l1')
pred005 = pd.read_csv(path + 'sampleSubmission.csv', index_col = 0)
pred005.iloc[:,:] = pr005
pred005.to_csv(path + 'pred005.csv', float_format='%.8f')
| Python | 0.000001 | @@ -254,9 +254,9 @@
= '
-~
+.
/'%0A%0A
|
4cc5e7eb48dfdb167359dbeb541a2dbf25c93865 | Add missing exchange rates | pipe2py/modules/pipeexchangerate.py | pipe2py/modules/pipeexchangerate.py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pipe2py.modules.pipeexchangerate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import requests
from itertools import starmap
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread
from . import (
get_dispatch_funcs, get_async_dispatch_funcs, get_splits, asyncGetSplits)
from pipe2py.lib import utils
from pipe2py.lib.utils import combine_dicts as cdicts
from pipe2py.twisted.utils import asyncStarMap, asyncDispatch
opts = {'listize': False}
timeout = 60 * 60 * 24 # 24 hours in seconds
FIELDS = [
{'name': 'USD/USD', 'price': 1},
{'name': 'USD/EUR', 'price': 0.8234},
{'name': 'USD/GBP', 'price': 0.6448},
{'name': 'USD/INR', 'price': 63.6810},
]
EXCHANGE_API_BASE = 'http://finance.yahoo.com/webservice'
EXCHANGE_API = '%s/v1/symbols/allcurrencies/quote' % EXCHANGE_API_BASE
PARAMS = {'format': 'json'}
# Common functions
def get_base(conf, word):
base = word or conf.default
try:
offline = conf.offline
except AttributeError:
offline = False
return (base, offline)
def calc_rate(from_cur, to_cur, rates):
if from_cur == to_cur:
rate = 1
elif to_cur == 'USD':
rate = rates['USD/%s' % from_cur]
else:
usd_to_given = rates['USD/%s' % from_cur]
usd_to_default = rates['USD/%s' % to_cur]
rate = usd_to_given * (1 / usd_to_default)
return 1 / float(rate)
def parse_request(r, offline):
if offline:
fields = FIELDS
else:
resources = r['list']['resources']
fields = (r['resource']['fields'] for r in resources)
return {i['name']: i['price'] for i in fields}
@utils.memoize(timeout)
def get_rate_data():
return requests.get(EXCHANGE_API, params=PARAMS)
# Async functions
@inlineCallbacks
def asyncParseResult(conf, word, _pass):
base, offline = get_base(conf, word)
if offline:
r = None
else:
data = yield deferToThread(get_rate_data)
r = data.json()
rates = parse_request(r, offline)
result = base if _pass else calc_rate(base, conf.quote, rates)
returnValue(result)
@inlineCallbacks
def asyncPipeExchangerate(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously retrieves the current exchange rate
for a given currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of hashed strings
"""
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(asyncParseResult, parsed)
returnValue(iter(_OUTPUT))
# Synchronous functions
def parse_result(conf, word, _pass):
base, offline = get_base(conf, word)
r = None if offline else get_rate_data().json()
rates = parse_request(r, offline)
result = base if _pass else calc_rate(base, conf.quote, rates)
return result
def pipe_exchangerate(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that retrieves the current exchange rate for a given
currency pair. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : iterable of items or strings (base currency)
conf : {
'quote': {'value': <'USD'>},
'default': {'value': <'USD'>},
'offline': {'type': 'bool', 'value': '0'},
}
Returns
-------
_OUTPUT : generator of hashed strings
"""
splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = utils.dispatch(splits, *get_dispatch_funcs())
_OUTPUT = starmap(parse_result, parsed)
return _OUTPUT
| Python | 0.999141 | @@ -778,16 +778,96 @@
.6810%7D,%0A
+ %7B'name': 'USD/PLN', 'price': 3.76%7D,%0A %7B'name': 'USD/SGD', 'price': 1.34%7D,%0A
%5D%0A%0AEXCHA
|
542ddc0d0bd96c8ff8635f649344f468d7d497d0 | bump version to 0.2.3 | mallory/version.py | mallory/version.py | Version = "0.2.2"
| Python | 0.000001 | @@ -8,11 +8,11 @@
= %220.2.
-2
+3
%22%0A
|
b4d0c51c0034984026ff9008d826579cdfeaeaa6 | comment on how to simplify data retrieval | importer/importer.py | importer/importer.py | from Monument import *
from Uploader import *
from Logger import *
from os import path
import argparse
import pymysql
from importer_utils import *
SHORT = 10
MAPPING_DIR = "mappings"
MONUMENTS_ALL = "monuments_all"
class Mapping(object):
"""
For a table to be processed, it requires a basic mapping file,
named just like the table (eg. se-ship_(sv).json)
At the very least, it should contain the name of the column
in the _specific_ table should be mapped against the "id" column
in monuments_all().
That's because the column does not have to be called "id"
in the specific table.
Such as:
"_id": "signal"
"""
def join_id(self):
DEFAULT_ID = "id"
joins = {}
if self.country != "dk-bygninger":
joins["all_id"] = DEFAULT_ID
joins["join_id"] = self.file_content["_id"]
else:
joins["all_id"] = "name"
joins["join_id"] = "sagsnavn"
joins["country_code"] = self.file_content["country_code"]
return joins
def load_mapping_file(self, countryname, languagename):
filename = path.join(
MAPPING_DIR, "{}_({}).json".format(countryname, languagename))
return load_json(filename)
def __init__(self, countryname, languagename):
self.file_content = self.load_mapping_file(countryname, languagename)
self.country = countryname
self.joins = self.join_id()
def make_query(country_code, language, specific_table, join_id, all_id="id"):
query = ('select DISTINCT * from `{}` as m_all JOIN `{}` '
'as m_spec on m_all.{} = m_spec.{} '
'WHERE m_all.adm0="{}" and m_all.lang="{}"'
).format(MONUMENTS_ALL, specific_table, all_id, join_id, country_code, language)
print(query)
return query
def create_connection(arguments):
return pymysql.connect(
host=arguments.host,
user=arguments.user,
password=arguments.password,
db=arguments.db,
charset="utf8")
"""
There must be a better way to do this.....
"""
SPECIFIC_TABLES = {"monuments_se-ship_(sv)": {"class": SeShipSv,
"data_files": {}},
"monuments_dk-bygninger_(da)": {"class": DkBygningDa,
"data_files": {}},
"monuments_se-bbr_(sv)": {"class": SeBbrSv,
"data_files": {}},
"monuments_se-fornmin_(sv)":
{"class": SeFornminSv,
"data_files":
{"municipalities": "sweden_municipalities.json",
"types": "se-fornmin_(sv)_types.json"}},
"monuments_se-arbetsl_(sv)":
{"class":
SeArbetslSv,
"data_files":
{"municipalities": "sweden_municipalities.json",
"types": "se-arbetsl_(sv)_types.json",
"settlements": "sweden_settlements.json"}}
}
def select_query(query, connection):
cursor = connection.cursor(pymysql.cursors.DictCursor)
cursor.execute(query)
result = cursor.fetchall()
return result
def load_data_files(file_dict):
for key in file_dict.keys():
file_dict[key] = load_json(path.join(MAPPING_DIR, file_dict[key]))
return file_dict
def get_items(connection, country, language, short=False):
specific_table_name = get_specific_table_name(country, language)
if not table_exists(connection, specific_table_name):
print("Table does not exist.")
return
mapping = Mapping(country, language)
country_code = mapping.joins["country_code"]
all_id = mapping.joins["all_id"]
join_id = mapping.joins["join_id"]
query = make_query(country_code,
language,
specific_table_name,
join_id,
all_id)
if short:
query += " LIMIT " + str(SHORT)
if specific_table_name in SPECIFIC_TABLES.keys():
class_to_use = SPECIFIC_TABLES[specific_table_name]["class"]
data_files = load_data_files(
SPECIFIC_TABLES[specific_table_name]["data_files"])
else:
class_to_use = Monument
data_files = None
print(class_to_use)
results = [class_to_use(table_row, mapping, data_files)
for table_row in select_query(query, connection)]
print("Fetched {} items from {}".format(
len(results), get_specific_table_name(country, language)))
return results
def upload(monuments):
logger = Logger()
for sample_item in monuments:
uploader = Uploader(sample_item, log=logger)
uploader.upload()
def main(arguments):
connection = create_connection(arguments)
country = arguments.country
language = arguments.language
results = get_items(connection, country, language, arguments.short)
if arguments.upload:
upload(results)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", default="localhost")
parser.add_argument("--user", default="root")
parser.add_argument("--password", default="")
parser.add_argument("--db", default="wlm")
parser.add_argument("--language", default="sv")
parser.add_argument("--country", default="se-ship")
parser.add_argument("--short", action='store_true')
parser.add_argument("--upload", action='store_true')
args = parser.parse_args()
main(args)
| Python | 0.000001 | @@ -1521,16 +1521,453 @@
=%22id%22):%0A
+ %22%22%22%0A you know what. MONUMENTS_ALL IS NOT EVEN NECESSARY%0A IT WILL SOLVE THIS WHOLE JOINING PROBLEM IF YOU GET RID OF IT%0A THERE IS LITERALLY NOTHING UNIQUE IN IT%0A SERIOUSLY%0A WHY%0A bUT: parent class Monument() relies on consistent attributes to assign%0A simple values (name, image, adm2)%0A idea: make methods in Monument() take params to indicate where to search%0A for the values, like add_image(%22bilde%22)%0A %22%22%22%0A
quer
@@ -4820,16 +4820,98 @@
to_use)%0A
+ database_rows = select_query(query, connection)%0A print(len(database_rows))%0A
resu
@@ -4998,39 +4998,21 @@
in
-select_query(query, connection)
+database_rows
%5D%0A
|
3a1471e965798dcf291c89f528c40eb047aa915c | Increase heapsize glm covtype20x pytest | py/testdir_single_jvm/test_GLM2_covtype20x_1.py | py/testdir_single_jvm/test_GLM2_covtype20x_1.py | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_exec as h2e
import h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=12)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype20x_1(self):
h2o.beta_features = True
csvFilenameList = [
('covtype20x.data', 800),
]
# a browser window too, just because we can
# h2b.browseTheCloud()
importFolderPath = 'standard'
for csvFilename, timeoutSecs in csvFilenameList:
csvPathname = importFolderPath + "/" + csvFilename
hex_key = "A.hex"
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put',
hex_key = hex_key, timeoutSecs=2000, pollTimeoutSecs=60)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
print "WARNING: max_iter set to 8 for benchmark comparisons"
max_iter = 8
y = 54
kwargs = {
'response': 'C' + str(y+1), # for 2
'family': 'binomial',
'n_folds': 2,
'max_iter': max_iter,
'beta_epsilon': 1e-3,
# 'destination_key': modelKey
}
execExpr="A.hex[,%s]=(A.hex[,%s]>%s)" % (y+1, y+1, 1)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
aHack = {'destination_key': 'A.hex'}
# L2
kwargs.update({'alpha': 0, 'lambda': 0})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
print "glm (L2) end on ", csvPathname, 'took', elapsed, 'seconds.', "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_glm.simpleCheckGLM(self, glm, "C14", **kwargs)
# Elastic
kwargs.update({'alpha': 0.5, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
print "glm (Elastic) end on ", csvPathname, 'took', elapsed, 'seconds.', "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_glm.simpleCheckGLM(self, glm, "C14", **kwargs)
# L1
kwargs.update({'alpha': 1.0, 'lambda': 1e-4})
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
print "glm (L1) end on ", csvPathname, 'took', elapsed, 'seconds.', "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_glm.simpleCheckGLM(self, glm, "C14", **kwargs)
if __name__ == '__main__':
h2o.unit_main()
| Python | 0.000001 | @@ -442,17 +442,17 @@
eap_GB=1
-2
+4
)%0A
|
ff9444ea838bb7ed3efae125d343cee2cec994a9 | Improve the level of comments in mysite/base/depends.py | mysite/base/depends.py | mysite/base/depends.py | import os
try:
import lxml
import lxml.etree
import lxml.html
except:
class nothing(object):
pass
lxml = nothing()
lxml.etree = None
lxml.html = None
import logging
if lxml.html is None:
logging.warning("Some parts of the OpenHatch site may fail because the lxml"
" library is not installed. Look in README.mkd for"
" information about lxml.")
def svnadmin_available():
# FIXME: This should move to a variable controlled
# by settings.py.
SVNADMIN_PATH = '/usr/bin/svnadmin'
return os.path.exists(SVNADMIN_PATH)
### Here we try to import "Image", from the Python Imaging Library.
### If we fail, Image is None.
Image = None
try:
import Image
except:
try:
from PIL import Image
except ImportError:
### Okay, for a good time, let's hack sys.modules.
### This permits Django to think ImageFields might
### possibly work.
import sys
sys.modules['Image'] = sys.modules['sys']
try:
import launchpadbugs
import launchpadbugs.connector
import launchpadbugs.basebuglistfilter
import launchpadbugs.text_bug
import launchpadbugs.lphelper
except ImportError: # usually because python2libxml2 is missing
launchpadbugs = None
logging.warning("launchpadbugs did not import. Install python-libxml2.")
| Python | 0.000015 | @@ -1,14 +1,1279 @@
-import os%0A
+# -*- coding: utf-8 -*-%0A%0A# This file is part of OpenHatch.%0A# Copyright (C) 2011 Asheesh Laroia%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A### This file exists to wrap some dependencies for other parts of the code.%0A###%0A### In general, core parts of the OpenHatch site are forbidden from importing%0A### some hard-to-install modules, like lxml. Those files import from here%0A### instead so that if the import fails, the site doesn't crash.%0A###%0A### This is so that new contributors can run the OpenHatch site without%0A### installing these hard-to-install dependencies.%0A%0A# Used within this file%0Aimport os%0Aimport logging%0A%0A# Wrap lxml and the modules that are part of it
%0Atry
@@ -1446,32 +1446,16 @@
= None%0A%0A
-import logging%0A%0A
if lxml.
@@ -1670,16 +1670,135 @@
xml.%22)%0A%0A
+# Provide a helper to check if svnadmin is available. If not,%0A# we can skip running code (and tests) that require it.%0A%0A
def svna
@@ -1974,17 +1974,16 @@
_PATH)%0A%0A
-%0A
### Here
@@ -2394,16 +2394,116 @@
'sys'%5D%0A%0A
+# Wrap launchpadbugs. We wrap it because it imports libxml2,%0A# which qualifies as hard-to-install.%0A%0A
try:%0A
@@ -2714,17 +2714,17 @@
e python
-2
+-
libxml2
|
bf8b29e7d05a7b476198109f1dccfd42da38f73b | Update pack.py: copy directory to destination instead of compressing | pack.py | pack.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' Generate static webpage files '
import os
import sys
usage_prompt = '''Usage:
python3 pack.py
python3 pack.py -H <hostname>
python3 pack.py { ? | -h | --help }'''
protocal = "http"
hostname = 'localhost'
filename_host = os.path.join('scripts', 'host.js')
dir_site = 'site'
filename_pkg = dir_site + '.tar.gz'
for i, arg in enumerate(sys.argv[1:]):
if (arg == '?' or arg == '-h' or arg == '--help'):
print(usage_prompt)
sys.exit(0)
elif (arg == '-H' and i + 2 < len(sys.argv)):
hostname = sys.argv[i + 2]
hostname = protocal + '://' + hostname
print("Hostname set to '%s'" % hostname)
host_file = open(filename_host, 'w')
host_file.write("var hostname = '%s'" % hostname)
host_file.close()
print("Gulp building...")
os.system("gulp clean --silent")
os.system("gulp build --silent")
print("Compressing...")
os.system("tar -zcf %s %s" % (filename_pkg, dir_site))
print("Files saved to '%s'" % filename_pkg)
| Python | 0 | @@ -96,16 +96,30 @@
port sys
+%0Aimport shutil
%0A%0Ausage_
@@ -131,18 +131,16 @@
t =
-''
'Usage:
-%0A
+
pyth
@@ -154,76 +154,43 @@
k.py
-%0Apython3 pack.py -H %3Chostname%3E%0Apython3 pack.py %7B ? %7C -h %7C --help %7D''
+ %3Cdestination_path%3E %5B-H %3Chostname%3E%5D
'%0A%0Ap
@@ -222,32 +222,19 @@
= '
-localhost'%0Afilename_host
+'%0Ahost_path
= o
@@ -271,17 +271,16 @@
s')%0A
-%0Adir_
site
+_dir
= '
@@ -289,193 +289,168 @@
te'%0A
-filename_pkg = dir_site + '.tar.gz'%0A%0Afor i, arg in enumerate(sys.argv%5B1:%5D):%0A if (arg == '?' or arg == '-h' or arg == '--help'):%0A print(usage_prompt)%0A sys.exit(0)%0A el
+%0Aif (len(sys.argv) %3C 2):%0A print(usage_prompt)%0A sys.exit(0)%0Aelse:%0A des_path = sys.argv%5B1%5D + site_dir%0A for i, arg in enumerate(sys.argv%5B2:%5D):%0A
if (
@@ -469,17 +469,17 @@
and i +
-2
+3
%3C len(s
@@ -501,36 +501,12 @@
-hostname = sys.argv%5Bi + 2%5D%0A%0A
+
host
@@ -535,18 +535,48 @@
' +
-hostname%0A%0A
+sys.argv%5Bi + 3%5D%0A%0Aif hostname != '':%0A
prin
@@ -591,11 +591,15 @@
ame
-set
+changed
to
@@ -608,32 +608,36 @@
s'%22 %25 hostname)%0A
+
host_file = open
@@ -641,28 +641,28 @@
pen(
-filename_host
+host_path
, 'w')%0A
+
host
@@ -707,16 +707,20 @@
stname)%0A
+
host_fil
@@ -836,119 +836,135 @@
(%22Co
-mpressing...%22)%0Aos.system(%22tar -zcf %25s %25s%22 %25 (filename_pkg, dir_site))%0A%0Aprint(%22Files saved to '%25s'%22 %25 filename_pkg
+pying files to '%25s'...%22 %25 des_path)%0Ashutil.rmtree(des_path, ignore_errors=True)%0Ashutil.copytree(site_dir, des_path)%0Aprint(%22Done.%22
)%0A
|
aada38225b0b09e9b8f9752e45ec5a4b1b01dd60 | Handle index missing in anon import script | portality/scripts/anon_import.py | portality/scripts/anon_import.py | """
Clear out the index and retrieve new anonymised data, according to a configuration file
Configure the target index in your *.cfg override file
For now, this import script requires the same index pattern (prefix, 'types', index-per-type setting) as the exporter.
"""
import esprit, json, gzip, shutil
from portality.core import app, es_connection, initialise_index
from portality.store import StoreFactory
from portality.util import ipt_prefix
# FIXME: monkey patch for esprit.bulk (but esprit's chunking is handy)
class Resp(object):
def __init__(self, **kwargs):
[setattr(self, k, v) for k, v in kwargs.items()]
def es_bulk(connection, data, type=""):
try:
if not isinstance(data, str):
data = data.read()
res = connection.bulk(data, type, timeout='60s', request_timeout=60)
return Resp(status_code=200, json=res)
except Exception as e:
return Resp(status_code=500, text=str(e))
def do_import(config):
# filter for the types we are going to work with
import_types = {}
for t, s in config.get("types", {}).items():
if s.get("import", False) is True:
import_types[t] = s
print("==Carrying out the following import==")
for import_type, cfg in import_types.items():
count = "All" if cfg.get("limit", -1) == -1 else cfg.get("limit")
print(("{x} from {y}".format(x=count, y=import_type)))
print("\n")
if config.get("confirm", True):
text = input("Continue? [y/N] ")
if text.lower() != "y":
exit()
# remove all the types that we are going to import
for import_type in list(import_types.keys()):
if es_connection.indices.get(app.config['ELASTIC_SEARCH_DB_PREFIX'] + import_type):
es_connection.indices.delete(app.config['ELASTIC_SEARCH_DB_PREFIX'] + import_type)
# re-initialise the index (sorting out mappings, etc)
print("==Initialising Index for Mappings==")
initialise_index(app, es_connection)
mainStore = StoreFactory.get("anon_data")
tempStore = StoreFactory.tmp()
container = app.config.get("STORE_ANON_DATA_CONTAINER")
print("\n==Importing==")
for import_type, cfg in import_types.items():
count = "all" if cfg.get("limit", -1) == -1 else cfg.get("limit")
print(("Importing {x} from {y}".format(x=count, y=import_type)))
print(("Obtaining {x} from storage".format(x=import_type)))
limit = cfg.get("limit", -1)
limit = None if limit == -1 else limit
n = 1
while True:
filename = import_type + ".bulk" + "." + str(n)
handle = mainStore.get(container, filename)
if handle is None:
break
tempStore.store(container, filename + ".gz", source_stream=handle)
print(("Retrieved {x} from storage".format(x=filename)))
handle.close()
print(("Unzipping {x} in temporary store".format(x=filename)))
compressed_file = tempStore.path(container, filename + ".gz")
uncompressed_file = tempStore.path(container, filename, must_exist=False)
with gzip.open(compressed_file, "rb") as f_in, open(uncompressed_file, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
tempStore.delete_file(container, filename + ".gz")
print(("Importing from {x}".format(x=filename)))
imported_count = esprit.tasks.bulk_load(es_connection, ipt_prefix(import_type), uncompressed_file,
limit=limit, max_content_length=config.get("max_content_length", 100000000))
tempStore.delete_file(container, filename)
if limit is not None and imported_count != -1:
limit -= imported_count
if limit is not None and limit <= 0:
break
n += 1
# once we've finished importing, clean up by deleting the entire temporary container
tempStore.delete_container(container)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("config", help="Config file for import run")
args = parser.parse_args()
with open(args.config, "r", encoding="utf-8") as f:
config = json.loads(f.read())
# FIXME: monkey patch for esprit raw_bulk
unwanted_primate = esprit.raw.raw_bulk
esprit.raw.raw_bulk = es_bulk
do_import(config)
esprit.raw.raw_bulk = unwanted_primate
| Python | 0 | @@ -298,16 +298,31 @@
, shutil
+, elasticsearch
%0Afrom po
@@ -1677,16 +1677,33 @@
eys()):%0A
+ try:%0A
@@ -1798,16 +1798,20 @@
+
es_conne
@@ -1884,16 +1884,88 @@
rt_type)
+%0A except elasticsearch.exceptions.NotFoundError:%0A pass
%0A%0A #
|
41adb0952aaffa88d1e42634cabb2f252b254472 | Add stub for fasttext cross-validation | cocoscore/ml/fasttext_helpers.py | cocoscore/ml/fasttext_helpers.py | from ..tools.file_tools import get_file_handle
from gensim import utils
import gzip
import numpy as np
import os
def get_uniform(low, high, random_seed):
random_state = np.random.RandomState(random_seed)
return lambda: float('%.3g' % random_state.uniform(low, high))
def get_uniform_int(low, high, random_seed):
random_state = np.random.RandomState(random_seed)
return lambda: random_state.randint(low, high)
def get_log_uniform(low, high, random_seed):
random_state = np.random.RandomState(random_seed)
return lambda: float('%.3g' % np.power(10, random_state.uniform(low, high)))
def get_discrete_uniform(values, random_seed):
random_state = np.random.RandomState(random_seed)
return lambda: values[random_state.randint(len(values))]
def get_hyperparameter_distributions():
param_dict = {
'-lr': get_log_uniform(-3, 1, 0),
'-epoch': get_uniform_int(10, 51, 12),
'-wordNgrams': get_uniform_int(1, 6, 23),
'-dim': get_uniform_int(50, 500, 42),
'-ws': get_uniform_int(3, 10, 55)
}
return param_dict
def get_fasttext_train_calls(train_file_path, param_dict, fasttext_path, model_path, thread=1,
pretrained_vectors_path=None):
"""
Generates fastText command-line calls for training a supervised model and for compressing the output model.
:param train_file_path: path to the training dataset
:param param_dict: dictionary mapping fasttext hyperparameters to their values
:param fasttext_path: path to the fastText executable
:param model_path: str, path to output model
:param thread: int, the number of threads to use
:param pretrained_vectors_path: str, path to pre-trained `.vec` file with word embeddings
:return tuple of str - fastText calls for training and quantizing
"""
param_dict['-thread'] = thread
train_args = []
for arg in sorted(param_dict.keys()):
val = param_dict[arg]
train_args += [arg, str(val)]
train_call = [fasttext_path, 'supervised', '-input', train_file_path, '-output', model_path]
train_call += train_args
if pretrained_vectors_path is not None:
train_call += ['-pretrainedVectors', pretrained_vectors_path]
compress_call = [fasttext_path, 'quantize', '-input', model_path, '-output', model_path]
return train_call, compress_call
def fasttext_fit(train_file_path, param_dict, fasttext_path, thread=1, compress_model=False, model_path='model',
pretrained_vectors_path=None):
"""
Trains a fastText supervised model. This is a wrapper around the fastText command line interface.
:param train_file_path: path to the training dataset
:param param_dict: dictionary mapping fasttext hyperparameters to their values
:param fasttext_path: path to the fastText executable
:param thread: int, the number of threads to use
:param compress_model: indicates whether the fastText model should be compressed (using fastText's quantize).
:param model_path: str, path to output model
:param pretrained_vectors_path: str, path to pre-trained `.vec` file with word embeddings
:return str: path to trained model
"""
train_call, compress_call = get_fasttext_train_calls(train_file_path, param_dict, fasttext_path, model_path, thread,
pretrained_vectors_path=pretrained_vectors_path)
utils.check_output(args=train_call)
if compress_model:
utils.check_output(args=compress_call)
model_file = model_path + '.bin'
# remove auxiliary vectors file
os.remove(model_path + '.vec')
# remove non-compressed model file if compression was performed
if compress_model:
os.remove(model_file)
model_file = model_path + '.ftz'
return model_file
def get_fasttext_test_calls(test_file_path, fasttext_path, model_path):
"""
Generates fastText command-line calls to apply a previously trained model to a test dataset. Note, this only
supports binary classification scenarios.
:param test_file_path: path to the test dataset
:param fasttext_path: path to the fastText executable
:param model_path: str, path to output model
:return str - fastText calls for testing
"""
class_count = 2
predict_call = [fasttext_path, 'predict-prob', model_path, test_file_path, str(class_count)]
return predict_call
def fasttext_predict(trained_model_path, test_file_path, fasttext_path, probability_file_path):
"""
Predicts class probabilities for a given dataset using a previously trained fastText model.
:param trained_model_path: path to the trained fastText model
:param test_file_path: path to the test dataset
:param fasttext_path: path to the fastText executable
:param probability_file_path: str, path to the output file with class probabilities for the test dataset;
output written to this file will always be gzipped
"""
predict_call = get_fasttext_test_calls(test_file_path, fasttext_path, trained_model_path)
predictions = utils.check_output(args=predict_call)
with gzip.open(probability_file_path, 'wb') as fout:
fout.write(predictions)
def load_fasttext_class_probabilities(probability_file_path):
"""
Utility function that loads class probabilities from a previously performed prediction run.
:param probability_file_path: str, path to the output file with class probabilities for the test dataset
:return: list of float: probability of belonging to the positive class for each example in the test dataset
"""
probabilities = []
with gzip.open(probability_file_path, 'rt') as fin:
for line in fin:
cols = line.rstrip().split()
prob = None
for i, col in enumerate(cols):
if col == '__label__1':
prob = float(cols[i + 1])
assert prob is not None
probabilities.append(prob)
return probabilities
def load_labels(dataset_path, compression=False):
"""
Load class labels from a given dataset.
:param dataset_path: str, path to dataset
:param compression: boolean, indicates whether or not dataset_path is gzipped
:return: list of 0/1, depending on class label of the instances
"""
conn = None
try:
conn = get_file_handle(dataset_path, compression)
true_labels = []
for line in conn:
true_labels.append(line.split()[0])
true_labels = [1 if l == '__label__1' else 0 for l in true_labels]
return true_labels
finally:
conn.close()
| Python | 0 | @@ -5217,24 +5217,54 @@
dictions)%0A%0A%0A
+def fasttext_cv():%0A pass%0A%0A%0A
def load_fas
|
0db54aacbb1607e2d1d505bc57864dd421d90529 | fix indentation | adhocracy/model/userbadges.py | adhocracy/model/userbadges.py | from datetime import datetime
import logging
from sqlalchemy import Table, Column, Integer, ForeignKey, DateTime, Unicode
from adhocracy.model import meta
log = logging.getLogger(__name__)
badge_table = Table(
'badge', meta.data,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False),
Column('group_id', Integer, ForeignKey('group.id', ondelete="CASCADE")))
user_badges_table = Table('user_badges', meta.data,
Column('id', Integer, primary_key=True),
Column('badge_id', Integer, ForeignKey('badge.id'),
nullable=False),
Column('user_id', Integer, ForeignKey('user.id'),
nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False))
class Badge(object):
def __init__(self, title, color):
self.title = title
self.color = color
def __repr__(self):
return "<Badge(%s,%s)>" % (self.id,
self.title.encode('ascii', 'replace'))
def __unicode__(self):
return self.title
def count(self):
if self._count is None:
from badges import Badges
q = meta.Session.query(Badges)
q = q.filter(Badges.badge == self)
self._count = q.count()
return self._count
def __le__(self, other):
return self.title >= other.title
def __lt__(self, other):
return self.title > other.title
@classmethod
def by_id(cls, id, instance_filter=True, include_deleted=False):
try:
q = meta.Session.query(Badge)
q = q.filter(Badge.id == id)
return q.limit(1).first()
except Exception, e:
log.warn("by_id(%s): %s" % (id, e))
return None
@classmethod
def find(cls, title):
q = meta.Session.query(Badge).filter(Badge.title.like(title))
return q.first()
@classmethod
def all(cls):
q = meta.Session.query(Badge)
return q.all()
@classmethod
def create(cls, title, color):
badge = cls(title, color)
meta.Session.add(badge)
meta.Session.flush()
return badge
@classmethod
def find_or_create(cls, title):
badge = cls.find(title)
if badge is None:
badge = cls.create(title)
return badge
def to_dict(self):
return dict(id=self.id,
title=self.title,
color=self.color,
users=[user.name for user in self.users])
def _index_id(self):
return self.id
class UserBadge(object):
def __init__(self, user, badge, creator):
self.user = user
self.badge = badge
self.creator = creator
def __repr__(self):
badge = self.badge.name.encode('ascii', 'replace')
return "<userbadges(%s, badge %s/%s for user%s/%s)>" % (
self.id, self.user.id, self.user.name, self.badge.id, badge)
def delete(self):
meta.Session.delete(self)
meta.Session.flush()
@classmethod
def find(cls, id):
q = meta.Session.query(UserBadge)
q = q.filter(UserBadge.id == id)
return q.limit(1).first()
@classmethod
def create(cls, user, badge, creator):
assert isinstance(badge, Badge), (
"badge has to be an :class:`adhocracy.model.badge.Badge`")
userbadge = cls(user, badge, creator)
meta.Session.add(userbadge)
meta.Session.flush()
return userbadge
def _index_id(self):
return self.id
| Python | 0.000358 | @@ -543,16 +543,21 @@
= Table(
+%0A
'user_ba
|
3027c1ece280bc665f03781203d6b37b1c1bd82c | fix parsing of HTML entities with HTMLParser | weboob/tools/parser.py | weboob/tools/parser.py | # -*- coding: utf-8 -*-
"""
Copyright(C) 2010 Romain Bignon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
try:
# XXX Currently, elementtidy segfaults when there are no error, because of
# the behavior of libtidy.
# A patch has been sent to Debian:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=576343
#
# As it is not integrated in Debian yet, and as this problem persists on other
# systems, using elementtidy is for now disabled.
raise ImportError
from elementtidy import TidyHTMLTreeBuilder
TidyHTMLTreeBuilder.ElementTree = ElementTree # force cElementTree if using it.
HTMLTreeBuilder = TidyHTMLTreeBuilder.TidyHTMLTreeBuilder
except ImportError:
from HTMLParser import HTMLParser
class HTMLTreeBuilder(HTMLParser):
def __init__(self, encoding=None):
HTMLParser.__init__(self)
self._target = ElementTree.TreeBuilder()
def doctype(self, name, pubid, system):
pass
def close(self):
tree = self._target.close()
return tree
def handle_starttag(self, tag, attrs):
self._target.start(tag, dict(attrs))
def handle_startendtag(self, tag, attrs):
self._target.start(tag, dict(attrs))
self._target.end(tag)
def handle_data(self, data):
self._target.data(data)
def handle_endtag(self, tag):
try:
self._target.end(tag)
except:
pass
class StandardParser(object):
def parse(self, data, encoding=None):
parser = HTMLTreeBuilder(encoding)
tree = ElementTree.parse(data, parser)
for elem in tree.getiterator():
if elem.tag.startswith('{'):
elem.tag = elem.tag[elem.tag.find('}')+1:]
return tree
def tostring(element):
e = ElementTree.Element('body')
e.text = element.text
e.tail = element.tail
for sub in element.getchildren():
e.append(sub)
s = ElementTree.tostring(e, 'utf-8')
return unicode(s)
| Python | 0.000001 | @@ -1444,16 +1444,42 @@
MLParser
+%0A import htmlentitydefs
%0A%0A cl
@@ -2033,16 +2033,224 @@
d(tag)%0A%0A
+ def handle_charref(self, name):%0A self._target.data(unichr(int(name)))%0A%0A def handle_entityref(self, name):%0A self._target.data(unichr(htmlentitydefs.name2codepoint%5Bname%5D))%0A%0A
|
bc2e7d77eb4aaa6d0063951a98de78c462f261ae | Use timezone-aware datetime object | confirmation/models.py | confirmation/models.py | # -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import os
import re
import datetime
from hashlib import sha1
from django.db import models
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from confirmation.util import get_status_field
try:
import mailer
send_mail = mailer.send_mail
except ImportError:
# no mailer app present, stick with default
pass
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class ConfirmationManager(models.Manager):
def confirm(self, confirmation_key):
if SHA1_RE.search(confirmation_key):
try:
confirmation = self.get(confirmation_key=confirmation_key)
except self.model.DoesNotExist:
return False
obj = confirmation.content_object
status_field = get_status_field(obj._meta.app_label, obj._meta.module_name)
setattr(obj, status_field, getattr(settings, 'STATUS_ACTIVE', 1))
obj.save()
return obj
return False
def send_confirmation(self, obj, email_address):
confirmation_key = sha1(str(os.urandom(20)) + str(email_address)).hexdigest()
current_site = Site.objects.get_current()
activate_url = u'https://%s%s' % (current_site.domain,
reverse('confirmation.views.confirm', kwargs={'confirmation_key': confirmation_key}))
context = Context({
'activate_url': activate_url,
'current_site': current_site,
'confirmation_key': confirmation_key,
'target': obj,
'days': getattr(settings, 'EMAIL_CONFIRMATION_DAYS', 10),
})
templates = [
'confirmation/%s_confirmation_email_subject.txt' % obj._meta.module_name,
'confirmation/confirmation_email_subject.txt',
]
template = loader.select_template(templates)
subject = template.render(context).strip().replace(u'\n', u' ') # no newlines, please
templates = [
'confirmation/%s_confirmation_email_body.txt' % obj._meta.module_name,
'confirmation/confirmation_email_body.txt',
]
template = loader.select_template(templates)
body = template.render(context)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, [email_address])
return self.create(content_object=obj, date_sent=datetime.datetime.now(), confirmation_key=confirmation_key)
class Confirmation(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
date_sent = models.DateTimeField(_('sent'))
confirmation_key = models.CharField(_('activation key'), max_length=40)
objects = ConfirmationManager()
class Meta:
verbose_name = _('confirmation email')
verbose_name_plural = _('confirmation emails')
def __unicode__(self):
return _('confirmation email for %s') % self.content_object
| Python | 0.000001 | @@ -173,24 +173,8 @@
re%0A
-import datetime%0A
from
@@ -592,16 +592,54 @@
azy as _
+%0Afrom django.utils.timezone import now
%0A%0Afrom c
@@ -2779,26 +2779,8 @@
ent=
-datetime.datetime.
now(
|
a563e5a9bf7b7bbfba58f60ef1f9139d0f67fcb9 | Add data_to_send check | holosocket/wsserver.py | holosocket/wsserver.py | #!/usr/bin/env python3
import argparse
import asyncio
import functools
import logging
import struct
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
try:
from . import utils
from .encrypt import aes_gcm
except (ModuleNotFoundError, ImportError): # develop mode
import utils
from encrypt import aes_gcm
class Server:
def __init__(self, key):
self.key = key
async def handle(self, reader, writer):
try:
# get salt
salt = await utils.get_content(reader, True)
if not len(salt) == 16:
logging.warn('recv error salt')
asyncio.sleep(90)
writer.close()
return None
Encrypt = aes_gcm(self.key, salt)
Decrypt = aes_gcm(self.key, salt)
# get target addr, port
data_to_send = await utils.get_content(reader, True)
tag = data_to_send[-16:]
data = data_to_send[:-16]
content = Decrypt.decrypt(data, tag)
addr_len = content[0]
addr = content[1:1 + addr_len]
_port = content[-2:]
port = struct.unpack('>H', _port)[0]
except OSError as e:
logging.error(e)
writer.close()
return None
except ConnectionResetError as e:
logging.error(e)
writer.close()
return None
except BrokenPipeError as e:
logging.error(e)
writer.close()
return None
# connect to target
try:
r_reader, r_writer = await asyncio.open_connection(addr, port)
except OSError as e:
logging.error(e)
writer.close()
return None
logging.debug('start relay')
s2r = asyncio.ensure_future(
self.sock2remote(reader, r_writer, Decrypt))
r2s = asyncio.ensure_future(
self.remote2sock(r_reader, writer, Encrypt))
s2r.add_done_callback(
functools.partial(self.close_transport, writer, r_writer))
r2s.add_done_callback(
functools.partial(self.close_transport, writer, r_writer))
async def sock2remote(self, reader, writer, cipher):
while True:
try:
data = await utils.get_content(reader, True)
# close Connection
if not data:
break
# send data
tag = data[-16:]
content = data[:-16]
try:
data = cipher.decrypt(content, tag)
except ValueError:
logging.warn('detect attack')
await asyncio.sleep(90)
return None
writer.write(data)
await writer.drain()
except OSError as e:
logging.error(e)
break
except ConnectionResetError as e:
logging.error(e)
break
except BrokenPipeError as e:
logging.error(e)
break
async def remote2sock(self, reader, writer, cipher):
while True:
try:
data = await reader.read(8192)
# close Connection
if not data:
break
# send data
data, tag = cipher.encrypt(data)
content = utils.gen_server_frame(data + tag)
writer.write(content)
await writer.drain()
except OSError as e:
logging.error(e)
break
except ConnectionResetError as e:
logging.error(e)
break
except BrokenPipeError as e:
logging.error(e)
break
def close_transport(self, writer, r_writer, future):
writer.close()
r_writer.close()
logging.debug('stop relay')
def main():
parser = argparse.ArgumentParser(description='holosocket server')
parser.add_argument('-c', '--config', help='config file')
parser.add_argument('-4', '--ipv4', action='store_true', help='ipv4 only')
parser.add_argument('--debug', action='store_true', help='debug mode')
args = parser.parse_args()
if args.config:
with open(args.config, 'r') as f:
config = yaml.load(f, Loader=Loader)
if args.debug:
MODE = logging.DEBUG
else:
MODE = logging.INFO
logging.basicConfig(
level=MODE,
format='{asctime} {levelname} {message}',
datefmt='%Y-%m-%d %H:%M:%S',
style='{')
SERVER = [config['server']]
if not args.ipv4:
if 'server_v6' in config:
SERVER_V6 = config['server_v6']
SERVER.append(SERVER_V6)
SERVER_PORT = config['server_port']
KEY = config['password']
server = Server(KEY)
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
logging.info('uvloop mode')
except ImportError:
logging.info('pure asyncio mode')
loop = asyncio.get_event_loop()
coro = asyncio.start_server(server.handle, SERVER, SERVER_PORT, loop=loop)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
| Python | 0.000001 | @@ -1019,24 +1019,45 @@
_send%5B:-16%5D%0A
+ try:%0A
@@ -1089,24 +1089,195 @@
(data, tag)%0A
+ except ValueError:%0A logging.warn('detect attack')%0A asyncio.sleep(90)%0A writer.close()%0A return None%0A%0A
@@ -4722,32 +4722,40 @@
.debug:%0A
+LOGGING_
MODE = logging.D
@@ -4777,16 +4777,24 @@
+LOGGING_
MODE = l
@@ -4845,16 +4845,24 @@
level=
+LOGGING_
MODE,%0A
|
46001060e444272573fbee2abac8ce42b8745985 | improve exception messages | src/choo/queries/base.py | src/choo/queries/base.py | from collections import OrderedDict
from copy import deepcopy
from itertools import chain
from types import MappingProxyType
from ..models.base import Field, Model
class MetaQuery(type):
def __new__(mcs, name, bases, attrs):
not_field_attrs = {n: v for n, v in attrs.items() if not isinstance(v, Field)}
cls = super(MetaQuery, mcs).__new__(mcs, name, bases, not_field_attrs)
try:
Model = attrs['Model']
except KeyError:
for base in bases:
if hasattr(base, 'Model'):
Model = base.Model
break
else:
raise TypeError('Query without Model!')
cls._fields = Model._fields
cls._settings_defaults = OrderedDict()
for base in cls.__bases__:
cls._settings_defaults.update(getattr(base, '_settings_defaults', {}))
if '_settings_defaults' in attrs:
cls._settings_defaults.update(attrs['_settings_defaults'])
return cls
class QuerySettingsProxy:
def __init__(self, settings):
self._settings = settings
def __setattr__(self, name, value):
if name != '_settings' or hasattr(self, name):
raise TypeError('Can not set settings directly, set them using methods!')
super().__setattr__(name, value)
def __getattr__(self, name):
try:
return self._settings[name]
except KeyError:
raise AttributeError
def __delattr__(self, name):
raise TypeError
class Query(metaclass=MetaQuery):
Model = Model
_settings_defaults = {'limit': None}
def __init__(self, network):
if self.__class__ == Query:
raise TypeError('only subclasses of Query can be initialised')
self.network = network
self._obj = self.Model()
self._settings = self._settings_defaults.copy()
self._cached_results = []
self._results_generator = None
self._results_done = False
def copy(self):
result = self.__class__(self.network)
result._obj = deepcopy(self._obj)
result._settings = self._settings
return result
def where(self, **kwargs):
result = self.copy()
for name, value in kwargs.items():
if name not in self.Model._fields:
raise TypeError('invalid field: %s.%s' % (self.Model.__name__, name))
setattr(result._obj, name, value)
return result
@classmethod
def unserialize(cls, data):
raise NotImplementedError
@property
def settings(self):
return MappingProxyType(self._settings)
def get(self, obj):
if not isinstance(obj, self.Model):
raise TypeError('Expected %s instance, got %s' % (self.Model.__name__, repr(obj)))
result = self.copy()
result._obj = deepcopy(obj)
r = result.limit(1).execute()
if not r:
raise self.Model.NotFound
return next(iter(r))
def _execute(self):
raise TypeError('Cannot execute query not bound to a network')
def limit(self, limit):
if limit is not None and (not isinstance(limit, int) or limit < 1):
raise TypeError('limit has to be None or int >= 1')
self._update_setting('limit', limit)
return self
def _update_setting(self, name, value):
result = self.copy()
result._settings[name] = value
return result
def execute(self):
if self._results_generator is None:
self._results_generator = self._execute()
return self
def _full_iter(self):
self.execute()
if self._results_done:
return iter(self._cached_results)
return chain(self._cached_results, self._next_result())
def __iter__(self):
return self._full_iter
def _next_result(self):
for result in self._results_generator:
self._cached_results.append(result)
yield result
self._results_done = True
def __getattr__(self, name):
if name in self.Model._fields:
return getattr(self._obj, name)
if name in self.__class__._settings_defaults:
raise TypeError('Use .settings to get settings!')
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.Model._fields:
raise TypeError('Can not set fields, use .where()!')
if name in self._settings_defaults:
raise TypeError('Can not set settings directly, set them using methods!')
super().__setattr__(name, value)
def __delattr__(self, name):
if name in self.Model._fields:
raise TypeError('Can not delete fields, use .where(%s=None)!' % name)
if name in self._settings_defaults:
raise TypeError('Can not delete settings')
super().__delattr__(name)
| Python | 0.000006 | @@ -4278,17 +4278,16 @@
settings
-!
')%0A%0A
@@ -4458,17 +4458,16 @@
.where()
-!
')%0A%0A
@@ -4581,24 +4581,29 @@
m using
+their
methods
-!
')%0A%0A
@@ -4783,17 +4783,16 @@
%25s=None)
-!
' %25 name
|
9fd89a23e55d9b0b393c3975758b9e2c16a3cda1 | Set MOOSE_DIR if the user doesn't have it | python/MooseDocs/common/moose_docs_file_tree.py | python/MooseDocs/common/moose_docs_file_tree.py | #pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import re
import MooseDocs
from moose_docs_import import moose_docs_import
from nodes import DirectoryNode, MarkdownFileIndexNode, MarkdownFilePageNode, CopyFileNode
def finder(node, name):
"""Helper for finding child by name"""
for child in node.children:
if child.name == name:
return child
return None
def tree_builder(files, root, base, node, directory):
"""
Helper for building markdown file tree.
Inputs:
files[set]:
"""
for item in os.listdir(directory):
# Complete path to the directory item (path or filename)
path = os.path.join(directory, item)
# Move along if path not in list of files
if path in files:
# Special case when the supplied node is the root, this maintains the root node
# and creates an index node from which everything will stem.
if item == 'index.md':
if node.parent is None:
node = MarkdownFileIndexNode('', base=base, root_directory=root, parent=node)
elif isinstance(node, DirectoryNode):
node = node.replace(MarkdownFileIndexNode(node.name, root_directory=root,
base=base))
# General markdown files
elif item.endswith('.md'):
MarkdownFilePageNode(item[:-3], root_directory=root, base=base, parent=node)
# Other files to copy
elif item.endswith(MooseDocs.common.EXTENSIONS):
CopyFileNode(item.lstrip('/'), root_directory=root, base=base, parent=node)
# Directories
elif os.path.isdir(path):
n = finder(node, item)
if n is None:
n = DirectoryNode(item, base=base, parent=node)
tree_builder(files, root, base, n, path)
def moose_docs_file_tree(config):
"""
Creates a unified markdown file tree from multiple locations.
Inputs:
config[dict]: Contains key value pairs, with each value containing another dict() with
key value pairs that are passed to moose_docs_import function.
"""
node = DirectoryNode('')
for value in config.itervalues():
value.setdefault('include', [])
value.setdefault('exclude', [])
value.setdefault('extensions', MooseDocs.common.EXTENSIONS)
value.setdefault('base', '')
value.setdefault('root_dir', MooseDocs.ROOT_DIR)
value['root_dir'] = re.sub(r'\$(\w+)', lambda m: os.getenv(m.group(1)), value['root_dir'])
if not os.path.isabs(value['root_dir']):
value['root_dir'] = os.path.join(MooseDocs.ROOT_DIR, value['root_dir'])
files = set(moose_docs_import(**value))
tree_builder(files,
value['root_dir'],
value['base'],
node,
os.path.join(value['root_dir'], value['base']))
# Remove un-used directories
for desc in node.descendants:
if isinstance(desc, DirectoryNode) and \
all(isinstance(x, DirectoryNode) for x in desc.descendants):
desc.parent = None
return node
| Python | 0 | @@ -3558,24 +3558,229 @@
ion.%0A %22%22%22
+%0A%0A # Set the MOOSE_DIR if it does not exists so that the root_dir can always use it%0A if 'MOOSE_DIR' not in os.environ:%0A os.environ%5B'MOOSE_DIR'%5D = MooseDocs.MOOSE_DIR%0A%0A # Build the file tree
%0A node =
|
346e296872e1ca011eb5e469505de1c15c86732f | Clarify the comment about setting the PYTHON variable for the Doc Makefile. | Doc/tools/sphinx-build.py | Doc/tools/sphinx-build.py | # -*- coding: utf-8 -*-
"""
Sphinx - Python documentation toolchain
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: Python license.
"""
import sys
if __name__ == '__main__':
if sys.version_info[:3] < (2, 5, 0):
print >>sys.stderr, """\
Error: Sphinx needs to be executed with Python 2.5 or newer.
(If you run this from the Makefile, you can set the PYTHON variable
to the path of an alternative interpreter executable.)
"""
sys.exit(1)
from sphinx import main
sys.exit(main(sys.argv))
| Python | 0 | @@ -356,17 +356,16 @@
or newer
-.
%0A(If you
@@ -481,10 +481,48 @@
able
+, e.g.,%0A%60%60make html PYTHON=python2.5%60%60)
.
-)
%0A%22%22%22
|
8070b119c11ad18e2c1979afef21503a255dd8d8 | Check the number of matches for each query | rockuefort.py | rockuefort.py | #!/usr/bin/python3
"""
Usage: rockuefort copy <file> <destination>
rockuefort symlink <file> <destination>
rockuefort list <file>
"""
from collections import OrderedDict
import subprocess
import sys
from docopt import docopt
def log(*args, **kwargs):
print("rockuefort:", *args, file=sys.stderr, **kwargs)
if __name__ == '__main__':
args = docopt(__doc__)
# Load and evaluate queries
files = OrderedDict()
with open(args['<file>']) as f:
queries = [line.strip() for line in f]
for query in queries:
r = subprocess.check_output(['quodlibet', '--print-query', query])
matched_files = [mf.decode() for mf in r.splitlines() if mf]
for file in matched_files:
files.setdefault(file, []).append(query)
if not matched_files:
log("No match: {}".format(query))
# Check for multiply-matched files
for file, queries in files.items():
if len(queries) > 1:
log("Matched multiple: {}".format(file))
for q in queries:
log(" query: {}".format(q))
# Perform the requested action
if args['copy']:
log("Copying to {}".format(args['<destination>']))
...
elif args['symlink']:
log("Symlinking to {}".format(args['<destination>']))
...
else: # args['list']
for file in files:
print(file)
| Python | 0.00149 | @@ -433,16 +433,33 @@
dDict()%0A
+ queries = %5B%5D%0A
with
@@ -493,28 +493,76 @@
%0A
+ for line in f:%0A try:%0A c,
quer
-ies
+y
=
-%5B
line.str
@@ -569,31 +569,186 @@
ip()
- for line in f%5D
+.split(':', 1)%0A c = int(c)%0A except ValueError:%0A c = 1%0A query = line.strip()%0A queries.append((c, query))
%0A for
que
@@ -743,16 +743,19 @@
%0A for
+ c,
query i
@@ -909,16 +909,224 @@
if mf%5D%0A
+ nm = len(matched_files)%0A if nm != c:%0A log(%22Matched %7B%7D (expected %7B%7D): %7B%7D%22.format(nm, c, query))%0A for file in matched_files:%0A log(%22 match: %7B%7D%22.format(file))%0A
@@ -1208,84 +1208,8 @@
ery)
-%0A if not matched_files:%0A log(%22No match: %7B%7D%22.format(query))
%0A%0A
@@ -1339,16 +1339,19 @@
Matched
+by
multiple
|
6b495cfc31743ab761cd1b9a791bfb4ec7b80d36 | Remove __main__ block from container.py. | dockorm/container.py | dockorm/container.py | # encoding: utf-8
"""
Container class.
"""
from __future__ import print_function, unicode_literals
from itertools import chain
import json
from subprocess import call
import sys
from docker import Client
from six import (
iteritems,
iterkeys,
itervalues,
text_type,
)
from IPython.utils.py3compat import string_types
from IPython.utils.traitlets import (
Any,
Dict,
HasTraits,
Instance,
List,
Type,
Unicode,
TraitError,
)
from .py3compat_utils import strict_map
def print_build_output(build_output):
success = True
for raw_message in build_output:
message = json.loads(raw_message)
try:
print(message['stream'], end="")
except KeyError:
success = False
print(message['error'])
return success
def scalar(l):
"""
Get the first and only item from a list.
"""
assert len(l) == 1
return l[0]
class Container(HasTraits):
"""
A specification for creation of a container.
"""
organization = Unicode()
def _organization_changed(self, name, old, new):
if new and not new.endswith('/'):
self.organization = new + '/'
image = Unicode()
def _image_changed(self, name, old, new):
if not new:
raise TraitError("Must supply a value for image.")
tag = Unicode(default_value='latest')
def full_imagename(self, tag=None):
return '{}{}:{}'.format(
self.organization,
self.image,
tag or self.tag,
)
name = Unicode()
def _name_default(self):
return self.image + '-running'
build_path = Unicode()
links = List(Instance(__name__ + '.Link'))
def format_links(self):
return {}
volumes_readwrite = Dict()
volumes_readonly = Dict()
@property
def volume_mount_points(self):
"""
Volumes are declared in docker-py in two stages. First, you declare
all the locations where you're going to mount volumes when you call
create_container.
Returns a list of all the values in self.volumes or
self.read_only_volumes.
"""
return list(
chain(
itervalues(self.volumes_readwrite),
itervalues(self.volumes_readonly),
)
)
@property
def volume_binds(self):
"""
The second half of declaring a volume with docker-py happens when you
actually call start(). The required format is a dict of dicts that
looks like:
{
host_location: {'bind': container_location, 'ro': True}
}
"""
volumes = {
key: {'bind': value, 'ro': False}
for key, value in iteritems(self.volumes_readwrite)
}
ro_volumes = {
key: {'bind': value, 'ro': True}
for key, value in iteritems(self.volumes_readonly)
}
volumes.update(ro_volumes)
return volumes
ports = Dict(help="Map from container port -> host port.")
@property
def open_container_ports(self):
return strict_map(int, iterkeys(self.ports))
@property
def port_bindings(self):
out = {}
for key, value in self.ports:
if isinstance(ports, (list, tuple)):
key = '/'.join(strict_map(text_type, key))
out[key] = value
return out
environment = Dict()
# This should really be something like:
# Either(Instance(str), List(Instance(str)))
command = Any()
_client = Any()
@property
def client(self):
if self._client is None:
self._client = Client()
return self._client
def build(self, tag=None, display=True, rm=True):
"""
Build the container.
If display is True, write build output to stdout. Otherwise return it
as a generator.
"""
output = self.client.build(
self.build_path,
self.full_imagename(tag=tag),
# This is in line with the docker CLI, but different from
# docker-py's default.
rm=rm,
)
if display:
print_build_output(output)
return None
else:
return list(output)
def run(self, command=None, tag=None, attach=False):
"""
Run this container.
"""
container = self.client.create_container(
self.full_imagename(tag),
name=self.name,
ports=self.open_container_ports,
volumes=self.volume_mount_points,
detach=not attach,
stdin_open=attach,
tty=attach,
command=command,
environment=self.environment,
)
self.client.start(
container,
binds=self.volume_binds,
port_bindings=self.ports,
links=self.format_links(),
)
if attach:
call(['docker', 'attach', self.name])
def _matches(self, container):
return '/' + self.name in container['Names']
def instances(self, all=True):
"""
Return any instances of this container, running or not.
"""
return [
c for c in self.client.containers(all=all) if self._matches(c)
]
def running(self):
"""
Return the running instance of this container, or None if no container
is running.
"""
container = self.instances(all=False)
if container:
return scalar(container)
else:
return None
def stop(self):
self.client.stop(self.name)
def purge(self, stop_first=True, remove_volumes=False):
"""
Purge all containers of this type.
"""
for container in self.instances():
if stop_first:
self.client.stop(container)
else:
self.client.kill(container)
self.client.remove_container(
container,
v=remove_volumes,
)
def inspect(self, tag=None):
"""
Inspect any running instance of this container.
"""
return self.client.inspect_container(
self.name,
)
def images(self):
"""
Return any images matching our current organization/name.
Does not filter by tag.
"""
return self.client.images(self.full_imagename().split(':')[0])
def remove_images(self):
"""
Remove any images matching our current organization/name.
Does not filter by tag.
"""
for image in self.images():
self.client.remove_image(image)
def logs(self, all=False):
return [
{
'Id': container,
'Logs': self.client.logs(container)
}
for container in self.instances(all=all)
]
def join(self):
"""
Wait until there are no instances of this container running.
"""
container = self.running()
if container:
self.client.wait(container)
class Link(HasTraits):
"""
A link between containers.
"""
container = Instance(Container)
alias = Unicode()
if __name__ == '__main__':
cont = Container(image='busybox')
cont.run()
| Python | 0 | @@ -7340,85 +7340,4 @@
()%0A%0A
-%0Aif __name__ == '__main__':%0A cont = Container(image='busybox')%0A cont.run()%0A
|
0b39cfbdbfa397be5e428425aedc9ebced62c6ec | Fix reversed lat/lon | projects/tpoafptarbmit/scrape.py | projects/tpoafptarbmit/scrape.py | #!/usr/bin/env python3
from urllib.parse import parse_qsl
import json
import os
import sys
import requests
from bs4 import BeautifulSoup
ROUTE_BASE_URL = 'http://www.thepassageride.com/Routes/'
def fetch_text(url):
r = requests.get(url)
if r.status_code != 200:
r.raise_for_status()
return r.text
def scrape_route_list(html):
print('Fetching route list...', end='')
routes = []
soup = BeautifulSoup(html, 'html.parser')
for link in soup.select('#wikitext a[href*="/Routes/"]'):
href = link.get('href')
routes.append({
'name': link.text,
'number': int(href.strip(ROUTE_BASE_URL)),
'url': href,
})
print('done (%d routes)' % len(routes))
return routes
def fetch_route_description(route_url):
print('\t%s' % route_url)
html = fetch_text(route_url)
soup = BeautifulSoup(html, 'html.parser')
description = [p.prettify() for p in soup.select('#wikitext p')]
map_url = soup.select_one('#wikitext a[href*="gmap-pedometer"]')
if map_url is not None:
map_url = map_url.get('href')
return {
'map_url': map_url,
'description': '\n'.join(description),
}
def fetch_route_map(map_url):
print('\t%s' % map_url, end='')
_, map_id = map_url.split('?r=')
path = '/getRoute.php' if int(map_id) <= 5_000_000 else '/gp/ajaxRoute/get'
r = requests.post('https://www.gmap-pedometer.com' + path, {'rId': map_id})
if r.status_code != 200:
r.raise_for_status()
data = parse_qsl(r.text)
polyline = [x[1] for x in data if x[0] == 'polyline'][0]
coords = []
points = polyline.split('a')
for i in range(0, len(points)-1, 2):
coords.append({
'lat': float(points[i]),
'lon': float(points[i+1]),
})
print(' ... done (%d coords)' % len(coords))
return coords
def route_to_geojson(route_meta, coords):
return {
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [
[c['lat'], c['lon']]
for c in coords
]
},
'properties': route_meta
}
def main():
html = fetch_text(ROUTE_BASE_URL)
routes = []
for r in scrape_route_list(html):
print('#%d "%s"' % (r['number'], r['name']))
desc = fetch_route_description(r['url'])
if desc['map_url'] is not None:
coords = fetch_route_map(desc['map_url'])
else:
coords = []
geo = route_to_geojson({**r, **desc}, coords)
routes.append(geo)
collection = {
'type': 'FeatureCollection',
'features': routes
}
print('Dumping to file...')
with open('tpoafptarbmit.geojson', 'w') as fp:
json.dump(collection, fp, indent=4)
print('All done!')
if __name__ == '__main__':
main()
| Python | 0.999871 | @@ -1773,16 +1773,18 @@
points%5Bi
++1
%5D),%0A
@@ -1804,34 +1804,32 @@
: float(points%5Bi
-+1
%5D),%0A %7D)%0A%0A
|
1c785168dcf725a07f5621e95376fcc5e5c9a7c1 | Update extension.py | extensions/appdynamics/extension.py | extensions/appdynamics/extension.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppDynamics Extension
Downloads, installs and configures the AppDynamics agent for PHP
"""
import os
import os.path
import logging
from extension_helpers import PHPExtensionHelper
from subprocess import call
import re
_log = logging.getLogger('appdynamics')
class AppDynamicsInstaller(PHPExtensionHelper):
def __init__(self, ctx):
PHPExtensionHelper.__init__(self, ctx)
self._FILTER = "app[-]?dynamics" # make static final
self._appdynamics_credentials = None # JSON which mentions all appdynamics credentials
self._account_access_key = None # AppDynamics Controller Account Access Key
self._account_name = None # AppDynamics Controller Account Name
self._host_name = None # AppDynamics Controller Host Address
self._port = None # AppDynamics Controller Port
self._ssl_enabled = None # AppDynamics Controller SSL Enabled
# Specify the Application details
self._app_name = None # AppDynamics App name
self._tier_name = None # AppDynamics Tier name
self._node_name = None # AppDynamics Node name
try:
print("Initializing")
if ctx['PHP_VM'] == 'php':
print("method: constructor")
except Exception:
_log.warn("Error installing AppDynamics! "
"AppDynamics will not be available.")
#0
def _defaults(self):
"""Returns a set of default environment variables.
Create and return a list of default environment variables. These
are merged with the build pack context when this the extension
object is created.
Return a dictionary.
"""
return {
'APPDYNAMICS_HOST': 'packages.appdynamics.com',
'APPDYNAMICS_VERSION': '4.2.14.0',
'APPDYNAMICS_PACKAGE': 'appdynamics-php-agent-x64-linux-{APPDYNAMICS_VERSION}.tar.bz2',
'APPDYNAMICS_DOWNLOAD_URL': 'https://{APPDYNAMICS_HOST}/php/{APPDYNAMICS_VERSION}/{APPDYNAMICS_PACKAGE}'
}
#1
# (Done)
def _should_compile(self):
"""Determines if the extension should install it's payload.
This check is called during the `compile` method of the extension.
It should return true if the payload of this extension should
be installed (i.e. the `install` method is called).
"""
#print("method: _should_compile")
#VCAP_SERVICES_STRING = str(self._services)
#if bool(re.search(self.FILTER, VCAP_SERVICES_STRING)):
# print("AppDynamics service detected")
# return True
#return False
return True
# WIP
def _configure(self):
"""Configure the extension.
Called when `should_configure` returns true. Implement this
method for your extension.
"""
print("method: _configure")
pass
# WIP
def _compile(self, install):
"""Install the payload of this extension.
Called when `_should_compile` returns true. This is responsible
for installing the payload of the extension.
The argument is the installer object that is passed into the
`compile` method.
"""
print("method: _compile")
print("Installing AppDynamics")
install.package('APPDYNAMICS')
print("Downloaded AppDynamics package")
#3
def _service_environment(self):
"""Return dict of environment variables x[var]=val"""
print("method: _service_environment")
return {}
#4 (Done)
def _service_commands(self):
"""Return dict of commands to run x[name]=cmd"""
print("method: _service_commands")
return {
'httpd': (
'$HOME/httpd/bin/apachectl',
'-f "$HOME/httpd/conf/httpd.conf"',
'-k restart',
'-DFOREGROUND')
#'appdynamics_proxy': (
# '$HOME/appdynamics-php-agent/proxy/runProxy',
# '-d "$HOME/appdynamics-php-agent/proxy"',
# ''
#)
}
#5
def _preprocess_commands(self):
"""Return your list of preprocessing commands"""
print("method: _preprocess_commands")
return ()
AppDynamicsInstaller.register(__name__)
| Python | 0.000001 | @@ -3298,17 +3298,16 @@
-#
print(%22m
@@ -3339,17 +3339,16 @@
-#
VCAP_SER
@@ -3390,17 +3390,16 @@
-#
if bool(
@@ -3449,25 +3449,24 @@
)):%0A
-#
print(%22A
@@ -3499,25 +3499,24 @@
d%22)%0A
-#
return T
@@ -3531,22 +3531,19 @@
-#return False%0A
+else: %0A
@@ -3553,19 +3553,20 @@
return
-Tru
+Fals
e%0A%0A #
|
02f59b60062004fc23dbfbfc6201b326b08513a8 | Add 404 exception | src/client/exceptions.py | src/client/exceptions.py | class HTTP4xx(Exception):
pass
class HTTP400(HTTP4xx):
pass
class HTTP409(HTTP4xx):
pass
| Python | 0.000019 | @@ -55,32 +55,66 @@
4xx):%0A pass%0A%0A
+class HTTP404(HTTP4xx):%0A pass%0A%0A
class HTTP409(HT
|
ac2339bc1dbac84799eb2effc053b96359e78993 | fix missing } and endl | extras/AwesomeMaker/AwesomeMaker.py | extras/AwesomeMaker/AwesomeMaker.py | #!/usr/bin/python
import re
import urllib
endl = "\n"
regex_icon_description = re.compile(ur'(.fa-[^{]+:before ){([^}]+)}$', re.MULTILINE)
regex_icon_name = re.compile(ur'fa-(.+):before.+', re.MULTILINE)
regex_icon_unicode = re.compile(ur'.+content: "\\(.+)";', re.MULTILINE)
def get_icon_names(icon_description):
return re.findall(regex_icon_name, icon_description[0])
def get_icon_unicode(icon_description):
return re.findall(regex_icon_unicode, icon_description[1])
def parse_css(css_content, output_file):
match = re.findall(regex_icon_description, css_content)
for icon_description in match:
icon_names = get_icon_names(icon_description)
icon_unicode = get_icon_unicode(icon_description)
for icon_name in icon_names:
write_icon_description(icon_name, icon_unicode[0], output_file)
def camel_case_string(string):
result = ""
words = string.split("-")
for word in words:
result += word.capitalize()
return result
def write_icon_file_header(output_file):
output_file.write("// IMPORTANT! This file is auto-generated see extras/AwesomeMaker" + endl)
output_file.write(endl)
output_file.write("#ifndef __FONTAWESOME_ICONS_H__" + endl)
output_file.write("#define __FONTAWESOME_ICONS_H__" + endl)
output_file.write("typedef juce::String Icon;" + endl)
output_file.write(endl)
def write_icon_file_footer(output_file):
output_file.write(endl)
output_file.write("#endif // __FONTAWESOME_ICONS_H__" + endl)
output_file.write(endl)
def write_icon_description(icon_name, icon_unicode, output_file):
output_file.write("const Icon FontAwesome_" + camel_case_string(icon_name) + " = Icon::fromUTF8(u8\"\\u" + icon_unicode + "\");" + endl)
def write_font_file_header(font, output_file):
output_file.write("// IMPORTANT! This file is auto-generated see extras/AwesomeMaker" + endl)
output_file.write(endl)
output_file.write("#ifndef __FONTAWESOME_DATA_H__" + endl)
output_file.write("#define __FONTAWESOME_DATA_H__" + endl)
output_file.write(endl)
output_file.write("namespace FontAwesomeData {" + endl)
output_file.write("\textern const char*\tfontawesomewebfont_ttf;" + endl)
output_file.write("\tconst int\t\t\tfontawesomewebfont_ttfSize = " + str(len(font)) + ";" + endl)
output_file.write("}" + endl)
output_file.write(endl)
output_file.write("#endif" + endl)
output_file.write(endl)
def write_font_file_source(font, output_file):
output_file.write("// IMPORTANT! This file is auto-generated see extras/AwesomeMaker" + endl)
output_file.write("#include \"FontAwesomeData.h\"" + endl)
output_file.write(endl)
output_file.write("static const unsigned char data[] = {")
count = 0
size = len(font)
for byte in font:
count += 1
size -= 1
if (count % 40) != 39:
output_file.write(str(ord(byte)))
else:
output_file.write(str(ord(byte)) + endl)
if size > 0:
output_file.write(",")
output_file.write(";" + endl)
output_file.write("const char* FontAwesomeData::fontawesomewebfont_ttf = (const char*) data;" + endl)
output_file.write(endl)
css = urllib.urlopen("https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/css/font-awesome.css").read()
if css == "":
exit()
font = urllib.urlopen("https://github.com/FortAwesome/Font-Awesome/raw/master/fonts/fontawesome-webfont.ttf").read()
if font == "":
exit()
icon_file = open("Icons.h", "wt")
write_icon_file_header(icon_file)
parse_css(css, icon_file)
write_icon_file_footer(icon_file)
icon_file.close()
font_file_header = open("FontAwesomeData.h", "wt")
font_file_source = open("FontAwesomeData.cpp", "wt")
write_font_file_header(font, font_file_header)
write_font_file_source(font, font_file_source) | Python | 0.000116 | @@ -2590,32 +2590,60 @@
eMaker%22 + endl)%0A
+ output_file.write(endl)%0A
output_file.
@@ -3098,16 +3098,17 @@
.write(%22
+%7D
;%22 + end
|
c6d710bf8a0bf8ce6c06f380fa7913ef73816d51 | Remove seconds from nightbot predicted times | controllers/nightbot_controller.py | controllers/nightbot_controller.py | import pytz
from base_controller import CacheableHandler
from database.event_query import TeamEventsQuery
from database.match_query import TeamEventMatchesQuery
from helpers.event_team_status_helper import EventTeamStatusHelper
from helpers.match_helper import MatchHelper
from helpers.team_helper import TeamHelper
from models.event_team import EventTeam
from models.team import Team
def validate_team(user_str, team_number):
"""
Returns:
Team object if the team exists and is currently competing
String with the appropriate error otherwise
"""
team_key = 'frc{}'.format(team_number)
team = Team.get_by_id(team_key)
if not team:
return "{}Team {} does not exist.".format(user_str, team_number)
team_events_future = TeamEventsQuery(team_key).fetch_async()
current_event = None
for event in team_events_future.get_result():
if event.now:
current_event = event
if not current_event:
return "{}Team {} is not currently competing.".format(user_str, team_number)
return team, current_event
class NightbotTeamNextmatchHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "nightbot_team_nextmatch_{}" # (team_number)
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(NightbotTeamNextmatchHandler, self).__init__(*args, **kw)
self._cache_expiration = self.CACHE_HEADER_LENGTH
def get(self, team_number):
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(team_number)
super(NightbotTeamNextmatchHandler, self).get(team_number)
def _render(self, team_number):
self.response.headers['content-type'] = 'text/plain; charset="utf-8"'
user = self.request.get('user')
if user:
user_str = '@{}, '.format(user)
else:
user_str = ''
team_event_or_error = validate_team(user_str, team_number)
if type(team_event_or_error) == str:
return team_event_or_error
_, event = team_event_or_error
event_code_upper = event.event_short.upper()
matches_future = TeamEventMatchesQuery('frc{}'.format(team_number), event.key.id()).fetch_async()
matches = MatchHelper.play_order_sort_matches(matches_future.get_result())
# No match schedule yet
if not matches:
return "{}[{}] Team {} has no scheduled matches yet.".format(user_str, event_code_upper, team_number)
next_match = None
for match in matches:
if not match.has_been_played:
next_match = match
break
if next_match is None:
return "{}[{}] Team {} has no more scheduled matches.".format(user_str, event_code_upper, team_number)
predicted_str = "predicted" if next_match.predicted_time else "scheduled"
match_time = next_match.predicted_time if next_match.predicted_time else next_match.time
timezone = pytz.timezone(event.timezone_id) if event.timezone_id else None
predicted_time_local = pytz.utc.localize(match_time).astimezone(timezone) if timezone else match_time
time_string = ", {} to start at {}".format(predicted_str, predicted_time_local.strftime("%a %X %Z")) if match_time else ""
return "{}[{}] Team {} will be playing in match {}{}".format(user_str, event_code_upper, team_number, next_match.short_name, time_string)
class NightbotTeamStatuskHandler(CacheableHandler):
CACHE_VERSION = 0
CACHE_KEY_FORMAT = "nightbot_team_status_{}" # (team_number)
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(NightbotTeamStatuskHandler, self).__init__(*args, **kw)
self._cache_expiration = self.CACHE_HEADER_LENGTH
def get(self, team_number):
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(team_number)
super(NightbotTeamStatuskHandler, self).get(team_number)
def _render(self, team_number):
self.response.headers['content-type'] = 'text/plain; charset="utf-8"'
user = self.request.get('user')
if user:
user_str = '@{}, '.format(user)
else:
user_str = ''
team_event_or_error = validate_team(user_str, team_number)
if type(team_event_or_error) == str:
return team_event_or_error
_, event = team_event_or_error
event_code_upper = event.event_short.upper()
event_team = EventTeam.get_by_id('{}_frc{}'.format(event.key.id(), team_number))
team_key = 'frc{}'.format(team_number)
status = EventTeamStatusHelper.generate_team_at_event_status_string(team_key, event_team.status)
if status:
status = status.replace('<b>', '').replace('</b>', '')
return '{}[{}] {}'.format(user_str, event_code_upper, status)
| Python | 0.000001 | @@ -3234,9 +3234,12 @@
%25a %25
-X
+H:%25M
%25Z%22
|
8d06ccd7aeefe5945bab44b01764bd62685a2e17 | Add missing member to API. | mindbender/api.py | mindbender/api.py | """Public API
Anything that is not defined here is **internal** and
unreliable for external use.
Motivation for api.py:
Storing the API in a module, as opposed to in __init__.py, enables
use of it internally.
For example, from `pipeline.py`:
>> from . import api
>> api.do_this()
The important bit is avoiding circular dependencies, where api.py
is calling upon a module which in turn calls upon api.py.
"""
import logging
from . import schema
from .pipeline import (
install,
uninstall,
ls,
search,
Loader,
discover_loaders,
register_root,
register_data,
register_host,
register_format,
register_silo,
register_family,
register_loaders_path,
register_plugins,
registered_host,
registered_families,
registered_loaders_paths,
registered_formats,
registered_data,
registered_root,
registered_silos,
deregister_plugins,
deregister_format,
deregister_family,
deregister_data,
deregister_loaders_path,
any_representation,
fixture,
)
from .lib import (
format_staging_dir,
format_shared_dir,
format_version,
time,
find_latest_version,
parse_version,
)
logging.basicConfig()
__all__ = [
"install",
"uninstall",
"schema",
"ls",
"search",
"Loader",
"discover_loaders",
"register_host",
"register_data",
"register_format",
"register_silo",
"register_family",
"register_loaders_path",
"register_plugins",
"register_root",
"registered_root",
"registered_silos",
"registered_loaders_paths",
"registered_host",
"registered_families",
"registered_formats",
"registered_data",
"deregister_plugins",
"deregister_family",
"deregister_data",
"deregister_loaders_path",
"format_staging_dir",
"format_shared_dir",
"format_version",
"find_latest_version",
"parse_version",
"time",
"any_representation",
"fixture",
]
| Python | 0 | @@ -1762,24 +1762,49 @@
r_plugins%22,%0A
+ %22deregister_format%22,%0A
%22deregis
|
95b62db08280850bf7232c914dfc1c77372a206f | update parse rule | wechat/wechat/views.py | wechat/wechat/views.py | from django.http import HttpResponse, HttpResponseBadRequest
from hashlib import sha1
from lxml import etree
import urllib.request
import json
def _make_post_request(url, post_data):
post_encoded = json.dumps(post_data).encode('utf-8')
req = urllib.request.Request(url, data=post_encoded, method='POST')
req.add_header('Content-Type', 'application/json')
resp_json = urllib.request.urlopen(req).read().decode('utf-8')
resp = json.loads(resp_json)
return resp
def _check_token(request):
get_dict = request.GET.dict()
needed_token = ['signature', 'timestamp', 'nonce', 'echostr']
for token in needed_token:
if token not in get_dict:
return HttpResponseBadRequest('invalid tokens')
my_token = 'uvacsssvoice'
arr = [my_token, get_dict['timestamp'], get_dict['nonce']]
arr.sort()
before_hash = arr[0] + arr[1] + arr[2]
after_hash = sha1(before_hash.encode('utf-8')).hexdigest()
if after_hash == get_dict['signature']:
return HttpResponse(get_dict['echostr'])
else:
return HttpResponseBadRequest('something went wrong')
def _reply(from_name, to_name, create_time, content):
ret = etree.Element('xml')
etree.SubElement(ret, 'FromUserName').text = from_name
etree.SubElement(ret, 'ToUserName').text = to_name
etree.SubElement(ret, 'CreateTime').text = create_time
etree.SubElement(ret, 'MsgType').text = 'text'
etree.SubElement(ret, 'Content').text = content
ret_str = etree.tostring(ret, pretty_print=True, encoding='unicode')
return HttpResponse(ret_str, content_type='application/xml')
def _handle_reply(request):
tree = etree.fromstring(request.body.decode('utf-8'))
try:
from_name = tree.xpath('/xml/FromUserName')[0].text
to_name = tree.xpath('/xml/ToUserName')[0].text
create_time = tree.xpath('/xml/CreateTime')[0].text
msg_type = tree.xpath('/xml/MsgType')[0].text
except:
return HttpResponseBadRequest('cannot parse correct xml')
if msg_type != 'text':
txt = '弹幕仅支持文本信息,请按\n\"弹幕 想发送的内容\"\n格式发弹幕'
return _reply(to_name, from_name, create_time, txt)
try:
content = tree.xpath('/xml/Content')[0].text
except:
return HttpResponseBadRequest('cannot parse correct xml')
if len(content) <= 2 or content[:2] != '弹幕':
txt = '你的弹幕格式似乎不对哦,请按\n\"弹幕 想发送的内容\"\n格式发弹幕'
return _reply(to_name, from_name, create_time, txt)
bul = content[2:].strip()
if len(bul) == 0:
txt = '不能发送空弹幕,请按\n\"弹幕 想发送的内容\"\n格式发弹幕'
return _reply(to_name, from_name, create_time, txt)
post_url = 'http://162.243.117.39:8000/api/create/'
post_data = { 'content': bul, 'fingerprint': '#'+from_name }
# try:
resp = _make_post_request(post_url, post_data)
if resp['ok']:
txt = '弹幕发送成功!'
return _reply(to_name, from_name, create_time, txt)
else:
txt = 'oops,你的弹幕发送失败了...请再给我们一个机会,稍等片刻再试哦'
return _reply(to_name, from_name, create_time, txt)
def index_page(request):
if request.method == 'GET': return _check_token(request)
if request.method == 'POST': return _handle_reply(request)
return HttpResponseBadRequest('bad request type')
| Python | 0.000001 | @@ -2491,17 +2491,9 @@
%5B2:%5D
-.strip()
%0A
+
@@ -2615,24 +2615,181 @@
_time, txt)%0A
+ if bul%5B0%5D != ' ':%0A txt = '%E4%BD%A0%E7%9A%84%E5%BC%B9%E5%B9%95%E6%A0%BC%E5%BC%8F%E4%BC%BC%E4%B9%8E%E4%B8%8D%E5%AF%B9%E5%93%A6%EF%BC%8C%E8%AF%B7%E6%8C%89%5Cn%5C%22%E5%BC%B9%E5%B9%95 %E6%83%B3%E5%8F%91%E9%80%81%E7%9A%84%E5%86%85%E5%AE%B9%5C%22%5Cn%E6%A0%BC%E5%BC%8F%E5%8F%91%E5%BC%B9%E5%B9%95'%0A return _reply(to_name, from_name, create_time, txt)%0A%0A bul = bul.strip()
%0A post_ur
|
267f0c72f3d3f89792ab152fb0f2257aa21d9365 | fix get_value_from_state() | plenum/server/domain_req_handler.py | plenum/server/domain_req_handler.py | from binascii import hexlify
from hashlib import sha256
from common.serializers.serialization import domain_state_serializer, \
proof_nodes_serializer, state_roots_serializer
from ledger.util import F
from plenum.common.constants import TXN_TYPE, NYM, ROLE, STEWARD, TARGET_NYM, \
VERKEY, TXN_TIME, ROOT_HASH, MULTI_SIGNATURE, PROOF_NODES, DATA, \
STATE_PROOF
from plenum.common.exceptions import UnauthorizedClientRequest
from plenum.common.plenum_protocol_version import PlenumProtocolVersion
from plenum.common.request import Request
from plenum.common.txn_util import reqToTxn, get_type, get_payload_data, get_seq_no, get_txn_time, get_from
from plenum.common.types import f
from plenum.server.ledger_req_handler import LedgerRequestHandler
from stp_core.common.log import getlogger
logger = getlogger()
class DomainRequestHandler(LedgerRequestHandler):
stateSerializer = domain_state_serializer
write_types = {NYM, }
def __init__(self, ledger, state, config, reqProcessors, bls_store, ts_store=None):
super().__init__(ledger, state, ts_store=ts_store)
self.config = config
self.reqProcessors = reqProcessors
self.bls_store = bls_store
def doStaticValidation(self, request: Request):
pass
def validate(self, req: Request):
if req.operation.get(TXN_TYPE) == NYM:
origin = req.identifier
error = None
if not self.isSteward(self.state,
origin, isCommitted=False):
error = "Only Steward is allowed to do these transactions"
if req.operation.get(ROLE) == STEWARD:
if self.stewardThresholdExceeded(self.config):
error = "New stewards cannot be added by other stewards " \
"as there are already {} stewards in the system".\
format(self.config.stewardThreshold)
if error:
raise UnauthorizedClientRequest(req.identifier,
req.reqId,
error)
def _reqToTxn(self, req: Request):
txn = reqToTxn(req)
for processor in self.reqProcessors:
res = processor.process(req)
txn.update(res)
return txn
@staticmethod
def transform_txn_for_ledger(txn):
"""
Some transactions need to be updated before they can be stored in the
ledger, eg. storing certain payload in another data store and only its
hash in the ledger
"""
return txn
def updateState(self, txns, isCommitted=False):
for txn in txns:
self._updateStateWithSingleTxn(txn, isCommitted=isCommitted)
def gen_txn_path(self, txn):
typ = get_type(txn)
if typ == NYM:
nym = get_payload_data(txn).get(TARGET_NYM)
return hexlify(self.nym_to_state_key(nym)).decode()
else:
logger.error('Cannot generate id for txn of type {}'.format(typ))
return None
def _updateStateWithSingleTxn(self, txn, isCommitted=False):
typ = get_type(txn)
if typ == NYM:
nym = get_payload_data(txn).get(TARGET_NYM)
self.updateNym(nym, txn, isCommitted=isCommitted)
else:
logger.debug(
'Cannot apply request of type {} to state'.format(typ))
def countStewards(self) -> int:
"""
Count the number of stewards added to the pool transaction store
Note: This is inefficient, a production use case of this function
should require an efficient storage mechanism
"""
# THIS SHOULD NOT BE DONE FOR PRODUCTION
return sum(1 for _, txn in self.ledger.getAllTxn() if
(get_type(txn) == NYM) and (get_payload_data(txn).get(ROLE) == STEWARD))
def stewardThresholdExceeded(self, config) -> bool:
"""We allow at most `stewardThreshold` number of stewards to be added
by other stewards"""
return self.countStewards() > config.stewardThreshold
def updateNym(self, nym, txn, isCommitted=True):
existingData = self.getNymDetails(self.state, nym,
isCommitted=isCommitted)
txn_data = get_payload_data(txn)
newData = {}
if not existingData:
# New nym being added to state, set the TrustAnchor
newData[f.IDENTIFIER.nm] = get_from(txn)
# New nym being added to state, set the role and verkey to None, this makes
# the state data always have a value for `role` and `verkey` since we allow
# clients to omit specifying `role` and `verkey` in the request consider a
# default value of None
newData[ROLE] = None
newData[VERKEY] = None
if ROLE in txn_data:
newData[ROLE] = txn_data[ROLE]
if VERKEY in txn_data:
newData[VERKEY] = txn_data[VERKEY]
newData[F.seqNo.name] = get_seq_no(txn)
newData[TXN_TIME] = get_txn_time(txn)
existingData.update(newData)
val = self.stateSerializer.serialize(existingData)
key = self.nym_to_state_key(nym)
self.state.set(key, val)
return existingData
def hasNym(self, nym, isCommitted: bool=True):
key = self.nym_to_state_key(nym)
data = self.state.get(key, isCommitted)
return bool(data)
@staticmethod
def get_role(state, nym, role, isCommitted: bool=True):
nymData = DomainRequestHandler.getNymDetails(state, nym, isCommitted)
if not nymData:
return {}
else:
if nymData.get(ROLE) == role:
return nymData
else:
return {}
@staticmethod
def getSteward(state, nym, isCommitted: bool=True):
return DomainRequestHandler.get_role(state, nym, STEWARD, isCommitted)
@staticmethod
def isSteward(state, nym, isCommitted: bool=True):
return bool(DomainRequestHandler.getSteward(state,
nym,
isCommitted))
@staticmethod
def getNymDetails(state, nym, isCommitted: bool=True):
key = DomainRequestHandler.nym_to_state_key(nym)
data = state.get(key, isCommitted)
if not data:
return {}
return DomainRequestHandler.stateSerializer.deserialize(data)
@staticmethod
def nym_to_state_key(nym: str) -> bytes:
return sha256(nym.encode()).digest()
def get_value_from_state(self, path, head_hash=None, with_proof=False, multi_sig=None):
'''
Get a value (and proof optionally)for the given path in state trie.
Does not return the proof is there is no aggregate signature for it.
:param path: the path generate a state proof for
:param head_hash: the root to create the proof against
:param get_value: whether to return the value
:return: a state proof or None
'''
if not multi_sig:
root_hash = head_hash if head_hash else self.state.committedHeadHash
encoded_root_hash = state_roots_serializer.serialize(bytes(root_hash))
multi_sig = self.bls_store.get(encoded_root_hash)
self.super(path, head_hash, with_proof, multi_sig)
| Python | 0 | @@ -7382,18 +7382,43 @@
-self.super
+return super().get_value_from_state
(pat
|
f68e8612f1e8198a4b300b67536d654e13809eb4 | Allow SHA256 hashes in URLs | plinth/modules/monkeysphere/urls.py | plinth/modules/monkeysphere/urls.py | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URLs for the monkeysphere module.
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^sys/monkeysphere/$', views.index, name='index'),
url(r'^sys/monkeysphere/(?P<ssh_fingerprint>[0-9A-Fa-f:]+)/import/$',
views.import_key, name='import'),
url(r'^sys/monkeysphere/(?P<fingerprint>[0-9A-Fa-f]+)/details/$',
views.details, name='details'),
url(r'^sys/monkeysphere/(?P<fingerprint>[0-9A-Fa-f]+)/publish/$',
views.publish, name='publish'),
url(r'^sys/monkeysphere/cancel/$', views.cancel, name='cancel'),
]
| Python | 0.000004 | @@ -919,13 +919,15 @@
-9A-
-Fa-f:
+Za-z:+/
%5D+)/
|
547c9e36255870bcee8a800a3fa95c3806a95c2c | Update links when it starts getting redirected | newsApp/linkManager.py | newsApp/linkManager.py | import os
import time
from constants import *
from dbhelper import *
from dbItemManagerV2 import DbItemManagerV2
from link import Link
LINK_EXPIRY_TIME_IN_DAYS = 80
class LinkManager(DbItemManagerV2):
"""
Manage links stored on AWS dynamo db database.
Contains functions for CRUD operations on the links stored
Following environment variables need to be set -
'LINKTAGSTABLE_CONNECTIONSTRING' : connection string of link tags table.
"""
def __init__(self):
"""
Instantiates the linkManager.
"""
DbItemManagerV2.__init__(self,
os.environ['LINKTAGSTABLE_CONNECTIONSTRING'])
def get(self, linkId):
"""
Put a new link.
"""
dbItem = DbItemManagerV2.get(self, linkId);
return Link(linkId, dbItem.tags)
def getStaleLinks(self):
"""
Returns a list of linkIds of stale links.
"""
linkExpiryCutoff = int(time.time()) - LINK_EXPIRY_TIME_IN_DAYS*24*60*60;
scanResults = DbItemManagerV2.scan(self, pubtime__lte = linkExpiryCutoff)
return (result.id for result in scanResults)
def getUnprocessedLinks(self):
return DbItemManagerV2.query_2(
self,
isProcessed__eq = 'false',
index = 'isProcessed-itemId-index')
| Python | 0 | @@ -781,22 +781,22 @@
-return
+link =
Link(li
@@ -811,24 +811,208 @@
Item.tags)%0A%0A
+ #handle the case when link starts gettting redirected to new url%0A if link.id != linkId:%0A self.delete(linkId)%0A self.put(link)%0A%0A return link%0A%0A
def getS
|
366ecdd77520004c307cbbf127bb374ab546ce7e | Use windows API to change the AppID and use our icon. | run-quince.py | run-quince.py | #!/usr/bin/env python3
# coding: utf-8
# Raytheon BBN Technologies 2016
# Contributiors: Graham Rowlands
#
# This file runs the main loop
# Use PyQt5 by default
import os
os.environ["QT_API"] = 'pyqt5'
from qtpy.QtWidgets import QApplication
import sys
import argparse
from quince.view import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str, help='Measurement library filename')
args = parser.parse_args()
app = QApplication([])
# Setup icon
png_path = os.path.join(os.path.dirname(__file__), "assets/quince_icon.png")
app.setWindowIcon(QIcon(png_path))
window = NodeWindow()
window.load_yaml(args.filename)
app.aboutToQuit.connect(window.cleanup)
window.show()
sys.exit(app.exec_())
| Python | 0 | @@ -263,16 +263,30 @@
argparse
+%0Aimport ctypes
%0A%0Afrom q
@@ -660,16 +660,390 @@
path))%0A%0A
+ # Convince windows that this is a separate application to get the task bar icon working%0A # https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105%0A if (os.name == 'nt'):%0A myappid = u'BBN.quince.gui.0001' # arbitrary string%0A ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)%0A%0A
wind
|
976e9b622b66bee30d304a801cc39733cc3e8d58 | refactor to reduce size of __init__ | wikichatter/section.py | wikichatter/section.py | import mwparserfromhell as mwp
class Error(Exception):
pass
class TooManyHeadingsError(Error):
pass
EPI_LEVEL = 0
class Section(object):
def __init__(self, wikitext):
self._subsections = []
self.comments = []
# wikitext can be either a wikicode object or a string
if type(wikitext) is not mwp.wikicode.Wikicode:
wikicode = mwp.parse(self.wikitext, skip_style_tags=True)
else:
wikicode = wikitext
wiki_headings = [h for h in wikicode.filter_headings()]
if len(wiki_headings) > 1:
raise TooManyHeadingsError()
if len(wiki_headings) == 0:
self.heading = None
self.level = EPI_LEVEL
else:
self.heading = str(wiki_headings[0].title)
self.level = wiki_headings[0].level
self.text = self._get_section_text_from_wikicode(wikicode)
def append_subsection(self, subsection):
self._subsections.append(subsection)
def extract_comments(self, extractor):
self.comments = extractor(self.text)
for s in self._subsections:
s.extract_comments(extractor)
def _get_section_text_from_wikicode(self, wikicode):
sections = wikicode.get_sections(include_headings=False)
return str(sections[-1])
@property
def subsections(self):
return list(self._subsections)
def __str__(self):
return "<{0}: {1}>".format(self.level, self.heading)
def __repr__(self):
return str(self)
def simplify(self):
basic = {}
basic["subsections"] = [s.simplify() for s in self._subsections]
basic["comments"] = [c.simplify() for c in self.comments]
if self.heading is not None:
basic["heading"] = self.heading
return basic
def generate_sections_from_raw_text(text):
flat_sections = _generate_flat_list_of_sections(text)
return _sort_into_hierarchy(flat_sections)
def _generate_flat_list_of_sections(text):
wikicode = mwp.parse(text, skip_style_tags=True)
mw_sections = wikicode.get_sections(include_lead=True, flat=True)
sections = [Section(s) for s in mw_sections if len(s.nodes) > 0]
return sections
def _sort_into_hierarchy(section_list):
top_level_sections = []
section_stack = []
for section in section_list:
while len(section_stack) > 0:
cur_sec = section_stack[-1]
if cur_sec.level < section.level and cur_sec.level is not EPI_LEVEL:
cur_sec.append_subsection(section)
section_stack.append(section)
break
section_stack.pop()
if len(section_stack) is 0:
top_level_sections.append(section)
section_stack.append(section)
return top_level_sections
| Python | 0.000005 | @@ -239,16 +239,167 @@
ts = %5B%5D%0A
+ wikicode = self._get_wikicode_from_input(wikitext)%0A self._load_section_info(wikicode)%0A%0A def _get_wikicode_from_input(self, wikitext):
%0A
@@ -626,16 +626,85 @@
ikitext%0A
+ return wikicode%0A%0A def _load_section_info(self, wikicode):%0A
|
9e6b596aa856e1d50a9c2c2882289cf1a5d8c0c0 | Fix up plotting script | plot.py | plot.py | #!/usr/bin/env python
"""Processing routines for the waveFlapper case."""
import foampy
import numpy as np
import matplotlib.pyplot as plt
width_2d = 0.1
width_3d = 3.66
m_paddle = 1270.0 # Paddle mass in kg, from OMB manual
h_piston = 3.3147
I_paddle = 1/3*m_paddle*h_piston**2
def plot_force():
"""Plots the streamwise force on the paddle over time."""
def plot_moment():
data = foampy.load_forces_moments()
i = 10
t = data["time"][i:]
m = data["moment"]["pressure"]["z"] + data["moment"]["viscous"]["z"]
m = m[i:]*width_3d/width_2d
period = 2.2
omega = 2*np.pi/period
theta = 0.048*np.sin(omega*t)
theta_doubledot = -0.048*omega**2*np.sin(omega*t)
m_inertial = I_paddle*theta_doubledot
m = m + m_inertial
plt.figure()
plt.plot(t, m)
plt.xlabel("t (s)")
plt.ylabel("Flapper moment (Nm)")
print("Max moment from CFD (including inertia) = {:0.1f}".format(m.max()), "Nm")
print("Theoretical max moment (including inertia) =", 5500*3.3, "Nm")
plt.show()
if __name__ == "__main__":
plot_moment()
| Python | 0.000095 | @@ -183,16 +183,17 @@
1270.0
+
# Paddle
@@ -238,18 +238,16 @@
= 3.3147
-
%0AI_paddl
@@ -357,16 +357,25 @@
ime.%22%22%22%0A
+ pass%0A
%0A%0Adef pl
@@ -420,16 +420,8 @@
rces
-_moments
()%0A
@@ -471,68 +471,11 @@
data
-%5B%22moment%22%5D%5B%22pressure%22%5D%5B%22z%22%5D + data%5B%22moment%22%5D%5B%22viscous%22%5D%5B%22z%22%5D
+.mz
%0A
@@ -488,17 +488,19 @@
%5Bi:%5D
-*
+ *
width_3d
/wid
@@ -495,17 +495,19 @@
width_3d
-/
+ /
width_2d
@@ -541,15 +541,19 @@
= 2
-*
+ *
np.pi
-/
+ /
peri
@@ -572,17 +572,19 @@
= 0.048
-*
+ *
np.sin(o
@@ -583,25 +583,27 @@
np.sin(omega
-*
+ *
t)%0A theta
@@ -625,17 +625,19 @@
.048
-*
+ *
omega**2
*np.
@@ -632,17 +632,19 @@
omega**2
-*
+ *
np.sin(o
@@ -647,17 +647,19 @@
in(omega
-*
+ *
t)%0A m
@@ -678,17 +678,19 @@
I_paddle
-*
+ *
theta_do
@@ -772,17 +772,20 @@
xlabel(%22
-t
+Time
(s)%22)%0A
@@ -831,16 +831,25 @@
print(
+%0A
%22Max mom
@@ -890,16 +890,19 @@
%7B:0.1f%7D
+ Nm
%22.format
@@ -910,22 +910,21 @@
m.max())
-, %22Nm%22
+%0A
)%0A pr
@@ -990,17 +990,16 @@
3, %22Nm%22)
-
%0A plt
|
6549f68fe7fa2d36babfb2ea0b463cfd213f032c | Use CanonicalRampedSecondOrder by default from now on. | run_config.py | run_config.py | '''
run_config.py Configuration settings for a simulation run
Created on 30 Jun 2010
@author: Ian Huston
'''
import numpy as np
import cosmomodels as c
from configuration import PROGRAM_NAME, LOGLEVEL
from sourceterm import srcequations
import os.path
fixtures = {"msqphisq": {"potential_func": "msqphisq",
"ystart": np.array([18.0, -0.1,0,0,0,0,0])},
"lambdaphi4": {"potential_func": "lambdaphi4",
"ystart": np.array([25.0, 0,0,0,0,0,0])},
"hybrid2and4": {"potential_func": "hybrid2and4",
"ystart": np.array([25.0, 0,0,0,0,0,0])},
"linde": {"potential_func": "linde",
"ystart": np.array([25.0, 0,0,0,0,0,0])},
"phi2over3": {"potential_func": "phi2over3",
"ystart": np.array([10.0, 0,0,0,0,0,0])},
"msqphisq_withV0": {"potential_func": "msqphisq_withV0",
"ystart": np.array([18.0, 0,0,0,0,0,0])},
"step_potential": {"potential_func": "step_potential",
"ystart": np.array([18.0, -0.1,0,0,0,0,0])},
"bump_potential": {"potential_func": "bump_potential",
"ystart": np.array([18.0, -0.1,0,0,0,0,0])}
}
##############################
# CHOOSE FIXTURE HERE
fx = fixtures["msqphisq"]
##############################
##############################
# kinit, deltak values
# Add range to K_ranges to change values
##############################
K_ranges = { "K1": {"kinit": 0.5e-61, "deltak": 1e-61, "numsoks": 1025},
"K2": {"kinit": 1.5e-61, "deltak": 3e-61, "numsoks": 1025},
"K3": {"kinit": 0.25e-60, "deltak": 1e-60, "numsoks": 1025}}
#Pick K_range used
K_range = K_ranges["K1"]
#Do not change these values
kinit = K_range["kinit"]
deltak = K_range["deltak"]
numsoks = K_range["numsoks"] #Should be power of two + 1
def getkend(kinit, deltak, numsoks):
"""Correct kend value given the values of kinit, deltak and numsoks.
"""
#Change from numsoks-1 to numsoks to include extra point when deltak!=kinit
return 2*((numsoks)*deltak + kinit)
kend = getkend(kinit, deltak, numsoks)
##############################
# IMPORTANT VALUES
# DO NOT CHANGE UNLESS SURE
##############################
ntheta = 513
foclass = c.FOCanonicalTwoStage
srcclass = srcequations.FullSingleFieldSource
cq = 50
#If sourceterm files already exist should they be overwritten?
overwrite = True
##############################
# DO NOT CHANGE ANYTHING BELOW
# THIS LINE
##############################
from configuration import CODEDIR, RESULTSDIR, LOGDIR, QSUBLOGSDIR, QSUBSCRIPTSDIR, _debug
from configuration import provenancefilename
if not all(map(os.path.isdir, [CODEDIR, RESULTSDIR, LOGDIR, QSUBSCRIPTSDIR, QSUBLOGSDIR])):
raise IOError("Directory structure is not correct!")
logfile = os.path.join(LOGDIR, "run.log")
provenancefile = os.path.join(LOGDIR, provenancefilename)
#Arguments for first and second order models
pot_func = fx["potential_func"]
ystart = fx["ystart"]
foargs = {"potential_func": pot_func,
"ystart": ystart,
"cq": cq,
"solver": "rkdriver_tsix"}
soargs = {"solver": "rkdriver_tsix"}
##############################
# qsub submission values
#
##############################
runname = PROGRAM_NAME[0:4]
qsublogname = os.path.join(QSUBLOGSDIR, "log" )
timelimit = "23:00:00" # Time needed for each array job
taskmin= "1" #starting task id number
taskmax= "100" #finishing task id number
hold_jid_list= "" # List of jobs this task depends on
templatefile = os.path.join(CODEDIR, "qsub-template.sh")
foscriptname = os.path.join(QSUBSCRIPTSDIR, "fo.qsub")
srcscriptname = os.path.join(QSUBSCRIPTSDIR, "src.qsub")
mrgscriptname = os.path.join(QSUBSCRIPTSDIR, "mrg.qsub")
soscriptname = os.path.join(QSUBSCRIPTSDIR, "so.qsub")
cmbscriptname = os.path.join(QSUBSCRIPTSDIR, "cmb.qsub")
foresults = os.path.join(RESULTSDIR, "fo.hf5")
#Source results will be stored in src-#.hf5
srcstub = os.path.join(RESULTSDIR, "src-")
#This is the pattern that is checked when results are merged
pattern = "src-(\d*).hf5"
srcresults = os.path.join(RESULTSDIR, "src.hf5")
mrgresults = os.path.join(RESULTSDIR, "mrg.hf5")
soresults = os.path.join(RESULTSDIR, "so.hf5")
cmbresults = os.path.join(RESULTSDIR, "cmb.hf5")
| Python | 0 | @@ -2509,16 +2509,55 @@
dSource%0A
+soclass = c.CanonicalRampedSecondOrder%0A
cq = 50%0A
@@ -3398,16 +3398,46 @@
er_tsix%22
+,%0A %22soclass%22: soclass
%7D%0A %0A####
|
5b9b24053eaccc2083184a9ce58699b3765e284c | Add async=False option to AWS core class for easier testing | asyncaws/core.py | asyncaws/core.py | from tornado.httpclient import HTTPRequest, HTTPClient, AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.httputil import url_concat
from concurrent.futures import Future
from urlparse import urlparse
from lxml import objectify
import datetime
import hashlib
import hmac
def sign(key, msg):
"""Make sha256 signature"""
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def get_signature_key(key, date_stamp, region_name, service_name):
"""Sign all params sequentially"""
k_date = sign(('AWS4' + key).encode('utf-8'), date_stamp)
k_region = sign(k_date, region_name)
k_service = sign(k_region, service_name)
k_signing = sign(k_service, 'aws4_request')
return k_signing
class AWSRequest(HTTPRequest):
"""
Generic AWS Adapter for Tornado HTTP request
Generates v4 signature and sets all required headers
"""
def __init__(self, *args, **kwargs):
service = kwargs['service']
region = kwargs['region']
method = kwargs.get('method', 'GET')
url = kwargs.get('url') or args[0]
# tornado url_concat encodes spaces as '+', but AWS expects '%20'
url = url.replace('+', '%20')
parsed_url = urlparse(url)
host = parsed_url.netloc
canonical_uri = parsed_url.path
# sort params alphabetically
params = sorted(parsed_url.query.split('&'))
canonical_querystring = '&'.join(params)
kwargs['url'] = url.replace(parsed_url.query, canonical_querystring)
# reset args, everything is passed with kwargs
args = tuple()
# prepare timestamps
utc_time = datetime.datetime.utcnow()
amz_date = utc_time.strftime('%Y%m%dT%H%M%SZ')
date_stamp = utc_time.strftime('%Y%m%d')
# prepare aws-specific headers
canonical_headers = 'host:{host}\nx-amz-date:{amz_date}\n'.format(
host=host, amz_date=amz_date)
signed_headers = 'host;x-amz-date'
# for GET requests payload is empty
payload_hash = hashlib.sha256('').hexdigest()
canonical_request = (
'{method}\n{canonical_uri}\n{canonical_querystring}'
'\n{canonical_headers}\n{signed_headers}\n{payload_hash}'
).format(
method=method, canonical_uri=canonical_uri,
canonical_querystring=canonical_querystring,
canonical_headers=canonical_headers, signed_headers=signed_headers,
payload_hash=payload_hash
)
# creating signature
algorithm = 'AWS4-HMAC-SHA256'
scope = '{date_stamp}/{region}/{service}/aws4_request'.format(
date_stamp=date_stamp, region=region, service=service)
string_to_sign = '{algorithm}\n{amz_date}\n{scope}\n{hash}'.format(
algorithm=algorithm, amz_date=amz_date, scope=scope,
hash=hashlib.sha256(canonical_request).hexdigest())
sign_key = get_signature_key(kwargs['secret_key'],
date_stamp, region, service)
hash_tuple = (sign_key, string_to_sign.encode('utf-8'), hashlib.sha256)
signature = hmac.new(*hash_tuple).hexdigest()
authorization_header = (
'{algorithm} Credential={access_key}/{scope}, '
'SignedHeaders={signed_headers}, Signature={signature}'
).format(
algorithm=algorithm, access_key=kwargs['access_key'], scope=scope,
signed_headers=signed_headers, signature=signature
)
# clean-up kwargs
del kwargs['access_key']
del kwargs['secret_key']
del kwargs['service']
del kwargs['region']
# update headers
headers = kwargs.get('headers', {})
headers.update({'x-amz-date': amz_date,
'Authorization': authorization_header})
kwargs['headers'] = headers
# init Tornado HTTPRequest
super(AWSRequest, self).__init__(*args, **kwargs)
class AWS(object):
"""
Generic class for AWS API implementations: SQS, SNS, etc
"""
def __init__(self, access_key, secret_key, region, async=True):
self.region = region
self.__access_key = access_key
self.__secret_key = secret_key
self._http = AsyncHTTPClient() if async else HTTPClient()
def _process(self, url, params, service, parse_function):
"""Prepare request and result parsing callback"""
full_url = url_concat(url, params)
request = AWSRequest(full_url, service=service, region=self.region,
access_key=self.__access_key,
secret_key=self.__secret_key)
ioloop = IOLoop.current()
final_result = Future()
def inject_result(future):
"""callback to connect AsyncHTTPClient future with parse function"""
raw_response = future.result().body
xml_root = objectify.fromstring(raw_response)
final_result.set_result(parse_function(xml_root))
ioloop.add_future(self._http.fetch(request), inject_result)
return final_result
| Python | 0.000002 | @@ -4296,16 +4296,44 @@
Client()
+%0A self._async = async
%0A%0A de
@@ -4683,16 +4683,239 @@
et_key)%0A
+ if not self._async:%0A http_response = self._http.fetch(request)%0A xml_root = objectify.fromstring(http_response.body)%0A response = parse_function(xml_root)%0A return response%0A%0A
|
8d6fcc6d318423e87e9942c2551c0d9b3c282e25 | Allow TELEGRAM_TEMPLATE to be a string (#208) | plugins/telegram/alerta_telegram.py | plugins/telegram/alerta_telegram.py | import logging
import os
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
import telepot
from jinja2 import Template, UndefinedError
DEFAULT_TMPL = """
{% if customer %}Customer: `{{customer}}` {% endif %}
*[{{ status.capitalize() }}] {{ environment }} {{ severity.capitalize() }}*
{{ event | replace("_","\_") }} {{ resource.capitalize() }}
```
{{ text }}
```
"""
LOG = logging.getLogger('alerta.plugins.telegram')
TELEGRAM_TOKEN = app.config.get('TELEGRAM_TOKEN') \
or os.environ.get('TELEGRAM_TOKEN')
TELEGRAM_CHAT_ID = app.config.get('TELEGRAM_CHAT_ID') \
or os.environ.get('TELEGRAM_CHAT_ID')
TELEGRAM_WEBHOOK_URL = app.config.get('TELEGRAM_WEBHOOK_URL', None) \
or os.environ.get('TELEGRAM_WEBHOOK_URL')
TELEGRAM_TEMPLATE = app.config.get('TELEGRAM_TEMPLATE') \
or os.environ.get('TELEGRAM_TEMPLATE')
DASHBOARD_URL = app.config.get('DASHBOARD_URL', '') \
or os.environ.get('DASHBOARD_URL')
class TelegramBot(PluginBase):
def __init__(self, name=None):
self.bot = telepot.Bot(TELEGRAM_TOKEN)
LOG.debug('Telegram: %s', self.bot.getMe())
if TELEGRAM_WEBHOOK_URL and \
TELEGRAM_WEBHOOK_URL != self.bot.getWebhookInfo()['url']:
self.bot.setWebhook(TELEGRAM_WEBHOOK_URL)
LOG.debug('Telegram: %s', self.bot.getWebhookInfo())
super(TelegramBot, self).__init__(name)
if TELEGRAM_TEMPLATE and os.path.exists(TELEGRAM_TEMPLATE):
with open(TELEGRAM_TEMPLATE, 'r') as f:
self.template = Template(f.read())
else:
self.template = Template(DEFAULT_TMPL)
def pre_receive(self, alert):
return alert
def post_receive(self, alert):
if alert.repeat:
return
try:
text = self.template.render(alert.__dict__)
except UndefinedError:
text = "Something bad has happened but also we " \
"can't handle your telegram template message."
LOG.debug('Telegram: message=%s', text)
if TELEGRAM_WEBHOOK_URL:
keyboard = {
'inline_keyboard': [
[
{'text': 'ack', 'callback_data': '/ack ' + alert.id},
{'text': 'close', 'callback_data': '/close ' + alert.id},
{'text': 'blackout',
'callback_data': '/blackout %s|%s|%s' % (alert.environment,
alert.resource,
alert.event)}
]
]
}
else:
keyboard = None
try:
response = self.bot.sendMessage(TELEGRAM_CHAT_ID,
text,
parse_mode='Markdown',
reply_markup=keyboard)
except telepot.exception.TelegramError as e:
raise RuntimeError("Telegram: ERROR - %s, description= %s, json=%s",
e.error_code,
e.description,
e.json)
except Exception as e:
raise RuntimeError("Telegram: ERROR - %s", e)
LOG.debug('Telegram: %s', response)
def status_change(self, alert, status, summary):
return
| Python | 0.000002 | @@ -1590,20 +1590,32 @@
TEMPLATE
- and
+:%0A if
os.path
@@ -1642,16 +1642,20 @@
PLATE):%0A
+
@@ -1698,16 +1698,20 @@
) as f:%0A
+
@@ -1753,16 +1753,94 @@
read())%0A
+ else:%0A self.template = Template(TELEGRAM_TEMPLATE)%0A
|
5a6cdb9dc08924dc90a24271dc45f4412250b06a | bump version | src/experimentator/__version__.py | src/experimentator/__version__.py | __version__ = '0.2.0'
| Python | 0 | @@ -12,11 +12,11 @@
= '0.2.
-0
+1
'%0A
|
9a18c20741f1d9b077f6ab992391463e033c53e4 | Use assertRaises() instead of try/except/isinstance in test suite | humanfriendly_tests.py | humanfriendly_tests.py | #!/usr/bin/env python
# Tests for the 'humanfriendly' module.
#
# Author: Peter Odding <peter.odding@paylogic.eu>
# Last Change: June 27, 2013
# URL: https://humanfriendly.readthedocs.org
# Standard library modules.
import math
import os
import unittest
# The module we are testing.
import humanfriendly
class HumanFriendlyTestCase(unittest.TestCase):
def test_format_timespan(self):
minute = 60
hour = minute * 60
day = hour * 24
week = day * 7
year = week * 52
self.assertEqual('0 seconds', humanfriendly.format_timespan(0))
self.assertEqual('0.54 seconds', humanfriendly.format_timespan(0.54321))
self.assertEqual('1 second', humanfriendly.format_timespan(1))
self.assertEqual('3.14 seconds', humanfriendly.format_timespan(math.pi))
self.assertEqual('1 minute', humanfriendly.format_timespan(minute))
self.assertEqual('1 minute and 20 seconds', humanfriendly.format_timespan(80))
self.assertEqual('2 minutes', humanfriendly.format_timespan(minute * 2))
self.assertEqual('1 hour', humanfriendly.format_timespan(hour))
self.assertEqual('2 hours', humanfriendly.format_timespan(hour * 2))
self.assertEqual('1 day', humanfriendly.format_timespan(day))
self.assertEqual('2 days', humanfriendly.format_timespan(day * 2))
self.assertEqual('1 week', humanfriendly.format_timespan(week))
self.assertEqual('2 weeks', humanfriendly.format_timespan(week * 2))
self.assertEqual('1 year', humanfriendly.format_timespan(year))
self.assertEqual('2 years', humanfriendly.format_timespan(year * 2))
self.assertEqual('1 year, 2 weeks and 3 days', humanfriendly.format_timespan(year + week * 2 + day * 3 + hour * 12))
def test_parse_date(self):
self.assertEqual((2013, 6, 17, 0, 0, 0), humanfriendly.parse_date('2013-06-17'))
self.assertEqual((2013, 6, 17, 2, 47, 42), humanfriendly.parse_date('2013-06-17 02:47:42'))
try:
humanfriendly.parse_date('2013-06-XY')
self.assertTrue(False)
except Exception as e:
self.assertTrue(isinstance(e, humanfriendly.InvalidDate))
def test_format_size(self):
self.assertEqual('0 bytes', humanfriendly.format_size(0))
self.assertEqual('1 byte', humanfriendly.format_size(1))
self.assertEqual('42 bytes', humanfriendly.format_size(42))
self.assertEqual('1 KB', humanfriendly.format_size(1024 ** 1))
self.assertEqual('1 MB', humanfriendly.format_size(1024 ** 2))
self.assertEqual('1 GB', humanfriendly.format_size(1024 ** 3))
self.assertEqual('1 TB', humanfriendly.format_size(1024 ** 4))
self.assertEqual('1 PB', humanfriendly.format_size(1024 ** 5))
def test_parse_size(self):
self.assertEqual(42, humanfriendly.parse_size('42'))
self.assertEqual(1024, humanfriendly.parse_size('1k'))
self.assertEqual(1024, humanfriendly.parse_size('1 KB'))
self.assertEqual(1024, humanfriendly.parse_size('1 kilobyte'))
self.assertEqual(1024 ** 3, humanfriendly.parse_size('1 GB'))
try:
humanfriendly.parse_size('1z')
self.assertTrue(False)
except Exception as e:
self.assertTrue(isinstance(e, humanfriendly.InvalidSize))
def test_round_number(self):
self.assertEqual('1', humanfriendly.round_number(1))
self.assertEqual('1', humanfriendly.round_number(1.0))
self.assertEqual('1.00', humanfriendly.round_number(1, keep_width=True))
self.assertEqual('3.14', humanfriendly.round_number(3.141592653589793))
def test_format_path(self):
abspath = os.path.join(os.environ['HOME'], '.vimrc')
self.assertEqual(os.path.join('~', '.vimrc'), humanfriendly.format_path(abspath))
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | @@ -2004,198 +2004,91 @@
-try:%0A humanfriendly.parse_date('2013-06-XY')%0A self.assertTrue(False)%0A except Exception as e:%0A self.assertTrue(isinstance(e, humanfriendly.InvalidDate)
+self.assertRaises(humanfriendly.InvalidDate, humanfriendly.parse_date, '2013-06-XY'
)%0A%0A
@@ -3046,24 +3046,52 @@
-try:%0A
+self.assertRaises(humanfriendly.InvalidSize,
hum
@@ -3115,19 +3115,16 @@
size
-(
+,
'1z')%0A
-
@@ -3142,90 +3142,39 @@
sert
-True(False)%0A except Exception as e:%0A self.assertTrue(isinstance(
+Raises(humanfriendly.InvalidSiz
e, h
@@ -3190,20 +3190,23 @@
dly.
-InvalidSize)
+parse_size, 'a'
)%0A%0A
|
101f8c44ec0b55111f93e7c2a0d8f1710405452f | FIX get_name funtion | addons/nautical_search_by_ni/res_partner.py | addons/nautical_search_by_ni/res_partner.py | # -*- coding: utf-8 -*-
import datetime
from lxml import etree
import math
import pytz
import re
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools.yaml_import import is_comment
class res_partner(osv.osv):
_inherit = "res.partner"
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
national_identity = ''
if record.national_identity:
national_identity = '[' + record.national_identity + ']'
name = "%s %s" % (national_identity, name)
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_id.name, name)
if context.get('show_address'):
name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n','\n')
name = name.replace('\n\n','\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
res.append((record.id, name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
query_args = {'name': search_name}
# TODO: simplify this in trunk with `display_name`, once it is stored
# Perf note: a CTE expression (WITH ...) seems to have an even higher cost
# than this query with duplicated CASE expressions. The bulk of
# the cost is the ORDER BY, and it is inevitable if we want
# relevant results for the next step, otherwise we'd return
# a random selection of `limit` results.
query = ('''SELECT partner.id FROM res_partner partner
LEFT JOIN res_partner company
ON partner.parent_id = company.id
WHERE partner.national_identity ''' + operator + ''' %(name)s OR
partner.email ''' + operator + ''' %(name)s OR
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END ''' + operator + ''' %(name)s
ORDER BY
CASE
WHEN company.id IS NULL OR partner.is_company
THEN partner.name
ELSE company.name || ', ' || partner.name
END''')
if limit:
query += ' limit %(limit)s'
query_args['limit'] = limit
cr.execute(query, query_args)
ids = map(lambda x: x[0], cr.fetchall())
ids = self.search(cr, uid, [('id', 'in', ids)] + args, limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
| Python | 0.000002 | @@ -820,16 +820,22 @@
%25s%22 %25 (
+name,
national
@@ -843,22 +843,16 @@
identity
-, name
)%0A
|
df962bc49d9880c2df886213c049bcb8a13bfd7e | Fix error when timezone is set but format is not | willie/modules/tell.py | willie/modules/tell.py | """
tell.py - Willie Tell and Ask Module
Copyright 2008, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
import os
import re
import time
import datetime
import pytz
import random
import threading
from willie.tools import Nick
maximum = 4
def loadReminders(fn, lock):
lock.acquire()
try:
result = {}
f = open(fn)
for line in f:
line = line.strip()
if line:
try: tellee, teller, verb, timenow, msg = line.split('\t', 4)
except ValueError: continue # @@ hmm
result.setdefault(tellee, []).append((teller, verb, timenow, msg))
f.close()
finally:
lock.release()
return result
def dumpReminders(fn, data, lock):
lock.acquire()
try:
f = open(fn, 'w')
for tellee in data.iterkeys():
for remindon in data[tellee]:
line = '\t'.join((tellee,) + remindon)
try: f.write(line + '\n')
except IOError: break
try: f.close()
except IOError: pass
finally:
lock.release()
return True
def setup(self):
fn = self.nick + '-' + self.config.host + '.tell.db'
self.tell_filename = os.path.join(self.config.dotdir, fn)
if not os.path.exists(self.tell_filename):
try: f = open(self.tell_filename, 'w')
except OSError: pass
else:
f.write('')
f.close()
self.memory['tell_lock'] = threading.Lock()
self.memory['reminders'] = loadReminders(self.tell_filename, self.memory['tell_lock'])
def get_user_time(willie, nick):
tz = 'UTC'
tformat = '%Y-%m-%d %H:%M:%S %Z'
if willie.db and nick in willie.db.preferences:
tz = willie.db.preferences.get(nick, 'tz') or 'UTC'
tformat = willie.db.preferences.get(nick, 'time_format')
if tz not in pytz.all_timezones_set:
tz = 'UTC'
return (pytz.timezone(tz.strip()), tformat)
def f_remind(willie, trigger):
teller = trigger.nick
verb, tellee, msg = trigger.groups()
verb = unicode(verb)
tellee = Nick(tellee.rstrip('.,:;'))
msg = unicode(msg)
if not os.path.exists(willie.tell_filename):
return
if len(tellee) > 20:
return willie.reply('That nickname is too long.')
if tellee == willie.nick:
return willie.reply("I'm here now, you can tell me whatever you want!")
tz, tformat = get_user_time(willie, tellee)
print tellee, tz, tformat
timenow = datetime.datetime.now(tz).strftime(tformat)
if not tellee in (Nick(teller), willie.nick, 'me'):
willie.memory['tell_lock'].acquire()
try:
if not willie.memory['reminders'].has_key(tellee):
willie.memory['reminders'][tellee] = [(teller, verb, timenow, msg)]
else:
willie.memory['reminders'][tellee].append((teller, verb, timenow, msg))
finally:
willie.memory['tell_lock'].release()
response = "I'll pass that on when %s is around." % tellee
willie.reply(response)
elif Nick(teller) == tellee:
willie.say('You can %s yourself that.' % verb)
else: willie.say("Hey, I'm not as stupid as Monty you know!")
dumpReminders(willie.tell_filename, willie.memory['reminders'], willie.memory['tell_lock']) # @@ tell
f_remind.rule = ('$nick', ['tell', 'ask'], r'(\S+) (.*)')
def getReminders(willie, channel, key, tellee):
lines = []
template = "%s: %s <%s> %s %s %s"
today = time.strftime('%d %b', time.gmtime())
willie.memory['tell_lock'].acquire()
try:
for (teller, verb, datetime, msg) in willie.memory['reminders'][key]:
if datetime.startswith(today):
datetime = datetime[len(today)+1:]
lines.append(template % (tellee, datetime, teller, verb, tellee, msg))
try: del willie.memory['reminders'][key]
except KeyError: willie.msg(channel, 'Er...')
finally:
willie.memory['tell_lock'].release()
return lines
def message(willie, trigger):
tellee = trigger.nick
channel = trigger.sender
if not os.path.exists(willie.tell_filename):
return
reminders = []
remkeys = list(reversed(sorted(willie.memory['reminders'].keys())))
for remkey in remkeys:
if not remkey.endswith('*') or remkey.endswith(':'):
if tellee == remkey:
reminders.extend(getReminders(willie, channel, remkey, tellee))
elif tellee.startswith(remkey.rstrip('*:')):
reminders.extend(getReminders(willie, channel, remkey, tellee))
for line in reminders[:maximum]:
willie.say(line)
if reminders[maximum:]:
willie.say('Further messages sent privately')
for line in reminders[maximum:]:
willie.msg(tellee, line)
if len(willie.memory['reminders'].keys()) != remkeys:
dumpReminders(willie.tell_filename, willie.memory['reminders'], willie.memory['tell_lock']) # @@ tell
message.rule = r'(.*)'
message.priority = 'low'
if __name__ == '__main__':
print __doc__.strip()
| Python | 0 | @@ -1684,30 +1684,12 @@
t =
-'%25Y-%25m-%25d %25H:%25M:%25S %25Z'
+None
%0A
@@ -1976,16 +1976,42 @@
tformat
+ or '%25Y-%25m-%25d %25H:%25M:%25S %25Z'
)%0A%0A%0Adef
|
7b3f447e7fa83eed97b9d54fe79db01ea325f1d2 | Drop cargo-culted pin | application/setup.py | application/setup.py | from setuptools import setup
name = 'senic.nuimo_hub'
setup(
name=name,
version_format='{tag}.{commitcount}+{gitsha}',
url='https://github.com/getsenic/nuimo-hub-app',
author='Senic GmbH',
author_email='tom@senic.com',
description='...',
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
packages=[name],
namespace_packages=['senic'],
include_package_data=True,
package_dir={name: 'senic/nuimo_hub'},
package_data={
name: [
'.coveragerc',
'tests/*.py',
'tests/data/*.*',
'views/*.*',
],
},
zip_safe=False,
setup_requires=[
'setuptools-git >= 0',
'setuptools-git-version'
],
install_requires=[
'click',
'colander',
'cornice<2.0',
'pyramid',
'pyramid_tm',
'pytz',
'requests',
'senic.cryptoyaml',
'wifi',
],
extras_require={
'development': [
'devpi-client',
'docutils',
'flake8',
'jinja2',
'mock',
'pbr',
'pdbpp',
'pep8 < 1.6',
'py >= 1.4.17',
'pyflakes < 1.4.0',
'pyquery',
'pyramid_debugtoolbar',
'pytest',
'pytest-cov',
'pytest-flakes',
'pytest-pep8',
'python-dateutil',
'repoze.sphinx.autointerface',
'setuptools-git',
'Sphinx',
'tox',
'waitress',
'webtest',
],
},
entry_points="""
[paste.app_factory]
main = senic.nuimo_hub:main
[console_scripts]
scan_wifi = senic.nuimo_hub.commands:scan_wifi
join_wifi = senic.nuimo_hub.commands:join_wifi
""",
)
| Python | 0 | @@ -1356,16 +1356,8 @@
akes
- %3C 1.4.0
',%0A
|
e44473e7ca381dce379b89c2a17bb79aa917afbd | fix Order.ensure_subprodcts never making a query. closes #1 | humblebundle/models.py | humblebundle/models.py | """
Model classes for the Humble Bundle API
This module only is guaranteed to only contain model class definitions
"""
__author__ = "Joel Pedraza"
__copyright__ = "Copyright 2014, Joel Pedraza"
__license__ = "MIT"
class BaseModel(object):
def __init__(self, client, data):
self._client = client
def __str__(self):
return str({key: self.__dict__[key] for key in self.__dict__ if key != '_client'})
def __repr__(self):
return repr(self.__dict__)
def __iter__(self):
return self.__dict__.__iter__()
class Order(BaseModel):
def __init__(self, client, data):
super(Order, self).__init__(client, data)
self.product = Product(client, data['product'])
self.subscriptions = [Subscription(client, sub) for sub in data['subscriptions']]
self.thankname = data['thankname']
self.claimed = data['claimed']
self.gamekey = data['gamekey']
self.country = data['country']
self.giftee = data['giftee']
self.leaderboard = data['leaderboard']
self.owner_username = data['owner_username']
self.platforms = [plat for plat, v in data['platform'].items() if v > 0]
self.subproducts = [Subproduct(client, prod) for prod in data.get('subproducts', [])]
def __repr__(self):
return "Order: <%s>" % self.product.machine_name
def ensure_subproducts(self, *args, **kwargs):
if not hasattr(self, 'subproducts'):
self.__dict__.update(self._client.order(self.gamekey, *args, **kwargs).__dict__)
return self
class Product(BaseModel):
def __init__(self, client, data):
super(Product, self).__init__(client, data)
self.category = data['category']
self.human_name = data['human_name']
self.machine_name = data['machine_name']
self.supports_canonical = data['supports_canonical']
def __repr__(self):
return "Product: <%s>" % self.machine_name
class Subscription(BaseModel):
def __init__(self, client, data):
super(Subscription, self).__init__(client, data)
self.human_name = data['human_name']
self.list_name = data['list_name']
self.subscribed = data['subscribed']
def __repr__(self):
return "Subscription: <%s : %s>" % (self.list_name, self.subscribed)
class Subproduct(BaseModel):
def __init__(self, client, data):
super(Subproduct, self).__init__(client, data)
self.machine_name = data['machine_name']
self.payee = Payee(client, data['payee'])
self.url = data['url']
self.downloads = [Download(client, download) for download in data['downloads']]
self.human_name = data['human_name']
self.custom_download_page_box_html = data['custom_download_page_box_html']
self.icon = data['icon']
def __repr__(self):
return "Subproduct: <%s>" % self.machine_name
class Payee(BaseModel):
def __init__(self, client, data):
super(Payee, self).__init__(client, data)
self.human_name = data['human_name']
self.machine_name = data['machine_name']
def __repr__(self):
return "Payee: <%s>" % self.machine_name
class Download(BaseModel):
def __init__(self, client, data):
super(Download, self).__init__(client, data)
self.machine_name = data['machine_name']
self.platform = data['platform']
self.download_struct = [DownloadStruct(client, struct) for struct in data['download_struct']]
self.options_dict = data['options_dict']
self.download_identifier = data['download_identifier']
self.download_version_number = data['download_version_number']
def sign_download_url(self, *args, **kwargs):
return self._client.sign_download_url(self.machine_name, *args, **kwargs)
def __repr__(self):
return "Download: <%s>" % self.machine_name
class DownloadStruct(BaseModel):
def __init__(self, client, data):
super(DownloadStruct, self).__init__(client, data)
self.sha1 = data.get('sha1', None)
self.name = data.get('name', None)
self.message = data.get('message', None)
self.url = Url(client, data.get('url', {}))
self.external_link = data.get('external_link', None)
self.recommend_bittorrent = data['recommend_bittorrent']
self.human_size = data['human_size']
self.file_size = data.get('file_size', None)
self.md5 = data.get('md5', None)
self.fat32_warning = data['fat32_warning']
self.size = data.get('size', None)
self.small = data.get('small', None)
class Url(BaseModel):
def __init__(self, client, data):
super(Url, self).__init__(client, data)
self.web = data.get('web', None)
self.bittorrent = data.get('bittorrent', None) | Python | 0.000019 | @@ -1205,24 +1205,25 @@
bproducts =
+(
%5BSubproduct(
@@ -1276,16 +1276,25 @@
s', %5B%5D)%5D
+) or None
%0A%0A de
@@ -1435,27 +1435,13 @@
if
-not hasattr(
self
-, '
+.
subp
@@ -1451,10 +1451,16 @@
ucts
-')
+ is None
:%0A
|
42c80ac7499fbe05a9ddb916b42c56dcd265e5d9 | Write while filename in description, not basename | pmap.py | pmap.py | #!/usr/bin/env python2
"""Produce parametric maps by fitting one or more diffusion models to imaging
data. Multiple input images can be provided in ASCII format. Single input image
can be provided as a group of DICOM files. Output is written in ASCII files
named by input and model."""
import os.path
import sys
import argparse
from dwi import dwimage
from dwi import models
def parse_args():
"""Parse command-line arguments."""
p = argparse.ArgumentParser(description = __doc__)
p.add_argument('-v', '--verbose',
action='count',
help='increase verbosity')
p.add_argument('-l', '--listmodels',
action='store_true',
help='list available models')
p.add_argument('-a', '--average',
action='store_true',
help='average input voxels into one')
p.add_argument('-s', '--subwindow', metavar='I',
nargs=6, default=[], required=False, type=int,
help='use subwindow (specified by 6 one-based indices)')
p.add_argument('-m', '--models', metavar='MODEL',
nargs='+', default=[],
help='models to use')
p.add_argument('-i', '--input', metavar='FILENAME',
nargs='+', default=[],
help='input ASCII files')
p.add_argument('-d', '--dicom', metavar='PATHNAME',
nargs='+', default=[],
help='input DICOM files or directories')
p.add_argument('-o', '--output', metavar='FILENAME',
required=False,
help='output file (for single model only)')
args = p.parse_args()
return args
def write_pmap_ascii(dwi, model, params, pmap):
"""Write parameter images to an ASCII file."""
if args.output:
filename = args.output
else:
filename = '%s_%s.txt' % (os.path.basename(dwi.filename), model)
print 'Writing parameters to %s...' % filename
with open(filename, 'w') as f:
write_pmap_ascii_head(dwi, model, params, f)
write_pmap_ascii_body(pmap, f)
def write_pmap_ascii_head(dwi, model, params, f):
f.write('subwindow: [%s]\n' % ' '.join(map(str, dwi.subwindow)))
f.write('number: %d\n' % dwi.number)
f.write('bset: [%s]\n' % ' '.join(map(str, dwi.bset)))
f.write('ROIslice: %s\n' % dwi.roislice)
f.write('name: %s\n' % dwi.name)
f.write('executiontime: %d s\n' % dwi.execution_time())
f.write('description: %s %s\n' % (os.path.basename(dwi.filename),
repr(model)))
f.write('model: %s\n' % model.name)
f.write('parameters: %s\n' % ' '.join(map(str, params)))
def write_pmap_ascii_body(pmap, f):
for p in pmap:
f.write(' '.join(map(repr, p)) + '\n')
def log(str):
sys.stderr.write(str)
sys.stderr.flush()
def fit_dwi(model, dwi):
if args.subwindow:
dwi = dwi.get_roi(args.subwindow, onebased=True)
if args.verbose:
print dwi
logger = log if args.verbose > 1 else None
if not model.params:
model.params = ['SI%dN' % b for b in dwi.bset]
params = model.params + ['RMSE']
#pmap = dwi.fit_whole(model, log=logger, mean=args.average)
pmap = dwi.fit(model)
write_pmap_ascii(dwi, model, params, pmap)
def fit_ascii(model, filename):
dwis = dwimage.load(filename, 1)
for dwi in dwis:
fit_dwi(model, dwi)
def fit_dicom(model, filenames):
dwis = dwimage.load_dicom(filenames)
for dwi in dwis:
fit_dwi(model, dwi)
args = parse_args()
if args.output and len(args.models) > 1:
raise 'Error: one output file, several models.'
if args.listmodels:
for model in models.Models:
print '{n}: {d}'.format(n=model.name, d=model.desc)
print '{n}: {d}'.format(n='all', d='all models')
print '{n}: {d}'.format(n='normalized', d='all normalized models')
selected_models = args.models
if 'all' in selected_models:
selected_models += [m.name for m in models.Models]
elif 'normalized' in selected_models:
selected_models += 'SiN MonoN KurtN StretchedN BiexpN'.split()
for model in models.Models:
if model.name in selected_models:
for filename in args.input:
fit_ascii(model, filename)
if args.dicom:
fit_dicom(model, args.dicom)
| Python | 0.022384 | @@ -2398,33 +2398,16 @@
%25s%5Cn' %25
-(os.path.basename
(dwi.fil
@@ -2415,18 +2415,9 @@
name
-),%0A
+,
rep
|
52dd018d08e00356218cb2789cee10976eff4359 | Disable automatic geocoding for addresses in Django admin | firecares/firecares_core/admin.py | firecares/firecares_core/admin.py | import autocomplete_light
from .models import Address, ContactRequest, AccountRequest, RegistrationWhitelist
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.gis import admin
from import_export.admin import ExportMixin
from firecares.firecares_core.models import UserProfile, PredeterminedUser, DepartmentAssociationRequest
User = get_user_model()
class LocalOpenLayersAdmin(admin.OSMGeoAdmin):
openlayers_url = settings.STATIC_URL + 'openlayers/OpenLayers.js'
class AddressAdmin(LocalOpenLayersAdmin):
list_display = ['__unicode__']
list_filter = ['state_province']
search_fields = ['address_line1', 'state_province', 'city']
def save_model(self, request, obj, form, change):
if change:
obj.geocode()
super(AddressAdmin, self).save_model(request, obj, form, change)
class ContactRequestAdmin(ExportMixin, admin.ModelAdmin):
list_display = ['name', 'email', 'created_at']
search_fields = ['name', 'email']
class AccountRequestAdmin(ExportMixin, admin.ModelAdmin):
list_display = ['email', 'created_at']
search_fields = ['email']
form = autocomplete_light.modelform_factory(AccountRequest, fields='__all__')
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
form = autocomplete_light.modelform_factory(UserProfile, fields='__all__')
class UserAdmin(ExportMixin, BaseUserAdmin):
list_display = ['username', 'email', 'first_name', 'last_name', 'is_staff', 'date_joined']
inlines = [ProfileInline]
class DepartmentAssociationRequestAdmin(ExportMixin, admin.ModelAdmin):
model = DepartmentAssociationRequest
form = autocomplete_light.modelform_factory(DepartmentAssociationRequest, fields='__all__')
search_fields = ['user__username', 'user__email', 'approved_by__username', 'denied_by__username']
list_filter = ['approved_by', 'denied_by', 'approved_at', 'denied_at']
class RegistrationWhitelistAdmin(ExportMixin, admin.ModelAdmin):
model = RegistrationWhitelist
form = autocomplete_light.modelform_factory(RegistrationWhitelist, fields='__all__')
search_fields = ['email_or_domain', 'department__name', 'created_by__username']
list_filter = ['created_by', 'created_at', 'department__state']
class PredeterminedUserAdmin(ExportMixin, admin.ModelAdmin):
model = PredeterminedUser
form = autocomplete_light.modelform_factory(PredeterminedUser, fields='__all__')
search_fields = ['email', 'department__name']
admin.site.register(Address, AddressAdmin)
admin.site.register(ContactRequest, ContactRequestAdmin)
admin.site.register(AccountRequest, AccountRequestAdmin)
admin.site.register(RegistrationWhitelist, RegistrationWhitelistAdmin)
admin.site.register(PredeterminedUser, PredeterminedUserAdmin)
admin.site.register(DepartmentAssociationRequest, DepartmentAssociationRequestAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
| Python | 0 | @@ -762,181 +762,8 @@
'%5D%0A%0A
- def save_model(self, request, obj, form, change):%0A if change:%0A obj.geocode()%0A super(AddressAdmin, self).save_model(request, obj, form, change)%0A%0A
%0Acla
|
7b3f239964c6663a9b655553202567fccead85c8 | Add 'me' to profile IdentifierError | mollie/api/resources/profiles.py | mollie/api/resources/profiles.py | from ..error import IdentifierError
from ..objects.profile import Profile
from .base import Base
class Profiles(Base):
RESOURCE_ID_PREFIX = 'pfl_'
def get_resource_object(self, result):
return Profile(result, self.client)
def get(self, profile_id, **params):
if not profile_id or \
(not profile_id.startswith(self.RESOURCE_ID_PREFIX)
and not profile_id == 'me'):
raise IdentifierError(
"Invalid profile ID: '{id}'. A profile ID should start with '{prefix}'.".format(
id=profile_id, prefix=self.RESOURCE_ID_PREFIX)
)
return super(Profiles, self).get(profile_id, **params)
| Python | 0.000003 | @@ -542,16 +542,56 @@
prefix%7D'
+ %22%0A %22or it should be 'me'
.%22.forma
@@ -627,16 +627,36 @@
file_id,
+%0A
prefix=
|
b000bef2ec323dc9b7862a828ab1fd2e9574f3b0 | allow networks to be read from Document objects as well as filenames | nineml/user/network.py | nineml/user/network.py | from itertools import chain
from .population import Population
from .projection import Projection
from .selection import Selection
from ..document import Document
from . import BaseULObject
from .component import write_reference, resolve_reference
from nineml.annotations import annotate_xml, read_annotations
from nineml.xmlns import E, NINEML
from nineml.utils import check_tag
import nineml
from nineml.exceptions import handle_xml_exceptions
class Network(BaseULObject):
    """
    Container for populations and projections between those populations.

    **Arguments**:
        *name*
            a name for the network.
        *populations*
            a dict mapping population names to the Population objects
            contained in the network.
        *projections*
            a dict mapping projection names to the Projection objects
            contained in the network.
        *selections*
            a dict mapping selection names to the Selection objects
            contained in the network.
    """
    element_name = "Network"
    defining_attributes = ("populations", "projections", "selections")
    children = ("populations", "projections", "selections")

    def __init__(self, name="anonymous", populations=None, projections=None,
                 selections=None):
        # better would be *items, then sort by type, taking the name from the
        # item
        super(Network, self).__init__()
        self.name = name
        # Use None sentinels rather than mutable {} defaults: a shared
        # default dict would be silently mutated by add() across every
        # Network constructed without explicit containers.
        self.populations = populations if populations is not None else {}
        self.projections = projections if projections is not None else {}
        self.selections = selections if selections is not None else {}

    def add(self, *objs):
        """
        Add one or more Population, Projection or Selection instances to the
        network. Each object is stored under its ``name`` attribute.

        Raises an Exception for any other object type.
        """
        for obj in objs:
            if isinstance(obj, Population):
                self.populations[obj.name] = obj
            elif isinstance(obj, Projection):
                self.projections[obj.name] = obj
            elif isinstance(obj, Selection):
                self.selections[obj.name] = obj
            else:
                raise Exception("Networks may only contain Populations, "
                                "Projections, or Selections")

    def get_components(self):
        """
        Return a flat list of the components used by all populations and
        projections in the network.
        """
        components = []
        for p in chain(self.populations.values(), self.projections.values()):
            components.extend(p.get_components())
        return components

    @write_reference
    @annotate_xml
    def to_xml(self):
        """Serialise this network (and its contents) to an XML element."""
        return E(self.element_name,
                 name=self.name,
                 *[p.to_xml() for p in chain(self.populations.values(),
                                             self.selections.values(),
                                             self.projections.values())])

    @classmethod
    @resolve_reference
    @read_annotations
    @handle_xml_exceptions
    def from_xml(cls, element, document):
        """
        Construct a Network from an XML element, resolving child
        populations, projections and selections via *document*.
        """
        check_tag(element, cls)
        # These containers are keyed by object name below, so they must be
        # dicts (the previous list initialisation raised TypeError on the
        # first keyed assignment).
        populations = {}
        for pop_elem in element.findall(NINEML + 'PopulationItem'):
            pop = Population.from_xml(pop_elem, document)
            populations[pop.name] = pop
        projections = {}
        for proj_elem in element.findall(NINEML + 'ProjectionItem'):
            proj = Projection.from_xml(proj_elem, document)
            projections[proj.name] = proj
        selections = {}
        for sel_elem in element.findall(NINEML + 'Selection'):
            sel = Selection.from_xml(sel_elem, document)
            selections[sel.name] = sel
        network = cls(name=element.attrib["name"], populations=populations,
                      projections=projections, selections=selections)
        return network

    def write(self, filename):
        """
        Write all populations, projections and selections of this network
        to a NineML document at *filename*.
        """
        document = Document(*chain(
            self.populations.itervalues(), self.projections.itervalues(),
            self.selections.itervalues()))
        document.write(filename)

    @classmethod
    def read(cls, filename):
        """
        Build a Network from *filename*, which may be either the path/URL
        of a NineML file or an already-loaded Document object.
        """
        if isinstance(filename, Document):
            document = filename
        else:
            document = nineml.read(filename)
        return Network(
            name='root',
            populations=dict((p.name, p) for p in document.populations),
            projections=dict((p.name, p) for p in document.projections),
            selections=dict((s.name, s) for s in document.selections))
ceptions
+, NineMLRuntimeError
%0A%0A%0Aclass
@@ -3813,39 +3813,338 @@
-document = nineml.read(filename
+if isinstance(filename, basestring):%0A document = nineml.read(filename)%0A elif isinstance(filename, Document):%0A document = filename%0A else:%0A raise NineMLRuntimeError(%0A %22Unrecognised argument type %7B%7D, can be either filename or %22%0A %22Document%22.format(filename)
)%0A
|
5efdd29804249b40c9b9e589cb00cf10c56decb0 | Add the standard imports | conveyor/tasks/bulk.py | conveyor/tasks/bulk.py | import datetime
import logging
import time
from requests.exceptions import ConnectionError, HTTPError
from ..core import Conveyor
logger = logging.getLogger(__name__)
# We ignore the last component as we cannot properly handle it
def get_jobs(last=0):
current = time.mktime(datetime.datetime.utcnow().timetuple())
logger.info("Current time is '%s'", current)
app = Conveyor()
for package in set(app.processor.pypi.list_packages()):
yield package
def handle_job(name):
try:
tried = 0
delay = 1
while True:
try:
tried += 1
app = Conveyor()
app.processor.update(name)
break
except (ConnectionError, HTTPError):
# Attempt to process again if we have a connection error
if tried >= 10: # Try a max of 10 times
raise
else:
# Wait a moment
time.sleep(delay)
delay = delay * 2
except Exception as e:
logger.exception(str(e))
raise
| Python | 0.000378 | @@ -1,12 +1,124 @@
+from __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import unicode_literals%0A%0A
import datet
|
00203b7fbf8ed8f8728ce18838acb21eb6224723 | Disable unused code | flumotion/test/test_common_vfs.py | flumotion/test/test_common_vfs.py | # -*- Mode: Python; test-case-name: flumotion.test.test_common_planet -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import errno
import os
from flumotion.common.interfaces import IDirectory
from flumotion.common.testsuite import TestCase
from flumotion.common.vfs import listDirectory
class VFSTest(TestCase):
def setUp(self):
self.path = os.path.dirname(__file__)
try:
os.mkdir(os.path.join(self.path, 'access-denied'), 000)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def tearDown(self):
os.rmdir(os.path.join(self.path, 'access-denied'))
def testListDirectory(self):
try:
d = listDirectory(self.path)
except AssertionError:
# missing backends
return
def done(directory):
self.failUnless(IDirectory.providedBy(directory))
self.assertEqual(directory.filename,
os.path.basename(self.path))
self.assertEqual(directory.getPath(), self.path)
self.failUnless(directory.iconNames)
d.addCallback(done)
return d
| Python | 0.000003 | @@ -1172,262 +1172,8 @@
e__)
-%0A try:%0A os.mkdir(os.path.join(self.path, 'access-denied'), 000)%0A except OSError, e:%0A if e.errno != errno.EEXIST:%0A raise%0A%0A def tearDown(self):%0A os.rmdir(os.path.join(self.path, 'access-denied'))
%0A%0A
|
6f10678e98101f3514db58f55923959eb41c6988 | Make :baseclass: actually work | docs/sphinx_qtile.py | docs/sphinx_qtile.py | # Copyright (c) 2015 dmpayton
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import builtins
import functools
import importlib
import inspect
import os
import pprint
from subprocess import call
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from jinja2 import Template
from sphinx.util.nodes import nested_parse_with_titles
from libqtile import command_object, configurable, widget
qtile_module_template = Template('''
.. qtile_class:: {{ module }}.{{ class_name }}
{% if no_config %}:no-config:{% endif %}
{% if no_commands %}:no-commands:{% endif %}
''')
qtile_class_template = Template('''
{{ class_name }}
{{ class_underline }}
.. autoclass:: {{ module }}.{{ class_name }}{% for arg in extra_arguments %}
{{ arg }}{% endfor %}
{% if is_widget %}
.. compound::
Supported bar orientations: {{ obj.orientations }}
{% endif %}
{% if configurable %}
.. list-table::
:widths: 20 20 60
:header-rows: 1
* - key
- default
- description
{% for key, default, description in defaults %}
* - ``{{ key }}``
- ``{{ default }}``
- {{ description }}
{% endfor %}
{% endif %}
{% if commandable %}
{% for cmd in commands %}
.. automethod:: {{ module }}.{{ class_name }}.{{ cmd }}
{% endfor %}
{% endif %}
''')
qtile_hooks_template = Template('''
.. automethod:: libqtile.hook.subscribe.{{ method }}
''')
# Adapted from sphinxcontrib-httpdomain
def import_object(module_name, expr):
mod = __import__(module_name)
mod = functools.reduce(getattr, module_name.split('.')[1:], mod)
globals = builtins
if not isinstance(globals, dict):
globals = globals.__dict__
return eval(expr, globals, mod.__dict__)
class SimpleDirectiveMixin:
has_content = True
required_arguments = 1
def make_rst(self):
raise NotImplementedError
def run(self):
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self.make_rst():
result.append(line, '<{0}>'.format(self.__class__.__name__))
nested_parse_with_titles(self.state, result, node)
return node.children
def sphinx_escape(s):
return pprint.pformat(s, compact=False, width=10000)
class QtileClass(SimpleDirectiveMixin, Directive):
optional_arguments = 2
def make_rst(self):
module, class_name = self.arguments[0].rsplit('.', 1)
arguments = self.arguments[1:]
obj = import_object(module, class_name)
is_configurable = ':no-config:' not in arguments
is_commandable = ':no-commands:' not in arguments
arguments = [i for i in arguments if i not in (':no-config:', ':no-commands:')]
# build up a dict of defaults using reverse MRO
defaults = {}
for klass in reversed(obj.mro()):
if not issubclass(klass, configurable.Configurable):
continue
if not hasattr(klass, "defaults"):
continue
klass_defaults = getattr(klass, "defaults")
defaults.update({
d[0]: d[1:] for d in klass_defaults
})
# turn the dict into a list of ("value", "default", "description") tuples
defaults = [
(k, sphinx_escape(v[0]), sphinx_escape(v[1])) for k, v in sorted(defaults.items())
]
context = {
'module': module,
'class_name': class_name,
'class_underline': "=" * len(class_name),
'obj': obj,
'defaults': defaults,
'configurable': is_configurable and issubclass(obj, configurable.Configurable),
'commandable': is_commandable and issubclass(obj, command_object.CommandObject),
'is_widget': issubclass(obj, widget.base._Widget),
'extra_arguments': arguments,
}
if context['commandable']:
context['commands'] = [
attr for attr in dir(obj) if attr.startswith('cmd_')
]
rst = qtile_class_template.render(**context)
for line in rst.splitlines():
yield line
class QtileHooks(SimpleDirectiveMixin, Directive):
def make_rst(self):
module, class_name = self.arguments[0].rsplit('.', 1)
obj = import_object(module, class_name)
for method in sorted(obj.hooks):
rst = qtile_hooks_template.render(method=method)
for line in rst.splitlines():
yield line
class QtileModule(SimpleDirectiveMixin, Directive):
# :baseclass: <base class path>
# :no-commands:
# :no-config:
optional_arguments = 4
def make_rst(self):
module = importlib.import_module(self.arguments[0])
BaseClass = None
if ':baseclass:' in self.arguments:
BaseClass = import_object(*self.arguments[
self.arguments.index(':baseclass:') + 1].rsplit('.', 1))
for item in dir(module):
obj = import_object(self.arguments[0], item)
if not inspect.isclass(obj) and (BaseClass and
not isinstance(obj, BaseClass)):
continue
context = {
'module': self.arguments[0],
'class_name': item,
'no_config': ':no-config:' in self.arguments,
'no_commands': ':no-commands:' in self.arguments,
}
rst = qtile_module_template.render(**context)
for line in rst.splitlines():
if not line.strip():
continue
yield line
def generate_keybinding_images():
this_dir = os.path.dirname(__file__)
base_dir = os.path.abspath(os.path.join(this_dir, ".."))
call(['make', '-C', base_dir, 'run-ffibuild'])
call(['make', '-C', this_dir, 'genkeyimg'])
def setup(app):
generate_keybinding_images()
app.add_directive('qtile_class', QtileClass)
app.add_directive('qtile_hooks', QtileHooks)
app.add_directive('qtile_module', QtileModule)
| Python | 0.000001 | @@ -6182,19 +6182,18 @@
ss(obj)
-and
+or
(BaseCl
@@ -6222,24 +6222,24 @@
not is
-instance
+subclass
(obj, Ba
|
b6bc77d6437f5fc9e9c2e793226960fe29841724 | Remove unused code | ponyexpress/api/lib/repositories.py | ponyexpress/api/lib/repositories.py | ## Handle configured repositores and query for outdated package data
from datetime import date
import re
from ponyexpress.database import db
from ponyexpress.api.lib.providers import *
from ponyexpress.models.repository import Repository
from ponyexpress.models.repo_history import RepoHistory
from ponyexpress.models.package_history import PackageHistory
class Repositories:
provider = None
pattern = None # store the compiled regex pattern
def __init__(self):
pass
@staticmethod
def create_repository(repodata):
# name, uri, label, provider + id
# skip checking for the existance of a repo
# could be done via URI only at this step
new_repo = Repository()
new_repo.name = repodata['name']
new_repo.label = repodata['label']
new_repo.uri = repodata['uri']
new_repo.provider = repodata['provider']
db.session.add(new_repo)
db.session.commit()
# return the new object's id
return new_repo.id
@staticmethod
def update_repository_info(repository, repodata):
# update all known fields
if 'name' in repodata:
repository.name = repodata['name']
if 'uri' in repodata:
repository.uri = repodata['uri']
if 'label' in repodata:
repository.label = repodata['label']
if 'provider' in repodata:
repository.provider = repodata['provider']
# update the database
db.session.commit()
@staticmethod
def delete_repository(repository):
# remove the entry
db.session.delete(repository)
db.session.commit()
def select_provider(self, repo):
if repo.provider == 'apt':
self.provider = AptRepository(repo.uri)
else:
raise NotImplementedError()
def update_repository(self, repository):
if self.provider is not None:
metadata = self.provider.fetch_metadata()
else:
raise Exception()
if metadata is not None:
try:
mvals = metadata.itervalues()
except:
mvals = metadata.values()
for m in mvals:
hist = RepoHistory(repository, m['sha256'], m['package'], m['version'], m['filename'], date.today())
db.session.add(hist)
db.session.commit()
def get_outdated_packages(self, node_filter, repo_list):
"""Compare packages available on the repository server with those available on a set of nodes"""
outdated_packages = {}
if not isinstance(repo_list, list):
return []
# get packages from selected nodes
node_filter_expression = ('%%%s%%' % node_filter)
packages_history = PackageHistory.query.filter(PackageHistory.nodename.like(node_filter_expression)).all()
# get packages from selected set of repositories, filter by label
#repo_list = self.get_repositories(repository)
if packages_history is not None:
for package in packages_history:
try:
if repo_list is not []:
rl = []
for repo in repo_list:
rl.append(repo.id)
mp = RepoHistory.query.filter(RepoHistory.pkgname == package.pkgname) \
.filter(RepoHistory.repo_id.in_(rl)).all()
#TODO check if multiple packages should be returned.
#TODO iterate of all packages we can find with that name
#
if mp is not None:
upstream_version = []
for p in mp:
# compare versions
res = self.ver_cmp(package.pkgversion, p.pkgversion)
if res < 0:
# repository is newer
upstream_version.append(p.pkgversion)
package.upstream_version = upstream_version
if package.pkgname not in outdated_packages:
outdated_packages[package.pkgname] = package
else:
return []
except Exception as e:
# Catch exceptions and move on to the next object
print(e)
#next()
return list(outdated_packages.values())
else:
return []
def get_repositories(self, expression):
#check if expression is an integer or a comma separated list of values
repo_list = []
if expression is not None and (isinstance(expression, int) or expression.isdigit()):
repo_id = int(expression)
if repo_id > 0:
repo = Repository.query.filter_by(id=repo_id).first()
if repo is not None:
repo_list.append(repo)
else:
# assume expression is a list of repository numeric identifiers
split = expression.split(',')
if split is None:
return []
expression_list = [s for s in split if s.isdigit()]
if expression_list is not None and isinstance(expression_list, list):
repos = Repository.query.filter(Repository.id.in_(expression_list)).all()
if repos is not None:
repo_list = repos
return repo_list
def _ver_tuple(self, z):
"""Parse debian/ubuntu style version strings and return a tuple containing only numbers"""
if self.pattern is None:
#self.pattern = re.compile("([0-9]+)\.([0-9]+)\.?([0-9]*)[\-\+\~]?([0-9]|[\+\-\~a-z0-9]*)[a-z]*([0-9\.]+)")
#self.pattern = re.compile("([0-9]+)\.([0-9]+)\.?([0-9]*)[\-\+\~]?([0-9]|[\+\-\~a-z0-9]*)([a-z]*)([0-9]*)([\.0-9]*)")
self.pattern = re.compile('/(?<=\d)(?=\D)|(?<=\D)(?=\d)/')
a = self.pattern.split(z)
if a is not None and len(a) > 0:
return tuple([str(x) for x in a[0] if x.isnumeric()])
# TODO: fallback, simply return not-equal!!
#tup = tuple([str(x) for x in z.split('.')])
return None
def ver_cmp(self, a, b):
"""Compare two version tuples"""
# TODO: handle different length versions
va = self._ver_tuple(a)
vb = self._ver_tuple(b)
# When the second tuple is longer we assume it's a newer version
#if len(va) != len(vb):
# return -1
#return va < vb
if va < vb:
return -1
elif va == vb:
return 0
elif va > vb:
return 1
| Python | 0 | @@ -5805,258 +5805,8 @@
ne:%0A
- #self.pattern = re.compile(%22(%5B0-9%5D+)%5C.(%5B0-9%5D+)%5C.?(%5B0-9%5D*)%5B%5C-%5C+%5C~%5D?(%5B0-9%5D%7C%5B%5C+%5C-%5C~a-z0-9%5D*)%5Ba-z%5D*(%5B0-9%5C.%5D+)%22)%0A #self.pattern = re.compile(%22(%5B0-9%5D+)%5C.(%5B0-9%5D+)%5C.?(%5B0-9%5D*)%5B%5C-%5C+%5C~%5D?(%5B0-9%5D%7C%5B%5C+%5C-%5C~a-z0-9%5D*)(%5Ba-z%5D*)(%5B0-9%5D*)(%5B%5C.0-9%5D*)%22)%0A
@@ -6020,114 +6020,8 @@
%5D)%0A%0A
- # TODO: fallback, simply return not-equal!!%0A #tup = tuple(%5Bstr(x) for x in z.split('.')%5D)%0A%0A
|
c325ebfc98555ef8c134a5ec2bff94f3bbcb54d0 | fix sorting issues with OrderedDict | autodiff/utils.py | autodiff/utils.py | import gc
import opcode
import inspect
from autodiff.compat import OrderedDict, getcallargs
#import theano
#from theano.sandbox.cuda import cuda_ndarray
#cuda_ndarray = cuda_ndarray.cuda_ndarray
def orderedcallargs(fn, *args, **kwargs):
"""
Returns an OrderedDictionary containing the names and values of a
function's arguments. The arguments are ordered according to the function's
argspec:
1. named arguments
2. variable positional argument
3. variable keyword argument
"""
callargs = getcallargs(fn, *args, **kwargs)
argspec = inspect.getargspec(fn)
o_callargs = OrderedDict()
for argname in argspec.args:
o_callargs[argname] = callargs[argname]
if argspec.varargs:
o_callargs[argspec.varargs] = callargs[argspec.varargs]
if argspec.keywords:
o_callargs[argspec.keywords] = callargs[argspec.keywords]
return o_callargs
def expandedcallargs(fn, *args, **kwargs):
"""
Returns a tuple of all function args and kwargs, expanded so that varargs
and kwargs are not nested. The args are ordered by their position in the
function signature.
"""
return tuple(flat_from_doc(orderedcallargs(fn, *args, **kwargs)))
def as_seq(x, seq_type=None):
"""
If x is not a sequence, returns it as one. The seq_type argument allows the
output type to be specified (defaults to list). If x is a sequence and
seq_type is provided, then x is converted to seq_type.
Arguments
---------
x : seq or object
seq_type : output sequence type
If None, then if x is already a sequence, no change is made. If x
is not a sequence, a list is returned.
"""
if x is None:
# None represents an empty sequence
x = []
elif not isinstance(x, (list, tuple, set, frozenset, dict)):
# if x is not already a sequence (including dict), then make it one
x = [x]
if seq_type is not None and not isinstance(x, seq_type):
# if necessary, convert x to the sequence type
x = seq_type(x)
return x
def itercode(code):
"""Return a generator of byte-offset, opcode, and argument
from a byte-code-string
"""
i = 0
extended_arg = 0
n = len(code)
while i < n:
c = code[i]
num = i
op = ord(c)
i = i + 1
oparg = None
if op >= opcode.HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == opcode.EXTENDED_ARG:
extended_arg = oparg * 65536L
delta = yield num, op, oparg
if delta is not None:
abs_rel, dst = delta
assert abs_rel == 'abs' or abs_rel == 'rel'
i = dst if abs_rel == 'abs' else i + dst
def flat_from_doc(doc):
"""Iterate over the elements of a nested document in a consistent order,
unpacking dictionaries, lists, and tuples.
Returns a list.
Note that doc_from_flat(doc, flat_from_doc(doc)) == doc
"""
rval = []
if isinstance(doc, (list, tuple)):
for d_i in doc:
rval.extend(flat_from_doc(d_i))
elif isinstance(doc, dict):
for k in sorted(doc.iterkeys()):
if isinstance(k, (tuple, dict)):
# -- if keys are tuples containing ndarrays, should
# they be traversed also?
raise NotImplementedError(
'potential ambiguity in container key', k)
rval.extend(flat_from_doc(doc[k]))
else:
rval.append(doc)
return rval
def doc_from_flat(doc, flat):
"""Iterate over a nested document, building a clone from the elements of
flat.
Returns object with same type as doc.
Note that doc_from_flat(doc, flat_from_doc(doc)) == doc
"""
def doc_from_flat_inner(doc, pos):
if isinstance(doc, (list, tuple)):
rval = []
for d_i in doc:
d_i_clone, pos = doc_from_flat_inner(d_i, pos)
rval.append(d_i_clone)
rval = type(doc)(rval)
elif isinstance(doc, dict):
rval = type(doc)()
for k in sorted(doc.iterkeys()):
v_clone, pos = doc_from_flat_inner(doc[k], pos)
rval[k] = v_clone
else:
rval = flat[pos]
pos += 1
return rval, pos
return doc_from_flat_inner(doc, 0)[0]
# -- picklable decorated function
class post_collect(object):
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
try:
return self.f(*args, **kwargs)
finally:
gc.collect()
#mem_info = cuda_ndarray.mem_info()
#om = cuda_ndarray.outstanding_mallocs()
#print 'Post-gc: %s %s' % (mem_info, om)
| Python | 0 | @@ -3237,39 +3237,170 @@
-for k in sorted(doc.iterkeys())
+if isinstance(doc, OrderedDict):%0A sortedkeys = doc.iterkeys()%0A else:%0A sortedkeys = sorted(doc.iterkeys())%0A for k in sortedkeys
:%0A
@@ -4343,39 +4343,186 @@
-for k in sorted(doc.iterkeys())
+if isinstance(doc, OrderedDict):%0A sortedkeys = doc.iterkeys()%0A else:%0A sortedkeys = sorted(doc.iterkeys())%0A for k in sortedkeys
:%0A
|
ce5f152a5769e90cb87a05a2bcc1beb837d6cdb4 | Simplify code | chainer/functions/pooling/pooling_2d.py | chainer/functions/pooling/pooling_2d.py | import collections
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import conv
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
_cudnn_version = libcudnn.getVersion()
def _check_cudnn_acceptable_type(x_dtype):
return _cudnn_version >= 3000 or x_dtype != numpy.float16
def _pair(x):
if isinstance(x, collections.Iterable):
return x
return x, x
class Pooling2D(function.Function):
"""Base class of pooling function over a set of 2d planes."""
def __init__(self, ksize, stride=None, pad=0, cover_all=True,
use_cudnn=True):
if stride is None:
stride = ksize
self.kh, self.kw = _pair(ksize)
self.sy, self.sx = _pair(stride)
self.ph, self.pw = _pair(pad)
self.cover_all = cover_all
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
in_types[0].dtype.kind == 'f',
in_types[0].ndim == 4
)
def forward_gpu(self, x):
# Implementation using cudnn
n, c, h, w = x[0].shape
y_h = conv.get_conv_outsize(
h, self.kh, self.sy, self.ph, self.cover_all)
y_w = conv.get_conv_outsize(
w, self.kw, self.sx, self.pw, self.cover_all)
dtype = x[0].dtype
y = cuda.cupy.empty((n, c, y_h, y_w), dtype=dtype)
handle = cudnn.get_handle()
pool_desc = self.create_pool_desc()
x_desc = cudnn.create_tensor_descriptor(x[0])
y_desc = cudnn.create_tensor_descriptor(y)
oz_dtype = dtype if dtype != 'e' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
libcudnn.poolingForward(
handle, pool_desc.value, one.data, x_desc.value,
x[0].data.ptr, zero.data, y_desc.value, y.data.ptr)
self.y = y
return y,
def backward_gpu(self, x, gy):
# Implementation using cudnn
handle = cudnn.get_handle()
pool_desc = self.create_pool_desc()
# Pooling of cuDNNv2 does not seem to support non-contiguous gradients
gy = cuda.cupy.ascontiguousarray(gy[0])
x_desc = cudnn.create_tensor_descriptor(x[0])
y_desc = cudnn.create_tensor_descriptor(gy)
dtype = x[0].dtype
oz_dtype = dtype if dtype != 'e' else 'f'
one = numpy.array(1, dtype=oz_dtype).ctypes
zero = numpy.array(0, dtype=oz_dtype).ctypes
gx = cuda.cupy.empty_like(x[0])
libcudnn.poolingBackward(
handle, pool_desc.value, one.data, y_desc.value,
self.y.data.ptr, y_desc.value, gy.data.ptr, x_desc.value,
x[0].data.ptr, zero.data, x_desc.value, gx.data.ptr)
return gx,
def create_pool_desc(self):
raise NotImplementedError()
| Python | 0.041259 | @@ -1171,32 +1171,49 @@
ion using cudnn%0A
+ x = x%5B0%5D%0A
n, c, h,
@@ -1218,19 +1218,16 @@
h, w = x
-%5B0%5D
.shape%0A
@@ -1419,35 +1419,8 @@
ll)%0A
- dtype = x%5B0%5D.dtype%0A
@@ -1467,16 +1467,18 @@
, dtype=
+x.
dtype)%0A%0A
@@ -1598,35 +1598,32 @@
sor_descriptor(x
-%5B0%5D
)%0A y_desc
@@ -1679,36 +1679,36 @@
dtype =
-dtype
+'d'
if
+x.
dtype
-!= 'e
+== 'd
' else '
@@ -1914,35 +1914,32 @@
e,%0A x
-%5B0%5D
.data.ptr, zero.
@@ -2073,32 +2073,49 @@
ion using cudnn%0A
+ x = x%5B0%5D%0A
handle =
@@ -2356,19 +2356,16 @@
riptor(x
-%5B0%5D
)%0A
@@ -2415,35 +2415,8 @@
y)%0A%0A
- dtype = x%5B0%5D.dtype%0A
@@ -2434,28 +2434,28 @@
e =
-dtype
+'d'
if
+x.
dtype
-!= 'e
+== 'd
' el
@@ -2601,19 +2601,16 @@
y_like(x
-%5B0%5D
)%0A
@@ -2781,19 +2781,16 @@
x
-%5B0%5D
.data.pt
|
b3448aef6f42b1a8825fc7c434a2c5eefb5bc0e5 | Replace OSError with LookupError when SENNA executable is not found | nltk/classify/senna.py | nltk/classify/senna.py | # Natural Language Toolkit: Senna Interface
#
# Copyright (C) 2001-2022 NLTK Project
# Author: Rami Al-Rfou' <ralrfou@cs.stonybrook.edu>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A general interface to the SENNA pipeline that supports any of the
operations specified in SUPPORTED_OPERATIONS.
Applying multiple operations at once has the speed advantage. For example,
Senna will automatically determine POS tags if you are extracting named
entities. Applying both of the operations will cost only the time of
extracting the named entities.
The SENNA pipeline has a fixed maximum size of the sentences that it can read.
By default it is 1024 token/sentence. If you have larger sentences, changing
the MAX_SENTENCE_SIZE value in SENNA_main.c should be considered and your
system specific binary should be rebuilt. Otherwise this could introduce
misalignment errors.
The input is:
- path to the directory that contains SENNA executables. If the path is incorrect,
Senna will automatically search for executable file specified in SENNA environment variable
- List of the operations needed to be performed.
- (optionally) the encoding of the input data (default:utf-8)
Note: Unit tests for this module can be found in test/unit/test_senna.py
>>> from nltk.classify import Senna
>>> pipeline = Senna('/usr/share/senna-v3.0', ['pos', 'chk', 'ner'])
>>> sent = 'Dusseldorf is an international business center'.split()
>>> [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)] # doctest: +SKIP
[('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP', 'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'),
('international', 'I-NP', 'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP', 'O', 'NN')]
"""
from os import environ, path, sep
from platform import architecture, system
from subprocess import PIPE, Popen
from nltk.tag.api import TaggerI
class Senna(TaggerI):
SUPPORTED_OPERATIONS = ["pos", "chk", "ner"]
def __init__(self, senna_path, operations, encoding="utf-8"):
self._encoding = encoding
self._path = path.normpath(senna_path) + sep
# Verifies the existence of the executable on the self._path first
# senna_binary_file_1 = self.executable(self._path)
exe_file_1 = self.executable(self._path)
if not path.isfile(exe_file_1):
# Check for the system environment
if "SENNA" in environ:
# self._path = path.join(environ['SENNA'],'')
self._path = path.normpath(environ["SENNA"]) + sep
exe_file_2 = self.executable(self._path)
if not path.isfile(exe_file_2):
raise OSError(
"Senna executable expected at %s or %s but not found"
% (exe_file_1, exe_file_2)
)
self.operations = operations
def executable(self, base_path):
"""
The function that determines the system specific binary that should be
used in the pipeline. In case, the system is not known the default senna binary will
be used.
"""
os_name = system()
if os_name == "Linux":
bits = architecture()[0]
if bits == "64bit":
return path.join(base_path, "senna-linux64")
return path.join(base_path, "senna-linux32")
if os_name == "Windows":
return path.join(base_path, "senna-win32.exe")
if os_name == "Darwin":
return path.join(base_path, "senna-osx")
return path.join(base_path, "senna")
def _map(self):
"""
A method that calculates the order of the columns that SENNA pipeline
will output the tags into. This depends on the operations being ordered.
"""
_map = {}
i = 1
for operation in Senna.SUPPORTED_OPERATIONS:
if operation in self.operations:
_map[operation] = i
i += 1
return _map
def tag(self, tokens):
"""
Applies the specified operation(s) on a list of tokens.
"""
return self.tag_sents([tokens])[0]
def tag_sents(self, sentences):
"""
Applies the tag method over a list of sentences. This method will return a
list of dictionaries. Every dictionary will contain a word with its
calculated annotations/tags.
"""
encoding = self._encoding
if not path.isfile(self.executable(self._path)):
raise OSError(
"Senna executable expected at %s but not found"
% self.executable(self._path)
)
# Build the senna command to run the tagger
_senna_cmd = [
self.executable(self._path),
"-path",
self._path,
"-usrtokens",
"-iobtags",
]
_senna_cmd.extend(["-" + op for op in self.operations])
# Serialize the actual sentences to a temporary string
_input = "\n".join(" ".join(x) for x in sentences) + "\n"
if isinstance(_input, str) and encoding:
_input = _input.encode(encoding)
# Run the tagger and get the output
p = Popen(_senna_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate(input=_input)
senna_output = stdout
# Check the return code.
if p.returncode != 0:
raise RuntimeError("Senna command failed! Details: %s" % stderr)
if encoding:
senna_output = stdout.decode(encoding)
# Output the tagged sentences
map_ = self._map()
tagged_sentences = [[]]
sentence_index = 0
token_index = 0
for tagged_word in senna_output.strip().split("\n"):
if not tagged_word:
tagged_sentences.append([])
sentence_index += 1
token_index = 0
continue
tags = tagged_word.split("\t")
result = {}
for tag in map_:
result[tag] = tags[map_[tag]].strip()
try:
result["word"] = sentences[sentence_index][token_index]
except IndexError as e:
raise IndexError(
"Misalignment error occurred at sentence number %d. Possible reason"
" is that the sentence size exceeded the maximum size. Check the "
"documentation of Senna class for more information."
% sentence_index
) from e
tagged_sentences[-1].append(result)
token_index += 1
return tagged_sentences
| Python | 0 | @@ -2695,34 +2695,38 @@
raise
-OS
+Lookup
Error(%0A
@@ -4562,18 +4562,22 @@
raise
-OS
+Lookup
Error(%0A
|
19ea8f4fed70d96a5a2bc6a81b32bdaa65583f5c | throw user error on unknown command and show existing commands | i3configger/message.py | i3configger/message.py | import logging
from pathlib import Path
from i3configger import base, config, exc, partials, context
log = logging.getLogger(__name__)
I3STATUS = "i3status"
"""reserved key for status bar setting files"""
DEL = 'del'
"""signal to delete a key in shadow or set"""
class CMD:
SELECT_NEXT = "select-next"
SELECT_PREVIOUS = "select-previous"
SELECT = "select"
SET = "set"
MERGE = "merge"
PRUNE = "prune"
SHADOW = "shadow"
def process(statePath, prts, message):
mp = Messenger(statePath, prts, message)
mp.execute()
config.freeze(statePath, mp.payload)
class Messenger:
def __init__(self, messagesPath, prts, message=None):
self.messagesPath = messagesPath
self.prts = prts
self.message = message
self.payload = self.fetch_frozen_messages()
if self.message:
self.command, self.key, *rest = message
self.value = rest[0] if rest else ''
if self.command != CMD.SHADOW and ':' in self.key:
raise exc.UserError(
f"nesting of keys only sensible with {CMD.SHADOW}")
log.debug(f"sending message {message} to {messagesPath}")
def execute(self):
{
CMD.MERGE: self._process_merge,
CMD.PRUNE: self._process_prune,
CMD.SET: self._process_set,
CMD.SELECT: self._process_select,
CMD.SHADOW: self._process_shadow,
CMD.SELECT_NEXT: self._process_select_shift,
CMD.SELECT_PREVIOUS: self._process_select_shift,
}[self.command]()
def _process_merge(self):
self._transform(context.merge)
def _process_prune(self):
self._transform(context.prune)
def _transform(self, func):
path = Path(self.key).expanduser()
if not path.is_absolute():
path = self.messagesPath.parent / path
self.payload = func(self.payload, config.fetch(path))
config.freeze(self.messagesPath, self.payload)
def _process_set(self):
if self.value.lower() == DEL:
del self.payload[CMD.SET][base.VAR_MARK + self.key]
else:
self.payload[CMD.SET][base.VAR_MARK + self.key] = self.value
def _process_select(self):
candidates = partials.find(self.prts, self.key)
if not candidates:
raise exc.MessageError(
f"No candidates for {self.message} in {self.prts}")
candidate = partials.find(self.prts, self.key, self.value)
if not candidate:
raise exc.MessageError(
f"No candidates for {self.message} in {candidates}")
if self.value and self.value.lower() == DEL:
del self.payload[CMD.SELECT][self.key]
else:
self.payload[CMD.SELECT][self.key] = candidate.value
def _process_shadow(self):
"""Shadow arbitrary settings made in i3configger.json.
key:deeper:deepest[...] -> [key][deeper][deepest][...]
"""
parts = self.key.split(':')
current = self.payload[CMD.SHADOW]
while True:
part = parts.pop(0)
if parts:
current[part] = {}
current = current[part]
else:
if self.value is not None and self.value.lower() == DEL:
del current[part]
else:
current[part] = self.value
break
def _process_select_shift(self):
candidates = partials.find(self.prts, self.key)
if not candidates:
raise exc.MessageError(
f"No candidates for {self.message} in {self.prts}")
if self.command == CMD.SELECT_PREVIOUS:
candidates = reversed(candidates)
current = self.payload["select"].get(self.key) or candidates[0].key
for idx, candidate in enumerate(candidates):
if candidate.value == current:
try:
new = candidates[idx + 1]
except IndexError:
new = candidates[0]
log.info("select %s.%s", self.key, new)
self.payload[CMD.SELECT][self.key] = new.value
break
def fetch_frozen_messages(self):
if not self.messagesPath.exists():
state = {}
else:
state = config.fetch(self.messagesPath)
self.ensure_message_keys(state, self.prts)
config.freeze(self.messagesPath, state)
return state
def ensure_message_keys(self, state, prts):
if CMD.SELECT not in state:
initialSelects = {}
for prt in prts:
if not prt.needsSelection:
continue
if prt.key not in initialSelects and prt.key != I3STATUS:
initialSelects[prt.key] = prt.value
state[CMD.SELECT] = initialSelects
if CMD.SET not in state:
state[CMD.SET] = {}
if CMD.SHADOW not in state:
state[CMD.SHADOW] = {}
| Python | 0 | @@ -446,16 +446,167 @@
hadow%22%0A%0A
+ @classmethod%0A def get_all_commands(cls):%0A return %5Bv for k, v in cls.__dict__.items()%0A if k%5B0%5D.isupper() and k%5B0%5D != '_'%5D%0A%0A
%0Adef pro
@@ -1366,10 +1366,31 @@
-%7B%0A
+try:%0A %7B%0A
@@ -1437,24 +1437,28 @@
+
CMD.PRUNE: s
@@ -1477,16 +1477,20 @@
_prune,%0A
+
@@ -1525,32 +1525,36 @@
et,%0A
+
CMD.SELECT: self
@@ -1579,24 +1579,28 @@
+
+
CMD.SHADOW:
@@ -1621,16 +1621,20 @@
shadow,%0A
+
@@ -1686,32 +1686,36 @@
ft,%0A
+
+
CMD.SELECT_PREVI
@@ -1755,16 +1755,20 @@
+
%7D%5Bself.c
@@ -1776,16 +1776,194 @@
mmand%5D()
+%0A except KeyError:%0A raise exc.UserError(%0A f%22Unknown command: %7Bself.command%7D. %22%0A f%22Use one of %7B', '.join(CMD.get_all_commands())%7D%22)
%0A%0A de
|
8a010b6601ecf2eed216b3aa0b604a0985d06544 | Update chainer/training/extensions/__init__.py | chainer/training/extensions/__init__.py | chainer/training/extensions/__init__.py | # import classes and functions
from chainer.training.extensions._snapshot import snapshot # NOQA
from chainer.training.extensions._snapshot import snapshot_object # NOQA
from chainer.training.extensions.computational_graph import DumpGraph # NOQA
from chainer.training.extensions.evaluator import Evaluator # NOQA
from chainer.training.extensions.exponential_shift import ExponentialShift # NOQA
from chainer.training.extensions.fail_on_nonnumber import FailOnNonNumber # NOQA
from chainer.training.extensions.inverse_shift import InverseShift # NOQA
from chainer.training.extensions.linear_shift import LinearShift # NOQA
from chainer.training.extensions.log_report import LogReport # NOQA
from chainer.training.extensions.micro_average import MicroAverage # NOQA
from chainer.training.extensions.multistep_shift import MultistepShift # NOQA
from chainer.training.extensions.parameter_statistics import ParameterStatistics # NOQA
from chainer.training.extensions.plot_report import PlotReport # NOQA
from chainer.training.extensions.polynomial_shift import PolynomialShift # NOQA
from chainer.training.extensions.print_report import PrintReport # NOQA
from chainer.training.extensions.progress_bar import ProgressBar # NOQA
from chainer.training.extensions.step_shift import StepShift # NOQA
from chainer.training.extensions.value_observation import observe_lr # NOQA
from chainer.training.extensions.value_observation import observe_value # NOQA
from chainer.training.extensions.variable_statistics_plot import VariableStatisticsPlot # NOQA
from chainer.training.extensions.warmup_shift import WarmupShift # NOQA
# Aliase
from chainer.training.extensions.computational_graph import DumpGraph as dump_graph # NOQA
| Python | 0 | @@ -1637,17 +1637,16 @@
%0A# Alias
-e
%0Afrom ch
|
105dc001e5e0f2e1e02409cf77e5b31f0df30ffe | put on two lines | core/dbt/task/clean.py | core/dbt/task/clean.py | import os.path
import os
import shutil
from dbt.task.base import ProjectOnlyTask
from dbt.logger import GLOBAL_LOGGER as logger
class CleanTask(ProjectOnlyTask):
def __is_project_path(self, path):
proj_path = os.path.abspath('.')
return not os.path.commonprefix(
[proj_path, os.path.abspath(path)]
) == proj_path
def __is_protected_path(self, path):
"""
This function identifies protected paths, so as not to clean them.
"""
abs_path = os.path.abspath(path)
protected_paths = self.config.source_paths + \
self.config.test_paths + ['.']
protected_abs_paths = [os.path.abspath for p in protected_paths]
return abs_path in set(protected_abs_paths) or \
self.__is_project_path(abs_path)
def run(self):
"""
This function takes all the paths in the target file
and cleans the project paths that are not protected.
"""
for path in self.config.clean_targets:
logger.info("Checking {}/*".format(path))
if not self.__is_protected_path(path):
shutil.rmtree(path, True)
logger.info(" Cleaned {}/*".format(path))
else:
logger.info("ERROR: not cleaning {}/* because it is protected".format(path))
logger.info("Finished cleaning all paths.")
| Python | 0.000006 | @@ -1309,16 +1309,47 @@
e it is
+%22%0A %22
protecte
|
5860d28e0f8f08f1bf4ca2426c08a83b687f33f8 | Fix Python3 issue (#173) | mod/tools/node.py | mod/tools/node.py | """wrapper for node.js, only check_exists"""
import subprocess
name = 'node'
platforms = ['linux']
optional = True
not_found = 'node.js required for emscripten cross-compiling'
#------------------------------------------------------------------------------
def check_exists(fips_dir) :
try :
out = subprocess.check_output(['node', '--version'])
if not out.startswith('v') :
log.warn("this doesn't look like a proper node.js 'node'")
return False
return True
except (OSError, subprocess.CalledProcessError) :
return False
| Python | 0 | @@ -382,16 +382,17 @@
rtswith(
+b
'v') :%0A
|
046bd8d310289ff71dd6b95422927dadb86637ec | Fix method override. | model/rtorrent.py | model/rtorrent.py | """
.. _rtorrent-class:
RTorrent
========
The RTorrent class serves as an interface to a remote RTorrent instance.
It implements a lot of the functionality that RTorrent exposes over XMLRPC;
currently only XMLRPC over HTTP is supported; but support for direct SCGI is
planned. Basically, HTTP support requires a web server to direct requests to
RTorrent, whereas SCGI talks directly to RTorrent. (The web server also uses
SCGI to talk to RTorrent)
The RTorrent constructor requires a host and optionally a port and url.
Some of the functions documented in here are in fact auto generated (at
runtime); We did this for a few reasons: extendability and ease of use.
(We can easily chain calls this way)
They will only have one argument in the documentation: *args.
Obviously some do not take any arguments; the docstring should
(in the near future, anyway) explain exactly what variables
should be passed.
A simple test:
.. code-block: python
x = RTorrent('sheeva')
# Simple test.
old = x.get_upload_throttle()
print 'Throttle:', old
print 'Return:', x.set_upload_throttle(20000)
print 'Throttle:', x.get_upload_throttle()
print 'Return:', x.set_upload_throttle(old)
print 'Throttle:', x.get_upload_throttle()
print 'Download list', x.get_download_list()
"""
from lib.xmlrpc import RTorrentXMLRPC
class RTorrent(object):
"""
RTorrent class. This wraps most of the RTorrent *main* functionality
(read: global functionality) in a class. Think of, current upload and
download, libTorrent version.
Methods specific to a Torrent can be found in the :ref:`torrent-class`
class.
"""
# FIXME: If we leave URL at '' xmlrpclib will default to /RPC2 as well.
def __init__(self, target):
"""
Initialise the RTorrent object.
``target`` is target dict as parsed by parse_config (pyrotorrent.py).
"""
self.target = target
self.s = RTorrentXMLRPC(target)
def __repr__(self):
return 'RTorrent(%s)' % self.target['name']
def get_download_list(self, _type=''):
"""
Returns a list of torrents.
_type defines what is returned. Valid:
* '' (Empty string), 'default'
* 'complete'
* 'incomplete'
* 'started'
* 'stopped'
* 'active'
* 'hashing'
* 'seeding'
Plus all customly defined views.
"""
# FIXME: List is not complete(?) + exception should be raised.
if _type not in ('complete', 'incomplete', 'started', 'stopped',
'active', 'hashing', 'seeding', '', 'default'):
return None
res = self.s.download_list(_type)
# FIXME: We now only have the hashes. Do we also want to fetch all the
# data per torrent? Or perhaps only the basic info?
return res
def query(self):
"""
Query returns a new RTorrentQuery object with the target
from the current RTorrent object.
Use this to execute several (different) calls on the RTorrent class in
one request. This can increase performance and reduce latency and load.
See :ref:`rtorrentquery-class` on how to use it.
"""
from lib.rtorrentquery import RTorrentQuery
return RTorrentQuery(self.target)
# XXX: Begin hacks
import types
_rpc_methods = {
'get_upload_throttle' : ('get_upload_rate',
"""
Returns the current upload throttle.
"""),
'set_upload_throttle' : ('set_upload_rate',
"""
Set the upload throttle.
Pass the new throttle size in bytes.
"""),
'get_download_throttle' : ('get_download_rate',
"""
Returns the download upload throttle.
"""),
'set_download_throttle' : ('set_download_rate',
"""
Set the current download throttle.
Pass the new throttle size in bytes.
"""),
'get_upload_rate' : ('get_up_rate',
"""
Returns the current upload rate.
"""),
'get_upload_rate_total' : ('get_up_total',
"""
Returns the total uploaded data.
"""), # XXX ^ Unsure about comment
'get_download_rate' : ('get_down_rate',
"""
Returns the current download rate.
"""),
'get_download_rate_total' : ('get_down_total',
"""
Returns the total downloaded data.
"""), # XXX ^ Unsure about comment
'get_memory_usage' : ('get_memory_usage',
"""
Returns rtorrent memory usage.
"""),
'get_max_memory_usage' : ('get_max_memory_usage',
"""
Returns rtorrent maximum memory usage.
"""),
'get_libtorrent_version' : ('system.library_version',
"""
Returns the libTorrent version.
"""),
'get_client_version' : ('system.client_version',
"""
Returns the rTorrent version.
"""),
'get_hostname' : ('system.hostname',
"""
Returns the hostname.
"""),
'add_torrent' : ('load',
"""
Loads a torrent into rtorrent from the torrent path.
"""),
'add_torrent_start' : ('load_start',
"""
Loads a torrent into rtorrent from the torrent path.
Will also start the download immediately.
"""),
'add_torrent_raw' : ('load_raw',
"""
Loads a torrent into rtorrent from a given string.
"""),
'add_torrent_raw' : ('load_raw_start',
"""
Loads a torrent into rtorrent from a given string.
Will also start the download immediately.
"""),
'get_ip' : ('get_ip',
"""
Returns the IP rtorrent is bound to. (For XMLRPC?)
"""), # XXX:For XMLRPC? ^
'get_view_list' : ('view_list',
"""
Returns a list of all views.
"""),
'create_view' : ('view_add',
"""
Creates a view; requires a single argument: A name for the view.
WARNING: If you add an already existing view; rtorrent will simply crash
(at least 0.8.6 does).
"""),
'get_process_id' : ('system.pid',
"""
Returns the process ID.
"""),
'get_cwd' : ('system.get_cwd',
"""
Returns the current working directory.
"""),
'get_xmlrpc_size_limit' : ('get_xmlrpc_size_limit',
"""
Returns the XMLRPC Size Limit
"""),
'set_xmlrpc_size_limit' : ('set_xmlrpc_size_limit',
"""
Set the XMLRPC size limit.
"""),
'execute_command' : ('execute_capture',
"""
Execute command as rtorrent user and return output as string.
""")
}
# Hack in all the methods in _rpc_methods!
for x, y in _rpc_methods.iteritems():
# caller = create_caller(y[0], create_argcheck(y[2])) # belongs to the
# argument checking
caller = (lambda name: lambda self, *args: getattr(self.s, name)(*args))(y[0])
caller.__doc__ = y[1] + '\nOriginal libTorrent method: ``%s``' % y[0]
setattr(RTorrent, x, types.MethodType(caller, None, RTorrent))
del caller
| Python | 0 | @@ -5494,32 +5494,38 @@
'add_torrent_raw
+_start
' : ('load_raw_s
|
1794fb8865241e22a5af30020111471ea00a6250 | check if you the plugins really need to be reloaded | InvenTree/plugin/admin.py | InvenTree/plugin/admin.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.apps import apps
import plugin.models as models
def plugin_update(queryset, new_status: bool):
"""general function for bulk changing plugins"""
for model in queryset:
model.active = new_status
model.save(no_reload=True)
app = apps.get_app_config('plugin')
app.reload_plugins()
@admin.action(description='Activate plugin(s)')
def plugin_activate(modeladmin, request, queryset):
"""activate a set of plugins"""
plugin_update(queryset, True)
@admin.action(description='Deactivate plugin(s)')
def plugin_deactivate(modeladmin, request, queryset):
"""deactivate a set of plugins"""
plugin_update(queryset, False)
class PluginConfigAdmin(admin.ModelAdmin):
"""Custom admin with restricted id fields"""
readonly_fields = ["key", "name", ]
list_display = ['key', 'name', 'active', ]
actions = [plugin_activate, plugin_deactivate, ]
admin.site.register(models.PluginConfig, PluginConfigAdmin)
| Python | 0 | @@ -262,101 +262,356 @@
-for model in queryset:%0A model.active = new_status%0A model.save(no_reload=True)%0A%0A
+apps_changed = False%0A%0A # run through all plugins in the queryset as the save method needs to be overridden%0A for model in queryset:%0A if model.active is not new_status:%0A model.active = new_status%0A apps_changed = True%0A model.save(no_reload=True)%0A%0A # reload plugins if they changed%0A if apps_changed:%0A
@@ -646,16 +646,20 @@
lugin')%0A
+
app.
|
0682e3b4ce5a23683ac1bd7d68cb69e3df92cc99 | Fix bug when hyp_length == 0 | nematus/metrics/sentence_bleu.py | nematus/metrics/sentence_bleu.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from math import exp
from operator import mul
from collections import defaultdict
from scorer import Scorer
from reference import Reference
class SentenceBleuScorer(Scorer):
"""
Scores SmoothedBleuReference objects.
"""
def __init__(self, argument_string):
"""
Initialises metric-specific parameters.
"""
Scorer.__init__(self, argument_string)
# use n-gram order of 4 by default
if not 'n' in self._arguments.keys():
self._arguments['n'] = 4
def set_reference(self, reference_tokens):
"""
Sets the reference against hypotheses are scored.
"""
self._reference = SentenceBleuReference(
reference_tokens,
self._arguments['n']
)
class SentenceBleuReference(Reference):
"""
Smoothed sentence-level BLEU as as proposed by Lin and Och (2004).
Implemented as described in (Chen and Cherry, 2014).
"""
def __init__(self, reference_tokens, n=4):
"""
@param reference the reference translation that hypotheses shall be
scored against. Must be an iterable of tokens (any
type).
@param n maximum n-gram order to consider.
"""
Reference.__init__(self, reference_tokens)
self.n = n
# preprocess reference
self._reference_length = len(self._reference_tokens)
self._reference_ngrams = self._get_ngrams(self._reference_tokens, self.n)
def _get_ngrams(self, tokens, max_n):
"""
Extracts all n-grams of order 1 up to (and including) @param max_n from
a list of @param tokens.
"""
n_grams = []
for n in range(1, max_n+1):
n_grams.append(defaultdict(int))
for n_gram in zip(*[tokens[i:] for i in range(n)]):
n_grams[n-1][n_gram] += 1
return n_grams
def score(self, hypothesis_tokens):
"""
Scores @param hypothesis against this reference.
@return the smoothed sentence-level BLEU score: 1.0 is best, 0.0 worst.
"""
def product(iterable):
return reduce(mul, iterable, 1)
def ngram_precisions(ref_ngrams, hyp_ngrams):
precisions = []
for n in range(1, self.n+1):
overlap = 0
for ref_ngram, ref_ngram_count in ref_ngrams[n-1].iteritems():
if ref_ngram in hyp_ngrams[n-1]:
overlap += min(ref_ngram_count, hyp_ngrams[n-1][ref_ngram])
hyp_length = len(hypothesis_tokens)-n+1
if n >= 2:
# smoothing as proposed by Lin and Och (2004),
# implemented as described in (Chen and Cherry, 2014)
overlap += 1
hyp_length += 1
precisions.append(overlap/hyp_length)
return precisions
def brevity_penalty(ref_length, hyp_length):
return min(1.0, exp(1-(ref_length/hyp_length)))
# preprocess hypothesis
hypothesis_length = len(hypothesis_tokens)
hypothesis_ngrams = self._get_ngrams(hypothesis_tokens, self.n)
# calculate n-gram precision for all orders
np = ngram_precisions(self._reference_ngrams, hypothesis_ngrams)
# calculate brevity penalty
bp = brevity_penalty(self._reference_length, hypothesis_length)
# compose final BLEU score
return product(np)**(1/self.n) * bp
| Python | 0.00033 | @@ -2987,24 +2987,51 @@
p/hyp_length
+ if hyp_length %3E 0 else 0.0
)%0A
|
3e57219677fad5e3127c46843771978b1ff8bb20 | Add profilegate endpoint | resources/lib/services/nfsession/nfsession_endpoints.py | resources/lib/services/nfsession/nfsession_endpoints.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2020 Stefano Gottardo - @CastagnaIT (original implementation module)
Netflix API endpoints
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
# Secure Netflix url
BASE_URL = 'https://www.netflix.com'
# List of all static endpoints for HTML/JSON POST/GET requests
# is_api_call:
# specify which address to use for the endpoint
# True -> The https address used is composed with 'apiUrl' value from reactContext data
# False -> The https address used is composed with the BASE_URL
# use_default_params:
# Add to the request the default parameters (see _prepare_request_properties)
# add_auth_url:
# Specifies if and where to put the 'authURL' value
# None -> Will not be added
# 'to_data' -> It will be added with the data to send
# 'to_params' -> It will be added to the request parameters
# content_type:
# If required add the Content-Type attribute to request header
# accept:
# If required add the Accept attribute to request header (if not specified use '*/*')
ENDPOINTS = {
'login':
{'address': '/login',
'is_api_call': False,
'use_default_params': False,
'add_auth_url': None,
# By default to login Netflix use 'application/x-www-form-urlencoded' Content-Type,
# instead we use 'application/json' for simplicity of data conversion
# if in the future login raise InvalidMembershipStatusAnonymous can means that json is no more accepted
'content_type': 'application/json',
'accept': '*/*'},
'logout':
{'address': '/SignOut',
'is_api_call': False,
'use_default_params': False,
'add_auth_url': None,
'accept': '*/*'},
'shakti':
{'address': '/pathEvaluator',
'is_api_call': True,
'use_default_params': True,
'add_auth_url': 'to_data',
'content_type': 'application/x-www-form-urlencoded'},
'browse':
{'address': '/browse',
'is_api_call': False,
'use_default_params': False,
'add_auth_url': None,
'accept': '*/*'},
'profiles':
{'address': '/profiles/manage',
'is_api_call': False,
'use_default_params': False,
'add_auth_url': None,
'accept': '*/*'},
'switch_profile':
{'address': '/SwitchProfile',
'is_api_call': False,
'use_default_params': False,
'add_auth_url': None,
'accept': '*/*'},
'activate_profile':
{'address': '/profiles/switch',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': None},
'profile_lock':
{'address': '/profileLock',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': 'to_data',
'content_type': 'application/json',
'accept': 'application/json, text/javascript, */*'},
'pin':
{'address': '/pin',
'is_api_call': False,
'use_default_params': False,
'add_auth_url': None},
'pin_reset':
{'address': '/pin/reset',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': None},
'pin_service':
{'address': '/pin/service',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': 'to_data',
'content_type': 'application/json',
'accept': 'application/json, text/javascript, */*'},
'metadata':
{'address': '/metadata',
'is_api_call': True,
'use_default_params': True,
'add_auth_url': 'to_params'},
'set_video_rating': # Old rating system
{'address': '/setVideoRating',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': 'to_data',
'content_type': 'application/json',
'accept': 'application/json, text/javascript, */*'},
'set_thumb_rating':
{'address': '/setThumbRating',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': 'to_data',
'content_type': 'application/json',
'accept': 'application/json, text/javascript, */*'},
'update_my_list':
{'address': '/playlistop',
'is_api_call': True,
'use_default_params': False,
'add_auth_url': 'to_data',
'content_type': 'application/json',
'accept': 'application/json, text/javascript, */*'}
# Don't know what these could be used for. Keeping for reference
# 'video_list_ids': {'address': '/preflight', 'is_api_call': True},
# 'kids': {'address': '/Kids', 'is_api_call': False}
}
| Python | 0 | @@ -2214,24 +2214,327 @@
t': '*/*'%7D,%0A
+ 'profiles_gate':%0A # This endpoint is used after ending editing profiles page, i think to force close an active profile session%0A %7B'address': '/ProfilesGate',%0A 'is_api_call': False,%0A 'use_default_params': False,%0A 'add_auth_url': 'to_data',%0A 'accept': '*/*'%7D,%0A
'profile
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.