repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Art3mk4/python | process/pid.py | Python | gpl-3.0 | 58 | 0.017241 | import os
print os.p | ath.dirname(os.path.abspath(__file__)) | |
aquadrop/solr_py | client/find_multi_intention.py | Python | gpl-3.0 | 1,346 | 0.00075 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import _ | uniout
import sys
import csv
reload(sys)
sys.setdefaultencoding("utf-8")
data_path = '../data/business/business_train_v7'
classes = list()
inputs = dict()
results = dict()
if __name__ == '__main__':
with open(data_path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for line in reader:
a = line[0]
if a not in classes:
classes.append(a)
# | print("classes:", _uniout.unescape(str(classes), 'utf8'))
with open(data_path, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for line in reader:
a = line[0]
b = line[1]
a_slots = a.split(',')
if b not in inputs:
inputs[b] = []
inputs[b].append(a)
for x in inputs[b]:
x_slots = x.split(',')
if x_slots[0] == a_slots[0] and x_slots[1] != a_slots[1]:
inputs[b].append(a)
for inp, intentions in inputs.iteritems():
if len(intentions) >= 2:
# results[inp] = intentions
print(inp, _uniout.unescape(str(intentions), 'utf8'))
# print(_uniout.unescape(str(results), 'utf8'))
# print(_uniout.unescape(str(results['用卡取两百块']), 'utf8')) |
dbbhattacharya/kitsune | vendor/packages/selenium/py/selenium/webdriver/remote/webdriver.py | Python | bsd-3-clause | 25,947 | 0.00185 | # Copyright 2008-2014 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The WebDriver implementation.
"""
import base64
import warnings
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol as defined
here: http://code.google.com/p/selenium/wiki/JsonWireProtocol
:Attributes:
- command_executor - The command.CommandExecutor object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to verify that the server did not return an error.
- session_id - The session ID to send with every command.
- capabilities - A dictionary of capabilities of the underlying browser for this instance's session.
- proxy - A selenium.webdriver.common.proxy.Proxy object, to specify a proxy for the browser to use.
"""
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None, keep_alive=False):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a command.CommandExecutor object or a string that specifies the URL of a remote server to send commands to.
- desired_capabilities - Dictionary holding predefined values for starting a browser
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or type(self.command_executor) is str:
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom star | tup behavior.
"""
pass
def stop_client(self):
"""
| Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, desired_capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
- browser_name - The name of the browser to request.
- version - Which browser version to request.
- platform - Which platform to request the browser on.
- javascript_enabled - Whether the new session should support JavaScript.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if browser_profile:
desired_capabilities['firefox_profile'] = browser_profile.encoded
response = self.execute(Command.NEW_SESSION, {
'desiredCapabilities': desired_capabilities,
})
self.session_id = response['sessionId']
self.capabilities = response['value']
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, WebElement):
return {'ELEMENT': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""
Creates a web element with the specified element_id.
"""
return WebElement(self, element_id)
def _unwrap_value(self, value):
if isinstance(value, dict) and 'ELEMENT' in value:
return self.create_web_element(value['ELEMENT'])
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_b |
conversationai/wikidetox | experimental/conversation_go_awry/prediction_utils/configure.py | Python | apache-2.0 | 2,644 | 0.016263 | """
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import pickle as cPickle
import numpy as np
from collections import defaultdict
def configure(constraint):
UNIGRAMS_FILENAME = "data/bow_features/%s/unigram100.pkl"%(constraint)
BIGRAMS_FILENAME = "data/bow_features/%s/bigram200.pkl"%(constraint)
UNIGRAMS_LIST = cPickle.load(open(UNIGRAMS_FILENAME, "rb"))
BIGRAMS_LIST = cPickle.load(open(BIGRAMS_FILENAME, "rb"))
STATUS = {4: ['founder', 'sysop'],
3: ['accountcreator', 'bureaucrat', 'checkuser'], \
2: [ 'abusefilter', 'abusefilter-helper', 'autoreviewer', 'extendedmover', \
'filemover', 'import', 'oversight', 'patroller', \
'reviewer','rollbacker','templateeditor','epadmin', 'epcampus', 'epcoordinator',\
'epinstructor', 'eponline'],\
1: ['massmessage-sender', 'ipblock-exempt', 'extendedconfirmed',\
'autoconfirmed', 'researcher', 'user']}
ASPECTS = ['age', 'status', 'comments_on_same_talk_page', 'comments_on_all_talk_pages',\
'edits_on_subjectpage', 'edits_on_wikipedia_articles', 'history_toxicity']
attacker_profile_ASPECTS =['proportion_of_being_replied',\
'total_reply_time_gap', 'reply_latency',\
'age', 'status', 'number_of_questions_asked', \
'edits_on_wikipedia_articles']
with open('feature_extraction/utils/lexicons') as f:
LEXICONS = json.load(f)
with open("feature_extraction/question_features/%s.json"%(constraint)) as f:
q = json.load(f)
QUESTIONS = defaultdict(list)
l = 0
for key, val in q.items():
action = key.split('-')[2]
new_key = key.split('-')[1 | ]
QUESTIONS[new_key].append({'action_id': action, 'question_type': np.argmin(val['normy_cluster_dist_vector'])})
with open("data/user_features.json") as f:
inp = json.load(f)
user | _features = {}
for conv, users in inp:
user_features[conv] = users
ARGS = [STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS, QUESTIONS, UNIGRAMS_LIST, BIGRAMS_LIST]
return user_features, ARGS
|
twallace27603/robot_army | wheels.py | Python | gpl-3.0 | 3,797 | 0.008954 | from enum import Enum
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import time
import atexit
Direction = Enum('Forward','Reverse','Spin','Left','Right','None')
class Wheels:
def __init__(self):
self.lc = 100 #Loop count
self.ld = .02 #Loop delay
self.mh = Adafruit_MotorHAT(addr=0x60)
self.speed = 0
self.direction = Direction.None
atexit.register(self.turnOffMotors)
self.rightWheel = self.mh.getMotor(4)
self.leftWheel = self.mh.getMotor(1)
self.minPower = 55
def setConfig(self,config):
self.config = config
self.lc = config.wheelsLoopCount
self.ld = config.wheelsLoopDelay
self.mh = Adafruit_MotorHAT(addr=config.wheelsAddr)
self.speed = 0
self.direction = Direction.None
self.minPower = config.wheelsMinPower
self.rightWheel = self.mh.getMotor(config.wheelsRight)
self.leftWheel = self.mh.getMotor(config.wheelsLeft)
def turnOffMotors(self):
self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
def setSpeed(self,speed, direction):
if((self.speed>0) and (speed>0) and (self.direction != direction)):
self.setSpeed(0,self.direction)
step = 1
if(speed < self.speed):
step = -step
if (direction!=self.direction):
if direction == Direction.Forward:
self.rightWheel.run(Adafruit_MotorHAT.FORWARD)
self.leftWheel.run(Adafruit_MotorHAT.FORWARD)
elif direction == Direction.Reverse:
self.rightWheel.run(Adafruit_MotorHAT.BACKWARD)
self.leftWheel.run(Adafruit_MotorHAT.BACKWARD)
elif (direction == Direction.Spin) or (direction == Direction.Left):
self.rightWheel.run(Adafruit_MotorHAT.FORWARD)
self.leftWheel.run(Adafruit_MotorHAT.BACKWARD)
elif direction == Direction.Right:
self.rightWheel.run(Adafruit_MotorHAT.BACKWARD)
self.leftWheel.run(Adafruit_MotorHAT.FORWARD)
elif direction == Direction.None:
self.turnOffMotors()
return
finalSpeed = self.speed
for i in range(self.speed, speed, step):
self.rightWheel.setSpeed(i)
self.leftWheel.setSpeed(i)
finalSpeed = i
time.sleep(self.ld | )
self.speed = finalSpeed
self.direction = direction
def correct(self,direction,bump=20):
wheel = self.rightWheel
if(direction == Direction.Right):
wheel = self.leftWheel
wheel.setSpeed(self.speed + bump)
time.sleep(.25)
wheel.setSpeed(self.speed)
| def pulse(self,seconds=.25,bump=20):
self.rightWheel.setSpeed(self.speed + bump)
self.leftWheel.setSpeed(self.speed + bump)
time.sleep(seconds)
self.rightWheel.setSpeed(self.speed)
self.leftWheel.setSpeed(self.speed)
def test(self):
print("Forward!")
self.setSpeed(100,Direction.Forward)
print("Speed: {} \t Direction: {}".format(w.speed, w.direction))
time.sleep(2)
#self.setSpeed(0,Direction.Forward)
print("Reverse!")
self.setSpeed(100,Direction.Reverse)
time.sleep(2)
#self.setSpeed(0,Direction.Reverse)
print("Spin!")
self.setSpeed(100,Direction.Spin)
time.sleep(2)
self.setSpeed(0,Direction.Spin)
if __name__ == '__main__':
w = wheels()
w.test()
w.turnOffMotors()
|
huntxu/neutron | neutron/db/l3_dvr_ha_scheduler_db.py | Python | apache-2.0 | 1,967 | 0 | # Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import neutron.db.l3_dvrscheduler_db as l3agent_dvr_sch_db
import neutron.db.l3_hascheduler_db as l3_ha_sch_db
class L3_DVR_HA_scheduler_db_mixin(l3agent_dvr_sch_db.L3_DV | Rsch_db_mixin,
l3_ha_sch_db.L3_HA_scheduler_db_mixin):
def get_dvr_routers_to_remove(self, context, port_id):
"""Returns info about which routers should be removed
| In case dvr serviceable port was deleted we need to check
if any dvr routers should be removed from l3 agent on port's host
"""
remove_router_info = super(L3_DVR_HA_scheduler_db_mixin,
self).get_dvr_routers_to_remove(context,
port_id)
# Process the router information which was returned to make
# sure we don't delete routers which have dvrhs snat bindings.
processed_remove_router_info = []
for router_info in remove_router_info:
router_id = router_info['router_id']
agent_id = router_info['agent_id']
if not self._check_router_agent_ha_binding(
context, router_id, agent_id):
processed_remove_router_info.append(router_info)
return processed_remove_router_info
|
ianalis/treepy | treepy/plotting.py | Python | mit | 2,994 | 0.008016 | # Copyright (c) 2015 Christian Alis
#
# See the file LICENSE for copying permission.
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from urllib import unquote
from scipy.stats import zscore
def plot_corr(results):
"""
Plot title, correlation and corrected p-values as horizontal bars
Rendering works best with 50 results.
Parameters
----------
results : list of dict
Output of `get_most_correlated()`
Returns
-------
ax : matplotlib.Axes
Rendered
| """
fig, ax = plt.subplots(figsize=(6,10)
| #subplot_kw=dict(left=0.05, right=0.95,
# bottom=0.03, top=0.97)
)
ax.barh(np.arange(len(results))+0.5, [r['r'] for r in results[::-1]], 0.3,
label='r')
ax.barh(range(len(results)), [r['p_bh']['double'] for r in results[::-1]],
0.3, color='r', label='FDR-corrected p')
for i, result in enumerate(results[::-1]):
title = unquote(result['title']).replace('_', ' ')
if len(title) > 30:
title = title[:27].strip()
title = title + '...'
if result['r'] > 0:
ax.text(-0.01, i, title, horizontalalignment='right')
else:
ax.text(0.01, i, title, horizontalalignment='left')
ax.set_xlim(-1,1)
ax.set_xticks(np.linspace(1,-1,9))
ax.set_yticks([])
legend = ax.legend(loc='best', frameon=True, framealpha=0.4)
legend.get_frame().set_facecolor('w')
fig.tight_layout()
return ax
def plot_max(tseries, results, legend_label='', axis_label='', n=0):
topp_tseries = results[n]['tseries']
topp_tseries = topp_tseries[topp_tseries.notnull()]
limits = (max(tseries.index[0], topp_tseries.index[0]),
min(tseries.index[-1], topp_tseries.index[-1]))
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
tseries.plot(ax=ax1, label=legend_label, color='b')
ax1.set_ylabel(axis_label)
lines, labels = ax1.get_legend_handles_labels()
ax1b = topp_tseries.plot(ax=ax1, color='g', secondary_y=True,
label=unquote(results[n]['title']).replace('_', ' '))
ax1b.set_ylabel('Normalized page views')
lines.extend(ax1b.get_legend_handles_labels()[0])
labels.extend(ax1b.get_legend_handles_labels()[1])
ax1b.legend(lines, labels)
((tseries - tseries.mean()) / tseries.std()).plot(ax=ax2, color='b',
label=legend_label)
((topp_tseries - topp_tseries.mean()) / topp_tseries.std()).plot(ax=ax2,
color='g',
label=unquote(results[n]['title']).replace('_', ' '))
ax2.set_xlim(*limits)
ax2.set_ylabel('z-score')
ax2.legend()
fig.tight_layout()
|
siosio/intellij-community | python/testData/optimizeImports/moduleLevelDunderWithImportFromFutureAbove.py | Python | apache-2.0 | 236 | 0.004237 | from __future__ import print_function
__author__ = "akniazev"
from datetime import date
from sys import path |
from foo import bar
from collections import OrderedDict
from datetime import time
date(1, 1, 1)
time(1)
OrderedDict()
bar( | ) |
liu-jian/seos1.0 | test/select_test.py | Python | apache-2.0 | 1,329 | 0.027293 | #!/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import json
import requests
url = "http://127.0.0.1:3000/api"
#登录并获取token
def login(username,password):
rep_url = "%s/login?username=%s&passwd=%s" % (url,username,password)
r = requests.get(rep | _url)
result = json.loads(r.content)
if result['code'] == 0:
token = result["authorization"]
| return json.dumps({'code':0,'token':token})
else:
return json.dumps({'code':1,'errmsg':result['errmsg']})
def rpc():
res=login('admin','123456')
result = json.loads(res)
if result['code'] ==0:
token=result['token']
headers = {'content-type': 'application/json','authorization':token}
print token
else:
return result
data = {
'jsonrpc':'2.0',
'method': 'selected.get',
'id':'1',
'params':{
'm_table':'cmdb_server',
'field':'product_id',
'where':{'id':1},
's_table':'cmdb_product'
}
}
r = requests.post(url,headers=headers,json=data)
print r.status_code
print r.text
rpc()
|
irzaip/cipi | cp_speak.py | Python | lgpl-3.0 | 7,663 | 0.033146 | #!/usr/bin/python
import commons
from espeak import espeak
import mosquitto
import subprocess
from os import listdir
import random
from os.path import join
from twython import Twython
import ConfigParser
#import time
import moc
import math
from datetime import *
from pytz import timezone
import calendar
from dateutil.relativedelta import *
config = ConfigParser.ConfigParser()
config.read("config.ini")
CONSUMER_KEY = config.get("TWYTHON","CONSUMER_KEY")
CONSUMER_SECRET = config.get("TWYTHON","CONSUMER_SECRET")
ACCESS_KEY = config.get("TWYTHON","ACCESS_KEY")
ACCESS_SECRET = config.get("TWYTHON","ACCESS_SECRET")
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_KEY,ACCESS_SECRET)
rndspeak = ["seenee","hai","hallo","bip bip","robot","yuhu","ea","oi","we","oh","aah"]
folder = "/home/pi/cipi/sounds/"
files = listdir(folder)
plfolder = "/home/pi/cipi/playlist/"
playlist = listdir(plfolder)
language = "en"
musiclist = []
atime = commons.getmillis()
stoptime = commons.getmillis()
stoptimeb = commons.getmillis()
# start the moc server
try:
moc.start_server()
except:
pass
def dt(str):
r = datetime.strptime(str,"%Y-%m-%d")
return r
def get_cal():
f = file("agenda.txt","rb")
ap = f.readlines()
data = []
for dt in ap:
data.append(dt.split(" "))
return data
#SPEAK EVENT FOR TODAY
def today_event():
today = datetime.today()
now = datetime.now()
for dt in mycal:
print dt
ev_dt=datetime.strptime(dt[0]+" "+dt[1],"%Y-%m-%d %H:%M")
evnt = dt[6]
if ev_dt.date() == today.date():
espeak.synth("dont forget to " + evnt +"\n" )
#COMPARE HALF HOUR EVENT
def event_reminder():
today = datetime.today()
now = datetime.now()
for dt in mycal:
ev_dt=datetime.strptime(dt[0]+" "+dt[1],"%Y-%m-%d %H:%M")
evnt = dt[6]
if ev_dt.date() == today.date():
if ev_dt > now:
intime = int(math.floor((ev_dt - now).seconds / 60))
if intime < 300:
data = evnt + ", event in " + str(intime) + " minutes"
espeak.synth(data)
def event_ongoing():
today = datetime.today()
now = datetime.now()
for dt in mycal:
ev_fr=datetime.strptime(dt[0]+" "+dt[1],"%Y-%m-%d %H:%M")
ev_to=datetime.strptime(dt[2]+" "+dt[3],"%Y-%m-%d %H:%M")
evnt = dt[6]
if ev_fr < now:
if ev_to > now:
data = "Do "+evnt+" now"
espeak.synth(data)
#RETRIEVE CALENDAR FROM GOOGLE CAL AND WRITE TO FILE
def retrieve_agenda():
try:
mycmd = "gcalget.sh"
subprocess.call(["sh",mycmd])
except:
espeak.synth("calendar error")
def parsemusic(dat):
f = file(dat,"r")
a = f.readlines()
try:
a.remove('\n')
except:
pass
return a
def on_connect(mosq, obj, rc):
mosq.subscribe("speak", 0)
mosq.subscribe("sound", 0)
mosq.subscribe("tweet", 0)
mosq.subscribe("teleop", 0)
mosq.subscribe("wii",0)
print("rc: "+str(rc))
def on_message(mosq, obj, msg):
global folder
global files
global language
global api
global stoptime
#routing dari teleop/wii ke topic MOTOR
if msg.topic == "teleop":
try:
mqttc.publish("motor",msg.payload)
a = 0
except:
pass
return
if msg.topic == "wii":
try:
mqttc.publish("motor",msg.payload)
a = 0
except:
pass
#process topic tweet
if msg.topic == "tweet":
try:
api.update_status(status=str(msg.payload))
espeak.synth("Tweeted")
except:
espeak.synth("Tweet failed")
return
#process topic speak
if msg.topic == "speak":
#print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
if msg.payload == "en2":
language = "en2"
return
elif msg.payload == "en":
language = "en"
return
elif msg.payload == "ko":
language = "ko"
return
elif msg.payload == "id":
language = "id"
return
elif msg.payload == "rnd":
espeak.synth(random.choice(rndspeak))
return
#incoming from wii
if msg.payload == "music start":
espeak.synth("play music")
musiclist = parsemusic(join(plfolder,random.choice(playlist)))
moc.quickplay(musiclist)
return
elif msg.payload == "music stop":
moc.stop()
espeak.synth("music stop")
return
elif msg.payload == "volume up":
moc.increase_volume(10)
return
elif msg.payload == "volume down":
moc.decrease_volume(10)
return
elif msg.payload == "next music":
moc.next()
return
elif msg.payload == "previous music":
moc.previous()
return
elif msg.payload == "toggle shuffle":
moc.toggle_shuffle()
return
elif msg.payload == "enable shuffle":
moc.enable_shuffle()
return
elif msg.payload == "disable shuffle":
moc.disable_shuffle()
return
elif msg.payload == "main++":
espeak.synth("run main")
commons.run_main()
return
elif msg.payload == "main--":
espeak.synth("kill main")
commons.kill_main()
return
elif msg.payload == "display++":
espeak.synth("display plus plus")
return
elif msg.payload == "display--":
espeak.synth("display minus minus")
return
elif msg.payload == "light++":
espeak.synth("light plus plus")
return
elif msg.payload == "light--":
espeak.synth("light minus minus")
return
elif msg.payload == "print++":
espeak.synth("print plus plus")
return
elif msg.payload == "print--":
espeak.synth("print minus minus")
return
if language == "en":
espeak.synth(msg.payload)
elif language == "ko":
subprocess.call(["/home/pi/cipi/speech_ko.sh",msg.payload])
elif language == "id":
subprocess.call(["/home/pi/cipi/speech_id.sh",msg.payload])
elif language == "en2":
subprocess.call(["/home/pi/cipi/speech_en.sh",msg.payload])
#process topic sound
if msg.topic == "sound":
if msg.payload == "rnd":
subprocess.call(["aplay",join(folder,random.choice(files))])
else:
subprocess.call(["aplay",msg.payload])
def on_publish(mosq, obj, mid):
pass
def on_subscribe(mosq, obj, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mosq, obj, level, string):
print(string)
# If you want to use a specific client id, use
# mqttc = mosquitto.Mosquitto("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mosquitto.Mosquitto()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
#m | qttc.on_log = on_log
mqttc.connect("127.0. | 0.1", 1883, 60)
#mqttc.subscribe("string", 0)
#mqttc.subscribe(("tuple", 1))
#mqttc.subscribe([("list0", 0), ("list1", 1)])
#Speak agenda
retrieve_agenda()
mycal = get_cal()
today_event()
while True:
mqttc.loop()
#loop reminder for every 5 minutes
btime = commons.getmillis()
if btime-atime > 300000:
atime = commons.getmillis()
event_reminder()
event_ongoing()
#loop timer untuk stop motor tiap 10 detik / safety
stoptimeb = commons.getmillis()
if stoptimeb-stoptime > 5000:
stoptime = commons.getmillis()
mqttc.publish("motor","1:1:0:0:#")
|
mangosmoothie/dnla-playlists | dnla-playlists/playlists.py | Python | gpl-3.0 | 7,175 | 0.002927 | import mutagen
import os
import re
import sys
from optparse import OptionParser
music_file_exts = ['.mp3', '.wav', '.ogg']
seconds_re = re.compile('(\d+)(\.\d+)? seconds')
def main(argv):
(options, args) = build_parser().parse_args(argv)
validate_options(options)
print('playlist(s) will be written to ', options.outdir)
if not options.contains and not options.regex:
playlists = build_top_10_playlists(options.start_at, [], options.extended,
options.absolute, options.depth)
else:
predicates = build_match_predicates(options.contains, options.regex)
playlists = [build_playlist(options.start_at, predicates, options.extended,
options.absolute, options.depth, options.name)]
outdir = options.outdir.rstrip(os.path.sep)
write_playlists(playlists, outdir)
def build_match_predicates(contains, regex):
predicates = []
if contains:
c = contains.lower()
predicates.append(
lambda x: c in os.path.basename(x['path']).lower() or c in x['title'].lower() or c in x['artist'].lower()
)
if regex:
r = re.compile(regex)
predicates.append(
lambda x: re.search(r, os.path.basename(x['path'])) or re.search(r, x['title']) or re.search(r, x['artist'])
)
return predicates
def build_parser():
parser = OptionParser()
parser.add_option('-n', '--name', dest='name', default=os.path.basename(os.getcwd()),
help='NAME of playlist', metavar='NAME')
parser.add_option('-s', '--start-at', dest='start_at', default=os.getcwd(),
help='DIR location to start media file search from (default is current DIR)',
metavar='DIR')
parser.add_option('-e', '--extended', dest='extended',
action='store_true', default=False,
help='use m3u extended format (has additional media metadata)')
parser.add_option('-a', '--absolute', dest='absolute',
action='store_true', default=False,
help='use absolute file paths (default is relative paths)')
parser.add_option('-d', '--depth', dest='depth', type="int", default=-1,
help='depth to search, 0 for target dir only (default is fully recursive)')
parser.add_option('-o', '--outdir', dest='outdir', default=os.getcwd(),
help='DIR location of output file(s) (default is current DIR)',
metavar='DIR')
parser.add_option('-c', '--contains', dest='contains', default=None,
help='case insensitive match on given string, i.e. "string contains SUBSTR". ' +
'Checks file names and metadata.', metavar='SUBSTR')
parser.add_option('-r', '--regex', dest='regex', default=None,
help='regex match. checks file name and metadata',
metavar='EXP')
parser.add_option('-f', '--force', dest='force', default=False,
action='store_true', help='force execution through warnings')
return parser
def validate_options(options):
if not os.path.isdir(options.outdir):
print('output directory does not exist!')
sys.exit(1)
if not os.path.isdir(options.start_at):
print('starting directory does not exist!')
sys.exit(1)
if options.depth != -1:
print('invalid depth: ' + str(options.depth))
sys.exit(1)
if os.path.exists(
os.path.join(options.outdir,
options.name if options.name.endswith('.m3u') else options.name + '.m3u')):
if options.force:
print('overwriting playlist: ' + options.name)
else:
print('playlist already exists with name: ' + options.name)
print('run with option -f to overwrite existing playlist')
sys.exit(1)
class Playlist:
def __init__(self, path, extended, absolute, name):
self.items = []
self.predicates = []
self.path = path
self.isExtended = extended
self.isAbsolute = absolute
self.name = name if name.endswith('.m3u') else name + '.m3u'
def __str__(self):
return self.name + ' items: ' + str(len(self.items))
def get_out_str(self, item, outdir):
x = 0
if not self.isAbsolute:
while x < len(outdir) and x < len(item['path']) \
and outdir[x] == item['path'][x]:
x += 1
if x == 0:
x = -1
if self.isExtended:
return '\n' + '#EXTINF:' + item['seconds'] + ', ' + item['artist'] + ' - ' + item['title'] \
+ '\n' + item['path'][x + 1:]
else:
return '\n' + item['path'][x + 1:]
def write_playlists(playlists, outdir):
for p in playlists:
print('writing playlist: ' + str(p))
with open(os.path.join(outdir, p.name), mode='w') as p_out:
if p.isExtended:
p_out.write('#EXTM3U')
else:
p_out.write('#STDM3U')
for i i | n p.items:
p_out.write(p.get_out_str(i, outdir))
def all_pass(x, predicates):
for p in predicates:
if not p(x):
return False
return True
def extract_metadata(path, extended=False):
meta = {'path': path, 'ti | tle': '', 'artist': '', 'seconds': '0'}
if extended:
f = mutagen.File(path)
if f:
match = re.search(seconds_re, f.info.pprint())
meta['seconds'] = match.group(1) if match else '0'
else:
f = {}
meta['title'] = f.get('title',
[os.path.basename(path)])[0]
meta['artist'] = f.get('artist',
[path.split(os.path.sep)[-2]])[0]
return meta
def build_top_10_playlists(root_path, predicates, extended, absolute, depth):
playlists = []
predicates.append(
lambda x: re.search('^\d{2}_\d{2} ', os.path.basename(x['path']))
)
predicates.append(
lambda x: int(os.path.basename(x['path'])[3:5]) < 11
)
for d in os.listdir(root_path):
dpath = os.path.join(root_path, d)
if os.path.isdir(dpath) \
and re.search('^\d{4}$', d) \
and 2100 > int(d) > 1900:
playlists.append(build_playlist(dpath, predicates,
extended, absolute,
0, os.path.basename(dpath)))
return playlists
def build_playlist(root_path, predicates, extended, absolute, depth, name):
playlist = Playlist(root_path, extended, absolute, name)
for root, dirs, files in os.walk(root_path):
for f in files:
path = os.path.join(root, f)
if os.path.splitext(path)[1].lower() in music_file_exts:
item = extract_metadata(path, extended)
if all_pass(item, predicates):
playlist.items.append(item)
return playlist
if __name__ == "__main__":
main(sys.argv[1:])
|
eoinmurray/icarus | Icarus/Algorithms/__init__.py | Python | mit | 101 | 0.009901 | from aut | o import auto
from basis import basis
from cross import cross
from pow | er_dep import power_dep |
coolbombom/CouchPotatoServer | couchpotato/core/downloaders/sabnzbd/main.py | Python | gpl-3.0 | 4,865 | 0.007605 | from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.helpers.variable import cleanHost, mergeDicts
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
from urllib2 import URLError
import json
import traceback
log = CPLog(__name__)
class Sabnzbd(Downloader):
type = ['nzb']
def download(self, data = {}, movie = {}, filedata = None):
log.info('Sending "%s" to SABnzbd.', data.get('name'))
req_params = {
'cat': self.conf('category'),
'mode': 'addurl',
'nzbname': self.createNzbName(data, movie),
}
if filedata:
if len(filedata) < 50:
log.error('No proper nzb available: %s', (filedata))
return False
# If it's a .rar, it adds the .rar extension, otherwise it stays .nzb
nzb_filename = self.createFileName(data, filedata, movie)
req_params['mode'] = 'addfile'
else:
req_params['name'] = data.get('url')
try:
if req_params.get('mode') is 'addfile':
sab_data = self.call(req_params, params = {'nzbfile': (ss(nzb_filename), filedata)}, multipart = True)
else:
sab_data = self.call(req_params)
except URLError:
log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0))
return False
except:
log.error('Failed sending release, use API key, NOT the NZB key: %s', traceback.format_exc(0))
return False
log.debug('Result from SAB: %s', sab_data)
if sab_data.get('status') and not sab_data.get('error'):
log.info('NZB sent to SAB successfully.')
if filedata:
return self.downloadReturnId(sab_data.get('nzo_ids')[0])
else:
return True
else:
log.error('Error getting data from SABNZBd: %s', sab_data)
return False
def getAllDownloadStatus(self):
log.debug('Checking SABnzbd download status.')
# Go through Queue
try:
queue = self.call({
'mode': 'queue',
})
except:
log.error('Failed getting queue: %s', traceback.format_exc(1))
return False
# Go through history items
try:
history = self.call({
'mode': 'history',
'limit': 15,
})
except:
log.error('Failed getting history json: %s', traceback.format_exc(1))
return False
statuses = StatusList(self)
# Get busy releases
for item in queue.get('slots', []):
statuses.append({
'id': item['nzo_id'],
'name': item['filename'],
'original_status': item['status'],
'timeleft': item['timeleft'] if not queue['paused'] else -1,
})
# Get old releases
for item in history.get('slots', []):
status = 'busy'
if item['status'] == 'Failed' or (item['status'] == 'Completed' and item['fail_message'].strip()):
status = 'failed'
elif item['status'] == 'Completed':
status = 'completed'
statuses.append({
'id': item['nzo_id'],
'name': item['name'],
'status': status,
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': item['storage'],
})
return statuses
def removeFailed(self, item):
log.info('%s failed downloading, deleting...', item['name'])
try:
self.call({
'mode': 'history',
'name': 'delete',
'del_files': '1',
'value': item['id']
}, use_json = False)
except:
lo | g.error('Failed deleting: %s', traceback.format_exc(0))
| return False
return True
def call(self, request_params, use_json = True, **kwargs):
url = cleanHost(self.conf('host')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
'apikey': self.conf('api_key'),
'output': 'json'
}))
data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
if use_json:
d = json.loads(data)
if d.get('error'):
log.error('Error getting data from SABNZBd: %s', d.get('error'))
return {}
return d.get(request_params['mode']) or d
else:
return data
|
mariecpereira/IA369Z | deliver/ia870/iaskelmrec.py | Python | mit | 482 | 0.018672 | # -*- encoding: utf-8 -*-
# Module iaskel | mrec
from numpy import *
def iaskelmrec(f, B=None):
from iabinary import iabinary
from iaintersec import iaintersec
from iadil import iadil
from iaunion import iaunion
| from iasecross import iasecross
if B is None:
B = iasecross(None)
y = iabinary( iaintersec(f, 0))
for r in range(max(ravel(f)),1,-1):
y = iadil( iaunion(y,iabinary(f,r)), B)
y = iaunion(y, iabinary(f,1))
return y
|
xuwenbao/suds | suds/client.py | Python | lgpl-3.0 | 26,012 | 0.002422 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a | suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmetho | d
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
use of namespaces and those namespaces are subject to changed.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
|
wscullin/spack | var/spack/repos/builtin/packages/lzma/package.py | Python | lgpl-2.1 | 1,935 | 0.000517 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program | is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
| # You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Lzma(AutotoolsPackage):
"""LZMA Utils are legacy data compression software with high compression
ratio. LZMA Utils are no longer developed, although critical bugs may be
fixed as long as fixing them doesn't require huge changes to the code.
Users of LZMA Utils should move to XZ Utils. XZ Utils support the legacy
.lzma format used by LZMA Utils, and can also emulate the command line
tools of LZMA Utils. This should make transition from LZMA Utils to XZ
Utils relatively easy."""
homepage = "http://tukaani.org/lzma/"
url = "http://tukaani.org/lzma/lzma-4.32.7.tar.gz"
version('4.32.7', '2a748b77a2f8c3cbc322dbd0b4c9d06a')
|
jeffbr13/python-ted | ted/client.py | Python | mpl-2.0 | 7,305 | 0.003012 | from collections import namedtuple
import logging
import re
from datetime import datetime, timedelta
from daterange import DateRange
from icalendar import | Calendar, Event
from lxml import html
from requests import Session
from .aspx_session import ASPXSession
from .course import Course
class Client:
"""
Main interface for interacting with T@Ed.
Attributes:
Client.session
Client.aspx_session
Client.courses
Client.week_dateranges
>>> timetable = Client()
>>> this_week = timetable.week_dateranges['Sem2 wk10']
>>> course = timetable.course('INFR08015')
>>> events = timetable.events(course)
"""
base | _url = 'https://www.ted.is.ed.ac.uk/UOE1314_SWS/default.aspx'
weeklist_url = 'https://www.ted.is.ed.ac.uk/UOE1314_SWS/weeklist.asp'
def __init__(self):
"""
Initialise T@Ed session and download course list.
"""
self.session = Session()
# Get ASPX session variables from default page:
response = self.session.get(Client.base_url, verify=False)
index = html.document_fromstring(response.text)
self.aspx_session = ASPXSession(index)
logging.info('Fetching course list webpage...')
parameters = {
'__EVENTTARGET': 'LinkBtn_modules',
'tLinkType': 'information',
}
course_page = self.post(Client.base_url, parameters=parameters)
course_options = course_page.xpath('//select[@name="dlObject"]/option')
logging.info('Building Course list')
self.courses = []
for option in course_options:
try:
title, identifier = option.text.strip().rsplit(' - ', 1)
except ValueError as e:
title = identifier = option.text.strip()
logging.warning('Error in splitting {0}: {1}'.format(title, e))
logging.info('Title and identifier will be the same for {0}'.format(title))
code = identifier[:9]
self.courses.append(Course(title=title,
identifier=identifier,
code=code))
logging.info('Fetching academic-week/date conversion webpage...')
week_date_page = self.get(Client.weeklist_url)
logging.info('Building academic-week/date dictionary...')
self.week_dateranges = dict()
week_date_rows = week_date_page.xpath('/html/body/table[@class="weektable"]//tr[./td]')
for row in week_date_rows:
week_str, date_str = row.xpath('./td/text()')
dtstart = datetime.strptime(date_str[4:], '%a %d %b %Y')
dtend = dtstart + timedelta(days=7)
self.week_dateranges[week_str] = DateRange(dtstart, dtend)
return
def get(self, url, parameters=None):
"""
Get a webpage, returning HTML.
"""
logging.debug('GET ' + url + '?' + str(parameters))
if parameters:
parameters.update(self.aspx_session.parameters())
response = self.session.get(url, params=parameters, verify=False)
page = html.document_fromstring(response.text)
try:
self.aspx_session = ASPXSession(page)
except Exception as e:
logging.warning(url + ' - ' + str(e))
return page
def post(self, url, parameters):
"""
Post data to a webpage, returning HTML.
"""
logging.debug('POST ' + url + '?' + str(parameters))
parameters.update(self.aspx_session.parameters())
response = self.session.post(url, data=parameters, verify=False)
page = html.document_fromstring(response.text)
try:
self.aspx_session = ASPXSession(page)
except Exception as e:
logging.warning(url + ' - ' + str(e))
return page
def match(self, regex):
"""
Returns a list of all courses which have an attribute matching the given regex.
"""
return [c for c in self.courses if regex.match(c.title)
or regex.match(c.code)
or regex.match(c.identifier)]
def course(self, course_code=None):
for c in self.courses:
if course_code in c.code:
return c
def dateranges_for_week_str(self, weeks_str):
"""
>>> weeks('Sem2 wk3-Sem2 wk5, Sem2 wk6-Sem2 wk11')
['Sem2 wk3', 'Sem2 wk4', 'Sem2 wk5', 'Sem2 wk6', 'Sem2 wk7', 'Sem2 wk8', 'Sem2 wk9', 'Sem2 wk10', 'Sem2 wk11']
"""
week_ranges = weeks_str.split(', ')
date_ranges = []
for week_range in week_ranges:
first_week_str, last_week_str = week_range.split('-')
first_week = self.week_dateranges[first_week_str]
last_week = self.week_dateranges[last_week_str]
date_ranges.append(DateRange(first_week.date, last_week.to))
return date_ranges
def events(self, course):
parameters = {
'tLinkType': 'modules',
'dlFilter': '',
'tWildcard': '',
'dlObject': course.identifier,
'lbWeeks': 't',
'lbDays': '1-7',
'dlPeriod': '1-32',
'dlType': 'TextSpreadsheet;swsurl;SWSCUST Object TextSpreadsheet',
'bGetTimetable': '',
}
timetable_page = self.post(Client.base_url, parameters=parameters)
day_numbered_trs = enumerate(timetable_page.xpath('//table[@class="spreadsheet"]//tr[not(contains(@class, "columnTitles"))]'))
events = []
for (day_number, event_row) in day_numbered_trs:
event_tds = event_row.xpath('./td')
event = Event()
event.uid = course.identifier + '-' + event_tds[0].text + '@timetab.benjeffrey.com'
event.add('summary', event_tds[0].text)
event.add('description', event_tds[1].text)
event.add('dtstamp', datetime.now())
start_time = datetime.strptime(event_tds[3].text.strip(), '%H:%M').time()
end_time = datetime.strptime(event_tds[4].text.strip(), '%H:%M').time()
weeks = self.dateranges_for_week_str(event_tds[5].text)
event.add('dtstart',
datetime.combine(weeks[0].date + timedelta(days=day_number),
start_time))
event.add('dtend',
datetime.combine(weeks[0].date + timedelta(days=day_number),
start_time))
for week in weeks:
event.add('rdate', week.date + timedelta(days=day_number))
location = event_tds[7].text.strip() + ', ' + event_tds[6].xpath('./a/text()')[0].strip()
event.add('location', location)
event.add('categories', ['EDUCATION', event_tds[2].text])
events.append(event)
return events
def calendar(self, events):
"""
Build an icalendar Calendar containing all the given events.
"""
cal = Calendar()
cal.add('prodid', '-//Ben Jeffrey//NONSGML timetab//EN')
cal.add('version', '2.0')
for event in events:
cal.add_component(event)
return cal
|
xkmato/casepro | casepro/urls.py | Python | bsd-3-clause | 1,472 | 0.001359 | from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.views import static
from casepro.backend import get_backend
from casepro.utils.views import PartialTemplate
urlpatterns = [
url(r'', include('casepro.cases.urls')),
url(r'', include('casepro.contacts.urls')),
url(r'', include('casepro.msg_board.urls')),
url(r'', include('casepro.msgs.urls')),
url(r'', include('casepro.rules.urls')),
url(r'', include('casepro.profiles.urls')),
url(r'', include('casepro.orgs_ext.urls')),
url(r'^pods/', include('casepro.pods.urls')),
url(r'^stats/', include('casepro.statistics.urls')),
url(r'^users/', include('dash.users.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^comments/', include('django_comments.urls')),
url(r'^partials/(?P<template>[a-z0-9\-_]+)\.html$', PartialTemplate.as_view(), name='utils.partial_template')
]
backend_urls = get_backend().get_url_patterns() or []
urlpatterns | += backend_urls
if settings.DEBUG: # pragma: no cover
try | :
import debug_toolbar
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
except ImportError:
pass
urlpatterns = [
url(r'^media/(?P<path>.*)$', static.serve, {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
url(r'', include('django.contrib.staticfiles.urls'))
] + urlpatterns
|
akx/shoop | shoop_tests/admin/test_picotable.py | Python | agpl-3.0 | 4,680 | 0.003419 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import pytest
from django.contrib.auth import get_user_model
from shoop.admin.utils.picotable import (
ChoicesFilter, Column, DateRangeFilter, Filter, MultiFieldTextFilter,
Picotable, RangeFilter, TextFilter
)
from shoop_tests.utils import empty_iterable
from shoop_tests.utils.fixtures import regular_user
class PicoContext(object):
def superuser_display(self, instance): # Test indirect `display` callable
return "very super" if instance.is_superuser else "-"
def instance_id(instance): # Test direct `display` callable
return instance.id
def false_and_true():
return [(False, "False"), (True, "True")]
def get_pico(rf):
return Picotable(
request=rf.get("/"),
columns=[
Column("id", "Id", filter_config=Filter(), display=instance_id),
Column("username", "Username", sortable=False, filter_config=MultiFieldTextFilter(filter_fields=("username", "email"), operator="iregex")),
Column("email", "Email", sortable=False, filter_config=TextFilter()),
Column("is_superuser", "Is Superuser", display="superuser_display", filter_config=ChoicesFilter(choices=false_and_true())),
Column("is_active", "Is Active", filter_config=ChoicesFilter(choices=false_and_true)), # `choices` callable
Column("date_joined", "Date Joined", filter_config=DateRangeFilter())
],
queryset=get_user_model().objects.all(),
context=PicoContext()
)
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_basic(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_da | ta({"perPage": 100, "page": 1})
assert len(data["items"]) == get_user_model().objects.count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_display(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1})
for item in data["items"]:
if item["id"] == admin_user.pk:
assert item["is_superuser"] == "very super" |
if item["id"] == regular_user.pk:
assert item["is_superuser"] == "-"
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_sort(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "sort": "-id"})
id = None
for item in data["items"]:
if id is not None:
assert item["id"] <= id, "sorting does not work"
id = item["id"]
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_invalid_sort(rf, admin_user, regular_user):
pico = get_pico(rf)
with pytest.raises(ValueError):
data = pico.get_data({"perPage": 100, "page": 1, "sort": "-email"})
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_choice_filter(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "filters": {"is_superuser": True}})
assert len(data["items"]) == get_user_model().objects.filter(is_superuser=True).count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_text_filter(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "filters": {"email": admin_user.email}})
assert len(data["items"]) == get_user_model().objects.filter(is_superuser=True).count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_multi_filter(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "filters": {"username": "."}})
assert len(data["items"]) == get_user_model().objects.count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_range_filter(rf, regular_user):
pico = get_pico(rf)
one_day = datetime.timedelta(days=1)
assert not empty_iterable(pico.get_data({"perPage": 100, "page": 1, "filters": {"date_joined": {"min": regular_user.date_joined - one_day}}})["items"])
assert not empty_iterable(pico.get_data({"perPage": 100, "page": 1, "filters": {"date_joined": {"max": regular_user.date_joined + one_day}}})["items"])
# TODO: a false test for this
def test_column_is_user_friendly():
with pytest.raises(NameError):
Column(id="foo", title="bar", asdf=True)
|
hj3938/panda3d | contrib/src/sceneeditor/sePlacer.py | Python | bsd-3-clause | 33,063 | 0.010949 | """ DIRECT Nine DoF Manipulation Panel """
from direct.showbase.DirectObject import DirectObject
from direct.directtools.DirectGlobals import *
from direct.tkwidgets.AppShell import AppShell
from direct.tkwidgets.Dial import AngleDial
from direct.tkwidgets.Floater import Floater
from Tkinter import Button, Menubutton, Menu, StringVar
from pandac.PandaModules import *
import Tkinter, Pmw
"""
TODO:
Task to monitor pose
"""
class Placer(AppShell):
# Override class variables here
appname = 'Placer Panel'
frameWidth = 625
frameHeight = 215
usecommandarea = 0
usestatusarea = 0
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('title', self.appname, None),
('nodePath', SEditor.camera, None),
)
self.defineoptions(kw, optiondefs)
# Call superclass initialization function
AppShell.__init__(self)
self.initialiseoptions(Placer)
# Accept the message from sceneEditor to update the information about the target nodePath
self.accept('placerUpdate', self.updatePlacer)
def appInit(self):
# Initialize state
self.tempCS = SEditor.group.attachNewNode('placerTempCS')
self.orbitFromCS = SEditor.group.attachNewNode(
'placerOrbitFromCS')
self.orbitToCS = SEditor.group.attachNewNode('placerOrbitToCS')
self.refCS = self.tempCS
# Dictionary keeping track of all node paths manipulated so far
self.nodePathDict = {}
self.nodePathDict['camera'] = SEditor.camera
self.nodePathDict['widget'] = SEditor.widget
self.nodePathNames = ['camera', 'widget', 'selected']
self.refNodePathDict = {}
self.refNodePathDict['parent'] = self['nodePath'].getParent()
self.refNodePathDict['render'] = render
self.refNodePathDict['camera'] = SEditor.camera
self.refNodePathDict['widget'] = SEditor.widget
self.refNodePathNames = ['parent', 'self', 'render',
'camera', 'widget', 'selected']
# Initial state
self.initPos = Vec3(0)
self.initHpr = Vec3(0)
self.initScale = Vec3(1)
self.deltaHpr = Vec3(0)
# Offset for orbital mode
self.posOffset = Vec3(0)
# Set up event hooks
self.undoEvents = [('DIRECT_undo', self.undoHook),
('DIRECT_pushUndo', self.pushUndoHook),
('DIRECT_undoListEmpty', self.undoListEmptyHook),
('DIRECT_redo', self.redoHook),
('DIRECT_pushRedo', self.pushRedoHook),
('DIRECT_redoListEmpty', self.redoListEmptyHook)]
for event, method in self.undoEvents:
self.accept(event, method)
# Init movement mode
self.movementMode = 'Relative To:'
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
interior['relief'] = Tkinter.FLAT
# Add placer commands to menubar
self.menuBar.addmenu('Placer', 'Placer Panel Operations')
self.menuBar.addmenuitem('Placer', 'command',
'Zero Node Path',
label = 'Zero All',
command = self.zeroAll)
self.menuBar.addmenuitem('Placer', 'command',
'Reset Node Path',
label = 'Reset All',
command = self.resetAll)
self.menuBar.addmenuitem('Placer', 'command',
'Print Node Path Info',
label = 'Print Info',
command = self.printNodePathInfo)
self.menuBar.addmenuitem(
'Placer', 'command',
'Toggle widget visability',
label = 'Toggle Widget Vis',
command = SEditor.toggleWidgetVis)
self.menuBar.addmenuitem(
'Placer', 'command',
'Toggle widget manipulation mode',
label = 'Toggle Widget Mode',
command = SEditor.manipulationControl.toggleObjectHandlesMode)
# Get a handle to the menu frame
menuFrame = self.menuFrame
self.nodePathMenu = Pmw.ComboBox(
menuFrame, labelpos = Tkinter.W, label_text = 'Node Path:',
entry_width = 20,
selectioncommand = self.selectNodePathNamed,
scrolledlist_items = self.nodePathNames)
self.nodePathMenu.selectitem('selected')
self.nodePathMenuEntry = (
self.nodePathMenu.component('entryfield_entry'))
self.nodePathMenuBG = (
self.nodePathMenuEntry.configure('background')[3])
self.nodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.nodePathMenu, 'Select node path to manipulate')
modeMenu = Pmw.OptionMenu(menuFrame,
items = ('Relative To:',
'Orbit:'),
initialitem = 'Relative To:',
command = self.setMovementMode,
menubutton_width = 8)
modeMenu.pack(side = 'left', expand = 0)
self.bind(modeMenu, 'Select manipulation mode')
self.refNodePathMenu = Pmw.ComboBox(
menuFrame, entry_width = 16,
selectioncommand = self.selectRefNodePathNamed,
scrolledlist_items = self.refNodePathNames)
self.refNodePathMenu.selectitem('parent')
self.refNodePathMenuEntry = (
self.refNodePathMenu.component('entryfield_entry'))
self.refNodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.refNodePathMenu, 'Select relative node path')
self.undoButton = Button(menuFrame, text = 'Undo',
command = SEditor.undo)
if SEditor.undoList:
self.undoButton['state'] = 'normal'
else:
self.undoButton['state'] = 'disabled'
self.undoButton.pack(side = 'left', expand = 0)
self.bind(self.undoButton, 'Undo last operation')
self.redoButton = Button(menuFrame, text = 'Redo',
command = SEd | itor.redo)
if SEditor.redoList:
self.redoButton['state'] = 'normal'
else:
self.redoButton['state'] = 'disabled'
self.redoButton | .pack(side = 'left', expand = 0)
self.bind(self.redoButton, 'Redo last operation')
# Create and pack the Pos Controls
posGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Position',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
posMenubutton = posGroup.component('tag')
self.bind(posMenubutton, 'Position menu operations')
posMenu = Menu(posMenubutton, tearoff = 0)
posMenu.add_command(label = 'Set to zero', command = self.zeroPos)
posMenu.add_command(label = 'Reset initial',
command = self.resetPos)
posMenubutton['menu'] = posMenu
posGroup.pack(side='left', fill = 'both', expand = 1)
posInterior = posGroup.interior()
# Create the dials
self.posX = self.createcomponent('posX', (), None,
Floater, (posInterior,),
text = 'X', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = 'Red')
self.posX['commandData'] = ['x']
self.posX['preCallback'] = self.xformStart
self.posX['postCallback'] = self.xformStop
self.posX['callbackData'] = ['x']
self.posX.pack(expand=1,fill='both')
self.posY = self.createcomponent('pos |
eek6/squeakspace | www/proxy/scripts/proxy/group_quota.py | Python | gpl-3.0 | 2,328 | 0.006873 | import squeakspace.common.util as ut
import squeakspace.common.util_http as ht
import squeakspace.proxy.server.db_sqlite3 as db
import squeakspace.common.squeak_ex as ex
import config
def post_handler(environ):
query = ht.parse_post_request(environ)
cookies = ht.parse_cookies(env | iron)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
group_id = ht.get_required(query, 'group_id')
new_size = ht.convert_int(ht.get_required(query, 'new_size'), 'new_size')
when_space_exhausted = ht.get_required(query, 'when_space_exhausted')
public_key_hash = ht.get_required(query, 'public_key_ha | sh')
passphrase = ht.get_optional(query, 'passphrase')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
resp = db.change_group_quota(c, user_id, session_id,
node_name, group_id, new_size, when_space_exhausted,
public_key_hash, passphrase)
db.commit(conn)
raise ht.ok_json({'status' : 'ok', 'resp' : resp})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def get_handler(environ):
query = ht.parse_get_request(environ)
cookies = ht.parse_cookies(environ)
user_id = ht.get_required_cookie(cookies, 'user_id')
session_id = ht.get_required_cookie(cookies, 'session_id')
node_name = ht.get_required(query, 'node_name')
group_id = ht.get_required(query, 'group_id')
owner_id = ht.get_required(query, 'owner_id')
passphrase = ht.get_required(query, 'passphrase')
conn = db.connect(config.db_path)
try:
c = db.cursor(conn)
resp = db.read_group_quota(c, user_id, session_id, node_name, group_id, owner_id, passphrase)
raise ht.ok_json({'status' : 'ok', 'resp' : resp})
except ex.SqueakException as e:
raise ht.convert_squeak_exception(e)
finally:
db.close(conn)
def main_handler(environ):
ht.dispatch_on_method(environ, {
'POST' : post_handler,
'GET' : get_handler })
def application(environ, start_response):
return ht.respond_with_handler(environ, start_response, main_handler)
|
yaqiyang/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/SubscriptionIdApiVersion/microsoftazuretesturl/operations/group_operations.py | Python | mit | 3,678 | 0.001903 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
from .. import models
class GroupOperations(object):
"""GroupOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_sample_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Provides a resouce group with name 'testgroup101' and location 'West
US'.
:param | resource_group_name: Resource Group name 'testgroup101'.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool | raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`SampleResourceGroup
<fixtures.acceptancetestssubscriptionidapiversion.models.SampleResourceGroup>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SampleResourceGroup', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
ajduncan/downspout | setup.py | Python | mit | 721 | 0.001387 | #!/usr/bin/env python
from setuptools import setup
install_requires = [
'bandcamp-downloader==0.0.10',
'beautifulsoup4==4.10.0',
'Pafy==0.3.66',
'fudge==1.0.3',
'requests>=2.20.0',
'simplejson==3.6.5',
'slimit==0.8.1',
'stagger',
]
dependency_links = [
'git://github.com/ajduncan/stagger.git@master#egg=stagger',
]
setup(n | ame='downspout',
version='v0.0.6',
description='Capture cloud based media for offline merrim | ent.',
license='MIT',
author='Andy Duncan',
author_email='ajduncan@gmail.com',
url='https://github.com/ajduncan/downspout/',
install_requires=install_requires,
dependency_links=dependency_links,
test_suite='tests.runtests'
)
|
zenweasel/pybuilder-djangoexample | pyb_django/catalog/urls.py | Python | bsd-3-clause | 253 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from views import CatalogListView
urlpatterns = patterns('',
| url(r'^$', Catal | ogListView.as_view(), name="list"),
)
|
weld-project/weld | weld-python/weld/grizzly/__init__.py | Python | bsd-3-clause | 105 | 0 |
from weld.grizzly.core.frame import GrizzlyDataFrame
from we | ld.gr | izzly.core.series import GrizzlySeries
|
jordanemedlock/psychtruths | temboo/core/Library/Zendesk/Groups/ListGroups.py | Python | apache-2.0 | 4,462 | 0.005379 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListGroups
# List available groups.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.c | horeography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListGroups(Choreography) | :
def __init__(self, temboo_session):
"""
Create a new instance of the ListGroups Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListGroups, self).__init__(temboo_session, '/Library/Zendesk/Groups/ListGroups')
def new_input_set(self):
return ListGroupsInputSet()
def _make_result_set(self, result, path):
return ListGroupsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListGroupsChoreographyExecution(session, exec_id, path)
class ListGroupsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListGroups
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
super(ListGroupsInputSet, self)._set_input('Email', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number of the results to be returned. Used together with the PerPage parameter to paginate a large set of results.)
"""
super(ListGroupsInputSet, self)._set_input('Page', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
super(ListGroupsInputSet, self)._set_input('Password', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of results to return per page. Maximum is 100 and default is 100.)
"""
super(ListGroupsInputSet, self)._set_input('PerPage', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
super(ListGroupsInputSet, self)._set_input('Server', value)
class ListGroupsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListGroups Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
"""
return self._output.get('Response', None)
def get_NextPage(self):
"""
Retrieve the value for the "NextPage" output from this Choreo execution. ((integer) The index for the next page of results.)
"""
return self._output.get('NextPage', None)
def get_PreviousPage(self):
"""
Retrieve the value for the "PreviousPage" output from this Choreo execution. ((integer) The index for the previous page of results.)
"""
return self._output.get('PreviousPage', None)
class ListGroupsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListGroupsResultSet(response, path)
|
kbiscanic/apt_project | apt/features/kbiscanic/vector_space_similarity.py | Python | apache-2.0 | 815 | 0.001227 | __author__ = 'kbiscanic'
from collections impor | t Counter
from scipy.linalg import norm
from features.karlo.WWO import calc_ic
def _lsa(s):
w = Counter(s)
return {x: w[x] for x in w}
def _lsaic(s):
w = Counte | r(s)
return {x: calc_ic(x) * w[x] for x in w}
def _cosine_sim(da, db):
sol = 0.
for key in set(da.keys()).intersection(db.keys()):
sol += da[key] * db[key]
if sol == 0:
return 0.
return abs(sol / norm(da.values()) / norm(db.values()))
def vector_space_similarity(sa, sb, ic=False):
if ic:
lsaa = _lsaic(sa)
lsab = _lsaic(sb)
else:
lsaa = _lsa(sa)
lsab = _lsa(sb)
return _cosine_sim(lsaa, lsab)
def vector_space_similarity_words(words, ic=False):
return vector_space_similarity(words[0], words[1], ic) |
dstufft/sqlalchemy | test/requirements.py | Python | mit | 28,475 | 0.002669 | """Requirements specific to SQLAlchemy's own unit tests.
"""
from sqlalchemy import util
import sys
from sqlalchemy.testing.requirements import SuiteRequirements
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import \
skip, \
skip_if,\
only_if,\
only_on,\
fails_on_everything_except,\
fails_on,\
fails_if,\
succeeds_if,\
SpecPredicate,\
against,\
LambdaPredicate,\
requires_tag
def no_support(db, reason):
return SpecPredicate(db, description=reason)
def exclude(db, op, spec, description=None):
return SpecPredicate(db, op, spec, description=description)
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if([
no_support('firebird', 'not supported by database'),
no_support('mysql', 'not supported by database'),
no_support('mssql', 'not supported by database'),
])
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return skip_if([
no_support('sqlite', 'not supported by database'),
])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(
no_support('sqlite', 'not supported by database')
)
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
['sqlite', 'oracle'],
'target backend %(doesnt_support)s ON UPDATE CASCADE'
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') + \
skip_if('mssql')
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(['oracle'])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if([
"firebird", "oracle", "mysql"
], "not supported by database"
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if([
no_support('firebird', 'not supported by database'),
no_support('oracle', 'not supported by database'),
no_support('mssql', 'not supported by database'),
no_support('sybase', 'not supported by database'),
])
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"],
"not supported by driver")
@property
def identity(self):
"""Target database must support GENERATED AS IDENTITY or a facsimile.
Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other
column DDL feature that fills in a DB-generated identifier at INSERT-time
without requiring pre-execution of a SEQUENCE or other artifact.
"""
return skip_if(["firebird", "oracle", "postgresql", "sybase"],
"not supported by database"
)
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(
["mssql", "firebird"], "not supported (?)"
)
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus Postgresql, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence to
be explicitly added, including if the table was reflected.
"""
return skip_if(["firebird", "oracle", "sybase"],
"not supported by database"
)
@property
def insert_from_select(self):
return skip_if(
["firebird"], "crashes for unknown reason"
)
@property
def fetch_rows_post_commit(self):
return skip_if(
["firebird"], "not supported"
)
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"],
"not supported by database/driver"
)
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"],
"not supported by database/driver"
)
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql+pyodbc", "mssql+mxodbc"], "no driver support")
@property
def independent_connections(self):
"""Target must support simultaneous, independent database connections."""
# This is also true of some configurations of UnixODBC and probably win32
# ODBC as well.
return skip_if([
no_support("sqlite",
"independent connections disabled "
"when :memory: connections are used"),
exclude("mssql", "<", (9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections"
)
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(["mssql", "sybase"],
"IDENTITY columns can't be updated")
@property
def isolation_level(self):
return only_on(
('postgresql', 'sqlite', 'mysql'),
"DBAPI has no isolation level support"
) + fails_on('postgresql+pypostgresql',
'pypostgresql bombs on multiple isolation level calls')
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if([
# no access to same table
no_support('mysql', 'requires SUPER priv'),
exclude(' | mysql', '<', (5, 0, 10), 'not supported by database'),
# huh? TODO: implement triggers for PG tests, remove this
no_support('postgresql',
'PG triggers need to be implemented for | tests'),
])
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if("oracle", 'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"')
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return o |
nirmeshk/oh-mainline | mysite/customs/migrations/0039_auto__add_field_tigrisquerymodel_startid__add_field_tigrisquerymodel_d.py | Python | agpl-3.0 | 26,529 | 0.00769 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TigrisQueryModel.startid'
db.add_column('customs_tigrisquerymodel', 'startid', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True), keep_default=False)
# Adding field 'TigrisQueryModel.description'
db.add_column('customs_tigrisquerymodel', 'description', self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'TigrisQueryModel.startid'
db.delete_column('customs_tigrisquerymodel', 'startid')
# Deleting field 'TigrisQueryModel.description'
db.delete_column('customs_tigrisquerymodel', 'description')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 6, 27, 17, 30, 33, 974561)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 6, 27, 17, 30, 33, 974473)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
| 'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '1 | 00'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customs.bugzillaquerymodel': {
'Meta': {'object_name': 'BugzillaQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'query_type': ('django.db.models.fields.CharField', [], {'default': "'xml'", 'max_length': '20'}),
'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.BugzillaTrackerModel']"}),
'trackerquerymodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerQueryModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '400'})
},
'customs.bugzillatrackermodel': {
'Meta': {'object_name': 'BugzillaTrackerModel', '_ormbases': ['customs.TrackerModel']},
'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'base_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'bitesized_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'bitesized_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'bug_project_name_format': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'documentation_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'documentation_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'query_url_type': ('django.db.models.fields.CharField', [], {'default': "'xml'", 'max_length': '20'}),
'tracker_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'trackermodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerModel']", 'unique': 'True', 'primary_key': 'True'})
},
'customs.githubquerymodel': {
'Meta': {'object_name': 'GitHubQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
'state': ('django.db.models.fields.CharField', [], {'default': "'open'", 'max_length': '20'}),
'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.GitHubTrackerModel']"}),
'trackerquerymodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerQueryModel']", 'unique': 'True', 'primary_key': 'True'})
},
'customs.githubtrackermodel': {
'Meta': {'unique_together': "(('github_name', 'github_repo'),)", 'object_name': 'GitHubTrackerModel', '_ormbases': ['customs.TrackerModel']},
'bitesized_tag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'documentation_tag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'github_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tracker_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'trackermodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['customs.TrackerModel']", 'unique': 'True', 'primary_key': 'True'})
},
'customs.googlequerymodel': {
'Meta': {'object_name': 'GoogleQueryModel', '_ormbases': ['customs.TrackerQueryModel']},
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'tracker': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cust |
impactlab/eemeter | tests/ee/test_annualized_weather_normal.py | Python | mit | 849 | 0 | import tempfile
import pytest
from numpy.testing import assert_allclose
from eemeter.modeling.formatters import Mode | lDataFormatter
from eemeter.ee.derivatives import annualized_weather_normal
from eemeter.testing.mocks import MockModel, MockWeatherClient
from eemeter.weathe | r import TMY3WeatherSource
@pytest.fixture
def mock_tmy3_weather_source():
tmp_dir = tempfile.mkdtemp()
ws = TMY3WeatherSource("724838", tmp_dir, preload=False)
ws.client = MockWeatherClient()
ws._load_data()
return ws
def test_basic_usage(mock_tmy3_weather_source):
formatter = ModelDataFormatter("D")
model = MockModel()
output = annualized_weather_normal(
formatter, model, mock_tmy3_weather_source)
assert_allclose(output['annualized_weather_normal'],
(365, 19.1049731745428, 19.1049731745428, 365))
|
nickhargreaves/AfricanSpending | setup.py | Python | mit | 952 | 0 | from setuptools import setup, find_packages
setup(
name='af | ricanspending',
version='1.0',
description="Mapping the money, across the African continent",
long_description='',
classifiers=[],
keywords='',
author='Code for Africa',
author_email='friedrich@pudo.org',
url='http://www.africanspending.org',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
| install_requires=[
"Flask==0.10.1",
"Flask-Assets==0.10",
"Flask-FlatPages==0.5",
"Flask-Script==2.0.5",
"Frozen-Flask==0.11",
"PyYAML==3.11",
"Unidecode==0.04.16",
"awscli==1.4.4",
"cssmin==0.2.0",
"python-dateutil==2.2",
"python-slugify==0.0.9",
"requests==2.4.3",
"gunicorn>=19.0"
],
tests_require=[],
entry_points="",
)
|
adamcunnington/OlympianAdmin | cstrike/addons/eventscripts/admin/mods/rpg/perks/long_jump/long_jump.py | Python | gpl-3.0 | 539 | 0.003711 | # <path to game directory>/addons/eventscipts/admin/mods/rpg/perks/
# long_jump/long_jump.py
# by | Adam Cunnington
import ps | yco
psyco.full()
from esutils import players
from rpg import rpg
def player_jump(event_var):
user_ID = int(event_var["userid"])
long_jump_level = rpg.get_level(user_ID, _long_jump)
if long_jump_level == 0:
return
players.Player(user_ID).push(float(long_jump_level * 0.1), 0)
_long_jump = rpg.PerkManager("long_jump", 5, lambda x: x * 0.1,
lambda x: x * 20)
|
pliniopereira/ccd3 | src/utils/rodafiltros/Leitura_portas.py | Python | gpl-3.0 | 920 | 0 | import glob
import sys
import serial
def serial_ports():
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
| elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
| result.append(port)
except (OSError, serial.SerialException):
pass
return result
|
google/clusterfuzz | src/clusterfuzz/_internal/base/modules.py | Python | apache-2.0 | 2,700 | 0.00963 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for module management."""
# Do not add any imports to non-standard modules here.
import os
import site
import sys
def _config_modules_directory(root_directory):
"""Get the config modules directory."""
config_dir = os.getenv('CONFIG_DIR_OVERRIDE')
if not config_dir:
config_dir = os.path.join(root_directory, 'src', 'appengine', 'config')
return os.path.join(config_dir, 'modules')
def _patch_appengine_modules_for_bots():
"""Patch out App Engine reliant behaviour from bots."""
if os.getenv('SERVER_SOFTWARE'):
# Not applicable on App Engine.
return
# google.auth uses App Engine credentials based on importability of
# google.appengine.api.app_identity.
try:
from google.auth import app_engine as auth_app_engine
if a | uth_app_engine.app_identity:
auth_app_engine.app_identity | = None
except ImportError:
pass
def fix_module_search_paths():
"""Add directories that we must be able to import from to path."""
root_directory = os.environ['ROOT_DIR']
source_directory = os.path.join(root_directory, 'src')
python_path = os.getenv('PYTHONPATH', '').split(os.pathsep)
third_party_libraries_directory = os.path.join(source_directory,
'third_party')
config_modules_directory = _config_modules_directory(root_directory)
if (os.path.exists(config_modules_directory) and
config_modules_directory not in sys.path):
sys.path.insert(0, config_modules_directory)
python_path.insert(0, config_modules_directory)
if third_party_libraries_directory not in sys.path:
sys.path.insert(0, third_party_libraries_directory)
python_path.insert(0, third_party_libraries_directory)
if source_directory not in sys.path:
sys.path.insert(0, source_directory)
python_path.insert(0, source_directory)
os.environ['PYTHONPATH'] = os.pathsep.join(python_path)
# Add site directory to make from imports work in google namespace.
site.addsitedir(third_party_libraries_directory)
# TODO(ochang): Remove this once SDK is removed from images.
_patch_appengine_modules_for_bots()
|
smlacombe/sageo | app/lib/livestatusconnection.py | Python | gpl-3.0 | 142 | 0.007042 | from app.lib import livestat | us
from app import app
enabled_sites = app.config['SITES']
live = livestatus.MultiSiteConnection(enabled_si | tes)
|
ricardog/raster-project | user-scripts/katia/bii.py | Python | apache-2.0 | 3,202 | 0.002498 | #!/usr/bin/env python
import argparse
import time
import fiona
import multiprocessing
from rasterio.plot import show
import math
import os
import click
#import matlibplot.pyplot as plt
import numpy as np
import numpy.ma as ma
import rasterio
from rasterio.plot import show, show_hist
from projections.rasterset import RasterSet, Raster
from projections.simpleexpr import SimpleExpr
import projections.predicts as predicts
import projections.r2py.modelr as modelr
import projections.utils as utils
import projections.raster_utils as ru
parser = argparse.ArgumentParser(description="bii.py -- BII projections")
parser.add_argument('--mainland', '-m', dest='mainland', default=False,
action='store_true',
help='Project using mainland coefficients '
'(default: islands)')
parser.add_argument('--clip', '-c', dest='clip', default=False,
action='store_true',
help='Clip predictor variables to max value seen '
'during model fitting')
args = parser.parse_args()
if args.mainland:
suffix = 'mainland'
ab_max = 1.655183
sr_max = 1.636021
else:
suffix = 'islands'
ab_max = 1.443549
sr_max = 1.413479
folder = 'clip' if args.clip else 'no-clip'
# pull in all the rasters for computing bii
bii_rs = RasterSet({'abundance': Raster('abundance',
utils.outfn('katia',
folder,
'ab-%s.tif' % suffix)),
'comp_sim': Raster('comp_sim',
utils.outfn('katia',
'ab-cs-%s.tif' % suffix)),
'clip_ab': SimpleExpr('clip_ab',
'clip(abundance, 0, %f)' % ab_max),
'bii_ab': SimpleExpr('bii_ab', 'abundance * comp_sim'),
'bii_ab2': SimpleExpr('bii_ab2', 'clip_ab * comp_sim'),
})
# write out bii raster
bii_rs.write('bii_ab' if args.clip else 'bii_ab2',
utils.outfn('katia', folder,
'abundance-based-bii-%s.tif' % suffix))
# do the same for species richness
# pull in all the rasters for computing bii
bii_rs = RasterSet({'sp_rich': Raster('sp_rich',
utils.outfn('katia',
folder,
'sr-%s.tif' % suffix)),
'comp_sim': Raster('comp_sim',
| utils.outfn('katia',
'sr-cs-%s.tif' % suffix)),
'clip_sr': SimpleExpr('clip_sr',
| 'clip(sp_rich, 0, %f)' % sr_max),
'bii_sr': SimpleExpr('bii_sr', 'sp_rich * comp_sim'),
'bii_sr2': SimpleExpr('bii_sr2', 'clip_sr * comp_sim')})
# write out bii raster
bii_rs.write('bii_sr' if args.clip else 'bii_sr2',
utils.outfn('katia', folder,
'speciesrichness-based-bii-%s.tif' % suffix))
|
miurahr/seahub | tools/seahub-admin.py | Python | apache-2.0 | 2,395 | 0.002923 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (c) 2012-2016 Seafile Ltd.
import sqlite3
import os
import sys
import time
import hashlib
import getpass
# Get .ccnet directory from argument or user input
if len(sys.argv) >= 2:
    ccnet_dir = sys.argv[1]
else:
    home_dir = os.path.join(os.path.expanduser('~'), '.ccnet')
    ccnet_dir = input("Enter ccnet directory:(leave blank for %s) " % home_dir)
    if not ccnet_dir:
        ccnet_dir = home_dir
# Test usermgr.db exists
usermgr_db = os.path.join(ccnet_dir, 'PeerMgr/usermgr.db')
if not os.path.exists(usermgr_db):
    print('%s DOES NOT exist. FAILED' % usermgr_db)
    sys.exit(1)
# Connect db
conn = sqlite3.connect(usermgr_db)
# Get cursor
c = conn.cursor()
# Check whether admin user exists
sql = "SELECT email FROM EmailUser WHERE is_staff = 1"
try:
    c.execute(sql)
except sqlite3.Error as e:
    print("An error orrured:", e.args[0])
    sys.exit(1)
staff_list = c.fetchall()
if staff_list:
    print("Admin is already in database. Email as follows: ")
    print('--------------------')
    for e in staff_list:
        print(e[0])
    print('--------------------')
    # Replacing the admin requires deleting all existing staff rows first.
    choice = input('Previous admin would be deleted, would you like to continue?[y/n] ')
    if choice == 'y':
        sql = "DELETE FROM EmailUser WHERE is_staff = 1"
        try:
            c.execute(sql)
        except sqlite3.Error as e:
            print("An error orrured:", e.args[0])
            sys.exit(1)
        else:
            print('Previous admin is deleted.')
    else:
        conn.close()
        sys.exit(0)

# Create admin user
choice = input('Would you like to create admin user?[y/n]')
if choice != 'y':
    conn.close()
    sys.exit(0)
username = input('E-mail address:')
passwd = getpass.getpass('Password:')
passwd2 = getpass.getpass('Password (again):')
if passwd != passwd2:
    print("Two passwords NOT same.")
    sys.exit(1)
# NOTE(review): unsalted SHA1 matches the legacy Seafile password scheme --
# do not change the hash here without matching the server's expectation.
mySha1 = hashlib.sha1()
mySha1.update(passwd.encode('utf-8'))
enc_passwd = mySha1.hexdigest()
# NOTE(review): username is interpolated into the SQL string; fine for a
# local admin tool but a parameterized query (?, ?, ...) would be safer.
sql = "INSERT INTO EmailUser(email, passwd, is_staff, is_active, ctime) VALUES ('%s', '%s', 1, 1, '%d');" % (username, enc_passwd, time.time()*1000000)
try:
    c = conn.cursor()
    c.execute(sql)
    conn.commit()
except sqlite3.Error as e:
    print("An error occured:", e.args[0])
    sys.exit(1)
else:
    print("Admin user created successfully.")
# Close db
conn.close()
|
AndrBecker/conference-central | utils.py | Python | apache-2.0 | 1,577 | 0 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
    """Return a stable identifier for *user*.

    id_type selects the strategy:
      - "email": the user's email address (default).
      - "oauth": work around App Engine's missing user-id by asking Google's
        tokeninfo endpoint about the bearer token in HTTP_AUTHORIZATION.
      - "custom": look up (or mint) an id via the datastore Profile model.
    """
    if id_type == "email":
        return user.email()

    if id_type == "oauth":
        """A workaround implementation for getting userid."""
        auth = os.getenv('HTTP_AUTHORIZATION')
        bearer, token = auth.split()
        token_type = 'id_token'
        if 'OAUTH_USER_ID' in os.environ:
            token_type = 'access_token'

        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
               % (token_type, token))
        user = {}
        wait = 1
        # Retry up to three times with a growing back-off; on HTTP 400 with
        # 'invalid_token' retry immediately using the access_token form.
        for i in range(3):
            resp = urlfetch.fetch(url)
            if resp.status_code == 200:
                user = json.loads(resp.content)
                break
            elif resp.status_code == 400 and 'invalid_token' in resp.content:
                url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
                       % ('access_token', token))
            else:
                time.sleep(wait)
                wait = wait + i
        return user.get('user_id', '')

    if id_type == "custom":
        # implement your own user_id creation and getting algorythm
        # this is just a sample that queries datastore for an existing profile
        # and generates an id if profile does not exist for an email
        # BUG FIX: previously queried the undefined name `Conference` (only
        # `Profile` is imported), raising NameError; and the bare Query
        # object is always truthy, so .get() is needed to fetch an entity.
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            # NOTE(review): assumes the Profile model exposes an id()
            # accessor -- confirm against models.py.
            return profile.id()
        else:
            return str(uuid.uuid1().get_hex())
|
google/deepvariant | third_party/nucleus/util/errors_test.py | Python | bsd-3-clause | 3,169 | 0.003787 | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the f | ollowing conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the | above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for third_party.nucleus.util.errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
import errno
import sys
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import mock
from third_party.nucleus.util import errors
class ErrorsTest(parameterized.TestCase):
  """Unit tests for the helpers in third_party.nucleus.util.errors."""

  @parameterized.parameters(
      ('empty flag', errors.CommandLineError),
      ('bad value', ValueError),
      ('base error', errors.Error),
  )
  def test_log_and_raise(self, msg, cls):
    with mock.patch.object(logging, 'error') as mock_logging:
      # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
      # use the modern assertRaisesRegex name (available since Python 3.2).
      with self.assertRaisesRegex(cls, msg):
        errors.log_and_raise(msg, cls)
      mock_logging.assert_called_once_with(msg)

  @parameterized.parameters(
      (ValueError, 'ValueError exception'),
      (IOError, 'IOError exception'),
  )
  def test_clean_commandline_error_exit_raise_non_allowed(self, exc_type, msg):
    # Exceptions outside the allowed set must propagate unchanged.
    with self.assertRaisesRegex(exc_type, msg):
      with errors.clean_commandline_error_exit():
        raise exc_type(msg)

  @parameterized.parameters(
      (errors.CommandLineError, errno.ENOENT),
      (errors.Error, errno.EINVAL),
  )
  def test_clean_commandline_error_exit_clean_exit(self, exc_type, exit_value):
    # Allowed exceptions are converted into a clean sys.exit(exit_value).
    with mock.patch.object(sys, 'exit') as mock_exit:
      with errors.clean_commandline_error_exit(exit_value=exit_value):
        raise exc_type()
      mock_exit.assert_called_once_with(exit_value)


if __name__ == '__main__':
  absltest.main()
|
sensysnetworks/uClinux | user/python/Tools/scripts/pathfix.py | Python | gpl-2.0 | 3,851 | 0.033238 | #! /usr/bin/env python
# Cha | nge the #! line occurring in Python scr | ipts. The new interpreter
# pathname must be given with a -i option.
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixfile() you can turn this
# into a program for a different change to Python programs...
import sys
import regex
import os
from stat import *
import string
import getopt
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
new_interpreter = None
def main():
    # Parse -i /new/interpreter, then fix every file/directory argument.
    # Python 2 code: note the old `except err, var` syntax throughout.
    global new_interpreter
    usage = ('usage: %s -i /interpreter file-or-directory ...\n' %
             sys.argv[0])
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:')
    except getopt.error, msg:
        err(msg + '\n')
        err(usage)
        sys.exit(2)
    for o, a in opts:
        if o == '-i':
            new_interpreter = a
    # The replacement interpreter must be an absolute path.
    if not new_interpreter or new_interpreter[0] != '/' or not args:
        err('-i option or file-or-directory missing\n')
        err(usage)
        sys.exit(2)
    bad = 0
    for arg in args:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    # Exit status 1 if any file failed, 0 otherwise.
    sys.exit(bad)
# Matches simple Python module names like "foo_bar.py" (old `regex` module;
# its match() returns a length >= 0 on success, -1 on failure).
ispythonprog = regex.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    # True when `name` looks like a Python module file.
    return ispythonprog.match(name) >= 0

def recursedown(dirname):
    # Recursively fix all Python files under `dirname`.
    # Returns nonzero if any file failed; symlinks are skipped silently.
    dbg('recursedown(' + `dirname` + ')\n')
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        err(dirname + ': cannot list directory: ' + `msg` + '\n')
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            # Recurse into subdirectories after the files, to keep output
            # grouped per directory.
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad
def fix(filename):
    # Rewrite `filename` in place if its #! line needs changing.
    # Returns 1 on failure, falsy on success/no-change. The original file
    # is preserved as "filename~"; work happens in a temp file "@filename".
    ## dbg('fix(' + `filename` + ')\n')
    try:
        f = open(filename, 'r')
    except IOError, msg:
        err(filename + ': cannot open: ' + `msg` + '\n')
        return 1
    line = f.readline()
    fixed = fixline(line)
    if line == fixed:
        # No temp file is written when nothing changes.
        rep(filename+': no change\n')
        f.close()
        return
    head, tail = os.path.split(filename)
    tempname = os.path.join(head, '@' + tail)
    try:
        g = open(tempname, 'w')
    except IOError, msg:
        f.close()
        err(tempname+': cannot create: '+`msg`+'\n')
        return 1
    rep(filename + ': updating\n')
    g.write(fixed)
    BUFSIZE = 8*1024
    while 1:
        buf = f.read(BUFSIZE)
        if not buf: break
        g.write(buf)
    g.close()
    f.close()
    # Finishing touch -- move files
    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err(tempname + ': warning: chmod failed (' + `msg` + ')\n')
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err(filename + ': warning: backup failed (' + `msg` + ')\n')
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err(filename + ': rename failed (' + `msg` + ')\n')
        return 1
    # Return succes
    return 0
def fixline(line):
    # Return the replacement first line: only rewrite a #! line that
    # mentions "python"; anything else is returned untouched.
    if line[:2] != '#!':
        return line
    if string.find(line, "python") < 0:
        return line
    return '#! %s\n' % new_interpreter

main()
|
z9484/ALoMA | pytmx/tmxloader3.py | Python | gpl-3.0 | 18,775 | 0.003941 | """
Map loader for TMX Files
bitcraft (leif.theden at gmail.com)
v.7 -- for python 3.x
If you have any problems, please contact me via email.
Tested with Tiled 0.7.1 for Mac.
released under the LGPL v3
======================================================================
Design Goals:
Simple api
Memory efficient and fast
Quick access to tiles, attributes, and properties
Non-Goals:
Rendering
Works:
Pygame image loading
Map loading with all required types
Properties for all types: maps, layers, objects, tiles
Automatic flipping of tiles
Todo:
Pygame: test colorkey transparency
Optimized for maps that do not make heavy use of tile
properties. If i find that it is used a lot then i can rework
it for better performance.
======================================================================
Basic usage sample:
>>> import tmxloader
>>> tiledmap = tmxloader.load_pygame("map.tmx")
When you want to draw tiles, you simply call "get_tile_image":
>>> image = tiledmap.get_tile_image(x, y, layer)
>>> screen.blit(position, image)
Layers, objectgroups, tilesets, and maps all have a simple way to access
metadata that was set inside tiled: they all become class attributes.
>>> print(layer.tilewidth)
32
>>> print(layer.weather)
'sunny'
Tiles are the exception here, and must be accessed through "getTileProperties"
and are regular Python dictionaries:
>>> tile = tiledmap.getTileProperties(x, y, layer)
>>> tile["name"]
'CobbleStone'
"""
from itertools import chain
# internal flags
FLIP_X = 1
FLIP_Y = 2
# Tiled gid flags
GID_FLIP_X = 1<<31
GID_FLIP_Y = 1<<30
class TiledElement(object):
    """Base class for all TMX element types.

    The loader attaches XML attributes to instances as plain Python
    attributes, so metadata set in Tiled is reachable as `element.attr`.
    """
    pass


class TiledMap(TiledElement):
    """
    not really useful unless "loaded" ie: don't instance directly.
    see the pygame loader for inspiration
    """

    def __init__(self):
        TiledElement.__init__(self)
        self.layers = []  # list of all layer types (tile layers + object layers)
        self.tilesets = []  # list of TiledTileset objects
        self.tilelayers = []  # list of TiledLayer objects
        self.objectgroups = []  # list of TiledObjectGroup objects
        self.tile_properties = {}  # dict of tiles that have additional metadata (properties)
        self.filename = None
        # GID 0 means "no tile" in the TMX format, so index 0 is a dummy
        # value and real tile images start at index 1.
        self.images = [0]
        # defaults from the TMX specification
        self.version = 0.0
        self.orientation = None
        self.width = 0
        self.height = 0
        self.tilewidth = 0
        self.tileheight = 0

    def get_tile_image(self, x, y, layer):
        """
        return the tile image for this location
        x and y must be integers and are in tile coordinates, not pixel
        return value will be 0 if there is no tile with that location.
        """
        try:
            gid = self.tilelayers[layer].data[y][x]
        except (IndexError, ValueError):
            msg = "Coords: ({0},{1}) in layer {2} is invalid.".format(x, y, layer)
            raise Exception(msg)
        else:
            try:
                return self.images[gid]
            except (IndexError, ValueError):
                msg = "Coords: ({0},{1}) in layer {2} has invalid GID: {3}/{4}.".format(x, y, layer, gid, len(self.images))
                raise Exception(msg)

    def getTileGID(self, x, y, layer):
        """
        return GID of a tile in this location
        x and y must be integers and are in tile coordinates, not pixel
        """
        try:
            return self.tilelayers[layer].data[y][x]
        except (IndexError, ValueError):
            msg = "Coords: ({0},{1}) in layer {2} is invalid.".format(x, y, layer)
            raise Exception(msg)

    def getDrawOrder(self):
        """
        return a list of objects in the order that they should be drawn
        this will also exclude any layers that are not set to visible
        may be useful if you have objects and want to control rendering
        from tiled
        """
        raise NotImplementedError

    def getTileImages(self, r, layer):
        """
        return a group of tiles in an area
        expects a pygame rect or rect-like list/tuple
        useful if you don't want to repeatedly call get_tile_image
        """
        raise NotImplementedError

    def getObjects(self):
        """
        Return iterator all of the objects associated with this map
        """
        return chain(*[i.objects for i in self.objectgroups])

    def getTileProperties(self, x, y, layer):
        """
        return the properties for the tile, if any
        x and y must be integers and are in tile coordinates, not pixel
        returns a dict if there are properties, otherwise will be None
        """
        try:
            gid = self.tilelayers[layer].data[y][x]
        except (IndexError, ValueError):
            msg = "Coords: ({0},{1}) in layer {2} is invalid.".format(x, y, layer)
            raise Exception(msg)
        # BUG FIX: a dict lookup raises KeyError, not IndexError/ValueError,
        # so tiles without custom properties used to escape as an uncaught
        # KeyError even though the docstring promises None. Most tiles have
        # no properties, so return None for them.
        return self.tile_properties.get(gid)

    def getTilePropertiesByGID(self, gid):
        """Return the properties dict for a raw GID, or None if absent."""
        try:
            return self.tile_properties[gid]
        except KeyError:
            return None
# the following classes get their attributes filled in with the loader
class TiledTileset(TiledElement):
    """A <tileset> element: GID range plus tile geometry metadata.

    The loader overwrites these specification defaults from XML attributes.
    """

    def __init__(self):
        TiledElement.__init__(self)
        # identity / GID range
        self.name = None
        self.firstgid = 0
        self.lastgid = 0
        # tile geometry (pixels)
        self.tilewidth = 0
        self.tileheight = 0
        self.spacing = 0
        self.margin = 0
class TiledLayer(TiledElement):
    """A tile <layer>: a 2D grid of GIDs plus display attributes.

    `data` stays None until the loader fills it in; the remaining
    attributes carry the TMX specification defaults.
    """

    def __init__(self):
        TiledElement.__init__(self)
        self.data = None
        self.name = None
        self.visible = 1
        self.opacity = 1.0
class TiledObjectGroup(TiledElement):
    """An <objectgroup>: a named container of TiledObject instances."""

    def __init__(self):
        TiledElement.__init__(self)
        self.name = None    # default from the TMX specification
        self.objects = []   # filled by the loader
class TiledObject(TiledElement):
    """A single <object>: a named, typed rectangle (or tile) on the map.

    __slots__ keeps per-instance memory low for maps with many objects.
    All attributes start at the TMX specification defaults.
    """
    __slots__ = ['name', 'type', 'x', 'y', 'width', 'height', 'gid']

    def __init__(self):
        TiledElement.__init__(self)
        for attr in ('name', 'type'):
            setattr(self, attr, None)
        for attr in ('x', 'y', 'width', 'height', 'gid'):
            setattr(self, attr, 0)
def load_tmx(filename):
"""
Utility function to parse a Tiled TMX and return a usable object.
Images will not be loaded, so probably not useful to call this directly
See the load_pygame func for an idea of what to do
"""
from xml.dom.minidom import parse
from itertools import tee, islice, chain
from collections import defaultdict
from struct import unpack
import a | rray, os
# used to change the unicode string returned from minidom to
# proper python variable types.
types = {
"version": float,
"orientation": str,
"width": int,
"height": int,
"tilewidth": int,
"tileheight": int,
"firstgid": int,
"source": str,
"name": str,
"spacing": int,
"margin": int,
"source": str,
"trans": str,
"id": int,
"opacity": | float,
"visible": bool,
"encoding": str,
"compression": str,
"gid": int,
"type": str,
"x": int,
"y": int,
"value": str,
}
def pairwise(iterable):
# return a list as a sequence of pairs
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def group(l, n):
# return a list as a sequence of n tuples
return zip(*[islice(l, i, None, n) for i in range(n)])
def parse_pro |
sargam111/python | textblob/en/np_extractors.py | Python | mit | 6,734 | 0.000446 | # -*- coding: utf-8 -*-
'''Various noun phrase extractors.'''
from __future__ import unicode_literals, absolute_import
import nltk
from textblob.taggers import PatternTagger
from textblob.decorators import requires_nltk_corpus
from textblob.utils import tree2str, filter_insignificant
from textblob.base import BaseNPExtractor
class ChunkParser(nltk.ChunkParserI):
    # NP chunker backed by a bigram tagger (unigram backoff) trained on the
    # ConLL-2000 chunking corpus; training is deferred until first parse().
    def __init__(self):
        self._trained = False
    @requires_nltk_corpus
    def train(self):
        '''Train the Chunker on the ConLL-2000 corpus.'''
        # Training data: per-sentence lists of (POS tag, chunk tag) pairs;
        # the words themselves are discarded -- chunks are predicted from
        # POS sequences alone.
        train_data = [[(t, c) for _, t, c in nltk.chunk.tree2conlltags(sent)]
                      for sent in
                      nltk.corpus.conll2000.chunked_sents('train.txt',
                                                          chunk_types=['NP'])]
        unigram_tagger = nltk.UnigramTagger(train_data)
        self.tagger = nltk.BigramTagger(train_data, backoff=unigram_tagger)
        self._trained = True
    def parse(self, sentence):
        '''Return the parse tree for the sentence.'''
        # `sentence` is a list of (word, pos) pairs; lazily train on first use.
        if not self._trained:
            self.train()
        pos_tags = [pos for (word, pos) in sentence]
        tagged_pos_tags = self.tagger.tag(pos_tags)
        chunktags = [chunktag for (pos, chunktag) in tagged_pos_tags]
        # Reassemble (word, pos, chunk) triples for conlltags2tree.
        conlltags = [(word, pos, chunktag) for ((word, pos), chunktag) in
                     zip(sentence, chunktags)]
        return nltk.chunk.util.conlltags2tree(conlltags)
class ConllExtractor(BaseNPExtractor):
    '''A noun phrase extractor that uses chunk parsing trained with the
    ConLL-2000 training corpus.
    '''
    POS_TAGGER = PatternTagger()
    # The context-free grammar with which to filter the noun phrases
    CFG = {
        ('NNP', 'NNP'): 'NNP',
        ('NN', 'NN'): 'NNI',
        ('NNI', 'NN'): 'NNI',
        ('JJ', 'JJ'): 'JJ',
        ('JJ', 'NN'): 'NNI',
    }
    # POS suffixes that will be ignored
    INSIGNIFICANT_SUFFIXES = ['DT', 'CC', 'PRP$', 'PRP']
    def __init__(self, parser=None):
        # Default to the corpus-trained ChunkParser defined above.
        self.parser = ChunkParser() if not parser else parser
    def extract(self, text):
        '''Return a list of noun phrases (strings) for body of text.'''
        sentences = nltk.tokenize.sent_tokenize(text)
        noun_phrases = []
        for sentence in sentences:
            parsed = self._parse_sentence(sentence)
            # Get the string representation of each subtree that is a
            # noun phrase tree: keep only NP subtrees that survive
            # stop-word filtering and match the CFG above.
            phrases = [_normalize_tags(filter_insignificant(each,
                       self.INSIGNIFICANT_SUFFIXES)) for each in parsed
                       if isinstance(each, nltk.tree.Tree) and each.label()
                       == 'NP' and len(filter_insignificant(each)) >= 1
                       and _is_match(each, cfg=self.CFG)]
            nps = [tree2str(phrase) for phrase in phrases]
            noun_phrases.extend(nps)
        return noun_phrases
    def _parse_sentence(self, sentence):
        '''Tag and parse a sentence (a plain, untagged string).'''
        tagged = self.POS_TAGGER.tag(sentence)
        return self.parser.parse(tagged)
class FastNPExtractor(BaseNPExtractor):
    '''A fast and simple noun phrase extractor.
    Credit to Shlomi Babluk. Link to original blog post:
    http://thetokenizer.com/2013/05/09/efficient-way-to-extract-the-main-topics-of-a-sentence/
    '''
    # Adjacent tag pairs that merge into a single phrase tag.
    CFG = {
        ('NNP', 'NNP'): 'NNP',
        ('NN', 'NN'): 'NNI',
        ('NNI', 'NN'): 'NNI',
        ('JJ', 'JJ'): 'JJ',
        ('JJ', 'NN'): 'NNI',
    }
    def __init__(self):
        # Tagger training is deferred until the first extract() call.
        self._trained = False
    @requires_nltk_corpus
    def train(self):
        # Bigram tagger with unigram backoff over the Brown news corpus;
        # the regexp tagger is the final fallback for unknown words.
        train_data = nltk.corpus.brown.tagged_sents(categories='news')
        regexp_tagger = nltk.RegexpTagger([
            (r'^-?[0-9]+(.[0-9]+)?$', 'CD'),
            (r'(-|:|;)$', ':'),
            # NOTE(review): r'\'*$' matches *every* string (zero or more
            # quotes at end), so patterns below it are unreachable in this
            # fallback tagger -- looks unintended, but matches upstream;
            # confirm before changing.
            (r'\'*$', 'MD'),
            (r'(The|the|A|a|An|an)$', 'AT'),
            (r'.*able$', 'JJ'),
            (r'^[A-Z].*$', 'NNP'),
            (r'.*ness$', 'NN'),
            (r'.*ly$', 'RB'),
            (r'.*s$', 'NNS'),
            (r'.*ing$', 'VBG'),
            (r'.*ed$', 'VBD'),
            (r'.*', 'NN'),
        ])
        unigram_tagger = nltk.UnigramTagger(train_data, backoff=regexp_tagger)
        self.tagger = nltk.BigramTagger(train_data, backoff=unigram_tagger)
        self._trained = True
        return None
    def _tokenize_sentence(self, sentence):
        '''Split the sentence into single words/tokens'''
        tokens = nltk.word_tokenize(sentence)
        return tokens
    def extract(self, sentence):
        '''Return a list of noun phrases (strings) for body of text.'''
        if not self._trained:
            self.train()
        tokens = self._tokenize_sentence(sentence)
        tagged = self.tagger.tag(tokens)
        tags = _normalize_tags(tagged)
        # Repeatedly merge the leftmost adjacent pair found in CFG until no
        # more merges apply; merged items carry the combined words.
        merge = True
        while merge:
            merge = False
            for x in range(0, len(tags) - 1):
                t1 = tags[x]
                t2 = tags[x + 1]
                key = t1[1], t2[1]
                value = self.CFG.get(key, '')
                if value:
                    merge = True
                    tags.pop(x)
                    tags.pop(x)
                    match = '%s %s' % (t1[0], t2[0])
                    pos = value
                    tags.insert(x, (match, pos))
                    break
        # Keep proper nouns (NNP) and merged noun groups (NNI).
        matches = [t[0] for t in tags if t[1] in ['NNP', 'NNI']]
        return matches
### Utility methods ###
def _normalize_tags(chunk):
'''Normalize the corpus tags.
("NN", "NN-PL", "NNS") -> "NN"
'''
ret = []
for word, tag in chunk:
if tag == 'NP-TL' or tag == 'NP':
ret.append((word, 'NNP'))
continue
if tag.endswith('-TL'):
ret.append((word, tag[:-3]))
continue
if tag.endswith('S'):
ret.append((word, tag[:-1]))
continue
ret.append((word, tag))
return ret
def _is_match(tagged_phrase, cfg):
'''Return whether or not a tagged phrases matches a context-free grammar.
'''
copy = list(tagged_phrase) # A copy of the list
merge = True
while merge:
merge = False
for i in range(len(copy) - 1):
first, second = copy[i], copy[i + 1]
key = first[1], second[1] # Tuple of tags e.g. ('NN', 'JJ')
value = cfg.get(key, None)
if value:
merge = True
copy.pop(i)
copy.pop(i)
match = '{0} {0}'.format(first[0], second[0])
pos = value
copy.insert(i, (match, pos))
break
match = any([t[1] in ('NNP', 'NNI') for t in copy])
return match
|
kbrannan/PyHSPF | examples/advanced/preprocess_02040101.py | Python | bsd-3-clause | 1,347 | 0.020787 | # preprocess_02040101.py
#
# David J. Lampert (djlampert@gmail.com)
#
# last updated: 08/09/2015
#
# Purpose: Extracts GIS data from sources and builds the input file for HSPF
# for a given set of assumptions for HUC 02040101, Delaware River, NY + DE.
import os, datetime
# `source` holds the raw NHDPlus/CDL data; results go to `destination`.
source = 'Z:'
destination = 'C:/HSPF_data'
from pyhspf.preprocessing import Preprocessor
# 8-digit hydrologic unit code of interest; the state, date range, and CSV
# file names below identify the source data files used by the preprocessor
HUC8 = '02040101'
state = 'Delaware'
start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)
# maximum drainage area used for subbasin delineation -- units not shown
# here; TODO confirm against the Preprocessor documentation
drainmax = 400
# CSV files mapping Cropland Data Layer categories to model land-use classes
aggregation = 'cdlaggregation.csv'
landuse = 'lucs.csv'
if __name__ == '__main__':
    processor = Preprocessor()
    processor.set_network(source)
    processor.set_output(destination)
    processor.set_parameters(HUC8 = HUC8,
                             start = start,
                             end = end,
                             state = state,
                             cdlaggregate = aggregation,
                             landuse = landuse)
    processor.preprocess(drainmax = drainmax, parallel = False)
# this took about 40 minutes to run on my 3 year old laptop not counting the
# time to download the raw data from the NHDPlus and CDL
edwinsteele/biblebox-pi | ansible/plugins/mitogen-0.2.2/ansible_mitogen/services.py | Python | apache-2.0 | 16,202 | 0.000062 | # Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Classes in this file define Mitogen 'services' that run (initially) within the
connection multiplexer process that is forked off the top-level controller
process.
Once a worker process connects to a multiplexer process
(Connection._connect()), it communicates with these services to establish new
connections, grant access to files by children, and register for notification
when a child has completed a job.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import os
import os.path
import sys
import threading
import mitogen
import mitogen.service
import ansible_mitogen.module_finder
import ansible_mitogen.target
LOG = logging.getLogger(__name__)
if sys.version_info[0] == 3:
    # Python 3: re-raise `value` with an explicit traceback attached.
    def reraise(tp, value, tb):
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    # Python 2: the three-argument raise is a syntax error under Python 3,
    # so it must be hidden inside exec() to keep this module importable.
    exec(
        "def reraise(tp, value, tb=None):\n"
        "    raise tp, value, tb\n"
    )
class Error(Exception):
    """Base exception for failures raised by this service module."""
    pass
class ContextService(mitogen.service.Service):
"""
Used by workers to fetch the single Context instance corresponding to a
connection configuration, creating the matching connection if it does not
exist.
For connection methods and their parameters, see:
https://mitogen.readthedocs.io/en/latest/api.html#context-factories
This concentrates connections in the top-level process, which may become a
bottleneck. The bottleneck can be removed using per-CPU connection
processes and arranging for the worker to select one according to a hash of
the connection parameters (sharding).
"""
max_interpreters = int(os.getenv('MITOGEN_MAX_INTERPRETERS', '20'))
    def __init__(self, *args, **kwargs):
        """Initialize bookkeeping; all of these maps are mutated under
        :attr:`_lock`, since service calls arrive from multiple threads."""
        super(ContextService, self).__init__(*args, **kwargs)
        self._lock = threading.Lock()
        #: Records the :meth:`get` result dict for successful calls, returned
        #: for identical subsequent calls. Keyed by :meth:`key_from_kwargs`.
        self._response_by_key = {}
        #: List of :class:`mitogen.core.Latch` awaiting the result for a
        #: particular key.
        self._latches_by_key = {}
        #: Mapping of :class:`mitogen.core.Context` -> reference count. Each
        #: call to :meth:`get` increases this by one. Calls to :meth:`put`
        #: decrease it by one.
        self._refs_by_context = {}
        #: List of contexts in creation order by via= parameter. When
        #: :attr:`max_interpreters` is reached, the most recently used context
        #: is destroyed to make room for any additional context.
        self._lru_by_via = {}
        #: :meth:`key_from_kwargs` result by Context.
        self._key_by_context = {}
    @mitogen.service.expose(mitogen.service.AllowParents())
    @mitogen.service.arg_spec({
        'context': mitogen.core.Context
    })
    def put(self, context):
        """
        Return a reference, making it eligible for recycling once its reference
        count reaches zero.
        """
        LOG.debug('%r.put(%r)', self, context)
        # A zero refcount here means the context was already released,
        # most likely because shutdown_all() ran; warn rather than go
        # negative.
        if self._refs_by_context.get(context, 0) == 0:
            LOG.warning('%r.put(%r): refcount was 0. shutdown_all called?',
                        self, context)
            return
        self._refs_by_context[context] -= 1
def key_from_kwargs(self, **kwargs):
"""
Generate a deduplication key from the request.
"""
out = []
stack = [kwargs]
while stack:
obj = stack.pop()
if isinstance(obj, dict):
stack.extend(sorted(obj.items()))
elif isinstance(obj, (list, tuple)):
stack.extend(obj)
else:
out.append(str(obj))
return ''.join(out)
    def _produce_response(self, key, response):
        """
        Reply to every waiting request matching a configuration key with a
        response dictionary, deleting the list of waiters when done.

        :param str key:
            Result of :meth:`key_from_kwargs`
        :param dict response:
            Response dictionary
        :returns:
            Number of waiters that were replied to.
        """
        # pop() under the lock ensures each waiter list is answered exactly
        # once even if two results race for the same key.
        self._lock.acquire()
        try:
            latches = self._latches_by_key.pop(key)
            count = len(latches)
            for latch in latches:
                latch.put(response)
        finally:
            self._lock.release()
        return count
    def _shutdown(self, context, lru=None, new_context=None):
        """
        Arrange for `context` to be shut down, and optionally add `new_context`
        to the LRU list while holding the lock.
        """
        LOG.info('%r._shutdown(): shutting down %r', self, context)
        # shutdown() is requested before taking the lock; only the local
        # bookkeeping below needs mutual exclusion.
        context.shutdown()
        key = self._key_by_context[context]
        self._lock.acquire()
        try:
            del self._response_by_key[key]
            del self._refs_by_context[context]
            del self._key_by_context[context]
            if lru and context in lru:
                lru.remove(context)
            if new_context:
                lru.append(new_context)
        finally:
            self._lock.release()
    def _update_lru(self, new_context, spec, via):
        """
        Update the LRU ("MRU"?) list associated with the connection described
        by `kwargs`, destroying the most recently created context if the list
        is full. Finally add `new_context` to the list.
        """
        lru = self._lru_by_via.setdefault(via, [])
        if len(lru) < self.max_interpreters:
            lru.append(new_context)
            return
        # List is full: evict the newest context that is not in use.
        # The for/else fires the warning only when no idle context exists.
        for context in reversed(lru):
            if self._refs_by_context[context] == 0:
                break
        else:
            LOG.warning('via=%r reached maximum number of interpreters, '
                        'but they are all marked as in-use.', via)
            return
        self._shutdown(context, lru=lru, new_context=new_context)
    @mitogen.service.expose(mitogen.service.AllowParents())
    def shutdown_all(self):
        """
        For testing use, arrange for all connections to be shut down.
        """
        # list() snapshots the keys because _shutdown() mutates the dict.
        for context in list(self._key_by_context):
            self._shutdown(context)
        self._lru_by_via = {}
def _on_stream_disconnect(self, stream):
"""
Respond to Stream disconnection by deleting any record of contexts
reached via that stream. This method runs in the Broker thread and must
not to block.
"""
# TODO: there is a race between creation of a context and disconnection
|
Swordf1sh/Moderat | map_demo/test.py | Python | gpl-2.0 | 4,307 | 0.003715 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from mapstyle import style
maphtml = '''
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no" />
<style type="text/css">
html { height: 100% }
body { height: 100%; margin: 0px; padding: 0px }
#map_canvas { height: 100% }
</style>
<script type="text/javascript"
src="http://maps.google.com/maps/api/js?sensor=false">
</script>
<script type="text/javascript">
var map;
var clients = {};
function initialize() {
var latlng = new google.maps.LatLng(44.0727142, -26.533);
var myOptions = {
zoom: 3,
center: latlng,
mapTypeId: google.maps.MapTypeId.ROADMAP,
disableDefaultUI: true
};
map = new google.maps.Map(document.getElementById("map_canvas"),
myOptions);
var styles = '''+style+'''
map.setOptions({styles: styles});
}
function addMarker(id, lat, lng, ip_address, alias) {
var myLatLng = new google.maps.LatLng(lat, lng);
clients[id] = new google.maps.Marker({position: myLatLng,
| icon: "C:/Users/uripa/Desktop/moderat/assets/hacked.png",
ip_address: ip_address,
alias: alias,
| id: id,
});
clients[id].setMap(map)
addInfoWindow(clients[id]);
}
function addInfoWindow(client) {
var header = "<p align='center' style='background: #2c3e50; color: #c9f5f7; padding: 10px;'>" + client.alias + "<br>" + client.ip_address + "</p>"
var shell = "<input type='button' style='background: transparent; color: #34495e; border: none;' onClick='gotoNode("+client.id+")'value='Remote Shell'/><br>"
var explorer = "<input type='button' style='background: transparent; color: #34495e; border: none;' onClick='gotoNode("+client.id+")'value='Remote Explorer'/><br>"
var scripting = "<input type='button' style='background: transparent; color: #34495e; border: none;' onClick='gotoNode("+client.id+")'value='Remote Scripting'/><br>"
var desktop = "<input type='button' style='background: transparent; color: #34495e; border: none;' onClick='gotoNode("+client.id+")'value='Remote Desktop'/><br>"
var webcam = "<input type='button' style='background: transparent; color: #34495e; border: none;' onClick='gotoNode("+client.id+")'value='Remote Webcam'/><br>"
var info = header + shell + explorer + scripting + desktop + webcam + "</div>"
var infoWindow = new google.maps.InfoWindow({
content: info
});
google.maps.event.addListener(client, 'click', function () {
infoWindow.open(map, client);
});
}
function gotoNode(id) {
self.browse(clients[id].url)
}
</script>
</head>
<body onload="initialize();">
<div id="map_canvas" style="width:100%; height:100%"></div>
</body>
</html>
'''
class Browser(QApplication):
    """Minimal PyQt4 app that embeds the Google Maps page (maphtml) in a QWebView.

    The Python object is exposed to the page's JavaScript under the name
    'self' (via addToJavaScriptWindowObject), so the page's gotoNode() can
    call back into browse().  Instantiating this class starts the Qt event
    loop and blocks until the window is closed.
    """
    def __init__(self):
        QApplication.__init__(self, [])
        self.window = QWidget()
        self.window.setWindowTitle("Google Google Maps Maps")
        self.web = QWebView(self.window)
        self.web.setMinimumSize(800,800)
        # Expose this Python object to the page's JavaScript as 'self'.
        self.web.page().mainFrame().addToJavaScriptWindowObject('self', self)
        self.web.setHtml(maphtml)
        self.button = QPushButton('AddMarker')
        self.button.clicked.connect(self.addMarker)
        self.layout = QVBoxLayout(self.window)
        self.layout.addWidget(self.web)
        self.layout.addWidget(self.button)
        self.window.show()
        # Enter the Qt event loop; blocks until the application exits.
        self.exec_()
    def addMarker(self):
        # Inject four hard-coded markers by calling the page's JS addMarker().
        self.web.page().mainFrame().evaluateJavaScript(QString("addMarker(1, 41.5, 45.2, '92.125.102.146', 'UG-Giorgi')"))
        self.web.page().mainFrame().evaluateJavaScript(QString("addMarker(1, 41.3, 45.1, '94.125.102.146', 'UG-Giorgi')"))
        self.web.page().mainFrame().evaluateJavaScript(QString("addMarker(1, 41.1, 45.3, '92.243.102.146', 'UG-Giorgi')"))
        self.web.page().mainFrame().evaluateJavaScript(QString("addMarker(1, 41.2, 45.4, '102.125.102.146', 'UG-Giorgi')"))
    @pyqtSlot(str)
    def browse(self, url):
        # Slot invoked from page JavaScript (gotoNode) with the clicked node's URL.
        print url
Browser()
mbuesch/cnc-control | driver/setup_driver.py | Python | gpl-2.0 | 215 | 0.097674 | from | distutils.core import setup
# Distutils packaging metadata for the CNC-Control device driver module.
# Fixed corrupted keyword 'py_module | s' -> 'py_modules'.
setup(
    name = "cnccontrol-driver",
    description = "CNC-Control device driver",
    author = "Michael Buesch",
    author_email = "m@bues.ch",
    py_modules = [ "cnccontrol_driver", ],
)
|
gedhe/sidesa2.0 | input_data_kemiskinan.py | Python | gpl-2.0 | 13,554 | 0.014092 | #Boa:Frame:input_data_kemiskinan
import wx
import wx.lib.buttons
import frm_sideka_menu
import sqlite3
db = sqlite3.connect('/opt/sidesa/sidesa')
cur = db.cursor()
def create(parent):
    # Boa-generated factory: construct the dialog with the given parent window.
    return input_data_kemiskinan(parent)
[wxID_INPUT_DATA_KEMISKINAN, wxID_INPUT_DATA_KEMISKINANBUTTON1,
wxID_INPUT_DATA_KEMISKINANBUTTON2, wxID_INPUT_DATA_KEMISKINANBUTTON3,
wxID_INPUT_DATA_KEMISKINANCARI_KK, wxID_INPUT_DATA_KEMISKINANINPUT_ALAMAT,
wxID_INPUT_DATA_KEMISKINANISIPENDUDUK, wxID_INPUT_DATA_KEMISKINANNAMA_KK,
wxID_INPUT_DATA_KEMISKINANNOMOR_KK, wxID_INPUT_DATA_KEMISKINANPROG1,
wxID_INPUT_DATA_KEMISKINANPROG2, wxID_INPUT_DATA_KEMISKINANPROG3,
wxID_INPUT_DATA_KEMISKINANPROG4, wxID_INPUT_DATA_KEMISKINANPROG5,
wxID_INPUT_DATA_KEMISKINANPROG6, wxID_INPUT_DATA_KEMISKINANPROG7,
wxID_INPUT_DATA_KEMISKINANPROG8, wxID_INPUT_DATA_KEMISKINANSTATICTEXT1,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT10,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT11,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT12,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT13,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT14,
wxID_INPUT_ | DATA_KEMISKINANSTATICTEXT2, wxID_INPUT_DATA_KEMISKINANSTATICTEXT3,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT4, wxID_INPUT_DATA_KEMISKINANSTATICTEXT5,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT6, wxID_INPUT_DATA_KEMISKINANSTATICTEXT7,
wxID_INPUT_DATA_KEMISKINANSTATICTEXT8, wxID_INPUT_DATA_KEMISKINANSTATICTEXT9,
wxID_INPUT_DATA_KEMISKINANSTATUS_MISKIN,
] = [wx.NewId() for _init_ctrls in range(32)]
class input_data_kemiskinan(wx.Dialog): |
def _init_coll_isipenduduk_Columns(self, parent):
# generated method, don't edit
parent.InsertColumn(col=0, format=wx.LIST_FORMAT_LEFT,
heading='Nomor KK', width=150)
parent.InsertColumn(col=1, format=wx.LIST_FORMAT_LEFT,
heading='Nama Kepala Keluarga', width=250)
parent.InsertColumn(col=2, format=wx.LIST_FORMAT_LEFT, heading='Alamat',
width=260)
parent.InsertColumn(col=3, format=wx.LIST_FORMAT_LEFT, heading='Status Kemiskinan',
width=100)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Dialog.__init__(self, id=wxID_INPUT_DATA_KEMISKINAN,
name=u'input_data_kemiskinan', parent=prnt, pos=wx.Point(428,
194), size=wx.Size(843, 453), style=wx.FRAME_NO_TASKBAR,
title=u'Input Data Kemiskinan')
self.SetClientSize(wx.Size(843, 453))
self.Center(wx.BOTH)
self.isipenduduk = wx.ListCtrl(id=wxID_INPUT_DATA_KEMISKINANISIPENDUDUK,
name=u'penduduk', parent=self, pos=wx.Point(16, 8),
size=wx.Size(808, 184), style=wx.LC_REPORT)
self._init_coll_isipenduduk_Columns(self.isipenduduk)
self.isipenduduk.Bind(wx.EVT_LIST_ITEM_SELECTED,
self.OnIsipendudukListItemSelected,
id=wxID_INPUT_DATA_KEMISKINANISIPENDUDUK)
self.staticText1 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT1,
label=u'Nomor Kartu Keluarga', name='staticText1', parent=self,
pos=wx.Point(408, 200), size=wx.Size(145, 15), style=0)
self.cari_kk = wx.TextCtrl(id=wxID_INPUT_DATA_KEMISKINANCARI_KK,
name=u'cari_kk', parent=self, pos=wx.Point(560, 200),
size=wx.Size(168, 25), style=0, value='')
self.button1 = wx.Button(id=wxID_INPUT_DATA_KEMISKINANBUTTON1,
label=u'Cari', name='button1', parent=self, pos=wx.Point(744,
200), size=wx.Size(80, 24), style=0)
self.staticText2 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT2,
label=u'Nomor KK', name='staticText2', parent=self,
pos=wx.Point(16, 240), size=wx.Size(65, 15), style=0)
self.nomor_kk = wx.TextCtrl(id=wxID_INPUT_DATA_KEMISKINANNOMOR_KK,
name=u'nomor_kk', parent=self, pos=wx.Point(96, 232),
size=wx.Size(312, 25), style=wx.TE_READONLY, value='')
self.staticText3 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT3,
label=u'Alamat', name='staticText3', parent=self,
pos=wx.Point(416, 232), size=wx.Size(47, 15), style=0)
self.input_alamat = wx.TextCtrl(id=wxID_INPUT_DATA_KEMISKINANINPUT_ALAMAT,
name=u'input_alamat', parent=self, pos=wx.Point(472, 232),
size=wx.Size(352, 25), style=wx.TE_READONLY, value=u'')
self.staticText4 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT4,
label=u'Nama KK', name='staticText4', parent=self,
pos=wx.Point(16, 264), size=wx.Size(60, 15), style=0)
self.nama_kk = wx.TextCtrl(id=wxID_INPUT_DATA_KEMISKINANNAMA_KK,
name=u'nama_kk', parent=self, pos=wx.Point(96, 264),
size=wx.Size(312, 25), style=wx.TE_READONLY, value=u'')
self.staticText5 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT5,
label=u'Status Kemiskinan', name='staticText5', parent=self,
pos=wx.Point(416, 264), size=wx.Size(119, 15), style=0)
self.status_miskin = wx.ComboBox(choices=['Miskin', 'Tidak Miskin'],
id=wxID_INPUT_DATA_KEMISKINANSTATUS_MISKIN, name=u'status_miskin',
parent=self, pos=wx.Point(552, 264), size=wx.Size(272, 25),
style=0, value='')
self.staticText6 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT6,
label=u'Program Perlindungan Sosial', name='staticText6',
parent=self, pos=wx.Point(16, 296), size=wx.Size(185, 15),
style=0)
self.button2 = wx.Button(id=wxID_INPUT_DATA_KEMISKINANBUTTON2,
label=u'Simpan Data', name='button2', parent=self,
pos=wx.Point(232, 400), size=wx.Size(192, 30), style=0)
self.button2.Bind(wx.EVT_BUTTON, self.OnButton2Button,
id=wxID_INPUT_DATA_KEMISKINANBUTTON2)
self.button3 = wx.Button(id=wxID_INPUT_DATA_KEMISKINANBUTTON3,
label=u'Kembali Ke Menu', name='button3', parent=self,
pos=wx.Point(440, 400), size=wx.Size(184, 30), style=0)
self.button3.Bind(wx.EVT_BUTTON, self.OnButton3Button,
id=wxID_INPUT_DATA_KEMISKINANBUTTON3)
self.prog1 = wx.ComboBox(choices=['RASKIN'], id=wxID_INPUT_DATA_KEMISKINANPROG1, name=u'prog1',
parent=self, pos=wx.Point(24, 320), size=wx.Size(187, 27),
style=0, value=u'')
self.prog1.SetLabel(u'')
self.prog2 = wx.ComboBox(choices=['JKN'], id=wxID_INPUT_DATA_KEMISKINANPROG2, name=u'prog2',
parent=self, pos=wx.Point(24, 360), size=wx.Size(187, 27),
style=0, value=u'')
self.prog2.SetLabel(u'')
self.prog3 = wx.ComboBox(choices=['BLSM'], id=wxID_INPUT_DATA_KEMISKINANPROG3, name=u'prog3',
parent=self, pos=wx.Point(232, 320), size=wx.Size(187, 27),
style=0, value=u'')
self.prog3.SetLabel(u'')
self.prog4 = wx.ComboBox(choices=['BSM'], id=wxID_INPUT_DATA_KEMISKINANPROG4, name=u'prog4',
parent=self, pos=wx.Point(232, 360), size=wx.Size(187, 27),
style=0, value=u'')
self.prog4.SetLabel(u'')
self.prog5 = wx.ComboBox(choices=['PKH'], id=wxID_INPUT_DATA_KEMISKINANPROG5, name=u'prog5',
parent=self, pos=wx.Point(440, 320), size=wx.Size(187, 27),
style=0, value=u'')
self.prog5.SetLabel(u'')
self.prog6 = wx.ComboBox(choices=['Prog. Kesehatan Daerah'], id=wxID_INPUT_DATA_KEMISKINANPROG6, name=u'prog6',
parent=self, pos=wx.Point(440, 360), size=wx.Size(187, 27),
style=0, value=u'')
self.prog6.SetLabel(u'')
self.prog7 = wx.ComboBox(choices=['Prog. Pendidikan Daerah'], id=wxID_INPUT_DATA_KEMISKINANPROG7, name=u'prog7',
parent=self, pos=wx.Point(648, 320), size=wx.Size(187, 27),
style=0, value=u'')
self.prog7.SetLabel(u'')
self.staticText7 = wx.StaticText(id=wxID_INPUT_DATA_KEMISKINANSTATICTEXT7,
label=u'1', name='staticText7', parent=self, pos=wx.Point(8, 328),
size=wx.S |
facebookexperimental/eden | eden/scm/edenscm/hgext/stat.py | Python | gpl-2.0 | 1,158 | 0 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from edenscm.mercurial import error, patch, registrar, templatekw, util
from edenscm.mercurial.i18n import _
templatefunc = registrar.templatefunc()
@templatefunc("stat(style=none)", argspec="style")
def showdiffstat(context, mapping, args):
    """String. Return diffstat-style summary of changes.

    If 'style' is not 'none', it could be 'status', in which case "added",
    "changed", "removed" will be shown before file names.
    """
    # Reconstructed dataset-corrupted tokens ('w | hich', 'els | e:').
    if "style" in args:
        style = args["style"][1]
    else:
        style = "none"

    repo = mapping["repo"]
    ctx = mapping["ctx"]
    revcache = mapping["revcache"]
    # Wrap the diffstat output to the user's terminal width.
    width = repo.ui.termwidth()

    if style == "none":
        status = None
    elif style == "status":
        # Per-file status markers ("added"/"changed"/"removed") for this ctx.
        status = templatekw.getfiles(repo, ctx, revcache)
    else:
        raise error.ParseError(_("stat does not support style %r") % (style,))

    return patch.diffstat(
        util.iterlines(ctx.diff(noprefix=False)), width=width, status=status
    )
|
haoyuchen1992/osf.io | website/project/licenses/__init__.py | Python | apache-2.0 | 3,350 | 0.00209 | import functools
import json
import os
import warnings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
from website import settings
def _serialize(fields, instance):
    """Return a dict mapping each field name in *fields* to its value on *instance*."""
    return dict((name, getattr(instance, name)) for name in fields)


# Serializer for a NodeLicense: picks the id/name/text attributes.
serialize_node_license = functools.partial(_serialize, ('id', 'name', 'text'))
def serialize_node_license_record(node_license_record):
    """Serialize a NodeLicenseRecord: the license's id/name/text plus the
    record's year and copyright_holders.  Returns {} for None.
    """
    if node_license_record is None:
        return {}
    serialized = serialize_node_license(node_license_record.node_license)
    extra = _serialize(('year', 'copyright_holders'), node_license_record)
    serialized.update(extra)
    return serialized
@mongo_utils.unique_on(['id', '_id'])
class NodeLicense(StoredObject):
    """Canonical license definition (id/name/text), synced from the
    list-of-licenses package by ensure_licenses().
    """
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    # Human-facing unique license key (distinct from the Mongo _id); not editable.
    id = fields.StringField(required=True, unique=True, editable=False)
    name = fields.StringField(required=True, unique=True)
    # Full license text.
    text = fields.StringField(required=True)
    # Property strings taken from the license JSON's optional 'properties' key.
    properties = fields.StringField(list=True)
class NodeLicenseRecord(StoredObject):
    """A node's chosen license: a reference to a NodeLicense plus the
    node-specific year and copyright holders.

    Reconstructed dataset-corrupted field name 'copyright_ | holders'.
    """
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    node_license = fields.ForeignField('nodelicense', required=True)
    # Deliberately left as a StringField to support year ranges (e.g. 2012-2015)
    year = fields.StringField()
    copyright_holders = fields.StringField(list=True)

    @property
    def name(self):
        """Name of the referenced license, or None if unset."""
        return self.node_license.name if self.node_license else None

    @property
    def text(self):
        """Full text of the referenced license, or None if unset."""
        return self.node_license.text if self.node_license else None

    @property
    def id(self):
        """Identifier of the referenced license, or None if unset."""
        return self.node_license.id if self.node_license else None

    def to_json(self):
        """Serialize this record (license id/name/text + year/copyright holders)."""
        return serialize_node_license_record(self)

    def copy(self):
        """Create, save, and return a new record with the same field values."""
        copied = NodeLicenseRecord(
            node_license=self.node_license,
            year=self.year,
            copyright_holders=self.copyright_holders
        )
        copied.save()
        return copied
def ensure_licenses(warn=True):
    """Sync NodeLicense documents with the bundled list-of-licenses JSON.

    Creates any license missing from the database (warning if *warn* is
    True) and refreshes name/text/properties on existing ones.
    """
    licenses_path = os.path.join(
        settings.APP_PATH,
        'node_modules', 'list-of-licenses', 'dist', 'list-of-licenses.json'
    )
    with open(licenses_path) as fp:
        licenses = json.load(fp)
    for license_id, info in licenses.items():
        name = info['name']
        text = info['text']
        properties = info.get('properties', [])
        try:
            node_license = NodeLicense.find_one(
                Q('id', 'eq', license_id)
            )
        except NoResultsFound:
            # Not in the database yet: create it (optionally warning first).
            if warn:
                warnings.warn(
                    "License {name} ({id}) not already in the database. Adding it now.".format(
                        name=name,
                        id=license_id
                    )
                )
            node_license = NodeLicense(
                id=license_id,
                name=name,
                text=text,
                properties=properties
            )
        else:
            # Already present: refresh its mutable fields from the JSON.
            node_license.name = name
            node_license.text = text
            node_license.properties = properties
        node_license.save()
|
boris-p/ladybug | src/Ladybug_GenCumulativeSkyMtx.py | Python | gpl-3.0 | 16,430 | 0.011625 | # GenCumulativeSkyMtx
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
This component uses Radiance's gendaymtx function to calculate the sky's radiation for each hour of the year. This is a necessary pre-step before doing radiation analysis with Rhino geometry or generating a radiation rose.
The first time you use this component, you will need to be connected to the internet so that the component can download the "gendaymtx.exe" function to your system.
Gendaymtx is written by Ian Ashdown and Greg Ward. For more information, check the Radiance manual at:
http://www.radiance-online.org/learning/documentation/manual-pages/pdfs/gendaymtx.pdf
-
Provided by Ladybug 0.0.61
Args:
_epwFile: The output of the Ladybug Open EPW component or the file path location of the epw weather file on your system.
_skyDensity_: Set to 0 to generate a Tregenza sky, which will divide up the sky dome with a coarse density of 145 sky patches. Set to 1 to generate a Reinhart sky, which will divide up the sky dome using a very fine density of 580 sky patches. Note that, while the Reinhart sky is more accurate, it will result in considerably longer calculation times. Accordingly, the default is set to 0 for a Tregenza sky.
workingDir_: An optional working directory in your system where the sky will be generated. Default is set to C:\Ladybug or C:\Users\yourUserName\AppData\Roaming\Ladybug. The latter is used if you cannot write to the C:\ drive of your computer. Any valid file path location can be connected.
useOldRes_: Set this to "True" if you have already run this component previously and you want to use the already-generated data for this weather file.
_runIt: Set to "True" to run the component and generate a sky matrix.
Returns:
readMe!: ...
cumulativeSkyMtx: The result of the gendaymtx function. Use the selectSkyMtx component to select a desired sky matrix from this output for use in a radiation study, radition rose, or sky dome visualization.
"""
ghenv.Component.Name = "Ladybug_GenCumulativeSkyMtx"
ghenv.Component.NickName = 'genCumulativeSkyMtx'
ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "2 | VisualizeWeatherData"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import os
import scriptcontext as sc
from clr import AddReference
AddReference('Grasshopper')
import Grasshopper.Kernel as gh
from itertools import izip
import shutil
def date2Hour(month, day, hour):
    """Convert a (month, day, hour) calendar date to an hour-of-year index.

    month/day may be ints or numeric strings; a non-leap year is assumed.
    """
    # Cumulative day count at the start of each month (non-leap year).
    days_before_month = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
    day_of_year = days_before_month[int(month) - 1] + int(day)
    return (day_of_year - 1) * 24 + hour
def hour2Date(hour):
    """Convert an hour-of-year index back to calendar strings.

    Args:
        hour: hour of the (non-leap) year, 0-8760.

    Returns:
        tuple of str: (day_of_month, month_number, decimal_hour) where the
        hour is shifted to the half-hour mark (e.g. hour 13 -> '13.5').

    Removed the unused local ``monthList``.
    """
    numOfDays = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365]
    numOfHours = [24 * numOfDay for numOfDay in numOfDays]
    # Find the month whose cumulative hour span contains this hour.
    for h in range(len(numOfHours) - 1):
        if hour <= numOfHours[h + 1]:
            month = h + 1
            break
    if hour == 0:
        day = 1
    elif (hour) % 24 == 0:
        # Exact midnight: attribute the hour to the day just ended.
        day = int((hour - numOfHours[h]) / 24)
    else:
        day = int((hour - numOfHours[h]) / 24) + 1
    time = hour % 24 + 0.5
    return str(day), str(month), str(time)
def getRadiationValues(epw_file, analysisPeriod, weaFile):
    """Append hourly radiation lines from an .epw file to an open .wea file.

    Args:
        epw_file: path to the EnergyPlus weather (.epw) file.
        analysisPeriod: currently unused; the full year (hours 0-8760) is
            always exported.
        weaFile: an open, writable file object for the .wea output.

    Returns:
        The same weaFile object with one "month day time dir dif" line per hour.
    """
    # start hour and end hour (whole year)
    stHour = 0
    endHour = 8760
    # 'with' guarantees the epw file is closed even if a line is malformed.
    with open(epw_file, "r") as epwfile:
        for lineCount, line in enumerate(epwfile):
            # The first 8 lines of an .epw file are header records.
            hour = lineCount - 8
            if int(stHour) <= hour <= int(endHour):
                # Columns 14/15 — presumably direct-normal and
                # diffuse-horizontal radiation per the EPW layout; confirm
                # against the EPW specification.
                dirRad = (line.split(',')[14])
                difRad = (line.split(',')[15])
                day, month, time = hour2Date(hour)
                weaFile.write(month + " " + day + " " + time + " " + dirRad + " " + difRad + "\n")
    return weaFile
def weaHeader(epwFileAddress, lb_preparation):
    """Build the header block of a .wea file from an .epw file's location data.

    Longitude and time zone are negated to match the .wea sign convention
    (west-positive, time zone in degrees).

    Replaced deprecated Python 2 backtick-repr syntax with repr(), which is
    byte-identical in output and forward-compatible.
    """
    locName, lat, long, timeZone, elev, dataStr = lb_preparation.epwLocation(epwFileAddress)
    return "place " + locName + "\n" + \
        "latitude " + lat + "\n" + \
        "longitude " + repr(-float(long)) + "\n" + \
        "time_zone " + repr(-float(timeZone) * 15) + "\n" + \
        "site_elevation " + elev + "\n" + \
        "weather_data_file_units 1\n"
def epw2wea(weatherFile, analysisPeriod, lb_preparation):
    """Convert an .epw weather file to a gendaymtx-compatible .wea file.

    The .wea file is written alongside the .epw (same name, .wea extension)
    and its path is returned.
    """
    wea_path = weatherFile.replace(".epw", ".wea")
    wea_out = open(wea_path, 'w')
    wea_out.write(weaHeader(weatherFile, lb_preparation))
    wea_out = getRadiationValues(weatherFile, analysisPeriod, wea_out)
    wea_out.close()
    return wea_path
def main(epwFile, skyType, workingDir, useOldRes):
# import the classes
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_preparation = sc.sticky["ladybug_Preparation"]()
# make working directory
if workingDir: workingDir = lb_preparation.removeBlankLight(workingDir)
workingDir = lb_preparation.makeWorkingDir(workingDir)
# make sure the directory has been created
if workingDir == -1: return -2
workingDrive = workingDir[0:1]
# GenCumulativeSky
gendaymtxFile = os.path.join(workingDir, 'gendaymtx.exe')
if not os.path.isfile(gendaymtxFile):
# let's see if we can grab it from radiance folder
if os.path.isfile("c:/radiance/bin/gendaymtx.exe"):
# just copy this file
shutil.copyfile("c:/radiance/bin/gendaymtx.exe", gendaymtxFile)
else:
# download the file
lb_preparation.downloadGendaymtx(workingDir)
#check if the file is there
if not os.path.isfile(gendaymtxFile) or os.path.getsize(gendaymtxFile)< 15000 : return -3
## check for epw file to be connected
if epwFile != None and epwFile[-3:] == 'epw':
if not os.path.isfile(epwFile):
print "Can't find epw file at " + epwFile
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "Can't find epw file at " + epwFile)
return -1
# import data from epw file
locName, lat, lngt, timeZone, elev, locationStr = lb_preparation.epwLocation(epwFile)
newLocName = lb_preparation.removeBlank(locName)
# make new folder for each city
subWorkingDir = lb_preparation.makeWorkingDir(workingDir + "\\" + newLocName)
print 'Current working directory is set to: ', subWorkingDir
|
X-dark/Flexget | flexget/plugins/output/rapidpush.py | Python | mit | 6,710 | 0.001192 | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils import json
from flexget.utils.template import RenderError
log = logging.getLogger('rapidpush')
__version__ = 0.4
headers = {'User-Agent': "FlexGet RapidPush plugin/%s" % str(__version__)}
url = 'https://rapidpush.net/api'
class OutputRapidPush(object):
    """
    Example::

      rapidpush:
        apikey: xxxxxxx (can also be a list of api keys)
        [category: category, default FlexGet]
        [title: title, default New release]
        [group: device group, default no group]
        [message: the message, default {{title}}]
        [channel: the broadcast notification channel, if provided it will be send to the channel subscribers instead of
        your devices, default no channel]
        [priority: 0 - 6 (6 = highest), default 2 (normal)]
        [notify_accepted: boolean true or false, default true]
        [notify_rejected: boolean true or false, default false]
        [notify_failed: boolean true or false, default false]
        [notify_undecided: boolean true or false, default false]

    Configuration parameters are also supported from entries (eg. through set).

    Reconstructed two dataset-corrupted lines in process_notifications
    ('entry.render( | message)' and '| except RenderError').
    """

    def validator(self):
        """Return the flexget config schema for this plugin's options."""
        from flexget import validator
        config = validator.factory('dict')
        config.accept('text', key='apikey', required=True)
        config.accept('list', key='apikey').accept('text')
        config.accept('text', key='category')
        config.accept('text', key='title')
        config.accept('text', key='group')
        config.accept('text', key='channel')
        config.accept('integer', key='priority')
        config.accept('text', key='message')
        config.accept('boolean', key='notify_accepted')
        config.accept('boolean', key='notify_rejected')
        config.accept('boolean', key='notify_failed')
        config.accept('boolean', key='notify_undecided')
        return config

    def prepare_config(self, config):
        """Fill in the documented defaults for every optional config key."""
        config.setdefault('title', 'New release')
        config.setdefault('category', 'FlexGet')
        config.setdefault('priority', 2)
        config.setdefault('group', '')
        config.setdefault('channel', '')
        config.setdefault('message', '{{title}}')
        config.setdefault('notify_accepted', True)
        config.setdefault('notify_rejected', False)
        config.setdefault('notify_failed', False)
        config.setdefault('notify_undecided', False)
        return config

    # Run last to make sure other outputs are successful before sending notification
    @plugin.priority(0)
    def on_task_output(self, task, config):
        """Dispatch notifications for each entry category enabled in config."""
        # get the parameters
        config = self.prepare_config(config)

        if config['notify_accepted']:
            log.debug("Notify accepted entries")
            self.process_notifications(task, task.accepted, config)
        if config['notify_rejected']:
            log.debug("Notify rejected entries")
            self.process_notifications(task, task.rejected, config)
        if config['notify_failed']:
            log.debug("Notify failed entries")
            self.process_notifications(task, task.failed, config)
        if config['notify_undecided']:
            log.debug("Notify undecided entries")
            self.process_notifications(task, task.undecided, config)

    # Process the given events.
    def process_notifications(self, task, entries, config):
        """POST one RapidPush notification (or channel broadcast) per entry.

        Render errors in templated fields are logged and the un-rendered
        value is sent instead; HTTP/API errors are logged per response.
        """
        for entry in entries:
            if task.options.test:
                log.info("Would send RapidPush notification about: %s", entry['title'])
                continue

            log.info("Send RapidPush notification about: %s", entry['title'])
            apikey = entry.get('apikey', config['apikey'])
            if isinstance(apikey, list):
                apikey = ','.join(apikey)

            title = config['title']
            try:
                title = entry.render(title)
            except RenderError as e:
                log.error('Error setting RapidPush title: %s' % e)

            message = config['message']
            try:
                message = entry.render(message)
            except RenderError as e:
                log.error('Error setting RapidPush message: %s' % e)

            # Check if we have to send a normal or a broadcast notification.
            if not config['channel']:
                priority = entry.get('priority', config['priority'])
                category = entry.get('category', config['category'])
                try:
                    category = entry.render(category)
                except RenderError as e:
                    log.error('Error setting RapidPush category: %s' % e)
                group = entry.get('group', config['group'])
                try:
                    group = entry.render(group)
                except RenderError as e:
                    log.error('Error setting RapidPush group: %s' % e)

                # Send the request
                data_string = json.dumps({
                    'title': title,
                    'message': message,
                    'priority': priority,
                    'category': category,
                    'group': group})
                data = {'apikey': apikey, 'command': 'notify', 'data': data_string}
            else:
                channel = config['channel']
                try:
                    channel = entry.render(channel)
                except RenderError as e:
                    log.error('Error setting RapidPush channel: %s' % e)

                # Send the broadcast request
                data_string = json.dumps({
                    'title': title,
                    'message': message,
                    'channel': channel})
                data = {'apikey': apikey, 'command': 'broadcast', 'data': data_string}

            response = task.requests.post(url, headers=headers, data=data, raise_status=False)
            json_data = response.json()
            if 'code' in json_data:
                if json_data['code'] == 200:
                    log.debug("RapidPush message sent")
                else:
                    log.error(json_data['desc'] + " (" + str(json_data['code']) + ")")
            else:
                for item in json_data:
                    if json_data[item]['code'] == 200:
                        log.debug(item + ": RapidPush message sent")
                    else:
                        log.error(item + ": " + json_data[item]['desc'] + " (" + str(json_data[item]['code']) + ")")
@event('plugin.register')
def register_plugin():
    # Register under the name 'rapidpush'; api_ver=2 selects the modern plugin API.
    plugin.register(OutputRapidPush, 'rapidpush', api_ver=2)
|
MoonShineVFX/core | avalon/tools/cbloader/app.py | Python | mit | 10,714 | 0 | import sys
import time
from ..projectmanager.widget import AssetWidget, AssetModel
from ...vendor.Qt import QtWidgets, QtCore, QtGui
from ... import api, io, style
from .. import lib
from .lib import refresh_family_config
from .widgets import SubsetWidget, VersionWidget, FamilyListWidget
module = sys.modules[__name__]
module.window = None
# Custom roles
DocumentRole = AssetModel.DocumentRole
class Window(QtWidgets.QDialog):
"""Asset loader interface"""
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.setWindowTitle(
"Asset Loader 2.0 - %s/%s" % (
api.registered_root(),
api.Session.get("AVALON_PROJECT")))
# Enable minimize and maximize for app
self.setWindowFlags(QtCore.Qt.Window)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
body = QtWidgets.QWidget()
footer = QtWidgets.QWidget()
footer.setFixedHeight(20)
container = QtWidgets.QWidget()
assets = AssetWidget()
families = FamilyListWidget()
subsets = SubsetWidget()
version = VersionWidget()
# Create splitter to show / hide family filters
asset_filter_splitter = QtWidgets.QSplitter()
asset_filter_splitter.setOrientation(QtCore.Qt.Vertical)
asset_filter_splitter.addWidget(assets)
asset_filter_splitter.addWidget(families)
asset_filter_splitter.setStretchFactor(0, 65)
asset_filter_splitter.setStretchFactor(1, 35)
container_layout = QtWidgets.QHBoxLayout(container)
container_layout.setContentsMargins(0, 0, 0, 0)
split = QtWidgets.QSplitter()
split.addWidget(asset_filter_splitter)
split.addWidget(subsets)
split.addWidget(version)
split.setSizes([225, 925, 0])
container_layout.addWidget(split)
body_layout = QtWidgets.QHBoxLayout(body)
body_layout.addWidget(container)
body_layout.setContentsMargins(0, 0, 0, 0)
message = QtWidgets.QLabel()
message.hide()
footer_layout = QtWidgets.QVBoxLayout(footer)
footer_layout.addWidget(message)
footer_layout.setContentsMargins(0, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(body)
layout.addWidget(footer)
self.data = {
"widgets": {"families": families},
"model": {
"assets": assets,
"subsets": subsets,
"version": version,
},
"label": {
"message": message,
},
"state": {
"template": None,
"locations": list(),
"context": {
"root": None,
"project": None,
"asset": None,
"silo": None,
"subset": None,
"version": None,
"representation": None,
},
}
}
families.active_changed.connect(subsets.set_family_filters)
assets.selection_changed.connect(self.on_assetschanged)
subsets.active_changed.connect(self.on_versionschanged)
refresh_family_config()
# Defaults
self.resize(1150, 700)
# -------------------------------
# Delay calling blocking methods
# -------------------------------
def refresh(self):
self.echo("Fetching results..")
lib.schedule(self._refresh, 50, channel="mongo")
def on_assetschanged(self, *args):
self.echo("Fetching results..")
lib.schedule(self._assetschanged, 50, channel="mongo")
def on_versionschanged(self, *args):
self.echo("Fetching results..")
lib.schedule(self._versionschanged, 50, channel="mongo")
def set_context(self, context, refresh=True):
self.echo("Setting context: {}".format(context))
lib.schedule(lambda: self._set_context(context, refresh=refresh),
50, channel="mongo")
# ------------------------------
def _refresh(self):
"""Load assets from database"""
# Ensure a project is loaded
project = io.find_one({"type": "project"})
assert project, "This is a bug"
assets_model = self.data["model"]["assets"]
assets_model.refresh()
assets_model.setFocus()
families = self.data["widgets"]["families"]
families.refresh()
# Update state
state = self.data["state"]
state["template"] = project["config"]["template"]["publish"]
state["context"]["root"] = api.registered_root()
state["context"]["project"] = project["name"]
def _assetschanged(self):
"""Selected assets have changed"""
assets_model = self.data["model"]["assets"]
subsets = self.data["model"]["subsets"]
subsets_model = subsets.model
subsets_model.clear()
t1 = time.time()
asset_item = assets_model.get_active_index()
if asset_item is None or not asset_item.isValid():
return
document = asset_item.data(DocumentRole)
subsets_model.set_asset(document['_id'])
# Enforce the columns to fit the data (purely cosmetic)
rows = subsets_model.rowCount(QtCore.QModelIndex())
for i in range(rows):
subsets.view.resizeColumnToContents(i)
# Clear the version information on asset change
self.data['model']['version'].set_version(None)
self.data["state"]["context"]["asset"] = document["name"]
self.data["state"]["context"]["silo"] = document[ | "silo"]
self.echo("Duration: %.3fs" % (time.time() - t1))
|
def _versionschanged(self):
subsets = self.data["model"]["subsets"]
selection = subsets.view.selectionModel()
# Active must be in the selected rows otherwise we
# assume it's not actually an "active" current index.
version = None
active = selection.currentIndex()
if active:
rows = selection.selectedRows(column=active.column())
if active in rows:
node = active.data(subsets.model.NodeRole)
version = node['version_document']['_id']
self.data['model']['version'].set_version(version)
def _set_context(self, context, refresh=True):
"""Set the selection in the interface using a context.
The context must contain `silo` and `asset` data by name.
Note: Prior to setting context ensure `refresh` is triggered so that
the "silos" are listed correctly, aside from that setting the
context will force a refresh further down because it changes
the active silo and asset.
Args:
context (dict): The context to apply.
Returns:
None
"""
silo = context.get("silo", None)
if silo is None:
return
asset = context.get("asset", None)
if asset is None:
return
if refresh:
# Workaround:
# Force a direct (non-scheduled) refresh prior to setting the
# asset widget's silo and asset selection to ensure it's correctly
# displaying the silo tabs. Calling `window.refresh()` and directly
# `window.set_context()` the `set_context()` seems to override the
# scheduled refresh and the silo tabs are not shown.
self._refresh()
asset_widget = self.data['model']['assets']
asset_widget.set_silo(silo)
asset_widget.select_assets([asset], expand=True)
def echo(self, message):
widget = self.data["label"]["message"]
widget.setText(str(message))
widget.show()
print(message)
lib.schedule(widget.hide, 5000, channel="message")
def closeEvent(self, event):
# Kill on holding SHIFT
modifiers = QtWidgets.QApplication.queryKeyboardModifiers()
shift_pressed = QtCore.Qt.ShiftModifier & modifiers
if shift_pressed:
print("Force quitted..") |
vrtsystems/hszinc | hszinc/zoneinfo.py | Python | bsd-2-clause | 6,577 | 0.002433 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Project Haystack timezone data
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import pytz
import datetime
from .version import LATEST_VER
# The official list of timezones as of 6th Jan 2016:
# Yes, that's *without* the usual country prefix.
HAYSTACK_TIMEZONES="""Abidjan
Accra
Adak
Addis_Ababa
Adelaide
Aden
Algiers
Almaty
Amman
Amsterdam
Anadyr
Anchorage
Andorra
Antananarivo
Antigua
Apia
Aqtau
Aqtobe
Araguaina
Ashgabat
Asmara
Asuncion
Athens
Atikokan
Auckland
Azores
Baghdad
Bahia
Bahia_Banderas
Bahrain
Baku
Bangkok
Barbados
Beirut
Belem
Belgrade
Belize
Berlin
Bermuda
Beulah
Bishkek
Bissau
Blanc-Sablon
Boa_Vista
Bogota
Boise
Brisbane
Broken_Hill
Brunei
Brussels
Bucharest
Budapest
Buenos_Aires
Cairo
Cambridge_Bay
Campo_Grande
Canary
Cancun
Cape_Verde
Caracas
Casablanca
Casey
Catamarca
Cayenne
Cayman
Center
Ceuta
Chagos
Chatham
Chicago
Chihuahua
Chisinau
Chita
Choibalsan
Christmas
Chuuk
Cocos
Colombo
Comoro
Copenhagen
Cordoba
Costa_Rica
Creston
Cuiaba
Curacao
Currie
Damascus
Danmarkshavn
Dar_es_Salaam
Darwin
Davis
Dawson
Dawson_Creek
Denver
Detroit
Dhaka
Dili
Djibouti
Dubai
Dublin
DumontDUrville
Dushanbe
Easter
Edmonton
Efate
Eirunepe
El_Aaiun
El_Salvador
Enderbury
Eucla
Fakaofo
Faroe
Fiji
Fortaleza
Funafuti
GMT
GMT+1
GMT+10
GMT+11
GMT+12
GMT+2
GMT+3
GMT+4
GMT+5
GMT+6
GMT+7
GMT+8
GMT+9
GMT-1
GMT-10
GMT-11
GMT-12
GMT-13
GMT-14
GMT-2
GMT-3
GMT-4
GMT-5
GMT-6
GMT-7
GMT-8
GMT-9
Galapagos
Gambier
Gaza
Gibraltar
Glace_Bay
Godthab
Goose_Bay
Grand_Turk
Guadalcanal
Guam
Guatemala
Guayaquil
Guyana
Halifax
Havana
Hebron
Helsinki
Hermosillo
Ho_Chi_Minh
Hobart
Hong_Kong
Honolulu
Hovd
Indianapolis
Inuvik
Iqaluit
Irkutsk
Istanbul
Jakarta
Jamaica
Jayapura
Jerusalem
Johannesburg
Jujuy
Juneau
Kabul
Kaliningrad
Kamchatka
Kampala
Karachi
Kathmandu
Kerguelen
Khandyga
Khartoum
Kiev
Kiritimati
Knox
Kolkata
Kosrae
Krasnoyarsk
Kuala_Lumpur
Kuching
Kuwait
Kwajalein
La_Paz
La_Rioja
Lagos
Lima
Lindeman
Lisbon
London
Lord_Howe
Los_Angeles
Louisville
Luxembourg
Macau
Maceio
Macquarie
Madeira
Madrid
Magadan
Mahe
Majuro
Makassar
Maldives
Malta
Managua
Manaus
Manila
Maputo
Marengo
Marquesas
Martinique
Matamoros
Mauritius
Mawson
Mayotte
Mazatlan
Melbourne
Mendoza
Menominee
Merida
Metlakatla
Mexico_City
Midway
Minsk
Miquelon
Mogadishu
Monaco
Moncton
Monrovia
Monterrey
Montevideo
Monticello
Montreal
Moscow
Muscat
Nairobi
Nassau
Nauru
Ndjamena
New_Salem
New_York
Nicosia
Nipigon
Niue
Nome
Norfolk
Noronha
Noumea
Novokuznetsk
Novosibirsk
Ojinaga
Omsk
Oral
Oslo
Pago_Pago
Palau
Palmer
Panama
Pangnirtung
Paramaribo
Paris
Perth
Petersburg
Phnom_Penh
Phoenix
Pitcairn
Pohnpei
Pont | ianak
Port-au-Prince
Port_Moresby
Port_of_Spain
Porto_Velho
Prague
Puerto_Rico
Pyongyang
Qatar
Qyzylorda
Rainy_River
Rangoon
Rankin_Inlet
Rarotonga
Recife
Regina
Rel
Resolute
Reunion
Reykjavik
Riga
Rio_Branco
Rio_Gallegos
Riyadh
Rome
Rothera
Saipan
Sakhalin
Salt | a
Samara
Samarkand
San_Juan
San_Luis
Santa_Isabel
Santarem
Santiago
Santo_Domingo
Sao_Paulo
Scoresbysund
Seoul
Shanghai
Simferopol
Singapore
Sitka
Sofia
South_Georgia
Srednekolymsk
St_Johns
Stanley
Stockholm
Swift_Current
Sydney
Syowa
Tahiti
Taipei
Tallinn
Tarawa
Tashkent
Tbilisi
Tegucigalpa
Tehran
Tell_City
Thimphu
Thule
Thunder_Bay
Tijuana
Tirane
Tokyo
Tongatapu
Toronto
Tripoli
Troll
Tucuman
Tunis
UCT
UTC
Ulaanbaatar
Urumqi
Ushuaia
Ust-Nera
Uzhgorod
Vancouver
Vevay
Vienna
Vientiane
Vilnius
Vincennes
Vladivostok
Volgograd
Vostok
Wake
Wallis
Warsaw
Whitehorse
Winamac
Windhoek
Winnipeg
Yakutat
Yakutsk
Yekaterinburg
Yellowknife
Yerevan
Zaporozhye
Zurich""".split('\n')
# Set form of the official list, for O(1) membership tests.
HAYSTACK_TIMEZONES_SET=set(HAYSTACK_TIMEZONES)
# Mapping of pytz-recognised timezones to Haystack timezones.
# Both caches are built lazily by _gen_map(): _TZ_MAP maps Haystack name ->
# pytz (Olson) name, _TZ_RMAP is the reverse mapping.
_TZ_MAP = None
_TZ_RMAP = None
def _map_timezones():
    """
    Map the official Haystack timezone list to those recognised by pytz.

    Returns a dict of Haystack timezone name -> full pytz (Olson) name.
    A pytz zone matches either exactly, or by its single-level suffix
    (e.g. 'Europe/Prague' -> 'Prague'); names with more than one '/'
    after the prefix are ignored.  First pytz match wins.
    """
    mapping = {}
    remaining = HAYSTACK_TIMEZONES_SET.copy()
    for zone in pytz.all_timezones:
        if not remaining:   # pragma: no cover
            # Everything already matched; nothing left to look for.
            break
        if zone in remaining:
            # Exact match between the Haystack and pytz names.
            mapping[zone] = zone
            remaining.discard(zone)
        elif '/' in zone:
            _region, _sep, city = zone.partition('/')
            # Only single-level names qualify: reject e.g.
            # 'America/Argentina/Salta'.
            if ('/' not in city) and (city in remaining):
                mapping[city] = zone
                remaining.discard(city)
    return mapping
def _gen_map():
    """Build the forward and reverse timezone maps once, on first use.

    Populates the module-level caches: ``_TZ_MAP`` (Haystack name ->
    pytz name) and ``_TZ_RMAP`` (pytz name -> Haystack name).
    """
    global _TZ_MAP
    global _TZ_RMAP
    if (_TZ_MAP is None) or (_TZ_RMAP is None):
        _TZ_MAP = _map_timezones()
        # Reverse mapping via a dict comprehension; the previous
        # dict([...]) over list(items()) built two throwaway lists.
        _TZ_RMAP = {zone: name for (name, zone) in _TZ_MAP.items()}
def get_tz_map(version=LATEST_VER):
    """Return the Haystack-name -> pytz-name map, building it on first call.

    ``version`` is accepted for interface symmetry with the rest of the
    module; the map is currently version-independent.
    """
    # Lazily populate the module-level caches before handing one back.
    _gen_map()
    return _TZ_MAP
def get_tz_rmap(version=LATEST_VER):
    """Return the pytz-name -> Haystack-name map, building it on first call.

    ``version`` is accepted for interface symmetry with the rest of the
    module; the map is currently version-independent.
    """
    # Lazily populate the module-level caches before handing one back.
    _gen_map()
    return _TZ_RMAP
def timezone(haystack_tz, version=LATEST_VER):
    """Return the pytz timezone object for a Haystack timezone name.

    Raises ValueError when the name is not in the (host-dependent)
    Haystack -> pytz map.
    """
    mapping = get_tz_map(version=version)
    if haystack_tz not in mapping:
        raise ValueError('%s is not a recognised timezone on this host'
                         % haystack_tz)
    return pytz.timezone(mapping[haystack_tz])
def timezone_name(dt, version=LATEST_VER):
    """
    Determine an appropriate timezone for the given date/time object.

    `dt` must be timezone-aware.  If its tzinfo is a pytz zone present in
    the reverse map, the Haystack name is returned directly; otherwise a
    zone with a matching UTC offset at that instant is searched for.
    Raises ValueError when `dt` is naive or no equivalent zone is found.
    """
    tz_rmap = get_tz_rmap(version=version)
    if dt.tzinfo is None:
        raise ValueError('%r has no timezone' % dt)
    # Easy case: pytz timezone.
    try:
        # Only pytz tzinfo objects carry a `.zone` attribute; a stdlib or
        # third-party tzinfo raises AttributeError here instead.
        tz_name = dt.tzinfo.zone
        return tz_rmap[tz_name]
    except KeyError:
        # Not in timezone map
        pass
    except AttributeError:
        # Not a pytz-compatible tzinfo
        pass
    # Hard case, try to find one that's equivalent. Hopefully we don't get
    # many of these. Start by getting the current timezone offset, and a
    # timezone-naïve copy of the timestamp.
    offset = dt.utcoffset()
    dt_notz = dt.replace(tzinfo=None)
    if offset == datetime.timedelta(0):
        # UTC?
        return 'UTC'
    # Linear scan: first zone whose offset at this instant matches wins.
    # NOTE(review): the result can depend on map iteration order when
    # several zones share the offset.
    for olson_name, haystack_name in list(tz_rmap.items()):
        if pytz.timezone(olson_name).utcoffset(dt_notz) == offset:
            return haystack_name
    raise ValueError('Unable to get timezone of %r' % dt)
|
eHealthAfrica/ureport | ureport/assets/urls.py | Python | agpl-3.0 | 74 | 0.013514 | from .views import ImageCRUDL
urlpatte | rns = ImageCRUDL().as_url | patterns() |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/code/browser/branchsubscription.py | Python | agpl-3.0 | 11,371 | 0.000264 | # Copyright 2009-2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
__all__ = [
'BranchPortletSubscribersContent',
'BranchSubscriptionAddOtherView',
'BranchSubscriptionAddView',
'BranchSubscriptionEditOwnView',
'BranchSubscriptionEditView',
'BranchSubscriptionPrimaryContext',
]
from lazr.restful.utils import smartquote
from zope.component import getUtility
from zope.interface import implements
from lp.app.browser.launchpadform import (
action,
LaunchpadEditFormView,
LaunchpadFormView,
)
from lp.app.interfaces.services import IService
from lp.code.enums import BranchSubscriptionNotificationLevel
from lp.code.interfaces.branchsubscription import IBranchSubscription
from lp.registry.interfaces.person import IPersonSet
from lp.services.webapp import (
canonical_url,
LaunchpadView,
)
from lp.services.webapp.authorization import (
check_permission,
precache_permission_for_objects,
)
from lp.services.webapp.escaping import structured
from lp.services.webapp.interfaces import IPrimaryContext
class BranchSubscriptionPrimaryContext:
    """A branch subscription's primary context is that of its branch."""
    implements(IPrimaryContext)

    def __init__(self, branch_subscription):
        # Delegate to the branch's own primary-context adapter.
        branch = branch_subscription.branch
        self.context = IPrimaryContext(branch).context
class BranchPortletSubscribersContent(LaunchpadView):
    """View for the contents of the subscribers portlet."""

    def subscriptions(self):
        """Return a decorated list of branch subscriptions.

        Only subscriptions whose person the requesting user may view are
        returned, sorted by the person's display name.
        """
        # Cache permissions so private subscribers can be rendered.
        # The security adaptor will do the job also but we don't want or need
        # the expense of running several complex SQL queries.
        person_ids = [sub.personID for sub in self.context.subscriptions]
        # Called purely for its side effect: warming the person cache.
        list(getUtility(IPersonSet).getPrecachedPersonsFromIDs(
            person_ids, need_validity=True))
        if self.user is not None:
            subscribers = [
                subscription.person
                for subscription in self.context.subscriptions]
            # Pre-seed LimitedView for all subscribers so the
            # check_permission() calls below hit the cache.
            precache_permission_for_objects(
                self.request, "launchpad.LimitedView", subscribers)
        visible_subscriptions = [
            subscription for subscription in self.context.subscriptions
            if check_permission('launchpad.LimitedView', subscription.person)]
        return sorted(
            visible_subscriptions,
            key=lambda subscription: subscription.person.displayname)
class _BranchSubscriptionView(LaunchpadFormView):
    """Contains the common functionality of the Add and Edit views."""
    schema = IBranchSubscription
    field_names = ['notification_level', 'max_diff_lines', 'review_level']
    # Notification levels for which the diff-size limit is meaningful and
    # therefore shown in notifications.
    LEVELS_REQUIRING_LINES_SPECIFICATION = (
        BranchSubscriptionNotificationLevel.DIFFSONLY,
        BranchSubscriptionNotificationLevel.FULL)
    @property
    def user_is_subscribed(self):
        # Since it is technically possible to get to this page when
        # the user is not subscribed by hacking the URL, we should
        # handle the case nicely.
        return self.context.getSubscription(self.user) is not None
    @property
    def next_url(self):
        return canonical_url(self.context)
    # Cancel returns to the same place as a successful submit; this aliases
    # the property object itself, so both names share one implementation.
    cancel_url = next_url
    def add_notification_message(self, initial, notification_level,
                                 max_diff_lines, review_level):
        """Show a notification summarising the subscription settings."""
        if notification_level in self.LEVELS_REQUIRING_LINES_SPECIFICATION:
            lines_message = '<li>%s</li>' % max_diff_lines.description
        else:
            lines_message = ''
        # The doubled %% survive the first interpolation, leaving three %s
        # placeholders for structured() to fill (and escape) below.
        format_str = '%%s<ul><li>%%s</li>%s<li>%%s</li></ul>' % lines_message
        message = structured(
            format_str, initial, notification_level.description,
            review_level.description)
        self.request.response.addNotification(message)
    def optional_max_diff_lines(self, notification_level, max_diff_lines):
        """Return `max_diff_lines` only where the level makes it relevant."""
        if notification_level in self.LEVELS_REQUIRING_LINES_SPECIFICATION:
            return max_diff_lines
        else:
            return None
class BranchSubscriptionAddView(_BranchSubscriptionView):
    """Page for subscribing the current user to a branch."""
    subscribing_self = True
    page_title = label = "Subscribe to branch"

    @action("Subscribe")
    def subscribe(self, action, data):
        # Guard against stale form posts: the user may have become
        # subscribed elsewhere since the page was rendered.
        if not self.context.hasSubscription(self.user):
            level = data['notification_level']
            diff_lines = self.optional_max_diff_lines(
                level, data['max_diff_lines'])
            review = data['review_level']
            self.context.subscribe(
                self.user, level, diff_lines, review,
                self.user)
            self.add_notification_message(
                'You have subscribed to this branch with: ',
                level, diff_lines, review)
        else:
            self.request.response.addNotification(
                'You are already subscribed to this branch.')
class BranchSubscriptionEditOwnView(_BranchSubscriptionView):
    """Page for the current user to edit or drop their own subscription."""
    @property
    def label(self):
        return "Edit subscription to branch"
    @property
    def page_title(self):
        # smartquote turns the straight quotes into typographic ones.
        return smartquote(
            'Edit subscription to branch "%s"' % self.context.displayname)
@property
def initial_values(self):
subscription = self.context.getSubscription(self.user)
if subscription is None:
# This is the case of URL hacking or stale page.
return {}
else:
return {'notification_level': subscription.notification_level,
'max_diff_lines': subscription.max_diff_lines,
'review_level': subscription.review_level}
@action("Change")
def change_details(self, action, data):
# Be proactive in the checking to catch the stale post problem.
if self.context.hasSubscription(self.user):
subscription = self.context.getSubscription(self.user)
subscription.notification_level = data['notification_level']
| subscription.max_diff_lines = self.optional_max_diff_lines(
subscription.notification_level,
data['max_diff_lines'])
subscription.review_level = data['review_level']
self.add_notification_message(
'Subscription updated to: ',
subscription.notification_level,
subscription.max_diff_lines,
subscription.review_level)
else:
| self.request.response.addNotification(
'You are not subscribed to this branch.')
@action("Unsubscribe")
def unsubscribe(self, action, data):
# Be proactive in the checking to catch the stale post problem.
if self.context.hasSubscription(self.user):
self.context.unsubscribe(self.user, self.user)
self.request.response.addNotification(
"You have unsubscribed from this branch.")
else:
self.request.response.addNotification(
'You are not subscribed to this branch.')
class BranchSubscriptionAddOtherView(_BranchSubscriptionView):
"""View used to subscribe someone other than the current user."""
field_names = [
'person', 'notification_level', 'max_diff_lines', 'review_level']
for_input = True
# Since we are subscribing other people, the current user
# is never considered subscribed.
user_is_subscribed = False
subscribing_self = False
page_title = label = "Subscribe to branch"
def validate(self, data):
if data.has_key('person'):
person = data['person']
subscription = self.context.getSubscription(person)
if subscription is None and not self.context.userCanBeSubscribed(
person):
self.setFieldError('person', "Open and delegated teams "
|
projecthamster/hamster | src/hamster-cli.py | Python | gpl-3.0 | 18,772 | 0.002184 | #!/usr/bin/env python3
# - coding: utf-8 -
# Copyright (C) 2010 Matías Ribecky <matias at mribecky.com.ar>
# Copyright (C) 2010-2012 Toms Bauģis <toms.baugis@gmail.com>
# Copyright (C) 2012 Ted Smith <tedks at cs.umd.edu>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
'''A script to control the applet from the command line.'''
import sys, os
import argparse
import re
import gi
gi.require_version('Gdk', '3.0') # noqa: E402
gi.require_version('Gtk', '3.0') # noqa: E402
from gi.repository import GLib as glib
from gi.repository import Gdk as gdk
from gi.repository import Gtk as gtk
from gi.repository import Gio as gio
from gi.repository import GLib as glib
import hamster
from hamster import client, reports
from hamster import logger as hamster_logger
from hamster.about import About
from hamster.edit_activity import CustomFactController
from hamster.overview import Overview
from hamster.preferences import PreferencesEditor
from hamster.lib import default_logger, stuff
from hamster.lib import datetime as dt
from hamster.lib.fact import Fact
logger = default_logger(__file__)
def word_wrap(line, max_len):
    """Greedily wrap *line* into a list of lines.

    Words are packed left-to-right; a word joins the current line only
    while the joined text stays strictly shorter than *max_len*.  A
    single word too long to fit is emitted on a line of its own,
    unbroken.

    :param line: text to wrap; split on any whitespace.
    :param max_len: exclusive upper bound on a wrapped line's length.
    :return: list of wrapped lines (empty for blank input).
    """
    # Fix: the original also initialised a `cur_len` counter that was
    # never read; that dead accumulator has been removed.
    lines = []
    cur_line = ""
    for word in line.split():
        if len("%s %s" % (cur_line, word)) < max_len:
            cur_line = ("%s %s" % (cur_line, word)).strip()
        else:
            if cur_line:
                lines.append(cur_line)
            cur_line = word
    if cur_line:
        lines.append(cur_line)
    return lines
def fact_dict(fact_data, with_date):
    """Return a plain dict of printable fields for *fact_data*.

    :param fact_data: a fact object exposing ``start_time``/``end_time``
        (datetimes; ``end_time`` may be None for an ongoing fact),
        ``delta`` (with a ``format()`` method), ``activity``,
        ``category``, ``tags`` and ``description``.
    :param with_date: when True, timestamps include the date, not just
        the time of day.
    """
    fact = {}
    if with_date:
        fmt = '%Y-%m-%d %H:%M'
    else:
        fmt = '%H:%M'
    fact['start'] = fact_data.start_time.strftime(fmt)
    if fact_data.end_time:
        fact['end'] = fact_data.end_time.strftime(fmt)
    else:
        # Ongoing fact: nothing to show for the end.  (Fix: the original
        # also computed an unused `end_date = dt.datetime.now()` here;
        # that dead clock call has been removed.)
        fact['end'] = ''
    fact['duration'] = fact_data.delta.format()
    fact['activity'] = fact_data.activity
    fact['category'] = fact_data.category
    # Tags render hash-prefixed and space-separated, e.g. "#a #b".
    if fact_data.tags:
        fact['tags'] = ' '.join('#%s' % tag for tag in fact_data.tags)
    else:
        fact['tags'] = ''
    fact['description'] = fact_data.description
    return fact
class Hamster(gtk.Application):
"""Hamster gui.
Actions should eventually be accessible via Gio.DBusActionGroup
with the 'org.gnome.Hamster.GUI' id.
but that is still experimental, the actions API is subject to change.
Discussion with "external" developers welcome !
The separate dbus org.gnome.Hamster.WindowServer
is still the stable recommended way to show windows for now.
"""
    def __init__(self):
        """Create the single Hamster application instance and wire signals."""
        # inactivity_timeout: How long (ms) the service should stay alive
        # after all windows have been closed.
        gtk.Application.__init__(self,
                                 application_id="org.gnome.Hamster.GUI",
                                 #inactivity_timeout=10000,
                                 register_session=True)
        # One controller per window type; all created lazily in _open_window().
        self.about_controller = None  # 'about' window controller
        self.fact_controller = None  # fact window controller
        self.overview_controller = None  # overview window controller
        self.preferences_controller = None  # settings window controller
        self.connect("startup", self.on_startup)
        self.connect("activate", self.on_activate)
        # we need them before the startup phase
        # so register/activate_action work before the app is ran.
        # cf. https://gitlab.gnome.org/GNOME/glib/blob/master/gio/tests/gapplication-example-actions.c
        self.add_actions()
def add_actions(self):
# most actions have no parameters
# for type "i", use Variant.new_int32() and .get_int32() to pack/unpack
for name in ("about", "add", "clone", "edit", "overview", "preferences"):
data_type = glib.VariantType("i") if name in ("edit", "clone") else None
action = gio.SimpleAction.new(name, data_type)
action.connect("activate", self.on_activate_window)
self.add_action(action)
action = gio.SimpleAction.new("quit", None)
action.connect("activate", self.on_activate_quit)
self.add_action(action)
    def on_activate(self, data=None):
        """Default activation: show the overview if no window is open yet."""
        logger.debug("activate")
        if not self.get_windows():
            self.activate_action("overview")
    def on_activate_window(self, action=None, data=None):
        """Shared handler for the window actions: open the matching window.

        The window name is the action's own name; `data` is the optional
        action parameter (an int32 fact id for "edit"/"clone").
        """
        self._open_window(action.get_name(), data)
def on_activate_quit(self, data=None):
self.on_activate_quit()
    def on_startup(self, data=None):
        """One-time application setup: set process and application names."""
        logger.debug("startup")
        # Must be the same as application_id. Won't be required with gtk4.
        glib.set_prgname(self.get_application_id())
        # localized name, but let's keep it simple.
        glib.set_application_name("Hamster")
    def _open_window(self, name, data=None):
        """Create (lazily) and present the window called *name*.

        *name* is one of "about", "add", "clone", "edit", "overview" or
        "preferences"; *data* is the optional action parameter (an int32
        fact id for "add"/"clone"/"edit").

        NOTE(review): an unrecognised *name* would leave `controller`
        unbound and raise NameError below — callers must pass only the
        registered action names.
        """
        logger.debug("opening '{}'".format(name))
        if name == "about":
            if not self.about_controller:
                # silence warning "GtkDialog mapped without a transient parent"
                # https://stackoverflow.com/a/38408127/3565696
                _dummy = gtk.Window()
                self.about_controller = About(parent=_dummy)
                logger.debug("new About")
            controller = self.about_controller
        elif name in ("add", "clone", "edit"):
            if self.fact_controller:
                # Something is already going on, with other arguments, present it.
                # Or should we just discard the forgotten one ?
                logger.warning("Fact controller already active. Please close first.")
            else:
                fact_id = data.get_int32() if data else None
                self.fact_controller = CustomFactController(name, fact_id=fact_id)
                logger.debug("new CustomFactController")
            controller = self.fact_controller
        elif name == "overview":
            if not self.overview_controller:
                self.overview_controller = Overview()
                logger.debug("new Overview")
            controller = self.overview_controller
        elif name == "preferences":
            if not self.preferences_controller:
                self.preferences_controller = PreferencesEditor()
                logger.debug("new PreferencesEditor")
            controller = self.preferences_controller
        window = controller.window
        if window not in self.get_windows():
            self.add_window(window)
            logger.debug("window added")
        # Essential for positioning on wayland.
        # This should also select the correct window type if unset yet.
        # https://specifications.freedesktop.org/wm-spec/wm-spec-1.3.html
        if name != "overview" and self.overview_controller:
            window.set_transient_for(self.overview_controller.window)
            # so the dialog appears on top of the transient-for:
            window.set_type_hint(gdk.WindowTypeHint.DIALOG)
        else:
            # toplevel
            window.set_transient_for(None)
        controller.present()
        logger.debug("window presented")
def present_fact_controller(self, action, fact_id=0):
"""Present the fact controller window to add, clone or edit a fact.
Args:
action (str): "add", "clone" or "edit"
"""
assert action in ("add", "clone", "edit")
if action in ("clone", "edit"):
action_data = glib.Variant.new_int |
mvidalgarcia/indico | indico/modules/events/registration/models/registrations.py | Python | mit | 22,495 | 0.002223 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import posixpath
import time
from collections import OrderedDict
from decimal import Decimal
from uuid import uuid4
from babel.numbers import format_currency
from flask import has_request_context, request, session
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.event import listens_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import mapper
from indico.core import signals
from indico.core.config import config
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime
from indico.core.db.sqlalchemy.util.queries import increment_and_get
from indico.core.storage import StoredFileMixin
from indico.modules.events.payment.models.transactions import TransactionStatus
from indico.modules.users.models.users import format_display_full_name
from indico.util.date_time import now_utc
from indico.util.decorators import classproperty
from indico.util.i18n import L_
from indico.util.locators import locator_property
from indico.util.signals import values_from_signal
from indico.util.string import format_full_name, format_repr, return_ascii, strict_unicode
from indico.util.struct.enum import RichIntEnum
class RegistrationState(RichIntEnum):
    """Lifecycle state of a `Registration`."""
    # Human-readable titles, indexed by enum value (index 0 is unused).
    __titles__ = [None, L_('Completed'), L_('Pending'), L_('Rejected'), L_('Withdrawn'), L_('Awaiting payment')]
    complete = 1
    pending = 2
    rejected = 3
    withdrawn = 4
    unpaid = 5
def _get_next_friendly_id(context):
    """Get the next friendly id for a registration.

    `context` is the SQLAlchemy execution context of the INSERT; the
    event id is read from the row being inserted and the per-event
    counter on `Event` is bumped via `increment_and_get`.
    """
    from indico.modules.events import Event
    event_id = context.current_parameters['event_id']
    assert event_id is not None
    return increment_and_get(Event._last_friendly_registration_id, Event.id == event_id)
class Registration(db.Model):
"""Somebody's registration for an event through a registration form"""
__tablename__ = 'registrations'
__table_args__ = (db.CheckConstraint('email = lower(email)', 'lowercase_email'),
db.Index(None, 'friendly_id', 'event_id', unique=True,
postgresql_where=db.text('NOT is_deleted')),
db.Index(None, 'registration_form_id', 'user_id', unique=True,
postgresql_where=db.text('NOT is_deleted AND (state NOT IN (3, 4))')),
db.Index(None, 'registration_form_id', 'email', unique=True,
postgresql_where=db.text('NOT is_deleted AND (state NOT IN (3, 4))')),
db.ForeignKeyConstraint(['event_id', 'registration_form_id'],
['event_registration.forms.event_id', 'event_registration.forms.id']),
{'schema': 'event_registration'})
#: The ID of the object
id = db.Column(
db.Integer,
primary_key=True
)
#: The unguessable ID for the object
uuid = db.Column(
UUID,
index=True,
unique=True,
nullable=False,
default=lambda: unicode(uuid4())
)
#: The human-friendly ID for the object
friendly_id = db.Column(
db.Integer,
nullable=False,
default=_get_next_friendly_id
)
#: The ID of the event
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
#: The ID of the registration form
registration_form_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.forms.id'),
index=True,
nullable=False
)
#: The ID of the user who registered
user_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=True
)
#: The ID of the latest payment transaction associat | ed with this registration
transaction_id = db.Column(
db.Integer,
db.ForeignKey('events.payment_transactions.id'),
index=True,
unique=True,
nullable=True
)
#: The state a registration | is in
state = db.Column(
PyIntEnum(RegistrationState),
nullable=False,
)
#: The base registration fee (that is not specific to form items)
base_price = db.Column(
db.Numeric(8, 2), # max. 999999.99
nullable=False,
default=0
)
#: The price modifier applied to the final calculated price
price_adjustment = db.Column(
db.Numeric(8, 2), # max. 999999.99
nullable=False,
default=0
)
#: Registration price currency
currency = db.Column(
db.String,
nullable=False
)
#: The date/time when the registration was recorded
submitted_dt = db.Column(
UTCDateTime,
nullable=False,
default=now_utc,
)
#: The email of the registrant
email = db.Column(
db.String,
nullable=False
)
#: The first name of the registrant
first_name = db.Column(
db.String,
nullable=False
)
#: The last name of the registrant
last_name = db.Column(
db.String,
nullable=False
)
#: If the registration has been deleted
is_deleted = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: The unique token used in tickets
ticket_uuid = db.Column(
UUID,
index=True,
unique=True,
nullable=False,
default=lambda: unicode(uuid4())
)
#: Whether the person has checked in. Setting this also sets or clears
#: `checked_in_dt`.
checked_in = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: The date/time when the person has checked in
checked_in_dt = db.Column(
UTCDateTime,
nullable=True
)
#: The Event containing this registration
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'registrations',
lazy='dynamic'
)
)
# The user linked to this registration
user = db.relationship(
'User',
lazy=True,
backref=db.backref(
'registrations',
lazy='dynamic'
# XXX: a delete-orphan cascade here would delete registrations when NULLing the user
)
)
#: The latest payment transaction associated with this registration
transaction = db.relationship(
'PaymentTransaction',
lazy=True,
foreign_keys=[transaction_id],
post_update=True
)
#: The registration this data is associated with
data = db.relationship(
'RegistrationData',
lazy=True,
cascade='all, delete-orphan',
backref=db.backref(
'registration',
lazy=True
)
)
# relationship backrefs:
# - invitation (RegistrationInvitation.registration)
# - legacy_mapping (LegacyRegistrationMapping.registration)
# - registration_form (RegistrationForm.registrations)
# - transactions (PaymentTransaction.registration)
@classmethod
def get_all_for_event(cls, event):
"""Retrieve all registrations in all registration forms of an event."""
from indico.modules.events.registration.models.forms import RegistrationForm
return Registration.find_all(Registration.is_active, ~RegistrationForm.is_deleted,
RegistrationForm.event_id == event.id, _join=Registration.registration_form)
@hybrid_property
def is_active(self):
return not self.is_cancelled and not self.is_deleted
@is_active.expression
def is_active(cls):
return ~cls.is_cancelled & ~cls.is_deleted
@hybrid_property
def is_publishable(self):
return self.is_active and self.state in (RegistrationState.complete, RegistrationState.unpaid)
@is_publishable.expression
def is_publishable(cls):
return cls.is_ac |
tinloaf/home-assistant | tests/components/mqtt/test_config_flow.py | Python | apache-2.0 | 4,724 | 0 | """Test config flow."""
from unittest.mock import patch
import pytest
from homeassistant.setup import async_setup_component
from tests.common import mock_coro, MockConfigEntry
@pytest.fixture(autouse=True)
def mock_finish_setup():
    """Mock out the finish setup method.

    Applied automatically to every test in this module so that no real
    MQTT connection is ever attempted; yields the patch's mock for
    call-count assertions.
    """
    with patch('homeassistant.components.mqtt.MQTT.async_connect',
               return_value=mock_coro(True)) as mock_finish:
        yield mock_finish
@pytest.fixture
def mock_try_connection():
    """Mock the try connection method.

    Opt-in fixture; tests set ``return_value`` to simulate a reachable or
    unreachable broker.
    """
    with patch(
        'homeassistant.components.mqtt.config_flow.try_connection'
    ) as mock_try:
        yield mock_try
async def test_user_connection_works(hass, mock_try_connection,
                                     mock_finish_setup):
    """Test we can finish a config flow."""
    # Broker is reachable.
    mock_try_connection.return_value = True
    result = await hass.config_entries.flow.async_init(
        'mqtt', context={'source': 'user'})
    assert result['type'] == 'form'
    result = await hass.config_entries.flow.async_configure(
        result['flow_id'], {
            'broker': '127.0.0.1',
        }
    )
    assert result['type'] == 'create_entry'
    # Omitted port/discovery fall back to their defaults.
    assert result['result'].data == {
        'broker': '127.0.0.1',
        'port': 1883,
        'discovery': False,
    }
    # Check we tried the connection
    assert len(mock_try_connection.mock_calls) == 1
    # Check config entry got setup
    assert len(mock_finish_setup.mock_calls) == 1
async def test_user_connection_fails(hass, mock_try_connection,
                                     mock_finish_setup):
    """Test if connnection cannot be made."""
    # Broker is unreachable.
    mock_try_connection.return_value = False
    result = await hass.config_entries.flow.async_init(
        'mqtt', context={'source': 'user'})
    assert result['type'] == 'form'
    result = await hass.config_entries.flow.async_configure(
        result['flow_id'], {
            'broker': '127.0.0.1',
        }
    )
    # The flow stays on the form and reports the connection error.
    assert result['type'] == 'form'
    assert result['errors']['base'] == 'cannot_connect'
    # Check we tried the connection
    assert len(mock_try_connection.mock_calls) == 1
    # Check config entry did not setup
    assert len(mock_finish_setup.mock_calls) == 0
async def test_manual_config_set(hass, mock_try_connection,
mock_finish_setup):
"""Test we ignore entry if manual config available."""
assert await async_setup_component(
hass, 'mqtt', {'mqtt': {'broker': 'bla'}})
assert len(mock_finish_setup.mock_calls) == 1
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
'mqtt', context={'source': 'user'})
assert result['type' | ] == 'abort'
async def test_user_single_instance(hass):
    """Test we only allow a single config flow."""
    # An existing entry must make a new user-initiated flow abort.
    MockConfigEntry(domain='mqtt').add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        'mqtt', context={'source': 'user'})
    assert result['type'] == 'abort'
    assert result['reason'] == 'single_instance_allowed'
async def test_hassio_single_instance(hass):
"""Test we only allow a single config flow."""
Mock | ConfigEntry(domain='mqtt').add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
'mqtt', context={'source': 'hassio'})
assert result['type'] == 'abort'
assert result['reason'] == 'single_instance_allowed'
async def test_hassio_confirm(hass, mock_try_connection,
                              mock_finish_setup):
    """Test we can finish a config flow."""
    mock_try_connection.return_value = True
    # Discovery payload as delivered by the Hass.io supervisor.
    result = await hass.config_entries.flow.async_init(
        'mqtt',
        data={
            'addon': 'Mock Addon',
            'host': 'mock-broker',
            'port': 1883,
            'username': 'mock-user',
            'password': 'mock-pass',
            'protocol': '3.1.1'
        },
        context={'source': 'hassio'}
    )
    # The user is asked to confirm, with the add-on name shown.
    assert result['type'] == 'form'
    assert result['step_id'] == 'hassio_confirm'
    assert result['description_placeholders'] == {
        'addon': 'Mock Addon',
    }
    result = await hass.config_entries.flow.async_configure(
        result['flow_id'], {
            'discovery': True,
        }
    )
    # The entry keeps the discovered broker details plus the chosen
    # discovery flag ('addon' itself is not stored).
    assert result['type'] == 'create_entry'
    assert result['result'].data == {
        'broker': 'mock-broker',
        'port': 1883,
        'username': 'mock-user',
        'password': 'mock-pass',
        'protocol': '3.1.1',
        'discovery': True,
    }
    # Check we tried the connection
    assert len(mock_try_connection.mock_calls) == 1
    # Check config entry got setup
    assert len(mock_finish_setup.mock_calls) == 1
|
dongweiming/data-analysis | data_analysis/models.py | Python | apache-2.0 | 603 | 0.004975 | from data_analysis import db
class Apidist(db.Document):
    """Per-endpoint API call statistics (MongoDB document)."""
    # Endpoint name.
    name = db.StringField(max_length=255, required=True)
    # Recorded call count for this endpoint.
    call = db.IntField(required=True)
    # NOTE(review): the meaning of `include` is not evident from this
    # file — presumably a related module/path string; confirm against
    # the code that writes these documents.
    include = db.StringField(max_length=255, required=True)
class Celery(db.Document):
cost = db.FloatField(required=True)
time = db.DateTimeF | ield(required=True)
file = db.StringField(max_length=25, required=True)
task = db.StringField(max_length=255, required=True)
class Mongo(db.Document):
    """Mongo document with per-database usage counters.

    Fixed: the ``hour`` field line was garbled by a stray dataset
    delimiter (``required= | True``); reconstructed as ``required=True``.
    """
    total = db.IntField(required=True)  # overall count
    database = db.StringField(max_length=255, required=True)
    hour = db.DictField(required=True)  # per-hour breakdown, presumably hour -> count
|
gazeti/aleph | docs/conf.py | Python | mit | 10,610 | 0.000189 | # -*- coding: utf-8 -*-
#
# Aleph documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 2 16:22:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'autoapi.extension'
]
# Document Python Code
autoapi_dirs = ['../aleph']
autoapi_ignore = [
'*tests/test_*',
'*migrate*',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# Enable support for Markdown
#
source_parsers = {'.md': CommonMarkParser}
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Aleph'
copyright = u'2016, aleph Contributors'
author = u'aleph Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.1'
# The full version, including alpha/beta/rc tags.
release = u'1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
# (Fixed: the value was garbled by a stray dataset delimiter, "Fa | lse".)
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'default'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Aleph v1.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Alephdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List |
GoodCloud/johnny-cache | johnny/cache.py | Python | mit | 21,360 | 0.002949 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Johnny's main caching functionality."""
import sys
import re
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
from uuid import uuid4
try:
from hashlib import md5
except ImportError:
from md5 import md5
import localstore
import signals
from johnny import settings
from transaction import TransactionManager
from django.core.exceptions import ImproperlyConfigured
try:
any
except NameError:
def any(iterable):
for i in iterable:
if i: return True
return False
local = localstore.LocalStore()
def blacklist_match(*tables):
    """Return True if any of ``tables`` is in the blacklist, False otherwise.

    This runs on EVERY query (see the original XXX note), so it must stay
    cheap. ``isdisjoint`` short-circuits on the first common element and
    never materializes an intersection set, unlike the previous
    ``bool(BLACKLIST.intersection(tables))``; the truth value is identical.
    """
    return not settings.BLACKLIST.isdisjoint(tables)
def get_backend(cache_backend=None, keyhandler=None, keygen=None):
    """Get's a QueryCacheBackend object for the given options and current
    version of django.  If no arguments are given, and a QCB has been
    created previously, ``get_backend`` returns that.  Otherwise,
    ``get_backend`` will return the default backend."""
    import django
    version = django.VERSION[:2]
    # Pick the backend class matching the running django version.
    if version == (1, 1):
        backend_cls = QueryCacheBackend11
    elif version in ((1, 2), (1, 3)):
        backend_cls = QueryCacheBackend
    else:
        raise ImproperlyConfigured("Johnny doesn't work on this version of django.")
    return backend_cls(cache_backend=cache_backend, keyhandler=keyhandler, keygen=keygen)
def invalidate(*tables, **kwargs):
    """Invalidate the current generation for one or more tables. The arguments
    can be either strings representing database table names or models. Pass in
    kwarg ``using`` to set the database."""
    backend = get_backend()
    if not backend._patched:
        # Nothing to invalidate when johnny hasn't patched the ORM.
        return
    db = kwargs.get('using', 'default')

    def table_name(table):
        # Models resolve to their db_table; plain strings pass through.
        if isinstance(table, basestring):
            return table
        return table._meta.db_table

    for name in [table_name(t) for t in tables]:
        backend.keyhandler.invalidate_table(name, db)
def get_tables_for_query(query):
    """Takes a Django 'query' object and returns all tables that will be used in
    that query as a list. Note that where clauses can have their own querysets
    with their own dependent queries, etc."""
    from django.db.models.sql.where import WhereNode
    from django.db.models.query import QuerySet
    # Every aliased table in the query proper (FROM / JOIN clauses).
    tables = [v[0] for v in getattr(query,'alias_map',{}).values()]
    def get_tables(where_node, tables):
        # Recursively walk the WHERE tree; QuerySet leaves are subqueries
        # and contribute their own tables.
        for child in where_node.children:
            if isinstance(child, WhereNode):# and child.children:
                tables = get_tables(child, tables)
                continue
            for item in child:
                if isinstance(item, QuerySet):
                    tables += get_tables_for_query(item.query)
        return tables
    # Only descend when the WHERE clause actually has a WhereNode root.
    if query.where and query.where.children and isinstance(query.where.children[0], WhereNode):
        where_node = query.where.children[0]
        tables = get_tables(where_node, tables)
    # De-duplicate before returning.
    return list(set(tables))
def get_tables_for_query11(query):
    """Takes a django BaseQuery object and tries to return all tables that will
    be used in that query as a list. Unfortunately, the where clauses give us
    "QueryWrapper" instead of "QuerySet" objects, so we have to parse SQL once
    we get down to a certain layer to get the tables we are using. This is
    meant for use in Django 1.1.x only! Later versions can use the above."""
    from django.db.models.sql.where import WhereNode
    from django.db.models.query_utils import QueryWrapper
    def parse_tables_from_sql(sql):
        """This attempts to parse tables out of sql. Django's SQL compiler is
        highly regular and always uses extended SQL forms like 'INNER JOIN'
        instead of ','. This probably needs a lot of testing for different
        backends and is not guaranteed to work on a custom backend."""
        # Matches backtick-quoted table names after FROM/JOIN (MySQL quoting).
        table_re = re.compile(r'(?:FROM|JOIN) `(?P<table>\w+)`')
        return table_re.findall(sql)
    tables = list(query.tables)
    # Only the top-level WhereNode's direct children are inspected here,
    # unlike the recursive walk in get_tables_for_query above.
    if query.where and query.where.children and isinstance(query.where.children[0], WhereNode):
        where_node = query.where.children[0]
        for child in where_node.children:
            if isinstance(child, WhereNode):
                continue
            for item in child:
                if isinstance(item, QueryWrapper):
                    # QueryWrapper.data[0] is raw SQL text; scrape tables from it.
                    tables += parse_tables_from_sql(item.data[0])
    return list(set(tables))
from functools import wraps
def timer(func):
    """Decorator that prints the running average runtime of ``func``.

    Debug helper only: ``times`` lives in this closure and grows for the
    lifetime of the process.  Python 2 only (uses the print statement).
    """
    import time
    times = []
    @wraps(func)
    def foo(*args, **kwargs):
        t0 = time.time()
        ret = func(*args, **kwargs)
        times.append(time.time() - t0)
        print "%d runs, %0.6f avg" % (len(times), sum(times)/float(len(times)))
        return ret
    return foo
# The KeyGen is used only to | generate keys. Some of these keys will be used
# directly in the cache, while others are only general purpose functions to
# generate hashes off of one or more values.
clas | s KeyGen(object):
"""This class is responsible for generating keys."""
def __init__(self, prefix):
self.prefix = prefix
def random_generator(self):
"""Creates a random unique id."""
return self.gen_key(str(uuid4()))
def gen_table_key(self, table, db='default'):
"""Returns a key that is standard for a given table name and database alias.
Total length up to 212 (max for memcache is 250)."""
table = unicode(table)
if db in settings.DATABASE_MAPPING:
db = settings.DATABASE_MAPPING[db]
db = unicode(db)
if len(table) > 100:
table = table[0:68] + self.gen_key(table[68:])
if db and len(db) > 100:
db = db[0:68] + self.gen_key(db[68:])
return '%s_%s_table_%s' % (self.prefix, db, table)
def gen_multi_key(self, values, db='default'):
"""Takes a list of generations (not table keys) and returns a key."""
if db in settings.DATABASE_MAPPING:
db = settings.DATABASE_MAPPING[db]
if db and len(db) > 100:
db = db[0:68] + self.gen_key(db[68:])
return '%s_%s_multi_%s' % (self.prefix, db, self.gen_key(*values))
@staticmethod
def _convert(x):
if isinstance(x, unicode):
return x.encode('utf-8')
return str(x)
@staticmethod
def _recursive_convert(x, key):
for item in x:
if isinstance(item, (tuple, list)):
KeyGen._recursive_convert(item, key)
else:
key.update(KeyGen._convert(item))
def gen_key(self, *values):
"""Generate a key from one or more values."""
key = md5()
KeyGen._recursive_convert(values, key)
return key.hexdigest()
class KeyHandler(object):
"""Handles pulling and invalidating the key from from the cache based
on the table names. Higher-level logic dealing with johnny cache specific
keys go in this class."""
def __init__(self, cache_backend, keygen=KeyGen, prefix=None):
self.prefix = prefix
self.keygen = keygen(prefix)
self.cache_backend = cache_backend
def get_generation(self, *tables, **kwargs):
"""Get the generation key for any number of tables."""
db = kwargs.get('db', 'default')
if len(tables) > 1:
return self.get_multi_generation(tables, db)
return self.get_single_generation(tables[0], db)
def get_single_generation(self, table, db='default'):
"""Creates a random generation value for a single table name"""
key = self.keygen.gen_table_key(table, db)
val = self.cache_backend.get(key, None, db)
#if local.get('in_test', None): print st |
earonne/nadb-sample-site | nadb-sample-site/settings.py | Python | bsd-3-clause | 4,940 | 0.001619 | # Django settings for nadbproj project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'nadbproj', # Or path to database file if using sqlite3.
'USER': 'nadbproj', # Not used with sqlite3.
'PASSWORD': 'nadbproj', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), 'staticfiles'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'zx!582g59qwpwdnds)8b$pm(v-03jgpiq1e1(ix&iyvw*)$_yi'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'nadb-sample-site.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # (Fixed: this entry and the comment above were garbled by stray
    # dataset delimiters -- "__ | file__" reconstructed as "__file__".)
    os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'nadb',
'django.contrib.markup',
'django.contrib.admin',
'django.contrib.comments',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
keflavich/pyspeckit-obsolete | pyspeckit/spectrum/models/gaussfitter.py | Python | mit | 10,557 | 0.019703 | """
===============
Gaussian fitter
===============
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Created 3/17/08
Original version available at http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py
(the version below uses a Class instead of independent functions)
"""
import numpy
from numpy.ma import median
from numpy import pi
# Fixed: the two lines below were garbled by stray dataset delimiters
# ("import | mpfit" and "| from . import model").
from pyspeckit.mpfit import mpfit
import matplotlib.cbook as mpcb
from . import mpfit_messages
from . import model
class gaussian_fitter(model.SpectralModel):
"""
A rather complicated Gaussian fitter class. Inherits from, but overrides
most components of, :mod:`model.SpectralModel`
"""
    def __init__(self,multisingle='multi'):
        """Set up the fitter.

        multisingle : 'multi' or 'single'
            Selects which fit routine ``__call__`` dispatches to.
        """
        self.npars = 3  # parameters per gaussian: amplitude, shift, width
        self.npeaks = 1
        # Wrap the single-gaussian model in the generic 4-parameter fitter.
        self.onepeakgaussfit = self._fourparfitter(self.onepeakgaussian)
        if multisingle in ('multi','single'):
            self.multisingle = multisingle
        else:
            raise Exception("multisingle must be multi or single")
def __call__(self,*args,**kwargs):
if self.multisingle == 'single':
return self.onepeakgaussfit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multigaussfit(*args,**kwargs)
def onepeakgaussian(self, x,H,A,dx,w):
"""
Returns a 1-dimensional gaussian of form
H+A*numpy.exp(-(x-dx)**2/(2*w**2))
[height,amplitude,center,width]
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
return H+A*numpy.exp(-(x-dx)**2/(2*w**2))
def multipeakgaussian(self, x, pars):
"""
Returns flux at position x due to contributions from multiple Gaussians.
"""
x = numpy.array(x) # make sure xarr is no longer a spectroscopic axis
pars = numpy.reshape(pars, (len(pars) / 3, 3))
result = 0
for fit in pars: result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2])
return result
def slope(self, x):
"""
Return slope at position x for multicomponent Gaussian fit. Need this in measurements class for
finding the FWHM of multicomponent lines whose centroids are not identical.
"""
pars = numpy.reshape(self.mpp, (len(self.mpp) / 3, 3))
result = 0
for fit in pars:
result += self.onepeakgaussian(x, 0, fit[0], fit[1], fit[2]) * (-2. * (x - fit[1]) / 2. / fit[2]**2)
return result
def n_gaussian(self, pars=None,a=None,dx=None,sigma=None):
"""
Returns a function that sums over N gaussians, where N is the length of
a,dx,sigma *OR* N = len(pars) / 3
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - a list with len(pars) = 3n, assuming a,dx,sigma repeated
dx - offset (velocity center) values
sigma - line widths
a - amplitudes
"""
if len(pars) % 3 == 0:
a = [pars[ii] for ii in xrange(0,len(pars),3)]
dx = [pars[ii] for ii in xrange(1,len(pars),3)]
sigma = [pars[ii] for ii in xrange(2,len(pars),3)]
elif not(len(dx) == len(sigma) == len(a)):
raise ValueError("Wrong array lengths! dx: %i sigma: %i a: %i" % (len(dx),len(sigma),len(a)))
def g(x):
v = numpy.zeros(len(x))
for ii in range(len(pars)/3):
v += a[ii] * numpy.exp( - ( x - dx[ii] )**2 / (2.0*sigma[ii]**2) )
return v
return g
def multigaussfit(self, xax, data, npeaks=1, err=None, params=[1,0,1],
fixed=[False,False,False], limitedmin=[False,False,True],
limitedmax=[False,False,False], minpars=[0,0,0], maxpars=[0,0,0],
quiet=True, shh=True, veryverbose=False, negamp=None,
tied = ['', '', ''], parinfo=None, debug=False, **kwargs):
"""
An improvement on onepeakgaussfit. Lets you fit multiple gaussians.
Inputs:
xax - x axis
data - y axis
npeaks - How many gaussians to fit? Default 1 (this could supersede onepeakgaussfit)
err - error corresponding to data
These parameters need to have length = 3*npeaks. If npeaks > 1 and length = 3, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [amplitude, offset, width] * npeaks
If len(params) % 3 == 0, npeaks will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
tied - link parameters together
quiet - should MPFIT output each iteration?
shh - output final parameters?
kwargs are passed to mpfit
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if len(params) != npeaks and (len(params) / 3) > npeaks:
self.npeaks = len(params) / 3
else:
self.npeaks = npeaks
if isinstance(params,numpy.ndarray): params=params.tolist()
# make sure all various things are the right length; if they're not, fix them using the defaults
# multiformaldehydefit should process negamp directly if kwargs.has_key('negamp') is False: kwargs['negamp'] = None
pardict = {"params":params,"fixed":fixed,"limitedmin":limitedmin,"limitedmax":limitedmax,"minpars":minpars,"maxpars":maxpars,"tied":tied}
for parlistname in pardict:
parlist = pardict[parlistname]
if len(parlist) != 3*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by 3 to get to the
# right number of formaldehydeians, it will just replicate
if veryverbose: print "Correcting length of parameter %s" % parlistname
if len(parlist) == 3:
parlist *= self.npeaks
elif parlistname=="params":
parlist[:] = [1,0,1] * self.npeaks
elif parlistname=="fixed":
parlist[:] = [False,False,False] * self.npeaks
elif parlistname=="limitedmax":
if negamp is None: parlist[:] = [False,False,False] * self.npeaks
elif negamp is False: parlist[:] = [False,False,False] * self.npeaks
else: parlist[:] = [True,False,False] * self.npeaks
elif parlistname=="limitedmin":
if negamp is None: parlist[:] = [False,False,True] * self.npeaks # Lines can't have negative width!
elif negamp is False: parlist[:] = [True,False,True] * self.npeaks
else: parlist[:] = [False,False,True] * self.npeaks
elif parlistname=="minpars" or parlistname=="maxpars":
parlist[:] = [0,0,0] * self.npeaks
elif parlistname=="tied":
parlist[:] = ['','',''] * self.npeaks
# mpfit doesn't recognize negamp, so get rid of it now that we're done setting limitedmin/max and min/maxpars
#if kwargs.has_key('negamp'): kwargs.pop('negamp')
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_gaussian(pars=p)(x))/err]
return f
if xax is None:
xax = numpy.arange(len(data))
parnames = {0:"AMPLITUDE",1:"SHIFT",2:"WIDTH"}
if parinfo is None:
parinfo = [ {'n':ii, 'value':params[ii],
'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii/3), 'error':ii, 'tied':tied[ii]}
for ii in xrange(len(params)) ]
if ve |
vbelakov/h2o | py/testdir_multi_jvm/test_storeview_diff.py | Python | apache-2.0 | 3,338 | 0.006591 | import unittest, time, random, sys, json
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with it's hdfs namenode
h2o.init(3)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_storeview_diff(self):
print "Do an import/parse then compare the store views on all nodes"
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilelist = [
("covtype.data", 300),
]
trial = 0
importFolderPath = "standard"
for (csvFilename, timeoutSecs) in csvFilelist:
trialStart = time.time()
csvPathname = importFolderPath + "/" + csvFilename
# PARSE****************************************
hex_key = csvFilename + "_" + str(trial) + ".hex"
print "parse start on:", csvFilename
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname,
hex_key=hex_key, timeoutSecs=timeoutSecs)
| elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
|
# INSPECT******************************************
start = time.time()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
h2o_cmd.infoFromInspect(inspect, csvPathname)
# SUMMARY****************************************
# gives us some reporting on missing values, constant values,
# to see if we have x specified well
# figures out everything from parseResult['destination_key']
# needs y to avoid output column (which can be index or name)
# assume all the configs have the same y..just check with the firs tone
goodX = h2o_glm.goodXFromColumnInfo(y=0,
key=parseResult['destination_key'], timeoutSecs=300)
summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=360)
h2o_cmd.infoFromSummary(summaryResult, noPrint=True)
# STOREVIEW***************************************
print "Trying StoreView to all nodes after the parse"
for n, node in enumerate(h2o.nodes):
print "\n*****************"
print "StoreView node %s:%s" % (node.http_addr, node.port)
storeViewResult = h2o_cmd.runStoreView(node, timeoutSecs=30)
f = open(SYNDATASETS_DIR + "/storeview_" + str(n) + ".txt", "w" )
result = json.dump(storeViewResult, f, indent=4, sort_keys=True, default=str)
f.close()
lastStoreViewResult = storeViewResult
print "Trial #", trial, "completed in", time.time() - trialStart, "seconds."
trial += 1
if __name__ == '__main__':
h2o.unit_main()
|
fnp/edumed | forum/search_indexes.py | Python | agpl-3.0 | 226 | 0 | from haystack i | mport | indexes
from pybb.models import Post
class PostIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over pybb forum posts."""
    # Primary document field; content rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    def get_model(self):
        # Index pybb's Post model.
        return Post
|
lolotux/cabot-docker | cabot/cabotapp/models.py | Python | mit | 32,605 | 0.001564 | from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from polymorphic import PolymorphicModel
from django.db.models import F
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from celery.exceptions import SoftTimeLimitExceeded
from .jenkins import get_job_status
from .alert import (
send_alert,
send_alert_update,
AlertPlugin,
AlertPluginUserData,
update_alert_plugins
)
from .calendar import get_events
from .graphite import parse_metric
from .graphite import get_data
from .tasks import update_service, update_instance
from datetime import datetime, timedelta
from django.utils import timezone
import json
import re
import time
import os
import subprocess
import itertools
import requests
from celery.utils.log import get_task_logger
RAW_DATA_LIMIT = 5000
logger = get_task_logger(__name__)
CHECK_TYPES = (
('>', 'Greater than'),
('>=', 'Greater than or equal'),
('<', 'Less than'),
('<=', 'Less than or equal'),
('==', 'Equal to'),
)
def serialize_recent_results(recent_results):
    """Encode results as a comma-separated string, oldest first:
    '1' marks a success, '-1' a failure.  Empty input yields ''."""
    if not recent_results:
        return ''
    # Input is newest-first; emit oldest-first.
    encoded = ['1' if result.succeeded else '-1' for result in recent_results]
    encoded.reverse()
    return ','.join(encoded)
def calculate_debounced_passing(recent_results, debounce=0):
    """
    `debounce` is the number of previous failures we need (not including this)
    to mark a search as passing or failing

    Returns:
      True if passing given debounce factor
      False if failing
    """
    if not recent_results:
        return True
    # Passing as long as any result in the debounce window succeeded.
    window = recent_results[:debounce + 1]
    return any(result.succeeded for result in window)
class CheckGroupMixin(models.Model):
class Meta:
abstract = True
PASSING_STATUS = 'PASSING'
WARNING_STATUS = 'WARNING'
ERROR_STATUS = 'ERROR'
CRITICAL_STATUS = 'CRITICAL'
CALCULATED_PASSING_STATUS = 'passing'
CALCULATED_INTERMITTENT_STATUS = 'intermittent'
CALCULATED_FAILING_STATUS = 'failing'
STATUSES = (
(CALCULATED_PASSING_STATUS, CALCULATED_PASSING_STATUS),
(CALCULATED_INTERMITTENT_STATUS, CALCULATED_INTERMITTENT_STATUS),
(CALCULATED_FAILING_STATUS, CALCULATED_FAILING_STATUS),
)
IMPORTANCES = (
(WARNING_STATUS, 'Warning'),
(ERROR_STATUS, 'Error'),
(CRITICAL_STATUS, 'Critical'),
)
name = models.TextField()
users_to_notify = models.ManyToManyField(
User,
blank=True,
help_text='Users who should receive alerts.',
)
alerts_enabled = models.BooleanField(
default=True,
help_text='Alert when this service is not healthy.',
)
status_checks = models.ManyToManyField(
'StatusCheck',
blank=True,
help_text='Checks used to calculate service status.',
)
last_alert_sent = models.DateTimeField(
null=True,
blank=True,
)
alerts = models.ManyToManyField(
'AlertPlugin',
blank=True,
help_text='Alerts channels through which you wish to be notified'
)
email_alert = models.BooleanField(default=False)
hipchat_alert = models.BooleanField(default=True)
sms_alert = models.BooleanField(default=False)
telephone_alert = models.BooleanField(
default=False,
help_text='Must be enabled, and check importance set to Critical, to receive telephone alerts.',
)
overall_status = models.TextField(default=PASSING_STATUS)
old_overall_status = models.TextField(default=PASSING_STATUS)
hackpad_id = models.TextField(
null=True,
blank=True,
verbose_name='Recovery instructions',
help_text='Gist, Hackpad or Refheap js embed with recovery instructions e.g. https://you.hackpad.com/some_document.js'
)
def __unicode__(self):
return self.name
    def most_severe(self, check_list):
        """Return the worst importance present among *check_list*.

        Severity ranking, worst first: CRITICAL > ERROR > WARNING.  When no
        check reports any of those, the result is PASSING_STATUS.
        """
        failures = [c.importance for c in check_list]
        if self.CRITICAL_STATUS in failures:
            return self.CRITICAL_STATUS
        if self.ERROR_STATUS in failures:
            return self.ERROR_STATUS
        if self.WARNING_STATUS in failures:
            return self.WARNING_STATUS
        return self.PASSING_STATUS
    @property
    def is_critical(self):
        """
        Break out separately because it's a bit of a pain to
        get wrong.
        """
        # True only on the transition *into* CRITICAL: previous overall
        # status was something else and the current one is CRITICAL.
        if self.old_overall_status != self.CRITICAL_STATUS and self.overall_status == self.CRITICAL_STATUS:
            return True
        return False
    def alert(self):
        """Send (or re-send) alerts for this group, honouring rate limits.

        No-op when alerting is disabled.  While unhealthy, WARNING alerts are
        throttled to one per NOTIFICATION_INTERVAL minutes and ERROR/CRITICAL
        alerts to one per ALERT_INTERVAL minutes.  Returning to PASSING resets
        the throttle but is not itself counted as an alert.
        """
        if not self.alerts_enabled:
            return
        if self.overall_status != self.PASSING_STATUS:
            # Don't alert every time
            if self.overall_status == self.WARNING_STATUS:
                if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.NOTIFICATION_INTERVAL)) < self.last_alert_sent:
                    return
            elif self.overall_status in (self.CRITICAL_STATUS, self.ERROR_STATUS):
                if self.last_alert_sent and (timezone.now() - timedelta(minutes=settings.ALERT_INTERVAL)) < self.last_alert_sent:
                    return
            self.last_alert_sent = timezone.now()
        else:
            # We don't count "back to normal" as an alert
            self.last_alert_sent = None
        self.save()
        if self.unexpired_acknowledgement():
            # Incident already acknowledged: send an update, not a new alert.
            send_alert_update(self, duty_officers=get_duty_officers())
        else:
            self.snapshot.did_send_alert = True
            self.snapshot.save()
            send_alert(self, duty_officers=get_duty_officers())
def unexpired_acknowledgements(self):
acknowledgements = self.alertacknowledgement_set.all().filter(
time__gte=timezone.now()-timedelta(minutes=settings.ACKNOWLEDGEMENT_EXPIRY),
cancelled_time__isnull=True,
).order_by('-time')
return acknowledgements
def acknowledge_alert(self, user):
if self.unexpired_acknowledgements(): # Don't allow users to jump on each other
return None
acknowledgement = AlertAcknowledgement.objects.create(
user=user,
time=timezone.now(),
service=self,
)
def remove_acknowledgement(self, user):
self.unexpired_acknowledgements().update(
cancelled_time=timezone.now(),
cancelled_user=user,
)
def unexpired_acknowle | dgement(self):
try:
return self.unexpired_acknowledgements()[0]
except:
return None
@property
def recent_snapshots(self):
snapshots = self.snapshots.filter(
time__gt=(timezone.now() - timedelta(minutes=60 * 24)))
snapshots = list(snapshots.values())
for s in snapshots:
s['time'] = time.mktime(s['time'].timetuple())
return snapsh | ots
def graphite_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='graphitestatuscheck')
def http_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='httpstatuscheck')
def jenkins_status_checks(self):
return self.status_checks.filter(polymorphic_ctype__model='jenkinsstatuscheck')
def active_graphite_status_checks(self):
return self.graphite_status_checks().filter(active=True)
def active_http_status_checks(self):
return self.http_status_checks().filter(active=True)
def active_jenkins_status_checks(self):
return self.jenkins_status_checks().filter(active=True)
def active_status_checks(self):
return self.status_checks.filter(active=True)
def inactive_status_checks(self):
return self.status_checks.filter(active=False)
def all_passing_checks(self):
return self.active_status_checks().filter(calculated_status=self.CALCULATED_PASSING_STATUS)
def all_failing_checks(self):
return self.active_status_checks().exclud |
x3rj/watransport | XMPPLayer.py | Python | gpl-3.0 | 3,760 | 0.009574 |
from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.layers.protocol_acks.protocolentities import OutgoingAckProtocolEntity
from yowsup.layers import YowLayerEvent
from yowsup.layers.network import YowNetworkLayer
from xml.etree import ElementTree as ET
from xml.etree.ElementTree import XMLParser
import io
import threading
import asyncore
import logging
logger = logging.getLogger('watransport.layer')
from Jid import Jid
message_tag = "{jabber:component:accept}message"
iq_tag = "{jabber:component:accept}iq"
presence_tag = "{jabber:component:accept}presence"
class XMPPLayer(YowInterfaceLayer):
account = None
def __init__(self):
YowInterfaceLayer.__init__(self)
self.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
logger.info("Layer initialized")
def onEvent(self, event):
if event.getName() == YowNetworkLayer.EVENT_STATE_DISCONNECT or event.getName() == YowNetworkLayer.EVENT_STATE_DISCONNECTED:
self.account.onYowDisconnect()
logger.info("Got event %s" % event.getName())
@ProtocolEntityCallback("success")
def onSuccess(self, entity):
self.account = self.getProp("xmpp.transport.account")
self.account.onYowConnect(self)
logger.info( "Successfully logged in with %s" % self.account)
@ProtocolEntityCallback("failure")
def onFailure(self, entity):
logger.info( "Login failed, reason: %s" % entity.getReason())
@ProtocolEntityCallback("receipt")
def onReceipt(self, entity):
logger.debug( "Got receipt: %s" % entity)
ack = OutgoingAckProtocolEntity( entity.getId()
| , "receipt"
, entity.getType()
, entity.getFrom()
)
self.toLower(ack)
self.account.incomingWAReceipt(entity)
@ProtocolEntityCal | lback("message")
def onMessage(self, messageProtocolEntity):
receipt = OutgoingReceiptProtocolEntity(messageProtocolEntity.getId(), messageProtocolEntity.getFrom())
self.toLower(receipt)
if not messageProtocolEntity.isGroupMessage():
if messageProtocolEntity.getType() == "text":
self.account.incomingWAMessage(messageProtocolEntity)
elif messageProtocolEntity.getType() == "media":
self.account.incomingWAMedia(messageProtocolEntity)
else:
# not yet implemented
logger.info( "Received group message: %s" % messageProtocolEntity)
@ProtocolEntityCallback("notification")
def onNotification(self, message):
logger.debug( "Got notification: %s" % message)
@ProtocolEntityCallback("ib")
def onIb(self, message):
logger.debug( "Got IB: %s" % message)
@ProtocolEntityCallback("iq")
def onIq(self, message):
logger.debug( "Got Iq: %s" % message)
@ProtocolEntityCallback("chatstate")
def onChatstate(self, message):
logger.debug( "Got chatstate: %s" % message)
@ProtocolEntityCallback("presence")
def onPresence(self, message):
logger.debug( "Got presence: %s" % presence)
#@ProtocolEntityCallback("ack")
#def onAck(self, message):
# # message reached server
# if ack.getClass() == "message":
# msgId = ack.getId()
# # do custom non-standarized stuff
|
marbu/pylatest | contrib/convertdirectives.py | Python | gpl-3.0 | 2,851 | 0.001052 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""
This script changes given rst document in place, converting old test action
directives ``test_step`` and ``test_result`` into ``test_action``.
It's a best effort hack, not an official part of pylatest (-: I realized that
such tool could be quite straighforward given already implemented pylatest
functionality, so here it is.
There are multiple edge cases when this doesn't work, such as:
* empty directives
* directives with content which doesn't start with text paragraph
"""
import argparse
from pylatest.document import TestActions
from pylatest.rstsource import find_actions
from pylatest.xdocutils.core import register_all
parser = argparse.ArgumentParser(
description="Convert directives test_{step,result} into test_action.")
parser.add_argument("rstfile")
args = parser.parse_args()
register_all(use_plain=True)
actions = TestActions()
# extract test actions of all deprecated test_{step,result} directives,
# including line numbers
with open(args.rstfile) as rstfile:
rstsource = rstfile.read()
for action in find_actions(rstsource): |
actions.add(action.action_name, action, action.action_id)
# list with content of rstfile
rstcontent = rstsource.splitline | s()
# number of next line in rstfile to go to output, zero indexed
next_line_number = 0
with open(args.rstfile, "w") as rstfile:
for action_id, test_step, test_result in actions:
# make the assumptions clear
assert test_step is not None
assert test_step.start_line > next_line_number
if test_result is not None:
assert test_step.end_line < test_result.start_line
# print all lines from last printed one to star of the current test_step
for linenum in range(next_line_number, test_step.start_line - 1):
print(rstcontent[linenum], file=rstfile)
# convert test_step/test_result directive pair into test_action directive
print(".. test_action::", file=rstfile)
print(" :step:", file=rstfile)
for linenum in range(test_step.start_line + 1, test_step.end_line):
if len(rstcontent[linenum]) > 0:
print(" " + rstcontent[linenum], file=rstfile)
else:
print(file=rstfile)
next_line_number = test_step.end_line
if test_result is not None:
print(" :result:", file=rstfile)
for linenum in range(test_result.start_line + 1, test_result.end_line):
if len(rstcontent[linenum]) > 0:
print(" " + rstcontent[linenum], file=rstfile)
else:
print(file=rstfile)
next_line_number = test_result.end_line
# ok, and now print the rest of the file
for line in rstcontent[next_line_number:]:
print(line, file=rstfile)
|
OpenSoccerManager/opensoccermanager | structures/shortlist.py | Python | gpl-3.0 | 1,561 | 0 | #!/usr/bin/env python3
# This file is part of OpenSoccerManager.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
class Shortlist:
    '''
    Class handling shortlisted players via user add or transfer negotiation.
    '''
    def __init__(self):
        # Set of shortlisted player ids.
        self.shortlist = set()

    def get_shortlist(self):
        '''
        Return complete set of shortlisted players.
        '''
        return self.shortlist

    def get_player_in_shortlist(self, player):
        '''
        Return whether given player id is already in the shortlist.
        '''
        return player in self.shortlist

    def add_to_shortlist(self, player):
        '''
        Add specified player id to the shortlist.
        '''
        self.shortlist.add(player)

    def remove_from_shortlist(self, player):
        '''
        Remove specified player id from shortlist (no-op when absent).
        '''
        # set.discard replaces the original membership-test-then-remove,
        # avoiding a redundant second lookup.
        self.shortlist.discard(player)
|
rande/tornado-flowdock | tornadoflowdock/push.py | Python | mit | 1,605 | 0.003738 | from tornado.httpclient import HTTPRequest, AsyncHTTPClient
import json
class Flow(object):
def __init__(self, id, token, external_user_name=None):
self.id = id
self.token = token
self.external_user_name = external_user_name
self.http_client = Asyn | cHTTPClient()
def _post(self, push_type, body, callback=None):
request = | HTTPRequest("https://api.flowdock.com/v1/messages/%s/%s" % (push_type, self.token), **{
'headers': {
'Content-Type': 'application/json'
},
'method': "POST",
'body': json.dumps(body)
})
self.http_client.fetch(request, callback)
    def chat(self, content, external_user_name=None, callback=None, message_id=None, tags=None):
        """Push a chat message to the flow.

        Falls back to the Flow-level ``external_user_name`` when none is
        given; ``tags`` defaults to an empty list.
        """
        self._post("chat", {
            'event': 'message',
            'content': content,
            'external_user_name': external_user_name or self.external_user_name,
            'message_id': message_id,
            'tags': tags or []
        }, callback=callback)
    def team_inbox(self, source, from_address, subject, content, from_name=None, reply_to=None, project=None, format=None, callback=None, tags=None, link=None):
        """Post a message to the flow's team inbox.

        ``source``, ``from_address``, ``subject`` and ``content`` are
        required; the remaining keyword arguments are optional Flowdock
        metadata passed through unchanged.
        """
        self._post("team_inbox", {
            'source': source,
            'from_address': from_address,
            'subject': subject,
            'content': content,
            'from_name': from_name,
            'reply_to': reply_to,
            'project': project,
            'format': format,
            'tags': tags or [],
            'link': link,
        }, callback=callback)
|
bluebreezecf/kafka | tests/kafkatest/services/mirror_maker.py | Python | apache-2.0 | 8,150 | 0.002577 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.services.kafka.directory import kafka_dir
import os
import subprocess
"""
0.8.2.1 MirrorMaker options
Option Description
------ -----------
--abort.on.send.failure <Stop the Configure the mirror maker to exit on
entire mirror maker when a send a failed send. (default: true)
failure occurs>
--blacklist <Java regex (String)> Blacklist of topics to mirror.
--consumer.config <config file> Embedded consumer config for consuming
from the source cluster.
--consumer.rebalance.listener <A The consumer rebalance listener to use
custom rebalance listener of type for mirror maker consumer.
ConsumerRebalanceListener>
--help Print this message.
--message.handler <A custom message Message handler which will process
handler of type every record in-between consumer and
MirrorMakerMessageHandler> producer.
--message.handler.args <Arguments Arguments used by custom rebalance
passed to message handler listener for mirror maker consumer
constructor.>
--num.streams <Integer: Number of Number of consu | mption streams.
threads> (default: 1)
--offset.commit.interval.ms <Integer: Offset commit interval in ms (default:
offset com | mit interval in 60000)
millisecond>
--producer.config <config file> Embedded producer config.
--rebalance.listener.args <Arguments Arguments used by custom rebalance
passed to custom rebalance listener listener for mirror maker consumer
constructor as a string.>
--whitelist <Java regex (String)> Whitelist of topics to mirror.
"""
class MirrorMaker(Service):
# Root directory for persistent output
PERSISTENT_ROOT = "/mnt/mirror_maker"
LOG_DIR = os.path.join(PERSISTENT_ROOT, "logs")
LOG_FILE = os.path.join(LOG_DIR, "mirror_maker.log")
LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PRODUCER_CONFIG = os.path.join(PERSISTENT_ROOT, "producer.properties")
CONSUMER_CONFIG = os.path.join(PERSISTENT_ROOT, "consumer.properties")
logs = {
"mirror_maker_log": {
"path": LOG_FILE,
"collect_default": True}
}
def __init__(self, context, num_nodes, source, target, whitelist=None, blacklist=None, num_streams=1, consumer_timeout_ms=None):
"""
MirrorMaker mirrors messages from one or more source clusters to a single destination cluster.
Args:
context: standard context
source: source Kafka cluster
target: target Kafka cluster to which data will be mirrored
whitelist: whitelist regex for topics to mirror
blacklist: blacklist regex for topics not to mirror
num_streams: number of consumer threads to create; can be a single int, or a list with
one value per node, allowing num_streams to be the same for each node,
or configured independently per-node
consumer_timeout_ms: consumer stops if t > consumer_timeout_ms elapses between consecutive messages
"""
super(MirrorMaker, self).__init__(context, num_nodes=num_nodes)
self.consumer_timeout_ms = consumer_timeout_ms
self.num_streams = num_streams
if not isinstance(num_streams, int):
# if not an integer, num_streams should be configured per-node
assert len(num_streams) == num_nodes
self.whitelist = whitelist
self.blacklist = blacklist
self.source = source
self.target = target
def start_cmd(self, node):
cmd = "export LOG_DIR=%s;" % MirrorMaker.LOG_DIR
cmd += " export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\";" % MirrorMaker.LOG4J_CONFIG
cmd += " /opt/%s/bin/kafka-run-class.sh kafka.tools.MirrorMaker" % kafka_dir(node)
cmd += " --consumer.config %s" % MirrorMaker.CONSUMER_CONFIG
cmd += " --producer.config %s" % MirrorMaker.PRODUCER_CONFIG
if isinstance(self.num_streams, int):
cmd += " --num.streams %d" % self.num_streams
else:
# config num_streams separately on each node
cmd += " --num.streams %d" % self.num_streams[self.idx(node) - 1]
if self.whitelist is not None:
cmd += " --whitelist=\"%s\"" % self.whitelist
if self.blacklist is not None:
cmd += " --blacklist=\"%s\"" % self.blacklist
cmd += " 1>> %s 2>> %s &" % (MirrorMaker.LOG_FILE, MirrorMaker.LOG_FILE)
return cmd
    def pids(self, node):
        """Return the list of MirrorMaker JVM pids running on *node*.

        Returns [] when no process is found or the ssh capture fails.
        """
        try:
            cmd = "ps ax | grep -i MirrorMaker | grep java | grep -v grep | awk '{print $1}'"
            pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
            return pid_arr
        except (subprocess.CalledProcessError, ValueError) as e:
            return []
def alive(self, node):
return len(self.pids(node)) > 0
def start_node(self, node):
node.account.ssh("mkdir -p %s" % MirrorMaker.PERSISTENT_ROOT, allow_fail=False)
node.account.ssh("mkdir -p %s" % MirrorMaker.LOG_DIR, allow_fail=False)
# Create, upload one consumer config file for source cluster
consumer_props = self.render('consumer.properties', zookeeper_connect=self.source.zk.connect_setting())
node.account.create_file(MirrorMaker.CONSUMER_CONFIG, consumer_props)
# Create, upload producer properties file for target cluster
producer_props = self.render('producer.properties', broker_list=self.target.bootstrap_servers(),
producer_type="async")
node.account.create_file(MirrorMaker.PRODUCER_CONFIG, producer_props)
# Create and upload log properties
log_config = self.render('tools_log4j.properties', log_file=MirrorMaker.LOG_FILE)
node.account.create_file(MirrorMaker.LOG4J_CONFIG, log_config)
# Run mirror maker
cmd = self.start_cmd(node)
self.logger.debug("Mirror maker command: %s", cmd)
node.account.ssh(cmd, allow_fail=False)
wait_until(lambda: self.alive(node), timeout_sec=10, backoff_sec=.5,
err_msg="Mirror maker took to long to start.")
self.logger.debug("Mirror maker is alive")
def stop_node(self, node):
node.account.kill_process("java", allow_fail=True)
wait_until(lambda: not self.alive(node), timeout_sec=10, backoff_sec=.5,
err_msg="Mirror maker took to long to stop.")
def clean_node(self, node):
if self.alive(node):
self.logger.warn("%s %s was still alive at cleanup time. Killing forcefully..." %
(self.__class__.__name__, node.account))
node.account.kill_process("java", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf %s" % MirrorMaker.PERSISTENT_ROOT, allow_fail=False)
|
wutron/compbio | compbio/vis/argvis.py | Python | mit | 29,618 | 0.00368 | """
Visualization of ARGs (Ancestral Recombination Graphs)
"""
# python imports
from itertools import chain, izip
import random
from math import *
# rasmus imports
from rasmus import util, treelib, stats, sets
# compbio imports
from compbio import arglib
# summon imports
import summon
from summon import sumtree
from summon.shapes import box
from summon.core import *
#=============================================================================
# visualization
def minlog(x, default=10):
    """Natural log of *x*, with *x* clamped below at *default*.

    Guards against taking the log of zero/tiny values.
    """
    clamped = x if x > default else default
    return log(clamped)
def layout_arg_leaves(arg):
"""Layout the leaves of an ARG"""
basetree = treelib.Tree()
nodes = list(arg.postorder())
nodes.sort(key=lambda x: x.age)
lookup = {}
for node in nodes:
if node.is_leaf():
lookup[node] = basetree.new_node(node.name)
else:
basechildren = []
for child in node.children:
basechild = lookup[child]
while basechild.parent:
basechild = basechild.parent
basechildren.append(basechild)
basechildren = util.unique(basechildren)
if len(basechildren) > 1:
lookup[node] = basenode = basetree.new_node(node.name)
for basechild in basechildren:
basetree.add_child(basenode, basechild)
else:
lookup[node] = basechildren[0]
basetree.root = lookup[nodes[-1]]
# assign layout based on basetree layout
# layout leaves
return dict((arg[name], i) for i, name in enumerate(basetree.leaf_names()))
def layout_arg(arg, leaves=None, yfunc=lambda x: x):
"""Layout the nodes of an ARG"""
layout = {}
# layout leaves
if leaves is None:
leafx = layout_arg_leaves(arg)
else:
leafx = util.list2lookup(leaves)
for node in arg.postorder():
if node.is_leaf():
layout[node] = [leafx[node], yfunc(node.age)]
else:
layout[node] = [
stats.mean(layout[child][0] for child in node.children),
yfunc(node.age)]
return layout
def map_layout(layout, xfunc=lambda x: x, yfunc=lambda x: x):
    """Apply *xfunc*/*yfunc* to every [x, y] pair in *layout*, in place.

    Returns the same (mutated) layout dict for convenience.
    """
    for key in layout:
        pos = layout[key]
        layout[key] = [xfunc(pos[0]), yfunc(pos[1])]
    return layout
def get_branch_layout(layout, node, parent, side=0, recomb_width=.4):
    """Return [x1, y1, x2, y2] endpoints for the branch node -> parent.

    Recombination nodes are offset horizontally by *recomb_width* so their
    two outgoing branches do not overlap; *side* disambiguates the case
    where both parents are the same node.
    """
    node_x, node_y = layout[node]
    parent_y = layout[parent][1]
    if node.event != "recomb":
        # Ordinary node: a straight vertical branch.
        return [node_x, node_y, node_x, parent_y]
    # Decide which side of the recombination node this branch leaves from.
    if len(node.parents) == 2 and node.parents[0] == node.parents[1]:
        which = side
    else:
        which = node.parents.index(parent)
    x = node_x + recomb_width * (1 if which else -1)
    return [x, node_y, x, parent_y]
def show_arg(arg, layout=None, leaves=None, mut=None, recomb_width=.4,
win=None):
"""Visualize an ARG"""
if win is None:
win = summon.Window()
else:
win.clear_groups()
# ensure layout
if layout is None:
layout = layout_arg(arg, leaves)
# callbacks
def branch_click(node, parent):
print node.name, parent.name
# draw ARG
win.add_group(draw_arg(arg, layout, recomb_width=recomb_width,
| branch_click=branch_click))
# draw mutations
if mut:
g = group()
for node, parent, pos, t in mut:
x1, y1, x2, y2 = get_branch_layout(layout, node, parent)
g.append(group(draw_mark(x1, t, col=(0,0,1)), color(1,1,1)))
w | in.add_group(g)
return win
def draw_arg(arg, layout, recomb_width=.4, branch_click=None):
def branch_hotspot(node, parent, x, y, y2):
def func():
branch_click(node, parent)
return hotspot("click", x-.5, y, x+.5, y2, func)
# draw branches
g = group(color(1,1,1))
for node in layout:
if not node.is_leaf():
x, y = layout[node]
for i, child in enumerate(node.children):
cx, cy = layout[child]
x1, y1, x2, y2 = get_branch_layout(
layout, child, node, i, recomb_width=recomb_width)
g.append(line_strip(x, y, x2, y2, x1, y1, cx, cy))
if branch_click:
g.append(branch_hotspot(child, node, x1, y1, y2))
# draw recomb
for node in layout:
if node.event == "recomb":
x, y = layout[node]
g.append(draw_mark(x, y, col=(1, 0, 0)))
return g
def show_marginal_trees(arg, mut=None):
win = summon.Window()
x = 0
step = 2
treewidth = len(list(arg.leaves())) + step
def trans_camera(win, x, y):
v = win.get_visible()
win.set_visible(v[0]+x, v[1]+y, v[2]+x, v[3]+y, "exact")
win.set_binding(input_key("]"), lambda : trans_camera(win, treewidth, 0))
win.set_binding(input_key("["), lambda : trans_camera(win, -treewidth, 0))
blocks = arglib.iter_recomb_blocks(arg)
for tree, block in izip(arglib.iter_marginal_trees(arg), blocks):
pos = block[0]
print pos
leaves = sorted((x for x in tree.leaves()), key=lambda x: x.name)
layout = layout_arg(tree, leaves)
win.add_group(
translate(x, 0, color(1,1,1),
draw_tree(tree, layout),
text_clip(
"%d-%d" % (block[0], block[1]),
treewidth*.05, 0,
treewidth*.95, -max(l[1] for l in layout.values()),
4, 20,
"center", "top")))
# mark responsible recomb node
for node in tree:
if pos != 0.0 and node.pos == pos:
nx, ny = layout[node]
win.add_group(draw_mark(x + nx, ny))
# draw mut
if mut:
for node, parent, mpos, t in mut:
if (node.name in tree and node.name != tree.root.name and
block[0] < mpos < block[1]):
nx, ny = layout[tree[node.name]]
win.add_group(draw_mark(x + nx, t, col=(0,0,1)))
if node.name in tree and tree[node.name].parents:
nx, ny = layout[tree[node.name]]
py = layout[tree[node.name].parents[0]][1]
start = arg[node.name].data["ancestral"][0][0]
win.add_group(lines(color(0,1,0),
x+nx, ny, x+nx, py,
color(1,1,1)))
x += treewidth
win.set_visible(* win.get_root().get_bounding() + ("exact",))
return win
def show_tree_track(tree_track, mut=None, show_labels=False,
use_blocks=False, branch_click=None):
"""
tree_track = [((start, end), tree), ...]
"""
def draw_labels(tree, layout):
return group(*
[text_clip(leaf.name, layout[leaf][0], layout[leaf][1],
1, layout[leaf][1] + 1e4, 4, 20, "middle", "left")
for leaf in tree.leaves()])
def branch_hotspot(node, parent, x, y, y2):
def func():
branch_click(node, parent)
return hotspot("click", x-.5, y, x+.5, y2, func)
def print_branch(node, parent):
print "node", node.name
tree_track = iter(tree_track)
if mut:
mut = util.PushIter(mut)
block, tree = tree_track.next()
if branch_click is True:
branch_click = print_branch
win = summon.Window()
treex = 0
step = 2
treewidth = len(list(tree.leaves())) + step
def trans_camera(win, x, y):
v = win.get_visible()
win.set_visible(v[0]+x, v[1]+y, v[2]+x, v[3]+y, "exact")
win.set_binding(input_key("]"), lambda : trans_camera(win, treewidth, 0))
win.set_binding(input_key("["), lambda : trans_camera(win, -treewidth, 0))
for block, tree in chain([(block, tree)], tree_track):
pos = block[0]
print pos
layout = treelib.layout_tree(tree, xscale=1, yscale=1)
treelib.layout_tree_vertical(layout, leaves=0)
g = win.add_group(
translate(treex, |
eelcovv/vapory | examples/pawn.py | Python | mit | 1,312 | 0.046494 | """ Just a purple sphere """
from vapory import *
objects = [
# SUN
LightSource([1500,2500,-2500], 'color',1),
# SKY
Sphere( [0,0,0],1, 'hollow',
Texture(
Pigment( 'gradient', [0,1,0],
'color_map{[0 color White] [1 color Blue ]}'
'quick_color', 'White'
| ),
Finish( 'ambient', 1, 'diffuse', 0)
),
'scale', 10000
),
# GROUND
Plane( [0,1,0], 0 ,
Texture( Pigment( 'color', [1.1*e for e in [0.80,0.55,0.35]])),
Normal( 'bumps', 0.75, 'scale', 0.035),
Finish( 'phong', 0.1 )
),
# PAWN
Union( Sphere([0,1,0],0.35),
Cone([0,0,0],0.5,[0,1,0 | ],0.0),
Texture( Pigment( 'color', [1,0.65,0])),
Finish( 'phong', 0.5)
)
]
scene = Scene( Camera( 'ultra_wide_angle',
'angle',45,
'location',[0.0 , 0.6 ,-3.0],
'look_at', [0.0 , 0.6 , 0.0]
),
objects= objects,
included=['colors.inc']
)
scene.render('pawn.png', remove_temp=False) |
shakamunyi/solum | solum/objects/sqlalchemy/__init__.py | Python | apache-2.0 | 1,920 | 0 | # Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:// | www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from solum import objects
from solum.objects import extension | as abstract_extension
from solum.objects import operation as abstract_operation
from solum.objects import plan as abstract_plan
from solum.objects import sensor as abstract_sensor
from solum.objects import service as abstract_srvc
from solum.objects.sqlalchemy import extension
from solum.objects.sqlalchemy import operation
from solum.objects.sqlalchemy import plan
from solum.objects.sqlalchemy import sensor
from solum.objects.sqlalchemy import service
def load():
    """Activate the sqlalchemy backend.

    Registers the concrete sqlalchemy implementation for each abstract
    object type with the global object registry, so lookups through
    ``solum.objects.registry`` resolve to sqlalchemy-backed classes.
    """
    objects.registry.add(abstract_plan.Plan, plan.Plan)
    objects.registry.add(abstract_plan.PlanList, plan.PlanList)
    objects.registry.add(abstract_srvc.Service, service.Service)
    objects.registry.add(abstract_srvc.ServiceList, service.ServiceList)
    objects.registry.add(abstract_operation.Operation, operation.Operation)
    objects.registry.add(abstract_operation.OperationList,
                         operation.OperationList)
    objects.registry.add(abstract_sensor.Sensor, sensor.Sensor)
    objects.registry.add(abstract_sensor.SensorList, sensor.SensorList)
    objects.registry.add(abstract_extension.Extension, extension.Extension)
    objects.registry.add(abstract_extension.ExtensionList,
                         extension.ExtensionList)
|
ThePerkinrex/PercOS | PercOS_filesystem/bin/cd.py | Python | gpl-3.0 | 709 | 0 | from command import Command
from os import path
class cd(Command):
name = 'cd'
desc = 'Changes the working directory'
u | sage = 'cd <place to go>'
author = 'native'
def call(self, args=None):
if args is not None:
if len(arg | s) == 0:
print('I need at least the place to go to work')
else:
toGo = self.dire.cd(args[0], True)
if path.exists(toGo.realdir):
if path.isdir(toGo.realdir):
self.dire.cd(args[0], False)
else:
print('That isn\'t a directory')
else:
print('That doesn\'t exist')
|
affordablewindurbines/jarvisproject | client/modules/knowledged.py | Python | gpl-3.0 | 982 | 0.003055 | # -*- coding: utf-8-*-
import random
import re
import wolframalpha
import time
import sys
from sys import maxint
from client import jarvispath
WORDS = ["WHO", "WHAT", "WHERE", "HOW MUCH" | ]
def handle(text, mic, profile):
app_id=profile['keys']['WOLFRAMALPHA']
client = wolframalpha.Client(app_id)
query = client.query(text)
if len(query.pods) > 0:
tex | ts = ""
pod = query.pods[1]
if pod.text:
texts = pod.text
else:
texts = "I can not find anything"
mic.say(texts.replace("|",""))
else:
mic.say("Sorry, Could you be more specific?.")
def isValid(text):
    """Return True when *text* looks like a factual question, i.e. it
    contains who/what/where/"how much" as whole words (case-insensitive)."""
    patterns = (r'\bwho\b', r'\bwhat\b', r'\bwhere\b', r'\bhow much\b')
    return any(re.search(pattern, text, re.IGNORECASE) is not None
               for pattern in patterns)
|
wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/sympy/sympy/functions/special/tests/test_error_functions.py | Python | bsd-3-clause | 24,044 | 0.00287 | from sympy import (
symbols, expand, expand_func, nan, oo, Float, conjugate, diff,
re, im, Abs, O, factorial, exp_polar, polar_lift, gruntz, limit,
Symbol, I, integrate, S,
sqrt, sin, cos, sinh, cosh, exp, log, pi, EulerGamma,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv,
gamma, uppergamma, loggamma,
Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc,
hyper, meijerg)
from sympy.functions.special.error_functions import _erfs, _eis
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
x, y, z = symbols('x,y,z')
w = Symbol("w", real=True)
n = Symbol("n", integer=True)
def test_erf():
    """Spot-check erf: special values, odd symmetry, inverse functions,
    rewrites to related special functions, limits and as_real_imag."""
    # Special values and odd symmetry.
    assert erf(nan) == nan
    assert erf(oo) == 1
    assert erf(-oo) == -1
    assert erf(0) == 0
    assert erf(I*oo) == oo*I
    assert erf(-I*oo) == -oo*I
    assert erf(-2) == -erf(2)
    assert erf(-x*y) == -erf(x*y)
    assert erf(-x - y) == -erf(x + y)
    # erf composed with its inverse functions collapses.
    assert erf(erfinv(x)) == x
    assert erf(erfcinv(x)) == 1 - x
    assert erf(erf2inv(0, x)) == x
    assert erf(erf2inv(0, erf(erfcinv(1 - erf(erfinv(x)))))) == x
    assert erf(I).is_real is False
    assert erf(0).is_real is True
    assert conjugate(erf(z)) == erf(conjugate(z))
    assert erf(x).as_leading_term(x) == 2*x/sqrt(pi)
    assert erf(1/x).as_leading_term(x) == erf(1/x)
    # Rewrites in terms of related special functions.
    assert erf(z).rewrite('uppergamma') == sqrt(z**2)*erf(sqrt(z**2))/z
    assert erf(z).rewrite('erfc') == S.One - erfc(z)
    assert erf(z).rewrite('erfi') == -I*erfi(I*z)
    assert erf(z).rewrite('fresnels') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erf(z).rewrite('fresnelc') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erf(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
    assert erf(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
    assert erf(z).rewrite('expint') == sqrt(z**2)/z - z*expint(S.Half, z**2)/sqrt(S.Pi)
    # Asymptotic behaviour for large argument.
    assert limit(exp(x)*exp(x**2)*(erf(x + 1/exp(x)) - erf(x)), x, oo) == \
        2/sqrt(pi)
    assert limit((1 - erf(z))*exp(z**2)*z, z, oo) == 1/sqrt(pi)
    assert limit((1 - erf(x))*exp(x**2)*sqrt(pi)*x, x, oo) == 1
    assert limit(((1 - erf(x))*exp(x**2)*sqrt(pi)*x - 1)*2*x**2, x, oo) == -1
    # Decomposition into real and imaginary parts.
    assert erf(x).as_real_imag() == \
        ((erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
        erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
        I*(erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
        erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
        re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
    # erf is unary; asking for the 2nd-argument derivative must fail.
    raises(ArgumentIndexError, lambda: erf(x).fdiff(2))
def test_erf_series():
    """Maclaurin expansion of erf to order 7."""
    expected = 2*x/sqrt(pi) - 2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
    assert erf(x).series(x, 0, 7) == expected
def test_erf_evalf():
    """Numeric evaluation of erf(2) against a reference value."""
    assert abs(erf(Float(2.0)) - 0.995322265) < 1E-8  # XXX
def test__erfs():
    """Checks on the internal helper _erfs (tractable form of erfc)."""
    # Derivative and asymptotic series of the helper itself.
    assert _erfs(z).diff(z) == -2/sqrt(S.Pi) + 2*z*_erfs(z)
    assert _erfs(1/z).series(z) == \
        z/sqrt(pi) - z**3/(2*sqrt(pi)) + 3*z**5/(4*sqrt(pi)) + O(z**6)
    # Round-tripping through the tractable form preserves the derivative.
    assert expand(erf(z).rewrite('tractable').diff(z).rewrite('intractable')) \
        == erf(z).diff(z)
    assert _erfs(z).rewrite("intractable") == (-erf(z) + 1)*exp(z**2)
def test_erfc():
    """Spot-check erfc: special values, reflection, rewrites and
    as_real_imag (mirrors test_erf)."""
    # Special values and the reflection formula erfc(-x) = 2 - erfc(x).
    assert erfc(nan) == nan
    assert erfc(oo) == 0
    assert erfc(-oo) == 2
    assert erfc(0) == 1
    assert erfc(I*oo) == -oo*I
    assert erfc(-I*oo) == oo*I
    assert erfc(-x) == S(2) - erfc(x)
    assert erfc(erfcinv(x)) == x
    assert erfc(I).is_real is False
    assert erfc(0).is_real is True
    assert conjugate(erfc(z)) == erfc(conjugate(z))
    assert erfc(x).as_leading_term(x) == S.One
    assert erfc(1/x).as_leading_term(x) == erfc(1/x)
    # Rewrites in terms of related special functions.
    assert erfc(z).rewrite('erf') == 1 - erf(z)
    assert erfc(z).rewrite('erfi') == 1 + I*erfi(I*z)
    assert erfc(z).rewrite('fresnels') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erfc(z).rewrite('fresnelc') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erfc(z).rewrite('hyper') == 1 - 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
    assert erfc(z).rewrite('meijerg') == 1 - z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
    assert erfc(z).rewrite('uppergamma') == 1 - sqrt(z**2)*erf(sqrt(z**2))/z
    assert erfc(z).rewrite('expint') == S.One - sqrt(z**2)/z + z*expint(S.Half, z**2)/sqrt(S.Pi)
    # Decomposition into real and imaginary parts.
    assert erfc(x).as_real_imag() == \
        ((erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
        erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
        I*(erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
        erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
        re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
    # erfc is unary; 2nd-argument derivative must fail.
    raises(ArgumentIndexError, lambda: erfc(x).fdiff(2))
def test_erfc_series():
    """Maclaurin expansion of erfc to order 7."""
    expected = 1 - 2*x/sqrt(pi) + 2*x**3/3/sqrt(pi) - x**5/5/sqrt(pi) + O(x**7)
    assert erfc(x).series(x, 0, 7) == expected
def test_erfc_evalf():
    """Numeric evaluation of erfc(2) against a reference value."""
    assert abs(erfc(Float(2.0)) - 0.00467773) < 1E-8  # XXX
def test_erfi():
    """Spot-check erfi (imaginary error function): special values, odd
    symmetry, rewrites and as_real_imag."""
    # Special values and odd symmetry.
    assert erfi(nan) == nan
    assert erfi(oo) == S.Infinity
    assert erfi(-oo) == S.NegativeInfinity
    assert erfi(0) == S.Zero
    assert erfi(I*oo) == I
    assert erfi(-I*oo) == -I
    assert erfi(-x) == -erfi(x)
    # Composition with inverse error functions along the imaginary axis.
    assert erfi(I*erfinv(x)) == I*x
    assert erfi(I*erfcinv(x)) == I*(1 - x)
    assert erfi(I*erf2inv(0, x)) == I*x
    assert erfi(I).is_real is False
    assert erfi(0).is_real is True
    assert conjugate(erfi(z)) == erfi(conjugate(z))
    # Rewrites in terms of related special functions.
    assert erfi(z).rewrite('erf') == -I*erf(I*z)
    assert erfi(z).rewrite('erfc') == I*erfc(I*z) - I
    assert erfi(z).rewrite('fresnels') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
        I*fresnels(z*(1 + I)/sqrt(pi)))
    assert erfi(z).rewrite('fresnelc') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
        I*fresnels(z*(1 + I)/sqrt(pi)))
    assert erfi(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], z**2)/sqrt(pi)
    assert erfi(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], -z**2)/sqrt(pi)
    assert erfi(z).rewrite('uppergamma') == (sqrt(-z**2)/z*(uppergamma(S.Half,
        -z**2)/sqrt(S.Pi) - S.One))
    assert erfi(z).rewrite('expint') == sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(S.Pi)
    # Decomposition into real and imaginary parts.
    assert erfi(x).as_real_imag() == \
        ((erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
        erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
        I*(erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
        erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
        re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
    # erfi is unary; 2nd-argument derivative must fail.
    raises(ArgumentIndexError, lambda: erfi(x).fdiff(2))
def test_erfi_series():
    """Maclaurin expansion of erfi to order 7 (all-positive terms)."""
    expected = 2*x/sqrt(pi) + 2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
    assert erfi(x).series(x, 0, 7) == expected
def test_erfi_evalf():
    """Numeric evaluation of erfi(2) against a reference value."""
    assert abs(erfi(Float(2.0)) - 18.5648024145756) < 1E-13  # XXX
def test_erf2():
assert erf2(0, 0) == S.Zero
assert erf2(x, x) == S.Zero
assert erf2(nan, 0) == nan
assert erf2(-oo, y) == erf(y) + 1
assert erf2( oo, y) == erf(y) - 1
assert erf2( x, oo) == 1 - erf(x)
assert erf2( x,-oo) == -1 - erf(x)
assert erf2(x, erf2inv(x, y)) == y
assert erf2(-x, -y) == -erf2(x,y)
assert erf2(-x, y) == erf(y) + erf(x)
assert erf2( x, -y) == -erf(y) - erf(x)
assert erf2(x, y).rewrite('fresnels') == erf(y).rewrite(fresnels)-erf(x).rewrite(fresnels)
assert erf2(x, y).rewrite('fresnelc') == erf(y).rewrite(fresnelc)-erf(x).rewrite(fresnelc)
assert erf2(x, y).rewrite('hyper') == erf(y).rewrite(hyper)-erf(x).rewrite(hyper)
assert erf2(x, y).rewrite('meijerg') == erf(y).rewrite(meijerg)-erf(x).rewrite(meijerg)
assert erf2(x, y).rewrite('uppergamma') == erf(y).rewrite(uppergamma) - erf(x).rewrite(uppergamma)
assert erf2(x, y).rewrite('expint') == erf(y).rewrite(expint)-erf(x).rewrite(expint)
assert erf2(I, 0).is_real is False
assert erf2(0, 0).is_real is True
#assert conjugate(erf2(x, y)) == erf2(conjugate(x), conjugate(y))
assert erf2(x, y).rewrite('erf') == erf(y) - erf(x)
assert erf2(x, y).rewrite('erfc') == erfc(x) - |
btat/Booktype | lib/booki/bookizip.py | Python | agpl-3.0 | 5,243 | 0.001717 | # This file is part of Booktype.
# Copyright (c) 2012 Douglas Bagnall
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import os, sys
from booki.utils.json_wrapper import json
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
# Map of file extension -> MIME type, used by BookiZip.add_to_package() to
# guess an item's mediatype when none is supplied.
MEDIATYPES = {
    'html': "text/html",
    'xhtml': "application/xhtml+xml",
    'css': 'text/css',
    'json': "application/json",
    'png': 'image/png',
    'gif': 'image/gif',
    'jpg': 'image/jpeg',
    'jpeg': 'image/jpeg',
    'svg': 'image/svg+xml',
    'tiff': 'image/tiff',
    'ncx': 'application/x-dtbncx+xml',
    'dtb': 'application/x-dtbook+xml',
    'xml': 'application/xml',
    'pdf': "application/pdf",
    'txt': 'text/plain',
    'epub': "application/epub+zip",
    'booki': "application/x-booki+zip",
    None: 'application/octet-stream',  # fallback for unknown extensions
    }
#metadata construction routines
# Metadata namespace URIs: Dublin Core and the Booki ("FM") namespace.
DC = "http://purl.org/dc/elements/1.1/"
FM = "http://booki.cc/"
def get_metadata(metadata, key, ns=DC,
                 scheme='', default=None):
    """Get a list of metadata values matching a key, namespace and
    scheme. If the ns or scheme are not set, they default to Dublin
    Core and an empty string, respectively.

    If scheme is '*', the values of every scheme are concatenated.
    If no values are set, an empty list is returned, unless the
    default argument is given, in which case you get that.
    """
    # Build the fallback per call: the previous `default=[]` signature
    # shared one list object across all calls, so a caller mutating the
    # returned default would silently corrupt later lookups.
    if default is None:
        default = []
    values = metadata.get(ns, {}).get(key, {})
    if scheme == '*':
        return sum(values.values(), [])
    return values.get(scheme, default)
def get_metadata_schemes(metadata, key, ns=DC):
    """Return the scheme names available for *key* in namespace *ns*."""
    return metadata.get(ns, {}).get(key, {}).keys()
def add_metadata(metadata, key, value, ns=DC, scheme=''):
    """Record one (ns, key, scheme, value) metadata tuple.

    The namespace defaults to Dublin Core and the scheme to an empty
    string, which is what most callers want.
    """
    by_scheme = metadata.setdefault(ns, {}).setdefault(key, {})
    by_scheme.setdefault(scheme, []).append(value)
def clear_metadata(metadata, key, ns=DC, scheme='*'):
    """Remove metadata for *key* in namespace *ns* (Dublin Core by default).

    With a concrete scheme, only that scheme's values are removed; the
    default scheme '*' wipes every scheme for the key.  Passing ns='*'
    clears the key from all namespaces instead.
    """
    if ns in metadata:
        namespace = metadata[ns]
        if key not in namespace:
            return
        if scheme == '*':
            namespace[key] = {}
        elif scheme in namespace[key]:
            del namespace[key][scheme]
    elif ns == '*':
        for each_ns in metadata:
            clear_metadata(metadata, key, each_ns, scheme)
class BookiZip(object):
    """Helper for writing booki-zips"""
    # NOTE(review): Python 2 code (`0644` octal literal, `16L` long suffix);
    # the mutable default `info={}` is shared across instances -- safe only
    # if callers never mutate it. Consider `info=None` when porting.
    def __init__(self, filename, info={}):
        """Start a new zip and put an uncompressed 'mimetype' file at the
        start. This idea is copied from the epub specification, and
        allows the file type to be discovered by reading the first few
        bytes."""
        self.zipfile = ZipFile(filename, 'w', ZIP_DEFLATED, allowZip64=True)
        # 'mimetype' must be stored (not deflated) so the raw bytes are
        # readable at a fixed offset, as in epub.
        self.write_blob('mimetype', MEDIATYPES['booki'], ZIP_STORED)
        self.filename = filename
        # Maps item ID -> {url, mimetype, contributors, rightsholders, license};
        # serialised into info.json by finish().
        self.manifest = {}
        self.info = info
    def write_blob(self, filename, blob, compression=ZIP_DEFLATED, mode=0644):
        """Add something to the zip without adding to manifest"""
        zinfo = ZipInfo(filename)
        # Unix permission bits live in the high 16 bits of external_attr.
        zinfo.external_attr = mode << 16L # set permissions
        zinfo.compress_type = compression
        self.zipfile.writestr(zinfo, blob)
    def add_to_package(self, ID, fn, blob, mediatype=None,
                       contributors=[], rightsholders=[], license=[]):
        """Add an item to the zip, and save it in the manifest. If
        mediatype is not provided, it will be guessed according to the
        extrension."""
        self.write_blob(fn, blob)
        if mediatype is None:
            # Guess from the extension; unknown extensions fall back to
            # application/octet-stream via MEDIATYPES[None].
            ext = fn[fn.rfind('.') + 1:]
            mediatype = MEDIATYPES.get(ext, MEDIATYPES[None])
        self.manifest[ID] = {
            "url": fn,
            "mimetype": mediatype,
            "contributors": contributors,
            "rightsholders": rightsholders,
            "license": license,
        }
    def _close(self):
        self.zipfile.close()
    def finish(self):
        """Finalise the metadata and write to disk"""
        self.info['manifest'] = self.manifest
        infojson = json.dumps(self.info, indent=2)
        self.add_to_package('info.json', 'info.json', infojson, 'application/json')
        self._close()
|
commaai/openpilot | selfdrive/locationd/test/test_calibrationd.py | Python | mit | 739 | 0.002706 | #!/usr/bin/env python3
import random
import unittest
import numpy as np
import cereal.messaging as messaging
from common.params import Params
from selfdrive.locationd.calibrationd import Calibrator
class TestCalibrationd(unittest.TestCase):
  """Tests for the calibration daemon's parameter persistence."""
  def test_read_saved_params(self):
    """A Calibrator must restore rpy and valid_blocks saved in Params."""
    saved = messaging.new_message('liveCalibration')
    saved.liveCalibration.validBlocks = random.randint(1, 10)
    saved.liveCalibration.rpyCalib = [random.random() for _ in range(3)]
    Params().put("CalibrationParams", saved.to_bytes())
    calibrator = Calibrator(param_put=True)
    np.testing.assert_allclose(saved.liveCalibration.rpyCalib, calibrator.rpy)
    self.assertEqual(saved.liveCalibration.validBlocks, calibrator.valid_blocks)
# Allow running this test module directly (outside a pytest runner).
if __name__ == "__main__":
  unittest.main()
|
sixuanwang/SAMSaaS | wirecloud-develop/src/wirecloud/platform/tests/__init__.py | Python | gpl-2.0 | 2,777 | 0.005764 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2013 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from wirecloud.platform.tests.base import BasicViewsAPI
from wirecloud.platform.tests.plugins import WirecloudPluginTestCase
from wirecloud.platform.tests.rest_api import ApplicationMashupAPI, ResourceManagementAPI, ExtraApplicationMashupAPI
from wirecloud.platform.tests.south_migrations import PlatformSouthMigrationsTestCase
from wirecloud.platform.localcatalogue.tests import LocalCatalogueTestCase, PackagedResourcesTestCase
from wirecloud.platform.wiring.tests import WiringTestCase, OperatorCodeEntryTestCase
from wirecloud.platform.widget.tests import CodeTransformationTestCase
from wirecloud.platform.workspace.tests import WorkspaceTestCase, WorkspaceCacheTestCase, ParameterizedWorkspaceParseTestCase, ParameterizedWorkspaceGenerationTestCase
from wirecloud.proxy.tests import ProxyTests, ProxySecureDataTests
from wirecloud.commons.utils.testcases import build_selenium_test_cases

# Selenium test-case classes materialised into this module's namespace so the
# Django test runner discovers them.  One loop replaces eleven copy-pasted
# build_selenium_test_cases(...) calls; at module level locals() is the module
# dict, exactly as in the original per-call form.
_SELENIUM_TEST_CLASSES = (
    'wirecloud.platform.tests.selenium_tests.BasicSeleniumTests',
    'wirecloud.platform.tests.base.BasicViewsSeleniumTestCase',
    'wirecloud.platform.localcatalogue.tests.LocalCatalogueSeleniumTests',
    'wirecloud.platform.wiring.tests.WiringSeleniumTestCase',
    'wirecloud.platform.wiring.tests.WiringRecoveringTestCase',
    'wirecloud.platform.wiring.tests.WiringGhostTestCase',
    'wirecloud.platform.wiring.tests.EndpointOrderTestCase',
    'wirecloud.platform.wiring.tests.MulticonnectorTestCase',
    'wirecloud.platform.markets.tests.MarketManagementSeleniumTestCase',
    'wirecloud.platform.wiring.tests.SimpleRecommendationsTestCase',
    'wirecloud.platform.wiring.tests.StickyEffectTestCase',
)
for _test_class in _SELENIUM_TEST_CLASSES:
    build_selenium_test_cases((_test_class,), locals())
|
darthryking/simpleisa | simplesim_fsm.py | Python | bsd-3-clause | 28,564 | 0.009628 | """
simplesim_fsm.py
By Ryan Lam
Defines the Finite State Machine (Flying Spaghetti Monster) controller element
for the datapath.
"""
from simplesim_elements import Element, bits_required
from constants import State, Flags, ALUSelA, ALUSelB, ALUOp, Op
class Controller(Element):
    def __init__(self,
            inputInstruction, inputFlags,
            outputState, outputHalted,
            outputALUSelA, outputALUSelB, outputALUOp, outputLdFlags,
            outputLdPC, outputLdIR, outputLdReg,
            outputLdMAR, outputLdMDR,
            outputMemRead, outputMemWrite,
            ):
        """Wire the FSM controller to its datapath signals.

        inputInstruction -- 8-bit opcode input, drives the DECODE step
        inputFlags       -- condition flags input (used by conditional jumps)
        output*          -- control lines driven by update(); the ALU select
                            and op lines are multi-bit, all Ld*/Mem* lines
                            are 1-bit enables
        """
        # Validate the signal widths the update() logic assumes.
        assert inputInstruction.width == 8
        assert outputALUSelA.width == bits_required(ALUSelA.NUM_ALU_A)
        assert outputALUSelB.width == bits_required(ALUSelB.NUM_ALU_B)
        assert outputALUOp.width == bits_required(ALUOp.NUM_ALU_OPS)
        # Every load/memory strobe must be a single-bit line.
        assert all(
            elem.width == 1
            for elem in (
                outputLdFlags, outputLdPC, outputLdIR, outputLdReg,
                outputLdMAR, outputLdMDR, outputMemRead, outputMemWrite,
            )
        )
        super(Controller, self).__init__()
        self.inputInstruction = inputInstruction
        self.inputFlags = inputFlags
        # Re-evaluate the control outputs whenever either input changes.
        inputInstruction.register_callback(self.update)
        inputFlags.register_callback(self.update)
        self.outputState = outputState
        self.outputHalted = outputHalted
        self.outputALUSelA = outputALUSelA
        self.outputALUSelB = outputALUSelB
        self.outputALUOp = outputALUOp
        self.outputLdFlags = outputLdFlags
        self.outputLdPC = outputLdPC
        self.outputLdIR = outputLdIR
        self.outputLdReg = outputLdReg
        self.outputLdMAR = outputLdMAR
        self.outputLdMDR = outputLdMDR
        self.outputMemRead = outputMemRead
        self.outputMemWrite = outputMemWrite
def update(self, value):
state = self.state.state
outputHalted = 0
outputALUSelA = None
outputALUSelB = None
outputALUOp = None
outputLdFlags = 0
outputLdPC = 0
outputLdIR = 0
outputLdReg = 0
outputLdMAR = 0
outputLdMDR = 0
outputMemRead = 0
outputMemWrite = 0
if state == State.FETCH_0:
outputALUSelA = ALUSelA.PC
outputALUOp = ALUOp.PASS_A
outputLdMAR = 1
nextState = State.FETCH_1
elif state == State.FETCH_1:
outputMemRead = 1
outputALUSelA = ALUSelA.PC
outputALUSelB = ALUSelB.ONE
outputALUOp = ALUOp.ADD
outputLdPC = 1
nextState = State.FETCH_2
elif state == State.FETCH_2:
outputALUSelA = ALUSelA.MDR
outputALUOp = ALUOp.PASS_A
outputLdIR = 1
nextState = State.DECODE
elif state == State.DECODE:
try:
nextState = {
Op.NOP : State.FETCH_0,
Op.END : State.HALT,
Op.MOV : State.MOV_0,
Op.LDC : State.LDC_0,
Op.LDM : State.L | DM_0,
Op.STM : State.STM_0,
Op.INC : State.INC_0,
Op.DEC : State.DEC_0,
Op.NEG : State.NEG_0,
Op.BCM : State.BCM_0,
Op.USR : State.USR_0,
Op.SSR : State.SSR_0,
Op.USL : State.USL_0,
Op.ADD : State.ADD_0,
Op.SUB : State.SUB_0,
| Op.AND : State.AND_0,
Op.OR : State.OR_0,
Op.CMP : State.CMP_0,
Op.JMP : State.JMP_0,
Op.JEQ : State.JEQ_0,
Op.JUL : State.JUL_0,
Op.JUG : State.JUG_0,
Op.JSL : State.JSL_0,
Op.JSG : State.JSG_0,
}[self.inputInstruction.value]
except KeyError:
nextState = None
elif state == State.HALT:
outputHalted = 1
nextState = State.HALT
elif state == State.MOV_0:
outputALUSelA = ALUSelA.PC
outputALUOp = ALUOp.PASS_A
outputLdMAR = 1
nextState = State.MOV_1
elif state == State.MOV_1:
outputMemRead = 1
outputALUSelA = ALUSelA.PC
outputALUSelB = ALUSelB.ONE
outputALUOp = ALUOp.ADD
outputLdPC = 1
nextState = State.MOV_2
elif state == State.MOV_2:
outputALUSelA = ALUSelA.MDR
outputALUOp = ALUOp.PASS_A
outputLdIR = 1
nextState = State.MOV_3
elif state == State.MOV_3:
outputALUSelB = ALUSelB.REG_B
outputALUOp = ALUOp.PASS_B
outputLdReg = 1
nextState = State.FETCH_0
elif state == State.LDC_0:
outputALUSelA = ALUSelA.PC
outputALUOp = ALUOp.PASS_A
outputLdMAR = 1
nextState = State.LDC_1
elif state == State.LDC_1:
outputMemRead = 1
outputALUSelA = ALUSelA.PC
outputALUSelB = ALUSelB.ONE
outputALUOp = ALUOp.ADD
outputLdPC = 1
nextState = State.LDC_2
elif state == State.LDC_2:
outputALUSelA = ALUSelA.MDR
outputALUOp = ALUOp.PASS_A
outputLdIR = 1
nextState = State.LDC_3
elif state == State.LDC_3:
outputALUSelA = ALUSelA.PC
outputALUOp = ALUOp.PASS_A
outputLdMAR = 1
nextState = State.LDC_4
elif state == State.LDC_4:
outputMemRead = 1
outputALUSelA = ALUSelA.PC
outputALUSelB = ALUSelB.ONE
outputALUOp = ALUOp.ADD
outputLdPC = 1
nextState = State.LDC_5
elif state == State.LDC_5:
outputALUSelA = ALUSelA.MDR
outputALUOp = ALUOp.PASS_A
outputLdReg = 1
nextState = State.FETCH_0
elif state == State.LDM_0:
outputALUSelA = ALUSelA.PC
outputALUOp = ALUOp.PASS_A
outputLdMAR = 1
nextState = State.LDM_1
elif state == State.LDM_1:
outputMemRead = 1
outputALUSelA = ALUSelA.PC
outputALUSelB = ALUSelB.ONE
outputALUOp = ALUOp.ADD
outputLdPC = 1
nextState = State.LDM_2
elif state == State.LDM_2:
outputALUSelA = ALUSelA.MDR
outputALUOp = ALUOp.PASS_A
outputLdIR = 1
nextState = State.LDM_3
elif state == State.LDM_3:
outputALUSelB = ALUSelB.REG_B
outputALUOp = ALUOp.PASS_B
outputLdMAR = 1
nextState = State.LDM_4
elif state == State.LDM_4:
outputMemRead = 1
nextState = State.LDM_5
elif state == State.LDM_5:
outputALUSelA = ALUSelA.MDR
outputALUOp = ALUOp.PASS_A
outputLdReg = 1
nextState = State.FETCH_0
elif state == State.STM_0:
outputALUSelA = ALUSelA.PC
outputALUOp = ALUOp.PASS_A
outputLdMAR = 1
nextState = State.STM_1
elif state == State.STM_1:
|
qedsoftware/commcare-hq | corehq/blobs/atomic.py | Python | bsd-3-clause | 2,197 | 0 | from corehq.blobs import DEFAULT_BUCKET
from corehq.blobs.exceptions import InvalidContext
class AtomicBlobs(object):
    """Blob db wrapper whose puts and deletes commit or roll back together.

    Usage:

        with AtomicBlobs(get_blob_db()) as db:
            # do stuff here that puts or deletes blobs
            db.delete(old_blob_id)
            info = db.put(content)
            save(info, deleted=old_blob_id)

    If the `with` body raises, every blob put inside the context is
    removed again and all queued deletes are abandoned, leaving the
    underlying db untouched.
    """

    def __init__(self, db):
        self.db = db
        # Both are None while no context is active; entering the context
        # replaces them with empty journals.
        self.puts = None
        self.deletes = None

    def put(self, content, basename="", bucket=DEFAULT_BUCKET):
        if self.puts is None:
            raise InvalidContext("AtomicBlobs context is not active")
        info = self.db.put(content, basename, bucket)
        self.puts.append((info, bucket))
        return info

    def get(self, *args, **kw):
        return self.db.get(*args, **kw)

    def delete(self, *args, **kw):
        """Delete a blob or bucket of blobs

        NOTE blobs will not actually be deleted until the context exits,
        so subsequent gets inside the context will return an object even
        though the blob or bucket has been queued for deletion.
        """
        if self.deletes is None:
            raise InvalidContext("AtomicBlobs context is not active")
        self.db.get_args_for_delete(*args, **kw)  # validate args
        self.deletes.append((args, kw))
        return None  # result is unknown

    def copy_blob(self, *args, **kw):
        raise NotImplementedError

    def get_identifier(self, *args, **kw):
        return self.db.get_identifier(*args, **kw)

    def __enter__(self):
        self.puts = []
        self.deletes = []
        return self

    def __exit__(self, exc_type, exc_value, tb):
        pending_puts = self.puts
        pending_deletes = self.deletes
        self.puts = self.deletes = None
        if exc_type is not None:
            # Roll back: remove every blob written inside the context.
            for info, bucket in pending_puts:
                self.db.delete(info.identifier, bucket)
        else:
            # Commit: perform the deferred deletes.
            for args, kw in pending_deletes:
                self.db.delete(*args, **kw)
universalcore/unicore-cms-django | project/wsgi.py | Python | bsd-2-clause | 1,424 | 0.000702 | """
WSGI config for skeleton project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "skeleton.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
tchellomello/home-assistant | tests/components/homeassistant/triggers/test_numeric_state.py | Python | apache-2.0 | 41,794 | 0.000455 | """The tests for numeric state automation."""
from datetime import timedelta
import pytest
import voluptuous as vol
import homeassistant.components.automation as automation
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
)
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
assert_setup_component,
async_fire_time_changed,
async_mock_service,
mock_component,
)
from tests.components.automation import common
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Returns the list of recorded service calls; tests assert on its
    # length/contents after firing triggers.
    return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components."""
    # Mark the group integration as already set up so automations load
    # without pulling in its real dependencies.
    mock_component(hass, "group")
async def test_if_fires_on_entity_change_below(hass, calls):
    """Trigger fires when the entity drops below the threshold, carries the
    originating context, and stays quiet while the automation is off."""
    context = Context()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "below": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # 9 is below 10
    hass.states.async_set("test.entity", 9, context=context)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].context.parent_id == context.id
    # Reset above the threshold, then turn the automation off; crossing
    # below again must NOT fire a second time.
    hass.states.async_set("test.entity", 12)
    await common.async_turn_off(hass)
    await hass.async_block_till_done()
    hass.states.async_set("test.entity", 9)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_on_entity_change_over_to_below(hass, calls):
    """Trigger fires when the entity crosses from above to below."""
    # Start above the threshold before the automation exists.
    hass.states.async_set("test.entity", 11)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "below": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # 9 is below 10
    hass.states.async_set("test.entity", 9)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_on_entities_change_over_to_below(hass, calls):
    """Each entity in an entity_id list triggers independently."""
    hass.states.async_set("test.entity_1", 11)
    hass.states.async_set("test.entity_2", 11)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": ["test.entity_1", "test.entity_2"],
                    "below": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # 9 is below 10
    hass.states.async_set("test.entity_1", 9)
    await hass.async_block_till_done()
    assert len(calls) == 1
    # The second entity crossing fires again.
    hass.states.async_set("test.entity_2", 9)
    await hass.async_block_till_done()
    assert len(calls) == 2
async def test_if_not_fires_on_entity_change_below_to_below(hass, calls):
    """Only the crossing fires; further changes below the threshold don't."""
    context = Context()
    hass.states.async_set("test.entity", 11)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "below": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # 9 is below 10 so this should fire
    hass.states.async_set("test.entity", 9, context=context)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].context.parent_id == context.id
    # already below so should not fire again
    hass.states.async_set("test.entity", 5)
    await hass.async_block_till_done()
    assert len(calls) == 1
    # still below so should not fire again
    hass.states.async_set("test.entity", 3)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_not_below_fires_on_entity_change_to_equal(hass, calls):
    """'below' is strict: reaching exactly the threshold must not fire."""
    hass.states.async_set("test.entity", 11)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "below": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # 10 is not below 10 so this should not fire again
    hass.states.async_set("test.entity", 10)
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_if_fires_on_initial_entity_below(hass, calls):
    """First update fires even when the entity already started below."""
    hass.states.async_set("test.entity", 9)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "below": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Fire on first update even if initial state was already below
    hass.states.async_set("test.entity", 8)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_on_initial_entity_above(hass, calls):
    """First update fires even when the entity already started above."""
    hass.states.async_set("test.entity", 11)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "above": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # Fire on first update even if initial state was already above
    hass.states.async_set("test.entity", 12)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_on_entity_change_above(hass, calls):
    """Trigger fires when the entity goes above the threshold."""
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {
                    "platform": "numeric_state",
                    "entity_id": "test.entity",
                    "above": 10,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # 11 is above 10
    hass.states.async_set("test.entity", 11)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_on_entity_change_below_to_above(hass, calls):
"""Test the firing with changed entity."""
# set initial state
hass.states.async_set("test.entity", 9)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "numeric_state",
"entity_id": "test.entity",
"above": 10,
},
"action |
jainaman224/Algo_Ds_Notes | Rain_Water_Trapping/Rain_Water_Trapping.py | Python | gpl-3.0 | 1,208 | 0.044702 | # Rain_Water_Trapping
def trappedWater(a, size):
    """Return the total volume of rain water trapped between the bars.

    a    -- list of non-negative bar heights
    size -- number of bars in *a* (kept for backward compatibility)

    Water above bar i is min(tallest-to-left, tallest-to-right) - a[i].
    Runs in O(size) time with O(size) extra space.
    """
    # Guard: the original crashed with IndexError on an empty histogram
    # (it unconditionally did left[0] = a[0]).
    if size <= 0:
        return 0
    # left[i]: tallest bar in a[0..i]; right[i]: tallest bar in a[i..size-1].
    left = [0] * size
    right = [0] * size
    left[0] = a[0]
    for i in range(1, size):
        left[i] = max(left[i - 1], a[i])
    right[size - 1] = a[size - 1]
    for i in range(size - 2, -1, -1):
        right[i] = max(right[i + 1], a[i])
    # Sum the water column above each bar.
    return sum(min(left[i], right[i]) - a[i] for i in range(size))
# Driver: read the tower count, then one tower height per line.
n = int(input())
arr = [int(input()) for _ in range(n)]
print("Maximum water that can be accumulated is ", trappedWater(arr, len(arr)))
#Input:
#12
#0
#1
#0
#2
#1
#0
#1
#3
#2
#1
#2
#1
#Output:
#Maximum water that can be accumulated is  6
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/external/org_mozilla_bleach/bleach/encoding.py | Python | bsd-2-clause | 2,277 | 0 | import datetime
from decimal import Decimal
import types
import six
# Types that force_unicode(strings_only=True) passes through untouched.
_PROTECTED_TYPES = six.integer_types + (
    types.NoneType,
    datetime.datetime, datetime.date, datetime.time,
    float, Decimal,
)


def is_protected_type(obj):
    """Return True if *obj* is of a "protected" type.

    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    return isinstance(obj, _PROTECTED_TYPES)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects
    (see is_protected_type) and return them unchanged.

    encoding/errors are passed through to the underlying decode calls.
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # six.text_type. This function gets called often in that setting.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            # Not a string at all: prefer an explicit __unicode__ hook,
            # otherwise fall back to the text constructor (py2/py3 split).
            if hasattr(s, '__unicode__'):
                s = s.__unicode__()
            else:
                if six.PY3:
                    if isinstance(s, bytes):
                        s = six.text_type(s, encoding, errors)
                    else:
                        s = six.text_type(s)
                else:
                    s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s,
            # encoding, errors), so that if s is a SafeBytes, it ends up being
            # a SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise UnicodeDecodeError(*e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_unicode(arg, encoding, strings_only,
                    errors) for arg in s])
    return s
|
boar/boar | boar/uploads/manager.py | Python | bsd-3-clause | 227 | 0.008811 | from django.db import models
class UploadManager(mode | ls.Manager):
def published(self):
return self.get_query_set().annotate(
article_count=models.Count('article')
| ).filter(article_count__gt=0)
|
Amitmund/quick_tools | tellWords/tellWords.py | Python | mit | 438 | 0.006849 | #!/usr/bin/env python
import subprocess

# Example on how to use this script.
# ./tellWords.py [ and press enter]
# In the prompt line type what you want and press enter again.

# 'say' is the command-line text-to-speech tool (macOS).
command = "say"
text = raw_input("Enter word or a sentence and press enter: ")
# Speak the input one character at a time; a space is voiced as a short
# pause (",,") so word boundaries remain audible.
# (Restored the loop and call lines that were split by stray delimiters;
# iterating the string directly replaces the redundant list(text) copy.)
for c1 in text:
    if c1 == " ":
        subprocess.call([command, ",,"])
    else:
        subprocess.call([command, c1])
# Finally speak the whole sentence in one go.
subprocess.call([command, text])
|
lubomir/productmd | productmd/compose.py | Python | lgpl-2.1 | 4,093 | 0.001222 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
This module provides Compose class that provides easy access
to ComposeInfo, Rpms and Images in compose metadata.
Example::
import productmd.compose
compose = productmd.compose.Compose("/path/to/compose")
# then you can access compose metadata via following properties:
compose.info
compose.images
compose.rpms
"""
import os
import productmd.composeinfo
import productmd.images
import productmd.rpms
from productmd.common import _file_exists
__all__ = (
"Compose",
)
class Compose(object):
    """
    This class provides easy access to compose metadata.

    :param compose_path: Path to a compose. HTTP(s) URL is also accepted.
    :type compose_path: str
    """

    def __init__(self, compose_path):
        # example: MYPRODUCT-1.0-YYYYMMDD.0/metadata
        self.compose_path = compose_path
        # example: MYPRODUCT-1.0-YYYYMMDD.0/compose/metadata (preferred location)
        path = os.path.join(compose_path, "compose")
        if _file_exists(path):
            self.compose_path = path
        elif "://" not in compose_path and os.path.exists(compose_path):
            # Scan all subdirs under compose_path for 'metadata'. Doesn't work over HTTP.
            # example: MYPRODUCT-1.0-YYYYMMDD.0/1.0/metadata (legacy location)
            for i in os.listdir(compose_path):
                path = os.path.join(compose_path, i)
                metadata_path = os.path.join(path, "metadata")
                if _file_exists(metadata_path):
                    self.compose_path = path
                    break
        # Metadata caches, loaded lazily on first access of the
        # corresponding property below.  (The _rpms line was corrupted by a
        # stray delimiter and has been restored.)
        self._composeinfo = None
        self._images = None
        self._rpms = None

    def _find_metadata_file(self, paths):
        """Return the first of *paths* (relative to the compose path) that
        exists; raise RuntimeError when none does."""
        for i in paths:
            path = os.path.join(self.compose_path, i)
            if _file_exists(path):
                return path
        raise RuntimeError('Failed to load metadata from %s' % self.compose_path)

    @property
    def info(self):
        """(:class:`productmd.composeinfo.ComposeInfo`) -- Compose metadata"""
        if self._composeinfo is not None:
            return self._composeinfo
        composeinfo = productmd.composeinfo.ComposeInfo()
        paths = [
            "metadata/composeinfo.json",
        ]
        path = self._find_metadata_file(paths)
        composeinfo.load(path)
        self._composeinfo = composeinfo
        return self._composeinfo

    @property
    def images(self):
        """(:class:`productmd.images.Images`) -- Compose images metadata"""
        if self._images is not None:
            return self._images
        images = productmd.images.Images()
        paths = [
            "metadata/images.json",
            "metadata/image-manifest.json",
        ]
        path = self._find_metadata_file(paths)
        images.load(path)
        self._images = images
        return self._images

    @property
    def rpms(self):
        """(:class:`productmd.rpms.Rpms`) -- Compose RPMs metadata"""
        if self._rpms is not None:
            return self._rpms
        rpms = productmd.rpms.Rpms()
        paths = [
            "metadata/rpms.json",
            "metadata/rpm-manifest.json",
        ]
        path = self._find_metadata_file(paths)
        rpms.load(path)
        self._rpms = rpms
        return self._rpms
|
Elbandi/PyMunin | pysysinfo/varnish.py | Python | gpl-3.0 | 2,163 | 0.012483 | """Implements VarnishInfo Class for gathering stats from Varnish Cache.
The statistics are obtained by running the command varnishstats.
"""
import re
import util
__author__ = "Ali Onur Uyar"
__copyright__ = "Copyright 2011, Ali Onur Uyar"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.24"
__maintainer__ = "Ali Onur Uyar"
__email__ = "aouyar at gmail.com"
__status__ = "Development"
# Defaults
varnishstatCmd = "varnishstat"
class VarnishInfo:
    """Class to retrieve stats from Varnish Cache."""

    def __init__(self, instance=None):
        """Initialization for monitoring Varnish Cache instance.

        @param instance: Name of the Varnish Cache instance.
                         (Defaults to hostname.)
        """
        self._instance = instance
        # Per-instance mapping of stat field name -> description, filled in
        # by getStats().  (Previously a class-level dict, i.e. mutable state
        # shared and mutated by every instance; the dead "is None" check in
        # getStats has been removed accordingly.)
        self._descDict = {}

    def getStats(self):
        """Runs varnishstats command to get stats from Varnish Cache.

        @return: Dictionary of stats.
        """
        info_dict = {}
        args = [varnishstatCmd, '-1']
        if self._instance is not None:
            args.extend(['-n', self._instance])
        output = util.exec_command(args)
        for line in output.splitlines():
            # Expected line format: <name> <value> <rate-or-.> <description>
            mobj = re.match(r'(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$',
                            line)
            if mobj:
                fname = mobj.group(1).replace('.', '_')
                info_dict[fname] = util.parse_value(mobj.group(2))
                self._descDict[fname] = mobj.group(4)
        return info_dict

    def getDescDict(self):
        """Returns dictionary mapping stats entries to descriptions.

        @return: Dictionary.
        """
        if len(self._descDict) == 0:
            self.getStats()
        return self._descDict

    def getDesc(self, entry):
        """Returns description for stat entry.

        @param entry: Entry name.
        @return: Description for entry.
        """
        if len(self._descDict) == 0:
            self.getStats()
        return self._descDict.get(entry)
|
tamasgal/km3pipe | km3pipe/style/km3pipe_notebook.py | Python | mit | 49 | 0 | fr | om ..style import use
use("km3pi | pe-notebook")
|
keithhendry/treadmill | treadmill/bootstrap/vagrant_aliases.py | Python | apache-2.0 | 1,513 | 0 | """Vagrant aliases."""
# Mapping of tool alias -> absolute path on a Vagrant host; None means the
# tool is not available in this environment.  (The 'import' key and the
# 's6_envdir' value were corrupted by stray delimiters and are restored.)
ALIASES = {
    'pid1': '/opt/treadmill-pid1/bin/pid1',
    'treadmill': '/opt/treadmill',
    'treadmill_bin': '/opt/treadmill/bin/treadmill',
    # openldap
    'slapd': '/usr/sbin/slapd',
    'slapadd': '/usr/sbin/slapadd',
    'dnscache': None,
    'java_home': None,
    'kafka_run_class': None,
    'kafka_server_start': None,
    # Kerberos
    'kinit': None,
    'klist': None,
    'tkt-recv': None,
    'tkt-send': None,
    'rrdcached': None,
    'rrdtool': None,
    'logstash-forwarder': None,
    'treadmill_bind_preload.so': None,
    'treadmill_spawn': None,
    'treadmill_spawn_finish': None,
    'treadmill_spawn_run': None,
    # s6 - use full path for all utilities, do not interpolate.
    's6': '/opt/s6',
    'backtick': '/opt/s6/bin/backtick',
    'elglob': '/opt/s6/bin/elglob',
    'emptyenv': '/opt/s6/bin/emptyenv',
    'execlineb': '/opt/s6/bin/execlineb',
    'fdmove': '/opt/s6/bin/fdmove',
    'if': '/opt/s6/bin/if',
    'import': '/opt/s6/bin/import',
    'importas': '/opt/s6/bin/importas',
    'redirfd': '/opt/s6/bin/redirfd',
    's6_envdir': '/opt/s6/bin/s6-envdir',
    's6_envuidgid': '/opt/s6/bin/s6-envuidgid',
    's6_log': '/opt/s6/bin/s6-log',
    's6_setuidgid': '/opt/s6/bin/s6-setuidgid',
    's6_svc': '/opt/s6/bin/s6-svc',
    's6_svok': '/opt/s6/bin/s6-svok',
    's6_svscan': '/opt/s6/bin/s6-svscan',
    's6_svscanctl': '/opt/s6/bin/s6-svscanctl',
    's6_svwait': '/opt/s6/bin/s6-svwait',
    'umask': '/opt/s6/bin/umask',
}
|
LosFuzzys/CTFd | tests/api/v1/test_flags.py | Python | apache-2.0 | 4,159 | 0.00024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_challenge,
gen_flag,
login_as_user,
)
def test_api_flags_get_non_admin():
    """Every /api/v1/flags endpoint must return 403 for a non-admin client."""
    app = create_ctfd()
    with app.app_context():
        gen_challenge(app.db)
        gen_flag(app.db, 1)
        with app.test_client() as client:
            # test_api_flags_get_non_admin
            """Can a user get /api/v1/flags if not admin"""
            r = client.get("/api/v1/flags", json="")
            assert r.status_code == 403

            # test_api_flags_post_non_admin
            """Can a user post /api/v1/flags if not admin"""
            r = client.post("/api/v1/flags")
            assert r.status_code == 403

            # test_api_flag_types_get_non_admin
            """Can a user get /api/v1/flags/types[/<type_name>] if not admin"""
            r = client.get("/api/v1/flags/types", json="")
            assert r.status_code == 403

            # test_api_flag_get_non_admin
            """Can a user get /api/v1/flags/<flag_id> if not admin"""
            r = client.get("/api/v1/flags/1", json="")
            assert r.status_code == 403

            # test_api_flag_patch_non_admin
            """Can a user patch /api/v1/flags/<flag_id> if not admin"""
            r = client.patch("/api/v1/flags/1", json="")
            assert r.status_code == 403

            # test_api_flag_delete_non_admin
            """Can a user delete /api/v1/flags/<flag_id> if not admin"""
            r = client.delete("/api/v1/flags/1", json="")
            assert r.status_code == 403
    destroy_ctfd(app)
def test_api_flags_get_admin():
    """An admin can list flags: GET /api/v1/flags returns 200."""
    app = create_ctfd()
    with app.app_context():
        with login_as_user(app, "admin") as client:
            r = client.get("/api/v1/flags", json="")
            assert r.status_code == 200
    destroy_ctfd(app)
def test_api_flags_post_admin():
    """An admin can create a flag: POST /api/v1/flags returns 200."""
    app = create_ctfd()
    with app.app_context():
        # A challenge must exist for the flag to attach to.
        gen_challenge(app.db)
        with login_as_user(app, name="admin") as client:
            r = client.post(
                "/api/v1/flags",
                json={"content": "flag", "type": "static", "challenge": 1},
            )
            assert r.status_code == 200
    destroy_ctfd(app)
def test_api_flag_types_get_admin():
    """Can a user get /api/v1/flags/types[/<type_name>] if admin"""
    app = create_ctfd()
    # Restored: this context-manager line was split by a stray delimiter.
    with app.app_context():
        with login_as_user(app, "admin") as client:
            # Listing of all flag types
            r = client.get("/api/v1/flags/types", json="")
            assert r.status_code == 200
            # Detail view of a single, known flag type
            r = client.get("/api/v1/flags/types/static", json="")
            assert r.status_code == 200
    destroy_ctfd(app)
def test_api_flag_get_admin():
    """Can a user get /api/v1/flags/<flag_id> if admin"""
    app = create_ctfd()
    with app.app_context():
        gen_challenge(app.db)
        # Restored: this fixture line was split by a stray delimiter.
        gen_flag(app.db, 1)
        with login_as_user(app, "admin") as client:
            r = client.get("/api/v1/flags/1", json="")
            assert r.status_code == 200
    destroy_ctfd(app)
def test_api_flag_patch_admin():
    """An admin can edit a flag: PATCH /api/v1/flags/<flag_id> updates content."""
    app = create_ctfd()
    with app.app_context():
        gen_challenge(app.db)
        gen_flag(app.db, 1)
        with login_as_user(app, "admin") as client:
            r = client.patch(
                "/api/v1/flags/1",
                json={"content": "flag_edit", "data": "", "type": "static", "id": "1"},
            )
            assert r.status_code == 200
            # Response payload must reflect the edited content.
            assert r.get_json()["data"]["content"] == "flag_edit"
    destroy_ctfd(app)
def test_api_flag_delete_admin():
    """Can a user delete /api/v1/flags/<flag_id> if admin"""
    app = create_ctfd()
    with app.app_context():
        gen_challenge(app.db)
        gen_flag(app.db, 1)
        with login_as_user(app, "admin") as client:
            r = client.delete("/api/v1/flags/1", json="")
            assert r.status_code == 200
            # Successful delete returns no data payload.
            assert r.get_json().get("data") is None
    destroy_ctfd(app)
|
TheGurke/Progenitus | sleekxmpp/features/feature_mechanisms/stanza/failure.py | Python | gpl-3.0 | 2,500 | 0.0016 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import ElementBase, StanzaBase, ET
from sleekxmpp.xmlstream import register_stanza_plugin
class Failure(StanzaBase):
    """SASL <failure/> stanza (urn:ietf:params:xml:ns:xmpp-sasl).

    Exposes the failure condition (one of ``conditions``) and an optional
    text description.
    """

    name = 'failure'
    namespace = 'urn:ietf:params:xml:ns:xmpp-sasl'
    interfaces = set(('condition', 'text'))
    plugin_attrib = name
    sub_interfaces = set(('text',))
    # Defined SASL failure conditions.  NOTE(review): 'mechansism-too-weak'
    # is misspelled (RFC 6120 uses 'mechanism-too-weak') but is kept as-is
    # because existing callers may depend on the current value.
    conditions = set(('aborted', 'account-disabled', 'credentials-expired',
        'encryption-required', 'incorrect-encoding', 'invalid-authzid',
        'invalid-mechanism', 'malformed-request', 'mechansism-too-weak',
        'not-authorized', 'temporary-auth-failure'))

    def setup(self, xml=None):
        """
        Populate the stanza object using an optional XML object.

        Overrides ElementBase.setup.

        Sets a default error type and condition, and changes the
        parent stanza's type to 'error'.

        Arguments:
            xml -- Use an existing XML object for the stanza's values.
        """
        # StanzaBase overrides self.namespace
        self.namespace = Failure.namespace

        if StanzaBase.setup(self, xml):
            # If we had to generate XML then set default values.
            self['condition'] = 'not-authorized'

        self.xml.tag = self.tag_name()

    def get_condition(self):
        """Return the condition element's name."""
        for child in self.xml.getchildren():
            if "{%s}" % self.namespace in child.tag:
                cond = child.tag.split('}', 1)[-1]
                if cond in self.conditions:
                    return cond
        return 'not-authorized'

    def set_condition(self, value):
        """
        Set the tag name of the condition element.

        Arguments:
            value -- The tag name of the condition element.
        """
        if value in self.conditions:
            del self['condition']
            self.xml.append(ET.Element("{%s}%s" % (self.namespace, value)))
        return self

    def del_condition(self):
        """Remove the condition element."""
        # NOTE(review): self.condition_ns is not defined anywhere in this
        # file; presumably set by the framework or a bug -- verify upstream.
        for child in self.xml.getchildren():
            if "{%s}" % self.condition_ns in child.tag:
                tag = child.tag.split('}', 1)[-1]
                if tag in self.conditions:
                    self.xml.remove(child)
        return self
|
sanguinariojoe/FreeCAD | src/Mod/Fem/femobjects/constraint_electrostaticpotential.py | Python | lgpl-2.1 | 4,421 | 0.000452 | # ***************************************************************************
# * Copyright (c) 2017 Markus Hovorka <m.hovorka@live.de> *
# * Copyright (c) 2020 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# *   GNU Library General Public License for more details.                  *
# * *
# * You should have received a copy of the GNU Library General Public *
# *   License along with this program; if not, write to the Free Software   *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM constraint electrostatic potential document object"
__author__ = "Markus Hovorka, Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package constraint_electrostaticpotential
# \ingroup FEM
# \brief constraint electrostatic potential object
from . import base_fempythonobject
class ConstraintElectrostaticPotential(base_fempythonobject.BaseFemPythonObject):
    """FEM document object for an electrostatic potential constraint.

    Adds the solver-facing properties (potential value, capacitance body
    and their enable flags) to the wrapped document object.  Property
    creation is idempotent so documents saved by older versions gain any
    newly introduced properties when restored.
    """

    Type = "Fem::ConstraintElectrostaticPotential"

    def __init__(self, obj):
        super(ConstraintElectrostaticPotential, self).__init__(obj)
        self.add_properties(obj)

    def onDocumentRestored(self, obj):
        # Re-run property creation so properties added after the document
        # was saved are picked up on load.
        self.add_properties(obj)

    def add_properties(self, obj):
        """Add each missing constraint property to *obj* with its default.

        NOTE: the previous version had a stray trailing comma after every
        addProperty(...) call, turning each statement into a discarded
        one-element tuple; the commas are removed, behavior is unchanged.
        """
        if not hasattr(obj, "Potential"):
            obj.addProperty(
                "App::PropertyFloat",
                "Potential",
                "Parameter",
                "Potential"
            )
            obj.Potential = 0.0
        if not hasattr(obj, "PotentialEnabled"):
            obj.addProperty(
                "App::PropertyBool",
                "PotentialEnabled",
                "Parameter",
                "Potential Enabled"
            )
            obj.PotentialEnabled = False
        if not hasattr(obj, "PotentialConstant"):
            obj.addProperty(
                "App::PropertyBool",
                "PotentialConstant",
                "Parameter",
                "Potential Constant"
            )
            obj.PotentialConstant = False
        if not hasattr(obj, "ElectricInfinity"):
            obj.addProperty(
                "App::PropertyBool",
                "ElectricInfinity",
                "Parameter",
                "Electric Infinity"
            )
            obj.ElectricInfinity = False
        if not hasattr(obj, "ElectricForcecalculation"):
            obj.addProperty(
                "App::PropertyBool",
                "ElectricForcecalculation",
                "Parameter",
                "Electric Force Calculation"
            )
            obj.ElectricForcecalculation = False
        if not hasattr(obj, "CapacitanceBody"):
            obj.addProperty(
                "App::PropertyInteger",
                "CapacitanceBody",
                "Parameter",
                "Capacitance Body"
            )
            obj.CapacitanceBody = 0
        if not hasattr(obj, "CapacitanceBodyEnabled"):
            obj.addProperty(
                "App::PropertyBool",
                "CapacitanceBodyEnabled",
                "Parameter",
                "Capacitance Body Enabled"
            )
            obj.CapacitanceBodyEnabled = False
|
voc/voctomix | example-scripts/ffmpeg/record-all-audio-streams.py | Python | mit | 2,097 | 0 | #!/usr/bin/env python3
import socket
import sys
import json
import shlex
import subprocess
import logging
from configparser import SafeConfigParser
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('record-all-audio-streams')

# Voctomix control server connection.
host = 'localhost'
port = 9999

log.info('Connecting to %s:%u', host, port)
conn = socket.create_connection((host, port))
fd = conn.makefile('rw')

# Request the server configuration and read until the response line
# carrying it arrives.  (Restored: the for-statement was split by a
# stray delimiter.)
log.info('Fetching Config from Server')
fd.write("get_config\n")
fd.flush()

for line in fd:
    if line.startswith('server_config'):
        [cmd, arg] = line.split(' ', 1)
        server_config_json = arg
        log.info('Received Config from Server')
        break

log.info('Parsing Server-Config')
server_config = json.loads(server_config_json)


def getlist(self, section, option):
    # Split a comma-separated config value into a list of stripped strings.
    # (Restored: the self.get(...) call was split by a stray delimiter.)
    return [x.strip() for x in self.get(section, option).split(',')]


# Monkey-patch the helper onto SafeConfigParser so 'sources' can be read
# as a list below.
SafeConfigParser.getlist = getlist

config = SafeConfigParser()
config.read_dict(server_config)

sources = config.getlist('mix', 'sources')

# One tcp:// input (ports 13000, 13001, ...) and one audio map per source.
inputs = []
maps = []
for idx, source in enumerate(sources):
    inputs.append('-i tcp://localhost:{:d}'.format(13000 + idx))
    maps.append('-map {0:d}:a -metadata:s:a:{0:d} language=und'.format(idx))

# Output file defaults to output.ts unless given as first CLI argument.
try:
    output = sys.argv[1]
except IndexError:
    output = 'output.ts'

# example call:
# -------------
# ffmpeg
#   -hide_banner
#   -y -nostdin
#   -i tcp://localhost:13000
#   -i tcp://localhost:13001
#   -i tcp://localhost:13002
#   -ac 2 -channel_layout stereo
#   -map 0:a -metadata:s:a:0 language=und
#   -map 1:a -metadata:s:a:1 language=und
#   -map 2:a -metadata:s:a:2 language=und
#   -c:a mp2 -b:a 192k -ac:a 2 -ar:a 48000
#   -flags +global_header -flags +ilme+ildct
#   -f mpegts
#   -vv
cmd = """
ffmpeg
-hide_banner
-y -nostdin
{}
-ac 2 -channel_layout stereo
{}
-c:a mp2 -b:a 192k -ac:a 2 -ar:a 48000
-flags +global_header -flags +ilme+ildct
-f mpegts
{}
""".format(' '.join(inputs), ' '.join(maps), output)

log.info('running command:\n%s', cmd)
args = shlex.split(cmd)
p = subprocess.run(args)
sys.exit(p.returncode)
|
barsi/odoo-rtl | report_rtl/__init__.py | Python | agpl-3.0 | 952 | 0.00105 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo RTL support
# Copyright (C) 2014 Mohammed Barsi.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import models
|
zeropool/crosswalk | build/android/generate_xwalk_core_library_aar.py | Python | bsd-3-clause | 2,953 | 0.009143 | #!/usr/bin/env python
#
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
import zipfile
def main():
  """Package the xwalk core/shared library build output into an .aar.

  Reads -t/--target (product out directory) and --shared from the command
  line and writes xwalk_core_library.aar or xwalk_shared_library.aar
  into the target directory.  Returns 0 on success.
  """
  option_parser = optparse.OptionParser()
  option_parser.add_option('-t', dest='target',
                           help='Product out target directory.')
  option_parser.add_option('--shared', action='store_true',
                           default=False,
                           help='Generate shared library', )
  options, _ = option_parser.parse_args()

  # The first entry of each tuple is the source file/directory that will be
  # copied (and must exist), the second entry is its relative path inside the
  # AAR file.
  if options.shared:
    dirs = (
        (os.path.join(options.target, 'xwalk_shared_library', 'libs'),
         'jni'),
        (os.path.join(options.target, 'xwalk_shared_library', 'res'),
         'res'),
    )
    files = (
        (os.path.join(options.target, 'xwalk_shared_library',
                      'AndroidManifest.xml'), 'AndroidManifest.xml'),
        (os.path.join(options.target, 'xwalk_shared_library', 'libs',
                      'xwalk_core_library_java_app_part.jar'), 'classes.jar'),
        (os.path.join(options.target, 'xwalk_core_empty_embedder_apk',
                      'gen', 'R.txt'), 'R.txt'),
    )
    exclude_files = (
        os.path.join(options.target, 'xwalk_shared_library', 'libs',
                     'xwalk_core_library_java_app_part.jar'),
    )
    aar_path = os.path.join(options.target, 'xwalk_shared_library.aar')
  else:
    dirs = (
        (os.path.join(options.target, 'xwalk_core_library', 'libs'),
         'jni'),
        (os.path.join(options.target, 'xwalk_core_library', 'res'),
         'res'),
    )
    files = (
        (os.path.join(options.target, 'xwalk_core_library', 'AndroidManifest.xml'),
         'AndroidManifest.xml'),
        (os.path.join(options.target, 'xwalk_core_library', 'libs',
                      'xwalk_core_library_java.jar'),
         'classes.jar'),
        (os.path.join(options.target, 'xwalk_core_empty_embedder_apk',
                      'gen', 'R.txt'), 'R.txt'),
    )
    # This is a list of files that will not be packaged: mostly a blacklist of
    # files within |dirs|.
    exclude_files = (
        os.path.join(options.target, 'xwalk_core_library', 'libs',
                     'xwalk_core_library_java.jar'),
    )
    aar_path = os.path.join(options.target, 'xwalk_core_library.aar')

  with zipfile.ZipFile(aar_path, 'w', zipfile.ZIP_DEFLATED) as aar_file:
    for src, dest in files:
      aar_file.write(src, dest)
    for src, dest in dirs:
      # 'names' (restored from a delimiter-garbled line) intentionally does
      # not reuse the name 'files' to avoid shadowing the tuple above.
      for root, _, names in os.walk(src):
        for f in names:
          real_path = os.path.join(root, f)
          zip_path = os.path.join(dest, os.path.relpath(root, src), f)
          if real_path in exclude_files:
            continue
          aar_file.write(real_path, zip_path)
  return 0


if __name__ == '__main__':
  sys.exit(main())
|
Sarthak30/User-Registration | source/manage.py | Python | gpl-2.0 | 768 | 0.00651 | #!/usr/bin/env python
import os

from app import create_app, db
from app.models import User, Role
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand

# Application factory: configuration name comes from FLASK_CONFIG.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)


def make_shell_context():
    """Objects automatically available in `python manage.py shell`."""
    # (Restored: the def line was split by a stray delimiter.)
    return dict(app=app, db=db, User=User, Role=Role)


manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)


@manager.command
def test():
    """Run the unit tests."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)


if __name__ == '__main__':
    with app.app_context():
        # Ensure all tables exist before serving commands.
        # (Restored: db.create_all() was split by a stray delimiter.)
        db.create_all()
    manager.run()
|
percyfal/bokeh | sphinx/source/conf.py | Python | bsd-3-clause | 9,863 | 0.004157 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from os.path import abspath, dirname, join
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_enum',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_index_toctree',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_options',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_palette_group',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.bokeh_sitemap',
'bokeh.sphinxext.collapsible_code_block',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bokeh'
copyright = '© Copyright 2015, Continuum Analytics.'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# get all the versions that will appear in the version dropdown
f = open(join(dirname(abspath(__file__)), "all_versions.txt"))
all_versions = [x.strip() for x in reversed(f.readlines())]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#
# NOTE: in these docs all .py script are assumed to be bokeh plot scripts!
# with bokeh_plot_pyfile_include_dirs set desired folder to look for .py files
bokeh_plot_pyfile_include_dirs = ['docs']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# This would more properly be done with rst_epilog but something about
# the combination of this with the bokeh-gallery directive breaks the build
rst_prolog = """
.. |Color| replace:: :py:class:`~bokeh.core.properties.Color`
.. |DataSpec| replace:: :py:class:`~bokeh.core.properties.DataSpec`
.. |Document| replace:: :py:class:`~bokeh.document.Document`
.. |HasProps| replace:: :py:class:`~bokeh.core.has_props.HasProps`
.. |Model| replace:: :py:class:`~bokeh.model.Model`
.. |Property| replace:: :py:class:`~bokeh.core.property.bases.Property`
.. |PropertyContainer| replace:: :py:class:`~bokeh.core.property.containers.PropertyContainer`
.. |PropertyDescriptor| replace:: :py:class:`~bokeh.core.property.descriptor.PropertyDescriptor`
.. |UnitsSpec| replace:: :py:class:`~bokeh.core.properties.UnitsSpec`
.. |field| replace:: :py:func:`~bokeh.core.properties.field`
.. |value| replace:: :py:func:`~bokeh.core.properties.value`
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bokeh_theme'
html_theme_path = ['.']
MAIN_SITE = '//bokehplots.com'
# Template context for the Bokeh Sphinx theme.  (Restored the Twitter URL
# and the 'NAV_DOCS' key, both split by stray delimiters.)
html_context = {
    'SITEMAP_BASE_URL': 'http://bokeh.pydata.org/en/', # Trailing slash is needed
    'DESCRIPTION': 'Bokeh visualization library, documentation site.',
    'AUTHOR': 'Bokeh contributors',
    'VERSION': version,

    # Nav
    'NAV': (
        ('About', MAIN_SITE + '/pages/about-bokeh.html'),
        ('Gallery', '/docs/gallery.html'),
        ('Docs', '//bokeh.pydata.org/en/latest/'),
        ('Github', '//github.com/bokeh/bokeh'),
    ),
    # Links
    'LINKS': (
        ('FAQs', MAIN_SITE + '/pages/faqs.html'),
        ('Technical vision', MAIN_SITE + '/pages/technical-vision.html'),
        ('Roadmap', MAIN_SITE + '/pages/roadmap.html'),
        ('Citation', MAIN_SITE + '/pages/citation.html'),
    ),
    # About Links
    'ABOUT': (
        ('About', MAIN_SITE + '/pages/about-bokeh.html'),
        ('Team', MAIN_SITE + '/pages/team.html'),
        ('Contact', MAIN_SITE + '/pages/contact.html'),
    ),
    # Social links
    'SOCIAL': (
        ('Contribute', MAIN_SITE + '/pages/contribute.html'),
        ('Mailing list', '//groups.google.com/a/continuum.io/forum/#!forum/bokeh'),
        ('Github', '//github.com/bokeh/bokeh'),
        ('Twitter', '//twitter.com/BokehPlots'),
        ('YouTube', '//www.youtube.com/channel/UCK0rSk29mmg4UT4bIOvPYhw')
    ),
    # Links for the docs sub navigation
    'NAV_DOCS': (
        ('Installation', 'installation'),
        ('User Guide', 'user_guide'),
        ('Gallery', 'gallery'),
        ('Reference', 'reference'),
        ('Releases', 'releases/%s' % version),
        ('Developer Guide', 'dev_guide'),
    ),
    'ALL_VERSIONS': all_versions,
    'css_server': os.environ.get('BOKEH_DOCS_CSS_SERVER', 'bokehplots.com'),
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation', u'Continuum Analytics', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.