| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from __future__ import unicode_literals
import json
import subprocess
def rf_command_line():
return 'pyfeld --json browse'
class DirLevel:
def __init__(self, path, friendly_name, items):
self.path = path
self.friendly_name = friendly_name
self.items = items
class DirBrowseExtended:
def __init__(self):
dNull = DirLevel("0", "services", self.retrieve("0"))
self.path = "0"
self.pathes = ["0"]
self.dirs = [dNull]
self.depth = 0
self.retrieve(self.pathes[self.depth])
def get_current_path(self):
return self.path
def enter(self, index):
self.path = self.dirs[self.depth].items[index]['idPath']
items = self.retrieve(self.path)
new_dir = DirLevel(self.path, self.dirs[self.depth].items[index]['title'], items)
self.depth += 1
if len(self.dirs) <= self.depth:
self.dirs.append(new_dir)
else:
self.dirs[self.depth] = new_dir
def enter_by_friendly_name(self, name):
pass
def leave(self):
if self.depth != 0:
self.depth -= 1
self.path = self.dirs[self.depth].path
    def retrieve(self, path):
        command = rf_command_line()
        if isinstance(path, bytes):
            command += ' "' + path.decode('utf-8') + '"'
        else:
            command += ' "' + path + '"'
        try:
            process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except Exception:
            # pyfeld could not be launched; return an empty item list so callers can still iterate
            return []
        jsonFile = ""
        while True:
            nextline = process.stdout.readline()
            if len(nextline) == 0 and process.poll() is not None:
                break
            jsonFile += nextline.decode('utf-8')
        return json.loads(jsonFile)
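    # Note: retrieve() interpolates the path into a shell command string. A
    # safer sketch (not how this file actually invokes pyfeld) would skip
    # shell=True and pass an argument list instead, e.g.:
    #
    #     subprocess.Popen(['pyfeld', '--json', 'browse', path],
    #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)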
def get_friendly_path_name(self, separator=" -> "):
s = ""
for i in range(1, self.depth+1):
s += self.dirs[i].friendly_name + separator
        return s[:-len(separator)]  # remove the trailing separator
def get_friendly_name(self, index):
return self.dirs[self.depth].items[index]['title']
def get_path_for_index(self, index):
return self.dirs[self.depth].items[index]['idPath']
def get_type(self, index):
return self.dirs[self.depth].items[index]['class']
def get_albumarturi(self, index):
return self.dirs[self.depth].items[index]['albumarturi']
def get_album(self, index):
return self.dirs[self.depth].items[index]['album']
def get_artist(self, index):
return self.dirs[self.depth].items[index]['artist']
def get_class(self, index):
return self.dirs[self.depth].items[index]['class']
def get_resourceName(self, index):
return self.dirs[self.depth].items[index]['resSourceName']
def get_resourceType(self, index):
return self.dirs[self.depth].items[index]['resSourceType']
def max_entries_on_level(self):
return len(self.dirs[self.depth].items)
if __name__ == '__main__':
db = DirBrowseExtended()
db.enter(1)
db.enter(1)
print (str(db.dirs[2].items[0]))
for item in db.dirs[2].items:
print(item['title'].encode('utf-8'),
item['class'].encode('utf-8'),
item['resSourceName'].encode('utf-8'),
item['album'].encode('utf-8'),
item['albumarturi'].encode('utf-8'),
item['artist'].encode('utf-8'),
)
|
{
"content_hash": "62d339faa97c783efa37922e827aab9e",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 109,
"avg_line_length": 31.294642857142858,
"alnum_prop": 0.5728958630527817,
"repo_name": "scjurgen/pyfeld",
"id": "8917cf5c01fae801a91c561f8daf14e16a7d1b0c",
"size": "3528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfeld/dirBrowseExtended.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20656"
},
{
"name": "PHP",
"bytes": "10588"
},
{
"name": "Python",
"bytes": "239567"
},
{
"name": "Shell",
"bytes": "2839"
}
],
"symlink_target": ""
}
|
"""
https://en.wikipedia.org/wiki/Infix_notation
https://en.wikipedia.org/wiki/Reverse_Polish_notation
https://en.wikipedia.org/wiki/Shunting-yard_algorithm
"""
from .balanced_parentheses import balanced_parentheses
from .stack import Stack
def precedence(char: str) -> int:
"""
Return integer value representing an operator's precedence, or
order of operation.
https://en.wikipedia.org/wiki/Order_of_operations
"""
return {"+": 1, "-": 1, "*": 2, "/": 2, "^": 3}.get(char, -1)
def infix_to_postfix(expression_str: str) -> str:
"""
>>> infix_to_postfix("(1*(2+3)+4))")
Traceback (most recent call last):
...
ValueError: Mismatched parentheses
>>> infix_to_postfix("")
''
>>> infix_to_postfix("3+2")
'3 2 +'
>>> infix_to_postfix("(3+4)*5-6")
'3 4 + 5 * 6 -'
>>> infix_to_postfix("(1+2)*3/4-5")
'1 2 + 3 * 4 / 5 -'
>>> infix_to_postfix("a+b*c+(d*e+f)*g")
'a b c * + d e * f + g * +'
>>> infix_to_postfix("x^y/(5*z)+2")
'x y ^ 5 z * / 2 +'
"""
if not balanced_parentheses(expression_str):
raise ValueError("Mismatched parentheses")
stack: Stack[str] = Stack()
postfix = []
for char in expression_str:
if char.isalpha() or char.isdigit():
postfix.append(char)
elif char == "(":
stack.push(char)
elif char == ")":
while not stack.is_empty() and stack.peek() != "(":
postfix.append(stack.pop())
stack.pop()
else:
while not stack.is_empty() and precedence(char) <= precedence(stack.peek()):
postfix.append(stack.pop())
stack.push(char)
while not stack.is_empty():
postfix.append(stack.pop())
return " ".join(postfix)
if __name__ == "__main__":
from doctest import testmod
testmod()
expression = "a+b*(c^d-e)^(f+g*h)-i"
print("Infix to Postfix Notation demonstration:\n")
print("Infix notation: " + expression)
print("Postfix notation: " + infix_to_postfix(expression))
|
{
"content_hash": "32cdc126bb036a0ebb2fd66f14cf838d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 88,
"avg_line_length": 30.014492753623188,
"alnum_prop": 0.5615644616127474,
"repo_name": "TheAlgorithms/Python",
"id": "9017443091cf95f6525bf599c79e07781cc515b4",
"size": "2071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_structures/stacks/infix_to_postfix_conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2601694"
}
],
"symlink_target": ""
}
|
import clientPackets
import glob
def handle(userToken, packetData):
# Read token and packet data
userID = userToken.userID
packetData = clientPackets.matchInvite(packetData)
# Get match ID and match object
matchID = userToken.matchID
# Make sure we are in a match
if matchID == -1:
return
# Make sure the match exists
if matchID not in glob.matches.matches:
return
# Get match object
match = glob.matches.matches[matchID]
# Send invite
match.invite(userID, packetData["userID"])
|
{
"content_hash": "db248d30dec59b8c763d9cd61eb1ad6b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 51,
"avg_line_length": 20.916666666666668,
"alnum_prop": 0.7470119521912351,
"repo_name": "osuripple/ripple",
"id": "f27ccaa3eef3a9bc5273422bc6c0768008de436d",
"size": "502",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "c.ppy.sh/matchInviteEvent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "58"
},
{
"name": "CSS",
"bytes": "73804"
},
{
"name": "HTML",
"bytes": "4066"
},
{
"name": "JavaScript",
"bytes": "832595"
},
{
"name": "PHP",
"bytes": "732908"
},
{
"name": "Python",
"bytes": "156752"
},
{
"name": "Shell",
"bytes": "4249"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
from kombu import BrokerConnection
from kombu.mixins import ConsumerMixin
from entropy.queues import pass_events
LOG = logging.getLogger(__name__)
LOG_REPO = os.path.join(os.getcwd(), 'entropy', 'logs')
def get_vm_count(body, **kwargs):
LOG.warning("Received message: %r" % body)
try:
payload = body['payload']
for host, count in payload['vm_count'].items():
if count > kwargs['limit']:
LOG.error("Host %s has %s vms, more than %s",
host, count, kwargs['limit'])
elif count == -1:
LOG.error("Libvirt errored out when connecting to %s", host)
except Exception as e:
LOG.error(e)
class SomeConsumer(ConsumerMixin):
def __init__(self, connection, **kwargs):
self.connection = connection
self.args = kwargs
return
def get_consumers(self, consumer, channel):
return [consumer(pass_events, callbacks=[self.on_message])]
def on_message(self, body, message):
get_vm_count(body, **self.args)
message.ack()
return
def recv_message(**kwargs):
connection = BrokerConnection('amqp://%(mq_user)s:%(mq_password)s@'
'%(mq_host)s:%(mq_port)s//'
% kwargs['mq_args'])
with connection as conn:
try:
SomeConsumer(conn, **kwargs).run()
except KeyboardInterrupt:
LOG.warning('Quitting %s' % __name__)
def parse_conf(conf):
with open(conf, 'r') as json_data:
data = json.load(json_data)
# stuff for the message queue
mq_args = {'mq_host': data['mq_host'],
'mq_port': data['mq_port'],
'mq_user': data['mq_user'],
'mq_password': data['mq_password']}
kwargs = data
kwargs['mq_args'] = mq_args
return kwargs
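# A minimal sketch of the JSON conf file parse_conf() expects. Only the four
# mq_* keys are read explicitly above; "limit" is an assumption based on
# get_vm_count() reading kwargs['limit'], and all values are placeholders.
#
# {
#     "mq_host": "localhost",
#     "mq_port": 5672,
#     "mq_user": "guest",
#     "mq_password": "guest",
#     "limit": 50
# }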
def main(**kwargs):
LOG.warning('starting react script %s' % kwargs['name'])
args = parse_conf(kwargs['conf'])
recv_message(**args)
if __name__ == '__main__':
logging.basicConfig(filename=os.path.join(LOG_REPO, 'react.log'))
main()
|
{
"content_hash": "efc4c3085b042f8de78858a20d621c0d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 76,
"avg_line_length": 28.558441558441558,
"alnum_prop": 0.561618917689859,
"repo_name": "ddutta/entropy",
"id": "6b9dec0a92aaf5e55a82ff0b99584ea93955a726",
"size": "2795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entropy/repair/vm_count_react.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "36317"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
}
|
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot as Slot
from .newconnectiondialog_ui import Ui_Dialog
from ....core2.devices.device.frontend import DeviceFrontend
class NewConnectionDialog(QtWidgets.QDialog, Ui_Dialog):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.setupUi(self)
def setupUi(self, Dialog):
super().setupUi(Dialog)
self.driverClassComboBox.addItems(sorted([sc.devicename for sc in DeviceFrontend.subclasses() if sc.devicename]))
self.driverClassComboBox.setCurrentIndex(0)
self.deviceNameLineEdit.textEdited.connect(self.check)
self.driverClassComboBox.currentIndexChanged.connect(self.check)
self.hostNameLineEdit.textEdited.connect(self.check)
self.portSpinBox.valueChanged.connect(self.check)
self.check()
@Slot()
def check(self):
self.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(
bool(self.driverClassComboBox.currentText()) and
bool(self.deviceNameLineEdit.text()) and
bool(self.hostNameLineEdit.text()) and
bool(self.portSpinBox.value())
)
def devicename(self) -> str:
return self.deviceNameLineEdit.text()
def driverClassName(self) -> str:
return self.driverClassComboBox.currentText()
def host(self) -> str:
return self.hostNameLineEdit.text()
def port(self) -> int:
return self.portSpinBox.value()
|
{
"content_hash": "78ae16a8827f63f6708bc85be7b78953",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 121,
"avg_line_length": 36.170731707317074,
"alnum_prop": 0.6877950101146325,
"repo_name": "awacha/cct",
"id": "4681ff918af2162501ef83dde0593aa8fdfcbc57",
"size": "1483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cct/qtgui2/devices/connectioneditor/newconnectiondialog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "607"
},
{
"name": "CSS",
"bytes": "908"
},
{
"name": "Cython",
"bytes": "70859"
},
{
"name": "HTML",
"bytes": "1665"
},
{
"name": "Jupyter Notebook",
"bytes": "195924"
},
{
"name": "Python",
"bytes": "1944682"
},
{
"name": "Shell",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import os
import os.path
from django.conf import settings
from downloads.models import TEMP_UPLOAD_FOLDER
# create temporary downloads folder if it does not exist
tmp_abs_path = os.path.join(settings.MEDIA_ROOT, TEMP_UPLOAD_FOLDER)
if not os.path.exists(tmp_abs_path):
try:
        # mkdir's mode argument is octal; the decimal literal 644 was
        # almost certainly meant as 0o644
        os.mkdir(tmp_abs_path, 0o644)
except OSError:
pass
|
{
"content_hash": "1083c2c5b19a13c71feebab2b5a14ff0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 23.4,
"alnum_prop": 0.7264957264957265,
"repo_name": "praekelt/jmbo-downloads",
"id": "69fdacd465ebdb50419d3d5cf2c64c689fa94aae",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "downloads/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "78529"
}
],
"symlink_target": ""
}
|
"""
adjusts module path to account for virtual namespace
This is required primarily for testing.
"""
import sys
import os
import pkg_resources
VIRTUAL_NAMESPACE = 'tiddlywebplugins'
local_package = os.path.abspath(VIRTUAL_NAMESPACE)
sys.modules[VIRTUAL_NAMESPACE].__dict__['__path__'].insert(0, local_package)
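# Typical use (an assumption based on the docstring above, not something shown
# in this file): a test module imports this module for its side effect before
# importing anything from the virtual namespace, e.g.
#
#     import mangler  # noqa -- prepends the local package to the namespace path
#     from tiddlywebplugins import some_local_plugin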
|
{
"content_hash": "7407f437d248db2b264e37ece8fc5c1d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 21,
"alnum_prop": 0.7587301587301587,
"repo_name": "TiddlySpace/tiddlyspace",
"id": "dbefd558ca4650208d44923636a65f9b821b5a26",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mangler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11841"
},
{
"name": "JavaScript",
"bytes": "286904"
},
{
"name": "Python",
"bytes": "220382"
},
{
"name": "Shell",
"bytes": "4714"
}
],
"symlink_target": ""
}
|
"""
@brief test log(time=7s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
from ensae_teaching_cs.faq.faq_python import get_month_name, get_day_name
class TestFaqPython(unittest.TestCase):
def test_month_name(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import datetime
dt = datetime.datetime(2016, 1, 25)
name = get_month_name(dt)
self.assertEqual(name, "January")
def test_day_name(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import datetime
dt = datetime.datetime(2016, 1, 25)
name = get_day_name(dt)
self.assertEqual(name, "Monday")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "22c4f9463d8fff5dbd66d4440df1162a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 24.142857142857142,
"alnum_prop": 0.5609467455621302,
"repo_name": "sdpython/ensae_teaching_cs",
"id": "67e592e6383870f63ce68376a8e1a2950c5abcc0",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_unittests/ut_faq/test_faq_python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "C#",
"bytes": "26850"
},
{
"name": "CSS",
"bytes": "220769"
},
{
"name": "HTML",
"bytes": "44390"
},
{
"name": "JavaScript",
"bytes": "31077"
},
{
"name": "Jupyter Notebook",
"bytes": "45255629"
},
{
"name": "PostScript",
"bytes": "169142"
},
{
"name": "Python",
"bytes": "1770141"
},
{
"name": "R",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "3675"
},
{
"name": "TeX",
"bytes": "593824"
}
],
"symlink_target": ""
}
|
"""
Combinatory Categorial Grammar.
For more information see nltk/doc/contrib/ccg/ccg.pdf
"""
from nltk.ccg.combinator import (UndirectedBinaryCombinator, DirectedBinaryCombinator,
ForwardCombinator, BackwardCombinator,
UndirectedFunctionApplication, ForwardApplication,
BackwardApplication, UndirectedComposition,
ForwardComposition, BackwardComposition,
BackwardBx, UndirectedSubstitution, ForwardSubstitution,
BackwardSx, UndirectedTypeRaise, ForwardT, BackwardT)
from nltk.ccg.chart import CCGEdge, CCGLeafEdge, CCGChartParser, CCGChart
from nltk.ccg.lexicon import CCGLexicon
|
{
"content_hash": "c570f63dc0bab264f00d763cc9faf5a2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 89,
"avg_line_length": 52.06666666666667,
"alnum_prop": 0.6414852752880922,
"repo_name": "sivu22/nltk-on-gae",
"id": "5b66af7226ab6bb6cd0f263e3e6d80b68b6ba172",
"size": "1007",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "GAE/nltk/ccg/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "Python",
"bytes": "3591141"
}
],
"symlink_target": ""
}
|
'''
Created on Apr 16, 2013
@author: davey
'''
"""Implements the model for project management."""
from django.db import models
from apps.managers.player_mgr import player_mgr
from apps.managers.resource_mgr import resource_mgr
from apps.managers.team_mgr import team_mgr
from apps.managers.team_mgr.models import Group, Team
from apps.utils.utils import media_file_path
_MEDIA_LOCATION = "projects"
"""location for uploaded files."""
class Project(models.Model):
"""Represents a project in the system."""
STATUS_CHOICES = (("Suggested", "Suggested"),
("Approved", "Approved"),
("Finished", "Finished"),)
created = models.DateField(auto_now_add=True)
leader = models.CharField(
max_length=100,
help_text="The leader and creator of this project.")
title = models.CharField(
max_length=75,
help_text="The title of the project.")
    short_description = models.TextField(
        max_length=300,
        help_text="Short description of the project. This should include information about its "
                  "goal. It is usually displayed in the project list page.")
    long_description = models.TextField(
        max_length=5000,
        help_text="Detailed information about the project. It is usually displayed in the details "
                  "view of the project.")
number_of_members = models.IntegerField(
default=1,
help_text="Minimum is 1 member, maximum is 100.")
status = models.CharField(
default="Suggested",
choices=STATUS_CHOICES,
max_length=20,
help_text="The status of the project.")
deadline = models.DateField(
help_text="The last day on which the project "
"can be completed to count for points.")
upvotes = models.IntegerField(default=0)
points = models.IntegerField(
default=0,
help_text="The value of the project.")
class Goal(models.Model):
project = models.ForeignKey(Project)
text = models.CharField(max_length=300, help_text="The goal or next step.")
deadline = models.DateField()
completed = models.BooleanField(default=False)
class Comment(models.Model):
project = models.ForeignKey(Project)
created = models.DateTimeField(auto_now_add=True)
comment = models.CharField(max_length=3000)
player = models.CharField(max_length=100)
#this possibly has to go (to be replaced by a user_mgr function?)
class Player(models.Model):
project = models.ForeignKey(Project)
name = models.CharField(max_length=100)
|
{
"content_hash": "b98c7132fd6dfe270c14b28ac8fc02f6",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 99,
"avg_line_length": 31.759036144578314,
"alnum_prop": 0.6555386949924128,
"repo_name": "MakahikiKTUH/makahiki-ktuh",
"id": "825d36ecb7c3b832b29212d19bb6e0e6f788a4de",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makahiki/apps/widgets/projects/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "293007"
},
{
"name": "Python",
"bytes": "1965590"
},
{
"name": "Shell",
"bytes": "6556"
}
],
"symlink_target": ""
}
|
from pkg_resources import get_distribution
__import__('pkg_resources').declare_namespace(__name__)
__version__ = get_distribution("qamplus").version
__author__ = "QAMplus"
__copyright__ = "Copyright 2017, QAMplus Corp."
__credits__ = ["QAMplus"]
__license__ = "MIT"
__maintainer__ = "QAMplus Corp."
__email__ = "contact@qamplus.com"
__status__ = "Production"
|
{
"content_hash": "219589e0259aa9e8ae37e314127e4d52",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 30,
"alnum_prop": 0.6666666666666666,
"repo_name": "jeniaoo/qamplus-python",
"id": "f6984f9fd10491f483005597b19445cf63bdf591",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qamplus/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10445"
}
],
"symlink_target": ""
}
|
"""
wagtailsystemtext
----------
Simplified Wagtail system text management
"""
__title__ = 'wagtailsystemtext'
__version__ = '1.2.2'
__build__ = 122
__author__ = "Martin Sandström"
__license__ = "MIT"
__copyright__ = "Copyright 2016-2018 Martin Sandström"
default_app_config = 'wagtailsystemtext.apps.WagtailSystemTextConfig'
|
{
"content_hash": "682c38b7cffc666d6cce9dad1cdaea51",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 69,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.6890243902439024,
"repo_name": "Frojd/wagtail-systemtext",
"id": "de04958327b767b5a2e2b375b771ed1d95942ed2",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "wagtailsystemtext/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34200"
},
{
"name": "Shell",
"bytes": "1049"
}
],
"symlink_target": ""
}
|
'''
Widget class
============
The :class:`Widget` class is the base class required for creating Widgets.
This widget class was designed with a couple of principles in mind:
* *Event Driven*
Widget interaction is built on top of events that occur. If a property
changes, the widget can respond to the change in the 'on_<propname>'
    callback. If nothing changes, nothing will be done. That's the main
    goal of the :class:`~kivy.properties.Property` class (see the sketch
    after this list).
* *Separation Of Concerns (the widget and its graphical representation)*
Widgets don't have a `draw()` method. This is done on purpose: The idea
is to allow you to create your own graphical representation outside the
widget class.
Obviously you can still use all the available properties to do that, so
that your representation properly reflects the widget's current state.
Every widget has its own :class:`~kivy.graphics.Canvas` that you
can use to draw. This separation allows Kivy to run your
application in a very efficient manner.
* *Bounding Box / Collision*
Often you want to know if a certain point is within the bounds of your
widget. An example would be a button widget where you only want to
trigger an action when the button itself is actually touched.
For this, you can use the :meth:`~Widget.collide_point` method, which
will return True if the point you pass to it is inside the axis-aligned
bounding box defined by the widget's position and size.
If a simple AABB is not sufficient, you can override the method to
perform the collision checks with more complex shapes, e.g. a polygon.
You can also check if a widget collides with another widget with
:meth:`~Widget.collide_widget`.
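For the *Event Driven* point above, a minimal sketch of such a property
callback (the class name and printed values are illustrative, not taken from
this module):

.. code-block:: python

    class MovingWidget(Widget):

        def on_pos(self, instance, value):
            # Called automatically whenever the 'pos' property changes.
            print('pos changed to', value)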
We also have some default values and behaviors that you should be aware of:
* A :class:`Widget` is not a :class:`~kivy.uix.layout.Layout`: it will not
change the position or the size of its children. If you want control over
positioning or sizing, use a :class:`~kivy.uix.layout.Layout`.
* The default size of a widget is (100, 100). This is only changed if the
parent is a :class:`~kivy.uix.layout.Layout`.
For example, if you add a :class:`Label` inside a
:class:`Button`, the label will not inherit the button's size or position
because the button is not a *Layout*: it's just another *Widget*.
* The default size_hint is (1, 1). If the parent is a :class:`Layout`, then the
widget size will be the parent layout's size.
* :meth:`~Widget.on_touch_down`, :meth:`~Widget.on_touch_move`,
:meth:`~Widget.on_touch_up` don't do any sort of collisions. If you want to
know if the touch is inside your widget, use :meth:`~Widget.collide_point`.
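For the last point, a minimal sketch of that pattern (the class name is
illustrative):

.. code-block:: python

    class ClickableArea(Widget):

        def on_touch_down(self, touch):
            if self.collide_point(*touch.pos):
                # The touch started inside this widget: handle it and stop
                # further dispatching.
                return True
            return super(ClickableArea, self).on_touch_down(touch)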
Using Properties
----------------
When you read the documentation, all properties are described in the format::
<name> is a <property class> and defaults to <default value>.
e.g.
:attr:`~kivy.uix.label.Label.text` is a
:class:`~kivy.properties.StringProperty` and defaults to ''.
If you want to be notified when the pos attribute changes, i.e. when the
widget moves, you can bind your own callback function like this::
def callback_pos(instance, value):
print('The widget', instance, 'moved to', value)
wid = Widget()
wid.bind(pos=callback_pos)
Read more about :doc:`/api-kivy.properties`.
Basic drawing
-------------
Widgets support a range of drawing instructions that you can use to customize
the look of your widgets and layouts. For example, to draw a background image
for your widget, you can do the following:
.. code-block:: python
def redraw(self, args):
self.bg_rect.size = self.size
self.bg_rect.pos = self.pos
widget = Widget()
with widget.canvas:
        widget.bg_rect = Rectangle(source="cover.jpg", pos=widget.pos,
                                   size=widget.size)
widget.bind(pos=redraw, size=redraw)
To draw a background in kv:
.. code-block:: kv
Widget:
canvas:
Rectangle:
source: "cover.jpg"
size: self.size
pos: self.pos
These examples only scratch the surface. Please see the :mod:`kivy.graphics`
documentation for more information.
.. _widget-event-bubbling:
Widget touch event bubbling
---------------------------
When you catch touch events between multiple widgets, you often
need to be aware of the order in which these events are propagated. In Kivy,
events bubble up from the first child upwards through the other children.
If a widget has children, the event is passed through its children before
being passed on to the widget after it.
As the :meth:`~kivy.uix.widget.Widget.add_widget` method inserts widgets at
index 0 by default, this means the event goes from the most recently added
widget back to the first one added. Consider the following:
.. code-block:: python
box = BoxLayout()
box.add_widget(Label(text="a"))
box.add_widget(Label(text="b"))
box.add_widget(Label(text="c"))
The label with text "c" gets the event first, "b" second and "a" last. You can
reverse this order by manually specifying the index:
.. code-block:: python
box = BoxLayout()
box.add_widget(Label(text="a"), index=0)
box.add_widget(Label(text="b"), index=1)
box.add_widget(Label(text="c"), index=2)
Now the order would be "a", "b" then "c". One thing to keep in mind when using
kv is that declaring a widget uses the
:meth:`~kivy.uix.widget.Widget.add_widget` method for insertion. Hence, using
.. code-block:: kv
BoxLayout:
MyLabel:
text: "a"
MyLabel:
text: "b"
MyLabel:
text: "c"
would result in the event order "c", "b" then "a" as "c" was actually the last
added widget. It thus has index 0, "b" index 1 and "a" index 2. Effectively,
the child order is the reverse of its listed order.
This ordering is the same for the :meth:`~kivy.uix.widget.Widget.on_touch_move`
and :meth:`~kivy.uix.widget.Widget.on_touch_up` events.
In order to stop this event bubbling, a method can return `True`. This tells
Kivy the event has been handled and the event propagation stops. For example:
.. code-block:: python
class MyWidget(Widget):
def on_touch_down(self, touch):
            if <some_condition>:
# Do stuff here and kill the event
return True
else:
return super(MyWidget, self).on_touch_down(touch)
This approach gives you good control over exactly how events are dispatched
and managed. Sometimes, however, you may wish to let the event be completely
propagated before taking action. You can use the
:class:`~kivy.clock.Clock` to help you here:
.. code-block:: python
class MyWidget(Label):
def on_touch_down(self, touch, after=False):
if after:
print "Fired after the event has been dispatched!"
else:
Clock.schedule_once(lambda dt: self.on_touch_down(touch, True))
return super(MyWidget, self).on_touch_down(touch)
Usage of :attr:`Widget.center`, :attr:`Widget.right`, and :attr:`Widget.top`
----------------------------------------------------------------------------
A common mistake when using one of the computed properties such as
:attr:`Widget.right` is to use it to make a widget follow its parent with a
KV rule such as `right: self.parent.right`. Consider, for example:
.. code-block:: kv
FloatLayout:
id: layout
width: 100
Widget:
id: wid
right: layout.right
The (mistaken) expectation is that this rule ensures that wid's right will
always be whatever layout's right is - that is wid.right and layout.right will
always be identical. In actual fact, this rule only says that "whenever
layout's `right` changes, wid's right will be set to that value". The
difference being that as long as `layout.right` doesn't change, `wid.right`
could be anything, even a value that will make them different.
Specifically, for the KV code above, consider the following example::
>>> print(layout.right, wid.right)
(100, 100)
>>> wid.x = 200
>>> print(layout.right, wid.right)
(100, 300)
As can be seen, initially they are in sync, however, when we change `wid.x`
they go out of sync because `layout.right` is not changed and the rule is not
triggered.
The proper way to make the widget follow its parent's right is to use
:attr:`Widget.pos_hint`. If instead of `right: layout.right` we did
`pos_hint: {'right': 1}`, then the widgets right will always be set to be
at the parent's right at each layout update.
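For instance, a minimal sketch of the pos_hint approach in Python (the
variable names are illustrative):

.. code-block:: python

    layout = FloatLayout()
    wid = Widget(pos_hint={'right': 1})
    layout.add_widget(wid)
    # On each layout pass the FloatLayout sets wid.right to layout.right,
    # so the two stay in sync even after layout.right changes.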
'''
__all__ = ('Widget', 'WidgetException')
from kivy.event import EventDispatcher
from kivy.eventmanager import (
MODE_DONT_DISPATCH,
MODE_FILTERED_DISPATCH,
MODE_DEFAULT_DISPATCH
)
from kivy.factory import Factory
from kivy.properties import (
NumericProperty, StringProperty, AliasProperty, ReferenceListProperty,
ObjectProperty, ListProperty, DictProperty, BooleanProperty)
from kivy.graphics import (
Canvas, Translate, Fbo, ClearColor, ClearBuffers, Scale)
from kivy.graphics.transformation import Matrix
from kivy.base import EventLoop
from kivy.lang import Builder
from kivy.context import get_current_context
from kivy.weakproxy import WeakProxy
from functools import partial
from itertools import islice
# References to all the widget destructors (partial method with widget uid as
# key).
_widget_destructors = {}
def _widget_destructor(uid, r):
# Internal method called when a widget is deleted from memory. the only
# thing we remember about it is its uid. Clear all the associated callbacks
# created in kv language.
del _widget_destructors[uid]
Builder.unbind_widget(uid)
class WidgetException(Exception):
'''Fired when the widget gets an exception.
'''
pass
class WidgetMetaclass(type):
'''Metaclass to automatically register new widgets for the
:class:`~kivy.factory.Factory`.
.. warning::
This metaclass is used by the Widget. Do not use it directly!
'''
def __init__(mcs, name, bases, attrs):
super(WidgetMetaclass, mcs).__init__(name, bases, attrs)
Factory.register(name, cls=mcs)
#: Base class used for Widget, that inherits from :class:`EventDispatcher`
WidgetBase = WidgetMetaclass('WidgetBase', (EventDispatcher, ), {})
class Widget(WidgetBase):
'''Widget class. See module documentation for more information.
:Events:
`on_touch_down`: `(touch, )`
Fired when a new touch event occurs. `touch` is the touch object.
`on_touch_move`: `(touch, )`
Fired when an existing touch moves. `touch` is the touch object.
`on_touch_up`: `(touch, )`
Fired when an existing touch disappears. `touch` is the touch
object.
`on_kv_post`: `(base_widget, )`
Fired after all the kv rules associated with the widget
and all other widgets that are in any of those rules have had
all their kv rules applied. `base_widget` is the base-most widget
whose instantiation triggered the kv rules (i.e. the widget
instantiated from Python, e.g. ``MyWidget()``).
.. versionchanged:: 1.11.0
.. warning::
Adding a `__del__` method to a class derived from Widget with Python
prior to 3.4 will disable automatic garbage collection for instances
of that class. This is because the Widget class creates reference
cycles, thereby `preventing garbage collection
<https://docs.python.org/2/library/gc.html#gc.garbage>`_.
.. versionchanged:: 1.0.9
Everything related to event properties has been moved to the
:class:`~kivy.event.EventDispatcher`. Event properties can now be used
when constructing a simple class without subclassing :class:`Widget`.
.. versionchanged:: 1.5.0
The constructor now accepts on_* arguments to automatically bind
callbacks to properties or events, as in the Kv language.
'''
__metaclass__ = WidgetMetaclass
__events__ = (
'on_motion', 'on_touch_down', 'on_touch_move', 'on_touch_up',
'on_kv_post'
)
_proxy_ref = None
def __init__(self, **kwargs):
# Before doing anything, ensure the windows exist.
EventLoop.ensure_window()
# Assign the default context of the widget creation.
if not hasattr(self, '_context'):
self._context = get_current_context()
no_builder = '__no_builder' in kwargs
self._disabled_value = False
if no_builder:
del kwargs['__no_builder']
on_args = {k: v for k, v in kwargs.items() if k[:3] == 'on_'}
for key in on_args:
del kwargs[key]
self._disabled_count = 0
super(Widget, self).__init__(**kwargs)
# Create the default canvas if it does not exist.
if self.canvas is None:
self.canvas = Canvas(opacity=self.opacity)
# Apply all the styles.
if not no_builder:
rule_children = []
self.apply_class_lang_rules(
ignored_consts=self._kwargs_applied_init,
rule_children=rule_children)
for widget in rule_children:
widget.dispatch('on_kv_post', self)
self.dispatch('on_kv_post', self)
# Bind all the events.
if on_args:
self.bind(**on_args)
@property
def proxy_ref(self):
'''Return a proxy reference to the widget, i.e. without creating a
reference to the widget. See `weakref.proxy
<http://docs.python.org/2/library/weakref.html?highlight\
=proxy#weakref.proxy>`_ for more information.
.. versionadded:: 1.7.2
'''
_proxy_ref = self._proxy_ref
if _proxy_ref is not None:
return _proxy_ref
f = partial(_widget_destructor, self.uid)
self._proxy_ref = _proxy_ref = WeakProxy(self, f)
        # Only f should be enough here, but it appears that in this very
        # specific case the proxy destructor is not called unless f and
        # _proxy_ref are kept together in a tuple.
_widget_destructors[self.uid] = (f, _proxy_ref)
return _proxy_ref
def __hash__(self):
return id(self)
def apply_class_lang_rules(
self, root=None, ignored_consts=set(), rule_children=None):
'''
Method that is called by kivy to apply the kv rules of this widget's
class.
:Parameters:
`root`: :class:`Widget`
The root widget that instantiated this widget in kv, if the
widget was instantiated in kv, otherwise ``None``.
`ignored_consts`: set
(internal) See :meth:`~kivy.lang.builder.BuilderBase.apply`.
`rule_children`: list
(internal) See :meth:`~kivy.lang.builder.BuilderBase.apply`.
This is useful to be able to execute code before/after the class kv
rules are applied to the widget. E.g. if the kv code requires some
properties to be initialized before it is used in a binding rule.
If overwriting remember to call ``super``, otherwise the kv rules will
not be applied.
In the following example,
.. code-block:: python
class MyWidget(Widget):
pass
class OtherWidget(MyWidget):
pass
.. code-block:: kv
<MyWidget>:
my_prop: some_value
<OtherWidget>:
other_prop: some_value
When ``OtherWidget`` is instantiated with ``OtherWidget()``, the
widget's :meth:`apply_class_lang_rules` is called and it applies the
kv rules of this class - ``<MyWidget>`` and ``<OtherWidget>``.
Similarly, when the widget is instantiated from kv, e.g.
.. code-block:: kv
<MyBox@BoxLayout>:
height: 55
OtherWidget:
width: 124
``OtherWidget``'s :meth:`apply_class_lang_rules` is called and it
applies the kv rules of this class - ``<MyWidget>`` and
``<OtherWidget>``.
.. note::
It applies only the class rules not the instance rules. I.e. in the
above kv example in the ``MyBox`` rule when ``OtherWidget`` is
instantiated, its :meth:`apply_class_lang_rules` applies the
``<MyWidget>`` and ``<OtherWidget>`` rules to it - it does not
apply the ``width: 124`` rule. The ``width: 124`` rule is part of
the ``MyBox`` rule and is applied by the ``MyBox``'s instance's
:meth:`apply_class_lang_rules`.
.. versionchanged:: 1.11.0
'''
Builder.apply(
self, ignored_consts=ignored_consts,
rule_children=rule_children)
#
# Collision
#
def collide_point(self, x, y):
'''
Check if a point (x, y) is inside the widget's axis aligned bounding
box.
:Parameters:
`x`: numeric
x position of the point (in parent coordinates)
`y`: numeric
y position of the point (in parent coordinates)
:Returns:
A bool. True if the point is inside the bounding box, False
otherwise.
.. code-block:: python
>>> Widget(pos=(10, 10), size=(50, 50)).collide_point(40, 40)
True
'''
return self.x <= x <= self.right and self.y <= y <= self.top
def collide_widget(self, wid):
'''
Check if another widget collides with this widget. This function
performs an axis-aligned bounding box intersection test by default.
:Parameters:
`wid`: :class:`Widget` class
Widget to test collision with.
:Returns:
bool. True if the other widget collides with this widget, False
otherwise.
.. code-block:: python
>>> wid = Widget(size=(50, 50))
>>> wid2 = Widget(size=(50, 50), pos=(25, 25))
>>> wid.collide_widget(wid2)
True
>>> wid2.pos = (55, 55)
>>> wid.collide_widget(wid2)
False
'''
if self.right < wid.x:
return False
if self.x > wid.right:
return False
if self.top < wid.y:
return False
if self.y > wid.top:
return False
return True
def on_motion(self, etype, me):
'''Called when a motion event is received.
:Parameters:
`etype`: `str`
Event type, one of "begin", "update" or "end"
`me`: :class:`~kivy.input.motionevent.MotionEvent`
Received motion event
:Returns: `bool`
`True` to stop event dispatching
.. versionadded:: 2.1.0
.. warning::
This is an experimental method and it remains so while this warning
is present.
'''
if self.disabled or me.dispatch_mode == MODE_DONT_DISPATCH:
return
if me.type_id not in self.motion_filter:
return
filtered = self.motion_filter[me.type_id]
if filtered[0] is self and len(filtered) == 1:
return
if me.dispatch_mode == MODE_DEFAULT_DISPATCH:
last_filtered = filtered[-1]
for widget in self.children[:]:
if widget.dispatch('on_motion', etype, me):
return True
if widget is last_filtered:
return
if me.dispatch_mode == MODE_FILTERED_DISPATCH:
widgets = filtered[1:] if filtered[0] is self else filtered[:]
for widget in widgets:
if widget.dispatch('on_motion', etype, me):
return True
#
# Default event handlers
#
def on_touch_down(self, touch):
'''Receive a touch down event.
:Parameters:
`touch`: :class:`~kivy.input.motionevent.MotionEvent` class
Touch received. The touch is in parent coordinates. See
:mod:`~kivy.uix.relativelayout` for a discussion on
coordinate systems.
:Returns: bool
If True, the dispatching of the touch event will stop.
If False, the event will continue to be dispatched to the rest
of the widget tree.
'''
if self.disabled and self.collide_point(*touch.pos):
return True
for child in self.children[:]:
if child.dispatch('on_touch_down', touch):
return True
def on_touch_move(self, touch):
'''Receive a touch move event. The touch is in parent coordinates.
See :meth:`on_touch_down` for more information.
'''
if self.disabled:
return
for child in self.children[:]:
if child.dispatch('on_touch_move', touch):
return True
def on_touch_up(self, touch):
'''Receive a touch up event. The touch is in parent coordinates.
See :meth:`on_touch_down` for more information.
'''
if self.disabled:
return
for child in self.children[:]:
if child.dispatch('on_touch_up', touch):
return True
def on_kv_post(self, base_widget):
pass
#
# Tree management
#
def add_widget(self, widget, index=0, canvas=None):
'''Add a new widget as a child of this widget.
:Parameters:
`widget`: :class:`Widget`
Widget to add to our list of children.
`index`: int, defaults to 0
Index to insert the widget in the list. Notice that the default
of 0 means the widget is inserted at the beginning of the list
and will thus be drawn on top of other sibling widgets. For a
full discussion of the index and widget hierarchy, please see
the :doc:`Widgets Programming Guide <guide/widgets>`.
.. versionadded:: 1.0.5
`canvas`: str, defaults to None
Canvas to add widget's canvas to. Can be 'before', 'after' or
None for the default canvas.
.. versionadded:: 1.9.0
.. code-block:: python
>>> from kivy.uix.button import Button
>>> from kivy.uix.slider import Slider
>>> root = Widget()
>>> root.add_widget(Button())
>>> slider = Slider()
>>> root.add_widget(slider)
'''
if not isinstance(widget, Widget):
raise WidgetException(
'add_widget() can be used only with instances'
' of the Widget class.')
widget = widget.__self__
if widget is self:
raise WidgetException(
'Widget instances cannot be added to themselves.')
parent = widget.parent
# Check if the widget is already a child of another widget.
if parent:
raise WidgetException('Cannot add %r, it already has a parent %r'
% (widget, parent))
widget.parent = parent = self
# Child will be disabled if added to a disabled parent.
widget.inc_disabled(self._disabled_count)
canvas = self.canvas.before if canvas == 'before' else \
self.canvas.after if canvas == 'after' else self.canvas
if index == 0 or len(self.children) == 0:
self.children.insert(0, widget)
canvas.add(widget.canvas)
else:
canvas = self.canvas
children = self.children
if index >= len(children):
index = len(children)
next_index = canvas.indexof(children[-1].canvas)
else:
next_child = children[index]
next_index = canvas.indexof(next_child.canvas)
if next_index == -1:
next_index = canvas.length()
else:
next_index += 1
children.insert(index, widget)
# We never want to insert widget _before_ canvas.before.
if next_index == 0 and canvas.has_before:
next_index = 1
canvas.insert(next_index, widget.canvas)
for type_id in widget.motion_filter:
self.register_for_motion_event(type_id, widget)
widget.fbind('motion_filter', self._update_motion_filter)
def remove_widget(self, widget):
'''Remove a widget from the children of this widget.
:Parameters:
`widget`: :class:`Widget`
Widget to remove from our children list.
.. code-block:: python
>>> from kivy.uix.button import Button
>>> root = Widget()
>>> button = Button()
>>> root.add_widget(button)
>>> root.remove_widget(button)
'''
if widget not in self.children:
return
self.children.remove(widget)
if widget.canvas in self.canvas.children:
self.canvas.remove(widget.canvas)
elif widget.canvas in self.canvas.after.children:
self.canvas.after.remove(widget.canvas)
elif widget.canvas in self.canvas.before.children:
self.canvas.before.remove(widget.canvas)
for type_id in widget.motion_filter:
self.unregister_for_motion_event(type_id, widget)
widget.funbind('motion_filter', self._update_motion_filter)
widget.parent = None
widget.dec_disabled(self._disabled_count)
def clear_widgets(self, children=None):
'''
Remove all (or the specified) :attr:`~Widget.children` of this widget.
If the 'children' argument is specified, it should be a list (or
filtered list) of children of the current widget.
.. versionchanged:: 1.8.0
The `children` argument can be used to specify the children you
want to remove.
.. versionchanged:: 2.1.0
Specifying an empty ``children`` list leaves the widgets unchanged.
Previously it was treated like ``None`` and all children were
removed.
'''
if children is None or children is self.children:
children = self.children[:]
remove_widget = self.remove_widget
for child in children:
remove_widget(child)
def _update_motion_filter(self, child_widget, child_motion_filter):
old_events = []
for type_id, widgets in self.motion_filter.items():
if child_widget in widgets:
old_events.append(type_id)
for type_id in old_events:
if type_id not in child_motion_filter:
self.unregister_for_motion_event(type_id, child_widget)
for type_id in child_motion_filter:
if type_id not in old_events:
self.register_for_motion_event(type_id, child_widget)
def _find_index_in_motion_filter(self, type_id, widget):
if widget is self:
return 0
find_index = self.children.index
max_index = find_index(widget) + 1
motion_widgets = self.motion_filter[type_id]
insert_index = 1 if motion_widgets[0] is self else 0
for index in range(insert_index, len(motion_widgets)):
if find_index(motion_widgets[index]) < max_index:
insert_index += 1
else:
break
return insert_index
def register_for_motion_event(self, type_id, widget=None):
'''Register to receive motion events of `type_id`.
Override :meth:`on_motion` or bind to `on_motion` event to handle
the incoming motion events.
:Parameters:
`type_id`: `str`
Motion event type id (eg. "touch", "hover", etc.)
`widget`: `Widget`
Child widget or `self` if omitted
.. versionadded:: 2.1.0
.. note::
Method can be called multiple times with the same arguments.
.. warning::
This is an experimental method and it remains so while this warning
is present.
'''
a_widget = widget or self
motion_filter = self.motion_filter
if type_id not in motion_filter:
motion_filter[type_id] = [a_widget]
elif widget not in motion_filter[type_id]:
index = self._find_index_in_motion_filter(type_id, a_widget)
motion_filter[type_id].insert(index, a_widget)
def unregister_for_motion_event(self, type_id, widget=None):
'''Unregister to receive motion events of `type_id`.
:Parameters:
`type_id`: `str`
Motion event type id (eg. "touch", "hover", etc.)
`widget`: `Widget`
Child widget or `self` if omitted
.. versionadded:: 2.1.0
.. note::
Method can be called multiple times with the same arguments.
.. warning::
This is an experimental method and it remains so while this warning
is present.
'''
a_widget = widget or self
motion_filter = self.motion_filter
if type_id in motion_filter:
if a_widget in motion_filter[type_id]:
motion_filter[type_id].remove(a_widget)
if not motion_filter[type_id]:
del motion_filter[type_id]
def export_to_png(self, filename, *args, **kwargs):
'''Saves an image of the widget and its children in png format at the
specified filename. Works by removing the widget canvas from its
parent, rendering to an :class:`~kivy.graphics.fbo.Fbo`, and calling
:meth:`~kivy.graphics.texture.Texture.save`.
.. note::
The image includes only this widget and its children. If you want
to include widgets elsewhere in the tree, you must call
:meth:`~Widget.export_to_png` from their common parent, or use
:meth:`~kivy.core.window.WindowBase.screenshot` to capture the
whole window.
.. note::
The image will be saved in png format, you should include the
extension in your filename.
.. versionadded:: 1.9.0
:Parameters:
`filename`: str
The filename with which to save the png.
`scale`: float
The amount by which to scale the saved image, defaults to 1.
.. versionadded:: 1.11.0
'''
self.export_as_image(*args, **kwargs).save(filename, flipped=False)
def export_as_image(self, *args, **kwargs):
'''Return an core :class:`~kivy.core.image.Image` of the actual
widget.
.. versionadded:: 1.11.0
'''
from kivy.core.image import Image
scale = kwargs.get('scale', 1)
if self.parent is not None:
canvas_parent_index = self.parent.canvas.indexof(self.canvas)
if canvas_parent_index > -1:
self.parent.canvas.remove(self.canvas)
fbo = Fbo(size=(self.width * scale, self.height * scale),
with_stencilbuffer=True)
with fbo:
ClearColor(0, 0, 0, 0)
ClearBuffers()
Scale(1, -1, 1)
Scale(scale, scale, 1)
Translate(-self.x, -self.y - self.height, 0)
fbo.add(self.canvas)
fbo.draw()
img = Image(fbo.texture)
fbo.remove(self.canvas)
if self.parent is not None and canvas_parent_index > -1:
self.parent.canvas.insert(canvas_parent_index, self.canvas)
return img
def get_root_window(self):
'''Return the root window.
:Returns:
Instance of the root window. Can be a
:class:`~kivy.core.window.WindowBase` or
:class:`Widget`.
'''
if self.parent:
return self.parent.get_root_window()
def get_parent_window(self):
'''Return the parent window.
:Returns:
Instance of the parent window. Can be a
:class:`~kivy.core.window.WindowBase` or
:class:`Widget`.
'''
if self.parent:
return self.parent.get_parent_window()
def _walk(self, restrict=False, loopback=False, index=None):
# We pass index only when we are going on the parent
# so don't yield the parent as well.
if index is None:
index = len(self.children)
yield self
for child in reversed(self.children[:index]):
for walk_child in child._walk(restrict=True):
yield walk_child
# If we want to continue with our parent, just do it.
if not restrict:
parent = self.parent
try:
if parent is None or not isinstance(parent, Widget):
raise ValueError
index = parent.children.index(self)
except ValueError:
# Self is root, if we want to loopback from the first element:
if not loopback:
return
# If we started with root (i.e. index==None), then we have to
# start from root again, so we return self again. Otherwise, we
# never returned it, so return it now starting with it.
parent = self
index = None
for walk_child in parent._walk(loopback=loopback, index=index):
yield walk_child
def walk(self, restrict=False, loopback=False):
''' Iterator that walks the widget tree starting with this widget and
goes forward returning widgets in the order in which layouts display
them.
:Parameters:
`restrict`: bool, defaults to False
If True, it will only iterate through the widget and its
children (or children of its children etc.). Defaults to False.
`loopback`: bool, defaults to False
If True, when the last widget in the tree is reached,
it'll loop back to the uppermost root and start walking until
we hit this widget again. Naturally, it can only loop back when
`restrict` is False. Defaults to False.
:return:
A generator that walks the tree, returning widgets in the
forward layout order.
For example, given a tree with the following structure:
.. code-block:: kv
GridLayout:
Button
BoxLayout:
id: box
Widget
Button
Widget
walking this tree:
.. code-block:: python
>>> # Call walk on box with loopback True, and restrict False
>>> [type(widget) for widget in box.walk(loopback=True)]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
<class 'Widget'>, <class 'GridLayout'>, <class 'Button'>]
>>> # Now with loopback False, and restrict False
>>> [type(widget) for widget in box.walk()]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>,
<class 'Widget'>]
>>> # Now with restrict True
>>> [type(widget) for widget in box.walk(restrict=True)]
[<class 'BoxLayout'>, <class 'Widget'>, <class 'Button'>]
.. versionadded:: 1.9.0
'''
gen = self._walk(restrict, loopback)
yield next(gen)
for node in gen:
if node is self:
return
yield node
def _walk_reverse(self, loopback=False, go_up=False):
# process is walk up level, walk down its children tree, then walk up
# next level etc.
# default just walk down the children tree
root = self
index = 0
# we need to go up a level before walking tree
if go_up:
root = self.parent
try:
if root is None or not isinstance(root, Widget):
raise ValueError
index = root.children.index(self) + 1
except ValueError:
if not loopback:
return
index = 0
go_up = False
root = self
# now walk children tree starting with last-most child
for child in islice(root.children, index, None):
for walk_child in child._walk_reverse(loopback=loopback):
yield walk_child
# we need to return ourself last, in all cases
yield root
# if going up, continue walking up the parent tree
if go_up:
for walk_child in root._walk_reverse(loopback=loopback,
go_up=go_up):
yield walk_child
def walk_reverse(self, loopback=False):
''' Iterator that walks the widget tree backwards starting with the
widget before this, and going backwards returning widgets in the
reverse order in which layouts display them.
This walks in the opposite direction of :meth:`walk`, so a list of the
tree generated with :meth:`walk` will be in reverse order compared
to the list generated with this, provided `loopback` is True.
:Parameters:
`loopback`: bool, defaults to False
If True, when the uppermost root in the tree is
reached, it'll loop back to the last widget and start walking
back until after we hit widget again. Defaults to False.
:return:
A generator that walks the tree, returning widgets in the
reverse layout order.
For example, given a tree with the following structure:
.. code-block:: kv
GridLayout:
Button
BoxLayout:
id: box
Widget
Button
Widget
walking this tree:
.. code-block:: python
>>> # Call walk on box with loopback True
>>> [type(widget) for widget in box.walk_reverse(loopback=True)]
[<class 'Button'>, <class 'GridLayout'>, <class 'Widget'>,
<class 'Button'>, <class 'Widget'>, <class 'BoxLayout'>]
>>> # Now with loopback False
>>> [type(widget) for widget in box.walk_reverse()]
[<class 'Button'>, <class 'GridLayout'>]
>>> forward = [w for w in box.walk(loopback=True)]
>>> backward = [w for w in box.walk_reverse(loopback=True)]
>>> forward == backward[::-1]
True
.. versionadded:: 1.9.0
'''
for node in self._walk_reverse(loopback=loopback, go_up=True):
yield node
if node is self:
return
def to_widget(self, x, y, relative=False):
'''Convert the coordinate from window to local (current widget)
coordinates.
See :mod:`~kivy.uix.relativelayout` for details on the coordinate
systems.
'''
if self.parent:
x, y = self.parent.to_widget(x, y)
return self.to_local(x, y, relative=relative)
def to_window(self, x, y, initial=True, relative=False):
"""If ``initial`` is True, the default, it transforms **parent**
coordinates to window coordinates. Otherwise, it transforms **local**
(current widget) coordinates to window coordinates.
See :mod:`~kivy.uix.relativelayout` for details on the coordinate
systems.
"""
if not initial:
x, y = self.to_parent(x, y, relative=relative)
if self.parent:
return self.parent.to_window(x, y, initial=False,
relative=relative)
return (x, y)
def to_parent(self, x, y, relative=False):
"""Transform local (current widget) coordinates to parent coordinates.
See :mod:`~kivy.uix.relativelayout` for details on the coordinate
systems.
:Parameters:
`relative`: bool, defaults to False
Change to True if you want to translate relative positions from
a widget to its parent coordinates.
"""
if relative:
return (x + self.x, y + self.y)
return (x, y)
def to_local(self, x, y, relative=False):
"""Transform parent coordinates to local (current widget) coordinates.
See :mod:`~kivy.uix.relativelayout` for details on the coordinate
systems.
:Parameters:
`relative`: bool, defaults to False
Change to True if you want to translate coordinates to
relative widget coordinates.
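        For example, for a widget at ``pos=(10, 20)`` (a sketch, not an
        official doctest)::

            >>> w = Widget(pos=(10, 20))
            >>> w.to_local(15, 25)
            (15, 25)
            >>> w.to_local(15, 25, relative=True)
            (5, 5)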
"""
if relative:
return (x - self.x, y - self.y)
return (x, y)
def _apply_transform(self, m, pos=None):
if self.parent:
x, y = self.parent.to_widget(relative=True,
*self.to_window(*(pos or self.pos)))
m.translate(x, y, 0)
m = self.parent._apply_transform(m) if self.parent else m
return m
def get_window_matrix(self, x=0, y=0):
'''Calculate the transformation matrix to convert between window and
widget coordinates.
:Parameters:
`x`: float, defaults to 0
Translates the matrix on the x axis.
`y`: float, defaults to 0
Translates the matrix on the y axis.
'''
m = Matrix()
m.translate(x, y, 0)
m = self._apply_transform(m)
return m
x = NumericProperty(0)
'''X position of the widget.
:attr:`x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
y = NumericProperty(0)
'''Y position of the widget.
:attr:`y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
width = NumericProperty(100)
'''Width of the widget.
:attr:`width` is a :class:`~kivy.properties.NumericProperty` and defaults
to 100.
.. warning::
Keep in mind that the `width` property is subject to layout logic and
that this has not yet happened at the time of the widget's `__init__`
method.
.. warning::
A negative width is not supported.
'''
height = NumericProperty(100)
'''Height of the widget.
:attr:`height` is a :class:`~kivy.properties.NumericProperty` and defaults
to 100.
.. warning::
Keep in mind that the `height` property is subject to layout logic and
that this has not yet happened at the time of the widget's `__init__`
method.
.. warning::
A negative height is not supported.
'''
pos = ReferenceListProperty(x, y)
'''Position of the widget.
:attr:`pos` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`x`, :attr:`y`) properties.
'''
size = ReferenceListProperty(width, height)
'''Size of the widget.
:attr:`size` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`width`, :attr:`height`) properties.
'''
def get_right(self):
return self.x + self.width
def set_right(self, value):
self.x = value - self.width
right = AliasProperty(get_right, set_right,
bind=('x', 'width'),
cache=True, watch_before_use=False)
'''Right position of the widget.
:attr:`right` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`x` + :attr:`width`).
'''
def get_top(self):
return self.y + self.height
def set_top(self, value):
self.y = value - self.height
top = AliasProperty(get_top, set_top,
bind=('y', 'height'),
cache=True, watch_before_use=False)
'''Top position of the widget.
:attr:`top` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`y` + :attr:`height`).
'''
def get_center_x(self):
return self.x + self.width / 2.
def set_center_x(self, value):
self.x = value - self.width / 2.
center_x = AliasProperty(get_center_x, set_center_x,
bind=('x', 'width'),
cache=True, watch_before_use=False)
'''X center position of the widget.
:attr:`center_x` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`x` + :attr:`width` / 2.).
'''
def get_center_y(self):
return self.y + self.height / 2.
def set_center_y(self, value):
self.y = value - self.height / 2.
center_y = AliasProperty(get_center_y, set_center_y,
bind=('y', 'height'),
cache=True, watch_before_use=False)
'''Y center position of the widget.
:attr:`center_y` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`y` + :attr:`height` / 2.).
'''
center = ReferenceListProperty(center_x, center_y)
'''Center position of the widget.
:attr:`center` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`center_x`, :attr:`center_y`) properties.
'''
cls = ListProperty([])
'''Class of the widget, used for styling.
'''
children = ListProperty([])
'''List of children of this widget.
:attr:`children` is a :class:`~kivy.properties.ListProperty` and
defaults to an empty list.
Use :meth:`add_widget` and :meth:`remove_widget` for manipulating the
children list. Don't manipulate the children list directly unless you know
what you are doing.
'''
parent = ObjectProperty(None, allownone=True, rebind=True)
'''Parent of this widget. The parent of a widget is set when the widget
is added to another widget and unset when the widget is removed from its
parent.
:attr:`parent` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
size_hint_x = NumericProperty(1, allownone=True)
'''x size hint. Represents how much space the widget should use in the
direction of the x axis relative to its parent's width.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
The size_hint is used by layouts for two purposes:
- When the layout considers widgets on their own rather than in
relation to its other children, the size_hint_x is a direct proportion
of the parent width, normally between 0.0 and 1.0. For instance, a
widget with ``size_hint_x=0.5`` in
a vertical BoxLayout will take up half the BoxLayout's width, or
a widget in a FloatLayout with ``size_hint_x=0.2`` will take up 20%
of the FloatLayout width. If the size_hint is greater than 1, the
widget will be wider than the parent.
- When multiple widgets can share a row of a layout, such as in a
horizontal BoxLayout, their widths will be their size_hint_x as a
fraction of the sum of widget size_hints. For instance, if the
size_hint_xs are (0.5, 1.0, 0.5), the first widget will have a
width of 25% of the parent width.
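    For example, the second case can be sketched in plain Python (a
    horizontal :class:`~kivy.uix.boxlayout.BoxLayout` is assumed here)::
        from kivy.uix.boxlayout import BoxLayout
        from kivy.uix.button import Button
        root = BoxLayout(orientation='horizontal')
        # size_hint_x values of 0.5, 1.0 and 0.5 sum to 2.0, so the buttons
        # take 25%, 50% and 25% of the layout width respectively
        root.add_widget(Button(text='a', size_hint_x=0.5))
        root.add_widget(Button(text='b', size_hint_x=1.0))
        root.add_widget(Button(text='c', size_hint_x=0.5))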
:attr:`size_hint_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
'''
size_hint_y = NumericProperty(1, allownone=True)
'''y size hint.
:attr:`size_hint_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.
See :attr:`size_hint_x` for more information, but with widths and heights
swapped.
'''
size_hint = ReferenceListProperty(size_hint_x, size_hint_y)
'''Size hint.
:attr:`size_hint` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`size_hint_x`, :attr:`size_hint_y`) properties.
See :attr:`size_hint_x` for more information.
'''
pos_hint = ObjectProperty({})
'''Position hint. This property allows you to set the position of
the widget inside its parent layout (similar to
size_hint).
For example, if you want to set the top of the widget to be at 90%
height of its parent layout, you can write::
widget = Widget(pos_hint={'top': 0.9})
The keys 'x', 'right' and 'center_x' will use the parent width.
The keys 'y', 'top' and 'center_y' will use the parent height.
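    For example, a widget centred in a
    :class:`~kivy.uix.floatlayout.FloatLayout` can be sketched as::
        from kivy.uix.floatlayout import FloatLayout
        from kivy.uix.widget import Widget
        layout = FloatLayout()
        layout.add_widget(Widget(pos_hint={'center_x': 0.5, 'center_y': 0.5},
                                 size_hint=(0.5, 0.5)))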
See :doc:`api-kivy.uix.floatlayout` for further reference.
.. note::
:attr:`pos_hint` is not used by all layouts. Check the documentation
of the layout in question to see if it supports pos_hint.
:attr:`pos_hint` is an :class:`~kivy.properties.ObjectProperty`
containing a dict.
'''
size_hint_min_x = NumericProperty(None, allownone=True)
'''When not None, the x-direction minimum size (in pixels,
like :attr:`width`) when :attr:`size_hint_x` is also not None.
When :attr:`size_hint_x` is not None, it is the minimum width that the
widget will be set due to the :attr:`size_hint_x`. I.e. when a smaller size
would be set, :attr:`size_hint_min_x` is the value used instead for the
widget width. When None, or when :attr:`size_hint_x` is None,
:attr:`size_hint_min_x` doesn't do anything.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
:attr:`size_hint_min_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.10.0
'''
size_hint_min_y = NumericProperty(None, allownone=True)
'''When not None, the y-direction minimum size (in pixels,
like :attr:`height`) when :attr:`size_hint_y` is also not None.
When :attr:`size_hint_y` is not None, it is the minimum height that the
widget will be set due to the :attr:`size_hint_y`. I.e. when a smaller size
would be set, :attr:`size_hint_min_y` is the value used instead for the
widget height. When None, or when :attr:`size_hint_y` is None,
:attr:`size_hint_min_y` doesn't do anything.
Only the :class:`~kivy.uix.layout.Layout` and
:class:`~kivy.core.window.Window` classes make use of the hint.
:attr:`size_hint_min_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.10.0
'''
size_hint_min = ReferenceListProperty(size_hint_min_x, size_hint_min_y)
'''Minimum size when using :attr:`size_hint`.
:attr:`size_hint_min` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`size_hint_min_x`, :attr:`size_hint_min_y`) properties.
.. versionadded:: 1.10.0
'''
size_hint_max_x = NumericProperty(None, allownone=True)
'''When not None, the x-direction maximum size (in pixels,
like :attr:`width`) when :attr:`size_hint_x` is also not None.
Similar to :attr:`size_hint_min_x`, except that it sets the maximum width.
:attr:`size_hint_max_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.10.0
'''
size_hint_max_y = NumericProperty(None, allownone=True)
'''When not None, the y-direction maximum size (in pixels,
like :attr:`height`) when :attr:`size_hint_y` is also not None.
Similar to :attr:`size_hint_min_y`, except that it sets the maximum height.
:attr:`size_hint_max_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
.. versionadded:: 1.10.0
'''
size_hint_max = ReferenceListProperty(size_hint_max_x, size_hint_max_y)
'''Maximum size when using :attr:`size_hint`.
:attr:`size_hint_max` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`size_hint_max_x`, :attr:`size_hint_max_y`) properties.
.. versionadded:: 1.10.0
'''
ids = DictProperty({})
'''This is a dictionary of ids defined in your kv language. This will only
be populated if you use ids in your kv language code.
.. versionadded:: 1.7.0
:attr:`ids` is a :class:`~kivy.properties.DictProperty` and defaults to an
empty dict {}.
The :attr:`ids` are populated for each root level widget definition. For
example:
.. code-block:: kv
# in kv
<MyWidget@Widget>:
id: my_widget
Label:
id: label_widget
Widget:
id: inner_widget
Label:
id: inner_label
TextInput:
id: text_input
OtherWidget:
id: other_widget
<OtherWidget@Widget>
id: other_widget
Label:
id: other_label
TextInput:
id: other_textinput
Then, in python:
.. code-block:: python
>>> widget = MyWidget()
>>> print(widget.ids)
{'other_widget': <weakproxy at 041CFED0 to OtherWidget at 041BEC38>,
'inner_widget': <weakproxy at 04137EA0 to Widget at 04138228>,
'inner_label': <weakproxy at 04143540 to Label at 04138260>,
'label_widget': <weakproxy at 04137B70 to Label at 040F97A0>,
'text_input': <weakproxy at 041BB5D0 to TextInput at 041BEC00>}
>>> print(widget.ids['other_widget'].ids)
{'other_textinput': <weakproxy at 041DBB40 to TextInput at 041BEF48>,
'other_label': <weakproxy at 041DB570 to Label at 041BEEA0>}
>>> print(widget.ids['label_widget'].ids)
{}
'''
opacity = NumericProperty(1.0)
'''Opacity of the widget and all its children.
.. versionadded:: 1.4.1
The opacity attribute controls the opacity of the widget and its children.
Be careful, it's a cumulative attribute: the value is multiplied by the
current global opacity and the result is applied to the current context
color.
For example, if the parent has an opacity of 0.5 and a child has an
opacity of 0.2, the real opacity of the child will be 0.5 * 0.2 = 0.1.
Then, the opacity is applied by the shader as:
.. code-block:: python
frag_color = color * vec4(1.0, 1.0, 1.0, opacity);
:attr:`opacity` is a :class:`~kivy.properties.NumericProperty` and defaults
to 1.0.
'''
def on_opacity(self, instance, value):
canvas = self.canvas
if canvas is not None:
canvas.opacity = value
canvas = None
'''Canvas of the widget.
The canvas is a graphics object that contains all the drawing instructions
for the graphical representation of the widget.
There are no general properties for the Widget class, such as background
color, to keep the design simple and lean. Some derived classes, such as
Button, do add such convenience properties but generally the developer is
responsible for implementing the graphics representation for a custom
widget from the ground up. See the derived widget classes for patterns to
follow and extend.
See :class:`~kivy.graphics.Canvas` for more information about the usage.
'''
def get_disabled(self):
return self._disabled_count > 0
def set_disabled(self, value):
# Necessary to ensure a change between value of equal truthiness
# doesn't mess up the count
value = bool(value)
if value != self._disabled_value:
self._disabled_value = value
if value:
self.inc_disabled()
else:
self.dec_disabled()
return True
def inc_disabled(self, count=1):
self._disabled_count += count
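        # Dispatch the property only when the count crosses from zero to one
        # or more, i.e. when the widget has just become disabled; further
        # nested increments do not re-dispatch.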
if self._disabled_count - count < 1 <= self._disabled_count:
self.property('disabled').dispatch(self)
for c in self.children:
c.inc_disabled(count)
def dec_disabled(self, count=1):
self._disabled_count -= count
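        # Dispatch the property only when the count drops back to zero, i.e.
        # when the widget has just been re-enabled; nested decrements do not
        # re-dispatch.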
if self._disabled_count <= 0 < self._disabled_count + count:
self.property('disabled').dispatch(self)
for c in self.children:
c.dec_disabled(count)
disabled = AliasProperty(get_disabled, set_disabled, watch_before_use=False)
'''Indicates whether this widget can interact with input or not.
:attr:`disabled` is an :class:`~kivy.properties.AliasProperty` and
defaults to False.
.. note::
1. Child Widgets, when added to a disabled widget, will be disabled
automatically.
2. Disabling/enabling a parent disables/enables all
of its children.
.. versionadded:: 1.8.0
.. versionchanged:: 1.10.1
:attr:`disabled` was changed from a
:class:`~kivy.properties.BooleanProperty` to an
:class:`~kivy.properties.AliasProperty` to allow access to its
previous state when a parent's disabled state is changed.
'''
motion_filter = DictProperty()
'''Holds a dict of `type_id` to `list` of child widgets registered to
receive motion events of `type_id`.
Don't change the property directly but use
:meth:`register_for_motion_event` and :meth:`unregister_for_motion_event`
to register and unregister for motion events. If `self` is registered it
will always be the first element in the list.
.. versionadded:: 2.1.0
.. warning::
This is an experimental property and it remains so while this warning
is present.
'''
|
{
"content_hash": "59957e67eafdc01d92016d37c48c5fba",
"timestamp": "",
"source": "github",
"line_count": 1629,
"max_line_length": 80,
"avg_line_length": 35.29711479435236,
"alnum_prop": 0.6023756934903216,
"repo_name": "akshayaurora/kivy",
"id": "e00b39e1b0d147a909f3f69516ad83b53f9aca91",
"size": "57499",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kivy/uix/widget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "328705"
},
{
"name": "Cython",
"bytes": "1105798"
},
{
"name": "Emacs Lisp",
"bytes": "9839"
},
{
"name": "GLSL",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "5494"
},
{
"name": "Objective-C",
"bytes": "26870"
},
{
"name": "PowerShell",
"bytes": "4836"
},
{
"name": "Python",
"bytes": "3286901"
},
{
"name": "Shell",
"bytes": "21930"
},
{
"name": "Vim script",
"bytes": "2120"
},
{
"name": "kvlang",
"bytes": "43643"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name="pylint-guidelines-checker",
version="0.0.1",
url='http://github.com/Azure/azure-sdk-for-python',
license='MIT License',
description="A pylint plugin which enforces azure sdk guidelines.",
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
py_modules=['pylint_guidelines_checker'],
)
|
{
"content_hash": "0b48a4cd091d2a834deffaf8b5258be4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.7078947368421052,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b4b89126e95a06b04230e410300c05600a3359b4",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/pylint_custom_plugin/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from .sports import * # noqa
from .picks import * # noqa
from .manage import * # noqa
|
{
"content_hash": "601a09c923fd942c38e80146c5d7f2f5",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 29,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6629213483146067,
"repo_name": "dakrauth/picker",
"id": "14c14bb026ee837b227f5b6a9a2344799d14fbd9",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "picker/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "645"
},
{
"name": "HTML",
"bytes": "24694"
},
{
"name": "Python",
"bytes": "122929"
}
],
"symlink_target": ""
}
|
from UtilityLayer import *
from Webserver import Webserver
import DatabaseLayer
import sys, argparse
import logging
import json
import os
class Butterfly():
'''
Butterfly 2.0
EM Data server
2017 VCG + Lichtman Lab
'''
log_info = {
'filename': 'bfly.log',
'level': logging.INFO
}
def __init__(self,_argv):
# keyword arguments
self.INPUT = INPUT()
self.RUNTIME = RUNTIME()
self.OUTPUT = OUTPUT()
# Get the port
args = self.parseArgv(_argv)
port = args['port']
# Start to write to log files
logging.basicConfig(**self.log_info)
# Populate the database
db = self.updateDB()
# Start a webserver on given port
Webserver(db).start(port)
# Update the database from the config file
def updateDB(self):
# Create or open the database
db_class = getattr(DatabaseLayer,DB_TYPE)
self._db = db_class(DB_PATH)
# Get keywords for the BFLY_CONFIG
k_list = self.INPUT.METHODS.GROUP_LIST
k_path = self.OUTPUT.INFO.PATH.NAME
'''
Make a dictionary mapping channel paths to dataset paths
'''
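        # Roughly: the reduce chain below walks the three nested group levels
        # of BFLY_CONFIG and maps each channel's path to the path of the group
        # that contains it; channels whose group defines no path are skipped.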
pather = lambda l: l.get(k_path,'')
lister = lambda l,n: l.get(k_list[n],[])
mapper = lambda l,p: {c:p for c in map(pather,l)}
join = lambda l,p,a: dict(mapper(l,p),**a) if p else a
get_L2 = lambda a,l: join(lister(l,3), pather(l), a)
get_L1 = lambda a,l: reduce(get_L2, lister(l,2), a)
get_L0 = lambda a,l: reduce(get_L1, lister(l,1), a)
all_paths = reduce(get_L0, lister(BFLY_CONFIG,0), {})
# Fill the database with content
return self.completeDB(all_paths)
    # Add all collections and content to the database
def completeDB(self, all_paths):
# Add paths to database
self._db.add_paths(all_paths)
# Get all dataset paths from all channel paths
dataset_paths = set(all_paths.values())
# Add all needed tables to the database
self._db.add_tables(dataset_paths)
# Add synapses and neurons to database
map(self.addSynapseDB, dataset_paths)
# Complete database
return self._db
def addSynapseDB(self,dataset_path):
# Get keywords for input file
k_file = self.RUNTIME.DB.FILE.SYNAPSE.NAME
k_point = self.RUNTIME.DB.FILE.SYNAPSE.POINT.NAME
k_points_in = self.RUNTIME.DB.FILE.SYNAPSE.POINT.LIST
k_nodes_in = self.RUNTIME.DB.FILE.SYNAPSE.NEURON_LIST
# Get keywords for the database
k_points_out = self.RUNTIME.DB.TABLE.ALL.POINT_LIST
k_nodes_out = self.RUNTIME.DB.TABLE.SYNAPSE.NEURON_LIST
k_synapse = self.RUNTIME.DB.TABLE.SYNAPSE.NAME
# Get the full path to the synapse file
full_path = os.path.join(dataset_path, k_file)
        # List all the synapse database keys
k_keys = k_nodes_out + k_points_out
        # Will hold one dict per synapse for insertion into the database
        synapse_dicts = []
# Begin adding synapses to database
with open(full_path, 'r') as f:
all_json = json.load(f)
# Get points and centers from json
get_node = lambda n: all_json[n]
get_point = lambda p: all_json[k_point][p]
# Transpose the list of all synapses
center = map(get_point, k_points_in)
link0, link1 = map(get_node, k_nodes_in)
synapse_list = zip(link0,link1, *center)
# Get a list of dictionaries for all synapses
get_dict = lambda s: dict(zip(k_keys,s))
synapse_dicts = map(get_dict, synapse_list)
# Add the synapses to the database
entry_args = [k_synapse,dataset_path,synapse_dicts]
self._db.add_entries(*entry_args)
# Add neurons to the database
self.addNeuronDB(dataset_path,synapse_dicts)
def addNeuronDB(self,dataset_path,synapse_dicts):
# Get keywords for the database
k_nodes = self.RUNTIME.DB.TABLE.SYNAPSE.NEURON_LIST
k_points = self.RUNTIME.DB.TABLE.ALL.POINT_LIST
k_neuron = self.RUNTIME.DB.TABLE.NEURON.NAME
# Get constant for id and first/second neurons
k_id = self.RUNTIME.DB.TABLE.NEURON.KEY.NAME
# Get neuron from synapse
get_n = [
lambda d: d[k_nodes[0]],
lambda d: d[k_nodes[1]]
]
# Get id and center point for a neuron
get_id = lambda d,n: {k_id: get_n[n](d)}
get_point = lambda d: {k: d[k] for k in k_points}
# Find targets that are never sources
all_n1 = map(get_n[0], synapse_dicts)
only_n2 = lambda s: get_n[1](s) not in all_n1
only_n2_dicts = filter(only_n2, synapse_dicts)
# Make dictionaries first from source neurons
get_n1 = lambda d: dict(get_point(d), **get_id(d,0))
n1_dicts = map(get_n1, synapse_dicts)
# Add remaining dictionaires from target neurons
get_n2 = lambda d: dict(get_point(d), **get_id(d,1))
n2_dicts = map(get_n2, only_n2_dicts)
# Get all the neuron dicts from synapses
all_dicts = n1_dicts + n2_dicts
# Add the neurons to the database
entry_args = [k_neuron,dataset_path,all_dicts]
self._db.add_entries(*entry_args)
def parseArgv(self, argv):
sys.argv = argv
help = {
'bfly': 'Host a butterfly server!',
'folder': 'relative, absolute, or user path/of/all/experiments',
'save': 'path of output yaml file indexing experiments',
'port': 'port >1024 for hosting this server'
}
parser = argparse.ArgumentParser(description=help['bfly'])
parser.add_argument('port', type=int, nargs='?', default=PORT, help=help['port'])
parser.add_argument('-e','--exp', metavar='exp', help= help['folder'])
parser.add_argument('-o','--out', metavar='out', help= help['save'])
return vars(parser.parse_args())
def main(*_args, **_flags):
return Butterfly(toArgv(*_args, **_flags))
if __name__ == "__main__":
Butterfly(sys.argv)
|
{
"content_hash": "249f8f37fc5819b8e457678ba9944d9b",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 89,
"avg_line_length": 36.54761904761905,
"alnum_prop": 0.594299674267101,
"repo_name": "Rhoana/butterfly2",
"id": "9cd70de4fc8f573c22c5e3319016678c96cc533a",
"size": "6140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "butterfly/butterfly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64079"
}
],
"symlink_target": ""
}
|
"""
``account_quotas`` is a middleware which blocks write requests (PUT, POST) if a
given account quota (in bytes) is exceeded while DELETE requests are still
allowed.
``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to
store the quota. Write requests to this metadata entry are only permitted for
resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not
set.
The ``account_quotas`` middleware should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware.
For example::
[pipeline:main]
pipeline = catch_errors cache tempauth account_quotas proxy-server
[filter:account_quotas]
use = egg:swift#account_quotas
To set the quota on an account::
swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:10000
Remove the quota::
swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \
post -m quota-bytes:
"""
from swift.common.swob import HTTPForbidden, HTTPRequestEntityTooLarge, \
HTTPBadRequest, wsgify
from swift.proxy.controllers.base import get_account_info
class AccountQuotaMiddleware(object):
"""Account quota middleware
See above for a full description.
"""
def __init__(self, app, *args, **kwargs):
self.app = app
@wsgify
def __call__(self, request):
if request.method not in ("POST", "PUT"):
return self.app
try:
request.split_path(2, 4, rest_with_last=True)
except ValueError:
return self.app
new_quota = request.headers.get('X-Account-Meta-Quota-Bytes')
remove_quota = request.headers.get('X-Remove-Account-Meta-Quota-Bytes')
if remove_quota:
new_quota = 0 # X-Remove dominates if both are present
if request.environ.get('reseller_request') is True:
if new_quota and not new_quota.isdigit():
return HTTPBadRequest()
return self.app
# deny quota set for non-reseller
if new_quota is not None:
return HTTPForbidden()
account_info = get_account_info(request.environ, self.app)
if not account_info or not account_info['bytes']:
return self.app
new_size = int(account_info['bytes']) + (request.content_length or 0)
quota = int(account_info['meta'].get('quota-bytes', -1))
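        # Hypothetical example: with 900 bytes already stored, a 200 byte PUT
        # and quota-bytes set to 1000, new_size is 1100, the check
        # 0 <= 1000 < 1100 holds and the request is rejected below.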
if 0 <= quota < new_size:
return HTTPRequestEntityTooLarge()
return self.app
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
def account_quota_filter(app):
return AccountQuotaMiddleware(app)
return account_quota_filter
|
{
"content_hash": "d78710ab15e1e796ebcc563b601913e1",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 30.57777777777778,
"alnum_prop": 0.659156976744186,
"repo_name": "ctbrizhp/swift1.9.2",
"id": "06b0d9aa6fd06ad63e7e1ab7ee9dda5fc517751d",
"size": "3343",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "swift/common/middleware/account_quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "2901931"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from openstack.common.fixture import config
from tests.utils import BaseTestCase
conf = cfg.CONF
class ConfigTestCase(BaseTestCase):
def setUp(self):
super(ConfigTestCase, self).setUp()
self.config = self.useFixture(config.Config(conf)).config
self.config_fixture = config.Config(conf)
conf.register_opt(cfg.StrOpt(
'testing_option', default='initial_value'))
def test_overriden_value(self):
self.assertEqual(conf.get('testing_option'), 'initial_value')
self.config(testing_option='changed_value')
self.assertEqual(conf.get('testing_option'),
self.config_fixture.conf.get('testing_option'))
def test_cleanup(self):
self.config(testing_option='changed_value')
self.assertEqual(self.config_fixture.conf.get('testing_option'),
'changed_value')
self.config_fixture.conf.reset()
self.assertEqual(conf.get('testing_option'), 'initial_value')
|
{
"content_hash": "494157f1d3db16ca48a7bf76a786d6dc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.6579457364341085,
"repo_name": "JioCloud/oslo-incubator",
"id": "3368272d32ef949ff85cf25efa0fe03c38a4c5b5",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/fixture/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1421922"
},
{
"name": "Shell",
"bytes": "3184"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from itertools import count
import numpy as np
from toolz import curry
from .core import Array, normalize_chunks
linspace_names = ('linspace-%d' % i for i in count(1))
arange_names = ('arange-%d' % i for i in count(1))
def _get_blocksizes(num, blocksize):
# compute blockdims
remainder = (num % blocksize,)
if remainder == (0,):
remainder = tuple()
blocksizes = ((blocksize,) * int(num // blocksize)) + remainder
return blocksizes
def linspace(start, stop, num=50, chunks=None, dtype=None):
"""
Return `num` evenly spaced values over the closed interval [`start`,
`stop`].
    TODO: implement the `endpoint`, `retstep`, and `dtype` keyword args
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The last value of the sequence.
    chunks : int
        The number of samples on each block. Note that the last block will
        have fewer samples if `num % chunks != 0`.
    num : int, optional
        Number of samples in the returned dask array, including the
        endpoints.
Returns
-------
samples : dask array
"""
num = int(num)
if chunks is None:
raise ValueError("Must supply a chunks= keyword argument")
chunks = normalize_chunks(chunks, (num,))
range_ = stop - start
space = float(range_) / (num - 1)
name = next(linspace_names)
dsk = {}
blockstart = start
for i, bs in enumerate(chunks[0]):
blockstop = blockstart + ((bs - 1) * space)
task = (curry(np.linspace, dtype=dtype), blockstart, blockstop, bs)
blockstart = blockstart + (space * bs)
dsk[(name, i)] = task
return Array(dsk, name, chunks, dtype=dtype)
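# Minimal usage sketch (assuming this module is exposed as ``dask.array`` and
# that the usual ``compute()`` entry point is available in this version):
#     import dask.array as da
#     x = da.linspace(0, 1, num=101, chunks=25)  # blocks of 25, 25, 25, 25, 1
#     x.compute()                                # evaluates to a numpy array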
def arange(*args, **kwargs):
"""
Return evenly spaced values from `start` to `stop` with step size `step`.
The values are half-open [start, stop), so including start and excluding
stop. This is basically the same as python's range function but for dask
arrays.
When using a non-integer step, such as 0.1, the results will often not be
consistent. It is better to use linspace for these cases.
Parameters
----------
start : int, optional
The starting value of the sequence. The default is 0.
stop : int
The end of the interval, this value is excluded from the interval.
    step : int, optional
        The spacing between the values. The default is 1 when not specified.
    chunks : int
        The number of samples on each block. Note that the last block will
        have fewer samples if the total number of values is not evenly
        divisible by the chunk size.
    dtype : numpy dtype, optional
        The dtype of the returned array. Defaults to None.
Returns
-------
samples : dask array
"""
if len(args) == 1:
start = 0
stop = args[0]
step = 1
elif len(args) == 2:
start = args[0]
stop = args[1]
step = 1
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError('''
arange takes 3 positional arguments: arange([start], stop, [step])
''')
if 'chunks' not in kwargs:
raise ValueError("Must supply a chunks= keyword argument")
chunks = kwargs['chunks']
dtype = kwargs.get('dtype', None)
range_ = stop - start
num = int(abs(range_ // step))
if (range_ % step) != 0:
num += 1
chunks = normalize_chunks(chunks, (num,))
name = next(arange_names)
dsk = {}
elem_count = 0
for i, bs in enumerate(chunks[0]):
blockstart = start + (elem_count * step)
blockstop = start + ((elem_count + bs) * step)
task = (np.arange, blockstart, blockstop, step, dtype)
dsk[(name, i)] = task
elem_count += bs
return Array(dsk, name, chunks, dtype=dtype)
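# Minimal usage sketch (same assumptions as the linspace example above):
#     y = da.arange(0, 10, 2, chunks=2)  # values 0, 2, 4, 6, 8 in blocks of 2
#     y.compute()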
|
{
"content_hash": "b34ced6fe20422ec47af8128df28aa87",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 27.517482517482517,
"alnum_prop": 0.6035578144853876,
"repo_name": "esc/dask",
"id": "82820dfc17e81eba3c996bee56d36dd67fd70cac",
"size": "3935",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dask/array/creation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "511448"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
from django import forms
from .models import User
class UserForm(forms.ModelForm):
class Meta:
model = User
# Constrain the UserForm to just these fields.
fields = ("first_name", "last_name")
|
{
"content_hash": "e839a2b90673966f87656fcf91828183",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 54,
"avg_line_length": 18.666666666666668,
"alnum_prop": 0.6517857142857143,
"repo_name": "chhantyal/referly",
"id": "83e561a731ae8a227e957d139fb6f34c5319b3bf",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "referly/users/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "22020"
},
{
"name": "JavaScript",
"bytes": "3765"
},
{
"name": "Python",
"bytes": "41319"
}
],
"symlink_target": ""
}
|
"""
Grader file for Columbus problem
"""
def grade(autogen, key):
if '1492' in key:
return (True, 'Good work!')
else:
return (False, 'Nope')
|
{
"content_hash": "aeb487dc68a38c3f2078851661a2aa17",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 32,
"avg_line_length": 17.11111111111111,
"alnum_prop": 0.6103896103896104,
"repo_name": "stuyCTF/stuyCTF-Platform",
"id": "e35b02150e384d9135d8503e7e0aa788a075df56",
"size": "154",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "example_problems/misc/columbus/grader/grader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7294"
},
{
"name": "CoffeeScript",
"bytes": "51286"
},
{
"name": "HTML",
"bytes": "57602"
},
{
"name": "Python",
"bytes": "184241"
},
{
"name": "Shell",
"bytes": "4218"
}
],
"symlink_target": ""
}
|
from AppKit import *
from PyObjCTools.TestSupport import *
class TestNSNibLoading (TestCase):
def testMethods(self):
self.assertResultIsBOOL(NSBundle.loadNibFile_externalNameTable_withZone_)
self.assertResultIsBOOL(NSBundle.loadNibNamed_owner_)
self.assertResultIsBOOL(NSBundle.loadNibFile_externalNameTable_withZone_)
@min_os_level('10.8')
def testMethods10_8(self):
self.assertResultIsBOOL(NSBundle.loadNibNamed_owner_topLevelObjects_)
self.assertArgIsOut(NSBundle.loadNibNamed_owner_topLevelObjects_, 2)
if __name__ == "__main__":
main()
|
{
"content_hash": "79591ec7311cdac44561b6ba8e30a946",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 81,
"avg_line_length": 33.44444444444444,
"alnum_prop": 0.7358803986710963,
"repo_name": "ariabuckles/pyobjc-framework-Cocoa",
"id": "abbae38fd528e8e3b7d6157c4bab4dfdbdd77b68",
"size": "602",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PyObjCTest/test_nsnibloading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "5481"
},
{
"name": "Objective-C",
"bytes": "213902"
},
{
"name": "Python",
"bytes": "2450945"
}
],
"symlink_target": ""
}
|
default_app_config = __name__ + '.apps.CollGateMessengerTCPServer'
|
{
"content_hash": "dbe45ceb1f7a242df3c61c28f46ad7e9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 66,
"avg_line_length": 67,
"alnum_prop": 0.746268656716418,
"repo_name": "coll-gate/collgate",
"id": "c8f6648f9efe6370b6621a9858a869a3adeaf03e",
"size": "369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "messenger/tcpserver/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20334"
},
{
"name": "HTML",
"bytes": "245334"
},
{
"name": "JavaScript",
"bytes": "5131841"
},
{
"name": "Python",
"bytes": "1291968"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
}
|
from pystache import Template
import os.path
import re
from types import *
def get_or_attr(context_list, name, default=None):
if not context_list:
return default
for obj in context_list:
try:
return obj[name]
except KeyError:
pass
except:
try:
return getattr(obj, name)
except AttributeError:
pass
return default
class View(object):
template_name = None
template_path = None
template = None
template_encoding = None
template_extension = 'mustache'
def __init__(self, template=None, context=None, **kwargs):
self.template = template
context = context or {}
context.update(**kwargs)
self.context_list = [context]
def get(self, attr, default=None):
attr = get_or_attr(self.context_list, attr, getattr(self, attr, default))
if hasattr(attr, '__call__') and type(attr) is UnboundMethodType:
return attr()
else:
return attr
def get_template(self, template_name):
if not self.template:
from pystache import Loader
template_name = self._get_template_name(template_name)
self.template = Loader().load_template(template_name, self.template_path, encoding=self.template_encoding, extension=self.template_extension)
return self.template
def _get_template_name(self, template_name=None):
"""TemplatePartial => template_partial
Takes a string but defaults to using the current class' name or
the `template_name` attribute
"""
if template_name:
return template_name
template_name = self.__class__.__name__
def repl(match):
return '_' + match.group(0).lower()
return re.sub('[A-Z]', repl, template_name)[1:]
def _get_context(self):
context = {}
for item in self.context_list:
if hasattr(item, 'keys') and hasattr(item, '__getitem__'):
context.update(item)
return context
def render(self, encoding=None):
return Template(self.get_template(self.template_name), self).render(encoding=encoding)
def __contains__(self, needle):
return needle in self.context or hasattr(self, needle)
def __getitem__(self, attr):
val = self.get(attr, None)
if not val and val != 0:
raise KeyError("Key '%s' does not exist in View" % attr)
return val
def __getattr__(self, attr):
if attr == 'context':
return self._get_context()
raise AttributeError("Attribute '%s' does not exist in View" % attr)
def __str__(self):
return self.render()
|
{
"content_hash": "411be14eab6b1e5bf5b144c49d239aef",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 153,
"avg_line_length": 29.69148936170213,
"alnum_prop": 0.5854532425653888,
"repo_name": "alex/pystache",
"id": "a4e73b29a121949b49183782e1e3e9c9a3e1f5d4",
"size": "2791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pystache/view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27447"
}
],
"symlink_target": ""
}
|
import math
def isPrime(num):
    if num < 2:
        return False
    for i in range(2, int(math.sqrt(num)) + 1):
        if num % i == 0:
            return False
    return True
num = 0
numberOfPrimes = 0
while numberOfPrimes < 10001:
num += 1
if isPrime(num):
numberOfPrimes += 1
print(num)
|
{
"content_hash": "6c579e5f79ea295411dc6ba95a815aca",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 47,
"avg_line_length": 17.3125,
"alnum_prop": 0.5776173285198556,
"repo_name": "asweigart/AlDoesProjectEuler",
"id": "3eca08f0bcd3d6ce13dab0e2a30f765c3e57cbe1",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projecteuler/problem7.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "87479"
}
],
"symlink_target": ""
}
|
import pygame, sys
from pygame.locals import *
FPS = 30
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
REVEALSPEED = 8
BOXSIZE = 40
GAPSIZE = 10
BOARDWIDTH = 10
BOARDHEIGHT = 7
assert (BOARDWIDTH * BOARDHEIGHT) % 2 == 0
XMARGIN = int((WINDOWWIDTH - (BOARDWIDTH *(BOXSIZE + GAPSIZE))) /2)
YMARGIN = int((WINDOWHEIGHT - (BOARDHEIGHT *(BOXSIZE + GAPSIZE))) / 2)
# color
GRAY = (100, 100, 100)
NAVYBLUE = (60, 60, 100)
WHITE = (255, 255, 255)
BLACK = (0,0,0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
CYAN = (0, 255, 255)
BGCOLOR = NAVYBLUE
LIGHTBGCOLOR = GRAY
BOXCOLOR = WHITE
HIGHLIGHTCOLOR = BLUE
DONUT = 'donut'
SQUARE = 'square'
DIAMOND = 'diamond'
LINES = 'lines'
OVAL = 'oval'
ALLCOLORS = (RED, GREEN, BLUE, YELLOW, ORANGE, PURPLE, CYAN)
ALLSHAPES = (DONUT, SQUARE, DIAMOND, LINES, OVAL)
assert len(ALLCOLORS) * len(ALLSHAPES) * 2 >= BOARDHEIGHT * BOARDWIDTH
def main():
global FPSCLOCK, DISPLAYSURF
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
mousex = 0
mousey = 0
pygame.display.set_caption('Memory Game')
mainBoard = getRandomizeBoard()
revealedBoxes = generateRevealedBoxesData(False)
firstSelection = None
DISPLAYSURF.fill(BGCOLOR)
startGameAnimation(mainBoard)
while True:
mouseClicked = False
DISPLAYSURF.fill(BGCOLOR)
drawBoard(mainBoard, revealedBoxes)
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYUP and event.key ==
K_ESCAPE):
pygame.quit()
sys.exit()
elif event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
mouseClicked = True
boxx, boxy = getBoxAtPixel(mousex, mousey)
if boxx != None and boxy != None:
if not revealedBoxes[boxx][boxy]:
drawHighlightBox(boxx, boxy)
if not revealedBoxes[boxx][boxy] and mouseClicked:
revealBoxesAnimation(mainBoard, [(boxx, boxy)])
revealedBoxes[boxx][boxy] = True
if firstSelection == None:
firstSelection = (boxx, boxy)
else:
icon1shape, icon1color = getShapeAndColor(mainBoard,
firstSelection[0], firstSelection[1])
icon2shape, icon2color = getShapeAndColor(mainBoard, boxx,
boxy)
                    if icon1shape != icon2shape or icon1color != icon2color:
pygame.time.wait(1000)
coverBoxesAnimation(mainBoard, [(firstSelection[0],
firstSelection[1]), (boxx, boxy)])
revealedBoxes[firstSelection[0]][firstSelection[1]]=False
revealedBoxes[boxx][boxy] = False
elif hasWon(revealedBoxes):
gameWonAnimation(mainBoard)
pygame.time.wait(2000)
mainBoard = getRandomizeBoard()
revealedBoxes = generateRevealedBoxesData(False)
drawBoard(mainBoard, revealedBoxes)
pygame.display.update()
pygame.time.wait(1000)
startGameAnimation(mainBoard)
firstSelection = None
pygame.display.update()
FPSCLOCK.tick(FPS)
def generateRevealedBoxesData(val):
pass
def getRandomizeBoard():
pass
def splitIntoGroupsOf(groupSize, theList):
pass
def leftTopCoordsOfBox(boxx, boxy):
pass
def getBoxAtPixel(x, y):
pass
def drawIcon(shape, color, boxx, boxy):
pass
def getShapeAndColor(board, boxx, boxy):
pass
def drawBoxCovers(board, boxes, coverage):
pass
def revealBoxesAnimation(board, boxesToReveal):
pass
def coverBoxesAnimation(board, boxesToCover):
pass
def drawBoard(board, revealed):
pass
def drawHighlightBox(boxx, boxy):
pass
def startGameAnimation(board):
pass
def gameWonAnimation(board):
pass
def hasWon(revealedBoxes):
pass
if __name__ == '__main__':
main()
|
{
"content_hash": "8419765a82971cb5e2a6328e7ed355c2",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 81,
"avg_line_length": 29.328859060402685,
"alnum_prop": 0.5897025171624714,
"repo_name": "rve/invent-with-python",
"id": "02547248a352155b46557a267ad8d3a90852c8d5",
"size": "4370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypuzzle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26758"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import logging
from smac.facade.func_facade import fmin_smac
def rosenbrock_2d(x):
""" The 2 dimensional Rosenbrock function as a toy model
    The Rosenbrock function is well known in the optimization community and
    often serves as a toy problem. It can be defined for arbitrary
    dimensions. The minimum is always at x_i = 1 with a function value of
zero. All input parameters are continuous. The search domain for
all x's is the interval [-5, 5].
"""
x1 = x[0]
x2 = x[1]
val = 100. * (x2 - x1 ** 2.) ** 2. + (1 - x1) ** 2.
return val
# debug output
logging.basicConfig(level=10)
logger = logging.getLogger("Optimizer") # Enable to show Debug outputs
x, cost, _ = fmin_smac(func=rosenbrock_2d,
x0=[-3, -4],
bounds=[(-5, 5), (-5, 5)],
maxfun=325,
rng=3)
print(x, cost)
|
{
"content_hash": "ad00e8d100f12a684dd64454f618cdb6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 29.84375,
"alnum_prop": 0.6020942408376964,
"repo_name": "SgnJp/SMAC3",
"id": "11a2aa95d577dafb380262bc64325cfa8ff3b792",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/rosenbrock.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7251"
},
{
"name": "Python",
"bytes": "296742"
},
{
"name": "Shell",
"bytes": "1115"
}
],
"symlink_target": ""
}
|
import argparse
import unittest
from util import *
class TestMover(unittest.TestCase):
# test single move
def test_single_move(self):
for action in MOVE_TYPES:
cmd_create, cmd_move = move_random_file(action, FILE_SIZE)
self.assertTrue(cmd_create['state'] == "DONE")
self.assertTrue(cmd_move['state'] == "DONE", "{} action failed".format(action))
# test double moves for any pair of move actions
def test_double_moves(self):
for action1 in MOVE_TYPES:
for action2 in MOVE_TYPES:
cmd_create, cmd_move_1, cmd_move_2 = move_random_file_twice(action1, action2, FILE_SIZE)
self.assertTrue(cmd_create['state'] == "DONE")
self.assertTrue(cmd_move_1['state'] == "DONE", "{} action failed".format(action1))
self.assertTrue(cmd_move_2['state'] == "DONE", "{} action failed".format(action2))
# move randomly and continually
def test_random_list_mover(self):
# get the random test list
cmds = move_random_task_list(FILE_SIZE)
# check the result
self.assertTrue(all_success(cmds))
# move randomly and continually, nearby mover type can be the same
def test_random_list_mover_totally(self):
# get the random test list
cmds = move_random_task_list_totally(FILE_SIZE)
# check the result
self.assertTrue(all_success(cmds))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-size', default='64MB')
parser.add_argument('unittest_args', nargs='*')
args, unknown_args = parser.parse_known_args()
sys.argv[1:] = unknown_args
print "The file size for test is {}.".format(args.size)
FILE_SIZE = convert_to_byte(args.size)
unittest.main()
|
{
"content_hash": "f47861525402c7aba2ec49235b624f71",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 108,
"avg_line_length": 37.83673469387755,
"alnum_prop": 0.6202804746494067,
"repo_name": "Intel-bigdata/SSM",
"id": "ab0dbab6f82566a791b7a8ce1d81320e00e397e6",
"size": "1854",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "supports/integration-test/test_mover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6167"
},
{
"name": "Batchfile",
"bytes": "11970"
},
{
"name": "C",
"bytes": "65351"
},
{
"name": "C++",
"bytes": "6401"
},
{
"name": "CMake",
"bytes": "17721"
},
{
"name": "CSS",
"bytes": "77302"
},
{
"name": "HTML",
"bytes": "188088"
},
{
"name": "Java",
"bytes": "4765833"
},
{
"name": "JavaScript",
"bytes": "414433"
},
{
"name": "Python",
"bytes": "67774"
},
{
"name": "Roff",
"bytes": "60995"
},
{
"name": "Scala",
"bytes": "31517"
},
{
"name": "Shell",
"bytes": "76727"
},
{
"name": "Thrift",
"bytes": "4680"
},
{
"name": "XSLT",
"bytes": "7956"
}
],
"symlink_target": ""
}
|
"""Support for Blinkt! lights on Raspberry Pi."""
import importlib
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
SUPPORT_BLINKT = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
DEFAULT_NAME = "blinkt"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Blinkt Light platform."""
blinkt = importlib.import_module("blinkt")
# ensure that the lights are off when exiting
blinkt.set_clear_on_exit()
name = config[CONF_NAME]
add_entities(
[BlinktLight(blinkt, name, index) for index in range(blinkt.NUM_PIXELS)]
)
class BlinktLight(LightEntity):
"""Representation of a Blinkt! Light."""
_attr_supported_features = SUPPORT_BLINKT
_attr_should_poll = False
_attr_assumed_state = True
def __init__(self, blinkt, name, index):
"""Initialize a Blinkt Light.
Default brightness and white color.
"""
self._blinkt = blinkt
self._attr_name = f"{name}_{index}"
self._index = index
self._attr_is_on = False
self._attr_brightness = 255
self._attr_hs_color = [0, 0]
def turn_on(self, **kwargs):
"""Instruct the light to turn on and set correct brightness & color."""
if ATTR_HS_COLOR in kwargs:
self._attr_hs_color = kwargs[ATTR_HS_COLOR]
if ATTR_BRIGHTNESS in kwargs:
self._attr_brightness = kwargs[ATTR_BRIGHTNESS]
percent_bright = self.brightness / 255
rgb_color = color_util.color_hs_to_RGB(*self.hs_color)
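        # Hypothetical example: brightness 128 gives percent_bright of about
        # 0.5, so a pure red hs_color is sent to the Blinkt! at roughly half
        # intensity.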
self._blinkt.set_pixel(
self._index, rgb_color[0], rgb_color[1], rgb_color[2], percent_bright
)
self._blinkt.show()
self._attr_is_on = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._blinkt.set_pixel(self._index, 0, 0, 0, 0)
self._blinkt.show()
self._attr_is_on = False
self.schedule_update_ha_state()
|
{
"content_hash": "45eab656e3917569efa6b193028d1976",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 81,
"avg_line_length": 28.72289156626506,
"alnum_prop": 0.6396812080536913,
"repo_name": "jawilson/home-assistant",
"id": "e6a3ecd362dcd99c401b0a8289d519feb248a3b3",
"size": "2384",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "homeassistant/components/blinkt/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import logging
import time
import re
from . import common
from ...systems import path
class ParsedFileException(Exception):
pass
class ParsedFileLine(object):
'''Lines returned by `find . -ls` have an inconsistent format. We
therefore have to do a bit of computation on the results to
correlate values with fields. Splitting on spaces is a good first
guess. However remember that paths can have spaces, and the symlink
display format ("foo -> bar") also messes with us.
With that in mind, here is an object to interpret a `find . -ls`
result line.'''
logger = logging.getLogger(__name__ + '.ParsedFileLine')
# TODO: we will need to get smarter about this
# a static list of diffs will come back and bite us as soon as we find a
# system where somebody is retaining important state in /tmp, for example
ignored = [
re.compile('^/dev/'),
        re.compile(r'^/lost\+found/'),
re.compile('^/proc/'),
re.compile('^/run/'),
re.compile('^/sys/'),
re.compile('^/tmp/'),
re.compile('^/var/log/'),
]
symlink_deref_re = re.compile(r' -> ')
meta_re = re.compile(r'''
^\s* # beginning of line, followed by 0 or more spaces
(?P<inode>\d{1,20})
\s+
(?P<blocks>\d{1,10})
\s
(?P<perms>[-cld][-r][-w][-xs][-r][-w][-xs][-r][-w][-xtT])
\s+
(?P<lcount>\d{1,10})
\s
(?P<owner>[-a-zA-Z0-9_]{1,20})
\s+
(?P<group>[-a-zA-Z0-9_]{1,20})
\s+
# optional size; char devices don't have this
(
(?P<size>\d{1,20})
\s
)?
(?P<month>(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec))
\s+
# 1 or 2 digits
(?P<day>\d{1,2})
\s+
# valid: 2013 OR 15:58
(?P<timex>(\d{4}|\d\d:\d\d))
\s
# alternate group: symlink has " -> ", other files don't
(
(?P<source>.+)\ ->\ (?P<target>.+)
|
(?P<path>.+)
)
''', re.VERBOSE)
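    # Illustrative line this pattern is meant to accept (spacing varies by
    # platform, and this sample is not taken from real output):
    #   3408282    4 drwxr-xr-x   2 root     root         4096 Mar  1 12:34 /etc/cron.d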
def __init__(self, line):
m = ParsedFileLine.meta_re.match(line)
try:
assert m
except AssertionError, e:
            raise ParsedFileException('Unable to understand line: %s' % line)
p = path.Path()
p.inode = m.group('inode')
p.blocks = m.group('blocks')
p.perms = m.group('perms')
p.link_count = m.group('lcount')
p.owner = m.group('owner')
p.group = m.group('group')
p.size = m.group('size')
p.month = m.group('month')
p.day = m.group('day')
p.more_time = m.group('timex')
if p.size is None:
if p.perms[0] != 'c':
                raise ParsedFileException('Missing file size, but not a char device: %s' % line)
else:
if p.perms[0] == 'c':
                raise ParsedFileException('Has file size, but is a char device: %s' % line)
self.path = p
self.ignore = False
self.set_path(m)
if self.is_ignored():
self.ignore = True
def is_ignored(self):
for pattern in ParsedFileLine.ignored:
if pattern.match(self.path.path):
return True
return False
def set_path(self, m):
'''Set a path and possibly also a symlink target. There's room
for interpretation here because paths can have spaces, and
symlinks contain spaces due to their 'foo -> bar' format.'''
if m.group('path') is not None:
if self.path.perms[0] == 'l':
                raise ParsedFileException('Invalid symlink format: %s' % m.group('path'))
# ignore "observer effect" commands, that is: Ansible
if common.Observer.timestamp_re.search(m.group('path')):
self.ignore = True
# /opt/VBoxGuestAdditions-4.3.8
self.path.path = m.group('path').rstrip('\n')
return
if self.path.perms[0] != 'l':
            raise ParsedFileException('Symlink formatted path, but file is not a symlink: %s' % m.group('path'))
self.path.path = m.group('source')
self.path.link_target = m.group('target').rstrip('\n')
if ParsedFileLine.symlink_deref_re.search(self.path.path):
            raise ParsedFileException('Unexpected symlink dereference found in path: %s' % self.path.path)
class FilesStdoutLog(common.Log):
def parse(self):
self.logger.debug('parsing')
self.data = path.Paths()
self.name = 'paths'
with open(self.path, 'r') as f:
start_time = time.time()
for line in f.readlines():
self.parse_line(line)
self.logger.debug('completed parsing in %d seconds',
time.time() - start_time)
def parse_line(self, line):
parsed = ParsedFileLine(line)
if parsed.ignore: return
assert parsed.path.path not in self.data
self.data[parsed.path.path] = parsed.path
#self.logger.debug('path: %s',parsed.path.path)
|
{
"content_hash": "304f69264bb8b6088edf6a413adda260",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 110,
"avg_line_length": 31.288235294117648,
"alnum_prop": 0.5217146080090242,
"repo_name": "Doveps/mono",
"id": "635a64d3094d911797ab32233c65c041c990c220",
"size": "5398",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bassist/bassist/parser/log_file/files_stdout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1025"
},
{
"name": "HTML",
"bytes": "7230"
},
{
"name": "PLpgSQL",
"bytes": "19874"
},
{
"name": "Python",
"bytes": "103432"
},
{
"name": "Ruby",
"bytes": "3926"
},
{
"name": "Shell",
"bytes": "588"
}
],
"symlink_target": ""
}
|
from enable.api import Container, TextField, Window
from enable.example_support import DemoFrame, demo_main
size = (500, 400)
class MyFrame(DemoFrame):
def _create_window(self):
text_field = TextField(position=[25,100], width=200)
text = "This a test with a text field\nthat has more text than\n"
text += "can fit in it."
text_field2 = TextField(position=[25,200], width=200,
height=50, multiline=True,
text=text, font="Courier New 14")
text_field3 = TextField(position=[250,50], height=300,
width=200, multiline=True,
font="Courier New 14")
container = Container(bounds=size, bgcolor='grey')
container.add(text_field, text_field2, text_field3)
return Window(self, -1, component=container)
if __name__ == '__main__':
# Save demo so that it doesn't get garbage collected when run within
# existing event loop (i.e. from ipython).
demo = demo_main(MyFrame, size=size)
|
{
"content_hash": "a0980ac37c3c1bb8974e2291966992f2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 36.1,
"alnum_prop": 0.5909510618651893,
"repo_name": "tommy-u/enable",
"id": "1078decb4ce7c0fbfca3d6bc86571278ff8f268f",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/enable/text_field_demo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "240"
},
{
"name": "C",
"bytes": "5526949"
},
{
"name": "C++",
"bytes": "3058044"
},
{
"name": "DIGITAL Command Language",
"bytes": "35819"
},
{
"name": "Groff",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "58238"
},
{
"name": "Objective-C",
"bytes": "16551"
},
{
"name": "Python",
"bytes": "2202660"
},
{
"name": "Shell",
"bytes": "6286"
}
],
"symlink_target": ""
}
|
import gc
import sys
from time import sleep
from threading import Event
import pytest
from gpiozero import *
def test_source_delay(mock_factory):
with OutputDevice(2) as device:
device.source_delay = 1
assert device.source_delay == 1
device.source_delay = 0.1
assert device.source_delay == 0.1
with pytest.raises(ValueError):
device.source_delay = -1
def test_source(mock_factory):
pin = mock_factory.pin(4)
with InputDevice(4) as in_dev, OutputDevice(3) as out_dev:
assert out_dev.source is None
out_dev.source = in_dev.values
assert out_dev.source is not None
assert out_dev.value == 0
pin.drive_high()
# Give the output device some time to read the input device state
sleep(0.1)
assert out_dev.value == 1
def test_active_time(mock_factory):
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as dev:
assert dev.active_time is None
assert dev.inactive_time >= 0.0
pin.drive_high()
sleep(0.1)
assert dev.active_time >= 0.1
assert dev.inactive_time is None
pin.drive_low()
sleep(0.1)
assert dev.active_time is None
assert dev.inactive_time >= 0.1
def test_basic_callbacks(mock_factory):
pin = mock_factory.pin(4)
evt = Event()
with DigitalInputDevice(4) as dev:
dev.when_activated = evt.set
assert dev.when_activated is not None
pin.drive_high()
assert evt.wait(0.1)
pin.drive_low()
dev.when_activated = None
assert dev.when_activated is None
evt.clear()
pin.drive_high()
assert not evt.wait(0.1)
def test_builtin_callbacks(mock_factory):
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as dev:
assert gc.isenabled()
dev.when_activated = gc.disable
assert dev.when_activated is gc.disable
pin.drive_high()
assert not gc.isenabled()
gc.enable()
def test_callback_with_param(mock_factory):
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as dev:
devices = []
evt = Event()
def cb(d):
devices.append(d)
evt.set()
dev.when_activated = cb
assert dev.when_activated is not None
pin.drive_high()
assert evt.wait(1)
assert devices == [dev]
def test_bad_callback(mock_factory):
pin = mock_factory.pin(4)
with DigitalInputDevice(4) as dev:
with pytest.raises(BadEventHandler):
dev.when_activated = 100
with pytest.raises(BadEventHandler):
dev.when_activated = lambda x, y: x + y
def test_shared_key(mock_factory):
class SharedDevice(SharedMixin, GPIODevice):
def __init__(self, pin, pin_factory=None):
super().__init__(pin, pin_factory=pin_factory)
@classmethod
def _shared_key(cls, pin, pin_factory=None):
return pin
def _conflicts_with(self, other):
return not isinstance(other, SharedDevice)
with SharedDevice(4) as dev:
with SharedDevice(4) as another_dev:
pass
with pytest.raises(GPIOPinInUse):
GPIODevice(4)
|
{
"content_hash": "b539746be7b4df8ac718aaffc5809057",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 73,
"avg_line_length": 28.112068965517242,
"alnum_prop": 0.6074823673719718,
"repo_name": "waveform80/gpio-zero",
"id": "736e29e4b67ad06cda6db805269e9d90d10a1e57",
"size": "3467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Python",
"bytes": "553301"
}
],
"symlink_target": ""
}
|
"""
Client side of the heat worker RPC API.
"""
from heat.common import messaging
from heat.rpc import worker_api
class WorkerClient(object):
'''Client side of the heat worker RPC API.
API version history::
1.0 - Initial version.
1.1 - Added check_resource.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
self._client = messaging.get_rpc_client(
topic=worker_api.TOPIC,
version=self.BASE_RPC_API_VERSION)
@staticmethod
def make_msg(method, **kwargs):
return method, kwargs
def cast(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
client.cast(ctxt, method, **kwargs)
def check_resource(self, ctxt, resource_id,
current_traversal, data, is_update):
self.cast(ctxt, self.make_msg(
'check_resource', resource_id=resource_id,
current_traversal=current_traversal, data=data,
is_update=is_update))
|
{
"content_hash": "e176c0aca3acaff61ff1d2db2c5ea9d1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 59,
"avg_line_length": 26.80952380952381,
"alnum_prop": 0.5968028419182948,
"repo_name": "rh-s/heat",
"id": "b8fa5f7ea8d0cbd6d6fd81b8a4bd9ace0d4b3697",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/rpc/worker_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6288599"
},
{
"name": "Shell",
"bytes": "32845"
}
],
"symlink_target": ""
}
|
"""
Commands work with servers. (Hiss, boo.)
"""
import copy
from fabric.api import local, put, settings, require, run, sudo, task
from fabric.state import env
from jinja2 import Template
import app_config
"""
Setup
"""
@task
def setup():
"""
Setup servers for deployment.
    This does not set up services or push to S3. Run deploy() next.
"""
require('settings', provided_by=['production', 'staging'])
require('branch', provided_by=['stable', 'master', 'branch'])
if not app_config.DEPLOY_TO_SERVERS:
print 'You must set DEPLOY_TO_SERVERS = True in your app_config.py before setting up the servers.'
return
create_directories()
create_virtualenv()
clone_repo()
checkout_latest()
install_requirements()
setup_logs()
def create_directories():
"""
Create server directories.
"""
require('settings', provided_by=['production', 'staging'])
run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
run('mkdir -p /var/www/uploads/%(PROJECT_FILENAME)s' % app_config.__dict__)
def create_virtualenv():
"""
Setup a server virtualenv.
"""
require('settings', provided_by=['production', 'staging'])
run('virtualenv -p %(SERVER_PYTHON)s %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)
run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)
def clone_repo():
"""
Clone the source repository.
"""
require('settings', provided_by=['production', 'staging'])
run('git clone %(REPOSITORY_URL)s %(SERVER_REPOSITORY_PATH)s' % app_config.__dict__)
if app_config.REPOSITORY_ALT_URL:
run('git remote add bitbucket %(REPOSITORY_ALT_URL)s' % app_config.__dict__)
@task
def checkout_latest(remote='origin'):
"""
Checkout the latest source.
"""
require('settings', provided_by=['production', 'staging'])
require('branch', provided_by=['stable', 'master', 'branch'])
run('cd %s; git fetch %s' % (app_config.SERVER_REPOSITORY_PATH, remote))
run('cd %s; git checkout %s; git pull %s %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, remote, env.branch))
@task
def install_requirements():
"""
Install the latest requirements.
"""
require('settings', provided_by=['production', 'staging'])
run('%(SERVER_VIRTUALENV_PATH)s/bin/pip install -U -r %(SERVER_REPOSITORY_PATH)s/requirements.txt' % app_config.__dict__)
run('cd %(SERVER_REPOSITORY_PATH)s; npm install' % app_config.__dict__)
@task
def setup_logs():
"""
Create log directories.
"""
require('settings', provided_by=['production', 'staging'])
sudo('mkdir %(SERVER_LOG_PATH)s' % app_config.__dict__)
sudo('chown ubuntu:ubuntu %(SERVER_LOG_PATH)s' % app_config.__dict__)
@task
def install_crontab():
"""
Install cron jobs script into cron.d.
"""
require('settings', provided_by=['production', 'staging'])
sudo('cp %(SERVER_REPOSITORY_PATH)s/crontab /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
@task
def uninstall_crontab():
"""
    Remove a previously installed cron jobs script from cron.d.
"""
require('settings', provided_by=['production', 'staging'])
sudo('rm /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)
def delete_project():
"""
Remove the project directory. Invoked by shiva.
"""
run('rm -rf %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
"""
Configuration
"""
def _get_template_conf_path(service, extension):
"""
Derive the path for a conf template file.
"""
return 'confs/%s.%s' % (service, extension)
def _get_rendered_conf_path(service, extension):
"""
Derive the rendered path for a conf file.
"""
return 'confs/rendered/%s.%s.%s' % (app_config.PROJECT_FILENAME, service, extension)
def _get_installed_conf_path(service, remote_path, extension):
"""
Derive the installed path for a conf file.
"""
return '%s/%s.%s.%s' % (remote_path, app_config.PROJECT_FILENAME, service, extension)
def _get_installed_service_name(service):
"""
Derive the init service name for an installed service.
"""
return '%s.%s' % (app_config.PROJECT_FILENAME, service)
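# Illustration (not from the original fabfile; the values are hypothetical):
# for an entry ('uwsgi', '/etc/init', 'conf') in app_config.SERVER_SERVICES
# and PROJECT_FILENAME == 'budget_tracker', the helpers above resolve to:
#
#   _get_template_conf_path('uwsgi', 'conf')               -> 'confs/uwsgi.conf'
#   _get_rendered_conf_path('uwsgi', 'conf')                -> 'confs/rendered/budget_tracker.uwsgi.conf'
#   _get_installed_conf_path('uwsgi', '/etc/init', 'conf')  -> '/etc/init/budget_tracker.uwsgi.conf'
#   _get_installed_service_name('uwsgi')                    -> 'budget_tracker.uwsgi'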
@task
def render_confs():
"""
Renders server configurations.
"""
require('settings', provided_by=['production', 'staging'])
with settings(warn_only=True):
local('mkdir confs/rendered')
# Copy the app_config so that when we load the secrets they don't
# get exposed to other management commands
context = copy.copy(app_config.__dict__)
context.update(app_config.get_secrets())
for service, remote_path, extension in app_config.SERVER_SERVICES:
template_path = _get_template_conf_path(service, extension)
rendered_path = _get_rendered_conf_path(service, extension)
with open(template_path, 'r') as read_template:
with open(rendered_path, 'wb') as write_template:
payload = Template(read_template.read())
write_template.write(payload.render(**context))
@task
def deploy_confs():
"""
Deploys rendered server configurations to the specified server.
This will reload nginx and the appropriate uwsgi config.
"""
require('settings', provided_by=['production', 'staging'])
render_confs()
with settings(warn_only=True):
for service, remote_path, extension in app_config.SERVER_SERVICES:
rendered_path = _get_rendered_conf_path(service, extension)
installed_path = _get_installed_conf_path(service, remote_path, extension)
a = local('md5 -q %s' % rendered_path, capture=True)
b = run('md5sum %s' % installed_path).split()[0]
if a != b:
print 'Updating %s' % installed_path
put(rendered_path, installed_path, use_sudo=True)
if service == 'nginx':
sudo('service nginx reload')
elif service == 'uwsgi':
service_name = _get_installed_service_name(service)
sudo('initctl reload-configuration')
sudo('service %s restart' % service_name)
elif service == 'app':
run('touch %s' % app_config.UWSGI_SOCKET_PATH)
sudo('chmod 644 %s' % app_config.UWSGI_SOCKET_PATH)
sudo('chown www-data:www-data %s' % app_config.UWSGI_SOCKET_PATH)
else:
print '%s has not changed' % rendered_path
@task
def nuke_confs():
"""
DESTROYS rendered server configurations from the specified server.
This will reload nginx and stop the uwsgi config.
"""
require('settings', provided_by=['production', 'staging'])
for service, remote_path, extension in app_config.SERVER_SERVICES:
with settings(warn_only=True):
installed_path = _get_installed_conf_path(service, remote_path, extension)
sudo('rm -f %s' % installed_path)
if service == 'nginx':
sudo('service nginx reload')
elif service == 'uwsgi':
service_name = _get_installed_service_name(service)
sudo('service %s stop' % service_name)
sudo('initctl reload-configuration')
elif service == 'app':
sudo('rm %s' % app_config.UWSGI_SOCKET_PATH)
"""
Fabcasting
"""
@task
def fabcast(command):
"""
Actually run specified commands on the server specified
by staging() or production().
"""
require('settings', provided_by=['production', 'staging'])
if not app_config.DEPLOY_TO_SERVERS:
print 'You must set DEPLOY_TO_SERVERS = True in your app_config.py and setup a server before fabcasting.'
run('cd %s && bash run_on_server.sh fab %s $DEPLOYMENT_TARGET %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, command))
|
{
"content_hash": "999bd8108f0790876bd4037721b1b4c5",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 129,
"avg_line_length": 31.50597609561753,
"alnum_prop": 0.6230399595346484,
"repo_name": "wbez/budget-tracker",
"id": "03bbbc3731916eef4b30370f0e03b9d230e395a6",
"size": "7931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/servers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107377"
},
{
"name": "HTML",
"bytes": "28121"
},
{
"name": "JavaScript",
"bytes": "474998"
},
{
"name": "Nginx",
"bytes": "136"
},
{
"name": "Python",
"bytes": "72647"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
}
|
import time
import re
from os.path import exists, getmtime
from twisted.internet.task import LoopingCall
from carbon import log
from carbon.aggregator.buffers import BufferManager
class RuleManager:
def __init__(self):
self.rules = []
self.rules_file = None
self.read_task = LoopingCall(self.read_rules)
self.rules_last_read = 0.0
def clear(self):
self.rules = []
def read_from(self, rules_file):
self.rules_file = rules_file
self.read_rules()
self.read_task.start(10, now=False)
def read_rules(self):
if not exists(self.rules_file):
self.clear()
return
# Only read if the rules file has been modified
try:
mtime = getmtime(self.rules_file)
except OSError:
log.err("Failed to get mtime of %s" % self.rules_file)
return
if mtime <= self.rules_last_read:
return
# Read new rules
log.aggregator("reading new aggregation rules from %s" % self.rules_file)
new_rules = []
for line in open(self.rules_file):
line = line.strip()
if line.startswith('#') or not line:
continue
rule = self.parse_definition(line)
new_rules.append(rule)
log.aggregator("clearing aggregation buffers")
BufferManager.clear()
self.rules = new_rules
self.rules_last_read = mtime
def parse_definition(self, line):
try:
left_side, right_side = line.split('=', 1)
output_pattern, frequency = left_side.split()
method, input_pattern = right_side.split()
frequency = int( frequency.lstrip('(').rstrip(')') )
return AggregationRule(input_pattern, output_pattern, method, frequency)
except ValueError:
log.err("Failed to parse line: %s" % line)
raise
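# Illustration (not part of the original file): an aggregation rule line such as
#
#   <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
#
# is split by parse_definition() into the output pattern
# '<env>.applications.<app>.all.requests', frequency 60, method 'sum' and the
# input pattern '<env>.applications.<app>.*.requests'.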
class AggregationRule:
def __init__(self, input_pattern, output_pattern, method, frequency):
self.input_pattern = input_pattern
self.output_pattern = output_pattern
self.method = method
self.frequency = int(frequency)
if method not in AGGREGATION_METHODS:
raise ValueError("Invalid aggregation method '%s'" % method)
self.aggregation_func = AGGREGATION_METHODS[method]
self.build_regex()
self.build_template()
self.cache = {}
def get_aggregate_metric(self, metric_path):
if metric_path in self.cache:
return self.cache[metric_path]
match = self.regex.match(metric_path)
result = None
if match:
extracted_fields = match.groupdict()
try:
result = self.output_template % extracted_fields
except TypeError:
log.err("Failed to interpolate template %s with fields %s" % (self.output_template, extracted_fields))
if result:
self.cache[metric_path] = result
return result
def build_regex(self):
input_pattern_parts = self.input_pattern.split('.')
regex_pattern_parts = []
for input_part in input_pattern_parts:
if '<<' in input_part and '>>' in input_part:
i = input_part.find('<<')
j = input_part.find('>>')
pre = input_part[:i]
post = input_part[j+2:]
field_name = input_part[i+2:j]
regex_part = '%s(?P<%s>.+)%s' % (pre, field_name, post)
else:
i = input_part.find('<')
j = input_part.find('>')
if i > -1 and j > i:
pre = input_part[:i]
post = input_part[j+1:]
field_name = input_part[i+1:j]
regex_part = '%s(?P<%s>[^.]+)%s' % (pre, field_name, post)
elif input_part == '*':
regex_part = '[^.]+'
else:
regex_part = input_part.replace('*', '[^.]*')
regex_pattern_parts.append(regex_part)
regex_pattern = '\\.'.join(regex_pattern_parts) + '$'
self.regex = re.compile(regex_pattern)
def build_template(self):
self.output_template = self.output_pattern.replace('<', '%(').replace('>', ')s')
def avg(values):
if values:
return float( sum(values) ) / len(values)
def count(values):
if values:
return len(values)
AGGREGATION_METHODS = {
'sum' : sum,
'avg' : avg,
'min' : min,
'max' : max,
'count' : count
}
# Importable singleton
RuleManager = RuleManager()
|
{
"content_hash": "a18a06812644b8c6c32b037bdbf0d82f",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 110,
"avg_line_length": 27.223684210526315,
"alnum_prop": 0.6162397293378443,
"repo_name": "kharandziuk/carbon",
"id": "d0a589699132448e63e3d701345d5a293abd2f0c",
"size": "4138",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/carbon/aggregator/rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "192783"
},
{
"name": "Shell",
"bytes": "14106"
}
],
"symlink_target": ""
}
|
import argparse
import json
import logging
import multiprocessing as mp
import os
import sys
import time
# Local modules
import result_writer
import snmp_query
# Settings
# Where to expect the config file,
# if it's not specified via --config
DEFAULT_CONFIGPATH='config.json'
# How many seconds to pause between runs
PERIOD_DEFAULT=60
# Maximum number of processes to run at one time
MAXPROCS_DEFAULT=2
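# A hedged sketch of the expected config.json layout, inferred from how the
# keys are read below; the hostnames and any per-metric fields beyond
# "hostname" are purely illustrative:
#
#   {
#       "global": {
#           "datadog_api_key": "<your key>",
#           "period": 60,
#           "max_procs": 2
#       },
#       "metrics": [
#           {"hostname": "switch01.example.com", "...": "..."},
#           {"hostname": "router01.example.com", "...": "..."}
#       ]
#   }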
# The actual code
def read_configs(configpath):
'''
Read a config file and return the result.
'''
# Figure out where to look for the config file
if configpath:
filepath=configpath
else:
filepath=DEFAULT_CONFIGPATH
# Open the file and read it
logger.debug('Attempting to read config file "%s"' % filepath)
infile=open(filepath, 'r')
configs=json.load(infile)
infile.close()
logger.info('Parsed config file %s' % filepath)
return configs
def main(logger, configpath):
'''
Make stuff happen
- read the configs, and automagically re-read them if the file's last-modified
time changes
- handle the multiprocessing overhead
'''
# Process-management
procs={} # A dict of running processes
mgr=mp.Manager() # Source of shared-state structures
state=mgr.dict() # The shared dict we'll use for sharing state
queue=mp.Queue() # For passing results from SNMP queries to the Datadog writer
# Read the initial config file, and remember when we read it
configs=read_configs(configpath)
config_mtime=int(os.path.getmtime(configpath))
if 'period' in configs['global']:
period=configs['global']['period']
else:
period=PERIOD_DEFAULT
if 'max_procs' in configs['global']:
max_procs=configs['global']['max_procs']
else:
max_procs=MAXPROCS_DEFAULT
# Start the process that reads the queue and feeds Datadog
writer=mp.Process(target=result_writer.run, args=(queue, configs['global']['datadog_api_key'], logger), name='writer')
writer.start()
# Periodically poll the targets until forcibly interrupted
while True:
starttime=int(time.time())
logger.info('Starting run with timestamp %d' % starttime)
# Check whether to re-read the configs
config_curr_mtime=int(os.path.getmtime(configpath))
logger.debug('Config file last-modified timestamp: %d' % config_curr_mtime)
if config_curr_mtime > config_mtime:
logger.info("Config file's timestamp has changed from %s to %s; re-reading." % (config_mtime, config_curr_mtime))
configs=read_configs(configpath)
config_mtime=int(os.path.getmtime(configpath))
# Work through the list of targets
target_ctr=0
target_total = len(configs['metrics']) # The number of targets to query
unfinished=True # Do we still have targets to query?
while unfinished:
# Harvest any processes that have finished their work
hostnames=list(procs.keys())
for hostname in hostnames:
if not procs[hostname].is_alive():
logger.debug('Finished querying %s' % hostname)
procs[hostname].join()
# Remember to remove it from the dict
del procs[hostname]
# If we're already running the max allowed processes, pause a second then go around again
if len(procs) >= max_procs:
time.sleep(1)
# We're clear to add another one
else:
hostname=configs['metrics'][target_ctr]['hostname']
logger.debug('Starting to query %s' % hostname)
proc=mp.Process(target=snmp_query.query_device,
args=(configs['metrics'][target_ctr], logger, state, period, queue),
name=hostname)
procs[hostname]=proc # Add the process to the control dict before starting it
proc.start()
# Move on to the next target
target_ctr=target_ctr + 1
# Have we reached the end of the list?
if target_ctr == target_total:
unfinished=False
# Now they've all been dispatched, harvest them as they complete
logger.debug('All targets are being queried')
running=True
while running:
# Check each of the processes
hostnames=list(procs.keys())
for hostname in hostnames:
if not procs[hostname].is_alive():
logger.debug('Finished querying %s' % hostname)
procs[hostname].join()
# Remember to remove it from the dict
del procs[hostname]
# Still some left
if len(procs) > 0:
# Pause a second, instead of hammering the CPU with this
time.sleep(1)
# All done; exit the loop
else:
running=False
logger.debug('Harvested all worker processes; finished querying all targets')
# Pause until the next run
# being reasonably sure to start _on_ the minute (or whatever)
endtime=int(time.time())
delay=((endtime + period) % period)
if delay == 0:
            delay = period
logger.info('Run complete at timestamp %d after %d seconds. Pausing %d seconds for the next run.' % (endtime, endtime - starttime, delay))
time.sleep(delay)
# Reclaim the writer
writer.terminate()
writer.join()
# Explicitly return _something_
return True
# When invoked as a script, rather than a library
if __name__ == '__main__':
# Set up logging to STDERR
mp.log_to_stderr()
logger=mp.get_logger()
logger.setLevel(logging.INFO) # Required because the default is NOTSET
# Get the command-line arguments
parser = argparse.ArgumentParser(description='Perform SNMP discovery on a host, returning its data in a single structure.')
parser.add_argument('--debug', action='store_true', help='Enable debug logging')
parser.add_argument('--config', action='store', help='Path to the config file')
args=parser.parse_args()
# Set debug logging, if requested
if args.debug:
logger.setLevel(logging.DEBUG)
# Check whether the default config path was overridden
if args.config:
configpath=args.config
else:
configpath=False
# Run the script
logger.info('Logging is ready. Starting main program.')
main(logger, configpath)
|
{
"content_hash": "ea0640db823c178576303326112f71a5",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 146,
"avg_line_length": 39.005917159763314,
"alnum_prop": 0.6236347087378641,
"repo_name": "equill/datadog-snmp",
"id": "f3f7e48038ea6c595208a10b5fa9ce249df92aec",
"size": "7473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadog-snmp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15988"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournament', '0167_auto_20171025_2304'),
]
operations = [
migrations.AddField(
model_name='section',
name='order',
field=models.PositiveIntegerField(default=0),
preserve_default=False,
),
migrations.AlterField(
model_name='section',
name='name',
field=models.CharField(default=0, max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name='section',
name='section_group',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='tournament.SectionGroup'),
preserve_default=False,
),
]
|
{
"content_hash": "333700d439a9bffe25cc15b8c8b76182",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 122,
"avg_line_length": 29.433333333333334,
"alnum_prop": 0.5832389580973952,
"repo_name": "cyanfish/heltour",
"id": "4558c6f95a46703508bdbb566a4237984824dfd3",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heltour/tournament/migrations/0168_auto_20171028_1948.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13951"
},
{
"name": "HTML",
"bytes": "310481"
},
{
"name": "JavaScript",
"bytes": "26784"
},
{
"name": "Python",
"bytes": "902629"
},
{
"name": "SCSS",
"bytes": "32099"
},
{
"name": "Shell",
"bytes": "4551"
}
],
"symlink_target": ""
}
|
"""
FILE: sample_dynamic_classification.py
DESCRIPTION:
This sample demonstrates how to dynamically classify documents into one or multiple categories.
No model training is required to use dynamic classification.
The dynamic classification feature is part of a gated preview. Request access here:
https://aka.ms/applyforgatedlanguagefeature
USAGE:
python sample_dynamic_classification.py
Set the environment variables with your own values before running the sample:
1) AZURE_LANGUAGE_ENDPOINT - the endpoint to your Language resource.
2) AZURE_LANGUAGE_KEY - your Language subscription key
"""
import os
def sample_dynamic_classification() -> None:
# [START dynamic_classification]
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient
endpoint = os.environ["AZURE_LANGUAGE_ENDPOINT"]
key = os.environ["AZURE_LANGUAGE_KEY"]
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint,
credential=AzureKeyCredential(key),
)
documents = [
"The WHO is issuing a warning about Monkey Pox.",
"Mo Salah plays in Liverpool FC in England.",
]
result = text_analytics_client.dynamic_classification(
documents,
categories=["Health", "Politics", "Music", "Sports"],
classification_type="Multi"
)
for doc, classification_result in zip(documents, result):
if classification_result.kind == "DynamicClassification":
classifications = classification_result.classifications
print(f"\n'{doc}' classifications:\n")
for classification in classifications:
print("Category '{}' with confidence score {}.".format(
classification.category, classification.confidence_score
))
elif classification_result.is_error is True:
print("Document '{}' has an error with code '{}' and message '{}'".format(
doc, classification_result.code, classification_result.message
))
# [END dynamic_classification]
if __name__ == "__main__":
sample_dynamic_classification()
|
{
"content_hash": "9d670e29a513def6e593625f9bb4b2ba",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 99,
"avg_line_length": 35.83606557377049,
"alnum_prop": 0.6806953339432754,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8b65bc3c7ef6cdf13e76e7b3937956982e78fd96",
"size": "2497",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/textanalytics/azure-ai-textanalytics/samples/sample_dynamic_classification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""LVNFP Protocol Server."""
import uuid
import tornado.web
import tornado.httpserver
from empower.datatypes.etheraddress import EtherAddress
from empower.restserver.apihandlers import EmpowerAPIHandlerAdminUsers
from empower.core.image import Image
from empower.core.lvnf import LVNF
from empower.main import RUNTIME
class TenantLVNFHandler(EmpowerAPIHandlerAdminUsers):
"""Tenant Function Handler. Used to view anc manipulate Functions."""
HANDLERS = [r"/api/v1/tenants/([a-zA-Z0-9-]*)/lvnfs/?",
r"/api/v1/tenants/([a-zA-Z0-9-]*)/lvnfs/([a-zA-Z0-9-]*)/?"]
def initialize(self, server):
self.server = server
def get(self, *args, **kwargs):
""" List all Functions.
Args:
            tenant_id: the network name of the tenant
            lvnf_id: the id of the LVNF
Example URLs:
GET /api/v1/pools/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lvnfs
GET /api/v1/pools/52313ecb-9d00-4b7d-b873-b55d3d9ada26/
lvnfs/49313ecb-9d00-4a7c-b873-b55d3d9ada34
"""
try:
if len(args) > 2 or len(args) < 1:
raise ValueError("Invalid url")
tenant_id = uuid.UUID(args[0])
tenant = RUNTIME.tenants[tenant_id]
if len(args) == 1:
self.write_as_json(tenant.lvnfs.values())
self.set_status(200, None)
else:
lvnf_id = uuid.UUID(args[1])
lvnf = tenant.lvnfs[lvnf_id]
self.write_as_json(lvnf)
self.set_status(200, None)
except ValueError as ex:
self.send_error(400, message=ex)
except KeyError as ex:
self.send_error(404, message=ex)
def post(self, *args, **kwargs):
""" Add an LVNF to a tenant.
Args:
tenant_id: network name of a tenant
lvnf_id: the lvnf id
Example URLs:
POST /api/v1/pools/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lvnfs
POST /api/v1/pools/52313ecb-9d00-4b7d-b873-b55d3d9ada26/
lvnfs/49313ecb-9d00-4a7c-b873-b55d3d9ada34
"""
try:
if len(args) > 2 or len(args) < 1:
raise ValueError("Invalid url")
request = tornado.escape.json_decode(self.request.body)
if "version" not in request:
raise ValueError("missing version element")
if "image" not in request:
raise ValueError("missing image element")
if "addr" not in request:
raise ValueError("missing addr element")
if "nb_ports" not in request['image']:
raise ValueError("missing image/nb_ports element")
if "vnf" not in request['image']:
raise ValueError("missing image/vnf element")
handlers = []
if "handlers" in request['image']:
handlers = request['image']['handlers']
state_handlers = []
if "state_handlers" in request['image']:
state_handlers = request['image']['state_handlers']
tenant_id = uuid.UUID(args[0])
addr = EtherAddress(request['addr'])
tenant = RUNTIME.tenants[tenant_id]
cpp = tenant.cpps[addr]
image = Image(nb_ports=request['image']['nb_ports'],
vnf=request['image']['vnf'],
state_handlers=state_handlers,
handlers=handlers)
if not cpp.connection:
raise ValueError("CPP disconnected %s" % addr)
if len(args) == 1:
lvnf_id = uuid.uuid4()
else:
lvnf_id = uuid.UUID(args[1])
lvnf = LVNF(lvnf_id=lvnf_id,
tenant_id=tenant_id,
image=image,
cpp=cpp)
lvnf.start()
except ValueError as ex:
self.send_error(400, message=ex)
except KeyError as ex:
self.send_error(404, message=ex)
self.set_status(201, None)
def put(self, *args, **kwargs):
""" Add an LVNF to a tenant.
Args:
[0]: the tenant id
[1]: the lvnf id
Example URLs:
PUT /api/v1/pools/52313ecb-9d00-4b7d-b873-b55d3d9ada26/
lvnfs/49313ecb-9d00-4a7c-b873-b55d3d9ada34
"""
try:
if len(args) != 2:
raise ValueError("Invalid url")
request = tornado.escape.json_decode(self.request.body)
if "version" not in request:
raise ValueError("missing version element")
if "addr" not in request:
raise ValueError("missing addr element")
tenant_id = uuid.UUID(args[0])
lvnf_id = uuid.UUID(args[1])
addr = EtherAddress(request['addr'])
tenant = RUNTIME.tenants[tenant_id]
cpp = tenant.cpps[addr]
lvnf = tenant.lvnfs[lvnf_id]
if not cpp.connection:
raise ValueError("CPP disconnected %s" % addr)
lvnf.cpp = cpp
except ValueError as ex:
self.send_error(400, message=ex)
except KeyError as ex:
self.send_error(404, message=ex)
self.set_status(204, None)
def delete(self, *args, **kwargs):
""" Remove an lvnf from a Tenant.
Args:
tenant_id: network name of a tenant
lvnf_id: the lvnf_id
Example URLs:
            DELETE /api/v1/pools/52313ecb-9d00-4b7d-b873-b55d3d9ada26/
lvnfs/49313ecb-9d00-4a7c-b873-b55d3d9ada34
"""
try:
if len(args) != 2:
raise ValueError("Invalid url")
tenant_id = uuid.UUID(args[0])
tenant = RUNTIME.tenants[tenant_id]
lvnf_id = uuid.UUID(args[1])
lvnf = tenant.lvnfs[lvnf_id]
lvnf.stop()
except ValueError as ex:
self.send_error(400, message=ex)
except KeyError as ex:
self.send_error(404, message=ex)
self.set_status(204, None)
|
{
"content_hash": "244caa1585d549e92986662583841754",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 75,
"avg_line_length": 28.239819004524886,
"alnum_prop": 0.5345297228008332,
"repo_name": "Panagiotis-Kon/empower-runtime",
"id": "7f4e60e01d34ef36db93b0f0e6211db0b6c6ed4b",
"size": "6850",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "empower/lvnfp/tenantlvnfhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "67434"
},
{
"name": "HTML",
"bytes": "32485"
},
{
"name": "JavaScript",
"bytes": "4530426"
},
{
"name": "Python",
"bytes": "567426"
}
],
"symlink_target": ""
}
|
import sys
import collections
import random
import csv
import numpy as np
from sklearn.utils import shuffle
config_keys = ["DIMS", "NCLASS", "TRAIN_SIZE", "NOT_VAL_SIZE", "TEST_SIZE", "SEED", "DEBUG"]
def dist(x, y):
return np.linalg.norm(y-x)
def nearest_neighbors(d, sample, in_dataset, class_dataset):
dists = np.array([(dist(sample, p),c) for p,c in zip(in_dataset, class_dataset)], dtype=[('dist', 'float'), ('clase', int)])
min_dist_sample = dists[np.argmin(dists['dist'])]
if min_dist_sample[0] > d:
return np.array([min_dist_sample])
else:
return np.array([x for x in dists if x[0] <= d])
def get_random_most_common(classes):
    # Return the most frequent element (ties broken at random)
most_common = collections.Counter(classes).most_common()
max_count = most_common[0][1]
max_count_classes = list(filter(lambda x: x[1] == max_count, most_common))
return random.choice(max_count_classes)[0]
def predict_sample(d, sample, in_dataset, class_dataset, ignore_first=False):
    # Find the neighbours within distance d
diff = 1 if ignore_first else 0
knn = nearest_neighbors(d, sample, in_dataset, class_dataset)
classes = knn[diff:]['clase']
if not len(classes):
return random.randint(0, config["NCLASS"])
    # Return the most frequent class
return get_random_most_common(classes)
def get_predictions(d, samples, in_dataset, class_dataset, ignore_first=False):
return np.array([predict_sample(d, sample, in_dataset, class_dataset, ignore_first) for sample in samples])
def get_error(predictions, clases):
return np.sum([p!=c for p,c in zip(predictions, clases)])/float(clases.shape[0])*100
def optimize_d(in_train, class_train, in_val, class_val, in_test, class_test):
errors = []
diagonal = "diagonal" in sys.argv[1]
values = np.linspace(0.9, 1.5, num=25) * (config["DIMS"] if diagonal else np.sqrt(config["DIMS"]))
for d in values:
preds_val = get_predictions(d, in_val, in_train, class_train)
error_val = get_error(preds_val, class_val)
if config["DEBUG"]:
print("D = {}, validacion = {:f}".format(d, error_val))
errors.append((d, error_val))
best_d = min(errors, key = lambda x: x[1])[0]
return best_d
def load_config(path):
with open(path) as config_file:
values = config_file.readlines()[:len(config_keys)]
values = [ int(attr.strip()) for attr in values ]
config = dict(zip(config_keys, values))
for k,v in config.items(): print "{}: {}".format(k,v)
return config
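# A hedged illustration (not part of the original script): the <dataset>.knn
# config file is expected to contain one integer per line, in the order given
# by config_keys above, i.e.
#
#   line 1: DIMS, line 2: NCLASS, line 3: TRAIN_SIZE, line 4: NOT_VAL_SIZE,
#   line 5: TEST_SIZE, line 6: SEED, line 7: DEBUG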
def read_data(path):
rawdata = np.genfromtxt(path, delimiter=',')
return rawdata[:,:-1], rawdata[:,-1].astype(int)
def save_prediction(path, in_test, predics):
with open(path, "w") as predic_file:
for sample,pred in zip(in_test, predics):
attrs = ", ".join(map(str,sample))
predic_file.write("{},{}\n".format(attrs, pred))
def main():
if len(sys.argv) != 2:
print "Usage: " + sys.argv[0] + " <dataset>"
sys.exit(0)
STEM = sys.argv[1]
config_file = STEM + ".knn"
test_file = STEM + ".test"
train_file = STEM + ".data"
predic_file = STEM + ".predic"
global config
config = load_config(config_file)
if config["SEED"]>0:
random.seed(config["SEED"])
    # Load the dataset
in_train, class_train = read_data(train_file)
if config["SEED"]!=-1:
in_train, class_train = shuffle(in_train, class_train)
    # Split into training and validation sets
[in_train, in_val] = np.split(in_train, [config["NOT_VAL_SIZE"]], axis=0)
[class_train, class_val] = np.split(class_train, [config["NOT_VAL_SIZE"]], axis=0)
    # Load the test set
    in_test, class_test = read_data(test_file)
    # Optimize d
best_d = optimize_d(in_train, class_train, in_val, class_val, in_test, class_test)
print("Best D: {}".format(best_d))
print("Errores:")
#Predice train
#~ pred_train = get_predictions(best_d, in_train, in_train, class_train, True)
#~ error_train = get_error(pred_train, class_train)
#~ print("Entrenamiento: {}%".format(error_train))
#Predice val
#~ pred_val = get_predictions(best_d, in_val, in_train, class_train)
#~ error_val = get_error(pred_val, class_val)
#~ print("Validacion: {}%".format(error_val))
#Predice test
pred_test = get_predictions(best_d, in_test, in_train, class_train)
error_test = get_error(pred_test, class_test)
print("Test: {}%".format(error_test))
save_prediction(predic_file, in_test, pred_test)
if __name__ == "__main__":
main()
|
{
"content_hash": "6e55bfa5e15e846cbefcddd97fd83342",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 128,
"avg_line_length": 37.256,
"alnum_prop": 0.6274425595877174,
"repo_name": "mvpossum/machine-learning",
"id": "9291243cb31a10166a55ec06d6df3f9831d258d7",
"size": "4681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tp4/dnn.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45072"
},
{
"name": "C++",
"bytes": "4724758"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "2357"
},
{
"name": "Python",
"bytes": "8560"
},
{
"name": "Shell",
"bytes": "5875"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import ambari_simplejson as json
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Directory, Link
from resource_management.core.resources.system import Execute
from resource_management.core.shell import as_sudo
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.version import compare_versions
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script import Script
def setup_hdp_install_directory():
  # This is the name of the marker file.
SELECT_ALL_PERFORMED_MARKER = "/var/lib/ambari-agent/data/hdp-select-set-all.performed"
import params
if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
Execute(as_sudo(['touch', SELECT_ALL_PERFORMED_MARKER]) + ' ; ' +
format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{stack_version_unformatted} | tail -1`'),
only_if=format('ls -d /usr/hdp/{stack_version_unformatted}*'), # If any HDP version is installed
not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}") # Do that only once (otherwise we break rolling upgrade logic)
)
def setup_config():
import params
stackversion = params.stack_version_unformatted
Logger.info("FS Type: {0}".format(params.dfs_type))
if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
    # create core-site only if the hadoop config directory exists
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
def load_version(struct_out_file):
"""
Load version from file. Made a separate method for testing
"""
json_version = None
try:
if os.path.exists(struct_out_file):
with open(struct_out_file, 'r') as fp:
json_info = json.load(fp)
json_version = json_info['version']
except:
pass
return json_version
def link_configs(struct_out_file):
"""
Links configs, only on a fresh install of HDP-2.3 and higher
"""
if not Script.is_hdp_stack_greater_or_equal("2.3"):
Logger.info("Can only link configs for HDP-2.3 and higher.")
return
json_version = load_version(struct_out_file)
if not json_version:
Logger.info("Could not load 'version' from {0}".format(struct_out_file))
return
for k, v in conf_select.PACKAGE_DIRS.iteritems():
_link_configs(k, json_version, v)
def _link_configs(package, version, dirs):
"""
Link a specific package's configuration directory
"""
bad_dirs = []
for dir_def in dirs:
if not os.path.exists(dir_def['conf_dir']):
bad_dirs.append(dir_def['conf_dir'])
if len(bad_dirs) > 0:
Logger.debug("Skipping {0} as it does not exist.".format(",".join(bad_dirs)))
return
bad_dirs = []
for dir_def in dirs:
# check if conf is a link already
old_conf = dir_def['conf_dir']
if os.path.islink(old_conf):
Logger.debug("{0} is a link to {1}".format(old_conf, os.path.realpath(old_conf)))
bad_dirs.append(old_conf)
if len(bad_dirs) > 0:
return
# make backup dir and copy everything in case configure() was called after install()
for dir_def in dirs:
old_conf = dir_def['conf_dir']
old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
old_conf_copy = os.path.join(old_parent, "conf.install")
Execute(("cp", "-R", "-p", old_conf, old_conf_copy),
not_if = format("test -e {old_conf_copy}"), sudo = True)
# we're already in the HDP stack
versioned_confs = conf_select.create("HDP", package, version, dry_run = True)
Logger.info("New conf directories: {0}".format(", ".join(versioned_confs)))
need_dirs = []
for d in versioned_confs:
if not os.path.exists(d):
need_dirs.append(d)
if len(need_dirs) > 0:
conf_select.create("HDP", package, version)
# find the matching definition and back it up (not the most efficient way) ONLY if there is more than one directory
if len(dirs) > 1:
for need_dir in need_dirs:
for dir_def in dirs:
if 'prefix' in dir_def and need_dir.startswith(dir_def['prefix']):
old_conf = dir_def['conf_dir']
versioned_conf = need_dir
Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
only_if = format("ls {old_conf}/*"))
elif 1 == len(dirs) and 1 == len(need_dirs):
old_conf = dirs[0]['conf_dir']
versioned_conf = need_dirs[0]
Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
only_if = format("ls {old_conf}/*"))
# make /usr/hdp/[version]/[component]/conf point to the versioned config.
# /usr/hdp/current is already set
try:
conf_select.select("HDP", package, version)
# no more references to /etc/[component]/conf
for dir_def in dirs:
Directory(dir_def['conf_dir'], action="delete")
# link /etc/[component]/conf -> /usr/hdp/current/[component]-client/conf
Link(dir_def['conf_dir'], to = dir_def['current_dir'])
except Exception, e:
Logger.warning("Could not select the directory: {0}".format(e.message))
# should conf.install be removed?
|
{
"content_hash": "a91e8a26bd0d503d0e63dd2df2f0a89f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 160,
"avg_line_length": 38.30769230769231,
"alnum_prop": 0.6773246833487797,
"repo_name": "zouzhberk/ambaridemo",
"id": "d3b14481fbbd0556c24f32ec14523b6a1a1d057a",
"size": "6474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5982"
},
{
"name": "Groff",
"bytes": "13935"
},
{
"name": "HTML",
"bytes": "52"
},
{
"name": "Java",
"bytes": "8681846"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "105599"
},
{
"name": "PowerShell",
"bytes": "43170"
},
{
"name": "Python",
"bytes": "2751909"
},
{
"name": "Ruby",
"bytes": "9652"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "247846"
}
],
"symlink_target": ""
}
|
"""
This module contains the high-level functions to access the library. Care is
taken to make this as pythonic as possible and to hide as many of the gory
implementation details as possible.
"""
from x690.types import ObjectIdentifier
# !!! DO NOT REMOVE !!! The following import triggers the processing of SNMP
# Types and thus populates the Registry. If this is not included, Non x.690
# SNMP types will not be properly detected!
import puresnmp.types
from puresnmp.api.pythonic import PyWrapper
from puresnmp.api.raw import Client
from puresnmp.compatibility import package_version
from puresnmp.credentials import V1, V2C, V3, Auth, Priv
__version__ = package_version("puresnmp")
__all__ = [
"Auth",
"Client",
"ObjectIdentifier",
"Priv",
"PyWrapper",
"V1",
"V2C",
"V3",
"__version__",
]
|
{
"content_hash": "f7b05d2bc8cc2f818ce4d8736c155387",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 76,
"avg_line_length": 26.548387096774192,
"alnum_prop": 0.715674362089915,
"repo_name": "exhuma/puresnmp",
"id": "fd7f00d8d3fb7791dff064db616cd116431e3ad3",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "puresnmp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1466"
},
{
"name": "Python",
"bytes": "278983"
},
{
"name": "Shell",
"bytes": "1619"
}
],
"symlink_target": ""
}
|
"""TF metrics for Bandits algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Callable, Optional, Text
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.policies import constraints
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.metrics import tf_metric
from tf_agents.typing import types
from tf_agents.utils import common
@gin.configurable
class RegretMetric(tf_metric.TFStepMetric):
"""Computes the regret with respect to a baseline."""
def __init__(self,
baseline_reward_fn: Callable[[types.Tensor], types.Tensor],
name: Optional[Text] = 'RegretMetric',
dtype: float = tf.float32):
"""Computes the regret with respect to a baseline.
    The regret is computed as the difference between the baseline action reward
    and the current reward. The baseline reward is computed by calling the input
`baseline_reward_fn` function that given a (batched) observation computes
the baseline action reward.
Args:
baseline_reward_fn: function that computes the reward used as a baseline
for computing the regret.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._baseline_reward_fn = baseline_reward_fn
self.dtype = dtype
self.regret = common.create_variable(
initial_value=0, dtype=self.dtype, shape=(), name='regret')
super(RegretMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the regret value.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
baseline_reward = self._baseline_reward_fn(trajectory.observation)
trajectory_reward = trajectory.reward
if isinstance(trajectory.reward, dict):
trajectory_reward = trajectory.reward[bandit_spec_utils.REWARD_SPEC_KEY]
trajectory_regret = baseline_reward - trajectory_reward
self.regret.assign(tf.reduce_mean(trajectory_regret))
return trajectory
def result(self):
return tf.identity(
self.regret, name=self.name)
@gin.configurable
class SuboptimalArmsMetric(tf_metric.TFStepMetric):
"""Computes the number of suboptimal arms with respect to a baseline."""
def __init__(self,
baseline_action_fn: Callable[[types.Tensor], types.Tensor],
name: Optional[Text] = 'SuboptimalArmsMetric',
dtype: float = tf.float32):
"""Computes the number of suboptimal arms with respect to a baseline.
Args:
baseline_action_fn: function that computes the action used as a baseline
for computing the metric.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._baseline_action_fn = baseline_action_fn
self.dtype = dtype
self.suboptimal_arms = common.create_variable(
initial_value=0, dtype=self.dtype, shape=(), name='suboptimal_arms')
super(SuboptimalArmsMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the metric value.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
baseline_action = self._baseline_action_fn(trajectory.observation)
disagreement = tf.cast(
tf.not_equal(baseline_action, trajectory.action), tf.float32)
self.suboptimal_arms.assign(tf.reduce_mean(disagreement))
return trajectory
def result(self):
return tf.identity(
self.suboptimal_arms, name=self.name)
@gin.configurable
class ConstraintViolationsMetric(tf_metric.TFStepMetric):
"""Computes the violations of a certain constraint."""
def __init__(self,
constraint: constraints.BaseConstraint,
name: Optional[Text] = 'ConstraintViolationMetric',
dtype: float = tf.float32):
"""Computes the constraint violations given an input constraint.
Given a certain constraint, this metric computes how often the selected
actions in the trajectory violate the constraint.
Args:
constraint: an instance of `tf_agents.bandits.policies.BaseConstraint`.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._constraint = constraint
self.dtype = dtype
self.constraint_violations = common.create_variable(
initial_value=0.0,
dtype=self.dtype,
shape=(),
name='constraint_violations')
super(ConstraintViolationsMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the constraint violations metric.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
feasibility_prob_all_actions = self._constraint(trajectory.observation)
feasibility_prob_selected_actions = common.index_with_actions(
feasibility_prob_all_actions,
tf.cast(trajectory.action, dtype=tf.int32))
self.constraint_violations.assign(tf.reduce_mean(
1.0 - feasibility_prob_selected_actions))
return trajectory
def result(self):
return tf.identity(self.constraint_violations, name=self.name)
@gin.configurable
class DistanceFromGreedyMetric(tf_metric.TFStepMetric):
"""Difference between the estimated reward of the chosen and the best action.
This metric measures how 'safely' the agent explores: it calculates the
difference between what the agent thinks it would have gotten had it chosen
the best looking action, vs the action it actually took. This metric is not
equivalent to the regret, because the regret is calculated as a distance from
optimality, while here everything calculated is based on the policy's
'belief'.
"""
def __init__(self,
estimated_reward_fn: Callable[[types.Tensor], types.Tensor],
name: Optional[Text] = 'DistanceFromGreedyMetric',
dtype: float = tf.float32):
"""Init function for the metric.
Args:
estimated_reward_fn: A function that takes the observation as input and
computes the estimated rewards that the greedy policy uses.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._estimated_reward_fn = estimated_reward_fn
self.dtype = dtype
self.safe_explore = common.create_variable(
initial_value=0, dtype=self.dtype, shape=(), name='safe_explore')
super(DistanceFromGreedyMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the metric value.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
all_estimated_rewards = self._estimated_reward_fn(trajectory.observation)
max_estimated_rewards = tf.reduce_max(all_estimated_rewards, axis=-1)
estimated_action_rewards = tf.gather(
all_estimated_rewards, trajectory.action, batch_dims=1)
self.safe_explore.assign(
tf.reduce_mean(max_estimated_rewards - estimated_action_rewards))
return trajectory
def result(self):
return tf.identity(self.safe_explore, name=self.name)
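# A hedged usage sketch (not part of the original module). Assuming a bandit
# problem whose optimal per-step reward is a known constant, a RegretMetric
# could be attached to a driver roughly like this:
#
#   regret_metric = RegretMetric(
#       baseline_reward_fn=lambda observation: tf.fill(
#           [tf.shape(observation)[0]], 1.0))
#   # pass [regret_metric, ...] as observers to the driver, then read
#   # regret_metric.result() after each driver.run() call.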
|
{
"content_hash": "419162a44bfe067e91e59751ef5aa4b3",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 80,
"avg_line_length": 35.189320388349515,
"alnum_prop": 0.6956821630569734,
"repo_name": "tensorflow/agents",
"id": "7561dee8084c8b6d2e35573680df698ba08398cb",
"size": "7852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_agents/bandits/metrics/tf_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4930266"
},
{
"name": "Shell",
"bytes": "10950"
}
],
"symlink_target": ""
}
|
"""====================
RNA-Seq pipeline
====================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
The RNA-Seq pipeline imports unmapped reads from one or more
RNA-Seq experiments and performs the basic RNA-Seq analysis steps:
1. Map reads to genome
2. Build transcript models
3. Estimate differential expression
This pipeline works on a single genome.
Overview
========
The pipeline assumes the data derive from multiple tissues/conditions
(:term:`experiment`) with one or more biological and/or technical
replicates (:term:`replicate`). A :term:`replicate`
within each :term:`experiment` is a :term:`track`.
The pipeline performs the following tasks:
* analyse each experiment:
* for each replicate
      * map reads using tophat for each :term:`replicate` separately.
* predict splice isoforms and expression levels with :term:`cufflinks`.
* estimate expression levels of reference gene set with :term:`cufflinks`.
* annotate isoforms in replicates with genomic annotations
* compare isoforms in replicates within each :term:`experiment` (:term:`cuffcompare`)
and to reference gene set.
* summary statistics on reproducibility within each experiment
* build a combined gene set including the reference gene set and isoforms predicted by :term:`cufflinks`.
* compare all isoforms in all experiments+isoforms (:term:`cuffcompare`) to each other
and the reference gene set
* summary statistics on isoforms with respect to gene set
* estimate differential expression levels of transcripts
* different gene sets
* reference gene set
* combined gene set
* novel gene set
* different methods
* :term:`DESeq` (tag counting)
* :term:`cuffdiff`
* summary statistics on differential expression
Mapping strategy
----------------
The best strategy for mapping and transcriptome assembly depends on the length of your reads.
With short reads, detecting novel splice-junctions is a difficult task. In this case it will be
best to rely on a set of known splice-junctions. Longer reads map more easily across splice-junctions.
From the tophat manual::
TopHat finds splice junctions without a reference annotation. TopHat version 1.4 maps RNA-seq reads
first to a reference transcriptome. Only those reads that don't map in this initial process are
mapped against the genome.
Through the second stage of genome mapping, TopHat identifies novel splice junctions and then confirms
these through mapping to known junctions.
Short read sequencing machines can currently produce reads 100bp or longer, but many exons are
shorter than this, and so would be missed in the initial mapping. TopHat solves this problem
by splitting all input reads into smaller segments, and then mapping them independently. The segment
alignments are "glued" back together in a final step of the program to produce the end-to-end read alignments.
TopHat generates its database of possible splice junctions from three sources of evidence. The first
source is pairings of "coverage islands", which are distinct regions of piled up reads in the
initial mapping. Neighboring islands are often spliced together in the transcriptome, so
TopHat looks for ways to join these with an intron. The second source is only used when
TopHat is run with paired end reads. When reads in a pair come from different exons of a
transcript, they will generally be mapped far apart in the genome coordinate space. When
this happens, TopHat tries to "close" the gap between them by looking for subsequences of
the genomic interval between mates with a total length about equal to the expected distance
between mates. The "introns" in this subsequence are added to the database. The third, and
    strongest, source of evidence for a splice junction is when two segments from the same read
    are mapped far apart, or when an internal segment fails to map. With long (>=75bp) reads,
    "GT-AG", "GC-AG" and "AT-AC" introns can be found ab initio. With shorter reads, TopHat only
    reports alignments across "GT-AG" introns.
Thus, in order to increase the sensitivity of splice-site detection, it might be best to derive a set of
splice-junctions using all reads. This is not done automatically, but can be done manually by
adding a file with junctions to the ``tophat_options`` entry in the configuration file.
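For illustration only (the exact option name and section layout should be
checked against the tophat version and pipeline configuration in use), such a
junctions file could be supplied roughly as::

    # pipeline.ini
    [tophat]
    options=--raw-juncs all_experiments.juncs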
The pipeline supplies tophat with a list of all coding exons to
facilitate mapping across known splice-junctions. Whether they are
prioritized during mapping, I do not know.
Transcripts are built individually for each :term:`track`. This seems
to be the most rigorous way as there might be conflicting transcripts
between replicates and merging the sets might confuse transcript
reconstruction. Also, conflicting transcripts between replicates give
an idea of the variability of the data. However, if there are only
few reads, there might be a case for building transcript models using
reads from all replicates of an experiment. However, there is no
reason to merge reads between experiments.
LincRNA
--------
One of the main benefits of RNASeq over microarrays is that novel
transcripts can be detected. A particular interest are currently novel
long non-coding RNA. Unfortunately, it seems that these transcripts
are often expressed at very low levels and possibly in a highly
regulated manner, for example only in certain tissues. On top of their
low abundance, they frequently seem to be co-localized with protein
coding genes, making it hard to distinguish them from transcription
artifacts.
Success in identifying lincRNA will depend a lot on your input
data. Long, paired-end reads are likely to lead to
success. Unfortunately, many exploratory studies go for single-ended,
short read data. With such data, identification of novel spliced
transcripts will be rare and the set of novel transcripts is likely to
contain many false-positives.
The pipeline constructs a set of novel lncRNA in the following manner:
1. All transcript models overlapping protein coding transcripts are removed.
2. Overlapping lncRNA on the same strand are merged.
Artifacts in lncRNA analysis
++++++++++++++++++++++++++++
There are several sources of artifacts in lncRNA analysis
Read mapping errors
~~~~~~~~~~~~~~~~~~~
Mapping errors are identifiable as sharp peaks in the coverage
profile. Mapping errors occur if the true location of a read has more
mismatches than the original location or it maps across an undetected
splice-site. Most of the highly-expressed lncRNA are due to mapping
errors. Secondary locations very often overlap highly-expressed
protein-coding genes. These errors are annoying for two reasons: they
provide false positives, but at the same time prevent the reads to be
counted towards the expression of the true gene.
They can be detected in two ways:
1. via a peak-like distribution of reads, which should result in a low
   entropy of the start position density (a small sketch is given below).
   Note that this possibly can remove transcripts that are close to the
   length of a single read.
2. via mapping against known protein coding transcripts. However,
getting this mapping right is hard for two reasons. Firstly, mapping
errors usually involve reads aligned with mismatches. Thus, the
mapping has to be done either on the read-level (computationally
expensive), or on the transcript level after variant calling. (tricky,
and also computationally expensive). Secondly, as cufflinks extends
transcripts generously, only a part of a transcript might actually be
a mismapped part. Distinguishing partial true matches from random
matches will be tricky.
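As a rough illustration of the entropy criterion in (1), the start position
density of a transcript can be summarised as follows (a minimal sketch with
made-up names, not code from the pipeline itself)::

    import collections
    import math

    def start_position_entropy(read_starts):
        # Shannon entropy (in bits) of the read start position density.
        # A sharp pile-up of reads on one position gives an entropy near 0,
        # reads spread evenly over the transcript give a high entropy.
        counts = collections.Counter(read_starts)
        total = float(sum(counts.values()))
        return -sum((c / total) * math.log(c / total, 2)
                    for c in counts.values())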
Read mapping errors can also be avoided by
1. Longer read lengths
2. Strict alignment criteria
3. A two-stage mapping process.
Fragments
~~~~~~~~~
As lncRNA are expressed at low levels, it is likely that only a
partial transcript can be observed.
Differential expression
-----------------------
The quality of the rnaseq data (read-length, paired-end) determines
the quality of transcript models. For instance, if reads are short
(35bp) and/or reads are not paired-ended, transcript models will be
short and truncated. In these cases it might be better to concentrate
the analysis on only previously known transcript models.
The pipeline offers various sets for downstream analysis of
differential expression.
1. A set of previously known transcripts
(:file:`reference.gtf.gz`). Use this set if only interested in the
transcription of previously known transcripts or read length does
not permit transcript assembly. This does include all transcripts
within the ENSEMBL gene set, including processed but untranscribed
transcripts, transcripts with retained introns, pseudogenes, etc.
2. A set of previously known protein coding transcripts
(:file:`refcoding.gtf.gz`). This set is derived from
(:file:`reference.gtf.gz`) but only includes exons of transcripts
that are protein coding.
3. An ab-initio gene set (:file:`abinitio.gtf.gz`). The ab-initio set
is built by running :term:`cuffcompare` on the combined individual
:term:`cufflinks` results. Transcripts that have been observed in
only one :term:`track` are removed (removed transcripts end up in
:file:`removed.gtf.gz`) in order to exclude partial
transcripts. Use this set if reads are of good length and/or are
paired-ended.
4. A set of novel transcribed loci (:file:`novel.gtf.gz`). This gene
set is derived from the set of ab-initio transcripts. All ab-initio
transcripts overlapping protein coding transcripts in
:file:`refcoding.gtf.gz` are removed. Overlapping transcripts are
merged into a single transcript/gene. This removes individual
transcript structure, but retains constitutive introns. This set
retains transcripts that are only observed in a single
experiment. It also includes known non-coding transcripts, so a
locus might not necessarily be "novel".
Transcripts are the natural choice to measure expression of. However
other quantities might be of interest. Some quantities are biologically
meaningful, for example differential expression from a promoter shared
by several transcripts. Other quantities might not be biologically
meaningful but are necessary as a technical compromise. For example,
the overlapping transcripts might be hard to resolve and thus might
need to be aggregated per gene. Furthermore, functional annotation is
primarily associated with genes and not individual transcripts. The
pipeline attempts to measure transcription and differential expression
for a variety of entities following the classification laid down by
:term:`cuffdiff`:
isoform
Transcript level
gene
Gene level, aggregates several isoform/transcripts
tss
Transcription start site. Aggregate all isoforms starting from the same
:term:`tss`.
cds
Coding sequence expression. Ignore reads overlapping non-coding parts of
transcripts (UTRs, etc.). Requires
annotation of the cds and thus only available for :file:`reference.gtf.gz`.
Methods differ in their ability to measure transcription on all levels.
.. todo::
add promoters and splicing output
Overprediction of differential expression for low-level expressed
transcripts with :term:`cuffdiff` is a `known problem
<http://seqanswers.com/forums/showthread.php?t=6283&highlight=fpkm>`_.
Estimating coverage
-------------------
An important question in RNASeq analysis is whether the sequencing has been
done to sufficient depth. This question splits into two parts:
* What is the least abundant transcript that should be detectable
  with the number of reads mapped? See for example `PMID: 20565853
  <http://www.ncbi.nlm.nih.gov/pubmed/20565853>`_
* What is the minimum expression change between two conditions that
  can be reliably inferred? See for example `PMID: 21498551
  <http://www.ncbi.nlm.nih.gov/pubmed/21498551?dopt=Abstract>`_
These questions are difficult to answer due to the complexity of
RNASeq data: genes have multiple transcripts, transcript/gene
expression varies by orders of magnitude, and a large fraction of reads
might stem from repetitive RNA.
See `figure 4
<http://www.nature.com/nbt/journal/v28/n5/full/nbt.1621.html>`_ from
the cufflinks paper to get an idea about the reliability of transcript
construction with varying sequencing depth.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general information on how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start
with, use the files supplied with the Example_ data.
Input
-----
Reads
+++++
Reads are imported by placing files or linking to files in the :term:`working directory`.
The default file format assumes the following convention:
<sample>-<condition>-<replicate>.<suffix>
``sample`` and ``condition`` make up an :term:`experiment`, while
``replicate`` denotes the :term:`replicate` within an
:term:`experiment`. The ``suffix`` determines the file type. The
following suffixes/file types are possible:
sra
Short-Read Archive format. Reads will be extracted using the
:file:`fastq-dump` tool.
fastq.gz
Single-end reads in fastq format.
fastq.1.gz, fastq.2.gz
Paired-end reads in fastq format. The two fastq files must be
sorted by read-pair.
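For illustration, a paired-end experiment with two conditions and two
replicates each might be laid out as follows (hypothetical file names)::
   liver-induced-R1.fastq.1.gz  liver-induced-R1.fastq.2.gz
   liver-induced-R2.fastq.1.gz  liver-induced-R2.fastq.2.gz
   liver-control-R1.fastq.1.gz  liver-control-R1.fastq.2.gz
   liver-control-R2.fastq.1.gz  liver-control-R2.fastq.2.gz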
.. note::
Quality scores need to be of the same scale for all input
files. Thus it might be difficult to mix different formats.
Optional inputs
+++++++++++++++
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variable
:py:data:`annotations_database` and :py:data:`annotations_dir`.
On top of the default CGAT setup, the pipeline requires the following software to be in the
path:
+--------------------+-------------------+------------------------------------------------+
|*Program* |*Version* |*Purpose* |
+--------------------+-------------------+------------------------------------------------+
|bowtie_ |>=0.12.7 |read mapping |
+--------------------+-------------------+------------------------------------------------+
|tophat_ |>=1.4.0 |read mapping |
+--------------------+-------------------+------------------------------------------------+
|cufflinks_ |>=1.3.0 |transcription levels |
+--------------------+-------------------+------------------------------------------------+
|samtools |>=0.1.16 |bam/sam files |
+--------------------+-------------------+------------------------------------------------+
|bedtools | |working with intervals |
+--------------------+-------------------+------------------------------------------------+
|R/DESeq | |differential expression |
+--------------------+-------------------+------------------------------------------------+
|sra-tools | |extracting reads from .sra files |
+--------------------+-------------------+------------------------------------------------+
|picard |>=1.42 |bam/sam files. The .jar files need to be in your|
| | | CLASSPATH environment variable. |
+--------------------+-------------------+------------------------------------------------+
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
For each :term:`experiment` there will be the following tables:
<track>_cuffcompare_benchmark
results from comparing gene models against reference gene set
primary key: track
<track>_cuffcompare_transcripts
transcript expression values (FPKMs)
primary key: track+transfrag_id
foreign key: transfrag_id
<track>_cuffcompare_tracking
tracking information linking loci against transcripts.
primary key: transfrag_id,
foreign key: locus_id
<track>_cuffcompare_loci
locus information (number of transcripts within locus per track)
primary key: locus_id
Differential gene expression results
-------------------------------------
Differential expression is estimated for different genesets
with a variety of methods. Differential expression can be defined
for various levels.
<geneset>_<method>_<level>_diff
Results of the pairwise tests for differential expression
primary keys: track1, track2
<geneset>_<method>_<level>_levels
Expression levels
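For illustration, following the naming pattern above with the ``refcoding``
gene set, the ``cuffdiff`` method and the ``gene`` level, these tables
would be named ``refcoding_cuffdiff_gene_diff`` and
``refcoding_cuffdiff_gene_levels``.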
Example
=======
Example data is available at
http://www.cgat.org/~andreas/sample_data/pipeline_rnaseq.tgz. To run
the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_rnaseq.tgz
tar -xvzf pipeline_rnaseq.tgz
cd pipeline_rnaseq.dir
python <srcdir>/pipeline_rnaseq.py make full
.. note::
For the pipeline to run, install the :doc:`pipeline_annotations` as well.
Glossary
========
.. glossary::
cufflinks
cufflinks_ - transcriptome analysis
tophat
tophat_ - a read mapper to detect splice-junctions
deseq
deseq_ - differential expression analysis
cuffdiff
find differentially expressed transcripts. Part of cufflinks_.
cuffcompare
compare transcriptomes. Part of cufflinks_.
.. _cufflinks: http://cufflinks.cbcb.umd.edu/index.html
.. _tophat: http://tophat.cbcb.umd.edu/
.. _bowtie: http://bowtie-bio.sourceforge.net/index.shtml
.. _bamstats: http://www.agf.liv.ac.uk/454/sabkea/samStats_13-01-2011
.. _deseq: http://www-huber.embl.de/users/anders/DESeq/
Code
====
"""
# load modules
from ruffus import *
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import sys
import os
import re
import shutil
import itertools
import glob
import gzip
import collections
import random
import numpy
import sqlite3
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGAT.IndexedFasta as IndexedFasta
import CGAT.Tophat as Tophat
from rpy2.robjects import r as R
import rpy2.robjects as ro
from rpy2.rinterface import RRuntimeError
import CGAT.Expression as Expression
import CGATPipelines.PipelineGeneset as PipelineGeneset
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.PipelineRnaseq as PipelineRnaseq
import CGATPipelines.PipelineMappingQC as PipelineMappingQC
import CGAT.Stats as Stats
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelineTracks as PipelineTracks
# levels of cuffdiff analysis
# (no promotor and splice -> no lfold column)
CUFFDIFF_LEVELS = ("gene", "cds", "isoform", "tss")
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"],
defaults={
'annotations_dir': "",
'paired_end': False})
PARAMS = P.PARAMS
PARAMS_ANNOTATIONS = P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py")
PipelineGeneset.PARAMS = PARAMS
###################################################################
###################################################################
# Helper functions mapping tracks to conditions, etc
###################################################################
# collect sra nd fastq.gz tracks
TRACKS = PipelineTracks.Tracks(PipelineTracks.Sample3).loadFromDirectory(
glob.glob("*.sra"), "(\S+).sra") +\
PipelineTracks.Tracks(PipelineTracks.Sample3).loadFromDirectory(
glob.glob("*.fastq.gz"), "(\S+).fastq.gz") +\
PipelineTracks.Tracks(PipelineTracks.Sample3).loadFromDirectory(
glob.glob("*.fastq.1.gz"), "(\S+).fastq.1.gz") +\
PipelineTracks.Tracks(PipelineTracks.Sample3).loadFromDirectory(
glob.glob("*.csfasta.gz"), "(\S+).csfasta.gz")
ALL = PipelineTracks.Sample3()
EXPERIMENTS = PipelineTracks.Aggregate(TRACKS, labels=("condition", "tissue"))
CONDITIONS = PipelineTracks.Aggregate(TRACKS, labels=("condition", ))
TISSUES = PipelineTracks.Aggregate(TRACKS, labels=("tissue", ))
###################################################################
###################################################################
###################################################################
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
###################################################################
##################################################################
##################################################################
# gene sets to build - defined statically here, but could be parsed
# from configuration options
# Needs to be done in turn to be able to turn off potentially empty gene sets
# such as refnoncoding
GENESETS = ("novel", "abinitio", "reference",
"refcoding", "refnoncoding", "lincrna")
# reference gene set for QC purposes
REFERENCE = "refcoding"
###################################################################
###################################################################
###################################################################
##
###################################################################
if os.path.exists("pipeline_conf.py"):
L.info("reading additional configuration from pipeline_conf.py")
exec(compile(open("pipeline_conf.py").read(), "pipeline_conf.py", 'exec'))
USECLUSTER = True
#########################################################################
#########################################################################
#########################################################################
def writePrunedGTF(infile, outfile):
'''remove various gene models from a gtf file.
'''
to_cluster = USECLUSTER
cmds = []
rna_file = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_rna_gff"])
if "geneset_remove_repetetive_rna" in PARAMS:
cmds.append('''python %s/gtf2gtf.py
--method=remove-overlapping --gff-file=%s
--log=%s.log''' % (PARAMS["scriptsdir"],
rna_file, outfile))
if "geneset_remove_contigs" in PARAMS:
cmds.append('''awk '$1 !~ /%s/' ''' %
PARAMS["geneset_remove_contigs"])
cmds = " | ".join(cmds)
if infile.endswith(".gz"):
uncompress = "zcat"
else:
        # wasteful
uncompress = "cat"
if outfile.endswith(".gz"):
compress = "gzip"
else:
compress = "cat"
# remove \0 bytes within gtf file
statement = '''%(uncompress)s %(infile)s
| %(cmds)s
| cgat gtf2gtf --method=sort --sort-order=contig+gene --log=%(outfile)s.log
| %(compress)s > %(outfile)s'''
P.run()
#########################################################################
#########################################################################
#########################################################################
def mergeAndFilterGTF(infile, outfile, logfile):
'''sanitize transcripts file for cufflinks analysis.
Merge exons separated by small introns (< 5bp).
    Transcripts will be ignored that
* have very long introns (max_intron_size) (otherwise, cufflinks complains)
* are located on contigs to be ignored (usually: chrM, _random, ...)
This method preserves all features in a gtf file (exon, CDS, ...).
    Returns a dictionary mapping transcript_id to gene_id.
'''
max_intron_size = PARAMS["max_intron_size"]
c = E.Counter()
    outf = gzip.open(outfile, "wt")
E.info("filtering by contig and removing long introns")
contigs = set(IndexedFasta.IndexedFasta(
os.path.join(PARAMS["genome_dir"], PARAMS["genome"])).getContigs())
rx_contigs = None
if "geneset_remove_contigs" in PARAMS:
rx_contigs = re.compile(PARAMS["geneset_remove_contigs"])
E.info("removing contigs %s" % PARAMS["geneset_remove_contigs"])
rna_index = None
if "geneset_remove_repetetive_rna" in PARAMS:
rna_file = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_rna_gff"])
if not os.path.exists(rna_file):
E.warn("file '%s' to remove repetetive rna does not exist" %
rna_file)
else:
rna_index = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(rna_file, "r")))
E.info("removing ribosomal RNA in %s" % rna_file)
gene_ids = {}
logf = IOTools.openFile(logfile, "w")
logf.write("gene_id\ttranscript_id\treason\n")
for all_exons in GTF.transcript_iterator(GTF.iterator(IOTools.openFile(infile))):
c.input += 1
e = all_exons[0]
# filtering
if e.contig not in contigs:
c.missing_contig += 1
logf.write(
"\t".join((e.gene_id, e.transcript_id, "missing_contig")) + "\n")
continue
if rx_contigs and rx_contigs.search(e.contig):
c.remove_contig += 1
logf.write(
"\t".join((e.gene_id, e.transcript_id, "remove_contig")) + "\n")
continue
if rna_index and all_exons[0].source != 'protein_coding':
found = False
for exon in all_exons:
                if rna_index.contains(exon.contig, exon.start, exon.end):
found = True
break
if found:
logf.write(
"\t".join((e.gene_id, e.transcript_id, "overlap_rna")) + "\n")
c.overlap_rna += 1
continue
is_ok = True
# keep exons and cds separate by grouping by feature
all_exons.sort(key=lambda x: x.feature)
new_exons = []
for feature, exons in itertools.groupby(all_exons, lambda x: x.feature):
tmp = sorted(list(exons), key=lambda x: x.start)
gene_ids[tmp[0].transcript_id] = tmp[0].gene_id
l, n = tmp[0], []
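            # walk along the sorted exons: abort the transcript if a gap
            # exceeds max_intron_size, merge exons separated by < 5 bp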
for e in tmp[1:]:
d = e.start - l.end
if d > max_intron_size:
is_ok = False
break
elif d < 5:
l.end = max(e.end, l.end)
c.merged += 1
continue
n.append(l)
l = e
n.append(l)
new_exons.extend(n)
if not is_ok:
break
if not is_ok:
logf.write(
"\t".join((e.gene_id, e.transcript_id, "bad_transcript")) + "\n")
c.skipped += 1
continue
new_exons.sort(key=lambda x: x.start)
for e in new_exons:
outf.write("%s\n" % str(e))
c.exons += 1
c.output += 1
outf.close()
L.info("%s" % str(c))
return gene_ids
#########################################################################
#########################################################################
#########################################################################
@merge(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_all_gtf"]),
"reference.gtf.gz")
def buildReferenceGeneSet(infile, outfile):
'''sanitize ENSEMBL transcripts file for cufflinks analysis.
Merge exons separated by small introns (< 5bp).
Removes unwanted contigs according to configuration
value ``geneset_remove_contigs``.
    Removes transcripts overlapping ribosomal genes if
    ``geneset_remove_repetetive_rna`` is set. Protein
coding transcripts are not removed.
    Transcripts will be ignored that
* have very long introns (max_intron_size) (otherwise, cufflinks complains)
* are located on contigs to be ignored (usually: chrM, _random, ...)
    The result is run through cuffcompare in order to add the p_id and tss_id tags
    required by cuffdiff.
This will only keep sources of the type 'exon'. It will also remove
any transcripts not in the reference genome.
Cuffdiff requires overlapping genes to have different tss_id tags.
This gene is the source for most other gene sets in the pipeline.
'''
tmpfilename = P.getTempFilename(".")
tmpfilename2 = P.getTempFilename(".")
tmpfilename3 = P.getTempFilename(".")
gene_ids = mergeAndFilterGTF(
infile, tmpfilename, "%s.removed.gz" % outfile)
#################################################
E.info("adding tss_id and p_id")
# The p_id attribute is set if the fasta sequence is given.
# However, there might be some errors in cuffdiff downstream:
#
# cuffdiff: bundles.cpp:479: static void HitBundle::combine(const std::vector<HitBundle*, std::allocator<HitBundle*> >&, HitBundle&): Assertion `in_bundles[i]->ref_id() == in_bundles[i-1]->ref_id()' failed.
#
# I was not able to resolve this, it was a complex
# bug dependent on both the read libraries and the input reference gtf
# files
statement = '''
cuffcompare -r <( gunzip < %(tmpfilename)s )
-T
-s %(genome_dir)s/%(genome)s.fa
-o %(tmpfilename2)s
<( gunzip < %(tmpfilename)s )
<( gunzip < %(tmpfilename)s )
> %(outfile)s.log
'''
P.run()
#################################################
E.info("resetting gene_id and transcript_id")
# reset gene_id and transcript_id to ENSEMBL ids
# cufflinks patch:
# make tss_id and p_id unique for each gene id
outf = IOTools.openFile(tmpfilename3, "w")
map_tss2gene, map_pid2gene = {}, {}
inf = IOTools.openFile(tmpfilename2 + ".combined.gtf")
def _map(gtf, key, val, m):
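        # if this tss_id/p_id is already assigned to a different gene,
        # append 'a' until the identifier is unique to the current gene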
if val in m:
while gene_id != m[val]:
val += "a"
if val not in m:
break
m[val] = gene_id
gtf.setAttribute(key, val)
for gtf in GTF.iterator(inf):
transcript_id = gtf.oId
gene_id = gene_ids[transcript_id]
gtf.setAttribute("transcript_id", transcript_id)
gtf.setAttribute("gene_id", gene_id)
# set tss_id
try:
tss_id = gtf.tss_id
except AttributeError:
tss_id = None
try:
p_id = gtf.p_id
except AttributeError:
p_id = None
if tss_id:
_map(gtf, "tss_id", tss_id, map_tss2gene)
if p_id:
_map(gtf, "p_id", p_id, map_pid2gene)
outf.write(str(gtf) + "\n")
outf.close()
# sort gtf file
PipelineGeneset.sortGTF(tmpfilename3, outfile)
os.unlink(tmpfilename)
# make sure tmpfilename2 is NEVER empty
assert tmpfilename2
for x in glob.glob(tmpfilename2 + "*"):
os.unlink(x)
os.unlink(tmpfilename3)
#########################################################################
#########################################################################
#########################################################################
@transform(buildReferenceGeneSet,
suffix("reference.gtf.gz"),
"refnoncoding.gtf.gz")
def buildNoncodingGeneSet(infile, outfile):
    '''build a gene set of non-coding transcripts
    (lincRNA and processed_transcript biotypes).'''
to_cluster = True
statement = '''
zcat %(infile)s | awk '$2 == "lincRNA" || $2 == "processed_transcript"' | gzip > %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@merge(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_all_gtf"]),
"reference_with_cds.gtf.gz")
def buildReferenceGeneSetWithCDS(infile, outfile):
    '''build a sanitized reference gene set that
    retains CDS features.'''
mergeAndFilterGTF(infile, outfile, "%s.removed.gz" % outfile)
#########################################################################
#########################################################################
#########################################################################
@transform(buildReferenceGeneSet,
suffix("reference.gtf.gz"),
"%s.gtf.gz" % REFERENCE)
def buildCodingGeneSet(infile, outfile):
'''build a gene set with only protein coding
transcripts.
Genes are selected via their gene biotype in the GTF file.
Note that this set will contain all transcripts of protein
coding genes, including processed transcripts.
This set includes UTR and CDS.
'''
to_cluster = True
statement = '''
zcat %(infile)s | awk '$2 == "protein_coding"' | gzip > %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform(buildReferenceGeneSet,
suffix("reference.gtf.gz"),
"refcodingtranscripts.gtf.gz")
def buildCodingTranscriptSet(infile, outfile):
'''build a gene set with only protein coding transcripts.
Protein coding transcripts are selected via the ensembl
transcript biotype
'''
dbh = connect()
statement = '''SELECT DISTINCT transcript_id FROM transcript_info WHERE transcript_biotype = 'protein_coding' '''
cc = dbh.cursor()
transcript_ids = set([x[0] for x in cc.execute(statement)])
inf = IOTools.openFile(infile)
outf = IOTools.openFile(outfile, 'w')
for g in GTF.iterator(inf):
if g.transcript_id in transcript_ids:
outf.write(str(g) + "\n")
outf.close()
inf.close()
#########################################################################
#########################################################################
#########################################################################
@transform(buildCodingGeneSet,
suffix("%s.gtf.gz" % REFERENCE),
"%s.gff.gz" % REFERENCE)
def buildGeneRegions(infile, outfile):
'''annotate genomic regions with reference gene set.
'''
PipelineGeneset.buildGeneRegions(infile, outfile)
@transform(buildGeneRegions,
suffix("%s.gff.gz" % REFERENCE),
"%s.terminal_exons.bed.gz" % REFERENCE)
def buildTerminalExons(infile, outfile):
'''output terminal truncated exons.'''
size = 50
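    # report the terminal (3') 50 bp of the last exon of each gene
    # as a BED6 interval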
outf1 = IOTools.openFile(outfile, "w")
for gff in GTF.flat_gene_iterator(GTF.iterator_filtered(GTF.iterator(IOTools.openFile(infile)),
feature="exon")):
gene_id, contig, strand = gff[0].gene_id, gff[0].contig, gff[0].strand
gff.sort(key=lambda x: x.start)
if strand == "-":
exon = gff[0].start, gff[0].end
cut_exon = gff[0].start, gff[0].start + size
elif strand == "+":
exon = gff[-1].start, gff[-1].end
cut_exon = gff[-1].end - size, gff[-1].end
else:
continue
outf1.write("%s\t%i\t%i\t%s\t%i\t%s\n" %
(contig, cut_exon[0], cut_exon[1], gene_id, 0, strand))
outf1.close()
#########################################################################
#########################################################################
#########################################################################
@merge(os.path.join(PARAMS["annotations_dir"], PARAMS_ANNOTATIONS["interface_geneset_flat_gtf"]),
"introns.gtf.gz")
def buildIntronGeneModels(infile, outfile):
    '''build protein-coding intron-transcripts.
    Intron-transcripts consist of the introns of protein-coding
    transcripts, i.e. the complement of their exons.
    Only protein coding genes are taken.
    10 bp are truncated from either end of an intron, and introns
    need to have a minimum length of 100 bp.
Introns from nested genes might overlap, but all exons
are removed.
'''
to_cluster = True
filename_exons = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_exons_gtf"])
statement = '''gunzip
< %(infile)s
| awk '$2 == "protein_coding"'
| cgat gtf2gtf --method=sort --sort-order=gene
| cgat gtf2gtf
--method=exons2introns
--intron-min-length=100
--intron-border=10
--log=%(outfile)s.log
| cgat gff2gff
--crop=%(filename_exons)s
--log=%(outfile)s.log
| cgat gtf2gtf
--method=set-transcript-to-gene
--log=%(outfile)s.log
| perl -p -e 's/intron/exon/'
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform(buildCodingGeneSet, suffix(".gtf.gz"), ".junctions")
def buildJunctions(infile, outfile):
'''build file with splice junctions from gtf file.
A junctions file is a better option than supplying a GTF
file, as parsing the latter often fails. See:
http://seqanswers.com/forums/showthread.php?t=7563
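    The output lists one splice site per line with the columns contig,
    position of the last base of the left exon (0-based), position of
    the first base of the right exon (0-based) and strand, for example
    (coordinates are made up)::
       chr1    14829   14970   +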
'''
outf = IOTools.openFile(outfile, "w")
njunctions = 0
for gffs in GTF.transcript_iterator(GTF.iterator(IOTools.openFile(infile, "r"))):
gffs.sort(key=lambda x: x.start)
end = gffs[0].end
for gff in gffs[1:]:
# subtract one: these are not open/closed coordinates but the 0-based coordinates
# of first and last residue that are to be kept (i.e., within the
# exon).
outf.write("%s\t%i\t%i\t%s\n" %
(gff.contig, end - 1, gff.start, gff.strand))
end = gff.end
njunctions += 1
outf.close()
if njunctions == 0:
E.warn('no junctions found in gene set')
return
else:
E.info('found %i junctions before removing duplicates' % njunctions)
# make unique
statement = '''mv %(outfile)s %(outfile)s.tmp;
cat < %(outfile)s.tmp | sort | uniq > %(outfile)s;
rm -f %(outfile)s.tmp; '''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform(buildCodingGeneSet, suffix(".gtf.gz"), ".fa")
def buildReferenceTranscriptome(infile, outfile):
'''build reference transcriptome.
The reference transcriptome contains all known
protein coding transcripts.
The sequences include both UTR and CDS.
'''
to_cluster = USECLUSTER
gtf_file = P.snip(infile, ".gz")
genome_file = os.path.abspath(
os.path.join(PARAMS["bowtie_genome_dir"], PARAMS["genome"] + ".fa"))
statement = '''
zcat %(infile)s
| awk '$3 == "exon"' > %(gtf_file)s;
gtf_to_fasta %(gtf_file)s %(genome_file)s %(outfile)s;
checkpoint;
samtools faidx %(outfile)s
'''
P.run()
dest = P.snip(gtf_file, ".gtf") + ".gff"
if not os.path.exists(dest):
os.symlink(gtf_file, dest)
prefix = P.snip(outfile, ".fa")
# build raw index
statement = '''
%(bowtie_executable)s-build -f %(outfile)s %(prefix)s >> %(outfile)s.log 2>&1
'''
P.run()
# build color space index
# statement = '''
# %(bowtie_executable)s-build -C -f %(outfile)s %(prefix)s_cs >> %(outfile)s.log 2>&1
# '''
# P.run()
#########################################################################
#########################################################################
#########################################################################
#########################################################################
# Nick - added building of a mask file for omitting certain regions during
# gene model building
@files(os.path.join(PARAMS["annotations_dir"], "geneset_all.gtf.gz"), "geneset_mask.gtf")
def buildMaskGtf(infile, outfile):
'''
    This takes the ENSEMBL annotations (geneset_all.gtf.gz) and writes out all entries
    whose 'source' matches "rRNA" or whose 'contig' matches "chrM", for use as a
    mask file with cufflinks.
'''
geneset = IOTools.openFile(infile)
    outf = open(outfile, "w")
for entry in GTF.iterator(geneset):
if re.findall("rRNA", entry.source) or re.findall("chrM", entry.contig):
outf.write("\t".join((list(map(str, [entry.contig, entry.source, entry.feature, entry.start, entry.end, ".", entry.strand, ".",
"transcript_id" + " " + '"' + entry.transcript_id + '"' + ";" + " " + "gene_id" + " " + '"' + entry.gene_id + '"'])))) + "\n")
outf.close()
#########################################################################
#########################################################################
#########################################################################
#########################################################################
@transform(("*.fastq.1.gz",
"*.fastq.gz",
"*.sra",
"*.csfasta.gz",
"*.csfasta.F3.gz"),
regex(r"(\S+).(fastq.1.gz|fastq.gz|sra|csfasta.gz|csfasta.F3.gz)"),
add_inputs(buildReferenceTranscriptome),
r"\1.trans.bam")
def mapReadsWithBowtieAgainstTranscriptome(infiles, outfile):
'''map reads from short read archive sequence using bowtie against
transcriptome data.
'''
    # Mapping will permit up to one mismatch. This is sufficient
    # as the downstream filter in rnaseq_bams2bam requires that the
    # number of mismatches is less than the genomic number of mismatches.
    # Change this if the number of permitted mismatches for the genome
    # increases.
# Output all valid matches in the best stratum. This will
# inflate the file sizes due to matches to alternative transcripts
# but otherwise matches to paralogs will be missed (and such
# reads would be filtered out).
job_threads = PARAMS["bowtie_threads"]
m = PipelineMapping.BowtieTranscripts(
executable=P.substituteParameters(**locals())["bowtie_executable"])
infile, reffile = infiles
prefix = P.snip(reffile, ".fa")
bowtie_options = "%s --best --strata -a" % PARAMS["bowtie_options"]
statement = m.build((infile,), outfile)
P.run()
#########################################################################
#########################################################################
#########################################################################
##
#########################################################################
@follows(buildReferenceTranscriptome)
@transform(("*.fastq.1.gz",
"*.fastq.gz",
"*.sra",
"*.csfasta.gz",
"*.csfasta.F3.gz",
),
regex(r"(\S+).(fastq.1.gz|fastq.gz|sra|csfasta.gz|csfasta.F3.gz)"),
add_inputs(buildJunctions, buildReferenceTranscriptome),
r"\1.genome.bam")
def mapReadsWithTophat(infiles, outfile):
'''map reads from .fastq or .sra files.
A list with known splice junctions is supplied.
If tophat fails with an error such as::
Error: segment-based junction search failed with err =-6
what(): std::bad_alloc
it means that it ran out of memory.
'''
job_threads = PARAMS["tophat_threads"]
if "--butterfly-search" in PARAMS["tophat_options"]:
# for butterfly search - require insane amount of
# RAM.
job_options += " -l mem_free=8G"
else:
job_options += " -l mem_free=2G"
to_cluster = USECLUSTER
m = PipelineMapping.Tophat(
executable=P.substituteParameters(**locals())["tophat_executable"])
infile, reffile, transcriptfile = infiles
tophat_options = PARAMS["tophat_options"] + \
" --raw-juncs %(reffile)s " % locals()
# Nick - added the option to map to the reference transcriptome first
# (built within the pipeline)
if PARAMS["tophat_include_reference_transcriptome"]:
prefix = os.path.abspath(P.snip(transcriptfile, ".fa"))
tophat_options = tophat_options + \
" --transcriptome-index=%s -n 2" % prefix
statement = m.build((infile,), outfile)
P.run()
#########################################################################
#########################################################################
#########################################################################
##
#########################################################################
@merge((mapReadsWithTophat, buildJunctions), "junctions.fa")
def buildJunctionsDB(infiles, outfile):
'''build a database of all junctions.'''
to_cluster = USECLUSTER
outfile_junctions = outfile + ".junctions.bed.gz"
min_anchor_length = 3
read_length = 50
tmpfile = P.getTempFile(".")
for infile in infiles:
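        # tophat's junctions.bed stores the strand in column 6 (index 5),
        # while the .junctions files built above store it in column 4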
if infile.endswith(".bam"):
junctions_file = P.snip(infile, ".bam") + ".junctions.bed.gz"
columns = (0, 1, 2, 5)
else:
junctions_file = infile
columns = (0, 1, 2, 3)
if not os.path.exists(junctions_file):
E.warn("can't find junctions file '%s'" % junctions_file)
continue
inf = IOTools.openFile(junctions_file)
for line in inf:
if line.startswith("#"):
continue
if line.startswith("track"):
continue
data = line[:-1].split("\t")
try:
tmpfile.write("\t".join([data[x] for x in columns]) + "\n")
except IndexError:
raise IndexError("parsing error in line %s" % line)
tmpfile.close()
tmpfilename = tmpfile.name
statement = '''
sort %(tmpfilename)s | gzip > %(outfile_junctions)s
'''
P.run()
os.unlink(tmpfilename)
E.info("building junctions database")
statement = '''
juncs_db %(min_anchor_length)i %(read_length)i
<( zcat %(outfile_junctions)s )
/dev/null /dev/null
%(genome_dir)s/%(genome)s.fa
> %(outfile)s
2> %(outfile)s.log
'''
P.run()
E.info("indexing junctions database")
prefix = P.snip(outfile, ".fa")
# build raw index
statement = '''
%(bowtie_executable)s-build -f %(outfile)s %(prefix)s >> %(outfile)s.log 2>&1
'''
P.run()
# build color space index
# statement = '''
# %(bowtie_executable)s-build -C -f %(outfile)s %(prefix)s_cs >> %(outfile)s.log 2>&1
# '''
    # P.run()
if "tophat_add_separate_junctions" in PARAMS and PARAMS["tophat_add_separate_junctions"]:
@transform(("*.fastq.1.gz",
"*.fastq.gz",
"*.sra",
"*.csfasta.gz",
"*.csfasta.F3.gz"),
regex(
r"(\S+).(fastq.1.gz|fastq.gz|sra|csfasta.gz|csfasta.F3.gz)"),
add_inputs(buildJunctionsDB,
os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_contigs"])),
r"\1.junc.bam")
def mapReadsWithBowtieAgainstJunctions(infiles, outfile):
'''map reads from short read archive sequence using bowtie against
junctions.
'''
        # Mapping will permit up to one mismatch. This is sufficient
        # as the downstream filter in rnaseq_bams2bam requires that the
        # number of mismatches is less than the genomic number of mismatches.
        # Change this if the number of permitted mismatches for the genome
        # increases.
# Output all valid matches in the best stratum. This will
# inflate the file sizes due to matches to alternative transcripts
# but otherwise matches to paralogs will be missed (and such
# reads would be filtered out).
job_threads = PARAMS["bowtie_threads"]
m = PipelineMapping.BowtieJunctions()
infile, reffile, contigsfile = infiles
prefix = P.snip(reffile, ".fa")
bowtie_options = "%s --best --strata -a" % PARAMS["bowtie_options"]
statement = m.build((infile,), outfile)
P.run()
else:
@transform(("*.fastq.1.gz",
"*.fastq.gz",
"*.sra",
"*.csfasta.gz",
"*.csfasta.F3.gz"),
regex(
r"(\S+).(fastq.1.gz|fastq.gz|sra|csfasta.gz|csfasta.F3.gz)"),
r"\1.junc.bam")
def mapReadsWithBowtieAgainstJunctions(infiles, outfile):
P.touch(outfile)
############################################################
############################################################
############################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "fastqc")))
@transform(mapReadsWithTophat, suffix(".bam"), ".fastqc")
def buildFastQCReport(infile, outfile):
'''run fastqc on aligned reads.'''
to_cluster = USECLUSTER
statement = '''fastqc --outdir=%(exportdir)s/fastqc %(infile)s >& %(outfile)s'''
P.run()
############################################################
############################################################
############################################################
@collate((mapReadsWithTophat,
mapReadsWithBowtieAgainstTranscriptome,
mapReadsWithBowtieAgainstJunctions),
regex(r"(.+)\..*.bam"),
add_inputs(buildCodingGeneSet),
r"\1.accepted.bam")
def buildBAMs(infiles, outfile):
'''reconcile genomic and transcriptome matches.
'''
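    # unpack the collated per-track inputs: index 0 holds the genomic bam
    # (checked by the assert below), index 1 the junction bam and index 2
    # the transcriptome bam; each is paired with the coding gene set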
genome, transcriptome, junctions, reffile = infiles[
0][0], infiles[2][0], infiles[1][0], infiles[0][1]
outfile_mismapped = P.snip(outfile, ".accepted.bam") + ".mismapped.bam"
assert genome.endswith(".genome.bam")
to_cluster = USECLUSTER
options = []
if "tophat_unique" in PARAMS and PARAMS["tophat_unique"]:
options.append("--unique")
if "tophat_remove_contigs" in PARAMS and PARAMS["tophat_remove_contigs"]:
options.append("--remove-contigs=%s" % PARAMS["tophat_remove_contigs"])
if "tophat_remove_rna" in PARAMS and PARAMS["tophat_remove_rna"]:
options.append("--filename-regions=<( zcat %s | grep 'repetetive_rna' )" %
(os.path.join(
PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_genomic_context_bed"])))
if "tophat_remove_mismapped" in PARAMS and PARAMS["tophat_remove_mismapped"]:
options.append("--transcripts-gtf-file=%(transcriptome)s" % locals())
if "tophat_add_separate_junctions" in PARAMS and PARAMS["tophat_add_separate_junctions"]:
options.append("--junctions-bed-file=%(junctions)s" % locals())
options = " ".join(options)
tmpfile = P.getTempFilename()
prefix = P.snip(outfile, ".bam")
# map numbered transcript id to real transcript id
map_file_statement = '''<( cat refcoding.fa | awk 'BEGIN { printf("id\\ttranscript_id\\n");} /^>/ {printf("%s\\t%s\\n", substr($1,2),$3)}' )'''
if os.path.exists("%(outfile)s.log" % locals()):
os.remove("%(outfile)s.log" % locals())
statement = '''
cgat bams2bam
--force-output
--gtf-file=%(reffile)s
--filename-mismapped=%(outfile_mismapped)s
--log=%(outfile)s.log
--filename-stats=%(outfile)s.tsv
--map-tsv-file=%(map_file_statement)s
%(options)s
%(genome)s
| samtools sort - %(prefix)s 2>&1 >> %(outfile)s.log;
checkpoint;
samtools index %(outfile_mismapped)s 2>&1 >> %(outfile)s.log;
checkpoint;
samtools index %(outfile)s 2>&1 >> %(outfile)s.log;
'''
P.run()
############################################################
############################################################
############################################################
@transform(buildBAMs, suffix(".accepted.bam"), ".mismapped.bam")
def buildMismappedBAMs(infile, outfile):
'''pseudo target - update the mismapped bam files.'''
P.touch(outfile)
############################################################
############################################################
############################################################
@transform((mapReadsWithBowtieAgainstTranscriptome),
suffix(".bam"),
add_inputs(buildReferenceTranscriptome),
".picard_inserts")
def buildPicardInsertSize(infiles, outfile):
'''build alignment stats using picard.
    Note that picard counts reads but they are in fact alignments.
'''
infile, reffile = infiles
PipelineMappingQC.buildPicardAlignmentStats(infile,
outfile,
reffile)
############################################################
############################################################
############################################################
@transform((mapReadsWithTophat, buildBAMs, buildMismappedBAMs),
suffix(".bam"), ".picard_stats")
def buildPicardStats(infile, outfile):
'''build alignment stats using picard.
    Note that picard counts reads but they are in fact alignments.
'''
PipelineMappingQC.buildPicardAlignmentStats(infile, outfile,
os.path.join(PARAMS["bowtie_genome_dir"],
PARAMS["genome"] + ".fa"))
############################################################
############################################################
############################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "bamstats")))
@transform((buildMismappedBAMs, mapReadsWithTophat, buildBAMs),
suffix(".bam"), ".bam.report")
def buildBAMReports(infile, outfile):
'''build alignment stats using bamstats
'''
to_cluster = USECLUSTER
# requires a large amount of memory to run.
# only use high-mem machines
job_options = "-l mem_free=32G"
# xvfb-run -f ~/.Xauthority -a
track = P.snip(infile, ".bam")
# use a fake X display in order to avoid problems with
# no X connection on the cluster
xvfb_command = IOTools.which("xvfb-run")
# permit multiple servers using -a option
if xvfb_command:
xvfb_command += " -a "
# bamstats can not accept a directory as output, hence cd to exportdir
statement = '''
cd %(exportdir)s/bamstats;
%(xvfb_command)s
%(cmd-run)s bamstats -i ../../%(infile)s -v html -o %(track)s.html
--qualities --mapped --lengths --distances --starts
>& ../../%(outfile)s
'''
P.run()
############################################################
############################################################
############################################################
@merge(buildPicardStats, "picard_stats.load")
def loadPicardStats(infiles, outfile):
'''merge alignment stats into single tables.'''
PipelineMappingQC.loadPicardAlignmentStats(infiles, outfile)
############################################################
############################################################
############################################################
@merge(mapReadsWithTophat, "tophat_stats.tsv")
def buildTophatStats(infiles, outfile):
def _select(lines, pattern):
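        # return the groups of the first matching line; a single group is
        # returned as a scalar and a ValueError raised if nothing matches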
x = re.compile(pattern)
for line in lines:
r = x.search(line)
if r:
g = r.groups()
if len(g) > 1:
return g
else:
return g[0]
raise ValueError("pattern '%s' not found %s" % (pattern, lines))
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join(("track",
"reads_in",
"reads_removed",
"reads_out",
"junctions_loaded",
"junctions_found",
"possible_splices")) + "\n")
for infile in infiles:
track = P.snip(infile, ".bam")
indir = infile + ".logs"
try:
fn = os.path.join(indir, "prep_reads.log")
lines = IOTools.openFile(fn).readlines()
reads_removed, reads_in = list(map(
int, _select(lines, "(\d+) out of (\d+) reads have been filtered out")))
reads_out = reads_in - reads_removed
prep_reads_version = _select(lines, "prep_reads (.*)$")
except IOError:
reads_removed, reads_in, reads_out, prep_reads_version = 0, 0, 0, "na"
try:
fn = os.path.join(indir, "reports.log")
lines = IOTools.openFile(fn).readlines()
tophat_reports_version = _select(lines, "tophat_reports (.*)$")
junctions_loaded = int(_select(lines, "Loaded (\d+) junctions"))
junctions_found = int(
_select(lines, "Found (\d+) junctions from happy spliced reads"))
except IOError:
junctions_loaded, junctions_found = 0, 0
fn = os.path.join(indir, "segment_juncs.log")
if os.path.exists(fn):
lines = open(fn).readlines()
if len(lines) > 0:
segment_juncs_version = _select(lines, "segment_juncs (.*)$")
try:
possible_splices = int(
_select(lines, "Reported (\d+) total possible splices"))
except ValueError:
E.warn("could not find splices")
possible_splices = ""
else:
segment_juncs_version = "na"
possible_splices = ""
else:
segment_juncs_version = "na"
possible_splices = ""
# fix for paired end reads - tophat reports pairs, not reads
if PARAMS["paired_end"]:
reads_in *= 2
reads_out *= 2
reads_removed *= 2
outf.write("\t".join(map(str, (track,
reads_in, reads_removed, reads_out,
junctions_loaded, junctions_found, possible_splices))) + "\n")
outf.close()
############################################################
############################################################
############################################################
@transform(buildTophatStats, suffix(".tsv"), ".load")
def loadTophatStats(infile, outfile):
P.load(infile, outfile)
############################################################
############################################################
############################################################
@merge(buildBAMs, "mapping_stats.load")
def loadMappingStats(infiles, outfile):
header = ",".join([P.snip(x, ".bam") for x in infiles])
filenames = " ".join(["%s.tsv" % x for x in infiles])
tablename = P.toTable(outfile)
statement = """cgat combine_tables
--header-names=%(header)s
--missing-value=0
--ignore-empty
%(filenames)s
| perl -p -e "s/bin/track/"
| perl -p -e "s/unique/unique_alignments/"
| cgat table2table --transpose
| cgat csv2db
--add-index=track
--table=%(tablename)s
> %(outfile)s
"""
P.run()
############################################################
############################################################
############################################################
@transform((mapReadsWithTophat, buildBAMs, buildMismappedBAMs),
suffix(".bam"),
".readstats")
def buildBAMStats(infile, outfile):
'''count number of reads mapped, duplicates, etc.
'''
to_cluster = USECLUSTER
rna_file = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_rna_gff"])
    statement = '''cgat bam2stats
--force-output
--mask-bed-file=%(rna_file)s
--ignore-masked-reads
--output-filename-pattern=%(outfile)s.%%s
< %(infile)s
> %(outfile)s
'''
P.run()
############################################################
############################################################
############################################################
@transform((mapReadsWithBowtieAgainstTranscriptome,),
suffix(".bam"),
".readstats")
def buildTranscriptBAMStats(infile, outfile):
'''count number of reads mapped, duplicates, etc.
'''
to_cluster = USECLUSTER
    statement = '''cgat bam2stats
--force-output
--output-filename-pattern=%(outfile)s.%%s
< %(infile)s
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@merge(buildBAMStats, "bam_stats.load")
def loadBAMStats(infiles, outfile):
    '''import bam statistics.'''
header = ",".join([P.snip(x, ".readstats") for x in infiles])
# filenames = " ".join( [ "<( cut -f 1,2 < %s)" % x for x in infiles ] )
filenames = " ".join(infiles)
tablename = P.toTable(outfile)
E.info("loading bam stats - summary")
statement = """cgat combine_tables
--header-names=%(header)s
--missing-value=0
--ignore-empty
--take=2
%(filenames)s
| perl -p -e "s/bin/track/"
| perl -p -e "s/unique/unique_alignments/"
| cgat table2table --transpose
| cgat csv2db
--add-index=track
--table=%(tablename)s
> %(outfile)s
"""
P.run()
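    # load the additional per-alignment histograms written by bam2stats
    # (nm: number of mismatches, nh: number of hits) as separate tables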
for suffix in ("nm", "nh"):
E.info("loading bam stats - %s" % suffix)
filenames = " ".join(["%s.%s" % (x, suffix) for x in infiles])
tname = "%s_%s" % (tablename, suffix)
statement = """cgat combine_tables
--header-names=%(header)s
--skip-titles
--missing-value=0
--ignore-empty
%(filenames)s
| perl -p -e "s/bin/%(suffix)s/"
| cgat csv2db
--allow-empty-file
--table=%(tname)s
>> %(outfile)s
"""
P.run()
############################################################
############################################################
############################################################
@transform((mapReadsWithTophat, buildBAMs, buildMismappedBAMs),
suffix(".bam"),
add_inputs(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_genomic_context_bed"])),
".contextstats")
def buildContextStats(infiles, outfile):
'''build mapping context stats.
    Examines the genomic context where reads align.
    A read is assigned to the genomic context that it
    overlaps by at least 50%. Thus some reads mapping to
    several contexts might be dropped.
'''
infile, reffile = infiles
min_overlap = 0.5
to_cluster = USECLUSTER
statement = '''
cgat bam_vs_bed
--min-overlap=%(min_overlap)f
--log=%(outfile)s.log
%(infile)s %(reffile)s
> %(outfile)s
'''
P.run()
############################################################
############################################################
############################################################
@follows(loadBAMStats)
@merge(buildContextStats, "context_stats.load")
def loadContextStats(infiles, outfile):
"""load context mapping statistics."""
header = ",".join([P.snip(x, ".contextstats") for x in infiles])
filenames = " ".join(infiles)
tablename = P.toTable(outfile)
statement = """cgat combine_tables
--header-names=%(header)s
--missing-value=0
--skip-titles
%(filenames)s
| perl -p -e "s/bin/track/; s/\?/Q/g"
| cgat table2table --transpose
| cgat csv2db
--add-index=track
--table=%(tablename)s
> %(outfile)s
"""
P.run()
dbhandle = sqlite3.connect(PARAMS["database_name"])
cc = Database.executewait(
dbhandle, '''ALTER TABLE %(tablename)s ADD COLUMN mapped INTEGER''' % locals())
statement = '''UPDATE %(tablename)s SET mapped =
(SELECT b.mapped FROM bam_stats AS b
WHERE %(tablename)s.track = b.track)''' % locals()
cc = Database.executewait(dbhandle, statement)
dbhandle.commit()
#########################################################################
#########################################################################
#########################################################################
@transform(buildBAMs,
suffix(".accepted.bam"),
add_inputs(buildMaskGtf),
r"\1.gtf.gz")
def buildGeneModels(infiles, outfile):
'''build transcript models for each track separately.
'''
infile, mask_file = infiles
job_threads = PARAMS["cufflinks_threads"]
track = os.path.basename(P.snip(outfile, ".gtf.gz"))
tmpfilename = P.getTempFilename(".")
if os.path.exists(tmpfilename):
os.unlink(tmpfilename)
infile = os.path.abspath(infile)
outfile = os.path.abspath(outfile)
# note: cufflinks adds \0 bytes to gtf file - replace with '.'
genome_file = os.path.abspath(
os.path.join(PARAMS["bowtie_genome_dir"], PARAMS["genome"] + ".fa"))
options = PARAMS["cufflinks_options"]
    # Nick - added options to mask rRNA and chrM from gene model building.
# Also added options for faux reads. RABT - see cufflinks manual
if PARAMS["cufflinks_include_mask"]:
mask_file = os.path.abspath(mask_file)
options = options + " -M %s" % mask_file # add mask option
if PARAMS["cufflinks_include_guide"]:
# add reference for RABT - this is all genes in reference ensembl
# geneset so includes known lincRNA and transcribed pseudogenes
# TODO: remove explicit file reference
statement = '''zcat reference.gtf.gz > reference.gtf'''
P.run()
reference = os.path.abspath("reference.gtf")
options = options + " --GTF-guide %s" % reference
statement = '''mkdir %(tmpfilename)s;
cd %(tmpfilename)s;
cufflinks
--label %(track)s
--num-threads %(cufflinks_threads)i
--library-type %(tophat_library_type)s
--frag-bias-correct %(genome_file)s
--multi-read-correct
%(options)s
%(infile)s
>& %(outfile)s.log;
perl -p -e "s/\\0/./g" < transcripts.gtf | gzip > %(outfile)s;
'''
P.run()
# version 0.9.3
# mv genes.expr %(outfile)s.genes.expr;
# mv transcripts.expr %(outfile)s.transcripts.expr
shutil.rmtree(tmpfilename)
#########################################################################
#########################################################################
#########################################################################
@transform(buildGeneModels,
suffix(".gtf.gz"),
add_inputs(buildCodingGeneSet),
".class.tsv.gz")
def oldClassifyTranscripts(infiles, outfile):
'''classify transcripts against a reference gene set.
'''
infile, reffile = infiles
statement = '''gunzip
< %(infile)s
| cgat gtf2gtf --method=sort --sort-order=transcript
| %(cmd-farm)s --split-at-column=1 --output-header --log=%(outfile)s.log --max-files=60
"cgat gtf2table
--counter=position
--counter=classifier
--section=exons
--counter=length
--counter=splice
--counter=splice-comparison
--log=%(outfile)s.log
--filename-format=gff
--gff-file=%(annotation)s
--genome-file=%(genome_dir)s/%(genome)s"
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform("*.bam",
suffix(".bam"),
add_inputs(buildCodingGeneSet),
".ref.gtf.gz")
def estimateExpressionLevelsInReference(infiles, outfile):
'''estimate expression levels against a set of reference gene models.
'''
job_threads = PARAMS["cufflinks_threads"]
    track = os.path.basename(P.snip(outfile, ".gtf.gz"))
tmpfilename = P.getTempFilename(".")
if os.path.exists(tmpfilename):
os.unlink(tmpfilename)
bamfile, gtffile = infiles
gtffile = os.path.abspath(gtffile)
bamfile = os.path.abspath(bamfile)
outfile = os.path.abspath(outfile)
# note: cufflinks adds \0 bytes to gtf file - replace with '.'
# increase max-bundle-length to 4.5Mb due to Galnt-2 in mm9 with a 4.3Mb
# intron.
statement = '''mkdir %(tmpfilename)s;
cd %(tmpfilename)s;
cufflinks --label %(track)s
--GTF=<(gunzip < %(gtffile)s)
--num-threads=%(cufflinks_threads)i
--frag-bias-correct %(bowtie_genome_dir)s/%(genome)s.fa
--library-type %(tophat_library_type)s
%(cufflinks_options)s
%(bamfile)s
>& %(outfile)s.log;
perl -p -e "s/\\0/./g" < transcripts.gtf | gzip > %(outfile)s;
'''
P.run()
shutil.rmtree(tmpfilename)
#########################################################################
#########################################################################
#########################################################################
@transform((estimateExpressionLevelsInReference, buildGeneModels),
suffix(".gtf.gz"),
"_gene_expression.load")
def loadExpressionLevels(infile, outfile):
'''load expression level measurements.'''
track = P.snip(outfile, "_gene_expression.load")
P.load(infile + ".genes.expr",
outfile=track + "_gene_expression.load",
options="--add-index=gene_id")
tablename = track + "_transcript_expression"
infile2 = infile + ".transcripts.expr"
statement = '''cat %(infile2)s
| perl -p -e "s/trans_id/transcript_id/"
| cgat csv2db %(csv2db_options)s
--add-index=transcript_id
--table=%(tablename)s
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
def runCuffCompare(infiles, outfile, reffile):
'''run cuffcompare.
    Will create a .tmap and .refmap output file for each input file.
'''
to_cluster = USECLUSTER
tmpdir = P.getTempDir(".")
cmd_extract = "; ".join(
["gunzip < %s > %s/%s" % (x, tmpdir, x) for x in infiles])
genome = os.path.join(
PARAMS["bowtie_genome_dir"], PARAMS["genome"]) + ".fa"
genome = os.path.abspath(genome)
# note: cuffcompare adds \0 bytes to gtf file - replace with '.'
statement = '''
%(cmd_extract)s;
cuffcompare -o %(outfile)s
-s %(genome)s
-r <( gunzip < %(reffile)s)
%(inf)s
>& %(outfile)s.log;
checkpoint;
perl -p -e "s/\\0/./g" < %(outfile)s.combined.gtf | gzip > %(outfile)s.combined.gtf.gz;
checkpoint;
rm -f %(outfile)s.combined.gtf;
checkpoint;
gzip -f %(outfile)s.{tracking,loci};
'''
# the following is a hack. I was running into the same problem as described here:
# http://seqanswers.com/forums/showthread.php?t=5809
# the bug depended on various things, including the order of arguments
# on the command line. Until this is resolved, simply try several times
# with random order of command line arguments.
for t in range(PARAMS["cufflinks_ntries"]):
random.shuffle(infiles)
inf = " ".join(["%s/%s" % (tmpdir, x) for x in infiles])
try:
P.run()
break
except:
E.warn("caught exception - trying again")
shutil.rmtree(tmpdir)
#########################################################################
#########################################################################
#########################################################################
@follows(buildGeneModels)
@files([((["%s.gtf.gz" % y.asFile() for y in EXPERIMENTS[x]], buildCodingGeneSet),
"%s.cuffcompare" % x.asFile())
for x in EXPERIMENTS])
def compareTranscriptsPerExperiment(infiles, outfile):
'''compare transcript models between replicates within each experiment.'''
infiles, reffile = infiles
runCuffCompare(infiles, outfile, reffile)
#########################################################################
#########################################################################
#########################################################################
@merge(buildGeneModels, "%s.cuffcompare" % ALL.asFile())
def compareTranscriptsBetweenExperiments(infiles, outfile):
    '''compare transcript models across all replicates and experiments.'''
# needs to be parameterized, unfortunately @merge has no add_inputs
reffile = "%s.gtf.gz" % REFERENCE
runCuffCompare(infiles, outfile, reffile)
#########################################################################
#########################################################################
#########################################################################
@transform((compareTranscriptsBetweenExperiments,
compareTranscriptsPerExperiment),
suffix(".cuffcompare"),
"_cuffcompare.load")
def loadTranscriptComparison(infile, outfile):
    '''load data from transcript comparison.
    This task creates the following tables:
    <track>_tracks
    <track>_benchmark
    <track>_loci
    The following tables will only be populated if a tracking
    file is present, i.e. if there are multiple replicates in
    a sample:
    <track>_tracking
    <track>_transcripts
    <track>_fpkm
    '''
tracks, result = Tophat.parseTranscriptComparison(IOTools.openFile(infile))
tracks = [P.snip(os.path.basename(x), ".gtf.gz") for x in tracks]
tmpfile = P.getTempFilename()
tmpfile2 = P.getTempFilename()
tmpfile3 = P.getTempFilename()
outf = open(tmpfile, "w")
outf.write("track\n")
outf.write("\n".join(tracks) + "\n")
outf.close()
#########################################################
# load tracks
#########################################################
tablename = P.toTable(outfile) + "_tracks"
statement = '''cat %(tmpfile)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=track
--table=%(tablename)s
> %(outfile)s
'''
P.run()
L.info("loaded %s" % tablename)
#########################################################
# load benchmarking data
#########################################################
outf = open(tmpfile, "w")
outf.write("track\tcontig\t%s\n" %
"\t".join(Tophat.CuffCompareResult.getHeaders()))
for track, vv in result.items():
track = P.snip(os.path.basename(track), ".gtf.gz")
for contig, v in vv.items():
if v.is_empty:
continue
outf.write("%s\t%s\t%s\n" % (P.tablequote(track), contig, str(v)))
outf.close()
tablename = P.toTable(outfile) + "_benchmark"
statement = '''cat %(tmpfile)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=track
--add-index=contig
--table=%(tablename)s
> %(outfile)s
'''
P.run()
L.info("loaded %s" % tablename)
#########################################################
# load tracking and transcripts information
#########################################################
outf = open(tmpfile, "w")
outf.write("%s\n" % "\t".join(("transfrag_id",
"locus_id",
"ref_gene_id",
"ref_transcript_id",
"code",
"nexperiments")))
outf2 = open(tmpfile2, "w")
outf2.write("%s\n" % "\t".join(("track",
"transfrag_id",
"gene_id",
"transcript_id",
"fmi",
"fpkm",
"conf_lo",
"conf_hi",
"cov",
"length")))
outf3 = open(tmpfile3, "w")
outf3.write("transfrag_id\t%s\n" %
"\t".join([P.tablequote(x) for x in tracks]))
fn = "%s.tracking.gz" % infile
if os.path.exists(fn):
for transfrag in Tophat.iterate_tracking(IOTools.openFile(fn, "r")):
nexperiments = len([x for x in transfrag.transcripts if x])
outf.write("%s\n" %
"\t".join((transfrag.transfrag_id,
transfrag.locus_id,
transfrag.ref_gene_id,
transfrag.ref_transcript_id,
transfrag.code,
str(nexperiments))))
outf3.write("%s" % transfrag.transfrag_id)
for track, t in zip(tracks, transfrag.transcripts):
if t:
outf2.write("%s\n" % "\t".join(map(str, (track,
transfrag.transfrag_id) + t)))
outf3.write("\t%f" % t.fpkm)
else:
outf3.write("\t")
outf3.write("\n")
else:
E.warn("no tracking file %s - skipped ")
outf.close()
outf2.close()
outf3.close()
tablename = P.toTable(outfile) + "_tracking"
statement = '''cat %(tmpfile)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=locus_id
--add-index=transfrag_id
--add-index=code
--table=%(tablename)s
>> %(outfile)s
'''
P.run()
L.info("loaded %s" % tablename)
tablename = P.toTable(outfile) + "_transcripts"
statement = '''cat %(tmpfile2)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=transfrag_id
--add-index=ref_gene_id
--add-index=ref_transcript_id
--add-index=transcript_id
--add-index=gene_id
--add-index=track
--table=%(tablename)s
>> %(outfile)s
'''
P.run()
L.info("loaded %s" % tablename)
tablename = P.toTable(outfile) + "_fpkm"
statement = '''cat %(tmpfile3)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=transfrag_id
--table=%(tablename)s
>> %(outfile)s
'''
P.run()
L.info("loaded %s" % tablename)
#########################################################
# load locus information
#########################################################
outf = open(tmpfile, "w")
outf.write("%s\n" % "\t".join(("locus_id",
"contig",
"strand",
"start",
"end",
"nexperiments", ) + tuple(tracks)))
for locus in Tophat.iterate_locus(IOTools.openFile("%s.loci.gz" % infile, "r")):
counts = [len(x) for x in locus.transcripts]
nexperiments = len([x for x in counts if x > 0])
outf.write("%s\t%s\t%s\t%i\t%i\t%i\t%s\n" %
(locus.locus_id, locus.contig, locus.strand,
locus.start, locus.end,
nexperiments,
"\t".join(map(str, counts))))
outf.close()
tablename = P.toTable(outfile) + "_loci"
statement = '''cat %(tmpfile)s
| cgat csv2db %(csv2db_options)s
--add-index=locus_id
--table=%(tablename)s
>> %(outfile)s
'''
P.run()
L.info("loaded %s" % tablename)
os.unlink(tmpfile)
os.unlink(tmpfile2)
os.unlink(tmpfile3)
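# Note on the cuffcompare ".tracking" format assumed above (illustration, see
# the cuffcompare documentation): each line describes one transfrag with the
# columns transfrag_id, locus_id, "ref_gene_id|ref_transcript_id" and class
# code, followed by one column per sample that is either "-" (absent) or a
# "qJ:gene_id|transcript_id|FMI|FPKM|conf_lo|conf_hi|cov|len" record - these
# per-sample records are what ends up in the _transcripts and _fpkm tables.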
#########################################################################
#########################################################################
#########################################################################
@transform(compareTranscriptsBetweenExperiments,
suffix(".cuffcompare"),
".gtf.gz")
def buildAbinitioGeneSet(infile, outfile):
'''builds ab-initio gene set.
The ab-initio gene set is derived from the cuffcompare result.
The following transfrags are removed at this stage:
* transfrags overlapping RNA genes
* transfrags on certain contigs (usually: mitochondrial genes)
'''
infile += ".combined.gtf.gz"
writePrunedGTF(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@follows(loadTranscriptComparison)
@merge((buildAbinitioGeneSet, buildReferenceGeneSet),
"abinitio.gtf.gz")
def buildFullGeneSet(infiles, outfile):
'''builds a gene set by merging the ab-initio gene set and
the reference gene set.
The gene set is cleaned in order to permit differential expression
analysis.
Only transfrags are kept that are:
1. observed in at least 2 samples to remove partial transfrags that
are the result of low coverage observations in one sample
see also: http://seqanswers.com/forums/showthread.php?t=3967
    Transcripts that are removed are written to removed.gtf.gz.
'''
abinitio_gtf, reference_gtf = infiles
keep_gtf = outfile
remove_gtf = "removed.gtf.gz"
tablename = P.tablequote(
P.snip(abinitio_gtf, ".gtf.gz") + "_cuffcompare_tracking")
dbhandle = sqlite3.connect(PARAMS["database_name"])
tables = Database.getTables(dbhandle)
if tablename in tables:
cc = dbhandle.cursor()
statement = '''SELECT transfrag_id FROM %(tablename)s WHERE nexperiments > 1''' % locals(
)
keep = set([x[0] for x in cc.execute(statement).fetchall()])
E.info("keeping %i transfrags" % len(keep))
else:
E.warn("table %s missing - no replicates - keepy all transfrags" %
tablename)
keep = None
inf = GTF.iterator(IOTools.openFile(abinitio_gtf))
outf1 = IOTools.openFile(keep_gtf, "w")
outf2 = IOTools.openFile(remove_gtf, "w")
c = E.Counter()
for gtf in inf:
c.input += 1
if keep is None or gtf.transcript_id in keep:
c.kept += 1
outf1.write("%s\n" % str(gtf))
else:
c.removed += 1
outf2.write("%s\n" % str(gtf))
outf1.close()
outf2.close()
E.info("%s" % str(c))
#########################################################################
#########################################################################
#########################################################################
@merge((buildAbinitioGeneSet, buildReferenceGeneSet,
os.path.join(PARAMS["annotations_dir"], PARAMS_ANNOTATIONS["interface_repeats_gff"])),
"novel.gtf.gz")
def buildNovelGeneSet(infiles, outfile):
'''build a gene set of novel genes by merging the ab-initio gene set and
the reference gene set.
Ab-initio transcripts are removed based on features in the reference gene set.
Removal is aggressive - as soon as one transcript of a
gene/locus overlaps, all transcripts of that gene/locus are gone.
    Transcripts that lie exclusively in repetitive sequence are removed, too.
The resultant set contains a number of novel transcripts. However, these
transcripts will still overlap some known genomic features like pseudogenes.
'''
abinitio_gtf, reference_gtf, repeats_gff = infiles
E.info("indexing geneset for filtering")
sections = ("protein_coding", "lincRNA", "processed_transcript")
indices = {}
for section in sections:
indices[section] = GTF.readAndIndex(
GTF.iterator_filtered(GTF.iterator(IOTools.openFile(reference_gtf)),
source=section),
with_value=False)
E.info("build indices for %i features" % len(indices))
repeats = GTF.readAndIndex(GTF.iterator(IOTools.openFile(repeats_gff)),
with_value=False)
E.info("build index for repeats")
total_genes, remove_genes = set(), collections.defaultdict(set)
inf = GTF.iterator(IOTools.openFile(abinitio_gtf))
for gtf in inf:
total_genes.add(gtf.gene_id)
for section in sections:
if indices[section].contains(gtf.contig, gtf.start, gtf.end):
remove_genes[gtf.gene_id].add(section)
try:
for r in repeats.get(gtf.contig, gtf.start, gtf.end):
if r[0] <= gtf.start and r[1] >= gtf.end:
remove_genes[gtf.gene_id].add("repeat")
break
except KeyError:
pass
E.info("removing %i out of %i genes" %
(len(remove_genes), len(total_genes)))
PipelineRnaseq.filterAndMergeGTF(
abinitio_gtf, outfile, remove_genes, merge=True)
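# Minimal, self-contained sketch (not used by the pipeline) of the aggressive
# removal described above: if any transcript interval of a gene overlaps a
# filtering feature, the whole gene is flagged. The pipeline itself uses the
# indexed lookups built with GTF.readAndIndex(); the naive overlap test below
# is for illustration only.
def _sketch_flag_genes_for_removal(transcripts_by_gene, filter_intervals):
    '''transcripts_by_gene: {gene_id: [(contig, start, end), ...]}
    filter_intervals: {contig: [(start, end), ...]}
    Returns the set of gene_ids with at least one overlapping transcript.'''
    flagged = set()
    for gene_id, intervals in transcripts_by_gene.items():
        for contig, start, end in intervals:
            if any(start < fend and end > fstart
                   for fstart, fend in filter_intervals.get(contig, ())):
                flagged.add(gene_id)
                break
    return flagged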
#########################################################################
#########################################################################
#########################################################################
@merge((buildAbinitioGeneSet, buildReferenceGeneSet,
os.path.join(
PARAMS["annotations_dir"], PARAMS_ANNOTATIONS["interface_repeats_gff"]),
os.path.join(
PARAMS["annotations_dir"], PARAMS_ANNOTATIONS["interface_pseudogenes_gtf"]),
os.path.join(
PARAMS["annotations_dir"], PARAMS_ANNOTATIONS["interface_numts_gtf"]),
), "lincrna.gtf.gz")
def buildLincRNAGeneSet(infiles, outfile):
'''build lincRNA gene set.
The lincRNA gene set contains all known lincRNA transcripts from
the reference gene set plus all transcripts in the novel set that
do not overlap at any protein coding, processed or pseudogene transcripts
(exons+introns) in the reference gene set.
    Transcripts that lie exclusively in repetitive sequence are removed, too.
    lincRNA genes are often expressed at low levels and thus the resultant transcript
    models are fragmentary. To avoid some double counting in downstream analyses,
transcripts overlapping on the same strand are merged.
Transcripts need to have a length of at least 200 bp.
'''
infile_abinitio, reference_gtf, repeats_gff, pseudogenes_gtf, numts_gtf = infiles
E.info("indexing geneset for filtering")
input_sections = ("protein_coding",
"lincRNA",
"processed_transcript")
indices = {}
for section in input_sections:
indices[section] = GTF.readAndIndex(
GTF.iterator_filtered(GTF.merged_gene_iterator(GTF.iterator(IOTools.openFile(reference_gtf))),
source=section),
with_value=False)
E.info("built indices for %i features" % len(indices))
indices["repeats"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(repeats_gff)), with_value=False)
E.info("added index for repeats")
indices["pseudogenes"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(pseudogenes_gtf)), with_value=False)
E.info("added index for pseudogenes")
indices["numts"] = GTF.readAndIndex(
GTF.iterator(IOTools.openFile(numts_gtf)), with_value=False)
E.info("added index for numts")
sections = list(indices.keys())
total_genes, remove_genes = set(), collections.defaultdict(set)
inf = GTF.iterator(IOTools.openFile(infile_abinitio))
E.info("collecting genes to remove")
min_length = int(PARAMS["lincrna_min_length"])
for gtfs in GTF.transcript_iterator(inf):
gene_id = gtfs[0].gene_id
total_genes.add(gene_id)
l = sum([x.end - x.start for x in gtfs])
if l < min_length:
remove_genes[gene_id].add("length")
continue
for section in sections:
for gtf in gtfs:
if indices[section].contains(gtf.contig, gtf.start, gtf.end):
remove_genes[gene_id].add(section)
E.info("removing %i out of %i genes" %
(len(remove_genes), len(total_genes)))
PipelineRnaseq.filterAndMergeGTF(
infile_abinitio, outfile, remove_genes, merge=True)
E.info("adding known lincRNA set")
# add the known lincRNA gene set.
statement = '''zcat %(reference_gtf)s
| awk '$2 == "lincRNA"'
| gzip
>> %(outfile)s
'''
P.run()
# sort
statement = '''
mv %(outfile)s %(outfile)s.tmp;
checkpoint;
zcat %(outfile)s.tmp
| cgat gtf2gtf --method=sort --sort-order=contig+gene --log=%(outfile)s.log
| gzip > %(outfile)s;
checkpoint;
rm -f %(outfile)s.tmp
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@merge((buildLincRNAGeneSet, buildReferenceTranscriptome), "lincrna.pseudos.tsv")
def annotateLincRNA(infiles, outfile):
'''align lincRNA against reference transcriptome
in order to spot pseudogenes.
'''
linc_fasta, reference_fasta = infiles
format = ("qi", "qS", "qab", "qae",
"ti", "tS", "tab", "tae",
"s",
"pi",
"C")
format = "\\\\t".join(["%%%s" % x for x in format])
statement = '''
zcat %(linc_fasta)s
| cgat gff2fasta
--is-gtf
--genome=%(genome_dir)s/%(genome)s
--log=%(outfile)s.log
| %(cmd-farm)s --split-at-regex=\"^>(\S+)\" --chunk-size=400 --log=%(outfile)s.log
"exonerate --target %%STDIN%%
--query %(reference_fasta)s
--model affine:local
--score %(lincrna_min_exonerate_score)i
--showalignment no --showsugar no --showcigar no
--showvulgar no
--bestn 5
--ryo \\"%(format)s\\n\\"
"
| grep -v -e "exonerate" -e "Hostname"
| gzip > %(outfile)s.links.gz
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform((buildLincRNAGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
"_build_summary.load")
def loadGeneSetsBuildInformation(infile, outfile):
'''load results from gene set filtering into database.'''
infile += ".summary.tsv.gz"
P.load(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@transform((buildCodingGeneSet,
buildNoncodingGeneSet,
buildGeneModels,
buildFullGeneSet,
buildLincRNAGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
add_inputs(buildReferenceGeneSetWithCDS),
".class.tsv.gz")
def classifyTranscripts(infiles, outfile):
'''classify transcripts.
'''
to_cluster = USECLUSTER
infile, reference = infiles
classifier = PARAMS['gtf2table_classifier']
statement = '''
zcat %(infile)s
| cgat gtf2table
--counter=%(classifier)s
--reporter=transcripts
--gff-file=%(reference)s
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
# need to change pipeline logic to avoid this duplication
@transform((compareTranscriptsPerExperiment,
compareTranscriptsBetweenExperiments),
suffix(".cuffcompare"),
add_inputs(buildReferenceGeneSetWithCDS),
".class.tsv.gz")
def classifyTranscriptsCuffcompare(infiles, outfile):
'''classify transcripts.
'''
to_cluster = USECLUSTER
infile, reference = infiles
classifier = PARAMS['gtf2table_classifier']
statement = '''
zcat %(infile)s.combined.gtf.gz
| cgat gtf2table
--counter=%(classifier)s
--reporter=transcripts
--gff-file=%(reference)s
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform((classifyTranscripts, classifyTranscriptsCuffcompare), suffix(".tsv.gz"), ".load")
def loadClassification(infile, outfile):
P.load(infile, outfile,
options="--add-index=transcript_id --add-index=match_gene_id --add-index=match_transcript_id --add-index=source")
#########################################################################
#########################################################################
#########################################################################
@merge((buildGeneModels,
buildAbinitioGeneSet,
compareTranscriptsPerExperiment,
compareTranscriptsBetweenExperiments,
buildFullGeneSet,
buildReferenceGeneSet,
buildCodingGeneSet,
buildNoncodingGeneSet,
buildLincRNAGeneSet,
buildNovelGeneSet),
"geneset_stats.tsv")
def buildGeneSetStats(infiles, outfile):
'''compile gene set statistics.
'''
to_cluster = USECLUSTER
cuffcompare = [
x + ".combined.gtf.gz" for x in infiles if x.endswith("cuffcompare")]
other = [x for x in infiles if x.endswith(".gtf.gz")]
if os.path.exists("removed.gtf.gz"):
other.append("removed.gtf.gz")
allfiles = " ".join(other + cuffcompare)
statement = '''
cgat gff2stats --is-gtf
%(allfiles)s --log=%(outfile)s.log
| perl -p -e "s/.gtf.gz//"
| perl -p -e "s/^agg.*cuffcompare.combined/unfiltered/"
| perl -p -e "s/.cuffcompare.combined//"
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform(buildGeneSetStats, suffix(".tsv"), ".load")
def loadGeneSetStats(infile, outfile):
    '''load gene set statistics.'''
P.load(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@transform((
buildReferenceGeneSet,
buildCodingGeneSet,
buildAbinitioGeneSet,
buildFullGeneSet,
buildNoncodingGeneSet,
buildLincRNAGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
".mappability.gz")
def annotateTranscriptsMappability(infile, outfile):
    '''annotate transcripts with mappability scores.
    '''
# script will be farmed out
to_cluster = False
if "geneset_mappability" not in PARAMS or not PARAMS["geneset_mappability"]:
P.touch(outfile)
return
statement = """
zcat < %(infile)s
| %(cmd-farm)s --split-at-column=1 --output-header --log=%(outfile)s.log --max-files=60
"cgat gtf2table
--reporter=transcripts
--counter=bigwig-counts
--bigwig-file=%(geneset_mappability)s
--log=%(outfile)s.log"
| gzip
> %(outfile)s"""
P.run()
############################################################
@transform(annotateTranscriptsMappability, suffix(".mappability.gz"), "_mappability.load")
def loadTranscriptsMappability(infile, outfile):
    '''load transcript mappability annotations.
    '''
if "geneset_mappability" not in PARAMS or not PARAMS["geneset_mappability"]:
P.touch(outfile)
return
P.load(infile, outfile, "--add-index=transcript_id --allow-empty-file")
#########################################################################
#########################################################################
#########################################################################
@transform((buildFullGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
".annotations.gz")
def annotateTranscripts(infile, outfile):
'''classify transcripts with respect to the gene set.
'''
annotation_file = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_annotation"])
statement = """
zcat < %(infile)s
| cgat gtf2table
--reporter=transcripts
--counter=position
--counter=classifier
--section=exons
--counter=length
--log=%(outfile)s.log
--gff-file=%(annotation_file)s
--genome-file=%(genome_dir)s/%(genome)s
| gzip
> %(outfile)s"""
P.run()
############################################################
@transform(annotateTranscripts, suffix(".annotations.gz"), "_annotations.load")
def loadAnnotations(infile, outfile):
    '''load transcript annotations.
    '''
P.load(infile, outfile, "--add-index=gene_id")
#########################################################################
#########################################################################
#########################################################################
def hasReplicates(track):
'''indicator function - return true if track has replicates'''
replicates = PipelineTracks.getSamplesInTrack(track, TRACKS)
return len(replicates) > 1
@follows(loadTranscriptComparison, mkdir(os.path.join(PARAMS["exportdir"], "cuffcompare")))
@files([("%s.cuffcompare" % x.asFile(), "%s.reproducibility" % x.asFile())
for x in EXPERIMENTS if hasReplicates(x)])
def buildReproducibility(infile, outfile):
'''all-vs-all comparison between samples.
    Compute the correlation between expressed transfrags. Transfrags missing
    from either set are ignored.
'''
track = TRACKS.factory(filename=outfile[:-len(".reproducibility")])
replicates = PipelineTracks.getSamplesInTrack(track, TRACKS)
dbhandle = sqlite3.connect(PARAMS["database_name"])
tablename = "%s_cuffcompare_fpkm" % track.asTable()
tablename2 = "%s_cuffcompare_tracking" % track.asTable()
tables = Database.getTables(dbhandle)
if tablename2 not in tables:
E.warn("table %s missing - no replicates" % tablename2)
P.touch(outfile)
return
##################################################################
##################################################################
##################################################################
# build table correlating expression values
##################################################################
outf = IOTools.openFile(outfile, "w")
outf.write("track1\ttrack2\tcode\tpairs\tnull1\tnull2\tboth_null\tnot_null\tone_null\t%s\n" %
"\t".join(Stats.CorrelationTest.getHeaders()))
for rep1, rep2 in itertools.combinations(replicates, 2):
track1, track2 = rep1.asTable(), rep2.asTable()
def _write(statement, code):
data = Database.executewait(dbhandle, statement).fetchall()
if len(data) == 0:
return
both_null = len([x for x in data if x[0] == 0 and x[1] == 0])
one_null = len([x for x in data if x[0] == 0 or x[1] == 0])
null1 = len([x for x in data if x[0] == 0])
null2 = len([x for x in data if x[1] == 0])
not_null = [x for x in data if x[0] != 0 and x[1] != 0]
if len(not_null) > 1:
x, y = list(zip(*not_null))
result = Stats.doCorrelationTest(x, y)
else:
result = Stats.CorrelationTest()
outf.write("%s\n" % "\t".join(map(str, (track1, track2, code,
len(data),
null1, null2, both_null,
len(not_null),
one_null,
str(result)))))
for code in PARAMS["reproducibility_codes"]:
statement = '''SELECT CASE WHEN %(track1)s THEN %(track1)s ELSE 0 END,
CASE WHEN %(track2)s THEN %(track2)s ELSE 0 END
FROM %(tablename)s AS a,
%(tablename2)s AS b
WHERE a.transfrag_id = b.transfrag_id AND
b.code = '%(code)s'
'''
_write(statement % locals(), code)
statement = '''SELECT CASE WHEN %(track1)s THEN %(track1)s ELSE 0 END,
CASE WHEN %(track2)s THEN %(track2)s ELSE 0 END
FROM %(tablename)s AS a
'''
_write(statement % locals(), "*")
##################################################################
##################################################################
##################################################################
# plot pairwise correlations
##################################################################
# plot limit
lim = 1000
outdir = os.path.join(PARAMS["exportdir"], "cuffcompare")
R('''library(RSQLite)''')
R('''drv = dbDriver( "SQLite" )''')
R('''con <- dbConnect(drv, dbname = 'csvdb')''')
columns = ",".join([x.asTable() for x in replicates])
data = R(
'''data = dbGetQuery(con, "SELECT %(columns)s FROM %(tablename)s")''' % locals())
R.png("%(outdir)s/%(outfile)s.pairs.png" % locals())
R('''pairs( data, pch = '.', xlim=c(0,%(lim)i), ylim=c(0,%(lim)i) )''' %
locals())
R('''dev.off()''')
for rep1, rep2 in itertools.combinations(replicates, 2):
a, b = rep1.asTable(), rep2.asTable()
r = R('''r = lm( %(a)s ~ %(b)s, data)''' % locals())
R.png("%(outdir)s/%(outfile)s.pair.%(rep1)s_vs_%(rep2)s.png" %
locals())
R('''plot(data$%(a)s, data$%(b)s, pch='.', xlim=c(0,%(lim)i), ylim=c(0,%(lim)i),)''' %
locals())
try:
R('''abline(r)''')
except RRuntimeError:
pass
R('''dev.off()''')
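# Minimal sketch (illustration only) of the pairing logic used above: FPKM
# values of two replicates are paired per transfrag, zero observations are
# tallied separately, and the correlation is computed on the jointly non-zero
# pairs only.
def _sketch_pair_replicate_fpkm(fpkm1, fpkm2):
    '''fpkm1, fpkm2: equal-length sequences of per-transfrag FPKM values.'''
    pairs = list(zip(fpkm1, fpkm2))
    both_null = sum(1 for x, y in pairs if x == 0 and y == 0)
    one_null = sum(1 for x, y in pairs if x == 0 or y == 0)
    not_null = [(x, y) for x, y in pairs if x != 0 and y != 0]
    return both_null, one_null, not_null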
#########################################################################
#########################################################################
#########################################################################
@transform(buildReproducibility, suffix(".reproducibility"), "_reproducibility.load")
def loadReproducibility(infile, outfile):
'''load reproducibility results.'''
P.load(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
# @files( [ ( ([ "%s.bam" % xx.asFile() for xx in EXPERIMENTS[x] ],
# [ "%s.bam" % yy.asFile() for yy in EXPERIMENTS[y] ]),
# "%s_vs_%s.cuffdiff" % (x.asFile(),y.asFile()) )
# for x,y in itertools.combinations( EXPERIMENTS, 2) ] )
# def estimateDifferentialExpressionPairwise( infiles, outfile ):
# '''estimate differential expression using cuffdiff.
# Replicates are grouped.
# '''
# to_cluster = USECLUSTER
# job_threads = PARAMS["cuffdiff_threads"]
# reffile = "reference.gtf.gz"
# outdir = outfile + ".dir"
# try: os.mkdir( outdir )
# except OSError: pass
# reps = "%s %s" % (",".join( infiles[0]),
# ",".join( infiles[1]) )
# statement = '''
# cuffdiff -o %(outdir)s
# --verbose
# -r %(bowtie_genome_dir)s/%(genome)s.fa
# --num-threads %(cuffdiff_threads)i
# <(gunzip < %(reffile)s)
# %(reps)s
# >& %(outfile)s
# '''
# P.run()
#########################################################################
#########################################################################
#########################################################################
@transform((buildFullGeneSet,
buildReferenceGeneSet,
buildCodingGeneSet,
buildLincRNAGeneSet,
buildNoncodingGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
"_geneinfo.load")
def loadGeneSetGeneInformation(infile, outfile):
PipelineGeneset.loadGeneStats(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@transform((buildFullGeneSet,
buildReferenceGeneSet,
buildCodingGeneSet,
buildLincRNAGeneSet,
buildNoncodingGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
"_transcript2gene.load")
def loadGeneInformation(infile, outfile):
PipelineGeneset.loadTranscript2Gene(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@transform((
buildGeneModels,
buildFullGeneSet,
buildReferenceGeneSet,
buildCodingGeneSet,
buildNoncodingGeneSet,
buildLincRNAGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
"_transcriptinfo.load")
def loadGeneSetTranscriptInformation(infile, outfile):
PipelineGeneset.loadTranscriptStats(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@transform((buildFullGeneSet,
buildReferenceGeneSet,
buildCodingGeneSet,
buildNoncodingGeneSet,
buildLincRNAGeneSet,
buildNovelGeneSet),
suffix(".gtf.gz"),
add_inputs(buildMaskGtf),
".cuffdiff")
def runCuffdiff(infiles, outfile):
'''estimate differential expression using cuffdiff.
Replicates are grouped.
'''
infile, mask_file = infiles
to_cluster = USECLUSTER
outdir = outfile + ".dir"
try:
os.mkdir(outdir)
except OSError:
pass
job_threads = PARAMS["cuffdiff_threads"]
# Nick - add mask gtf to not assess rRNA and ChrM
options = PARAMS["cuffdiff_options"]
if PARAMS["cufflinks_include_mask"]:
# add mask option
options = options + " -M %s" % os.path.abspath(mask_file)
# replicates are separated by ","
reps, labels = [], []
for group, replicates in EXPERIMENTS.items():
reps.append(
",".join(["%s.accepted.bam" % r.asFile() for r in replicates]))
labels.append(group.asFile())
reps = " ".join(reps)
labels = ",".join(labels)
mask_file = os.path.abspath(mask_file)
statement = '''date > %(outfile)s; hostname >> %(outfile)s;
cuffdiff --output-dir %(outdir)s
--library-type %(tophat_library_type)s
--verbose
--num-threads %(cuffdiff_threads)i
--plot-labels %(labels)s
--FDR %(cuffdiff_fdr)f
%(options)s
<(gunzip < %(infile)s )
%(reps)s
>> %(outfile)s 2>&1;
date >> %(outfile)s;
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform(runCuffdiff,
suffix(".cuffdiff"),
"_cuffdiff.load")
def loadCuffdiff(infile, outfile):
'''load results from differential expression analysis and produce
summary plots.
Note: converts from ln(fold change) to log2 fold change.
The cuffdiff output is parsed.
Pairwise comparisons in which one gene is not expressed (fpkm < fpkm_silent)
are set to status 'NOCALL'. These transcripts might nevertheless be significant.
'''
Expression.loadCuffdiff(infile, outfile)
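# For reference (illustrative arithmetic only): the ln -> log2 conversion noted
# above is log2(fc) = ln(fc) / ln(2); e.g. an ln fold change of ~0.693
# corresponds to a log2 fold change of ~1.0, i.e. a two-fold difference.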
#########################################################################
#########################################################################
#########################################################################
def buildExpressionStats(tables, method, outfile):
'''build expression summary statistics.
Creates some diagnostic plots in
<exportdir>/<method> directory.
'''
dbhandle = sqlite3.connect(PARAMS["database_name"])
def togeneset(tablename):
return re.match("([^_]+)_", tablename).groups()[0]
keys_status = "OK", "NOTEST", "FAIL", "NOCALL"
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join(("geneset", "level", "treatment_name", "control_name", "tested",
"\t".join(["status_%s" % x for x in keys_status]),
"significant",
"twofold")) + "\n")
all_tables = set(Database.getTables(dbhandle))
outdir = os.path.join(PARAMS["exportdir"], method)
for level in CUFFDIFF_LEVELS:
for tablename in tables:
tablename_diff = "%s_%s_diff" % (tablename, level)
            tablename_levels = "%s_%s_levels" % (tablename, level)
geneset = togeneset(tablename_diff)
if tablename_diff not in all_tables:
continue
def toDict(vals, l=2):
return collections.defaultdict(int, [(tuple(x[:l]), x[l]) for x in vals])
tested = toDict(Database.executewait(
dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename_diff)s
GROUP BY treatment_name,control_name""" % locals()).fetchall())
status = toDict(Database.executewait(
dbhandle,
"""SELECT treatment_name, control_name, status, COUNT(*) FROM %(tablename_diff)s
GROUP BY treatment_name,control_name,status""" % locals()).fetchall(), 3)
signif = toDict(Database.executewait(
dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename_diff)s
WHERE significant
GROUP BY treatment_name,control_name""" % locals()).fetchall())
fold2 = toDict(Database.executewait(
dbhandle,
"""SELECT treatment_name, control_name, COUNT(*) FROM %(tablename_diff)s
WHERE (l2fold >= 1 or l2fold <= -1) AND significant
GROUP BY treatment_name,control_name,significant""" % locals()).fetchall())
for treatment_name, control_name in itertools.combinations(EXPERIMENTS, 2):
outf.write("\t".join(map(str, (
geneset,
level,
treatment_name,
control_name,
tested[(treatment_name, control_name)],
"\t".join([str(status[(treatment_name, control_name, x)])
for x in keys_status]),
signif[(treatment_name, control_name)],
fold2[(treatment_name, control_name)]))) + "\n")
###########################################
###########################################
###########################################
# plot length versus P-Value
data = Database.executewait(dbhandle,
'''SELECT i.sum, pvalue
FROM %(tablename_diff)s,
%(geneset)s_geneinfo as i
WHERE i.gene_id = test_id AND significant''' % locals()).fetchall()
# require at least 10 datapoints - otherwise smooth scatter fails
if len(data) > 10:
data = list(zip(*data))
pngfile = "%(outdir)s/%(geneset)s_%(method)s_%(level)s_pvalue_vs_length.png" % locals()
R.png(pngfile)
R.smoothScatter(R.log10(ro.FloatVector(data[0])),
R.log10(ro.FloatVector(data[1])),
xlab='log10( length )',
ylab='log10( pvalue )',
log="x", pch=20, cex=.1)
R['dev.off']()
outf.close()
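# Illustrative sketch (not called by the pipeline) of the helper behaviour
# assumed above: togeneset() keeps the part of a table name before the first
# underscore, and toDict() maps (treatment, control) key tuples to counts,
# defaulting to 0 for missing comparisons.
def _sketch_expression_stats_helpers():
    import collections
    import re
    assert re.match("([^_]+)_", "refcoding_cuffdiff_gene_diff").groups()[0] == "refcoding"
    vals = [("exp1", "exp2", 10), ("exp1", "exp3", 3)]
    d = collections.defaultdict(int, [(tuple(x[:2]), x[2]) for x in vals])
    assert d[("exp1", "exp2")] == 10
    assert d[("missing", "pair")] == 0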
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "cuffdiff")))
@transform(loadCuffdiff,
suffix(".load"),
".plots")
def buildCuffdiffPlots(infile, outfile):
    '''create summaries of cuffdiff results (including some diagnostic plots).
Plots are created in the <exportdir>/cuffdiff directory.
Plots are:
<geneset>_<method>_<level>_<track1>_vs_<track2>_significance.png
fold change against expression level
'''
###########################################
###########################################
# create diagnostic plots
###########################################
outdir = os.path.join(PARAMS["exportdir"], "cuffdiff")
dbhandle = sqlite3.connect(PARAMS["database_name"])
prefix = P.snip(infile, ".load")
geneset, method = prefix.split("_")
for level in CUFFDIFF_LEVELS:
tablename_diff = prefix + "_%s_diff" % level
tablename_levels = prefix + "_%s_levels" % level
# note that the ordering of EXPERIMENTS and the _diff table needs to be the same
# as only one triangle is stored of the pairwise results.
# do not plot "undefined" lfold values (where treatment_mean or control_mean = 0)
# do not plot lfold values where the confidence bounds contain 0.
for track1, track2 in itertools.combinations(EXPERIMENTS, 2):
statement = """
SELECT CASE WHEN d.treatment_mean < d.control_mean THEN d.treatment_mean
ELSE d.control_mean END,
d.l2fold, d.significant
FROM %(tablename_diff)s AS d
WHERE treatment_name = '%(track1)s' AND
control_name = '%(track2)s' AND
status = 'OK' AND
treatment_mean > 0 AND
control_mean > 0
""" % locals()
data = list(zip(*Database.executewait(dbhandle, statement)))
pngfile = "%(outdir)s/%(geneset)s_%(method)s_%(level)s_%(track1)s_vs_%(track2)s_significance.png" % locals()
# ian: Bug fix: moved R.png to after data check so that no plot is started if there is no data
# this was leading to R falling over from too many open devices
if len(data) == 0:
E.warn("no plot for %s - %s -%s vs %s" %
(pngfile, level, track1, track2))
continue
R.png(pngfile)
R.plot(ro.FloatVector(data[0]),
ro.FloatVector(data[1]),
xlab='min(FPKM)',
ylab='log2fold',
log="x", pch=20, cex=.1,
col=R.ifelse(ro.IntVector(data[2]), "red", "black"))
R['dev.off']()
P.touch(outfile)
#########################################################################
#########################################################################
#########################################################################
@merge(loadCuffdiff,
"cuffdiff_stats.tsv")
def buildCuffdiffStats(infiles, outfile):
tablenames = [P.toTable(x) for x in infiles]
buildExpressionStats(tablenames, "cuffdiff", outfile)
#########################################################################
#########################################################################
#########################################################################
@transform(buildCuffdiffStats,
suffix(".tsv"),
".load")
def loadCuffdiffStats(infile, outfile):
    '''load cuffdiff summary statistics.'''
P.load(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
def getLibrarySizes(infiles):
vals = []
for infile in infiles:
assert infile.endswith(".readstats")
val, cont = [x[:-1].split("\t")
for x in open(infile).readlines() if re.search("\tmapped", x)][0]
vals.append(int(val))
return vals
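# Assumed ".readstats" format (illustration): tab-separated "value<tab>category"
# lines, where the line whose category contains "mapped" provides the number of
# mapped reads that is used as the library size, e.g. "12345678<tab>mapped".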
#########################################################################
#########################################################################
#########################################################################
@merge(loadExpressionLevels,
"genelevel_fpkm_tagcounts.tsv.gz")
def buildFPKMGeneLevelTagCounts(infiles, outfile):
'''build tag counts using normalized counts from tophat.
These are gene-length normalized count levels.
They are obtained by multiplying the FPKM value
by the median library size.
'''
infiles = [x for x in infiles if x.endswith(".ref_gene_expression.load")]
tracks = [P.snip(x, ".ref_gene_expression.load") for x in infiles]
# get normalization values
library_sizes = getLibrarySizes(["%s.readstats" % x for x in tracks])
if len(library_sizes) == 0:
raise ValueError("could not get library sizes")
median_library_size = numpy.median(library_sizes)
# dbhandle = sqlite3.connect( os.path.join( PARAMS["annotations_dir"],
# PARAMS_ANNOTATIONS["interface_database"] ) )
# cc = dbhandle.cursor()
# median_gene_length = numpy.median( [ x for x in cc.execute( "SELECT sum FROM gene_stats") ] )
scale = median_library_size / 1000000.0
L.info("normalization: median library size=%i, factor=1.0 / %f" %
(median_library_size, scale))
# normalize
results = []
dbhandle = sqlite3.connect(PARAMS["database_name"])
for track in tracks:
table = "%s_ref_gene_expression" % P.tablequote(track)
statement = "SELECT gene_id, FPKM / %(scale)f FROM %(table)s" % locals()
results.append(
dict(Database.executewait(dbhandle, statement).fetchall()))
outf = IOTools.openFile(outfile, "w")
gene_ids = set()
for x in results:
gene_ids.update(list(x.keys()))
outf.write("gene_id\t%s\n" % "\t".join(tracks))
for gene_id in gene_ids:
outf.write("%s\t%s\n" %
(gene_id, "\t".join([str(int(x[gene_id])) for x in results])))
outf.close()
#########################################################################
#########################################################################
#########################################################################
@merge(os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_all_gtf"]),
"coding_exons.gtf.gz")
def buildCodingExons(infile, outfile):
'''compile set of protein coding exons.
This set is used for splice-site validation
'''
to_cluster = True
statement = '''
zcat %(infile)s
| awk '$2 == "protein_coding" && $3 == "CDS"'
| perl -p -e "s/CDS/exon/"
| cgat gtf2gtf --method=merge-exons --log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
###################################################################
###################################################################
###################################################################
@transform(buildBAMs,
suffix(".bam"),
add_inputs(buildCodingExons),
".exon.validation.tsv.gz")
def buildExonValidation(infiles, outfile):
    '''validate read alignments against exon boundaries (e.g. count exon overrun).
    '''
to_cluster = USECLUSTER
infile, exons = infiles
statement = '''cat %(infile)s
| cgat bam_vs_gtf
--exons-file=%(exons)s
--force-output
--log=%(outfile)s.log
--output-filename-pattern="%(outfile)s.%%s.gz"
| gzip
> %(outfile)s
'''
P.run()
############################################################
############################################################
############################################################
@merge(buildExonValidation, "exon_validation.load")
def loadExonValidation(infiles, outfile):
'''merge alignment stats into single tables.'''
    suffix = ".exon.validation.tsv.gz"
P.mergeAndLoad(infiles, outfile, suffix=suffix)
for infile in infiles:
track = P.snip(infile, suffix)
o = "%s_overrun.load" % track
P.load(infile + ".overrun.gz", o)
#########################################################################
#########################################################################
#########################################################################
@transform((buildReferenceGeneSet,
buildCodingGeneSet,
buildNovelGeneSet,
buildLincRNAGeneSet,
buildNoncodingGeneSet,
buildFullGeneSet),
suffix(".gtf.gz"),
".unionintersection.bed.gz")
def buildUnionIntersectionExons(infile, outfile):
'''build union/intersection genes according to Bullard et al. (2010) BMC Bioinformatics.
Builds a single-segment bed file.
'''
statement = '''
gunzip < %(infile)s
| cgat gtf2gtf
--method=intersect-transcripts
--log=%(outfile)s.log
| cgat gff2gff --is-gtf --method=crop-unique --log=%(outfile)s.log
| cgat gff2bed --is-gtf --log=%(outfile)s.log
| sort -k1,1 -k2,2n
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform((buildReferenceGeneSet,
buildCodingGeneSet,
buildNovelGeneSet,
buildLincRNAGeneSet,
buildNoncodingGeneSet,
buildFullGeneSet),
suffix(".gtf.gz"),
".union.bed.gz")
def buildUnionExons(infile, outfile):
'''build union genes.
Exons across all transcripts of a gene are merged.
They are then intersected between genes to remove any overlap.
Builds a single-segment bed file.
'''
to_cluster = USECLUSTER
statement = '''
gunzip < %(infile)s
| cgat gtf2gtf --method=merge-exons --log=%(outfile)s.log
| cgat gff2gff --is-gtf --method=crop-unique --log=%(outfile)s.log
| cgat gff2bed --is-gtf --log=%(outfile)s.log
| sort -k1,1 -k2,2n
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
# note - needs better implementation, currently no dependency checks.
@follows(buildUnionExons, mkdir("exon_counts.dir"))
@files([(("%s.accepted.bam" % x.asFile(), "%s.union.bed.gz" % y),
("exon_counts.dir/%s_vs_%s.bed.gz" % (x.asFile(), y)))
for x, y in itertools.product(TRACKS, GENESETS)])
def buildExonLevelReadCounts(infiles, outfile):
'''compute coverage of exons with reads.
'''
infile, exons = infiles
to_cluster = USECLUSTER
# note: needs to set flags appropriately for
# single-end/paired-end data sets
# set filter options
# for example, only properly paired reads
paired = False
if paired:
flag_filter = "-f 0x2"
else:
flag_filter = ""
# note: the -split option only concerns the stream in A - multiple
# segments in B are not split. Hence counting has to proceed via
# single exons - this can lead to double counting if exon counts
# are later aggregated.
statement = '''
samtools view -b %(flag_filter)s -q %(deseq_min_mapping_quality)s %(infile)s
| coverageBed -abam stdin -b %(exons)s -split
| sort -k1,1 -k2,2n
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@collate(buildExonLevelReadCounts,
regex(r"exon_counts.dir/(.+)_vs_(.+)\.bed.gz"),
r"\2.exon_counts.load")
def loadExonLevelReadCounts(infiles, outfile):
'''load exon level read counts.
'''
to_cluster = USECLUSTER
# aggregate not necessary for bed12 files, but kept in
    # ims: edited so that it picks up chromosome, start pos and end pos for
    # downstream use.
src = " ".join(["<( zcat %s | cut -f 1,2,3,4,7 )" % x for x in infiles])
tmpfile = P.getTempFilename(".")
tmpfile2 = P.getTempFilename(".")
statement = '''paste %(src)s
> %(tmpfile)s'''
P.run()
tracks = [P.snip(x, ".bed.gz") for x in infiles]
tracks = [re.match("exon_counts.dir/(\S+)_vs.*", x).groups()[0]
for x in tracks]
outf = IOTools.openFile(tmpfile2, "w")
outf.write("gene_id\tchromosome\tstart\tend\t%s\n" % "\t".join(tracks))
for line in open(tmpfile, "r"):
data = line[:-1].split("\t")
        # ims: edited so that it now steps in fives and the gene id is at index 3
genes = list(set([data[x] for x in range(3, len(data), 5)]))
# ims: add entries for chromosome, start and ends
chrom = list(set([data[x] for x in range(0, len(data), 5)]))
starts = list(set([data[x] for x in range(1, len(data), 5)]))
ends = list(set([data[x] for x in range(2, len(data), 5)]))
        # ims: edited as the value is now in position 4 and there are 5 columns
        # per line
values = [data[x] for x in range(4, len(data), 5)]
        # ims: extra asserts for chrom, starts and ends
assert len(
genes) == 1, "paste command failed, wrong number of genes per line"
assert len(
chrom) == 1, "paste command failed, wrong number of chromosomes per line"
assert len(
starts) == 1, "paste command failed, wrong number of starts per line"
assert len(
ends) == 1, "paste command failed, wrong number of ends per line"
        # ims: add extra columns into output
outf.write("%s\t%s\t%s\t%s\t%s\n" % (
genes[0], chrom[0], starts[0], ends[0], "\t".join(map(str, values))))
outf.close()
P.load(tmpfile2, outfile)
os.unlink(tmpfile)
os.unlink(tmpfile2)
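# Layout assumed for the pasted temporary file above (illustration): with N
# tracks each line carries 5 * N columns, repeating
# (contig, start, end, gene_id, count) once per track; the asserts check that
# contig, start, end and gene_id agree across all tracks before a single
# combined row is written.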
#########################################################################
#########################################################################
#########################################################################
@follows(buildUnionExons, mkdir("gene_counts.dir"))
@transform(buildBAMs,
regex(r"(\S+).accepted.bam"),
add_inputs(buildCodingGeneSet),
r"gene_counts.dir/\1.gene_counts.tsv.gz")
def buildGeneLevelReadCounts(infiles, outfile):
    '''compute read counts and read coverage of genes.
    '''
infile, exons = infiles
to_cluster = USECLUSTER
statement = '''
zcat %(exons)s
| cgat gtf2table
--reporter=genes
--bam-file=%(infile)s
--counter=length
--column-prefix="exons_"
--counter=read-counts
--column-prefix=""
--counter=read-coverage
--column-prefix=coverage_
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir("gene_counts.dir"), buildGeneModels)
@files([((["%s.accepted.bam" % y.asFile() for y in EXPERIMENTS[x]], buildCodingGeneSet),
"gene_counts.dir/%s.gene_counts.tsv.gz" % x.asFile())
for x in EXPERIMENTS] +
[((["%s.accepted.bam" % y.asFile() for y in TRACKS], buildCodingGeneSet),
"gene_counts.dir/%s.gene_counts.tsv.gz" % ALL.asFile())])
def buildAggregateGeneLevelReadCounts(infiles, outfile):
'''count reads falling into transcripts of protein coding
gene models.
.. note::
In paired-end data sets each mate will be counted. Thus
the actual read counts are approximately twice the fragment
counts.
'''
bamfiles, geneset = infiles
to_cluster = USECLUSTER
bamfiles = ",".join(bamfiles)
statement = '''
zcat %(geneset)s
| cgat gtf2table
--reporter=genes
--bam-file=%(bamfiles)s
--counter=length
--column-prefix="exons_"
--counter=read-counts
--column-prefix=""
--counter=read-coverage
--column-prefix=coverage_
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform((buildGeneLevelReadCounts,
buildAggregateGeneLevelReadCounts),
suffix(".tsv.gz"),
".load")
def loadGeneLevelReadCounts(infile, outfile):
P.load(infile, outfile, options="--add-index=gene_id")
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir("intron_counts.dir"))
@transform(buildBAMs,
regex(r"(\S+).accepted.bam"),
add_inputs(buildIntronGeneModels),
r"intron_counts.dir/\1.intron_counts.tsv.gz")
def buildIntronLevelReadCounts(infiles, outfile):
    '''compute read counts and read coverage of introns.
    '''
infile, exons = infiles
to_cluster = USECLUSTER
statement = '''
zcat %(exons)s
| cgat gtf2table
--reporter=genes
--bam-file=%(infile)s
--counter=length
--column-prefix="introns_"
--counter=read-counts
--column-prefix=""
--counter=read-coverage
--column-prefix=coverage_
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform(buildIntronLevelReadCounts,
suffix(".tsv.gz"),
".load")
def loadIntronLevelReadCounts(infile, outfile):
P.load(infile, outfile, options="--add-index=gene_id")
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir("extension_counts.dir"))
@transform(buildBAMs,
regex(r"(\S+).accepted.bam"),
r"extension_counts.dir/\1.extension_counts.tsv.gz")
def buildGeneLevelReadExtension(infile, outfile):
'''compute extension of cds.
Known UTRs are counted as well.
'''
to_cluster = USECLUSTER
cds = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_geneset_cds_gtf"])
territories = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_territories_gff"])
utrs = os.path.join(PARAMS["annotations_dir"],
PARAMS_ANNOTATIONS["interface_annotation_gff"])
if "geneset_remove_contigs" in PARAMS:
remove_contigs = '''| awk '$1 !~ /%s/' ''' % PARAMS[
"geneset_remove_contigs"]
else:
remove_contigs = ""
statement = '''
zcat %(cds)s
%(remove_contigs)s
| cgat gtf2table
--reporter=genes
--bam-file=%(infile)s
--counter=position
--counter=read-extension
--output-filename-pattern=%(outfile)s.%%s.tsv.gz
--gff-file=%(territories)s
--gff-file=%(utrs)s
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "utr_extension")))
@transform(buildGeneLevelReadExtension,
suffix(".tsv.gz"),
".plot")
def plotGeneLevelReadExtension(infile, outfile):
'''plot reads extending beyond last exon.'''
PipelineRnaseq.plotGeneLevelReadExtension(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "utr_extension")))
@transform(buildGeneLevelReadExtension,
suffix(".tsv.gz"),
".utr.gz")
def buildUTRExtension(infile, outfile):
'''build new utrs.'''
PipelineRnaseq.buildUTRExtension(infile, PARAMS["exportdir"],
outfile)
#########################################################################
#########################################################################
#########################################################################
@transform(buildUTRExtension,
suffix(".utr.gz"),
"_utr.load")
def loadUTRExtension(infile, outfile):
P.load(infile, outfile, "--add-index=gene_id")
#########################################################################
#########################################################################
#########################################################################
@merge(buildUTRExtension, "utrs.bed.gz")
def buildUTRs(infiles, outfile):
'''build new utrs by merging estimated UTR extensions
from all data sets.
'''
infiles = " " .join(infiles)
to_cluster = USECLUSTER
statement = '''
zcat %(infiles)s
| cgat csv_cut contig max_5utr_start max_5utr_end gene_id max_5utr_length strand
| awk -v FS='\\t' '$1 != "contig" && $2 != ""'
| mergeBed -nms -s
> %(outfile)s.5
'''
P.run()
statement = '''
zcat %(infiles)s
| cgat csv_cut contig max_3utr_start max_3utr_end gene_id max_3utr_length strand
| awk -v FS='\\t' '$1 != "contig" && $2 != ""'
| mergeBed -nms -s
> %(outfile)s.3
'''
P.run()
statement = '''
cat %(outfile)s.5 %(outfile)s.3
| sort -k 1,1 -k2,2n
| gzip
> %(outfile)s'''
P.run()
os.unlink("%s.5" % outfile)
os.unlink("%s.3" % outfile)
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir("transcript_counts.dir"))
@transform(buildBAMs,
regex(r"(\S+).accepted.bam"),
add_inputs(buildCodingGeneSet),
r"transcript_counts.dir/\1.transcript_counts.tsv.gz")
def buildTranscriptLevelReadCounts(infiles, outfile):
'''count reads falling into transcripts of protein coding
gene models.
.. note::
In paired-end data sets each mate will be counted. Thus
the actual read counts are approximately twice the fragment
counts.
'''
infile, geneset = infiles
to_cluster = USECLUSTER
statement = '''
zcat %(geneset)s
| cgat gtf2table
--reporter=transcripts
--bam-file=%(infile)s
--counter=length
--column-prefix="exons_"
--counter=read-counts
--column-prefix=""
--counter=read-coverage
--column-prefix=coverage_
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir("transcript_counts.dir"), buildGeneModels)
@files([((["%s.accepted.bam" % y.asFile() for y in EXPERIMENTS[x]], buildCodingGeneSet),
"transcript_counts.dir/%s.transcript_counts.tsv.gz" % x.asFile())
for x in EXPERIMENTS])
def buildAggregateTranscriptLevelReadCounts(infiles, outfile):
'''count reads falling into transcripts of protein coding
gene models.
.. note::
In paired-end data sets each mate will be counted. Thus
the actual read counts are approximately twice the fragment
counts.
.. note::
This step takes very long if multiple bam-files are supplied.
It has thus been taken out of the pipeline. The aggregate can be derived from summing
the individual counts anyways.
'''
bamfiles, geneset = infiles
to_cluster = USECLUSTER
bamfiles = ",".join(bamfiles)
statement = '''
zcat %(geneset)s
| cgat gtf2table
--reporter=transcripts
--bam-file=%(bamfiles)s
--counter=length
--column-prefix="exons_"
--counter=read-counts
--column-prefix=""
--counter=read-coverage
--column-prefix=coverage_
| gzip
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@transform((buildTranscriptLevelReadCounts,
buildAggregateTranscriptLevelReadCounts),
suffix(".tsv.gz"),
".load")
def loadTranscriptLevelReadCounts(infile, outfile):
P.load(infile, outfile, options="--add-index=transcript_id")
#########################################################################
#########################################################################
#########################################################################
@collate(buildExonLevelReadCounts,
regex(r"exon_counts.dir/(.+)_vs_(.+)\.bed.gz"),
r"\2.exon_counts.tsv.gz")
def aggregateExonLevelReadCounts(infiles, outfile):
'''aggregate exon level tag counts for each gene.
coverageBed adds the following four columns:
1) The number of features in A that overlapped (by at least one base pair) the B interval.
2) The number of bases in B that had non-zero coverage from features in A.
3) The length of the entry in B.
4) The fraction of bases in B that had non-zero coverage from features in A.
For bed6: use column 7
For bed12: use column 13
This method uses the maximum number of reads
found in any exon as the tag count.
'''
to_cluster = USECLUSTER
# aggregate not necessary for bed12 files, but kept in
src = " ".join(
["<( zcat %s | sort -k4,4 | groupBy -i stdin -g 4 -c 7 -o max | sort -k1,1)" % x for x in infiles])
tmpfile = P.getTempFilename(".")
statement = '''paste %(src)s
> %(tmpfile)s''' % locals()
P.run()
tracks = [P.snip(x, ".bed.gz") for x in infiles]
tracks = [re.match("exon_counts.dir/(\S+)_vs.*", x).groups()[0]
for x in tracks]
outf = IOTools.openFile(outfile, "w")
outf.write("gene_id\t%s\n" % "\t".join(tracks))
for line in open(tmpfile, "r"):
data = line[:-1].split("\t")
genes = list(set([data[x] for x in range(0, len(data), 2)]))
values = [data[x] for x in range(1, len(data), 2)]
assert len(
genes) == 1, "paste command failed, wrong number of genes per line"
outf.write("%s\t%s\n" % (genes[0], "\t".join(map(str, values))))
outf.close()
os.unlink(tmpfile)
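# Minimal sketch (illustration only, not used by the pipeline) of the
# aggregation performed above via groupBy: the tag count of a gene is the
# maximum read count observed over any of its exons.
def _sketch_max_exon_tag_count(exon_counts):
    '''exon_counts: {gene_id: [per-exon read counts]} -> {gene_id: tag count}'''
    return dict((gene_id, max(counts))
                for gene_id, counts in exon_counts.items())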
#########################################################################
#########################################################################
#########################################################################
@transform((aggregateExonLevelReadCounts),
suffix(".tsv.gz"),
".load")
def loadAggregateExonLevelReadCounts(infile, outfile):
P.load(infile, outfile, options="--add-index=gene_id")
#########################################################################
#########################################################################
#########################################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "deseq")))
@transform(aggregateExonLevelReadCounts,
suffix(".exon_counts.tsv.gz"),
".deseq")
def runDESeq(infile, outfile):
'''estimate differential expression using DESeq.
    The final output is a table. It is slightly edited such that
    it contains columns and an FDR threshold similar to the
    cuffdiff output.
Plots are:
<geneset>_<method>_<level>_<track1>_vs_<track2>_significance.png
fold change against expression level
'''
to_cluster = USECLUSTER
outdir = os.path.join(PARAMS["exportdir"], "deseq")
geneset, method = outfile.split(".")
level = "gene"
# load data
R('''suppressMessages(library('DESeq'))''')
R('''countsTable <- read.delim( '%s', header = TRUE, row.names = 1, stringsAsFactors = TRUE )''' %
infile)
# get conditions to test
# note that tracks in R use a '.' as separator
tracks = R('''colnames(countsTable)''')
map_track2column = dict([(y, x) for x, y in enumerate(tracks)])
sample2condition = [None] * len(tracks)
conditions = []
no_replicates = False
for group, replicates in EXPERIMENTS.items():
if len(replicates) == 1:
E.warn(
"only one replicate in %s - replicates will be ignored in ALL data sets for variance estimation" % group)
no_replicates = True
for r in replicates:
sample2condition[map_track2column[r.asR()]] = group.asR()
conditions.append(group)
ro.globalenv['groups'] = ro.StrVector(sample2condition)
R('''print (groups)''')
def build_filename2(**kwargs):
return "%(outdir)s/%(geneset)s_%(method)s_%(level)s_%(track1)s_vs_%(track2)s_%(section)s.png" % kwargs
def build_filename1(**kwargs):
return "%(outdir)s/%(geneset)s_%(method)s_%(level)s_%(section)s_%(track)s.png" % kwargs
def build_filename0(**kwargs):
return "%(outdir)s/%(geneset)s_%(method)s_%(level)s_%(section)s.png" % kwargs
def build_filename0b(**kwargs):
return "%(outdir)s/%(geneset)s_%(method)s_%(level)s_%(section)s.tsv" % kwargs
# Run DESeq
# Create Count data object
E.info("running DESeq: replicates=%s" % (not no_replicates))
R('''cds <-newCountDataSet( countsTable, groups) ''')
# Estimate size factors
R('''cds <- estimateSizeFactors( cds )''')
deseq_fit_type = PARAMS['deseq_fit_type']
deseq_dispersion_method = PARAMS['deseq_dispersion_method']
# Estimate variance
if no_replicates:
E.info("no replicates - estimating variance with method='blind'")
# old:R('''cds <- estimateVarianceFunctions( cds, method="blind" )''')
R('''cds <- estimateDispersions( cds, method="blind" )''')
else:
E.info("replicates - estimating variance from replicates")
# old:R('''cds <- estimateVarianceFunctions( cds )''')
R('''cds <- estimateDispersions( cds,
method='%(deseq_dispersion_method)s',
fitType='%(deseq_fit_type)s' )''' % locals())
R('''str( fitInfo( cds ) )''')
L.info("creating diagnostic plots")
# Plot size factors
Expression.deseqPlotSizeFactors(
build_filename0(section="size_factors", **locals()))
Expression.deseqOutputSizeFactors(
build_filename0b(section="size_factors", **locals()))
Expression.deseqPlotHeatmap(build_filename0(section="heatmap", **locals()))
Expression.deseqPlotPairs(build_filename0(section="pairs", **locals()))
L.info("calling differential expression")
all_results = []
for track1, track2 in itertools.combinations(conditions, 2):
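        # nbinomTest returns a per-gene table including baseMean,
        # log2FoldChange and padj, which are used below for the MA-style
        # significance plot and parsed into the final results table.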
R('''res <- nbinomTest( cds, '%s', '%s' )''' %
(track1.asR(), track2.asR()))
R.png(build_filename2(section="significance", **locals()))
R('''plot( res$baseMean, res$log2FoldChange, log="x", pch=20, cex=.1,
col = ifelse( res$padj < %(cuffdiff_fdr)s, "red", "black" ))''' % PARAMS)
R['dev.off']()
results, counts = Expression.deseqParseResults(
track1, track2, fdr=PARAMS["cuffdiff_fdr"])
all_results.extend(results)
E.info("%s vs %s: %s" % (track1, track2, counts))
with IOTools.openFile(outfile, "w") as outf:
Expression.writeExpressionResults(outf, all_results)
#########################################################################
#########################################################################
#########################################################################
@transform(runDESeq,
suffix(".deseq"),
"_deseq.load")
def loadDESeq(infile, outfile):
'''load differential expression results.
'''
    # add gene-level results, following the naming convention "<level>_diff"
# if one expression value is 0, the log fc is inf or -inf.
# substitute with 10
tablename = P.snip(outfile, ".load") + "_gene_diff"
statement = '''cat %(infile)s
| cgat csv2db %(csv2db_options)s
--allow-empty-file
--add-index=treatment_name
--add-index=control_name
--add-index=test_id
--table=%(tablename)s
> %(outfile)s
'''
P.run()
#########################################################################
#########################################################################
#########################################################################
@merge(loadDESeq, "deseq_stats.tsv")
def buildDESeqStats(infiles, outfile):
tablenames = [P.toTable(x) for x in infiles]
buildExpressionStats(tablenames, "deseq", outfile)
#########################################################################
#########################################################################
#########################################################################
@transform(buildDESeqStats,
suffix(".tsv"),
".load")
def loadDESeqStats(infile, outfile):
P.load(infile, outfile)
#########################################################################
#########################################################################
#########################################################################
# targets related to exporting results of the pipeline
#########################################################################
@follows(mkdir(os.path.join(PARAMS["exportdir"], "roi")))
@transform((loadCuffdiff, loadDESeq),
regex(r"(.*).load"),
r"%s/roi/differentially_expressed_\1" % (PARAMS["exportdir"]))
def buildGeneSetsOfInterest(infile, outfile):
'''export gene sets of interest
* differentially expressed genes
Regions of interest are exported as :term:`bed` formatted files.
'''
dbh = connect()
table = P.toTable(infile) + "_gene_diff"
track = table[:table.index('_')]
statement = '''SELECT test_id, treatment_name, control_name,
info.contig, info.start, info.end, info.strand,
l2fold
FROM %(table)s,
%(track)s_geneinfo AS info
WHERE
significant AND
info.gene_id = test_id
''' % locals()
data = Database.executewait(dbh, statement % locals())
outfiles = IOTools.FilePool(outfile + "_%s.bed.gz")
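    # One bed file is written per pairwise comparison, keyed as
    # '<treatment>_vs_<control>', with the log2 fold change in the score
    # column.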
for test_id, track1, track2, contig, start, end, strand, l2fold in data:
try:
l2fold = float(l2fold)
except TypeError:
l2fold = 0
key = "%s_vs_%s" % (track1, track2)
outfiles.write(key, "%s\t%i\t%i\t%s\t%5.2f\t%s\n" %
(contig, start, end, test_id, l2fold, strand))
outfiles.close()
P.touch(outfile)
#########################################################################
#########################################################################
#########################################################################
@follows(buildBAMs,
buildFastQCReport,
loadTophatStats,
loadBAMStats,
loadPicardStats,
loadContextStats,
loadMappingStats,
)
def mapping():
pass
@follows(buildGeneModels,
loadTranscriptComparison,
buildAbinitioGeneSet,
buildReferenceGeneSet,
buildCodingGeneSet,
buildFullGeneSet,
buildLincRNAGeneSet,
buildNoncodingGeneSet,
buildNovelGeneSet,
loadGeneSetsBuildInformation,
loadClassification,
loadGeneLevelReadCounts,
loadIntronLevelReadCounts,
loadGeneInformation,
loadGeneSetStats,
loadGeneSetGeneInformation,
loadGeneSetTranscriptInformation,
loadReproducibility,
loadTranscriptsMappability,
loadTranscriptLevelReadCounts,
loadGeneLevelReadCounts,
loadExonLevelReadCounts,
)
def genesets():
pass
@follows(buildUTRs,
plotGeneLevelReadExtension,
loadUTRExtension)
def utrs():
pass
@follows(loadCuffdiff,
loadDESeq,
buildCuffdiffPlots,
loadCuffdiffStats,
loadDESeqStats)
def expression():
pass
@follows(buildGeneSetsOfInterest)
def export():
pass
@follows(loadExonValidation)
def validate():
pass
###################################################################
###################################################################
###################################################################
# export targets
###################################################################
@merge(mapping, "view_mapping.load")
def createViewMapping(infile, outfile):
'''create view in database for alignment stats.
This view aggregates all information on a per-track basis.
The table is built from the following tracks:
tophat_stats: .genome
mapping_stats: .accepted
bam_stats: .accepted
context_stats: .accepted
picard_stats: .accepted
'''
tablename = P.toTable(outfile)
    # cannot create views across multiple databases, so use a table instead
view_type = "TABLE"
dbhandle = connect()
Database.executewait(
dbhandle, "DROP %(view_type)s IF EXISTS %(tablename)s" % locals())
statement = '''
CREATE %(view_type)s %(tablename)s AS
SELECT SUBSTR( b.track, 1, LENGTH(b.track) - LENGTH( '.accepted')) AS track, *
FROM bam_stats AS b,
mapping_stats AS m,
context_stats AS c,
picard_stats_alignment_summary_metrics AS a,
tophat_stats AS t
WHERE b.track LIKE "%%.accepted"
AND b.track = m.track
AND b.track = c.track
AND b.track = a.track
AND SUBSTR( b.track, 1, LENGTH(b.track) - LENGTH( '.accepted')) || '.genome' = t.track
'''
Database.executewait(dbhandle, statement % locals())
nrows = Database.executewait(
dbhandle, "SELECT COUNT(*) FROM view_mapping").fetchone()[0]
if nrows == 0:
raise ValueError(
"empty view mapping, check statement = %s" % (statement % locals()))
E.info("created view_mapping with %i rows" % nrows)
P.touch(outfile)
###################################################################
###################################################################
###################################################################
@follows(createViewMapping)
def views():
pass
###################################################################
###################################################################
###################################################################
@follows(mapping,
genesets,
expression,
utrs,
validate,
export,
views)
def full():
pass
###################################################################
###################################################################
###################################################################
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting documentation build process from scratch")
P.run_report(clean=True)
###################################################################
###################################################################
###################################################################
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating documentation")
P.run_report(clean=False)
###################################################################
###################################################################
###################################################################
@follows(mkdir("%s/bamfiles" % PARAMS["web_dir"]),
mkdir("%s/genesets" % PARAMS["web_dir"]),
mkdir("%s/classification" % PARAMS["web_dir"]),
mkdir("%s/differential_expression" % PARAMS["web_dir"]),
update_report,
)
def publish():
'''publish files.'''
# publish web pages
P.publish_report()
# publish additional data
web_dir = PARAMS["web_dir"]
project_id = P.getProjectId()
# directory, files
exportfiles = {
"bamfiles": glob.glob("*.accepted.bam") + glob.glob("*.accepted.bam.bai"),
"genesets": ["lincrna.gtf.gz", "abinitio.gtf.gz"],
"classification": glob.glob("*.class.tsv.gz"),
"differential_expression": glob.glob("*.cuffdiff.dir"),
}
bams = []
for targetdir, filenames in exportfiles.items():
for src in filenames:
dest = "%s/%s/%s" % (web_dir, targetdir, src)
if dest.endswith(".bam"):
bams.append(dest)
dest = os.path.abspath(dest)
if not os.path.exists(dest):
os.symlink(os.path.abspath(src), dest)
# output ucsc links
for bam in bams:
filename = os.path.basename(bam)
track = P.snip(filename, ".bam")
print("""track type=bam name="%(track)s" bigDataUrl=http://www.cgat.org/downloads/%(project_id)s/bamfiles/%(filename)s""" % locals())
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
{
"content_hash": "0542dfc569255a1f42b879a871d18f8f",
"timestamp": "",
"source": "github",
"line_count": 4586,
"max_line_length": 210,
"avg_line_length": 34.52006105538596,
"alnum_prop": 0.5051134174304682,
"repo_name": "CGATOxford/CGATPipelines",
"id": "9d16f8cf0fc5d217705a27b66331c6aa0504d70d",
"size": "158309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obsolete/pipeline_rnaseq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
}
|
"""System tests for Google BigQuery hooks"""
import unittest
from unittest import mock
from airflow.gcp.hooks import bigquery as hook
from tests.gcp.utils.gcp_authenticator import GCP_BIGQUERY_KEY
from tests.test_utils.gcp_system_helpers import skip_gcp_system
@skip_gcp_system(GCP_BIGQUERY_KEY)
class BigQueryDataframeResultsSystemTest(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=mock.PropertyMock,
return_value=None
)
def test_output_is_dataframe_with_valid_query(self, mock_project_id):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=mock.PropertyMock,
return_value=None
)
def test_throws_exception_with_invalid_query(self, mock_project_id):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('Reason: ', str(context.exception), "")
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=mock.PropertyMock,
return_value=None
)
def test_succeeds_with_explicit_legacy_query(self, mock_project_id):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=mock.PropertyMock,
return_value=None
)
def test_succeeds_with_explicit_std_query(self, mock_project_id):
df = self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@mock.patch(
'airflow.gcp.hooks.base.GoogleCloudBaseHook.project_id',
new_callable=mock.PropertyMock,
return_value=None
)
def test_throws_exception_with_incompatible_syntax(self, mock_project_id):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('Reason: ', str(context.exception), "")
|
{
"content_hash": "5e9a57f3d452ec1b8ba43a1d1de810b4",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 37.265625,
"alnum_prop": 0.6658280922431866,
"repo_name": "Fokko/incubator-airflow",
"id": "4bd854804054cb448b6cc2fe99b52d734a9cf4ca",
"size": "3196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gcp/hooks/test_bigquery_system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import taggit.managers
import wagtail.wagtailadmin.taggable
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('wagtailimages', '0010_change_on_delete_behaviour'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailcore', '0024_alter_page_content_type_on_delete_behaviour'),
('taggit', '0002_auto_20150616_2121'),
('wagtaildocs', '0005_alter_uploaded_by_user_on_delete_action'),
]
operations = [
migrations.CreateModel(
name='Advert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(blank=True, null=True)),
('text', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AdvertPlacement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('colour', models.CharField(max_length=255)),
('advert', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='tests.Advert')),
],
),
migrations.CreateModel(
name='AdvertTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='tests.Advert')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tests_adverttag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AdvertWithTabbedInterface',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(blank=True, null=True)),
('text', models.CharField(max_length=255)),
('something_else', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80, unique=True)),
],
),
migrations.CreateModel(
name='BlogCategoryBlogPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='tests.BlogCategory')),
],
),
migrations.CreateModel(
name='BusinessChild',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessNowherePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessSubIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='CustomImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.ImageField(height_field='height', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='file', width_field='width')),
('width', models.IntegerField(editable=False, verbose_name='width')),
('height', models.IntegerField(editable=False, verbose_name='height')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('caption', models.CharField(max_length=255)),
('not_editable_field', models.CharField(max_length=255)),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='CustomImageFilePath',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.ImageField(height_field='height', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='file', width_field='width')),
('width', models.IntegerField(editable=False, verbose_name='width')),
('height', models.IntegerField(editable=False, verbose_name='height')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='CustomManagerPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date_from', models.DateField(null=True, verbose_name='Start date')),
('date_to', models.DateField(blank=True, help_text='Not required if event is on a single day', null=True, verbose_name='End date')),
('time_from', models.TimeField(blank=True, null=True, verbose_name='Start time')),
('time_to', models.TimeField(blank=True, null=True, verbose_name='End time')),
('audience', models.CharField(choices=[('public', 'Public'), ('private', 'Private')], max_length=255)),
('location', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('cost', models.CharField(max_length=255)),
('signup_link', models.URLField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageCarouselItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
('caption', models.CharField(blank=True, max_length=255)),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='EventPageChooserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='EventPageSpeaker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('first_name', models.CharField(blank=True, max_length=255, verbose_name='Name')),
('last_name', models.CharField(blank=True, max_length=255, verbose_name='Surname')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='FilePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('file_field', models.FileField(upload_to='')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='FormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.CharField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', max_length=512, verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='FormPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('to_address', models.CharField(blank=True, help_text='Optional - form submissions will be emailed to this address', max_length=255, verbose_name='to address')),
('from_address', models.CharField(blank=True, max_length=255, verbose_name='from address')),
('subject', models.CharField(blank=True, max_length=255, verbose_name='subject')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='GenericSnippetPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('snippet_object_id', models.PositiveIntegerField(null=True)),
('snippet_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='IconSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ManyToManyBlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('adverts', models.ManyToManyField(blank=True, to='tests.Advert')),
('blog_categories', models.ManyToManyField(blank=True, through='tests.BlogCategoryBlogPage', to='tests.BlogCategory')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MTIBasePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'verbose_name': 'MTI Base page',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MyCustomPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='NotYetRegisteredSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PageChooserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='PageWithOldStyleRouteMethod',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SimplePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SingletonPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SnippetChooserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('advert', models.ForeignKey(help_text='help text', on_delete=django.db.models.deletion.CASCADE, to='tests.Advert')),
],
),
migrations.CreateModel(
name='StandardChild',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StreamModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', wagtail.wagtailcore.fields.StreamField((('text', wagtail.wagtailcore.blocks.CharBlock()), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())))),
],
),
migrations.CreateModel(
name='StreamPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField((('text', wagtail.wagtailcore.blocks.CharBlock()), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='tests.TaggedPage')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tests_taggedpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TestSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('email', models.EmailField(max_length=50)),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ValidatedPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('foo', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MTIChildPage',
fields=[
('mtibasepage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tests.MTIBasePage')),
],
options={
'abstract': False,
},
bases=('tests.mtibasepage',),
),
migrations.CreateModel(
name='SingleEventPage',
fields=[
('eventpage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tests.EventPage')),
('excerpt', models.TextField(blank=True, help_text='Short text to describe what is this action about', max_length=255, null=True)),
],
options={
'abstract': False,
},
bases=('tests.eventpage',),
),
migrations.AddField(
model_name='taggedpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='tests.TaggedPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Page'),
),
migrations.AddField(
model_name='formfield',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='tests.FormPage'),
),
migrations.AddField(
model_name='eventpagespeaker',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='eventpagespeaker',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='speakers', to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpagechoosermodel',
name='page',
field=models.ForeignKey(help_text='more help text', on_delete=django.db.models.deletion.CASCADE, to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpage',
name='feed_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='blogcategoryblogpage',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='tests.ManyToManyBlogPage'),
),
migrations.AddField(
model_name='advertplacement',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='advert_placements', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='advert',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='tests.AdvertTag', to='taggit.Tag', verbose_name='Tags'),
),
]
|
{
"content_hash": "8a61039a613572ffb305049eb7af1e18",
"timestamp": "",
"source": "github",
"line_count": 585,
"max_line_length": 385,
"avg_line_length": 52.07863247863248,
"alnum_prop": 0.5752314055012144,
"repo_name": "gogobook/wagtail",
"id": "2cab4e7acebb748b481f2d66334147d63b82f44f",
"size": "30538",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/tests/testapp/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "155100"
},
{
"name": "HTML",
"bytes": "267043"
},
{
"name": "JavaScript",
"bytes": "109586"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2059166"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedjack', '0007_auto_20150207_2007'),
]
operations = [
migrations.AlterField(
model_name='postprocessor',
name='base',
field=models.ForeignKey(related_name='post_processors', to='feedjack.PostProcessorBase'),
preserve_default=True,
),
migrations.AlterField(
model_name='postprocessorbase',
name='handler_name',
            field=models.CharField(help_text=b'Processing function as an import-name, like "myapp.filters.some_filter" or just a name if it is a built-in processor (contained in feedjack.filters), the latter is implied if this field is omitted.<br /> Should accept Post object and optional (or not) parameter (derived from actual PostProcessor field) and return a dict of fields of Post object to override or None.', max_length=256, blank=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='postprocessorresult',
unique_together=set([('processor', 'post')]),
),
]
|
{
"content_hash": "b10cea5d30c8e304a2d7adcf571bdb0a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 440,
"avg_line_length": 41.86206896551724,
"alnum_prop": 0.6499176276771005,
"repo_name": "allo-/feedjack",
"id": "1cbd9159c3c5f4c4b59ec0b45660de6c32f8a0af",
"size": "1238",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "feedjack/migrations/0008_auto_20150207_2112.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22150"
},
{
"name": "CoffeeScript",
"bytes": "15377"
},
{
"name": "HTML",
"bytes": "39412"
},
{
"name": "Python",
"bytes": "388184"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import os
import sys
def help(rc: int=0):
print("The pedtodot.py script transforms a pedigree file into a dot file that Graphiz can use to create a graph.")
print("")
print("\t-h: This help message.")
print("\t-f --file: The pedigree file to read [default: /home/bert/Documents/DropBox/banded.txt]")
print("\t-o --output: The output file [default: the pedigree file with the .dot extention instead]")
print("\t-a --ancestor <ancestor>: Limit to individuals with this ancestor.")
print("\t-p --pedigree <individual>: Limit to the pedigree of this individual.")
print("\t-c --center <individual>: Combine -a and -p: both offspring and pedigree.")
print("")
sys.exit(rc)
def write_dot(filename: str, pedigree: dict):
with open(filename, 'w') as dot:
dot.write("digraph PEDIGREE {\n")
dot.write(" rankdir=LR;\n")
for individual, parents in pedigree.items():
for parent in parents:
dot.write(" \"%s\" -> \"%s\"\n" % (parent, individual))
dot.write("}\n")
def _filter_ancestor(ancestor, pedigree: dict):
filtered = OrderedDict()
    # This one (if it exists in the pedigree)
try:
filtered.update({ancestor: pedigree[ancestor]})
except KeyError:
pass
    # Looking for offspring: all individuals that have this one as a parent
for individual, parents in pedigree.items():
if ancestor in parents:
# Doing this so the order is ancestor first
new_filtered = _filter_ancestor(individual, pedigree)
new_filtered.update(filtered)
filtered = new_filtered
return filtered
def filter_ancestor(ancestor, pedigree: dict):
filtered = _filter_ancestor(ancestor, pedigree)
# Removing the ancestor pointing to its parents.
try:
del filtered[ancestor]
except KeyError:
pass
return filtered
def filter_pedigree_of(individual, pedigree: dict):
parents = []
# This one
try:
parents = pedigree[individual]
except KeyError:
pass
filtered = OrderedDict({individual: parents})
# Add the pedigree of the parents
for parent in parents:
filtered.update(filter_pedigree_of(parent, pedigree))
return filtered
def read_pedigree(filename: str) -> dict:
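    """Reads a pedigree file into an ordered dict of individual -> parents.

    Lines starting with '!', '#', '@' or a space are skipped; each remaining
    line lists an individual followed by up to two parents, with '?' or 'x'
    standing for an unknown parent.
    """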
with open(filename, 'r') as sped:
pedigree = OrderedDict()
for line in sped:
sline = line.rstrip()
if sline != '':
if (sline[0] != '!') and (sline[0] != '#') and (sline[0] != ' ') and (sline[0] != '@'):
elements = sline.split()
if len(elements) > 0:
pedigree[elements[0]] = []
if len(elements) > 1:
if (elements[1][0] != '?') and (elements[1][0] != 'x'):
pedigree[elements[0]].append(elements[1])
if len(elements) > 2:
if (elements[2][0] != '?') and (elements[2][0] != 'x'):
pedigree[elements[0]].append(elements[2])
return pedigree
if __name__ == '__main__':
in_filename = '/home/bert/Documents/Dropbox/banded.txt'
out_filename = None
ancestor = None
pedigree_of = None
argument_mode = ""
print("Arguments passed: %s" % (sys.argv[1:]))
for argument in sys.argv[1:]:
if argument_mode != "--" and argument.startswith("-"):
if argument == "--":
argument_mode = "--"
elif argument.lower() in ["-h", "--help"]:
help()
elif argument.lower() in ["-f", "--file"]:
argument_mode = "file"
elif argument.lower() in ["-o", "--out"]:
argument_mode = "out"
elif argument.lower() in ["-c", "--center"]:
argument_mode = "center"
elif argument.lower() in ["-a", "--ancestor"]:
argument_mode = "ancestor"
elif argument.lower() in ["-p", "--pedigree", "--pedigree-of"]:
argument_mode = "pedigree-of"
else:
help(1)
else:
if not argument_mode or argument_mode == "--":
in_filename = argument
elif argument_mode == "file":
in_filename = argument
elif argument_mode == "out":
out_filename = argument
elif argument_mode == "center":
ancestor = argument
pedigree_of = argument
elif argument_mode == "ancestor":
ancestor = argument
elif argument_mode == "pedigree-of":
pedigree_of = argument
        # Reset the argument mode, unless in "--" mode
        if argument_mode != "--":
            argument_mode = ""
if not out_filename:
out_filename = "%s.dot" % (in_filename.rsplit('.', 1)[0])
    print('Transforming pedigree file %s to dot file %s' % (in_filename, out_filename))
print(ancestor)
print(pedigree_of)
# Reading pedigree and filtering
pedigree = None
full_pedigree = read_pedigree(in_filename)
if pedigree_of:
pedigree = filter_pedigree_of(pedigree_of, full_pedigree)
print('Filtered out the pedigree of %s (#%s) from the full pedigree (#%s).' % (pedigree_of, len(pedigree), len(full_pedigree)))
if ancestor:
if pedigree:
pedigree.update(filter_ancestor(ancestor, full_pedigree))
print('Added the offsprings of ancestor %s as well (#%s).' % (ancestor, len(pedigree)))
else:
pedigree = filter_ancestor(ancestor, full_pedigree)
print('Filtered out the offsprings of ancestor %s (#%s) from the full pedigree (#%s).' % (ancestor, len(pedigree), len(full_pedigree)))
if not pedigree:
pedigree = full_pedigree
# Writing the dot file
write_dot(out_filename, pedigree)
print("To generate a png from the dotfile, you could do:")
print("\tdot -O -Tpng %s" % (out_filename))
|
{
"content_hash": "2ff18b9e9aab3fd3dca9db8520146e09",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 147,
"avg_line_length": 40.70860927152318,
"alnum_prop": 0.5566943224337075,
"repo_name": "BertRaeymaekers/scrapbook",
"id": "7c58f017b38d927bdd78a58360e4dbad523e77f6",
"size": "6167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "random_projects/pedigree_scripts/pedtodot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40923"
},
{
"name": "Roff",
"bytes": "918"
},
{
"name": "Ruby",
"bytes": "19804"
},
{
"name": "Shell",
"bytes": "8315"
},
{
"name": "Visual Basic",
"bytes": "974"
}
],
"symlink_target": ""
}
|
class Solution:
# @param {integer[]} nums
# @return {integer[]}
def productExceptSelf(self, nums):
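        # Two passes without division: left_product[i] first holds the product
        # of all elements to the left of i, then a running right_product folds
        # in the product of the elements to the right, giving O(n) time and
        # O(1) extra space besides the output array.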
if not nums:
return []
left_product = [1 for _ in xrange(len(nums))]
for i in xrange(1, len(nums)):
left_product[i] = left_product[i - 1] * nums[i - 1]
right_product = 1
for i in xrange(len(nums) - 2, -1, -1):
right_product *= nums[i + 1]
left_product[i] = left_product[i] * right_product
return left_product
|
{
"content_hash": "7acef9858c76bde037e7cfdd0f1dae60",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 30.529411764705884,
"alnum_prop": 0.5240847784200385,
"repo_name": "kamyu104/LeetCode",
"id": "02b3e1a83b09950d201763c9d4f6b64db64a8166",
"size": "989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/product-of-array-except-self.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1008761"
},
{
"name": "Go",
"bytes": "1907"
},
{
"name": "Java",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "1421980"
},
{
"name": "SQLPL",
"bytes": "822"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
}
|
import smtplib
from email.mime.text import MIMEText
class Notifier:
def __init__(self, mail_server, username, password):
self.mail_server = mail_server
self.username = username
self.password = password
def notify(self, from_addr, to_addrs, subject, body):
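        # Open an SMTP connection, upgrade it to TLS, authenticate, and send a
        # plain-text message to the given recipients.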
server = smtplib.SMTP(self.mail_server)
server.ehlo()
server.starttls()
server.login(self.username, self.password)
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = from_addr
msg['To'] = ', '.join(to_addrs)
server.sendmail(from_addr, to_addrs, msg.as_string())
server.quit()
|
{
"content_hash": "2be4cd5376277e7f83d9e23ced8cedbd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 61,
"avg_line_length": 34.421052631578945,
"alnum_prop": 0.6116207951070336,
"repo_name": "jrlusby/asai-monitor-notifications",
"id": "d0785de63022bc5df535282737ba0cc4a88838a8",
"size": "679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MonitorNotifier.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4100"
}
],
"symlink_target": ""
}
|
"""Functionality for working with probability spaces and random variables.
Basic recap of probability theory, and thus of classes in this file:
* A probability space is a (finite or infinite) set Omega with a probability
measure defined on this.
* A random variable is a mapping from a probability space to another measure
space.
* An event is a measurable set in a sample space.
For example, suppose a bag contains 3 balls: two red balls, and one white ball.
This could be represented by a discrete probability space of size 3 with
elements {1, 2, 3}, with equal measure assigned to all 3 elements; and a random
variable that maps 1->red, 2->red, and 3->white. Then the probability of drawing
a red ball is the measure in the probability space of the inverse under the
random variable mapping of {red}, i.e., of {1, 2}, which is 2/3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
# Dependency imports
import six
from six.moves import zip
import sympy
@six.add_metaclass(abc.ABCMeta)
class Event(object):
"""Represents an event in a measure space."""
@six.add_metaclass(abc.ABCMeta)
class ProbabilitySpace(object):
"""Represents a probability space."""
@abc.abstractmethod
def probability(self, event):
"""Returns the probability of an event."""
@six.add_metaclass(abc.ABCMeta)
class RandomVariable(object):
"""Random variable; a mapping from a probability space to a measure space."""
@abc.abstractmethod
def __call__(self, event):
"""Maps an `_Event` in the probability space to one in the sample space."""
@abc.abstractmethod
def inverse(self, event):
"""Maps event in the sample space back to the inverse in the prob. space."""
class DiscreteEvent(Event):
"""Set of discrete values."""
def __init__(self, values):
self._values = values
@property
def values(self):
return self._values
class FiniteProductEvent(Event):
"""Event consisting of cartesian product of events."""
def __init__(self, events):
"""Initializes a `FiniteProductEvent`.
Args:
events: Tuple of `Event`s; resulting event will be cartesian product of
these.
"""
self._events = events
@property
def events(self):
return self._events
def all_sequences(self):
"""Returns iterator of sequences by selecting a single event in each coord.
This assumes that every component event is an instance of `DiscreteEvent`.
Returns:
Iterator over tuples of values.
Raises:
ValueError: If one of the component events is not a `DiscreteEvent`.
"""
if not all(isinstance(event, DiscreteEvent) for event in self._events):
raise ValueError('Not all component events are DiscreteEvents')
values_list = [event.values for event in self._events]
return itertools.product(*values_list)
class CountLevelSetEvent(Event):
"""Event of all sequences with fixed number of different values occurring."""
def __init__(self, counts):
"""Initializes `CountLevelSetEvent`.
E.g., to construct the event of getting two red balls and one green ball,
pass `counts = {red: 2, green: 1}`. (Then `all_sequences()` would return
    `[(red, red, green), (red, green, red), (green, red, red)]`.)
Args:
counts: Dictionary mapping values to the number of times they occur in a
sequence.
"""
self._counts = counts
self._all_sequences = None
@property
def counts(self):
return self._counts
def all_sequences(self):
"""Returns all sequences generated by this level set."""
if self._all_sequences is None:
# Generate via dynamic programming.
cache = {} # dict mapping tuple -> list of tuples
labels = list(self._counts.keys())
def generate(counts):
"""Returns list of tuples for given `counts` of labels."""
if sum(counts) == 0:
return [()]
counts = tuple(counts)
if counts in cache:
return cache[counts]
generated = []
for i, count in enumerate(counts):
if count == 0:
continue
counts_minus = list(counts)
counts_minus[i] -= 1
counts_minus = tuple(counts_minus)
extensions = generate(counts_minus)
generated += [tuple([labels[i]] + list(extension))
for extension in extensions]
cache[counts] = generated
return generated
self._all_sequences = generate(list(self._counts.values()))
return self._all_sequences
class SequenceEvent(Event):
"""Collection of sequences."""
def __init__(self, sequences):
self._sequences = sequences
def all_sequences(self):
return self._sequences
def normalize_weights(weights):
"""Normalizes the weights (as sympy.Rational) in dictionary of weights."""
weight_sum = sum(six.itervalues(weights))
return {
i: sympy.Rational(weight, weight_sum)
for i, weight in six.iteritems(weights)
}
class DiscreteProbabilitySpace(ProbabilitySpace):
"""Discrete probability space."""
def __init__(self, weights=None):
"""Initializes an `DiscreteProbabilitySpace`.
Args:
weights: Dictionary mapping values to relative probability of selecting
that value. This will be normalized.
"""
self._weights = normalize_weights(weights)
def probability(self, event):
if isinstance(event, DiscreteEvent):
return sum(self._weights[value]
for value in event.values if value in self._weights)
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
@property
def weights(self):
"""Returns dictionary of probability of each element."""
return self._weights
class FiniteProductSpace(ProbabilitySpace):
"""Finite cartesian product of probability spaces."""
def __init__(self, spaces):
"""Initializes a `FiniteProductSpace`.
Args:
spaces: List of `ProbabilitySpace`.
"""
self._spaces = spaces
def all_spaces_equal(self):
return all([self._spaces[0] == space for space in self._spaces])
def probability(self, event):
# Specializations for optimization.
if isinstance(event, FiniteProductEvent):
assert len(self._spaces) == len(event.events)
return sympy.prod([
space.probability(event_slice)
for space, event_slice in zip(self._spaces, event.events)])
if isinstance(event, CountLevelSetEvent) and self.all_spaces_equal():
space = self._spaces[0]
counts = event.counts
probabilities = {
value: space.probability(DiscreteEvent({value}))
for value in six.iterkeys(counts)
}
num_events = sum(six.itervalues(counts))
assert num_events == len(self._spaces)
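      # Probability of the level set: the number of orderings with these
      # counts (the multinomial coefficient) times the probability of any one
      # ordering, i.e. the product of per-value probabilities raised to their
      # counts.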
# Multinomial coefficient:
coeff = (
sympy.factorial(num_events) / sympy.prod(
[sympy.factorial(i) for i in six.itervalues(counts)]))
return coeff * sympy.prod([
pow(probabilities[value], counts[value])
for value in six.iterkeys(counts)
])
raise ValueError('Unhandled event type {}'.format(type(event)))
@property
def spaces(self):
"""Returns list of spaces."""
return self._spaces
class SampleWithoutReplacementSpace(ProbabilitySpace):
"""Probability space formed by sampling discrete space without replacement."""
def __init__(self, weights, n_samples):
"""Initializes a `SampleWithoutReplacementSpace`.
Args:
weights: Dictionary mapping values to relative probability of selecting
that value. This will be normalized.
n_samples: Number of samples to draw.
Raises:
ValueError: If `n_samples > len(weights)`.
"""
if n_samples > len(weights):
raise ValueError('n_samples is more than number of discrete elements')
self._weights = normalize_weights(weights)
self._n_samples = n_samples
@property
def n_samples(self):
"""Number of samples to draw."""
return self._n_samples
def probability(self, event):
try:
all_sequences = event.all_sequences()
except AttributeError:
raise ValueError('Unhandled event type {}'.format(type(event)))
probability_sum = 0
for sequence in all_sequences:
if len(sequence) != len(set(sequence)):
continue # not all unique, so not "without replacement".
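      # Probability of drawing this particular ordered sequence: at each step
      # the weight of the drawn element is renormalized by the mass not yet
      # removed, hence the p / (1 - removed_prob) factor.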
p_sequence = 1
removed_prob = 0
for i in sequence:
p = self._weights[i] if i in self._weights else 0
if p == 0:
p_sequence = 0
break
p_sequence *= p / (1 - removed_prob)
removed_prob += p
probability_sum += p_sequence
return probability_sum
class IdentityRandomVariable(RandomVariable):
"""Identity map of a probability space."""
def __call__(self, event):
return event
def inverse(self, event):
return event
class DiscreteRandomVariable(RandomVariable):
"""Specialization to discrete random variable.
This is simply a mapping from a discrete space to a discrete space (dictionary
lookup).
"""
def __init__(self, mapping):
"""Initializes `DiscreteRandomVariable` from `mapping` dict."""
self._mapping = mapping
self._inverse = {}
for key, value in six.iteritems(mapping):
if value in self._inverse:
self._inverse[value].add(key)
else:
self._inverse[value] = set([key])
def __call__(self, event):
if isinstance(event, DiscreteEvent):
return DiscreteEvent({self._mapping[value] for value in event.values})
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
def inverse(self, event):
if isinstance(event, DiscreteEvent):
set_ = set()
for value in event.values:
if value in self._inverse:
set_.update(self._inverse[value])
return DiscreteEvent(set_)
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
class FiniteProductRandomVariable(RandomVariable):
"""Product random variable.
This has the following semantics. Let this be X = (X_1, ..., X_n). Then
X(w) = (X_1(w_1), ..., X_n(w_n))
(the sample space is assumed to be of sequence type).
"""
def __init__(self, random_variables):
"""Initializes a `FiniteProductRandomVariable`.
Args:
random_variables: Tuple of `RandomVariable`.
"""
self._random_variables = random_variables
def __call__(self, event):
if isinstance(event, FiniteProductEvent):
assert len(event.events) == len(self._random_variables)
zipped = list(zip(self._random_variables, event.events))
return FiniteProductEvent(
[random_variable(sub_event)
for random_variable, sub_event in zipped])
else:
raise ValueError('Unhandled event type {}'.format(type(event)))
def inverse(self, event):
# Specialization for `FiniteProductEvent`; don't need to take all sequences.
if isinstance(event, FiniteProductEvent):
assert len(event.events) == len(self._random_variables)
zipped = list(zip(self._random_variables, event.events))
return FiniteProductEvent(tuple(
random_variable.inverse(sub_event)
for random_variable, sub_event in zipped))
# Try fallback of mapping each sequence separately.
try:
all_sequences = event.all_sequences()
except AttributeError:
raise ValueError('Unhandled event type {}'.format(type(event)))
mapped = set()
for sequence in all_sequences:
assert len(sequence) == len(self._random_variables)
zipped = list(zip(self._random_variables, sequence))
mapped_sequence = FiniteProductEvent(tuple(
random_variable.inverse(DiscreteEvent({element}))
for random_variable, element in zipped))
mapped.update(mapped_sequence.all_sequences())
return SequenceEvent(mapped)
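# Illustrative sketch (not part of the original module): a DiscreteRandomVariable
# is just a dictionary lookup, so mapping die rolls to their parity and pulling an
# event back through `inverse` looks like this. The mapping is a made-up example;
# `DiscreteEvent` is the event class defined earlier in this module.
def _example_discrete_random_variable():
  parity = DiscreteRandomVariable({1: 'odd', 2: 'even', 3: 'odd', 4: 'even'})
  image = parity(DiscreteEvent({1, 2}))  # DiscreteEvent({'odd', 'even'})
  preimage = parity.inverse(DiscreteEvent({'odd'}))  # DiscreteEvent({1, 3})
  return image, preimage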
|
{
"content_hash": "4fa13e7695f3023140908dd124a46741",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 80,
"avg_line_length": 30.43222506393862,
"alnum_prop": 0.6638372972518699,
"repo_name": "deepmind/mathematics_dataset",
"id": "3016d0523306bebdd9671a104be8c6da51a61170",
"size": "12495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mathematics_dataset/util/probability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "262788"
}
],
"symlink_target": ""
}
|
"""
WSGI config for unesco project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "unesco.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "ac65ee3d77ae7aeb4325d81b172f1799",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.642857142857142,
"alnum_prop": 0.772609819121447,
"repo_name": "robjordan/unesco_project",
"id": "9f8cc419a03add3f99dd5163c0bf81aa7d143d24",
"size": "387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unesco/unesco/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "366"
},
{
"name": "CSS",
"bytes": "1439"
},
{
"name": "HTML",
"bytes": "16709"
},
{
"name": "JavaScript",
"bytes": "949"
},
{
"name": "Python",
"bytes": "47997"
}
],
"symlink_target": ""
}
|
"""Implements the wiki WSGI application which dispatches requests to
specific wiki pages and actions.
"""
from os import path
from sqlalchemy import create_engine
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.utils import redirect
from werkzeug.wsgi import ClosingIterator
from . import actions
from .database import metadata
from .database import session
from .specialpages import page_not_found
from .specialpages import pages
from .utils import href
from .utils import local
from .utils import local_manager
from .utils import Request
#: path to shared data
SHARED_DATA = path.join(path.dirname(__file__), "shared")
class SimpleWiki:
"""
Our central WSGI application.
"""
def __init__(self, database_uri):
self.database_engine = create_engine(database_uri)
        # apply our middlewares. we apply the middlewares *inside* the
# application and not outside of it so that we never lose the
# reference to the `SimpleWiki` object.
self._dispatch = SharedDataMiddleware(
self.dispatch_request, {"/_shared": SHARED_DATA}
)
# free the context locals at the end of the request
self._dispatch = local_manager.make_middleware(self._dispatch)
def init_database(self):
"""Called from the management script to generate the db."""
metadata.create_all(bind=self.database_engine)
def bind_to_context(self):
"""
Useful for the shell. Binds the application to the current active
context. It's automatically called by the shell command.
"""
local.application = self
def dispatch_request(self, environ, start_response):
"""Dispatch an incoming request."""
# set up all the stuff we want to have for this request. That is
# creating a request object, propagating the application to the
        # current context and instantiating the database session.
self.bind_to_context()
request = Request(environ)
request.bind_to_context()
# get the current action from the url and normalize the page name
# which is just the request path
action_name = request.args.get("action") or "show"
page_name = "_".join([x for x in request.path.strip("/").split() if x])
# redirect to the Main_Page if the user requested the index
if not page_name:
response = redirect(href("Main_Page"))
# check special pages
elif page_name.startswith("Special:"):
if page_name[8:] not in pages:
response = page_not_found(request, page_name)
else:
response = pages[page_name[8:]](request)
# get the callback function for the requested action from the
# action module. It's "on_" + the action name. If it doesn't
        # exist, call the missing_action method from the same module.
else:
action = getattr(actions, f"on_{action_name}", None)
if action is None:
response = actions.missing_action(request, action_name)
else:
response = action(request, page_name)
# make sure the session is removed properly
return ClosingIterator(response(environ, start_response), session.remove)
def __call__(self, environ, start_response):
"""Just forward a WSGI call to the first internal middleware."""
return self._dispatch(environ, start_response)
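# Illustrative sketch (not part of the original example): running the wiki with
# Werkzeug's built-in development server. The SQLite URI and the port are
# assumptions made only for this example.
if __name__ == "__main__":
    from werkzeug.serving import run_simple
    app = SimpleWiki("sqlite:////tmp/simplewiki.db")
    app.init_database()  # create any missing tables
    run_simple("localhost", 5000, app, use_reloader=True)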
|
{
"content_hash": "df4a15b781f047bacc8c780c49611de5",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 81,
"avg_line_length": 37.223404255319146,
"alnum_prop": 0.6561874821377537,
"repo_name": "fkazimierczak/werkzeug",
"id": "e6efb4a5596a9b6a480ee69214caeac1f2a26890",
"size": "3499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/simplewiki/application.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6705"
},
{
"name": "HTML",
"bytes": "124"
},
{
"name": "JavaScript",
"bytes": "10524"
},
{
"name": "Python",
"bytes": "1136488"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy import sparse
def build_projection_operator(l_x, n_dir, mask=None, exclude_diags=False):
X, Y = _generate_center_coordinates(l_x)
orig = X.min()
if mask is not None:
X = X[mask]
Y = Y[mask]
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
# Remove directions corresponding to the diagonals -- for these
# directions, the 0/1 weights of pixels are a very bad approximation
# of a real projection (some interpolation should be done)
if exclude_diags:
inds = [n_dir / 4, 3 * n_dir / 4]
angles = angles[np.setdiff1d(range(n_dir), inds)]
data_inds, detector_inds = [], []
# Indices for data pixels. For each data, one data pixel
# will contribute to the value of two detector pixels.
if mask is None:
data_unravel_indices = np.arange(l_x ** 2, dtype=np.int32)
else:
data_unravel_indices = np.arange(mask.sum(), dtype=np.int32)
for i, angle in enumerate(angles):
# rotate data pixels centers
Xrot = np.cos(angle) * X + np.sin(angle) * Y
# compute linear interpolation weights
#inds = _weights_nn(Xrot, dx=1, orig=X.min() - 0.5)
inds = _weights_nn(Xrot, dx=1, orig=orig - 0.5)
# crop projections outside the detector
mask_detect = np.logical_and(inds >= 0, inds < l_x)
detector_inds.append((inds[mask_detect] + i * l_x).astype(np.int32))
data_inds.append(data_unravel_indices[mask_detect])
detector_inds = np.concatenate(detector_inds)
data_inds = np.concatenate(data_inds)
weights = np.ones(len(data_inds), dtype=np.uint16)
proj_operator = sparse.coo_matrix((weights, (detector_inds, data_inds)),
shape=(l_x * n_dir, mask.sum()))
return sparse.csr_matrix(proj_operator)
def _generate_center_coordinates(l_x):
"""
Compute the coordinates of pixels centers for an image of
linear size l_x
"""
l_x = float(l_x)
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def _weights_nn(x, dx=1, orig=0, ravel=True):
"""
Nearest-neighbour interpolation
"""
if ravel:
x = np.ravel(x)
floor_x = np.floor(x - orig)
return floor_x #.astype(np.uint16)
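# Illustrative sketch (not part of the original module): building the operator
# for a small image restricted to a circular mask. The sizes and number of
# directions are made-up example values; note that `build_projection_operator`
# as written above expects a mask, since it uses `mask.sum()` for the shape.
def _example_build_operator():
    l_x, n_dir = 32, 16
    X, Y = _generate_center_coordinates(l_x)
    mask = X ** 2 + Y ** 2 < (l_x / 2.) ** 2  # keep only pixels inside a disc
    op = build_projection_operator(l_x, n_dir, mask=mask)
    # op is a sparse CSR matrix of shape (l_x * n_dir, mask.sum())
    return op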
|
{
"content_hash": "ecb981981628fd9f8a2faa35aa3c83e8",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 36.84126984126984,
"alnum_prop": 0.6105127100387764,
"repo_name": "eddam/bp-for-tomo",
"id": "52059a75d7ffa26d7cd546249ad9f123ed26c485",
"size": "2321",
"binary": false,
"copies": "1",
"ref": "refs/heads/demo",
"path": "bptomo/build_projection_operator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "397850"
},
{
"name": "C++",
"bytes": "662"
},
{
"name": "Python",
"bytes": "58730"
}
],
"symlink_target": ""
}
|
'''
Created on Mar 29, 2017
a one-off script to update the CCLE GCS paths in the production database
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from datetime import date
import json
import logging
import sys
from bq_wrapper import fetch_paged_results, query_bq_table
from isbcgc_cloudsql_model import ISBCGC_database_helper as helper
from util import close_log, create_log
def main(config_file_name):
log = None
try:
with open(config_file_name) as configFile:
config = json.load(configFile)
log_dir = str(date.today()).replace('-', '_') + '_' + 'ccle/'
log_name = create_log(log_dir, 'update_ccle_gcs_paths')
log = logging.getLogger(log_name)
log.info('begin updating CCLE paths in production')
# first thing to do is to read in the file paths from BigQuery
query = 'SELECT file_gdc_id, file_gcs_url ' \
'FROM [isb-cgc:GDC_metadata.GDCfileID_to_GCSurl] ' \
'where 0 < instr(file_gcs_url, \'CCLE\')'
query_results = query_bq_table(query, True, 'isb-cgc', log)
_, rows, _ = fetch_paged_results(query_results, 2000, None, None, log)
log.info('\tcreate map of filename to path')
name2path = {}
for row in rows:
fields = row[1].split('/')
name2path[fields[-1]] = '/'.join(fields[3:])
log.info('\tfinished map of filename to path')
# get the db rows from production cloudsql
log.info('\tselect ccle filenames from cloudsql')
query = 'SELECT datafilename ' \
'FROM main.metadata_data ' \
'where 0 < instr(datafilename, \'bam\') and project = \'CCLE\''
rows = helper.select(config, query, log, [])
log.info('\tselected %s ccle filenames from cloudsql' % (len(rows)))
# now setup and do the update of paths in cloud sql
log.info('\tstart updating paths in cloudsql')
params = []
not_matched = []
for row in rows:
if row[0] in name2path:
params += [[name2path[row[0]], row[0]]]
else:
not_matched += [row[0]]
update = 'update main.metadata_data set datafilenamekey = %s where datafilename = %s'
helper.update(config, update, log, params)
log.info('\tcompleted update of paths in cloudsql. updated %d, did not find matches from BQ in cloudsql for %s' % (len(params), ', '.join(not_matched)))
log.info('finished updating CCLE paths in production')
except:
if log:
log.exception('failed to update ccle GCS filepaths')
finally:
if log:
close_log(log)
if __name__ == '__main__':
main(sys.argv[1])
|
{
"content_hash": "c41398975530754bab1ec27cb8ab4a70",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 160,
"avg_line_length": 38.95402298850575,
"alnum_prop": 0.6096193567424019,
"repo_name": "isb-cgc/ISB-CGC-data-proc",
"id": "77e1fbe9dd88f1698e481754ff44d07adf97f571",
"size": "3389",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gdc/main/update_ccle_gcs_paths.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "1169886"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
}
|
"""Contains an abstract base class for protocol messages."""
__author__ = 'robinson@googlepb.com (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
def ParseFromString(self, serialized):
"""Like MergeFromString(), except we clear the object first."""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self):
"""Serializes the protocol message to a binary string.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A singular field is non-empty
if HasField() would return true, and a repeated field is non-empty if
it contains at least one element. The fields are ordered by field
number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message. Note if the
field_name is not defined in the message descriptor, ValueError will be
raised."""
raise NotImplementedError
def ClearField(self, field_name):
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
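# Illustrative sketch (not part of the original module): concrete subclasses are
# generated by the protocol compiler, so the Person class and its fields below
# are hypothetical. The point is only to show the Message-level API above in use.
#
# from example_pb2 import Person          # hypothetical generated module
# original = Person()
# original.name = "Ada"                   # hypothetical field on Person
# clone = Person()
# clone.CopyFrom(original)                # Clear() followed by MergeFrom()
# data = original.SerializeToString()     # raises EncodeError if uninitialized
# parsed = Person()
# parsed.ParseFromString(data)            # Clear() followed by MergeFromString()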
|
{
"content_hash": "6d6cf6a60661b1d8e8a107c5ee37c07b",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 34.073170731707314,
"alnum_prop": 0.7140300644237653,
"repo_name": "beschulz/ved-decoder",
"id": "0911a7af11341741c8d2eb30ec798e1a68d72e51",
"size": "10138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/googlepb/protobuf/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "273876"
}
],
"symlink_target": ""
}
|
import zmarkdown
from zmarkdown import inlinepatterns, util, Extension
from zmarkdown.blockprocessors import BlockProcessor
import re
class MathJaxPattern(inlinepatterns.Pattern):
def __init__(self):
inlinepatterns.Pattern.__init__(self, r'(?<!\\)\$([^\n]+?)(?<!\\)\$')
def handleMatch(self, m):
node = util.etree.Element('span')
node.text = util.AtomicString("$" + m.group(2) + "$")
return node
class MathJaxBlock(BlockProcessor):
def __init__(self, parser):
BlockProcessor.__init__(self, parser)
self.re = re.compile(r'(?:^|\n)\$\$.+\$\$(\n|$)', re.DOTALL | re.MULTILINE | re.UNICODE)
def test(self, parent, block):
return self.re.search(block)
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.re.search(block)
before = block[:m.start()]
after = block[m.end():]
block = block[m.start():m.end()]
if before:
self.parser.parseBlocks(parent, [before])
dnode = util.etree.SubElement(parent, 'div')
dnode.set('class', "mathjax-wrapper")
node = zmarkdown.util.etree.SubElement(dnode, "mathjax")
node.text = zmarkdown.util.AtomicString(block.strip())
if after:
blocks.insert(0, after)
class MathJaxExtension(Extension):
def extendZMarkdown(self, md, md_globals):
# Needs to come before escape matching because \ is pretty important in LaTeX
md.inlinePatterns.add('mathjax', MathJaxPattern(), '<escape')
md.parser.blockprocessors.add('mathjax', MathJaxBlock(md.parser), '>reference')
def makeExtension(*args, **kwargs):
return MathJaxExtension(*args, **kwargs)
|
{
"content_hash": "265c8a20140413275ddb4f53d601aee3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 96,
"avg_line_length": 32.71153846153846,
"alnum_prop": 0.6231628453850676,
"repo_name": "zestedesavoir/Python-ZMarkdown",
"id": "7f57129c78a5bc46818c72edee151c035db3e3d6",
"size": "1701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-zds",
"path": "zmarkdown/extensions/mathjax.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "608664"
},
{
"name": "Makefile",
"bytes": "943"
},
{
"name": "Python",
"bytes": "266528"
},
{
"name": "Shell",
"bytes": "912"
}
],
"symlink_target": ""
}
|
from utils import CSVScraper
class KitchenerPersonScraper(CSVScraper):
# http://open-kitchenergis.opendata.arcgis.com/datasets/aa7c40a2bb5c4c95b3a373ff23844aab
csv_url = 'https://app2.kitchener.ca/appdocs/opendata/staticdatasets/Elected_Officials.csv'
|
{
"content_hash": "2648b1b5772f7744b7dd24e0b8b79afd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 95,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.8129770992366412,
"repo_name": "opencivicdata/scrapers-ca",
"id": "83ac43c81c335f6b2bd44f1dafbceacf2c55e8a9",
"size": "262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ca_on_kitchener/people.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "832"
},
{
"name": "Python",
"bytes": "374889"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
}
|
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, u'='), value])
def LParen():
return Leaf(token.LPAR, u"(")
def RParen():
return Leaf(token.RPAR, u")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = u" "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, u",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, u".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.prefix = prefix
return node
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, u"\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, u"")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, u'['),
index_node,
Leaf(token.RBRACE, u']')])
def String(string, prefix=None):
"""A string leaf"""
return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.prefix = u""
fp.prefix = u" "
it.prefix = u" "
for_leaf = Leaf(token.NAME, u"for")
for_leaf.prefix = u" "
in_leaf = Leaf(token.NAME, u"in")
in_leaf.prefix = u" "
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.prefix = u" "
if_leaf = Leaf(token.NAME, u"if")
if_leaf.prefix = u" "
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, u"["),
inner,
Leaf(token.RBRACE, u"]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, u'from'),
Leaf(token.NAME, package_name, prefix=u" "),
Leaf(token.NAME, u'import', prefix=u" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
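# Illustrative sketch (not part of the original module): the macros above compose
# into larger nodes. Building the call ``sorted(items)`` as a pytree node, for
# example, looks like this; the identifier names are made-up examples.
def _example_build_call():
    func = Name(u"sorted")
    args = [Name(u"items")]
    return Call(func, args)  # power< 'sorted' trailer< '(' 'items' ')' > >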
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == u"("
and node.children[2].value == u")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == u"["
and node.children[-1].value == u"]")
###########################################################
### Misc
###########################################################
def parenthesize(node):
return Node(syms.atom, [LParen(), node, RParen()])
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
    use this to iterate over all objects in the chain. Iteration is
    terminated when getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
        of it is being iterable (i.e., it doesn't matter if it returns a list
        or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
def is_probably_builtin(node):
"""
Check that something isn't an attribute or function name etc.
"""
prev = node.prev_sibling
if prev is not None and prev.type == token.DOT:
# Attribute lookup.
return False
parent = node.parent
if parent.type in (syms.funcdef, syms.classdef):
return False
if parent.type == syms.expr_stmt and parent.children[0] is node:
# Assignment.
return False
if parent.type == syms.parameters or \
(parent.type == syms.typedargslist and (
(prev is not None and prev.type == token.COMMA) or
parent.children[0] is node
)):
# The name of an argument.
return False
return True
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
assert node.parent, "Tree is insane! root found before "\
"file_input node was found."
node = node.parent
return node
def does_tree_import(package, name, node):
""" Returns true if name is imported from package at the
top level of the tree which node belongs to.
To cover the case of an import like 'import foo', use
None for the package and 'foo' for the name. """
binding = find_binding(name, find_root(node), package)
return bool(binding)
def is_import(node):
"""Returns true if the node is an import statement."""
return node.type in (syms.import_name, syms.import_from)
def touch_import(package, name, node):
""" Works like `does_tree_import` but adds an import statement
if it was not imported. """
def is_import_stmt(node):
return node.type == syms.simple_stmt and node.children and \
is_import(node.children[0])
root = find_root(node)
if does_tree_import(package, name, root):
return
# figure out where to insert the new import. First try to find
# the first import and then skip to the last one.
insert_pos = offset = 0
for idx, node in enumerate(root.children):
if not is_import_stmt(node):
continue
for offset, node2 in enumerate(root.children[idx:]):
if not is_import_stmt(node2):
break
insert_pos = idx + offset
break
# if there are no imports where we can insert, find the docstring.
# if that also fails, we stick to the beginning of the file
if insert_pos == 0:
for idx, node in enumerate(root.children):
if node.type == syms.simple_stmt and node.children and \
node.children[0].type == token.STRING:
insert_pos = idx + 1
break
if package is None:
import_ = Node(syms.import_name, [
Leaf(token.NAME, u'import'),
Leaf(token.NAME, name, prefix=u' ')
])
else:
import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u' ')])
children = [import_, Newline()]
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
be returned.
See test cases for examples."""
for child in node.children:
ret = None
if child.type == syms.for_stmt:
if _find(name, child.children[1]):
return child
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type in (syms.if_stmt, syms.while_stmt):
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type == syms.try_stmt:
n = find_binding(name, make_suite(child.children[2]), package)
if n:
ret = n
else:
for i, kid in enumerate(child.children[3:]):
if kid.type == token.COLON and kid.value == ":":
# i+3 is the colon, i+4 is the suite
n = find_binding(name, make_suite(child.children[i+4]), package)
if n: ret = n
elif child.type in _def_syms and child.children[1].value == name:
ret = child
elif _is_import_binding(child, name, package):
ret = child
elif child.type == syms.simple_stmt:
ret = find_binding(name, child, package)
elif child.type == syms.expr_stmt:
if _find(name, child.children[0]):
ret = child
if ret:
if not package:
return ret
if is_import(ret):
return ret
return None
_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
nodes = [node]
while nodes:
node = nodes.pop()
if node.type > 256 and node.type not in _block_syms:
nodes.extend(node.children)
elif node.type == token.NAME and node.value == name:
return node
return None
def _is_import_binding(node, name, package=None):
""" Will reuturn node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
if node.type == syms.import_name and not package:
imp = node.children[1]
if imp.type == syms.dotted_as_names:
for child in imp.children:
if child.type == syms.dotted_as_name:
if child.children[2].value == name:
return node
elif child.type == token.NAME and child.value == name:
return node
elif imp.type == syms.dotted_as_name:
last = imp.children[-1]
if last.type == token.NAME and last.value == name:
return node
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
# unicode(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and unicode(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find(u'as', n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
return node
elif n.type == syms.import_as_name:
child = n.children[2]
if child.type == token.NAME and child.value == name:
return node
elif n.type == token.NAME and n.value == name:
return node
elif package and n.type == token.STAR:
return node
return None
|
{
"content_hash": "4926d41fe81414a7b365a4aa4a03cda9",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 88,
"avg_line_length": 33.86904761904762,
"alnum_prop": 0.5575395430579965,
"repo_name": "DecipherOne/Troglodyte",
"id": "3b136694a663c2e5566118e907dd25230d8e02d2",
"size": "14225",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Trog Build Dependencies/Python26/Lib/lib2to3/fixer_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "586396"
},
{
"name": "C++",
"bytes": "697696"
},
{
"name": "CSS",
"bytes": "837"
},
{
"name": "Python",
"bytes": "14516232"
},
{
"name": "Shell",
"bytes": "127"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import argparse
import os
import update_tools
def main():
parser = argparse.ArgumentParser()
parser.add_argument("mar", metavar="MAR", help="MAR archive to (un)wrap")
parser.add_argument("dir", metavar="DIR", help="Source or destination " +
"directory for (un)wrapping MAR.")
parser.add_argument("-u", "--unwrap", dest="unwrap", action="store_true",
default=False, help="Unwrap MAR to DIR")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="Verbose (un)wrapping")
args = parser.parse_args()
if os.path.isfile(args.dir):
parser.error("Path is not a directory: %s" % args.dir)
try:
mar = update_tools.BZip2Mar(args.mar, verbose=args.verbose)
action = mar.extract if args.unwrap else mar.create
action(args.dir)
if args.unwrap:
print "Unwrapped MAR to %s" % args.dir
else:
print "Wrapped MAR to %s" % args.mar
except Exception, e:
parser.error(e)
if __name__ == "__main__":
main()
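# Illustrative usage sketch (not part of the original script); the file and
# directory names are made-up examples.
#
#   python wrap-mar.py --verbose update.mar ./staging    # wrap DIR into a MAR
#   python wrap-mar.py --unwrap update.mar ./extracted   # unwrap a MAR to DIR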
|
{
"content_hash": "18807ff1e200eca418deaffa941ce2ee",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 32.57575757575758,
"alnum_prop": 0.6130232558139534,
"repo_name": "wilebeast/FireFox-OS",
"id": "4c61a921d5e0064aa213a6d0816485649ae46405",
"size": "1942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "B2G/tools/update-tools/wrap-mar.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
import usuarios
import asignaturas
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('usuarios.urls', namespace='usuarios')),
url(r'^', include('asignaturas.urls', namespace='asignaturas')),
]
|
{
"content_hash": "b8068fd4b2e6719028798623c6c89389",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 68,
"avg_line_length": 27.571428571428573,
"alnum_prop": 0.7331606217616581,
"repo_name": "Hikasgai/HikasgaiApp",
"id": "bf05d75f995fd1a1779423bc45f6186dba8e083d",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "placeForMe/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "287202"
},
{
"name": "HTML",
"bytes": "7563"
},
{
"name": "JavaScript",
"bytes": "878949"
},
{
"name": "Python",
"bytes": "48426"
}
],
"symlink_target": ""
}
|
"""
SSL Middleware
Stephen Zabel
This middleware answers the problem of redirecting to (and from) an SSL-secured path
by stating which paths should be secured in the urls.py file. To secure a path, add the
additional view_kwarg 'SSL':True to the view_kwargs.
For example
urlpatterns = patterns('some_site.some_app.views',
(r'^test/secure/$','test_secure',{'SSL':True}),
)
All paths where 'SSL':False or where the kwarg of 'SSL' is not specified are routed
to an unsecure path.
For example
urlpatterns = patterns('some_site.some_app.views',
(r'^test/unsecure1/$','test_unsecure',{'SSL':False}),
(r'^test/unsecure2/$','test_unsecure'),
)
Gotchas: Redirects should only occur during GETs; this is because
POST data will get lost in the redirect.
A major benefit of this approach is that it allows you to secure django.contrib views
and generic views without having to modify the base code or wrap the view.
This method is also better than the two alternative approaches of adding to the
settings file or using a decorator.
It is better than the tactic of creating a list of paths to secure in the settings
file, because it keeps you DRY. You are also not forced to consider all paths in a single
location. Instead you can address the security of a path in the urls file that it
is resolved in.
It is better than the tactic of using a @secure or @unsecure decorator, because
it prevents decorator build-up on your view methods. Having a bunch of decorators
makes views cumbersome to read and looks pretty redundant. Also, because all
views pass through the middleware, you can specify only the secure paths, and the
remaining paths can be assumed to be unsecure and handled by the middleware.
This package is inspired by Antonio Cavedoni's SSL Middleware
Satchmo notes:
This package has also merged the main concepts of Antonio Cavedoni's SSL Middleware,
to allow for better integration with other sites, and to easily allow admin pages to
be secured.
Lastly, we've added an optional "SSL_PORT" to be specified in the settings, for
unusual server configurations. If specified, the port will be sent with the
SSL redirect.
"""
__license__ = "Python"
__copyright__ = "Copyright (C) 2007, Stephen Zabel"
__author__ = "Stephen Zabel"
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.encoding import iri_to_uri
from satchmo_utils import request_is_secure
HTTPS_PATHS = getattr(settings, "HTTPS_PATHS", [])
SSL = 'SSL'
SSLPORT=getattr(settings, 'SSL_PORT', None)
class SSLRedirect:
def process_view(self, request, view_func, view_args, view_kwargs):
if SSL in view_kwargs:
secure = view_kwargs[SSL]
del view_kwargs[SSL]
else:
secure = False
if not secure:
for path in HTTPS_PATHS:
if request.path.startswith("/%s" % path):
secure = True
break
if not secure == request_is_secure(request):
return self._redirect(request, secure)
def _redirect(self, request, secure):
if settings.DEBUG and request.method == 'POST':
raise RuntimeError(
"""Django can't perform a SSL redirect while maintaining POST data.
Please structure your views so that redirects only occur during GETs.""")
protocol = secure and "https" or "http"
host = "%s://%s" % (protocol, request.get_host())
# In certain proxying situations, we need to strip out the 443 port
        # in order to prevent infinite redirects
if not secure:
host = host.replace(':443','')
if secure and SSLPORT:
host = "%s:%s" % (host, SSLPORT)
newurl = "%s%s" % (host, iri_to_uri(request.get_full_path()))
return HttpResponseRedirect(newurl)
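# Illustrative sketch (not part of the original module): wiring the middleware up
# in settings.py. The paths and port are made-up example values; HTTPS_PATHS and
# SSL_PORT are the optional settings read at the top of this module, and the
# dotted middleware path assumes the Satchmo layout used in this repository.
#
# MIDDLEWARE_CLASSES += (
#     'satchmo_store.shop.SSLMiddleware.SSLRedirect',
# )
# HTTPS_PATHS = ['checkout', 'accounts']  # always force these prefixes to SSL
# SSL_PORT = 8443                         # only needed for unusual server setups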
|
{
"content_hash": "c2f2957c3593b29e5107fc0a704b6b40",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 85,
"avg_line_length": 37.04807692307692,
"alnum_prop": 0.697119127952245,
"repo_name": "DrOctogon/Satchmo",
"id": "162ef6c6075c26c6eca567d74de7fd3173ebb627",
"size": "3853",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "satchmo/apps/satchmo_store/shop/SSLMiddleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "26551"
},
{
"name": "JavaScript",
"bytes": "73019"
},
{
"name": "Python",
"bytes": "1911149"
}
],
"symlink_target": ""
}
|
"""
@package ion.services.mi.test.test_zmq_driver_process
@file ion/services/mi/test_zmq_driver_process.py
@author Edward Hunter
@brief Test cases for ZmqDriverProcess processes.
"""
__author__ = 'Edward Hunter'
__license__ = 'Apache 2.0'
from gevent import monkey; monkey.patch_all()
import time
import unittest
import logging
from nose.plugins.attrib import attr
from mi.core.instrument.zmq_driver_client import ZmqDriverClient
from mi.core.instrument.zmq_driver_process import ZmqDriverProcess
import mi.core.mi_logger
from mi.core.unit_test import MiTestCase
mi_logger = logging.getLogger('mi_logger')
# Make tests verbose and provide stdout
# bin/nosetests -s -v ion/services/mi/test/test_zmq_driver_process.py
@attr('UNIT', group='mi')
class TestZmqDriverProcess(MiTestCase):
"""
Unit tests for ZMQ driver process.
"""
def setUp(self):
"""
Setup test cases.
"""
# Zmq parameters used by driver process and client.
self.host = 'localhost'
self.cmd_port = 5556
self.evt_port = 5557
# Driver module parameters.
self.dvr_mod = 'ion.services.mi.drivers.sbe37.sbe37_driver'
self.dvr_cls = 'SBE37Driver'
# Add cleanup handler functions.
# self.addCleanup()
def test_driver_process(self):
"""
Test driver process launch and comms.
"""
"""
driver_process = ZmqDriverProcess.launch_process(5556, 5557,
'ion.services.mi.drivers.sbe37.sbe37_driver', 'SBE37Driver')
driver_client = ZmqDriverClient('localhost', 5556, 5557)
driver_client.start_messaging()
time.sleep(3)
reply = driver_client.cmd_dvr('process_echo', data='test 1 2 3')
self.assertIsInstance(reply, dict)
self.assertTrue('cmd' in reply)
self.assertTrue('args' in reply)
self.assertTrue('kwargs' in reply)
self.assertTrue(reply['cmd'] == 'process_echo')
self.assertTrue(reply['args'] == ())
self.assertIsInstance(reply['kwargs'], dict)
self.assertTrue('data' in reply['kwargs'])
self.assertTrue(reply['kwargs']['data'], 'test 1 2 3')
reply = driver_client.cmd_dvr('process_echo',
data='zoom zoom boom boom')
self.assertIsInstance(reply, dict)
self.assertTrue('cmd' in reply)
self.assertTrue('args' in reply)
self.assertTrue('kwargs' in reply)
self.assertTrue(reply['cmd'] == 'process_echo')
self.assertTrue(reply['args'] == ())
self.assertIsInstance(reply['kwargs'], dict)
self.assertTrue('data' in reply['kwargs'])
self.assertTrue(reply['kwargs']['data'], 'test 1 2 3')
events = ['I am event number 1!', 'And I am event number 2!']
reply = driver_client.cmd_dvr('test_events', events=events)
self.assertEqual(reply, 'test_events')
time.sleep(3)
self.assertTrue(driver_client.events, events)
driver_client.done()
"""
pass
def test_number_2(self):
"""
"""
pass
|
{
"content_hash": "13410046183fc5c5df3984c17cac7229",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 84,
"avg_line_length": 30.634615384615383,
"alnum_prop": 0.6111111111111112,
"repo_name": "ronkyo/mi-instrument",
"id": "e805c2f6c7cdd917cbf73d2283f53533183aac33",
"size": "3209",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/core/instrument/test/test_zmq_driver_process.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6398834"
}
],
"symlink_target": ""
}
|
import sys
import os
from optparse import OptionParser
from dcs import base
def main():
usage = 'usage: %prog [--validate] --source=<path to the directory ' + \
'with attributes files> --output=<output directory>'
parser = OptionParser(usage=usage)
parser.add_option('--source', dest='source',
help='Path to directory containing attributes files')
parser.add_option('--output', dest='output',
help='Output directory')
parser.add_option('--validate', dest='validate', action='store_true',
help='Validate the JSON attribute files')
(options, args) = parser.parse_args()
if not options.validate and not (options.source and options.output):
parser.print_usage()
sys.exit(1)
if not os.path.exists(options.source):
raise Exception('Source directory must exist')
if options.validate:
base.runit(options.source, None, dry_run=True)
else:
if not os.path.exists(options.output):
os.makedirs(options.output)
base.runit(options.source, options.output)
if __name__ == '__main__':
main()
|
{
"content_hash": "7fa828b29b124bbab31fedebb77fef68",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 32.22222222222222,
"alnum_prop": 0.625,
"repo_name": "racker/python-dcs",
"id": "2321d744d580dc0da09695658e04f6a2499a8942",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/dcs-builder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16797"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth.models import User
from emailconfirmation.models import EmailAddress
from friends.models import *
try:
from notification import models as notification
except ImportError:
notification = None
class UserForm(forms.Form):
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(UserForm, self).__init__(*args, **kwargs)
class JoinRequestForm(forms.Form):
email = forms.EmailField(label="Email", required=True, widget=forms.TextInput(attrs={'size':'30'}))
message = forms.CharField(label="Message", required=False, widget=forms.Textarea(attrs = {'cols': '30', 'rows': '5'}))
def clean_email(self):
# @@@ this assumes email-confirmation is being used
self.existing_users = EmailAddress.objects.get_users_for(self.cleaned_data["email"])
if self.existing_users:
raise forms.ValidationError(u"Someone with that email address is already here.")
return self.cleaned_data["email"]
def save(self, user):
join_request = JoinInvitation.objects.send_invitation(user, self.cleaned_data["email"], self.cleaned_data["message"])
user.message_set.create(message="Invitation to join sent to %s" % join_request.contact.email)
return join_request
class InviteFriendForm(UserForm):
to_user = forms.CharField(widget=forms.HiddenInput)
message = forms.CharField(label="Message", required=False, widget=forms.Textarea(attrs = {'cols': '20', 'rows': '5'}))
def clean_to_user(self):
to_username = self.cleaned_data["to_user"]
try:
User.objects.get(username=to_username)
except User.DoesNotExist:
raise forms.ValidationError(u"Unknown user.")
return self.cleaned_data["to_user"]
def clean(self):
to_user = User.objects.get(username=self.cleaned_data["to_user"])
previous_invitations_to = FriendshipInvitation.objects.filter(to_user=to_user, from_user=self.user)
if previous_invitations_to.count() > 0:
raise forms.ValidationError(u"Already requested friendship with %s" % to_user.username)
# check inverse
previous_invitations_from = FriendshipInvitation.objects.filter(to_user=self.user, from_user=to_user)
if previous_invitations_from.count() > 0:
raise forms.ValidationError(u"%s has already requested friendship with you" % to_user.username)
return self.cleaned_data
def save(self):
to_user = User.objects.get(username=self.cleaned_data["to_user"])
message = self.cleaned_data["message"]
invitation = FriendshipInvitation(from_user=self.user, to_user=to_user, message=message, status=2)
invitation.save()
if notification:
notification.send([to_user], "friends_invite", {"invitation": invitation})
notification.send([self.user], "friends_invite_sent", {"invitation": invitation})
self.user.message_set.create(message="Friendship requested with %s" % to_user.username) # @@@ make link like notification
return invitation
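# Illustrative sketch (not part of the original module): a minimal view using
# InviteFriendForm. The template name and URL name are made-up examples; the form
# takes the requesting user as its first argument (see UserForm above).
#
# def invite_friend(request):
#     form = InviteFriendForm(request.user, request.POST or None)
#     if request.method == "POST" and form.is_valid():
#         form.save()
#         return HttpResponseRedirect(reverse("friends_list"))  # hypothetical URL name
#     return render_to_response("friends/invite.html", {"form": form},
#                               context_instance=RequestContext(request))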
|
{
"content_hash": "0637b751a32bc1ff1ce1c3bcf3504355",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 129,
"avg_line_length": 44.09722222222222,
"alnum_prop": 0.6686614173228347,
"repo_name": "indro/t2c",
"id": "7bc7369b92ef09b600fe6ca27ae47086201a71b3",
"size": "3175",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/external_apps/friends/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "4084"
},
{
"name": "Assembly",
"bytes": "3294"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "C",
"bytes": "146718"
},
{
"name": "C#",
"bytes": "17611"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "CSS",
"bytes": "165869"
},
{
"name": "Clojure",
"bytes": "21964"
},
{
"name": "Common Lisp",
"bytes": "48874"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dylan",
"bytes": "683"
},
{
"name": "Emacs Lisp",
"bytes": "126207"
},
{
"name": "Erlang",
"bytes": "8972"
},
{
"name": "FORTRAN",
"bytes": "27700"
},
{
"name": "Haskell",
"bytes": "40419"
},
{
"name": "Java",
"bytes": "81362"
},
{
"name": "JavaScript",
"bytes": "75388"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Matlab",
"bytes": "469"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "778"
},
{
"name": "PHP",
"bytes": "17078"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "37504"
},
{
"name": "Python",
"bytes": "8018145"
},
{
"name": "R",
"bytes": "3468"
},
{
"name": "Ruby",
"bytes": "91230"
},
{
"name": "Scala",
"bytes": "272"
},
{
"name": "Scheme",
"bytes": "45856"
},
{
"name": "Shell",
"bytes": "117254"
},
{
"name": "Smalltalk",
"bytes": "15501"
},
{
"name": "VimL",
"bytes": "16660"
},
{
"name": "Visual Basic",
"bytes": "846"
},
{
"name": "XSLT",
"bytes": "755"
}
],
"symlink_target": ""
}
|
import traceback
from django.http import HttpResponseServerError
from django.template import loader
def server_error(request, template_name='500.html'):
template = loader.get_template(template_name)
context = {'stacktrace' : traceback.format_exc()}
return HttpResponseServerError(template.render(context))
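# Illustrative sketch (not part of the original module): Django uses this view
# when the project's root urls.py points handler500 at it, e.g.
#
#   handler500 = 'graphite.views.server_error'
#
# The dotted path assumes Graphite's webapp layout; traceback.format_exc() only
# yields a useful stacktrace because handler500 runs while the exception is
# still being handled.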
|
{
"content_hash": "2462de921f2471d463c8f93b14474984",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 58,
"avg_line_length": 34.888888888888886,
"alnum_prop": 0.7961783439490446,
"repo_name": "mcoolive/graphite-web",
"id": "e4db58af893e818e3400022cfbfb6e449a75aeb4",
"size": "314",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "webapp/graphite/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149965"
},
{
"name": "HTML",
"bytes": "21170"
},
{
"name": "JavaScript",
"bytes": "1679914"
},
{
"name": "Perl",
"bytes": "857"
},
{
"name": "Python",
"bytes": "754908"
},
{
"name": "Ruby",
"bytes": "1950"
},
{
"name": "Shell",
"bytes": "1113"
}
],
"symlink_target": ""
}
|
from setuptools import setup
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
import simples3
intro = open("README", "U").read()
usage = "\nUsage\n-----\n\n" + simples3.__doc__
changes = open("changes.rst", "U").read()
long_description = intro + usage + "\n" + changes
setup(name="simples3", version=simples3.__version__,
url="http://sendapatch.se/projects/simples3/",
author="Ludvig Ericson", author_email="ludvig@lericson.se",
description="Simple, quick Amazon AWS S3 interface",
long_description=long_description,
packages=["simples3"])
|
{
"content_hash": "d05adb9b0d9585ac79f27e9b7e781443",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 34.705882352941174,
"alnum_prop": 0.676271186440678,
"repo_name": "sirkamran32/simples3",
"id": "d5786b520db29f4e4d1bbf51ece760ff5ca43987",
"size": "613",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "51223"
}
],
"symlink_target": ""
}
|
import numpy as np
import tensorflow as tf
from elbow import Model
from elbow.elementary import Gaussian
from elbow.joint_model import BatchGenerator
from elbow.models.neural import neural_bernoulli, neural_gaussian
from util import mnist_training_data
import time
def build_vae(d_z=2, d_hidden=256, d_x=784, N=100, total_N=60000):
# MODEL
z = Gaussian(mean=0, std=1.0, shape=(N,d_z), name="z", local=True)
X = neural_bernoulli(z, d_hidden=d_hidden, d_out=d_x, name="X", local=True)
# OBSERVED DATA
x_placeholder = X.observe_placeholder()
# VARIATIONAL MODEL
q_z = neural_gaussian(X=x_placeholder, d_hidden=d_hidden, d_out=d_z, name="q_z")
z.attach_q(q_z)
jm = Model(X, minibatch_ratio = total_N/float(N))
return jm, x_placeholder
def main():
Xtrain, _, _, _ = mnist_training_data()
batchsize = 100
jm, x_batch = build_vae(N=batchsize, total_N=Xtrain.shape[0])
batches = BatchGenerator(Xtrain, batch_size=batchsize)
jm.register_feed(lambda : {x_batch: batches.next_batch()})
jm.train(steps=10000, adam_rate=0.01)
if __name__ == "__main__":
main()
|
{
"content_hash": "77a4df6465bf6627860e524a5ba08142",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 84,
"avg_line_length": 26.976190476190474,
"alnum_prop": 0.6690203000882613,
"repo_name": "davmre/elbow",
"id": "6b892cf3bee07ff77af1a5079cb92ad8d0213bbe",
"size": "1133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/vae_minibatch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "157566"
}
],
"symlink_target": ""
}
|
"""Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident, ntob, set, Timer, SetDaemonProperty
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
bus = None
"""A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine."""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
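# Illustrative sketch (editor's addition), not part of CherryPy: a plugin whose
# 'start' and 'stop' methods are auto-subscribed by subscribe() because their
# names match bus channels.  The class name is an assumption.
class _ExampleLoggingPlugin(SimplePlugin):
    def start(self):
        self.bus.log('Example plugin started.')
    def stop(self):
        self.bus.log('Example plugin stopped.')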
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
You can modify what signals your application listens for, and what it does
when it receives signals, by modifying :attr:`SignalHandler.handlers`,
a dict of {signal name: callback} pairs. The default set is::
handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
    The :func:`SignalHandler.handle_SIGHUP` method calls
:func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
if the process is daemonized, but
:func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
if the process is attached to a TTY. This is because Unix window
managers tend to send SIGHUP to terminal windows when the user closes them.
Feel free to add signals which are not available on every platform. The
:class:`SignalHandler` will ignore errors raised from attempting to register
handlers for unknown signals.
"""
handlers = {}
"""A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""
signals = {}
"""A map from signal numbers to names."""
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
if sys.platform[:4] == 'java':
del self.handlers['SIGUSR1']
self.handlers['SIGUSR2'] = self.bus.graceful
self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
"Using SIGUSR2 instead.")
self.handlers['SIGINT'] = self._jython_SIGINT_handler
self._previous_handlers = {}
def _jython_SIGINT_handler(self, signum=None, frame=None):
# See http://bugs.jython.org/issue1313
self.bus.log('Keyboard Interrupt: shutting down bus')
self.bus.exit()
def subscribe(self):
"""Subscribe self.handlers to signals."""
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
"""Unsubscribe self.handlers from signals."""
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log("Restoring %s handler to SIG_DFL." % signame)
handler = _signal.SIG_DFL
else:
self.bus.log("Restoring %s handler %r." % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log("Restored old %s handler %r, but our "
"handler was not registered." %
(signame, handler), level=30)
except ValueError:
self.bus.log("Unable to restore %s handler %r." %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, basestring):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError("No such signal: %r" % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError("No such signal: %r" % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log("Listening for %s." % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log("Caught signal %s." % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
"""Restart if daemonized, else exit."""
if os.isatty(sys.stdin.fileno()):
# not daemonized (may be foreground or background)
self.bus.log("SIGHUP caught but not daemonized. Exiting.")
self.bus.exit()
else:
self.bus.log("SIGHUP caught while daemonized. Restarting.")
self.bus.restart()
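def _example_customize_signal_handler(bus):
    # Illustrative sketch (editor's addition), not part of CherryPy: extend the
    # default handler map before subscribing.  'bus' is assumed to be a
    # wspbus.Bus instance; SIGQUIT is chosen only for illustration.
    handler = SignalHandler(bus)
    handler.handlers['SIGQUIT'] = bus.exit
    handler.subscribe()
    return handler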
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to Gavin Baker: http://antonym.org/node/100.
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
def _get_uid(self):
return self._uid
def _set_uid(self, val):
if val is not None:
if pwd is None:
self.bus.log("pwd module not available; ignoring uid.",
level=30)
val = None
elif isinstance(val, basestring):
val = pwd.getpwnam(val)[2]
self._uid = val
uid = property(_get_uid, _set_uid,
doc="The uid under which to run. Availability: Unix.")
def _get_gid(self):
return self._gid
def _set_gid(self, val):
if val is not None:
if grp is None:
self.bus.log("grp module not available; ignoring gid.",
level=30)
val = None
elif isinstance(val, basestring):
val = grp.getgrnam(val)[2]
self._gid = val
gid = property(_get_gid, _set_gid,
doc="The gid under which to run. Availability: Unix.")
def _get_umask(self):
return self._umask
def _set_umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log("umask function not available; ignoring umask.",
level=30)
val = None
self._umask = val
umask = property(_get_umask, _set_umask,
doc="""The default permission mode for newly created files and directories.
Usually expressed in octal format, for example, ``0644``.
Availability: Unix, Windows.
""")
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
os.setgroups([])
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
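def _example_drop_privileges(bus):
    # Illustrative sketch (editor's addition), not part of CherryPy: subscribe a
    # DropPrivileges plugin so a server started as root (e.g. to bind port 80)
    # drops to an unprivileged account once it is running.  The account names
    # and umask value are assumptions.
    DropPrivileges(bus, uid='nobody', gid='nogroup', umask=0o022).subscribe()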
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via::
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
    indicates if the process successfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
            self.bus.log('Already daemonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.activeCount() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
pid = os.fork()
if pid == 0:
# This is the child process. Continue.
pass
else:
# This is the first parent. Exit, now that we've forked.
self.bus.log('Forking once.')
os._exit(0)
except OSError:
# Python raises OSError rather than returning negative numbers.
exc = sys.exc_info()[1]
sys.exit("%s: fork #1 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.setsid()
# Do second fork
try:
pid = os.fork()
if pid > 0:
self.bus.log('Forking twice.')
os._exit(0) # Exit second parent
except OSError:
exc = sys.exc_info()[1]
sys.exit("%s: fork #2 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.chdir("/")
os.umask(0)
si = open(self.stdin, "r")
so = open(self.stdout, "a+")
se = open(self.stderr, "a+")
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
self.bus.log('Daemonized to PID: %s' % os.getpid())
self.finalized = True
start.priority = 65
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
open(self.pidfile, "wb").write(ntob("%s" % pid, 'utf8'))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
class PerpetualTimer(Timer):
"""A responsive subclass of threading.Timer whose run() method repeats.
Use this timer only when you really need a very interruptible timer;
this checks its 'finished' condition up to 20 times a second, which can
    result in pretty high CPU usage.
"""
def __init__(self, *args, **kwargs):
"Override parent constructor to allow 'bus' to be provided."
self.bus = kwargs.pop('bus', None)
super(PerpetualTimer, self).__init__(*args, **kwargs)
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log(
"Error in perpetual timer thread function %r." %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class BackgroundTask(SetDaemonProperty, threading.Thread):
"""A subclass of threading.Thread whose run() method repeats.
Use this class for most repeating tasks. It uses time.sleep() to wait
for each interval, which isn't very responsive; that is, even if you call
self.cancel(), you'll have to wait until the sleep() call finishes before
the thread stops. To compensate, it defaults to being daemonic, which means
it won't delay stopping the whole process.
"""
def __init__(self, interval, function, args=[], kwargs={}, bus=None):
threading.Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.running = False
self.bus = bus
# default to daemonic
self.daemon = True
def cancel(self):
self.running = False
def run(self):
self.running = True
while self.running:
time.sleep(self.interval)
if not self.running:
return
try:
self.function(*self.args, **self.kwargs)
except Exception:
if self.bus:
self.bus.log("Error in background task thread function %r."
% self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread."""
callback = None
"""The function to call at intervals."""
frequency = 60
"""The time in seconds between callback runs."""
thread = None
"""A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread."""
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own background thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = BackgroundTask(self.frequency, self.callback,
bus = self.bus)
self.thread.setName(threadname)
self.thread.start()
self.bus.log("Started monitor thread %r." % threadname)
else:
self.bus.log("Monitor thread %r already started." % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's background task thread."""
if self.thread is None:
self.bus.log("No thread running for %s." % self.name or self.__class__.__name__)
else:
if self.thread is not threading.currentThread():
name = self.thread.getName()
self.thread.cancel()
if not get_daemon(self.thread):
self.bus.log("Joining %r" % name)
self.thread.join()
self.bus.log("Stopped thread %r." % name)
self.thread = None
def graceful(self):
"""Stop the callback's background task thread and restart it."""
self.stop()
self.start()
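def _example_monitor(bus):
    # Illustrative sketch (editor's addition), not part of CherryPy: run a
    # callback every 300 seconds in its own background thread.  The callback
    # and its name are assumptions for illustration.
    def _purge_stale_sessions():
        bus.log('Purging stale sessions (example only).')
    Monitor(bus, _purge_stale_sessions, frequency=300,
            name='ExampleSessionPurge').subscribe()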
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change.
This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
if any of the files it monitors change (or is deleted). By default, the
autoreloader monitors all imported modules; you can add to the
set by adding to ``autoreload.files``::
cherrypy.engine.autoreload.files.add(myFile)
If there are imported files you do *not* wish to monitor, you can adjust the
``match`` attribute, a regular expression. For example, to stop monitoring
cherrypy itself::
cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'
Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
the autoreload plugin takes a ``frequency`` argument. The default is
1 second; that is, the autoreloader will examine files once each second.
"""
files = None
"""The set of files to poll for modifications."""
frequency = 1
"""The interval in seconds at which to poll for modified files."""
match = '.*'
"""A regular expression by which to match filenames."""
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own background task thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of sys.modules filenames to monitor."""
files = set()
for k, m in list(sys.modules.items()):
if re.match(self.match, k):
if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
f = m.__loader__.archive
else:
f = getattr(m, '__file__', None)
if f is not None and not os.path.isabs(f):
                    # ensure absolute paths so an os.chdir() in the app doesn't break me
f = os.path.normpath(os.path.join(_module__file__base, f))
files.add(f)
return files
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log("Restarting because %s changed." % filename)
self.thread.cancel()
self.bus.log("Stopped thread %r." % self.thread.getName())
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
threads = None
"""A map of {thread ident: index number} pairs."""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('start_thread', set())
self.bus.listeners.setdefault('release_thread', set())
self.bus.listeners.setdefault('stop_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = get_thread_ident()
if thread_ident not in self.threads:
# We can't just use get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = get_thread_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
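def _example_request_thread(bus, handle_request):
    # Illustrative sketch (editor's addition), not part of CherryPy: bracket the
    # work done by an HTTP request thread with the channels ThreadManager
    # listens on.  'handle_request' is an assumed callable.
    bus.publish('acquire_thread')
    try:
        return handle_request()
    finally:
        bus.publish('release_thread')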
|
{
"content_hash": "e47813d056b1579002ed154f3afe1f55",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 108,
"avg_line_length": 36.91014492753623,
"alnum_prop": 0.5741715093450604,
"repo_name": "hellsgate1001/bookit",
"id": "cfad3700723dae0165fe6b5dade0b1e052a43cec",
"size": "25468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/env/Lib/site-packages/cherrypy/process/plugins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "429638"
},
{
"name": "C++",
"bytes": "15261"
},
{
"name": "CSS",
"bytes": "258246"
},
{
"name": "JavaScript",
"bytes": "202757"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "8879326"
},
{
"name": "Shell",
"bytes": "1197"
}
],
"symlink_target": ""
}
|
def quote(str):
    ''' escape backslash, space, pipe, comma and newline '''
return None if not str else str.replace('\\', '\\\\').replace(' ','\\s').replace('|','\\v').replace(',','\\c').replace('\n','\\n')
def dequote(str):
    ''' reverse the escaping applied by quote() '''
return None if not str else str.replace('\\n','\n').replace('\\c', ',').replace('\\v', '|').replace('\\s', ' ').replace('\\\\', '\\')
def str2bool(str):
return str.lower() in [ 'true', 'yes', 'visible', 'show', '1' ]
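# Illustrative sketch (editor's addition): round-tripping a value through the
# escape helpers above; the sample string is an assumption for illustration.
if __name__ == '__main__':
    sample = 'a b,c|d\ne'
    assert dequote(quote(sample)) == sample
    assert str2bool('Yes') and not str2bool('off')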
|
{
"content_hash": "f7670f30fb548f281c6bcc3302268bdb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 137,
"avg_line_length": 46.8,
"alnum_prop": 0.5192307692307693,
"repo_name": "voc/voctomix",
"id": "639ba462b6380b4e4348666b457893a58629b676",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "vocto/command_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2621"
},
{
"name": "Dockerfile",
"bytes": "2626"
},
{
"name": "Python",
"bytes": "350063"
},
{
"name": "Shell",
"bytes": "25187"
}
],
"symlink_target": ""
}
|
from schematics.models import Model
from schematics.types import IntType, ListType, ModelType, StringType
class User(Model):
userid = IntType(required=True)
class Style(Model):
width = IntType(required=True)
height = IntType(required=True)
border_width = IntType(required=True)
border_style = StringType(required=True)
border_color = StringType(required=True)
color = StringType(required=True)
class Data(Model):
user = ModelType(User, required=True)
tags = ListType(IntType)
style = ModelType(Style, required=True)
optional = StringType(required=False)
def validate(data):
m = Data(data)
m.validate()
return m.to_primitive()
CASES = {
'default': validate
}
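# Illustrative sketch (editor's addition): exercising the benchmark entry point
# with a minimal payload; the field values below are assumptions chosen only to
# satisfy the schema defined above.
if __name__ == '__main__':
    sample = {
        'user': {'userid': 1},
        'tags': [1, 2, 3],
        'style': {'width': 100, 'height': 50, 'border_width': 1,
                  'border_style': 'solid', 'border_color': '#000',
                  'color': '#fff'},
    }
    print(CASES['default'](sample))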
|
{
"content_hash": "52a37f8a4c393dce710dbd80bb6d88e3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 22.060606060606062,
"alnum_prop": 0.7019230769230769,
"repo_name": "guyskk/validater",
"id": "cde7b5fd74c40a6d523762d85a1d8dcfe3038a66",
"size": "728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "benchmark/case_schematics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47391"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('digikey', '0009_auto_20151123_1442'),
]
operations = [
migrations.RenameField(
model_name='orders',
old_name='paied',
new_name='paid',
),
]
|
{
"content_hash": "4a10cc40d73f2697d38d804310972eb2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 47,
"avg_line_length": 20,
"alnum_prop": 0.5722222222222222,
"repo_name": "sonicyang/chiphub",
"id": "973e852abd05084a606273e01335b296ded99da8",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digikey/migrations/0010_auto_20151123_1443.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "298157"
},
{
"name": "HTML",
"bytes": "89822"
},
{
"name": "JavaScript",
"bytes": "285818"
},
{
"name": "Python",
"bytes": "89817"
}
],
"symlink_target": ""
}
|
from django import forms
from models import ShortURL, key_to_id, id_to_key
class ShortURLForm(forms.ModelForm):
key = forms.CharField(required=False)
class Meta:
model = ShortURL
fields = ('url', 'key')
def __init__(self, *args, **kwargs):
super(ShortURLForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs and kwargs['instance'].id:
self.initial['key'] = id_to_key(kwargs['instance'].id)
def clean_key(self):
try:
ShortURL.objects.get_by_key(self.cleaned_data['key'])
except ShortURL.DoesNotExist:
return self.cleaned_data['key']
else:
raise forms.ValidationError('Key is already taken')
def save(self, commit=True):
model = super(ShortURLForm, self).save(commit=False)
if self.cleaned_data.get('key'):
model.key = self.cleaned_data['key']
if commit:
model.save(force_insert=True)
return model
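def _example_create_short_url(data):
    # Illustrative sketch (editor's addition), not part of this app: validate
    # submitted data (e.g. request.POST) and persist a ShortURL, honouring an
    # optional custom key.  The function name is an assumption.
    form = ShortURLForm(data)
    if form.is_valid():
        return form.save()
    return None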
|
{
"content_hash": "6631c590b4cd9711753c90eeda6301c2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 66,
"avg_line_length": 28.4,
"alnum_prop": 0.596579476861167,
"repo_name": "butfriendly/friendly-django-urlshortener",
"id": "f699b0d4c22987f9b39879401523e4002a99e7c3",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urlshortener/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11459"
}
],
"symlink_target": ""
}
|
import base64
import mox
import webob
from nova.api.openstack.compute import servers
from nova.compute import vm_states
import nova.db
from nova import exception
from nova import flags
from nova.openstack.common import importutils
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
FAKE_UUID = fakes.FAKE_UUID
def return_server_not_found(context, uuid):
raise exception.NotFound()
def instance_update(context, instance_id, kwargs):
return fakes.stub_instance(instance_id)
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance, password):
self.instance_id = instance['uuid']
self.password = password
class ServerActionsControllerTest(test.TestCase):
def setUp(self):
super(ServerActionsControllerTest, self).setUp()
self.stubs.Set(nova.db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
self.stubs.Set(nova.db, 'instance_update', instance_update)
fakes.stub_out_glance(self.stubs)
fakes.stub_out_nw_api(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fakes.stub_out_image_service(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = importutils.import_object(service_class)
self.service.delete_all()
self.sent_to_glance = {}
fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
self.flags(allow_instance_snapshots=True,
enable_instance_password=True)
self.uuid = FAKE_UUID
self.url = '/v2/fake/servers/%s/action' % self.uuid
self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
self.controller = servers.Controller()
def test_server_change_password(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
        # note, the mock still contains the password.
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_not_a_string(self):
body = {'changePassword': {'adminPass': 1234}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_bad_request(self):
body = {'changePassword': {'pass': '12345'}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_empty_string(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': ''}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
self.assertEqual(mock_method.password, '')
def test_server_change_password_none(self):
body = {'changePassword': {'adminPass': None}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_not_found(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
req, str(utils.gen_uuid()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'reboot', fake_reboot)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
FLAGS.password_length)
self.assertEqual(robj['location'], self_href)
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(nova.db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(nova.compute.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
body = {
'rebuild': {
'imageRef': image_uuid,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(nova.db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(nova.compute.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
body = {
'rebuild': {
'imageRef': image_href,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], image_uuid)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertTrue("adminPass" not in body['server'])
self.assertEqual(robj['location'], self_href)
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'rebuild', fake_rebuild)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": metadata,
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertTrue('personality' not in body['server'])
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertTrue('adminPass' not in body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accessIP(self):
attributes = {
'access_ip_v4': '172.19.0.1',
'access_ip_v6': 'fe80::1',
}
body = {
"rebuild": {
"imageRef": self._image_href,
"accessIPv4": "172.19.0.1",
"accessIPv6": "fe80::1",
},
}
update = self.mox.CreateMockAnything()
self.stubs.Set(nova.compute.API, 'update', update)
req = fakes.HTTPRequest.blank(self.url)
context = req.environ['nova.context']
update(context, mox.IgnoreArg(),
image_ref=self._image_href,
vm_state=vm_states.REBUILDING,
task_state=None, progress=0, **attributes).AndReturn(None)
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_resize(req, FAKE_UUID, body)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'resize', fake_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(nova.compute.api.API, 'confirm_resize', cr_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
self.assertEqual(self.confirm_resize_called, True)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(nova.compute.api.API,
'confirm_resize',
confirm_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'confirm_resize',
fake_confirm_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(nova.compute.api.API,
'revert_resize',
revert_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(nova.compute.api.API, 'revert_resize', revert_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_revert_resize(req, FAKE_UUID, body)
self.assertEqual(self.revert_resize_called, True)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'revert_resize',
fake_revert_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(FLAGS.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.API, 'snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
req, FAKE_UUID, body)
class TestServerActionXMLDeserializer(test.TestCase):
def setUp(self):
super(TestServerActionXMLDeserializer, self).setUp()
self.deserializer = servers.ActionDeserializer()
def test_create_image(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
},
}
self.assertEquals(request['body'], expected)
def test_create_image_with_metadata(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="key1">value1</meta>
</metadata>
</createImage>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
"metadata": {"key1": "value1"},
},
}
self.assertEquals(request['body'], expected)
def test_change_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass="1234pass"/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "1234pass",
},
}
self.assertEquals(request['body'], expected)
def test_change_pass_no_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_change_pass_empty_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass=""/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "",
},
}
self.assertEquals(request['body'], expected)
def test_reboot(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"
type="HARD"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"reboot": {
"type": "HARD",
},
}
self.assertEquals(request['body'], expected)
def test_reboot_no_type(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"
flavorRef="http://localhost/flavors/3"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"resize": {"flavorRef": "http://localhost/flavors/3"},
}
self.assertEquals(request['body'], expected)
def test_resize_no_flavor_ref(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_confirm_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<confirmResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"confirmResize": None,
}
self.assertEquals(request['body'], expected)
def test_revert_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<revertResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"revertResize": None,
}
self.assertEquals(request['body'], expected)
def test_rebuild(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"
imageRef="http://localhost/images/1">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"name": "new-server-test",
"imageRef": "http://localhost/images/1",
"metadata": {
"My Server Name": "Apache1",
},
"personality": [
{"path": "/etc/banner.txt", "contents": "Mg=="},
],
},
}
self.assertDictMatch(request['body'], expected)
def test_rebuild_minimum(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"imageRef": "http://localhost/images/1",
},
}
self.assertDictMatch(request['body'], expected)
def test_rebuild_no_imageRef(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_rebuild_blank_name(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"
name=""/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
|
{
"content_hash": "ff509eefd07c65afe2a2000fd8b9816b",
"timestamp": "",
"source": "github",
"line_count": 892,
"max_line_length": 79,
"avg_line_length": 36.20291479820628,
"alnum_prop": 0.5529371690459233,
"repo_name": "psiwczak/openstack",
"id": "77470036102159e00f7acb3c73a924e639be6506",
"size": "32923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/test_server_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "5755682"
},
{
"name": "Shell",
"bytes": "26160"
}
],
"symlink_target": ""
}
|
""" This file describe the module configuration box that is displayed when
the user selects a module's "Edit Configuration"
"""
from PyQt4 import QtCore, QtGui
from vistrails.core.modules.module_registry import get_module_registry, \
ModuleRegistryException
from vistrails.gui.modules.module_configure import DefaultModuleConfigurationWidget
from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface
################################################################################
class QConfigurationWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setLayout(QtGui.QVBoxLayout())
self.widget = None
def setUpWidget(self, widget):
self.widget = widget
self.layout().addWidget(self.widget)
def clear(self):
""" clear() -> None
Clear and delete widget in the layout
"""
if self.widget is not None:
self.widget.setVisible(False)
self.layout().removeWidget(self.widget)
self.widget.deleteLater()
self.widget = None
def askToSaveChanges(self):
if self.widget:
return self.widget.askToSaveChanges()
def activate(self):
if self.widget:
self.widget.activate()
################################################################################
class QModuleConfiguration(QtGui.QScrollArea, QVistrailsPaletteInterface):
def __init__(self, parent=None, scene=None):
"""QModuleConfiguration(parent: QWidget) -> QModuleConfiguration
Initialize widget constraints
"""
QtGui.QScrollArea.__init__(self, parent)
self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.setWindowTitle('Module Configuration')
self.setWidgetResizable(True)
self.confWidget = QConfigurationWidget()
self.setWidget(self.confWidget)
self.module = None
self.controller = None
self.scene = scene
self.updateLocked = False
self.hasChanges = False
def set_controller(self, controller):
if self.controller == controller:
return
self.controller = controller
if self.controller is not None:
self.scene = controller.current_pipeline_scene
selected_ids = self.scene.get_selected_module_ids()
modules = [controller.current_pipeline.modules[i]
for i in selected_ids]
if len(modules) == 1:
self.updateModule(modules[0])
else:
self.updateModule(None)
else:
self.updateModule(None)
def updateModule(self, module):
if self.updateLocked: return
self.check_need_save_changes()
self.module = module
self.confWidget.setUpdatesEnabled(False)
self.confWidget.setVisible(False)
self.confWidget.clear()
if module and self.controller:
registry = get_module_registry()
getter = registry.get_configuration_widget
widgetType = None
try:
widgetType = \
getter(module.package, module.name, module.namespace)
except ModuleRegistryException:
pass
if not widgetType:
widgetType = DefaultModuleConfigurationWidget
widget = widgetType(module, self.controller)
self.confWidget.setUpWidget(widget)
self.connect(widget, QtCore.SIGNAL("doneConfigure"),
self.configureDone)
self.connect(widget, QtCore.SIGNAL("stateChanged"),
self.stateChanged)
self.confWidget.setUpdatesEnabled(True)
self.confWidget.setVisible(True)
self.hasChanges = False
# we need to reset the title in case there were changes
self.setWindowTitle("Module Configuration")
def configureDone(self):
from vistrails.gui.vistrails_window import _app
self.emit(QtCore.SIGNAL('doneConfigure'), self.module.id)
_app.notify('module_done_configure', self.module.id)
def stateChanged(self):
self.hasChanges = self.confWidget.widget.state_changed
# self.setWindowModified seems not to work here
# self.setWindowModified(self.hasChanges)
title = str(self.windowTitle())
if self.hasChanges:
if not title.endswith("*"):
self.setWindowTitle(title + "*")
else:
if title.endswith("*"):
self.setWindowTitle(title[:-1])
def lockUpdate(self):
""" lockUpdate() -> None
Do not allow updateModule()
"""
self.updateLocked = True
def unlockUpdate(self):
""" unlockUpdate() -> None
Allow updateModule()
"""
self.updateLocked = False
def closeEvent(self, event):
self.confWidget.askToSaveChanges()
event.accept()
def activate(self):
if self.isVisible() == False:
# self.toolWindow().show()
self.show()
self.activateWindow()
self.confWidget.activate()
def check_need_save_changes(self):
if self.confWidget:
self.lockUpdate()
self.confWidget.askToSaveChanges()
self.unlockUpdate()
|
{
"content_hash": "fa6a18472768d62c85d562a774237d95",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 83,
"avg_line_length": 35.416666666666664,
"alnum_prop": 0.582262443438914,
"repo_name": "celiafish/VisTrails",
"id": "e412129673f49d99e00b3eb756fa34cecceac408",
"size": "7405",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/gui/module_configuration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
import logging # noqa
import mock
import os
from oslo_config import cfg
from oslo_log import log as o_log
from oslo_utils import uuidutils
from oslotest import base
from sqlalchemy.orm import sessionmaker as sa_sessionmaker
from aim.agent.aid.universes.aci import aci_universe
from aim.agent.aid.universes.k8s import k8s_watcher
from aim import aim_manager
from aim import aim_store
from aim.api import resource
from aim.api import status as aim_status
from aim.common import utils
from aim import config as aim_cfg
from aim import context
from aim.db import api
from aim.db import hashtree_db_listener as ht_db_l
from aim.db import model_base
from aim.k8s import api_v1 as k8s_api_v1
from aim.tools.cli import shell # noqa
from aim import tree_manager
CONF = cfg.CONF
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
o_log.register_options(aim_cfg.CONF)
K8S_STORE_VENV = 'K8S_STORE'
K8S_CONFIG_ENV = 'K8S_CONFIG'
LOG = o_log.getLogger(__name__)
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def resource_equal(self, other):
if type(self) != type(other):
return False
for attr in self.identity_attributes:
if getattr(self, attr) != getattr(other, attr):
return False
for attr in self.other_attributes:
if (utils.deep_sort(getattr(self, attr, None)) !=
utils.deep_sort(getattr(other, attr, None))):
return False
return True
def requires(requirements):
def wrap(func):
def inner(self, *args, **kwargs):
diff = set(requirements) - set(self.ctx.store.features)
if diff:
self.skipTest("Store %s doesn't support required "
"features: %s" % (self.ctx.store.name, diff))
else:
func(self, *args, **kwargs)
return inner
return wrap
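# Illustrative sketch (editor's addition): how a test method might declare a
# store-feature requirement via the decorator above.  The class, feature name
# and assertion are assumptions, kept commented out so the test runner does not
# collect them.
#     class ExampleStoreTest(TestAimDBBase):
#         @requires(['hooks'])
#         def test_needs_hooks(self):
#             self.assertIn('hooks', self.ctx.store.features)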
class BaseTestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""
def config_parse(self, conf=None, args=None):
"""Create the default configurations."""
# neutron.conf.test includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', self.test_conf_file]
if conf is None:
CONF(args=args, project='aim')
else:
conf(args)
o_log.setup(cfg.CONF, 'aim')
def setUp(self):
super(BaseTestCase, self).setUp()
self.addCleanup(CONF.reset)
self.test_conf_file = etcdir('aim.conf.test')
self.config_parse()
def _check_call_list(self, expected, mocked, check_all=True):
observed = mocked.call_args_list
for call in expected:
self.assertTrue(call in observed,
msg='Call not found, expected:\n%s\nobserved:'
'\n%s' % (str(call), str(observed)))
observed.remove(call)
if check_all:
self.assertFalse(
len(observed),
msg='There are more calls than expected: %s' % str(observed))
name_to_res = {utils.camel_to_snake(x.__name__): x for x in
aim_manager.AimManager.aim_resources}
k8s_watcher_instance = None
def _k8s_post_create(self, created):
if created:
w = k8s_watcher_instance
w.klient.get_new_watch()
event = {'type': 'ADDED', 'object': created}
w.klient.watch.stream = mock.Mock(return_value=[event])
w._reset_trees = mock.Mock()
w.q.put(event)
w._persistence_loop(save_on_empty=True, warmup_wait=0)
def _k8s_post_delete(self, deleted):
if deleted:
w = k8s_watcher_instance
event = {'type': 'DELETED', 'object': deleted}
w.klient.get_new_watch()
w.klient.watch.stream = mock.Mock(return_value=[event])
w._reset_trees = mock.Mock()
w.q.put(event)
w._persistence_loop(save_on_empty=True, warmup_wait=0)
def _initialize_hooks(self):
self.old_initialize_hooks()
self.register_after_transaction_ends_callback('_catch_up_logs',
self._catch_up_logs)
def _catch_up_logs(self, added, updated, removed):
# Create new session and populate the hashtrees
session = api.get_session(autocommit=True, expire_on_commit=True,
use_slave=False)
store = aim_store.SqlAlchemyStore(session)
ht_db_l.HashTreeDbListener(
aim_manager.AimManager()).catch_up_with_action_log(store)
class TestAimDBBase(BaseTestCase):
_TABLES_ESTABLISHED = False
def setUp(self, mock_store=True):
super(TestAimDBBase, self).setUp()
self.test_id = uuidutils.generate_uuid()
aim_cfg.OPTION_SUBSCRIBER_MANAGER = None
aci_universe.ac_context = None
if not os.environ.get(K8S_STORE_VENV):
CONF.set_override('aim_store', 'sql', 'aim')
self.engine = api.get_engine()
if not TestAimDBBase._TABLES_ESTABLISHED:
model_base.Base.metadata.create_all(self.engine)
TestAimDBBase._TABLES_ESTABLISHED = True
# Uncomment the line below to log SQL statements. Additionally, to
# log results of queries, change INFO to DEBUG
#
# logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
def clear_tables():
with self.engine.begin() as conn:
for table in reversed(
model_base.Base.metadata.sorted_tables):
conn.execute(table.delete())
self.addCleanup(clear_tables)
if mock_store:
self.old_initialize_hooks = (
aim_store.SqlAlchemyStore._initialize_hooks)
aim_store.SqlAlchemyStore.old_initialize_hooks = (
self.old_initialize_hooks)
aim_store.SqlAlchemyStore._initialize_hooks = _initialize_hooks
def restore_initialize_hook():
aim_store.SqlAlchemyStore._initialize_hooks = (
self.old_initialize_hooks)
self.addCleanup(restore_initialize_hook)
aim_store.SqlAlchemyStore._catch_up_logs = _catch_up_logs
else:
CONF.set_override('aim_store', 'k8s', 'aim')
CONF.set_override('k8s_namespace', self.test_id, 'aim_k8s')
k8s_config_path = os.environ.get(K8S_CONFIG_ENV)
if k8s_config_path:
CONF.set_override('k8s_config_path', k8s_config_path,
'aim_k8s')
aim_store.K8sStore._post_delete = _k8s_post_delete
aim_store.K8sStore._post_create = _k8s_post_create
global k8s_watcher_instance
k8s_watcher_instance = k8s_watcher.K8sWatcher()
k8s_watcher_instance.event_handler = mock.Mock()
k8s_watcher_instance._renew_klient_watch = mock.Mock()
self.addCleanup(self._cleanup_objects)
self.store = api.get_store(expire_on_commit=True)
def unregister_catch_up():
self.store.unregister_after_transaction_ends_callback(
'_catch_up_logs')
self.addCleanup(unregister_catch_up)
self.ctx = context.AimContext(store=self.store)
self.cfg_manager = aim_cfg.ConfigManager(self.ctx, '')
self.tt_mgr = tree_manager.HashTreeManager()
resource.ResourceBase.__eq__ = resource_equal
self.cfg_manager.replace_all(CONF)
self.sys_id = self.cfg_manager.get_option('aim_system_id', 'aim')
def get_new_context(self):
return context.AimContext(
db_session=sa_sessionmaker(bind=self.engine)())
def set_override(self, item, value, group=None, host='', poll=False):
# Override DB config as well
if group:
CONF.set_override(item, value, group)
else:
CONF.set_override(item, value)
self.cfg_manager.override(item, value, group=group or 'default',
host=host, context=self.ctx)
if poll:
self.cfg_manager.subs_mgr._poll_and_execute()
def _cleanup_objects(self):
objs = [k8s_api_v1.Namespace(metadata={'name': self.test_id}),
k8s_api_v1.Namespace(metadata={'name': 'ns-' + self.test_id}),
k8s_api_v1.Node(metadata={'name': self.test_id})]
for obj in objs:
try:
self.ctx.store.delete(obj)
except k8s_api_v1.klient.ApiException as e:
if str(e.status) != '420':
LOG.warning("Error while cleaning %s %s: %s",
obj.kind, obj['metadata']['name'], e)
@classmethod
def _get_example_aci_object(cls, type, dn, **kwargs):
attr = {'dn': dn}
attr.update(kwargs)
return {type: {'attributes': attr}}
@classmethod
def _get_example_aim_security_group_rule(cls, **kwargs):
example = resource.SecurityGroupRule(
tenant_name='t1', security_group_name='sg1',
security_group_subject_name='sgs1', name='rule1')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aim_bd(cls, **kwargs):
example = resource.BridgeDomain(tenant_name='test-tenant',
vrf_name='default',
name='test', enable_arp_flood=False,
enable_routing=True,
limit_ip_learn_to_subnets=False,
l2_unknown_unicast_mode='proxy',
ep_move_detect_mode='')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_bd(cls, **kwargs):
example_bd = {
"fvBD": {
"attributes": {
"arpFlood": "no", "descr": "test",
"dn": "uni/tn-test-tenant/BD-test",
"epMoveDetectMode": "",
"limitIpLearnToSubnets": "no",
"llAddr": "::",
"mac": "00:22:BD:F8:19:FF",
"multiDstPktAct": "bd-flood",
"name": "test", "displayName": "",
"ownerKey": "", "ownerTag": "", "unicastRoute": "yes",
"unkMacUcastAct": "proxy", "unkMcastAct": "flood",
"vmac": "not-applicable"}}}
example_bd['fvBD']['attributes'].update(kwargs)
return example_bd
@classmethod
def _get_example_aim_netflow(cls, **kwargs):
example = resource.NetflowVMMExporterPol(name='netflow1',
dst_addr='172.28.184.76',
dst_port='2055',
ver='v9')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_netflow(cls, **kwargs):
example_netflow = {
"netflowVmmExporterPol": {
"attributes": {
"dn": "uni/infra/vmmexporterpol-netflow1",
"name": "netflow1", "displayName": "",
"dstAddr": "172.28.184.76",
"dstPort": "2055",
"srcAddr": "0.0.0.0",
"ownerKey": "", "ownerTag": "",
"ver": "v9"}}}
example_netflow['netflowVmmExporterPol']['attributes'].update(kwargs)
return example_netflow
@classmethod
def _get_example_aim_vswitch_policy_grp(cls, **kwargs):
example = resource.VmmVswitchPolicyGroup(domain_type='OpenStack',
domain_name='osd13-fab20')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_vswitch_policy_grp(cls, **kwargs):
example_vs_pol_grp = {
"vmmVSwitchPolicyCont": {
"attributes": {
"dn": "uni/vmmp-OpenStack/dom-osd13-fab20/vswitchpolcont",
"ownerKey": "", "ownerTag": ""}}}
example_vs_pol_grp['vmmVSwitchPolicyCont']['attributes'].update(kwargs)
return example_vs_pol_grp
@classmethod
def _get_example_aim_reln_netflow(cls, **kwargs):
example = resource.VmmRelationToExporterPol(domain_type='OpenStack',
domain_name='osd13-fab20',
netflow_path='uni/infra/'
'vmmexporterpol-test',
active_flow_time_out=90,
idle_flow_time_out=15,
sampling_rate=0)
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_reln_netflow(cls, **kwargs):
example_reln = {
"vmmRsVswitchExporterPol": {
"attributes": {
"dn": "uni/vmmp-OpenStack/dom-osd13-fab20/vswitchpolcont/"
"rsvswitchExporterPol-[uni/infra/vmmexporterpol-"
"test]",
"activeFlowTimeOut": 90, "idleFlowTimeOut": 15,
"samplingRate": 0, "ownerKey": "", "ownerTag": ""}}}
example_reln['vmmRsVswitchExporterPol']['attributes'].update(kwargs)
return example_reln
@classmethod
def _get_example_aci_rs_ctx(cls, **kwargs):
example_rsctx = {
"fvRsCtx": {
"attributes": {
"tnFvCtxName": "default",
"dn": "uni/tn-test-tenant/BD-test/rsctx"}}}
example_rsctx['fvRsCtx']['attributes'].update(kwargs)
return example_rsctx
@classmethod
def _get_example_aim_vrf(cls, **kwargs):
example = resource.VRF(
tenant_name='test-tenant',
name='test',
policy_enforcement_pref=resource.VRF.POLICY_ENFORCED)
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_vrf(cls, **kwargs):
example_vrf = {
"fvCtx": {
"attributes": {
"dn": "uni/tn-test-tenant/ctx-test",
"descr": "",
"knwMcastAct": "permit",
"name": "default",
"ownerKey": "",
"ownerTag": "",
"pcEnfDir": "ingress",
"pcEnfPref": "enforced"
}
}
}
example_vrf['fvCtx']['attributes'].update(kwargs)
return example_vrf
@classmethod
def _get_example_aim_app_profile(cls, **kwargs):
example = resource.ApplicationProfile(
tenant_name='test-tenant', name='test')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_app_profile(cls, **kwargs):
example_ap = {
"fvAp": {
"attributes": {
"dn": "uni/tn-test-tenant/ap-test",
"descr": ""
}
}
}
example_ap['fvAp']['attributes'].update(kwargs)
return example_ap
@classmethod
def _get_example_aim_subnet(cls, **kwargs):
example = resource.Subnet(
tenant_name='t1', bd_name='test', gw_ip_mask='10.10.10.0/28')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_subnet(cls, **kwargs):
example_sub = {
"fvSubnet": {
"attributes": {
"dn": "uni/tn-t1/BD-test/subnet-[10.10.10.0/28]",
"scope": "private"
}
}
}
example_sub['fvSubnet']['attributes'].update(kwargs)
return example_sub
@classmethod
def _get_example_aim_epg_subnet(cls, **kwargs):
example = resource.EPGSubnet(
tenant_name='t1', app_profile_name='ap1', epg_name='epg1',
gw_ip_mask='10.10.10.0/28')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_epg_subnet(cls, **kwargs):
example_sub = {
"fvSubnet__epg": {
"attributes": {
"dn": "uni/tn-t1/ap-ap1/epg-epg1/subnet-[10.10.10.0/28]",
"scope": "private"
}
}
}
example_sub['fvSubnet__epg']['attributes'].update(kwargs)
return example_sub
@classmethod
def _get_example_aim_tenant(cls, **kwargs):
example = resource.Tenant(name='test-tenant')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_tenant(cls, **kwargs):
example_tenant = {
"fvTenant": {
"attributes": {
"dn": "uni/tn-test-tenant",
"descr": ""
}
}
}
example_tenant['fvTenant']['attributes'].update(kwargs)
return example_tenant
@classmethod
def _get_example_aim_epg(cls, **kwargs):
example = resource.EndpointGroup(
tenant_name='t1', app_profile_name='a1', name='test',
bd_name='net1')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_epg(cls, **kwargs):
example_epg = {
"fvAEPg": {
"attributes": {
"dn": "uni/tn-t1/ap-a1/epg-test",
"descr": ""
}
}
}
example_epg['fvAEPg']['attributes'].update(kwargs)
return example_epg
@classmethod
def _get_example_aim_fault(cls, **kwargs):
example = aim_status.AciFault(
fault_code='951',
external_identifier='uni/tn-t1/ap-a1/epg-test/fault-951',
severity='warning')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_fault(cls, **kwargs):
example_epg = {
"faultInst": {
"attributes": {
"dn": "uni/tn-t1/ap-a1/epg-test/fault-951",
"descr": "cannot resolve",
"code": "951",
"severity": "warning",
"cause": "resolution-failed",
}
}
}
example_epg['faultInst']['attributes'].update(kwargs)
return example_epg
@classmethod
def _get_example_aci_ext_net(cls, **kwargs):
example_extnet = {
"l3extInstP": {
"attributes": {
"descr": "",
"dn": "uni/tn-common/out-default/instP-extnet",
"name": "extnet"}}}
example_extnet['l3extInstP']['attributes'].update(kwargs)
return example_extnet
@classmethod
def _get_example_aci_ext_net_rs_prov(cls, **kwargs):
example_extnet = {
"fvRsProv": {
"attributes": {
"dn": "uni/tn-common/out-default/instP-extnet"
"/rsprov-default",
"tnVzBrCPName": "default"}}}
example_extnet['fvRsProv']['attributes'].update(kwargs)
return example_extnet
@classmethod
def _get_example_aci_l3_out(cls, **kwargs):
example_l3o = {"l3extOut": {
"attributes": {
"descr": "",
"dn": "uni/tn-common/out-default",
"name": "default"}}}
example_l3o['l3extOut']['attributes'].update(kwargs)
return example_l3o
@classmethod
def _get_example_aci_l3_out_vrf_rs(cls, **kwargs):
example_l3o_vrf_rs = {"l3extRsEctx": {
"attributes": {
"dn": "uni/tn-common/out-default/rsectx",
"tnFvCtxName": "default"}}}
example_l3o_vrf_rs['l3extRsEctx']['attributes'].update(kwargs)
return example_l3o_vrf_rs
@classmethod
def _get_example_aci_contract(cls, **kwargs):
example_brcp = {
"vzBrCP": {
"attributes": {
"dn": "uni/tn-common/brc-c",
"name": "c"}}}
example_brcp['vzBrCP']['attributes'].update(kwargs)
return example_brcp
@classmethod
def _get_example_aci_oob_contract(cls, **kwargs):
example_oob_brcp = {
"vzOOBBrCP": {
"attributes": {
"dn": "uni/tn-common/oobbrc-c",
"name": "c"}}}
example_oob_brcp['vzOOBBrCP']['attributes'].update(kwargs)
return example_oob_brcp
@classmethod
def _get_example_provided_contract(cls, **kwargs):
example_rsprov = {
"fvRsProv": {
"attributes": {
"dn": "uni/tn-common/ap-ap/epg-epg/rsprov-c",
"tnVzBrCPName": "c"
}
}
}
example_rsprov['fvRsProv']['attributes'].update(kwargs)
return example_rsprov
@classmethod
def _get_example_consumed_contract(cls, **kwargs):
example_rscons = {
"fvRsCons": {
"attributes": {
"dn": "uni/tn-common/ap-ap/epg-epg/rscons-c",
"tnVzBrCPName": "c"
}
}
}
example_rscons['fvRsCons']['attributes'].update(kwargs)
return example_rscons
@classmethod
def generate_aim_object(cls, aim_type, **kwargs):
"""Generate AIM object with random identity attributes.
        Identity attributes are generated as random strings, which could be
        schema-invalid; kwargs can be passed to override them.
"""
res_dict = {x: utils.generate_uuid()
for x in list(aim_type.identity_attributes.keys())}
res_dict.update(kwargs)
return aim_type(**res_dict)
@classmethod
def _get_example_aim_span_vsource_grp(cls, **kwargs):
example = resource.SpanVsourceGroup(name='testSrcGrp',
admin_st='start')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_span_vsource_grp(cls, **kwargs):
example_span_vsource_grp = {
"spanVSrcGrp": {
"attributes": {
"dn": "uni/infra/vsrcgrp-testSrcGrp",
"name": "testSrcGrp", "descr": "",
"adminSt": "start",
"ownerKey": "", "ownerTag": ""}}}
example_span_vsource_grp['spanVSrcGrp']['attributes'].update(kwargs)
return example_span_vsource_grp
@classmethod
def _get_example_aim_span_vsource(cls, **kwargs):
example = resource.SpanVsource(vsg_name='testSrcGrp',
name='testSrc',
dir='both')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_span_vsource(cls, **kwargs):
example_span_vsource = {
"spanVSrc": {
"attributes": {
"dn": "uni/infra/vsrcgrp-testSrcGrp/vsrc-testSrc",
"name": "testSrc", "descr": "", "dir": "both",
"ownerKey": "", "ownerTag": ""}}}
example_span_vsource['spanVSrc']['attributes'].update(kwargs)
return example_span_vsource
@classmethod
def _get_example_aim_span_vdest_grp(cls, **kwargs):
example = resource.SpanVdestGroup(name='testDestGrp')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_span_vdest_grp(cls, **kwargs):
example_span_vdest_grp = {
"spanVDestGrp": {
"attributes": {
"dn": "uni/infra/vdestgrp-testDestGrp",
"name": "testDestGrp", "descr": "",
"ownerKey": "", "ownerTag": ""}}}
example_span_vdest_grp['spanVDestGrp']['attributes'].update(kwargs)
return example_span_vdest_grp
@classmethod
def _get_example_aim_span_vdest(cls, **kwargs):
example = resource.SpanVdest(vdg_name='testDestGrp',
name='testDest')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_span_vdest(cls, **kwargs):
example_span_vdest = {
"spanVDest": {
"attributes": {
"dn": "uni/infra/vdestgrp-testDestGrp/vdest-testDest",
"name": "testDest", "descr": "",
"ownerKey": "", "ownerTag": ""}}}
example_span_vdest['spanVDest']['attributes'].update(kwargs)
return example_span_vdest
@classmethod
def _get_example_aim_span_vepg_sum(cls, **kwargs):
example = resource.SpanVepgSummary(vdg_name='testDestGrp',
vd_name='testDest',
dst_ip='172.51.12.2',
flow_id=1,
ttl=128,
mtu=1519,
mode='visible',
src_ip_prefix='1.2.2.1',
dscp=48)
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_span_vepg_sum(cls, **kwargs):
example_span_vepg_sum = {
"spanVEpgSummary": {
"attributes": {
"dn": "uni/infra/vdestgrp-testDestGrp/vdest-testDest/"
"vepgsummary",
"dstIp": "172.51.12.2", "flowId": 1, "ttl": 128,
"mtu": 1519, "descr": "",
"mode": "visible", "srcIpPrefix": "1.1.1.1", "dscp": 32,
"ownerKey": "", "ownerTag": ""}}}
example_span_vepg_sum['spanVEpgSummary']['attributes'].update(kwargs)
return example_span_vepg_sum
@classmethod
def _get_example_aim_infra_acc_bundle_grp(cls, **kwargs):
example = resource.InfraAccBundleGroup(name='accTest',
lag_t='node',
display_name='test_src')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_infra_acc_bundle_grp(cls, **kwargs):
example_acc_bundle_grp = {
"infraAccBndlGrp": {
"attributes": {
"dn": "uni/infra/funcprof/accbundle-accTest",
"name": "accTest", "descr": "",
"ownerKey": "", "ownerTag": "", "lagT": "link"}}}
example_acc_bundle_grp['infraAccBndlGrp']['attributes'].update(kwargs)
return example_acc_bundle_grp
@classmethod
def _get_example_aim_infra_acc_port_grp(cls, **kwargs):
example = resource.InfraAccPortGroup(name='1-5',
display_name='test_dest')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_infra_acc_port_grp(cls, **kwargs):
example_acc_port_grp = {
"infraAccPortGrp": {
"attributes": {
"dn": "uni/infra/funcprof/accportgrp-1-5",
"name": "1-5", "descr": "",
"ownerKey": "", "ownerTag": ""}}}
example_acc_port_grp['infraAccPortGrp']['attributes'].update(kwargs)
return example_acc_port_grp
@classmethod
def _get_example_aim_span_spanlbl(cls, **kwargs):
example = resource.SpanSpanlbl(vsg_name='testSrcGrp',
name='testDestGrp',
tag='green-yellow')
example.__dict__.update(kwargs)
return example
@classmethod
def _get_example_aci_span_spanlbl(cls, **kwargs):
example_span_spanlbl = {
"spanSpanLbl": {
"attributes": {
"dn": "uni/infra/vsrcgrp-testSrcGrp/spanlbl-testDestGrp",
"name": "testSrc", "descr": "", "ownerKey": "",
"ownerTag": "", "tag": ""}}}
example_span_spanlbl['spanSpanLbl']['attributes'].update(kwargs)
return example_span_spanlbl
@classmethod
def _get_example_aim_system_security_group_rule(cls, **kwargs):
example = resource.SystemSecurityGroupRule(
security_group_subject_name='sgs1', name='rule1')
example.__dict__.update(kwargs)
return example
|
{
"content_hash": "2bc29b45fc094ae8db3c48d19f68f3a3",
"timestamp": "",
"source": "github",
"line_count": 775,
"max_line_length": 79,
"avg_line_length": 37.43096774193548,
"alnum_prop": 0.5182529559791789,
"repo_name": "noironetworks/aci-integration-module",
"id": "effa17b4aa13ecddcf0ff9cdda369aa8ce13e9a0",
"size": "29615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aim/tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1899856"
},
{
"name": "Roff",
"bytes": "437"
},
{
"name": "Shell",
"bytes": "2552"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponseRedirect
def username(request, *args, **kwargs):
if kwargs.get('user'):
username = kwargs['user'].username
else:
username = request.session.get('saved_username')
return {'username': username}
def redirect_to_form(*args, **kwargs):
if not kwargs['request'].session.get('saved_username') and \
kwargs.get('user') is None:
return HttpResponseRedirect('/form/')
|
{
"content_hash": "230cf15d664fd1813e58e49d686f8fa3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6584269662921348,
"repo_name": "eduherraiz/foowill",
"id": "b10a6659f85d87c87b522f8d4c586799b2a601e3",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "568093"
},
{
"name": "JavaScript",
"bytes": "255116"
},
{
"name": "Python",
"bytes": "172475"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
class FantasySport(object):
"""FantasySport Class
"""
url = 'http://fantasysports.yahooapis.com/fantasy/v2/'
def __init__(self, oauth, fmt=None, use_login=False):
"""Initialize a FantasySport object
"""
self.oauth = oauth
self.fmt = 'json' if not fmt else fmt # JSON as default format
self.use_login = use_login
def __repr__(self,):
return "<{0}> <{1}>".format(self.url, self.fmt)
def _check_token_validity(self,):
"""Check wether or not the access token is still valid, if not, renews it
"""
if not self.oauth.token_is_valid():
            self.oauth.refresh_access_token()
return True
def _get(self, uri):
"""
"""
if not self.oauth.oauth.base_url :
self.oauth.oauth.base_url = self.url
self._check_token_validity()
response = self.oauth.session.get(uri, params={'format': self.fmt})
return response
def _put(self, uri, roster):
"""
- uri : roster resource uri
- roster : roster object
"""
headers = {'Content-Type':'application/{0}'.format(self.fmt)}
data = roster.to_json() if self.fmt == 'json' else roster.to_xml() # Getting roster xml or json according to self.fmt
        response = self.oauth.session.put(uri, data=data, headers=headers)
        return response
def _add_login(self, uri):
"""Add users;use_login=1/ to the uri
"""
uri = "users;use_login=1/{0}".format(uri)
return uri
def _format_resources_key(self, keys):
"""Format resources keys
"""
return ','.join(str(e) for e in keys)
def _build_uri(self, resources, keys, sub=None):
"""Build uri
"""
if resources:
uri = "{0}={1}".format(resources, self._format_resources_key(keys))
else:
uri = '{0}'.format(self._format_resources_key(keys))
if sub and isinstance(sub, str) :
uri += "/{0}".format(sub)
if sub and not isinstance(sub, str):
uri += ";out={0}".format(','.join([e for e in sub]))
if self.use_login:
uri = self._add_login(uri)
return uri
def get_collections(self, resource_type, resource_ids, sub_resources):
"""Generic method to get collections
"""
uri = self._build_uri(resource_type, resource_ids, sub=sub_resources)
response = self._get(uri)
return response
#################################
#
# GAMES
#
#################################
def get_games_info(self, game_keys, leagues=None, teams=False, players=None):
"""Return game info
>>> yfs.get_games_info('mlb')
Must set use_login to True to pull teams data
"""
uri = self._build_uri('games;game_keys', game_keys)
if leagues:
uri += '/leagues;league_keys={0}'.format(leagues)
if teams:
uri += '/teams'
if players:
uri += '/players;player_keys={0}'.format(players)
response = self._get(uri)
return response
####################################
#
# LEAGUES
#
####################################
def get_leagues(self, league_keys):
"""Return league data
>>> yfs.get_leagues(['league_key'])
"""
uri = self._build_uri('leagues;league_keys',league_keys)
response = self._get(uri)
return response
def get_leagues_teams(self, league_keys):
"""Return leagues teams
>>> yfs.get_leagues_teams(['238.l.627062'])
"""
uri = self._build_uri('leagues;league_keys', league_keys, sub='teams')
response = self._get(uri)
return response
def get_leagues_players(self, league_keys):
"""Return leagues players
        >>> yfs.get_leagues_players(['238.l.627062'])
"""
uri = self._build_uri('leagues;league_keys', league_keys, sub='players')
response = self._get(uri)
return response
def get_leagues_scoreboard(self, league_keys, week=None):
"""Return leagues scoreboard
>>> yfs.get_leagues_scoreboard(['league_key'])
"""
uri = self._build_uri('leagues;league_keys',league_keys, sub='scoreboard')
if week:
uri += ';week={0}'.format(week)
response = self._get(uri)
return response
def get_leagues_settings(self, league_keys):
"""Return leagues settings
>>> yfs.get_leagues_settings(['238.l.627062','238.l.627062'])
"""
uri = self._build_uri('leagues;league_keys', league_keys, sub='settings')
response = self._get(uri)
return response
def get_leagues_standings(self, league_keys, teams=None, players=None):
"""Return leagues settings
>>> yfs.get_leagues_settings(['238.l.627062','238.l.627062'])
"""
uri = self._build_uri('leagues;league_keys', league_keys, sub='standings')
if teams:
uri += '/teams/{0}'.format(teams)
if teams=='roster' and players:
uri += '/players/{0}'.format(players)
response = self._get(uri)
return response
def get_leagues_transactions(self, league_keys):
"""Return leagues settings
>>> yfs.get_leagues_transactions(['238.l.627062'])
"""
uri = self._build_uri('leagues;league_keys', league_keys, sub='transactions')
response = self._get(uri)
return response
def get_leagues_draftresults(self, league_keys):
"""Return leagues draftresults
>>> yfs.get_leagues_draftresults(['238.l.627062'])
"""
uri = self._build_uri('leagues;league_keys', league_keys, sub='draftresults')
response = self._get(uri)
return response
#########################################
#
# PLAYERS (not league specific)
#
#########################################
def get_players(self, player_keys, filters=None):
"""Return player data
>>> yfs.get_players(['player_key'])
"""
uri = self._build_uri('players;player_keys', player_keys)
if filters and isinstance(filters, str):
uri += ';{0}'.format(filters)
if filters and not isinstance(filters, str):
uri += ";{0}".format(','.join([e for e in filters]))
response = self._get(uri)
return response
def get_players_stats(self, player_keys, week=None):
"""Return player stats (not league specific)
>>> yfs.get_players_stats(['223.p.5479'], week=3)
"""
uri = self._build_uri('players;player_keys', player_keys, sub='stats')
if week:
uri += ';type=week;week={0}'.format(week)
response = self._get(uri)
return response
def get_players_percent_owned(self, player_keys):
"""Return ownership percentage of player (not league specific)
        >>> yfs.get_players_percent_owned(['223.p.5479'])
"""
uri = self._build_uri('players;player_keys', player_keys, sub='percent_owned')
response = self._get(uri)
return response
def get_players_draft_analysis(self, player_keys):
"""Return draft metrics for player (not league specific)
        >>> yfs.get_players_draft_analysis(['223.p.5479'])
"""
uri = self._build_uri('players;player_keys', player_keys, sub='draft_analysis')
response = self._get(uri)
return response
###################################
#
# TEAMS
#
###################################
def get_teams(self, team_keys):
"""Return team data
>>> yfs.get_teams(['league_key'])
"""
uri = self._build_uri('teams;team_keys',team_keys)
response = self._get(uri)
return response
def get_teams_players(self, team_keys):
"""Return teams players
>>> yfs.get_teams_players(['238.l.627062'])
"""
uri = self._build_uri('teams;team_keys', team_keys, sub='players')
response = self._get(uri)
return response
def get_teams_stats(self, team_keys, week=None):
"""Return team stats (week only for H2H league)
>>> yfs.get_teams_stats(['238.l.627062.t.1'], week=3)
"""
uri = self._build_uri('teams;team_keys',team_keys, sub='stats')
if week:
uri += ';type=week;week={0}'.format(week)
response = self._get(uri)
return response
def get_teams_standings(self, team_keys):
"""Return team standings
>>> yfs.get_teams_standings(['238.l.627062.t.1'])
"""
uri = self._build_uri('teams;team_keys',team_keys, sub='standings')
response = self._get(uri)
return response
def get_teams_roster(self, team_keys, week=None, players=None, filters=None):
"""Return team roster
>>> yfs.get_teams_roster(['238.l.627062.t.1'], week=3)
"""
uri = self._build_uri('teams;team_keys',team_keys, sub='roster')
if week:
uri += ';week={0}'.format(week)
if players and filters:
uri += '/players;{1}/{0}'.format(filters, players)
elif filters and not players:
uri += '/players;{0}'.format(filters)
elif players and not filters:
uri += '/players/{0}'.format(players)
response = self._get(uri)
return response
def get_teams_draftresults(self, team_keys):
"""Return a team's draft results
>>> yfs.get_teams_draftresults(['238.l.627062.t.1'])
"""
uri = self._build_uri('teams;team_keys',team_keys, sub='draftresults')
response = self._get(uri)
return response
def get_teams_matchups(self, team_keys, weeks=None):
"""Return team matchups (H2H leagues only)
>>> yfs.get_teams_matchups(['238.l.627062.t.1'], weeks='1,3,6')
"""
uri = self._build_uri('teams;team_keys',team_keys, sub='matchups')
if weeks and isinstance(weeks, str):
uri += ';weeks={0}'.format(weeks)
if weeks and not isinstance(weeks, str):
uri += ";weeks{0}".format(','.join([e for e in weeks]))
response = self._get(uri)
return response
##############################################
#
# ROSTERS (team specific player info)
#
##############################################
def get_roster_players(self, team_keys, week=None, date=None):
"""Access roster info, with player sub option
>>> yfs.get_roster_players(['238.l.627062'])
"""
uri = self._build_uri(None, team_keys, sub='roster/players')
uri = 'team/{0}'.format(uri) # Done to avoid having 'team=238.l.627062', which doesn't work for this resource
if week:
uri += ';week={0}'.format(week)
if date:
uri += ';date={0}'.format(date)
response = self._get(uri)
return response
def set_roster_players(self, team_keys, roster):
"""
>>> from fantasy_sport import Roster, Player
>>> p1 = Player('242.p.8332','WR')
>>> p2 = Player('242.p.8334','WL')
>>> roster = Roster([p1, p2], date='2015-01-11')
>>> ysf.set_roster_players(['238.l.627062'], roster)
"""
uri = self._build_uri(None, team_keys, sub='roster/players')
uri = 'team/{0}'.format(uri)
response = self._put(uri, roster)
return response
##############################################
#
# TRANSACTIONS
#
##############################################
def get_transactions(self, transaction_keys, players=None):
"""Return transaction data
>>> yfs.get_transaction(['transaction_key'])
"""
if players:
subtext = 'players/{0}'.format(players)
uri = self._build_uri('transactions;transaction_keys', transaction_keys, sub=subtext)
else:
uri = self._build_uri('transactions;transaction_keys', transaction_keys)
response = self._get(uri)
return response
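# Illustrative usage sketch (not part of the original module). ``oauth`` is
# assumed to be an already-authenticated wrapper exposing ``session``,
# ``token_is_valid()`` and ``refresh_access_token()``, as the class above
# expects; the league key is taken from the docstring examples.
def _example_usage(oauth):
    yfs = FantasySport(oauth, fmt='json', use_login=True)
    response = yfs.get_leagues(['238.l.627062'])
    # each resource method returns the raw requests response object
    return response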
|
{
"content_hash": "3415e9dbb6dc4309432f53a8811b0b04",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 125,
"avg_line_length": 32.80361757105943,
"alnum_prop": 0.5174478141000394,
"repo_name": "josuebrunel/yahoo-fantasy-sport",
"id": "60a49ff7e1ada74b2bb26ccc8aa3f4cf044f6d73",
"size": "12695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fantasy_sport/fantasy_sport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30513"
},
{
"name": "Shell",
"bytes": "212"
}
],
"symlink_target": ""
}
|
import os
from f1.a.user.session import Session
# start session
Session.start()
|
{
"content_hash": "af4366865c045f4a1df0990cef30dfb6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 37,
"avg_line_length": 13.666666666666666,
"alnum_prop": 0.7682926829268293,
"repo_name": "filemakergarage/zeroclient",
"id": "7fcbf6717b11e2fd425e2099e8d2d8505dab15ff",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f1/c/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "163404"
},
{
"name": "Shell",
"bytes": "5072"
}
],
"symlink_target": ""
}
|
"""
korean.ext.jinja2
~~~~~~~~~~~~~~~~~
Jinja2_ is one of the most used template engines for Python. This module
contains Jinja2 template engine extensions to make :mod:`korean` easy to
use.
.. versionadded:: 0.1.5
.. versionchanged:: 0.1.6
Moved from :mod:`korean.l10n.jinja2ext` to :mod:`korean.ext.jinja2`.
.. _Jinja2: http://jinja.pocoo.org/docs
:copyright: (c) 2012-2013 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
from jinja2 import nodes
from jinja2.ext import Extension
from jinja2.utils import Markup
from .. import l10n
class ProofreadingExtension(Extension):
"""A Jinja2 extention which registers the ``proofread`` filter and the
``proofread`` block:
.. sourcecode:: jinja
<h1>ProofreadingExtension Usage</h1>
<h2>Single filter</h2>
{{ (name ~ '은(는) ' ~ obj ~ '을(를) 획득했다.')|proofread }}
<h2>Filter chaining</h2>
{{ '%s은(는) %s을(를) 획득했다.'|format(name, obj)|proofread }}
<h2><code>proofread</code> block</h2>
{% proofread %}
{{ name }}은(는) {{ obj }}을(를) 획득했다.
{% endproofread %}
<h2>Conditional <code>proofread</code> block</h2>
{% proofread locale.startswith('ko') %}
{{ name }}은(는) {{ obj }}을(를) 획득했다.
{% endproofread %}
The import name is ``korean.ext.jinja2.proofread``. Just add it into
your Jinja2 environment by the following code::
from jinja2 import Environment
jinja_env = Environment(extensions=['korean.ext.jinja2.proofread'])
.. versionadded:: 0.1.5
.. versionchanged:: 0.1.6
Added ``enabled`` argument to ``{% proofread %}``.
"""
tags = ['proofread', 'autoproofread']
def __init__(self, environment):
environment.filters['proofread'] = l10n.proofread
def _proofread(self, enabled, caller):
return l10n.proofread(caller()) if enabled else caller()
def parse(self, parser):
tag = parser.stream.current.value
lineno = next(parser.stream).lineno
if parser.stream.current.type == 'block_end':
args = [nodes.Const(True)]
else:
args = [parser.parse_expression()]
body = parser.parse_statements(['name:end%s' % tag], drop_needle=True)
call = self.call_method('_proofread', args)
return nodes.CallBlock(call, [], [], body, lineno=lineno)
# nicer import name
proofread = ProofreadingExtension
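# Illustrative usage sketch (not part of the original module), mirroring the
# class docstring above: build a Jinja2 environment with the extension and
# render a small template through the ``proofread`` filter.
def _example_render(name='방패'):
    from jinja2 import Environment
    env = Environment(extensions=['korean.ext.jinja2.proofread'])
    template = env.from_string("{{ (name ~ '을(를) 획득했다.')|proofread }}")
    return template.render(name=name)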
|
{
"content_hash": "1934970801269d13da552d51a858e53f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 29.58823529411765,
"alnum_prop": 0.6163021868787276,
"repo_name": "lqez/korean",
"id": "ca41066f080ea817416ce78179f087333ac2ad22",
"size": "2603",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "korean/ext/jinja2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "64071"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'SignatureEnvelopeResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # SignatureEnvelopeResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # int
|
{
"content_hash": "24a8dbe2e06da25c82c1cad0e61c39d1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 30.945945945945947,
"alnum_prop": 0.6497816593886463,
"repo_name": "liosha2007/temporary-groupdocs-python3-sdk",
"id": "5fd3e8a89a53f88707780f9fdc211f5b6e03972a",
"size": "1167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groupdocs/models/SignatureEnvelopeResponse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "992590"
}
],
"symlink_target": ""
}
|
from unittest.mock import patch
import pytest
from faker import Faker, Generator
class BarProvider:
def foo_formatter(self):
return "barfoo"
class FooProvider:
def foo_formatter(self):
return "foobar"
def foo_formatter_with_arguments(self, param="", append=""):
return "baz" + str(param) + str(append)
@pytest.fixture(autouse=True)
def generator():
generator = Generator()
generator.add_provider(FooProvider())
return generator
class TestGenerator:
"""Test Generator class"""
def test_get_formatter_returns_correct_formatter(self, generator):
foo_provider = generator.providers[0]
formatter = generator.get_formatter("foo_formatter")
assert callable(formatter) and formatter == foo_provider.foo_formatter
def test_get_formatter_with_unknown_formatter(self, generator):
with pytest.raises(AttributeError) as excinfo:
generator.get_formatter("barFormatter")
assert str(excinfo.value) == "Unknown formatter 'barFormatter'"
fake = Faker("it_IT")
with pytest.raises(AttributeError) as excinfo:
fake.get_formatter("barFormatter")
assert str(excinfo.value) == "Unknown formatter 'barFormatter' with locale 'it_IT'"
def test_format_calls_formatter_on_provider(self, generator):
assert generator.format("foo_formatter") == "foobar"
def test_format_passes_arguments_to_formatter(self, generator):
result = generator.format("foo_formatter_with_arguments", "foo", append="!")
assert result == "bazfoo!"
def test_add_provider_overrides_old_provider(self, generator):
assert generator.format("foo_formatter") == "foobar"
generator.add_provider(BarProvider())
assert generator.format("foo_formatter") == "barfoo"
def test_parse_without_formatter_tokens(self, generator):
assert generator.parse("fooBar#?") == "fooBar#?"
def test_parse_with_valid_formatter_tokens(self, generator):
result = generator.parse('This is {{foo_formatter}} a text with "{{ foo_formatter }}"')
assert result == 'This is foobar a text with "foobar"'
def test_arguments_group_with_values(self, generator):
generator.set_arguments("group1", "argument1", 1)
generator.set_arguments("group1", "argument2", 2)
assert generator.get_arguments("group1", "argument1") == 1
assert generator.del_arguments("group1", "argument2") == 2
assert generator.get_arguments("group1", "argument2") is None
assert generator.get_arguments("group1") == {"argument1": 1}
def test_arguments_group_with_dictionaries(self, generator):
generator.set_arguments("group2", {"argument1": 3, "argument2": 4})
assert generator.get_arguments("group2") == {"argument1": 3, "argument2": 4}
assert generator.del_arguments("group2") == {"argument1": 3, "argument2": 4}
assert generator.get_arguments("group2") is None
def test_arguments_group_with_invalid_name(self, generator):
assert generator.get_arguments("group3") is None
assert generator.del_arguments("group3") is None
def test_arguments_group_with_invalid_argument_type(self, generator):
with pytest.raises(ValueError) as excinfo:
generator.set_arguments("group", ["foo", "bar"])
assert str(excinfo.value) == "Arguments must be either a string or dictionary"
def test_parse_with_valid_formatter_arguments(self, generator):
generator.set_arguments("format_name", {"param": "foo", "append": "bar"})
result = generator.parse('This is "{{foo_formatter_with_arguments:format_name}}"')
generator.del_arguments("format_name")
assert result == 'This is "bazfoobar"'
def test_parse_with_unknown_arguments_group(self, generator):
with pytest.raises(AttributeError) as excinfo:
generator.parse('This is "{{foo_formatter_with_arguments:unknown}}"')
assert str(excinfo.value) == "Unknown argument group 'unknown'"
def test_parse_with_unknown_formatter_token(self, generator):
with pytest.raises(AttributeError) as excinfo:
generator.parse("{{barFormatter}}")
assert str(excinfo.value) == "Unknown formatter 'barFormatter'"
def test_magic_call_calls_format(self, generator):
assert generator.foo_formatter() == "foobar"
def test_magic_call_calls_format_with_arguments(self, generator):
assert generator.foo_formatter_with_arguments("foo") == "bazfoo"
@patch("faker.generator.random_module.getstate")
def test_get_random(self, mock_system_random, generator):
random_instance = generator.random
random_instance.getstate()
mock_system_random.assert_not_called()
@patch("faker.generator.random_module.seed")
def test_random_seed_doesnt_seed_system_random(self, mock_system_random, generator):
# Save original state of shared random instance to avoid affecting other tests
state = generator.random.getstate()
generator.seed(0)
mock_system_random.assert_not_called()
# Restore state of shared random instance
generator.random.setstate(state)
|
{
"content_hash": "75f1c59c4d2e246e28c9f693cc7f5a85",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 95,
"avg_line_length": 41.792,
"alnum_prop": 0.6763016845329249,
"repo_name": "joke2k/faker",
"id": "39aed2ac1ecd54311a66838fce2bf6ebfd13c79c",
"size": "5224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "291"
},
{
"name": "Python",
"bytes": "7689013"
},
{
"name": "Shell",
"bytes": "880"
}
],
"symlink_target": ""
}
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("LGBMClassifier" , "BinaryClass_100" , "mysql")
|
{
"content_hash": "f531473ae289dccbd5d726a7b8c03de2",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 36,
"alnum_prop": 0.7847222222222222,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "796c9eea967f78dae5f6db7cebe6c310d84ad4b5",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/classification/BinaryClass_100/ws_BinaryClass_100_LGBMClassifier_mysql_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
}
|
class GiraffeError(Exception):
"""
Baseclass for all giraffez errors.
"""
class GiraffeTypeError(GiraffeError):
"""
Baseclass for all giraffez type errors.
"""
class GiraffeEncodeError(GiraffeError):
"""
Raised when unable to encode the provided object.
"""
class ConfigurationError(GiraffeError):
"""
For use with configuration file handling.
"""
class FileNotFound(GiraffeError):
"""
Raised when file does not exist.
"""
class ConfigNotFound(ConfigurationError, FileNotFound):
"""
Raised when the specified configuration file does not exist.
"""
class KeyNotFound(ConfigurationError, FileNotFound):
"""
    Raised when the specified key does not exist in the configuration.
"""
class ConfigReadOnly(ConfigurationError):
"""
    Raised when a write is attempted on a configuration file that was opened
in read mode.
"""
class InvalidCredentialsError(ConfigurationError):
"""
Raised when connection credentials are incorrect.
"""
class ConnectionLock(ConfigurationError):
"""
    Raised when the connection has been locked due to invalid attempts and
    the 'protect' feature is being used.
"""
def __init__(self, dsn):
super(ConnectionLock, self).__init__(("Connection {0} is currently locked. please update "
"credentials and run:\n\tgiraffez config --unlock {0}").format(dsn))
|
{
"content_hash": "9f1f2eb280163be11310eeafc014b2bd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 98,
"avg_line_length": 26,
"alnum_prop": 0.6794871794871795,
"repo_name": "capitalone/giraffez",
"id": "e24e8c04022b8a4a39931d79d768cd0505a310a7",
"size": "2020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "giraffez/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "196016"
},
{
"name": "C++",
"bytes": "35182"
},
{
"name": "Makefile",
"bytes": "834"
},
{
"name": "Python",
"bytes": "269726"
},
{
"name": "Shell",
"bytes": "6939"
}
],
"symlink_target": ""
}
|
import numpy as np
# physical/external base state of all entites
class EntityState(object):
def __init__(self):
# physical position
self.p_pos = None
# physical velocity
self.p_vel = None
# state of agents (including communication and internal/mental state)
class AgentState(EntityState):
def __init__(self):
super(AgentState, self).__init__()
# communication utterance
self.c = None
# action of the agent
class Action(object):
def __init__(self):
# physical action
self.u = None
# communication action
self.c = None
# properties and state of physical world entity
class Entity(object):
def __init__(self):
# name
self.name = ''
# properties:
self.size = 0.050
# entity can move / be pushed
self.movable = False
# entity collides with others
self.collide = True
# material density (affects mass)
self.density = 25.0
# color
self.color = None
# max speed and accel
self.max_speed = None
self.accel = None
# state
self.state = EntityState()
# mass
self.initial_mass = 1.0
@property
def mass(self):
return self.initial_mass
# properties of landmark entities
class Landmark(Entity):
def __init__(self):
super(Landmark, self).__init__()
# properties of agent entities
class Agent(Entity):
def __init__(self):
super(Agent, self).__init__()
# agents are movable by default
self.movable = True
# cannot send communication signals
self.silent = False
# cannot observe the world
self.blind = False
# physical motor noise amount
self.u_noise = None
# communication noise amount
self.c_noise = None
# control range
self.u_range = 1.0
# state
self.state = AgentState()
# action
self.action = Action()
# script behavior to execute
self.action_callback = None
# multi-agent world
class World(object):
def __init__(self):
# list of agents and entities (can change at execution-time!)
self.agents = []
self.landmarks = []
# communication channel dimensionality
self.dim_c = 0
# position dimensionality
self.dim_p = 2
# color dimensionality
self.dim_color = 3
# simulation timestep
self.dt = 0.1
# physical damping
self.damping = 0.25
# contact response parameters
self.contact_force = 1e+2
self.contact_margin = 1e-3
# return all entities in the world
@property
def entities(self):
return self.agents + self.landmarks
# return all agents controllable by external policies
@property
def policy_agents(self):
return [agent for agent in self.agents if agent.action_callback is None]
# return all agents controlled by world scripts
@property
def scripted_agents(self):
return [agent for agent in self.agents if agent.action_callback is not None]
# update state of the world
def step(self):
# set actions for scripted agents
for agent in self.scripted_agents:
agent.action = agent.action_callback(agent, self)
# gather forces applied to entities
p_force = [None] * len(self.entities)
# apply agent physical controls
p_force = self.apply_action_force(p_force)
# apply environment forces
p_force = self.apply_environment_force(p_force)
# integrate physical state
self.integrate_state(p_force)
# update agent state
for agent in self.agents:
self.update_agent_state(agent)
# gather agent action forces
def apply_action_force(self, p_force):
# set applied forces
for i,agent in enumerate(self.agents):
if agent.movable:
noise = np.random.randn(*agent.action.u.shape) * agent.u_noise if agent.u_noise else 0.0
p_force[i] = agent.action.u + noise
return p_force
# gather physical forces acting on entities
def apply_environment_force(self, p_force):
# simple (but inefficient) collision response
for a,entity_a in enumerate(self.entities):
for b,entity_b in enumerate(self.entities):
if(b <= a): continue
[f_a, f_b] = self.get_collision_force(entity_a, entity_b)
if(f_a is not None):
if(p_force[a] is None): p_force[a] = 0.0
p_force[a] = f_a + p_force[a]
if(f_b is not None):
if(p_force[b] is None): p_force[b] = 0.0
p_force[b] = f_b + p_force[b]
return p_force
# integrate physical state
def integrate_state(self, p_force):
for i,entity in enumerate(self.entities):
if not entity.movable: continue
entity.state.p_vel = entity.state.p_vel * (1 - self.damping)
if (p_force[i] is not None):
entity.state.p_vel += (p_force[i] / entity.mass) * self.dt
if entity.max_speed is not None:
speed = np.sqrt(np.square(entity.state.p_vel[0]) + np.square(entity.state.p_vel[1]))
if speed > entity.max_speed:
entity.state.p_vel = entity.state.p_vel / np.sqrt(np.square(entity.state.p_vel[0]) +
np.square(entity.state.p_vel[1])) * entity.max_speed
entity.state.p_pos += entity.state.p_vel * self.dt
def update_agent_state(self, agent):
# set communication state (directly for now)
if agent.silent:
agent.state.c = np.zeros(self.dim_c)
else:
noise = np.random.randn(*agent.action.c.shape) * agent.c_noise if agent.c_noise else 0.0
agent.state.c = agent.action.c + noise
# get collision forces for any contact between two entities
def get_collision_force(self, entity_a, entity_b):
if (not entity_a.collide) or (not entity_b.collide):
return [None, None] # not a collider
if (entity_a is entity_b):
return [None, None] # don't collide against itself
# compute actual distance between entities
delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
# minimum allowable distance
dist_min = entity_a.size + entity_b.size
        # softplus penetration (smooth approximation of max(0, dist_min - dist))
k = self.contact_margin
penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
force = self.contact_force * delta_pos / dist * penetration
force_a = +force if entity_a.movable else None
force_b = -force if entity_b.movable else None
return [force_a, force_b]
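# Illustrative sketch (not part of the original module): build a minimal
# two-agent world and advance it a few steps. Positions, names and the
# zero actions are arbitrary placeholder values.
def _example_rollout(steps=5):
    world = World()
    world.agents = [Agent() for _ in range(2)]
    for i, agent in enumerate(world.agents):
        agent.name = 'agent %d' % i
        agent.silent = True
        agent.state.p_pos = np.array([float(i), 0.0])
        agent.state.p_vel = np.zeros(world.dim_p)
        agent.state.c = np.zeros(world.dim_c)
        agent.action.u = np.zeros(world.dim_p)
    for _ in range(steps):
        world.step()
    return [agent.state.p_pos for agent in world.agents]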
|
{
"content_hash": "246a9d8de6cff3ecdc6456e6dbb821bf",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 118,
"avg_line_length": 35.66326530612245,
"alnum_prop": 0.5809728183118741,
"repo_name": "openai/multiagent-particle-envs",
"id": "c0662ca0c4e0751dee136aa14f6585c40095756d",
"size": "6990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multiagent/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86795"
}
],
"symlink_target": ""
}
|
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
'-std=gnu++11',
'-x',
'c++',
'-isystem',
'C:/mingw-w64/mingw64/x86_64-w64-mingw32/include',
'-isystem',
'C:/mingw-w64/mingw64/x86_64-w64-mingw32/include/c++',
'-isystem',
'C:/mingw-w64/mingw64/x86_64-w64-mingw32/include/c++/x86_64-w64-mingw32',
'-isystem',
'C:/Users/otrip/_local/include',
'-LC:/mingw-w64/mingw64/x86_64-w64-mingw32/lib',
'-LC:/Users/otrip/_local/lib'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh', '.tcc' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
{
"content_hash": "2533befa91d4e0db018d9f5299ac88c3",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 81,
"avg_line_length": 32.256,
"alnum_prop": 0.6574900793650794,
"repo_name": "pajamapants3000/vimfiles",
"id": "6daed438ff58a988ed4434d41a1a2a56a8212baf",
"size": "5432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "YouCompleteMe/cxx.ycm_extra_conf-Wind.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8273"
},
{
"name": "CMake",
"bytes": "7498"
},
{
"name": "GDB",
"bytes": "1831"
},
{
"name": "Python",
"bytes": "97357"
},
{
"name": "Vim script",
"bytes": "5186976"
}
],
"symlink_target": ""
}
|
import sys
import web
import GlobalVars
try:
import json
except ImportError:
import simplejson as json
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
from IRCResponse import IRCResponse, ResponseType
from IRCMessage import IRCMessage
from FunctionHandler import AutoLoadFunctions
from GlobalVars import functions
class MessageHandler:
def POST(self, name=None):
data = web.data()
jsonData = json.loads(data)
message = IRCMessage(jsonData)
if message.MessageString is not None:
message.MessageString = message.MessageString.encode('ascii', 'xmlcharrefreplace')
#print message.__dict__
print ( '%s <%s> %s' % (message.ReplyTo,
message.User.Name,
message.MessageString) )
responses = []
for (name, func) in functions.items():
try:
response = func.GetResponse(message)
if response is None:
continue
if hasattr(response, '__iter__'):
for r in response:
responses.append( r.__dict__ )
else:
responses.append( response.__dict__ )
except Exception:
msg = IRCResponse(ResponseType.Say,
("Python Execution Error in '%s': %s" %
(name, str( sys.exc_info() ))),
message.ReplyTo)
responses.append( msg.__dict__ )
return json.dumps(responses)
class BotDetailHandler:
def POST(self, path=None):
        if path != 'nickchange':
return
newNick = web.data()
print 'nickchange received: ' + newNick
GlobalVars.CurrentNick = newNick
urls = (
'/message', 'MessageHandler',
'/(nickchange)', 'BotDetailHandler'
)
if __name__ == '__main__':
AutoLoadFunctions()
app = web.application(urls, globals(), True)
app.run()
|
{
"content_hash": "5aa1615174d3412912fdbe115dd3d8b4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 94,
"avg_line_length": 28.972972972972972,
"alnum_prop": 0.5345149253731343,
"repo_name": "MatthewCox/MoronBot",
"id": "90506833cf9671735cdb90ce31f5120952ca57b1",
"size": "2144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MBPyFunctionServer/MBPyFunctionServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83330"
},
{
"name": "C#",
"bytes": "356552"
},
{
"name": "Lua",
"bytes": "1361"
},
{
"name": "PHP",
"bytes": "9739"
},
{
"name": "Python",
"bytes": "729338"
},
{
"name": "Shell",
"bytes": "360"
}
],
"symlink_target": ""
}
|
"""TensorFlow collective Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import device
from tensorflow.python.ops import gen_collective_ops
def all_reduce(t, group_size, group_key, instance_key, merge_op, final_op,
subdiv_offsets=(0,), communication_hint='auto'):
"""Reduces tensors collectively, across devices.
Args:
t: the tensor to be reduced.
group_size: the total number of tensors to be collectively reduced.
Each must reside on a different device.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
merge_op: string naming the binary Op to be applied to compute each
partial reduction.
final_op: string naming the unary Op to be applied to each fully
reduced value. Can be 'Id' for no operation.
subdiv_offsets: a list of integer offsets into the tensor at which each
independent subdivision should begin. Use [0] if no subdivision should
be done.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
Returns:
An Op implementing the distributed reduction.
Raises:
ValueError: if any of the input parameter constraints are not met.
"""
if not device.canonical_name(t.device):
raise ValueError('Device assignment required for collective ops')
if group_size <= 1:
raise ValueError('Parameter group_size to all_reduce must be at least 2.')
return gen_collective_ops.collective_reduce(
t,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
merge_op=merge_op,
final_op=final_op,
subdiv_offsets=subdiv_offsets,
communication_hint=communication_hint.lower())
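# Illustrative sketch (not part of the original module): issue one matching
# all_reduce per participating device. The group/instance keys and device
# placement below are placeholder choices; every participant must use the
# same group_size, group_key and instance_key.
def _example_all_reduce(tensors_on_devices, group_key=1, instance_key=1):
  reduced = []
  for t in tensors_on_devices:
    with t.graph.as_default(), t.graph.device(t.device):
      reduced.append(all_reduce(t, group_size=len(tensors_on_devices),
                                group_key=group_key, instance_key=instance_key,
                                merge_op='Add', final_op='Id'))
  return reduced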
def all_gather(t, group_size, group_key, instance_key,
communication_hint='auto'):
"""Accumulates tensors collectively, across devices, along first dimension.
Args:
t: the tensor to participate in the accumulation.
group_size: the total number of tensors to be collectively accumulated.
Each must reside on a different device.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
Returns:
An Op implementing the distributed operation.
Raises:
ValueError: if any of the input parameter constraints are not met.
"""
if not device.canonical_name(t.device):
raise ValueError('Device assignment required for collective ops')
if group_size <= 1:
raise ValueError('Parameter group_size to all_gather must be at least 2.')
return gen_collective_ops.collective_gather(
t,
shape=[0],
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower())
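# Hedged usage sketch (not part of the original module): gathering tensors
# along the first dimension from two devices; `t0` is assumed to be a tensor
# with an explicit device assignment.
#
#     gathered = all_gather(t0, group_size=2, group_key=1, instance_key=2)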
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
communication_hint='auto'):
"""Broadcasts one tensor to a group of others, across devices.
Args:
t: the tensor to be sent.
shape: the shape of the tensor being sent, which must agree with t.
dtype: the type of the tensor being sent, which must agree with t.
group_size: one plus the number of receiving tensors, i.e. the total
number of devices participating. Each tensor must reside on a
different device.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
Returns:
An Op implementing the distributed broadcast send.
Raises:
ValueError: if any of the input parameter constraints are not met.
Note that the shape and dtype arguments appear redundant since they
  should be obtainable from t. There are two reasons for including
them. First, the shape and type of tensors passed via broadcast must
be known ahead of time in their most specific form so that the receive
side can allocate memory for the operation and shape/type inference can
carry forward from there. Including the same declarations on the
send side clarifies a commitment already made. Secondly, having nearly
identical use syntax for send and receive sides may simplify tool-driven
generation of broadcast.
"""
if not device.canonical_name(t.device):
raise ValueError('Device assignment required for collective ops')
if group_size <= 1:
raise ValueError(
'Parameter group_size to broadcast_send must be at least 2.')
if t.shape != shape:
raise ValueError(
        'Shape of broadcast_send tensor not equal to declared shape')
if t.dtype != dtype:
raise ValueError(
'Type of broadcast_send tensor not equal to declared type')
return gen_collective_ops.collective_bcast_send(
t,
shape=shape,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower())
def broadcast_recv(shape, dtype, group_size, group_key, instance_key,
                   communication_hint='auto'):
  """Receives a broadcast tensor, across devices.
Args:
shape: Shape of the tensor to be received.
dtype: Type of the tensor to be received.
group_size: one plus the number of receiving tensors, i.e. the total
number of devices participating. Each tensor must reside on a
different device.
group_key: an integer identifying the group of devices.
instance_key: an integer identifying the participating group of Ops.
communication_hint: preferred collective communication. The implementation
may fall back to another mechanism. Options include `auto`, `ring`, and
`nccl`.
Returns:
An Op implementing the broadcast receive.
Raises:
ValueError: if any of the input parameter constraints are not met.
"""
if group_size <= 1:
raise ValueError(
        'Parameter group_size to broadcast_recv must be at least 2.')
return gen_collective_ops.collective_bcast_recv(
shape=shape,
T=dtype,
group_size=group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication_hint.lower())
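# Hedged usage sketch (not part of the original module): a broadcast pairs one
# send on the source device with a recv on every other device; shapes, dtypes
# and key values below are illustrative only, and `float_dtype` stands in for
# the declared element type (e.g. dtypes.float32).
#
#     # On the source device:
#     sent = broadcast_send(t0, shape=[2, 2], dtype=t0.dtype,
#                           group_size=2, group_key=1, instance_key=3)
#     # On each receiving device:
#     received = broadcast_recv(shape=[2, 2], dtype=float_dtype,
#                               group_size=2, group_key=1, instance_key=3)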
|
{
"content_hash": "b4bb0f4d46ec6af6c7d7382a84ddd59c",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 79,
"avg_line_length": 39.55621301775148,
"alnum_prop": 0.712789827973074,
"repo_name": "chemelnucfin/tensorflow",
"id": "e4f8dd8c2eaac4640dde84d1db24994def56bac3",
"size": "7374",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/collective_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
import config
import sys
#def listingUser(**k):
#return config.DB.select('users', **k)
#print **k
#return config.DB.select('usuarios', **k)
def listingUser(**k):
"""
    Determines whether the username and password match.
"""
#return config.DB.query("SELECT cliente_id AS clienteId, privilege_id FROM usuarios WHERE usuario=$name and pass=$passwd", **k)
return config.DB.query("SELECT cliente_id AS clienteId, privilege_id FROM usuarios WHERE usuario=$i.username and passwd=$i.password", **k)
def listingPrivilege(**k):
"""
    Determines which sub-application the user should be forwarded to.
Usage:
>>> from db import listingPrivilege
>>> privilegeId=1
>>> listingPrivilege(vars=locals())
0.0 (1): SELECT descrip FROM privileges WHERE id=1
u'/user'
>>>
"""
#return config.DB.query("SELECT descrip FROM privileges WHERE id=$privilegeId", **k)
result = config.DB.query("SELECT descrip FROM privileges WHERE id=$privilegeId", **k)
return "".join(["/%s" % v for v in result[0].values()])
def getNameClient(**k):
"""
    Gets the client's name
usage:
>>> from db import getNameClient
>>> clienteId = 1
>>>
>>> getNameClient(vars=locals())
0.0 (1): SELECT nombre1, nombre2, apellido1, apellido2 FROM clientes WHERE id=1
<Storage {'nombre2': u'alonso', 'apellido2': u'hoyos', 'apellido1': u'toro', 'nombre1': u'jorge'}>
>>> r = getNameClient(vars=locals())
0.0 (2): SELECT nombre1, nombre2, apellido1, apellido2 FROM clientes WHERE id=1
>>> r.nombre1
u'jorge'
>>>
"""
result = config.DB.query("SELECT nombre1, nombre2, apellido1, apellido2 FROM clientes WHERE id=$clienteId", **k)
#return "".join(["%s " % v for v in result[0].values()])
#return result[0].values()
return result[0]
#def listingDropdown(table, vars="id,name", order="id ASC"):
def listingDropdown(table, vars="id,name", order=None):
"""
    Gets a list for form.Dropdown().
Usage:
>>> from db import listingDropdown
>>> r = listingDropdown('type_document')
0.0 (4): SELECT id,name FROM type_document ORDER BY id ASC
>>> r
[(1, u'C\xe9dula de Ciudadan\xeda'), (2, u'N\xfamero de Identificaci\xf3n Tributaria'),
(3, u'Registro \xdanico Tributario'), (4, u'Tarjeta de Identidad'), (5, u'C\xe9dula de Extranjer\xeda')]
>>>
>>> r = listingDropdown('type_gps', "codigo,descrip", "codigo ASC")
0.0 (4): SELECT codigo,descrip FROM type_gps ORDER BY codigo ASC
>>> r
[(1, u'Antares'), (2, u'Skypatrol'), (3, u'HunterPro')]
>>>
"""
#result = config.DB.select
#result = config.DB.select('type_document', what="id,name")
#result = config.DB.select('type_document', what="id,name", order="id DESC") # order="id ASC"
# result = config.DB.select('type_document', what="id,name", order="id ASC")
#result = config.DB.select(table, what=vars, order)
result = config.DB.select(table, what=vars, order=order)
return [tuple(i.values()) for i in result.list()]
def listingGPS():
"""
    Performs a select of all GPS devices from the public.gps and public.type_gps tables.
Usage:
>>> from db import listingGPS
>>> a = listingGPS()
>>> for i in a.list():
... print i.name
...
GPS0001
GPS0004
ANT051
ANT056
ANT099
>>>
"""
return config.DB.query("""SELECT g.id, g.name, g.fecha_creacion, t.descrip,
g.active FROM gps g INNER JOIN type_gps t ON g.type = t.codigo;""")
def listingAllClient():
"""
    Query that retrieves all clients.
Usage:
>>> from db import listingAllClient
>>> a = listingAllClient()
>>> for i in a:
... print i
...
<Storage {'nombre1': u'jorge', 'genero': u'Masculino', 'tipo': u'C\xe9dula de Ciudadan\xeda',
'nombre2': u'alonso', 'fecha_naci': datetime.date(1982, 11, 2),
'direccion': u'Cra 11 # 15-15 APT 601 BL 2B', 'apellido2': u'hoyos', 'apellido1': u'toro',
'id': 1, 'email': u'jolthgs@gmail.com', 'municipio': u'Manizales', 'documento': u'11814584'}>
"""
return config.DB.query("""
SELECT c.id, c.documento, td.descrip AS tipo,
c.nombre1, c.nombre2, c.apellido1, c.apellido2,
c.fecha_naci, s.descrip AS genero,
c.direccion, m.descrip AS municipio, c.email
FROM clientes AS c
LEFT OUTER JOIN type_document AS td ON (c.tipo_docu=td.id)
LEFT OUTER JOIN sexo AS s ON (c.sexo_id=s.id)
LEFT OUTER JOIN municipio AS m ON (c.ciudad=m.codigo);
""")
def listingPhones(id):
"""
usage:
>>> from db import listingPhones
>>> a = listingPhones(15)
0.01 (1): SELECT p.phone, tp.name
FROM phones_all pa, phones p, type_phone tp
WHERE pa.phone_id=p.id AND tp.id=p.type AND pa.client_id=15
>>> for i in a:
... print i
...
<Storage {'phone': u'7844983', 'name': u'fijo'}>
<Storage {'phone': u'3126783452', 'name': u'celular'}>
>>>
"""
from web.db import sqlquote
result = config.DB.query("""SELECT p.phone, tp.name
FROM phones_all pa, phones p, type_phone tp
WHERE pa.phone_id=p.id AND tp.id=p.type AND pa.client_id=""" + sqlquote(id))
return [tuple(i.values()) for i in result.list()]
def listingClients(id):
"""
    Lists the clients who own a vehicle.
usage:
>>> from db import listingClients
>>> a = listingClients(5) # id del vehiculo
>>>
>>> for i in a:
... print i
...
(u'jorge,alonso,toro,hoyos', u'11814584')
>>>
"""
from web.db import sqlquote
result = config.DB.query("""
SELECT (c.nombre1 || ',' || COALESCE(c.nombre2, '') || ',' || c.apellido1 || ',' || COALESCE(c.apellido2,'')) AS nombre,
c.documento
FROM clientes_vehiculos cv, clientes c
WHERE cv.cliente_id=c.id AND cv.vehiculo_id=""" + sqlquote(id))
return [tuple(i.values()) for i in result.list()]
def listingAllVehicle():
"""
    Query that retrieves all vehicles.
Usage:
>>> from db import listingAllVehicle
>>> a = listingAllVehicle()
>>> for i in a:
... for k,v in i.items():
... print "%s=%s" % (k,v),
...
servicio=None carroceria=Aspersora cilindraje=1.1 color=None
ejes=1 combustible=Etanol clase=Motocarro marca=Hino placa=ttq000
modelo=1964 linea=None name=ANT003 servicio=None carroceria=None
cilindraje=None color=None ejes=None combustible=None clase=None
marca=Renault placa=rjm270 modelo=2012 linea=None name=ANT049
servicio=None carroceria=None cilindraje=None color=None ejes=None
combustible=None clase=None marca=Jac placa=sta345 modelo=2008
linea=None name=ANT098
>>>
"""
return config.DB.query("""
SELECT v.id, v.placa, g.name, m.descrip AS marca, v.modelo,
v.cilindraje, v.ejes, l.descrip AS linea,
c.descrip AS clase, ca.descrip AS carroceria, co.descrip AS color,
cb.descrip AS combustible, sv.descrip AS servicio
FROM vehiculos AS v
LEFT OUTER JOIN gps AS g ON (v.gps_id=g.id)
LEFT OUTER JOIN marcas_vehiculo AS m ON (v.marca_id=m.id)
LEFT OUTER JOIN liena_vehiculo AS l ON (v.linea_id=l.id)
LEFT OUTER JOIN clase_vehiculo AS c ON (v.clase_id=c.id)
LEFT OUTER JOIN carrocerias AS ca ON (v.carroceria_id=ca.id)
LEFT OUTER JOIN colores AS co ON (v.color_id=co.id)
LEFT OUTER JOIN combustibles AS cb ON (v.combustible_id=cb.id)
LEFT OUTER JOIN servicio_vehiculo AS sv ON (v.servicio_id=sv.id);
""")
def unmanagedEventListAdmin():
"""
    Query that gets all events not yet handled by the administrator.
usage:
>>> from db import unmanagedEventListAdmin
>>> a = unmanagedEventListAdmin()
>>> for i in a:
... for k,v in i.items():
... print "%s=%s" % (k,v),
...
vehicle_id=4 name=Panico id=46 user_state=False fecha=2012-08-28 11:56:54.638781-05:00 placa=ttq000
position_id=149 admin_state=False gps_name=ANT003 ubicacion=Carrera 6 # 16-2 a 16-100, Pereira, Risaralda, Colombia
gps_id=10 coord_state=False vehicle_id=4 name=Ignicion OFF id=45 user_state=False fecha=2012-08-28 11:56:51.360497-05:00
placa=ttq000 position_id=148 admin_state=False gps_name=ANT003
ubicacion=Carrera 6 # 16-2 a 16-100, Pereira, Risaralda, Colombia gps_id=10 coord_state=False
"""
return config.DB.query("""
SELECT e.id, te.name, e.fecha, e.type AS tipo_event,
e.gps_id, g.name AS gps_name,
v.placa, v.id AS vehicle_id,
e.admin_state, e.user_state, e.coord_state,
e.positions_gps_id AS position_id, p.ubicacion, p.position
from eventos e
LEFT OUTER JOIN type_event AS te ON e.type=te.codigo
LEFT OUTER JOIN vehiculos AS v ON e.gps_id=v.gps_id
LEFT OUTER JOIN gps AS g ON e.gps_id=g.id
LEFT OUTER JOIN positions_gps AS p ON p.id=e.positions_gps_id
WHERE e.admin_state='f' ORDER BY e.id DESC;
""")
def generalView():
"""
Usage:
>>> from db import generalView
>>> a = generalView()
>>> a.list()
[<Storage {'name': u'ANT003', 'velocidad': 1.0, \
'fecha': datetime.datetime(2012, 8, 19, 7, 22, 34, 964809, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None)), \
'satelites': 3, 'placa': u'ttq000', 'ubicacion': u'Calle 20 # 6-1 a 6-99, Pereira, Risaralda, Colombia', 'position': '(4.81534,-75.69489)', 'altura': None}>,\
<Storage {'name': u'ANT051', 'velocidad': 1.0, \
'fecha': datetime.datetime(2012, 8, 19, 7, 21, 14, 64915, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None)),\
'satelites': 3, 'placa': u'rjm270', 'ubicacion': u'Calle 20 # 6-1 a 6-99, Pereira, Risaralda, Colombia', 'position': '(4.81534,-75.69489)', 'altura': None}>]
>>> a = generalView()
>>> for i in a:
... print i.placa
...
ttq000
rjm270
>>>
"""
return config.DB.query("""SELECT l.position, v.placa, g.name, l.fecha,
l.velocidad, l.altura, l.satelites, l.ubicacion
FROM vehiculos v, last_positions_gps l, gps g
WHERE v.gps_id=g.id AND g.id=l.gps_id""")
def countEvent():
"""
    Returns the number of unhandled events.
Usage:
>>> from db import countEvent
>>> a = countEvent()
"""
return config.DB.query("""SELECT count(*) FROM eventos WHERE admin_state <> 't';""")
def insertPhone(storage, **sequence_id):
"""
>>> from db import insertPhone
>>> telefonos = {'fijo':'44444444', 'celular':u'', 'pbx':u'', 'fax':u''}
>>> insertPhone(telefonos, client_id=1)
0.0 (1): SELECT id FROM type_phone WHERE name='fijo'
typePhone_id: 2
0.0 (2): SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'
0.0 (3): INSERT INTO phones (phone, type) VALUES (44444444, 2); SELECT currval('phones_id_seq')
seqPhone_id: 4
0.0 (4): INSERT INTO phones_all (phone_id, client_id) VALUES (4L, 1)
>>>
"""
from web.db import sqlquote
for name in storage:
if storage[name]:
try:
typePhone_id = (config.DB.select('type_phone', what="id", where="name=" + sqlquote(name)))[0].id
#print "typePhone_id:", typePhone_id
# Insert public.phones
seqPhone_id = config.DB.insert('phones', phone=storage[name], type=typePhone_id)
#print "seqPhone_id:", seqPhone_id
                # Insert public.phones_all
seqPhone_all = config.DB.insert('phones_all', phone_id=seqPhone_id, **sequence_id)
#print "seqPhone_all", seqPhone_all
            except:
                print "Error in insertPhone:"
print sys.exc_info()
|
{
"content_hash": "b012c51924ff54e96a5dd4536bd58961",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 166,
"avg_line_length": 40.97096774193548,
"alnum_prop": 0.569955121643965,
"repo_name": "jolth/CowFleet-1.0.0",
"id": "17d6e04ea93f0efb1b528e5d07dd67ca75f7bb4d",
"size": "12728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "143079"
},
{
"name": "Python",
"bytes": "41863"
}
],
"symlink_target": ""
}
|
from rpi_greenhouse import GreenhouseIndicator
from time import sleep
indicator = GreenhouseIndicator()
while True:
for index in range(3):
indicator.turn_leds_on(index=index)
print("#%i on" % index)
sleep(0.5)
indicator.turn_leds_off(index=index)
|
{
"content_hash": "17c98be952fc47decbc17769ea37abaa",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 25.90909090909091,
"alnum_prop": 0.6771929824561403,
"repo_name": "bennuttall/rpi-greenhouse",
"id": "6f11b85d4bc2a699ac5108392f56be1038f320af",
"size": "285",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/test_leds_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16315"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/',
consumer_key=config.CONFIG_DB.twitter_consumer_key,
consumer_secret=config.CONFIG_DB.twitter_consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
)
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
response = twitter.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (
response['oauth_token'],
response['oauth_token_secret'],
)
user_db = retrieve_user_from_twitter(response)
return auth.signin_user_db(user_db)
@twitter.tokengetter
def get_twitter_token():
return flask.session.get('oauth_token')
@app.route('/signin/twitter/')
def signin_twitter():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['user_id']
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id=auth_id,
name=response['screen_name'],
username=response['screen_name'],
)
|
{
"content_hash": "bf0093f931569ab9d879eba9dd3e4631",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 66,
"avg_line_length": 25.910714285714285,
"alnum_prop": 0.7160578911095796,
"repo_name": "topless/gae-init",
"id": "26b5c78ee91ad8aca029b76777a523a0cbcf30c9",
"size": "1468",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "main/auth/twitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5299"
},
{
"name": "CoffeeScript",
"bytes": "16753"
},
{
"name": "HTML",
"bytes": "68540"
},
{
"name": "JavaScript",
"bytes": "65"
},
{
"name": "Python",
"bytes": "121675"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
}
|
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import httplib
import urllib
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
class AtomService(object):
"""Performs Atom Publishing Protocol CRUD operations.
The AtomService contains methods to perform HTTP CRUD operations.
"""
# Default values for members
port = 80
ssl = False
# Set the override_token to force the AtomService to use this token
# instead of searching for an appropriate token in the token_store.
override_token = None
def __init__(self, server=None, additional_headers=None,
application_name='', http_client=None, token_store=None):
"""Creates a new AtomService client.
Args:
server: string (optional) The start of a URL for the server
to which all operations should be directed. Example:
'www.google.com'
additional_headers: dict (optional) Any additional HTTP headers which
should be included with CRUD operations.
http_client: An object responsible for making HTTP requests using a
request method. If none is provided, a new instance of
atom.http.ProxiedHttpClient will be used.
token_store: Keeps a collection of authorization tokens which can be
applied to requests for a specific URLs. Critical methods are
find_token based on a URL (atom.url.Url or a string), add_token,
and remove_token.
"""
self.http_client = http_client or atom.http.ProxiedHttpClient()
self.token_store = token_store or atom.token_store.TokenStore()
self.server = server
self.additional_headers = additional_headers or {}
self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
application_name,)
# If debug is True, the HTTPConnection will display debug information
self._set_debug(False)
def _get_debug(self):
return self.http_client.debug
def _set_debug(self, value):
self.http_client.debug = value
debug = property(_get_debug, _set_debug,
doc='If True, HTTP debug information is printed.')
def use_basic_auth(self, username, password, scopes=None):
if username is not None and password is not None:
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
base_64_string = base64.encodestring('%s:%s' % (username, password))
token = BasicAuthToken('Basic %s' % base_64_string.strip(),
scopes=[atom.token_store.SCOPE_ALL])
self.token_store.add_token(token)
return True
return False
  def UseBasicAuth(self, username, password, for_proxy=False):
    """Sets an Authentication: Basic HTTP header containing plaintext.
Deprecated, use use_basic_auth instead.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext.
Args:
username: str
password: str
"""
self.use_basic_auth(username, password)
def request(self, operation, url, data=None, headers=None,
url_params=None):
if isinstance(url, str):
if not url.startswith('http') and self.ssl:
url = atom.url.parse_url('https://%s%s' % (self.server, url))
elif not url.startswith('http'):
url = atom.url.parse_url('http://%s%s' % (self.server, url))
else:
url = atom.url.parse_url(url)
if url_params:
for name, value in url_params.iteritems():
url.params[name] = value
all_headers = self.additional_headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
content_length = CalculateDataLength(data)
if content_length:
all_headers['Content-Length'] = str(content_length)
# Find an Authorization token for this URL if one is available.
if self.override_token:
auth_token = self.override_token
else:
auth_token = self.token_store.find_token(url)
return auth_token.perform_request(self.http_client, operation, url,
data=data, headers=all_headers)
# CRUD operations
def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
"""Query the APP server with the given URI
The uri is the portion of the URI after the server value
(server example: 'www.google.com').
Example use:
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
      extra_headers: dict (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse The server's response to the GET request.
"""
return self.request('GET', uri, data=None, headers=extra_headers,
url_params=url_params)
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Insert data into an APP server at the given URI.
Args:
data: string, ElementTree._Element, or something with a __str__ method
The XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the POST request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('POST', uri, data=data, headers=extra_headers,
url_params=url_params)
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the PUT request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('PUT', uri, data=data, headers=extra_headers,
url_params=url_params)
def Delete(self, uri, extra_headers=None, url_params=None,
escape_params=True):
"""Deletes the entry at the given URI.
Args:
uri: string The URI of the entry to be deleted. Example:
'/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the DELETE request.
"""
return self.request('DELETE', uri, data=None, headers=extra_headers,
url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
def __init__(self, auth_header, scopes=None):
"""Creates a token used to add Basic Auth headers to HTTP requests.
Args:
auth_header: str The value for the Authorization header.
scopes: list of str or atom.url.Url specifying the beginnings of URLs
for which this token can be used. For example, if scopes contains
'http://example.com/foo', then this token can be used for a request to
'http://example.com/foo/bar' but it cannot be used for a request to
'http://example.com/baz'
"""
self.auth_header = auth_header
self.scopes = scopes or []
def perform_request(self, http_client, operation, url, data=None,
headers=None):
"""Sets the Authorization header to the basic auth string."""
if headers is None:
headers = {'Authorization':self.auth_header}
else:
headers['Authorization'] = self.auth_header
return http_client.request(operation, url, data=data, headers=headers)
def __str__(self):
return self.auth_header
def valid_for_scope(self, url):
"""Tells the caller if the token authorizes access to the desired URL.
"""
if isinstance(url, (str, unicode)):
url = atom.url.parse_url(url)
for scope in self.scopes:
if scope == atom.token_store.SCOPE_ALL:
return True
if isinstance(scope, (str, unicode)):
scope = atom.url.parse_url(scope)
if scope == url:
return True
# Check the host and the path, but ignore the port and protocol.
elif scope.host == url.host and not scope.path:
return True
elif scope.host == url.host and scope.path and not url.path:
continue
elif scope.host == url.host and url.path.startswith(scope.path):
return True
return False
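# Hedged usage sketch (not part of the original module): constructing a token
# scoped to a single host; the credentials and URLs are illustrative only.
#
#     token = BasicAuthToken('Basic dXNlcjpwYXNz',
#                            scopes=['http://example.com/feeds'])
#     token.valid_for_scope('http://example.com/feeds/snippets')  # True
#     token.valid_for_scope('http://example.com/other')           # False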
def PrepareConnection(service, full_uri):
"""Opens a connection to the server based on the full URI.
This method is deprecated, instead use atom.http.HttpClient.request.
Examines the target URI and the proxy settings, which are set as
environment variables, to open a connection with the server. This
connection is used to make an HTTP request.
Args:
service: atom.AtomService or a subclass. It must have a server string which
represents the server host to which the request should be made. It may also
have a dictionary of additional_headers to send in the HTTP request.
full_uri: str Which is the target relative (lacks protocol and host) or
absolute URL to be opened. Example:
'https://www.google.com/accounts/ClientLogin' or
'base/feeds/snippets' where the server is set to www.google.com.
Returns:
A tuple containing the httplib.HTTPConnection and the full_uri for the
request.
"""
deprecation('calling deprecated function PrepareConnection')
(server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
if ssl:
# destination is https
proxy = os.environ.get('https_proxy')
if proxy:
(p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
user_auth.strip()))
else:
proxy_authorization = ''
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
user_agent = 'User-Agent: %s\r\n' % (
service.additional_headers['User-Agent'])
proxy_pieces = (proxy_connect + proxy_authorization + user_agent
+ '\r\n')
#now connect, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((p_server,p_port))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status=response.split()[1]
if p_status!=str(200):
        raise Exception('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
ssl = socket.ssl(p_sock, None, None)
fake_sock = httplib.FakeSocket(p_sock, ssl)
      # Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(server)
connection.sock=fake_sock
full_uri = partial_uri
else:
connection = httplib.HTTPSConnection(server, port)
full_uri = partial_uri
else:
# destination is http
proxy = os.environ.get('http_proxy')
if proxy:
(p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True)
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
UseBasicAuth(service, proxy_username, proxy_password, True)
connection = httplib.HTTPConnection(p_server, p_port)
if not full_uri.startswith("http://"):
if full_uri.startswith("/"):
full_uri = "http://%s%s" % (service.server, full_uri)
else:
full_uri = "http://%s/%s" % (service.server, full_uri)
else:
connection = httplib.HTTPConnection(server, port)
full_uri = partial_uri
return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
  """Sets an Authentication: Basic HTTP header containing plaintext.
  Deprecated, use AtomService.use_basic_auth instead.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext. The auth header is added to the
additional_headers dictionary in the service object.
Args:
service: atom.AtomService or a subclass which has an
additional_headers dict as a member.
username: str
password: str
"""
deprecation('calling deprecated function UseBasicAuth')
base_64_string = base64.encodestring('%s:%s' % (username, password))
base_64_string = base_64_string.strip()
if for_proxy:
header_name = 'Proxy-Authorization'
else:
header_name = 'Authorization'
service.additional_headers[header_name] = 'Basic %s' % (base_64_string,)
def ProcessUrl(service, url, for_proxy=False):
"""Processes a passed URL. If the URL does not begin with https?, then
  the default value for server is used.
This method is deprecated, use atom.url.parse_url instead.
"""
deprecation('call to deprecated function ProcessUrl')
if not isinstance(url, atom.url.Url):
url = atom.url.parse_url(url)
server = url.host
ssl = False
port = 80
if not server:
if hasattr(service, 'server'):
server = service.server
else:
server = service
if not url.protocol and hasattr(service, 'ssl'):
ssl = service.ssl
if hasattr(service, 'port'):
port = service.port
else:
if url.protocol == 'https':
ssl = True
elif url.protocol == 'http':
ssl = False
if url.port:
port = int(url.port)
elif port == 80 and ssl:
port = 443
return (server, port, ssl, url.get_request_uri())
def DictionaryToParamList(url_parameters, escape_params=True):
"""Convert a dictionary of URL arguments into a URL parameter string.
  This function is deprecated, use atom.url.Url instead.
Args:
    url_parameters: The dictionary of key-value pairs which will be converted
into URL parameters. For example,
{'dry-run': 'true', 'foo': 'bar'}
will become ['dry-run=true', 'foo=bar'].
Returns:
A list which contains a string for each key-value pair. The strings are
ready to be incorporated into a URL by using '&'.join([] + parameter_list)
"""
deprecation('call to deprecated function DictionaryToParamList')
# Choose which function to use when modifying the query and parameters.
# Use quote_plus when escape_params is true.
transform_op = [str, urllib.quote_plus][bool(escape_params)]
# Create a list of tuples containing the escaped version of the
# parameter-value pairs.
parameter_tuples = [(transform_op(param), transform_op(value))
for param, value in (url_parameters or {}).items()]
# Turn parameter-value tuples into a list of strings in the form
# 'PARAMETER=VALUE'.
return ['='.join(x) for x in parameter_tuples]
def BuildUri(uri, url_params=None, escape_params=True):
"""Converts a uri string and a collection of parameters into a URI.
  This function is deprecated, use atom.url.Url instead.
Args:
uri: string
url_params: dict (optional)
escape_params: boolean (optional)
    uri: string The start of the desired URI. This string can already contain
URL parameters. Examples: '/base/feeds/snippets',
'/base/feeds/snippets?bq=digital+camera'
url_parameters: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
string The URI consisting of the escaped URL parameters appended to the
initial uri string.
"""
deprecation('call to deprecated function BuildUri')
# Prepare URL parameters for inclusion into the GET request.
parameter_list = DictionaryToParamList(url_params, escape_params)
# Append the URL parameters to the URL.
if parameter_list:
if uri.find('?') != -1:
# If there are already URL parameters in the uri string, add the
# parameters after a new & character.
full_uri = '&'.join([uri] + parameter_list)
else:
# The uri string did not have any URL parameters (no ? character)
# so put a ? between the uri and URL parameters.
full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list)))
else:
full_uri = uri
return full_uri
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.
This method is deprecated, use atom.http.HttpClient.request instead.
  Usage example, perform an HTTP GET on http://www.google.com/:
import atom.service
client = atom.service.AtomService()
http_response = client.Get('http://www.google.com/')
or you could set the client.server to 'www.google.com' and use the
following:
client.server = 'www.google.com'
http_response = client.Get('/')
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string.
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
deprecation('call to deprecated function HttpRequest')
full_uri = BuildUri(uri, url_params, escape_params)
(connection, full_uri) = PrepareConnection(service, full_uri)
if extra_headers is None:
extra_headers = {}
# Turn on debug mode if the debug member is set.
if service.debug:
connection.debuglevel = 1
connection.putrequest(operation, full_uri)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if (data and not service.additional_headers.has_key('Content-Length') and
not extra_headers.has_key('Content-Length')):
content_length = CalculateDataLength(data)
if content_length:
extra_headers['Content-Length'] = str(content_length)
if content_type:
extra_headers['Content-Type'] = content_type
# Send the HTTP headers.
if isinstance(service.additional_headers, dict):
for header in service.additional_headers:
connection.putheader(header, service.additional_headers[header])
if isinstance(extra_headers, dict):
for header in extra_headers:
connection.putheader(header, extra_headers[header])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
__SendDataPart(data_part, connection)
else:
__SendDataPart(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def __SendDataPart(data, connection):
"""This method is deprecated, use atom.http._send_data_part"""
  deprecation('call to deprecated function __SendDataPart')
if isinstance(data, str):
#TODO add handling for unicode.
connection.send(data)
return
elif ElementTree.iselement(data):
connection.send(ElementTree.tostring(data))
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
def CalculateDataLength(data):
"""Attempts to determine the length of the data to send.
This method will respond with a length only if the data is a string or
  an ElementTree element.
Args:
    data: object If this is not a string or ElementTree element this function
will return None.
"""
if isinstance(data, str):
return len(data)
elif isinstance(data, list):
return None
elif ElementTree.iselement(data):
return len(ElementTree.tostring(data))
elif hasattr(data, 'read'):
# If this is a file-like object, don't try to guess the length.
return None
else:
return len(str(data))
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=2)
|
{
"content_hash": "022f8f99cdc11a227303d71f60e6c492",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 83,
"avg_line_length": 39.38340486409156,
"alnum_prop": 0.6566893094554833,
"repo_name": "csytan/pycmds",
"id": "73f0f27280b64b919b37d55a0d66ae654480b527",
"size": "28143",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/atom/service.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from collections import namedtuple
ValidationRule = namedtuple('ValidationRule', 'vfunc errfunc getter')
def Rule(vfunc, on_error, getter=None):
    """Constructs a single validation rule. A rule effectively
    is saying "I want to validate this input using
    this function, and if validation fails I want this (on_error)
    to happen."
:param vfunc: The function used to validate this param
:param on_error: The function to call when an error is detected
    :param getter: The source from which the value can be retrieved.
        This function should take a field name
        as a single param.
"""
return ValidationRule(vfunc=vfunc, errfunc=on_error, getter=getter)
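# Hedged usage sketch (not part of the original module): building a rule from
# caller-supplied callables; `is_positive`, `abort_400` and `request_params`
# are hypothetical names.
#
#     def is_positive(value):
#         return value is not None and int(value) > 0
#
#     def abort_400():
#         raise ValueError('validation failed')
#
#     limit_rule = Rule(vfunc=is_positive, on_error=abort_400,
#                       getter=lambda field: request_params.get(field))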
|
{
"content_hash": "d8e9f74962a4c8e00caf3378854d0340",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 38.77777777777778,
"alnum_prop": 0.7249283667621776,
"repo_name": "amitgandhinz/cdn",
"id": "15214ad0af9c93409fdf8e08f04d0c0b8c37079b",
"size": "1283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "poppy/transport/validators/stoplight/rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303888"
},
{
"name": "Shell",
"bytes": "4071"
}
],
"symlink_target": ""
}
|
import random
import os
# wasd movement
# change player to a dictionary with a key that holds onto where the player has been. Then when drawing the map show every cell they've been in.
ROOMS = [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0),
(0, 1), (1, 1), (2, 1), (3, 1), (4, 1),
(0, 2), (1, 2), (2, 2), (3, 2), (4, 2),
(0, 3), (1, 3), (2, 3), (3, 3), (4, 3),
(0, 4), (1, 4), (2, 4), (3, 4), (4, 4)]
def clear_screen():
os.system('cls' if os.name == 'nt' else 'clear')
def get_locations():
return random.sample(ROOMS, 7)
def move_player(player, move):
x, y = player
if move in ["LEFT", "A"]:
x -= 1
if move in ["RIGHT", "D"]:
x += 1
if move in ["UP", "W"]:
y -= 1
if move in ["DOWN", "S"]:
y += 1
return x, y
def get_moves(player):
moves = ["LEFT", "RIGHT", "UP", "DOWN", "W", "A", "S", "D"]
x, y = player
if x == 0:
moves.remove("LEFT")
moves.remove("A")
if x == 4:
moves.remove("RIGHT")
moves.remove("D")
if y == 0:
moves.remove("UP")
moves.remove("W")
if y == 4:
moves.remove("DOWN")
moves.remove("S")
return moves
def move_monster(monster, monster_move_choice, monster_turn):
x, y = monster
if monster_move_choice == "LEFT" and monster_turn is True:
x -= 1
if monster_move_choice == "RIGHT" and monster_turn is True:
x += 1
if monster_move_choice == "UP" and monster_turn is True:
y -= 1
if monster_move_choice == "DOWN" and monster_turn is True:
y += 1
return x, y
def get_monster_moves(monster, game_map, door, key, sword, secret_orb):
moves = ["LEFT", "RIGHT", "UP", "DOWN"]
x, y = monster
mx, my = game_map
dx, dy = door
kx, ky = key
sx, sy = sword
ox, oy = secret_orb
if x == 0 or (mx == x-1 and y == my) or (dx == x-1 and y == dy) or (kx == x-1 and y == ky) or (sx == x-1 and y == sy) or (ox == x-1 and y == oy):
moves.remove("LEFT")
if x == 4 or (mx == x+1 and y == my) or (dx == x+1 and y == dy) or (kx == x+1 and y == ky) or (sx == x+1 and y == sy) or (ox == x+1 and y == oy):
moves.remove("RIGHT")
if y == 0 or (my == y-1 and x == mx) or (dy == y-1 and x == dx) or (ky == y-1 and x == kx) or (sy == y-1 and x == sx) or (oy == y-1 and x == ox):
moves.remove("UP")
if y == 4 or (my == y+1 and x == mx) or (dy == y+1 and x == dx) or (ky == y+1 and x == kx) or (sy == y+1 and x == sx) or (oy == y+1 and x == ox):
moves.remove("DOWN")
return moves
def draw_map(found_map, found_door, has_key, has_sword, has_secret_orb, debug, player, monster, game_map, door, key, sword, secret_orb):
print(" _"*5)
tile = "|{}"
for cell in ROOMS:
x, y = cell
if x < 4:
line_end = ""
if cell == player:
output = tile.format("X")
elif cell == door and (debug is True or found_map is True or found_door is True):
output = tile.format("D")
elif cell == key and (debug is True or found_map is True or has_key is True):
output = tile.format("K")
elif cell == sword and (debug is True or found_map is True or has_sword is True):
output = tile.format("S")
elif cell == secret_orb and (debug is True or has_secret_orb is True):
output = tile.format("T")
elif cell == monster and (debug is True or has_secret_orb is True):
output = tile.format("O")
elif cell == game_map and (debug is True or found_map is True):
output = tile.format("M")
else:
output = tile.format("_")
else:
line_end = "\n"
if cell == player:
output = tile.format("X|")
elif cell == door and (debug is True or found_map is True or found_door is True):
output = tile.format("D|")
elif cell == key and (debug is True or found_map is True or has_key is True):
output = tile.format("K|")
elif cell == sword and (debug is True or found_map is True or has_sword is True):
output = tile.format("S|")
elif cell == secret_orb and (debug is True or has_secret_orb is True):
output = tile.format("T|")
elif cell == monster and (debug is True or has_secret_orb is True):
output = tile.format("O|")
elif cell == game_map and (debug is True or found_map is True):
output = tile.format("M|")
else:
output = tile.format("_|")
print(output, end=line_end)
def game_loop():
player, monster, game_map, door, key, sword, secret_orb = get_locations()
playing = True
monster_turn = True
found_map = False
found_door = False
has_key = False
has_sword = False
has_secret_orb = False
debug = False
valid_inputs = ["LEFT", "RIGHT", "UP", "DOWN", "W", "A", "S", "D", "QUIT", "DEBUG"]
wall_checks = ["QUIT", "DEBUG"]
while playing:
clear_screen()
draw_map(found_map, found_door, has_key, has_sword, has_secret_orb, debug, player, monster, game_map, door, key, sword, secret_orb)
valid_moves = get_moves(player)
valid_monster_moves = get_monster_moves(monster, game_map, door, key, sword, secret_orb)
print("You're currently in room {}".format(player))
print("You can move {}".format(", ".join(valid_moves)))
print("Enter QUIT to quit")
move = input("> ")
move = move.upper()
if move == 'QUIT':
print("\n ** See you next time! ** \n")
break
if move == 'DEBUG':
if debug is False:
debug = True
elif debug is True:
debug = False
if move not in valid_inputs:
input("\n ** That is not a valid input! **\n")
elif move in valid_moves:
if monster_turn is True:
monster_move_choice = random.choice(valid_monster_moves)
monster = move_monster(monster, monster_move_choice, monster_turn)
monster_turn = False
elif monster_turn is False:
monster_turn = True
player = move_player(player, move)
if player == game_map:
if found_map is False:
found_map = True
input("\n ** You've found a map of the dungeon that reveals the location of the KEY and the EXIT! Something else is on here too, it might be worth investigating! **\n")
elif found_map is True:
continue
if player == key:
if has_key is False:
has_key = True
input("\n ** You've found a key! You might need this to open a door... **\n")
if has_key is True:
continue
if player == door:
if has_key is False:
                    found_door = True
                    input("\n ** You've found the door but it is locked and won't budge. There must be a key hidden somewhere else in the dungeon! **\n")
elif has_key is True:
input("\n ** You try the key to open the door and it works! You've escaped the dungeon! Congratulations! ** \n")
playing = False
if player == sword:
if has_sword is False:
has_sword = True
input("\n ** You've found a sword! It appears old and brittle but might help you defend against any dangers! **\n")
if has_sword is True:
continue
if player == secret_orb:
if has_secret_orb is False:
has_secret_orb = True
input("\n ** You've found a secret orb! Peering into it reveals the location of the dungeon's monster. This should help you survive! **\n")
if has_secret_orb is True:
continue
if player == monster:
if has_sword is True:
has_sword = False
input("\n ** You encounter the monster of the dungeon! You fight for your life and just barely manage to escape. Your brittle sword is destroyed in the fray, better be careful! **\n")
elif has_sword is False:
print("\n ** Oh no, the monster got you! With nothing to defend yourself you didn't stand a chance, better luck next time! ** \n")
playing = False
elif move not in wall_checks:
input("\n ** Walls are hard! Don't run into them! **\n")
else:
if input("Play again? [Y/n] ").lower() != "n":
game_loop()
# Welcome the user and initialize game
clear_screen()
print("Welcome to the dungeon!")
input("Press return to start!")
clear_screen()
game_loop()
|
{
"content_hash": "154fcb0e919a2775ee9f42fc183afb87",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 203,
"avg_line_length": 40.92070484581498,
"alnum_prop": 0.5019916029712563,
"repo_name": "CaseyNord/Treehouse",
"id": "982b078c76e54e5c91108cacef2429c27d5bd3c7",
"size": "9953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python Collections/dungeon_game_nord.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "33012"
},
{
"name": "CSS",
"bytes": "46954"
},
{
"name": "HTML",
"bytes": "78185"
},
{
"name": "JavaScript",
"bytes": "3217811"
},
{
"name": "Python",
"bytes": "133602"
}
],
"symlink_target": ""
}
|
import time
from cherrypy import log
from braubuddy.output import IOutput
class ListMemoryOutput(IOutput):
"""
Output to a list in memory.
This is a special output used internally by the Braubuddy engine to store
metrics for the dashboard and api. It could also be used by a thermostat
if it required access to past data.
:param units: Temperature units to output. Use 'celsius' or
'fahrenheit'.
:type units: :class:`str`
:param datapoint_limit: Maximum number of datapoints to store.
:type datapoint_limit: :class:`int` (0 for unlimited)
"""
def __init__(self, units='celsius', datapoint_limit=44640):
self._datapoints = []
self._datapoint_limit = datapoint_limit
super(ListMemoryOutput, self).__init__(units)
def get_datapoints(self, since=None, before=None, limit=None):
results = self._datapoints
if since:
results = [x for x in results if x[4] >= since]
if before:
results = [x for x in results if x[4] <= before]
if limit:
results = results[-limit:]
return results
def publish_status(self, target, temp, heater_percent, cooler_percent):
# Get timestamp in epoch seconds
timestamp = int(time.time())
# Append new status
status = [target, temp, heater_percent, cooler_percent, timestamp]
self._datapoints.append(status)
# Drop datapoints if limit exceeded
if self._datapoint_limit != 0:
while len(self._datapoints) > self._datapoint_limit:
# Discard oldest status datapoint
log(('Datapoint limit exceeded - dropping earliest datapoint: '
'{0!r}').format(self._datapoints[0]))
self._datapoints.pop(0)
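# Hedged usage sketch (not part of the original module): publishing a status
# datapoint and reading recent points back; the values are illustrative only.
#
#     output = ListMemoryOutput(units='celsius', datapoint_limit=1000)
#     output.publish_status(target=18.0, temp=17.5,
#                           heater_percent=100, cooler_percent=0)
#     recent = output.get_datapoints(limit=10)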
|
{
"content_hash": "cf950ed906ab9fab7dbbf15fab5199f6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 35.490196078431374,
"alnum_prop": 0.6248618784530386,
"repo_name": "amorphic/braubuddy",
"id": "7ff5dcd4a6369d09fff6a9b2d387d913e585b285",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "braubuddy/output/listmemory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "589"
},
{
"name": "HTML",
"bytes": "3341"
},
{
"name": "JavaScript",
"bytes": "2296"
},
{
"name": "Python",
"bytes": "93206"
}
],
"symlink_target": ""
}
|
import logging
from collections import namedtuple
from .exceptions import ScreenError
from .version import __title__
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
logger = logging.getLogger(__title__)
Coords = namedtuple('Coords', ('top', 'left', 'right', 'bottom'))
class Grabber(tk.Tk):
WINDOW_COLOR = '#ffffff'
WINDOW_ALPHA = 0.2
RECTANGLE_COLOR = '#000000'
@classmethod
def select_area(cls):
logger.info("Selecting an area.")
# run TK app to select an area
scope = {'coords': None}
def set_coords(_coords):
scope['coords'] = _coords
grabber.exit() # close the window
grabber = cls(on_selected=set_coords)
try:
grabber.mainloop()
except KeyboardInterrupt:
grabber.exit()
coords = scope['coords']
if not coords:
raise ScreenError("Aborted!")
# normalize coords
coords = tuple(map(int, coords))
left, right = sorted(coords[0::2])
top, bottom = sorted(coords[1::2])
logger.debug("Selected area %s.", coords)
return Coords(top, left, right, bottom)
def __init__(self, on_selected):
tk.Tk.__init__(self)
self.title(__title__)
self._on_selected = on_selected
self._coords = None
self._rect_id = None
self._is_drawing = False
self._canvas = None
self.initialize_geometry()
self.initialize_controls()
def initialize_geometry(self):
self.wait_visibility()
self.attributes('-topmost', True)
self.attributes('-fullscreen', True)
self.attributes('-alpha', self.WINDOW_ALPHA)
def initialize_controls(self):
self._canvas = tk.Canvas(self, bg=self.WINDOW_COLOR, cursor='crosshair')
self._canvas.pack(fill=tk.BOTH, expand=1)
self._canvas.bind('<Button-1>', self.start_drawing)
self._canvas.bind('<Button-3>', self.exit)
self._canvas.bind('<ButtonRelease-1>', self.stop_drawing)
self._canvas.bind('<Motion>', self.draw_rectangle)
def start_drawing(self, event):
logger.debug("Selecting screen area.")
self._is_drawing = True
x = self._canvas.canvasx(event.x)
y = self._canvas.canvasy(event.y)
self._coords = [x, y, x, y]
self._rect_id = self._canvas.create_rectangle(*self._coords, fill=self.RECTANGLE_COLOR)
def draw_rectangle(self, event):
if not self._is_drawing:
return
self._coords[2] = self._canvas.canvasx(event.x)
self._coords[3] = self._canvas.canvasy(event.y)
self._canvas.coords(self._rect_id, *self._coords)
def stop_drawing(self, event):
logger.debug("Screen area has been selected. Coords: %s", self._coords)
self._is_drawing = False
self._canvas.delete(self._rect_id)
self._on_selected(self._coords)
def exit(self, event=None):
self.attributes('-alpha', 0)
self.destroy()
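# A minimal usage sketch (not part of the original module): run the selection
# flow directly.  `Grabber.select_area()` blocks until the user drags a
# rectangle with the left mouse button and returns a Coords namedtuple of
# integer top/left/right/bottom values; it raises ScreenError if the
# selection is aborted (right click or Ctrl+C).
if __name__ == '__main__':
    area = Grabber.select_area()
    print(area.left, area.top, area.right, area.bottom)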
|
{
"content_hash": "033f41bc0de387d388dd533f92e18db2",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 95,
"avg_line_length": 27.52252252252252,
"alnum_prop": 0.5950900163666121,
"repo_name": "andrei-shabanski/grab-screen",
"id": "0f45ab9813d8d97e7e3741def9654e13e19d4d32",
"size": "3055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grab_screen/screen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1066"
},
{
"name": "Python",
"bytes": "25866"
}
],
"symlink_target": ""
}
|
from typing import Callable, cast
import numpy
from .backends import Ops
from .config import registry
from .types import FloatsXd, Shape
from .util import partial
# TODO: Harmonize naming with Keras, and fill in missing entries
# https://keras.io/initializers/ We should also have He normal/uniform
# and probably lecun normal/uniform.
# Initialize via numpy, before copying to ops. This makes it easier to work with
# the different backends, because the backend won't affect the randomization.
def lecun_normal_init(ops: Ops, shape: Shape) -> FloatsXd:
scale = numpy.sqrt(1.0 / shape[1])
return ops.asarray_f(cast(FloatsXd, numpy.random.normal(0, scale, shape)))
@registry.initializers("lecun_normal_init.v1")
def configure_lecun_normal_init() -> Callable[[Shape], FloatsXd]:
return partial(lecun_normal_init)
def he_normal_init(ops: Ops, shape: Shape) -> FloatsXd:
scale = numpy.sqrt(2.0 / shape[1])
return ops.asarray_f(cast(FloatsXd, numpy.random.normal(0, scale, shape)))
@registry.initializers("he_normal_init.v1")
def configure_he_normal_init() -> Callable[[Shape], FloatsXd]:
return partial(he_normal_init)
def glorot_normal_init(ops: Ops, shape: Shape) -> FloatsXd:
scale = numpy.sqrt(2.0 / (shape[1] + shape[0]))
return ops.asarray_f(cast(FloatsXd, numpy.random.normal(0, scale, shape)))
@registry.initializers("glorot_normal_init.v1")
def configure_glorot_normal_init() -> Callable[[Shape], FloatsXd]:
return partial(glorot_normal_init)
def he_uniform_init(ops: Ops, shape: Shape) -> FloatsXd:
scale = numpy.sqrt(6.0 / shape[1])
return ops.asarray_f(cast(FloatsXd, numpy.random.uniform(-scale, scale, shape)))
@registry.initializers("he_uniform_init.v1")
def configure_he_uniform_init() -> Callable[[Shape], FloatsXd]:
return partial(he_uniform_init)
def lecun_uniform_init(ops: Ops, shape: Shape) -> FloatsXd:
scale = numpy.sqrt(3.0 / shape[1])
return ops.asarray_f(cast(FloatsXd, numpy.random.uniform(-scale, scale, shape)))
@registry.initializers("lecun_uniform_init.v1")
def configure_lecun_uniform_init() -> Callable[[Shape], FloatsXd]:
return partial(lecun_uniform_init)
def glorot_uniform_init(ops: Ops, shape: Shape) -> FloatsXd:
scale = numpy.sqrt(6.0 / (shape[0] + shape[1]))
return ops.asarray_f(cast(FloatsXd, numpy.random.uniform(-scale, scale, shape)))
@registry.initializers("glorot_uniform_init.v1")
def configure_glorot_uniform_init() -> Callable[[Shape], FloatsXd]:
return partial(glorot_uniform_init)
def zero_init(ops: Ops, shape: Shape) -> FloatsXd:
return ops.alloc(shape)
@registry.initializers("zero_init.v1")
def configure_zero_init() -> Callable[[FloatsXd], FloatsXd]:
return partial(zero_init)
def uniform_init(
ops: Ops, shape: Shape, *, lo: float = -0.1, hi: float = 0.1
) -> FloatsXd:
values = numpy.random.uniform(lo, hi, shape)
return ops.asarray_f(cast(FloatsXd, values.astype("float32")))
@registry.initializers("uniform_init.v1")
def configure_uniform_init(
*, lo: float = -0.1, hi: float = 0.1
) -> Callable[[FloatsXd], FloatsXd]:
return partial(uniform_init, lo=lo, hi=hi)
def normal_init(ops: Ops, shape: Shape, *, mean: float = 0) -> FloatsXd:
    # NB: `mean` is forwarded to numpy.random.normal as `scale` (the standard
    # deviation), so the draws are centred on 0 with a spread of `mean`.
size = int(ops.xp.prod(ops.xp.asarray(shape)))
inits = cast(FloatsXd, numpy.random.normal(scale=mean, size=size).astype("float32"))
inits = ops.reshape_f(inits, shape)
return ops.asarray_f(inits)
@registry.initializers("normal_init.v1")
def configure_normal_init(*, mean: float = 0) -> Callable[[FloatsXd], FloatsXd]:
return partial(normal_init, mean=mean)
__all__ = [
"normal_init",
"uniform_init",
"glorot_uniform_init",
"zero_init",
"lecun_uniform_init",
"he_uniform_init",
"glorot_normal_init",
"he_normal_init",
"lecun_normal_init",
]
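# A minimal usage sketch (not part of the original module): calling one of the
# initializers directly.  `NumpyOps` is assumed to be importable from
# thinc.backends, as elsewhere in the library; in normal use the configured
# variants above are resolved through the registry instead.
#
#   from thinc.backends import NumpyOps
#   ops = NumpyOps()
#   W = glorot_uniform_init(ops, (128, 64))
#   assert W.shape == (128, 64)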
|
{
"content_hash": "cc02123dacc23531856f6b9c47e6081c",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 88,
"avg_line_length": 31.401639344262296,
"alnum_prop": 0.6966849386583137,
"repo_name": "spacy-io/thinc",
"id": "4842f4f08019c334b30be1edec6218b37b4a4bd7",
"size": "3831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thinc/initializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259926"
},
{
"name": "C++",
"bytes": "5131"
},
{
"name": "Python",
"bytes": "135654"
}
],
"symlink_target": ""
}
|
class Secret:
"""Defines Kubernetes Secret Volume"""
def __init__(self, deploy_type, deploy_target, secret, key):
"""Initialize a Kubernetes Secret Object. Used to track requested secrets from
the user.
        :param deploy_type: How the secret is deployed in Kubernetes, either
            `env` or `volume`
        :type deploy_type: str
        :param deploy_target: The environment variable in which to expose the
            secret when `deploy_type` is `env`, or the file path at which to
            mount it when `deploy_type` is `volume`
:type deploy_target: str
:param secret: Name of the secrets object in Kubernetes
:type secret: str
:param key: Key of the secret within the Kubernetes Secret
:type key: str
"""
self.deploy_type = deploy_type
self.deploy_target = deploy_target.upper()
if deploy_type == 'volume':
self.deploy_target = deploy_target
self.secret = secret
self.key = key
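# A minimal usage sketch (not part of the original module); the secret and key
# names below are illustrative only.
#
#   env_secret = Secret(deploy_type='env',
#                       deploy_target='SQL_CONN',
#                       secret='airflow-secrets',
#                       key='sql_alchemy_conn')
#   volume_secret = Secret(deploy_type='volume',
#                          deploy_target='/etc/sql',
#                          secret='airflow-secrets',
#                          key='sql_alchemy_conn')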
|
{
"content_hash": "28492e50e70c7ab9a70202ada8503251",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 86,
"avg_line_length": 42.08695652173913,
"alnum_prop": 0.6208677685950413,
"repo_name": "artwr/airflow",
"id": "bf1526b1a393b173cc85b7abc3986fd154f99025",
"size": "1755",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/kubernetes/secret.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879976"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
}
|
from mozfile import *
|
{
"content_hash": "cbfb7b6591999fae4778a994a83f566c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "sergecodd/FireFox-OS",
"id": "37b8babb80b2633e451d8ceb32ef982d70cf3997",
"size": "222",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "B2G/gecko/testing/mozbase/mozfile/mozfile/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "443"
},
{
"name": "ApacheConf",
"bytes": "85"
},
{
"name": "Assembly",
"bytes": "5123438"
},
{
"name": "Awk",
"bytes": "46481"
},
{
"name": "Batchfile",
"bytes": "56250"
},
{
"name": "C",
"bytes": "101720951"
},
{
"name": "C#",
"bytes": "38531"
},
{
"name": "C++",
"bytes": "148896543"
},
{
"name": "CMake",
"bytes": "23541"
},
{
"name": "CSS",
"bytes": "2758664"
},
{
"name": "DIGITAL Command Language",
"bytes": "56757"
},
{
"name": "Emacs Lisp",
"bytes": "12694"
},
{
"name": "Erlang",
"bytes": "889"
},
{
"name": "FLUX",
"bytes": "34449"
},
{
"name": "GLSL",
"bytes": "26344"
},
{
"name": "Gnuplot",
"bytes": "710"
},
{
"name": "Groff",
"bytes": "447012"
},
{
"name": "HTML",
"bytes": "43343468"
},
{
"name": "IDL",
"bytes": "1455122"
},
{
"name": "Java",
"bytes": "43261012"
},
{
"name": "JavaScript",
"bytes": "46646658"
},
{
"name": "Lex",
"bytes": "38358"
},
{
"name": "Logos",
"bytes": "21054"
},
{
"name": "Makefile",
"bytes": "2733844"
},
{
"name": "Matlab",
"bytes": "67316"
},
{
"name": "Max",
"bytes": "3698"
},
{
"name": "NSIS",
"bytes": "421625"
},
{
"name": "Objective-C",
"bytes": "877657"
},
{
"name": "Objective-C++",
"bytes": "737713"
},
{
"name": "PHP",
"bytes": "17415"
},
{
"name": "Pascal",
"bytes": "6780"
},
{
"name": "Perl",
"bytes": "1153180"
},
{
"name": "Perl6",
"bytes": "1255"
},
{
"name": "PostScript",
"bytes": "1139"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Protocol Buffer",
"bytes": "26553"
},
{
"name": "Python",
"bytes": "8453201"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3481"
},
{
"name": "Ruby",
"bytes": "5116"
},
{
"name": "Scilab",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "3383832"
},
{
"name": "SourcePawn",
"bytes": "23661"
},
{
"name": "TeX",
"bytes": "879606"
},
{
"name": "WebIDL",
"bytes": "1902"
},
{
"name": "XSLT",
"bytes": "13134"
},
{
"name": "Yacc",
"bytes": "112744"
}
],
"symlink_target": ""
}
|
'''OpenGL extension EXT.blend_minmax
This module customises the behaviour of the
OpenGL.raw.GLES1.EXT.blend_minmax to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/blend_minmax.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.EXT.blend_minmax import *
from OpenGL.raw.GLES1.EXT.blend_minmax import _EXTENSION_NAME
def glInitBlendMinmaxEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
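# A minimal usage sketch (not part of the generated module): the check is only
# meaningful once a GLES1 context has been created elsewhere.
#
#   if glInitBlendMinmaxEXT():
#       glBlendEquationEXT(GL_MIN_EXT)   # or GL_MAX_EXT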
|
{
"content_hash": "db434cd53e24294299d0e1b8675f767d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 32.869565217391305,
"alnum_prop": 0.791005291005291,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "9e0f1b0b150d50fbe66a56e35d6d8f4d14789d95",
"size": "756",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES1/EXT/blend_minmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|