| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
import os
import pwd
import Pyro5.api
class RestrictedService:
@Pyro5.api.expose
def who_is_server(self):
return os.getuid(), os.getgid(), pwd.getpwuid(os.getuid()).pw_name
@Pyro5.api.expose
def write_file(self):
# this should fail ("permission denied") because of the dropped privileges
with open("dummy-test-file.bin", "w"):
pass
class RestrictedDaemon(Pyro5.api.Daemon):
def __init__(self):
super().__init__()
print("Server started as:")
print(" uid/gid", os.getuid(), os.getgid())
print(" euid/egid", os.geteuid(), os.getegid())
self.drop_privileges("nobody")
def drop_privileges(self, user):
nobody = pwd.getpwnam(user)
try:
os.setgid(nobody.pw_gid)
os.setuid(nobody.pw_uid)
except OSError:
print("Failed to drop privileges. You'll have to start this program as root to be able to do this.")
raise
print("Privileges dropped. Server now running as", user)
print(" uid/gid", os.getuid(), os.getgid())
print(" euid/egid", os.geteuid(), os.getegid())
if __name__ == "__main__":
rdaemon = RestrictedDaemon()
Pyro5.api.serve({
RestrictedService: "restricted"
}, host="localhost", daemon=rdaemon, use_ns=False)
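# ---------------------------------------------------------------------------
# Hedged companion client sketch (not part of the original example; run it as
# a separate script). The URI below is an assumption -- copy the actual URI
# that Pyro5.api.serve() prints when the server starts.
# ---------------------------------------------------------------------------
# import Pyro5.api
#
# uri = "PYRO:restricted@localhost:9090"   # hypothetical; use the printed URI
# with Pyro5.api.Proxy(uri) as restricted:
#     print("server identity:", restricted.who_is_server())
#     try:
#         restricted.write_file()
#     except Exception as e:
#         print("write_file failed as expected:", e)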
| irmen/Pyro5 | examples/privilege-separation/drop_privs_server.py | Python | mit | 1,333 |
import tkinter as tk
# Default values
DEFAULT_HORIZONTALS = 4
DEFAULT_VERTICALS = 6
DEFAULT_LENGTH = 100
DEFAULT_WIDTH = 5
DEFAULT_ALPHA = 0.03
DEFAULT_BETA = 1e-3
DEFAULT_THRESHOLD = 2
# Dimensions
ENTRY_WIDTH = 4
PADX = 10
class CreationTools(tk.LabelFrame):
"""This class of methods implements creation tools in the toolbar."""
def __init__(self, view, *args, **kwargs):
super(CreationTools, self).__init__(view, *args, text="Create grid", **kwargs)
self.view = view
horizontals_label = tk.Label(self,
text="Horizontal streets [{0}]:".format(DEFAULT_HORIZONTALS),
padx=PADX
)
horizontals_label.pack(side=tk.LEFT)
self.horizontals_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.horizontals_entry.pack(side=tk.LEFT)
verticals_label = tk.Label(self,
text="Vertical streets [{0}]:".format(DEFAULT_VERTICALS),
padx=PADX
)
verticals_label.pack(side=tk.LEFT)
self.verticals_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.verticals_entry.pack(side=tk.LEFT)
initial_length_label = tk.Label(self,
text="Street lengths [{0}]:".format(
DEFAULT_LENGTH),
padx=PADX
)
initial_length_label.pack(side=tk.LEFT)
self.initial_length_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.initial_length_entry.pack(side=tk.LEFT)
draw_button = tk.Button(self,
text="Draw network",
padx=PADX,
command=self.click
)
draw_button.pack(side=tk.LEFT)
def click(self):
"""
This method is triggered when the "draw" button is clicked.
"""
horizontals = self.horizontals_entry.get()
if horizontals == '':
horizontals = DEFAULT_HORIZONTALS
verticals = self.verticals_entry.get()
if verticals == '':
verticals = DEFAULT_VERTICALS
initial_length = self.initial_length_entry.get()
if initial_length == '':
initial_length = DEFAULT_LENGTH
self.view.controller.done_creating(horizontals, verticals, initial_length)
class MovingTools(tk.LabelFrame):
"""This class of methods implements moving tools in the toolbar."""
def __init__(self, view, *args, **kwargs):
super(MovingTools, self).__init__(view, *args, text="Move streets", **kwargs)
self.view = view
moving_message = tk.Label(self,
text="Drag and drop to move street.",
padx=PADX
)
moving_message.pack(side=tk.LEFT)
done_moving_button = tk.Button(self,
text="Done moving",
command=self.click
)
done_moving_button.pack(side=tk.LEFT)
def click(self):
"""
This method is triggered when the "done moving" button is clicked.
"""
self.view.controller.done_moving()
class DeletingTools(tk.LabelFrame):
"""This class of methods implements deleting tools in the toolbar."""
def __init__(self, view, *args, **kwargs):
super(DeletingTools, self).__init__(view, *args, text="Delete streets", **kwargs)
self.view = view
deleting_message = tk.Label(self,
text="Click to delete street.",
padx=PADX
)
deleting_message.pack(side=tk.LEFT)
done_deleting_button = tk.Button(self,
text="Done deleting",
command=self.click
)
done_deleting_button.pack(side=tk.LEFT)
def click(self):
"""
This method is triggered when the "done deleting" button is clicked.
"""
self.view.controller.done_deleting()
class ModifyingTools(tk.LabelFrame):
"""This class of methods implements modifying tools in the toolbar."""
def __init__(self, view, *args, **kwargs):
super(ModifyingTools, self).__init__(
view, *args, text="Select default parameters for streets", **kwargs)
self.view = view
width_message = tk.Label(self,
text="Width [{0}]:".format(DEFAULT_WIDTH),
padx=PADX
)
width_message.pack(side=tk.LEFT)
self.width_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.width_entry.pack(side=tk.LEFT)
alpha_message = tk.Label(self,
text="Wall absorption [{0}]:".format(DEFAULT_ALPHA),
padx=PADX
)
alpha_message.pack(side=tk.LEFT)
self.alpha_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.alpha_entry.pack(side=tk.LEFT)
beta_message = tk.Label(self,
text="Air absorption [{0}]:".format(DEFAULT_BETA),
padx=PADX
)
beta_message.pack(side=tk.LEFT)
self.beta_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.beta_entry.pack(side=tk.LEFT)
modify_button = tk.Button(self,
text="Modify network",
command=self.click
)
modify_button.pack(side=tk.LEFT)
def click(self):
"""
This method is triggered when the "modify network" button is clicked.
"""
width = self.width_entry.get()
if width == '':
width = DEFAULT_WIDTH
alpha = self.alpha_entry.get()
if alpha == '':
alpha = DEFAULT_ALPHA
beta = self.beta_entry.get()
if beta == '':
beta = DEFAULT_BETA
self.view.controller.done_modifying(width, alpha, beta)
class CustomisingTools(tk.LabelFrame):
"""This class of methods implements customising tools in the toolbar."""
def __init__(self, view, *args, **kwargs):
super(CustomisingTools, self).__init__(view, *args, text="Customise streets", **kwargs)
self.view = view
customising_message = tk.Label(self,
text="Click to select street.",
padx=PADX
)
customising_message.pack(side=tk.LEFT)
width = tk.Label(self,
text="Width [{0}]:".format(DEFAULT_WIDTH),
padx=PADX
)
width.pack(side=tk.LEFT)
self.width_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.width_entry.pack(side=tk.LEFT)
alpha = tk.Label(self,
text="Wall absorption [{0}]:".format(DEFAULT_ALPHA),
padx=PADX
)
alpha.pack(side=tk.LEFT)
self.alpha_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.alpha_entry.pack(side=tk.LEFT)
beta = tk.Label(self,
text="Air absorption [{0}]:".format(DEFAULT_BETA),
padx=PADX
)
beta.pack(side=tk.LEFT)
self.beta_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.beta_entry.pack(side=tk.LEFT)
customise_button = tk.Button(self,
text="Customise",
command=self.customise_click
)
customise_button.pack(side=tk.LEFT)
done_customising_button = tk.Button(self,
text="Finished customising",
command=self.done_customising
)
done_customising_button.pack(side=tk.LEFT)
def customise_click(self):
"""
This method is triggered when the "customise" button is clicked.
"""
width = self.width_entry.get()
if width == '':
width = DEFAULT_WIDTH
alpha = self.alpha_entry.get()
if alpha == '':
alpha = DEFAULT_ALPHA
beta = self.beta_entry.get()
if beta == '':
beta = DEFAULT_BETA
self.view.controller.customise_click(width, alpha, beta)
def done_customising(self):
"""
This method is triggered when the "done customising" button is clicked.
"""
self.view.controller.done_customising()
class ModelTools(tk.LabelFrame):
"""This class of methods implements model tools in the toolbar."""
def __init__(self, view, *args, **kwargs):
super(ModelTools, self).__init__(view, *args, text="Choose model parameters", **kwargs)
self.view = view
starting_label = tk.Label(self, text="Source:", padx=PADX )
starting_label.pack(side=tk.LEFT)
self.starting_entry_1 = tk.Entry(self, width=ENTRY_WIDTH)
self.starting_entry_1.pack(side=tk.LEFT)
self.starting_entry_2 = tk.Entry(self, width=ENTRY_WIDTH)
self.starting_entry_2.pack(side=tk.LEFT)
ending_label = tk.Label(self, text="Receiver:", padx=PADX)
ending_label.pack(side=tk.LEFT)
self.ending_entry_1 = tk.Entry(self, width=ENTRY_WIDTH)
self.ending_entry_1.pack(side=tk.LEFT)
self.ending_entry_2 = tk.Entry(self, width=ENTRY_WIDTH)
self.ending_entry_2.pack(side=tk.LEFT)
threshold_label = tk.Label(self,
text="Threshold [{0}]:".format(DEFAULT_THRESHOLD),
padx=PADX
)
threshold_label.pack(side=tk.LEFT)
self.threshold_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.threshold_entry.pack(side=tk.LEFT)
height_label = tk.Label(self,
text="Height:",
padx=PADX
)
height_label.pack(side=tk.LEFT)
self.height_entry = tk.Entry(self, width=ENTRY_WIDTH)
self.height_entry.pack(side=tk.LEFT)
compute_button = tk.Button(self,
text="Compute",
command=self.compute_click
)
compute_button.pack(side=tk.LEFT)
export_button = tk.Button(self,
text="Compute all",
command=self.compute_all_click
)
export_button.pack(side=tk.LEFT)
def compute_click(self):
"""
This method is triggered when the "compute" button is clicked.
"""
starting_1 = self.starting_entry_1.get()
starting_2 = self.starting_entry_2.get()
ending_1 = self.ending_entry_1.get()
ending_2 = self.ending_entry_2.get()
threshold = self.threshold_entry.get()
height = self.height_entry.get()
if threshold == '':
threshold = DEFAULT_THRESHOLD
if height == '':
height = False
source = (starting_1, starting_2)
receiver = (ending_1, ending_2)
self.view.controller.compute_click(source, receiver, threshold, height)
def compute_all_click(self):
"""
This method is triggered when the "compute all" button is clicked.
"""
starting_1 = self.starting_entry_1.get()
starting_2 = self.starting_entry_2.get()
threshold = self.threshold_entry.get()
height = self.height_entry.get()
if threshold == '':
threshold = DEFAULT_THRESHOLD
if height == '':
height = False
source = (starting_1, starting_2)
self.view.controller.compute_all_click(source, threshold, height)
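# ---------------------------------------------------------------------------
# Hedged usage sketch (not from the original project): mounts one tool frame
# on a bare Tk root with a stub controller, just to show the view.controller
# protocol the classes above rely on. The stub class and its method body are
# illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _StubController(object):
        def done_creating(self, horizontals, verticals, initial_length):
            print("create grid:", horizontals, verticals, initial_length)

    demo_root = tk.Tk()
    demo_root.controller = _StubController()  # tool frames call self.view.controller.*
    CreationTools(demo_root).pack(side=tk.TOP, fill=tk.X)
    demo_root.mainloop()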
| janzmazek/wave-propagation | source/components/tools.py | Python | mit | 12,428 |
import os
import base64
import requests
import time
import zlib
from pycrest import version
from pycrest.compat import bytes_, text_
from pycrest.errors import APIException
from pycrest.weak_ciphers import WeakCiphersAdapter
from hashlib import sha224
try:
from urllib.parse import urlparse, urlunparse, parse_qsl
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse, parse_qsl
try:
import pickle
except ImportError: # pragma: no cover
import cPickle as pickle
try:
from urllib.parse import quote
except ImportError: # pragma: no cover
from urllib import quote
import logging
import re
logger = logging.getLogger("pycrest.eve")
cache_re = re.compile(r'max-age=([0-9]+)')
class APICache(object):
def put(self, key, value):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def invalidate(self, key):
raise NotImplementedError
class FileCache(APICache):
def __init__(self, path):
self._cache = {}
self.path = path
if not os.path.isdir(self.path):
os.mkdir(self.path, 0o700)
def _hashx(self, key):
'''Persistent hash value generator
Note: current implementation does not handle nested data structures in given key.
Parameters:
key(frozenset): a frozenset object containing unique data for hash key generation.
Returns:
string: hash value as hexadecimal string.
'''
hashx_value=sha224(pickle.dumps(sorted(key))).hexdigest()
return hashx_value
def _getpath(self, key):
return os.path.join(self.path, str(self._hashx(key)) + '.cache')
def put(self, key, value):
with open(self._getpath(key), 'wb') as f:
f.write(zlib.compress(pickle.dumps(value, -1)))
self._cache[key] = value
def get(self, key):
if key in self._cache:
return self._cache[key]
try:
with open(self._getpath(key), 'rb') as f:
return pickle.loads(zlib.decompress(f.read()))
except zlib.error:
# something went wrong
return None
except IOError as ex:
if ex.errno == 2: # file does not exist (yet)
return None
else:
raise
def invalidate(self, key):
self._cache.pop(key, None)
try:
os.unlink(self._getpath(key))
except OSError as ex:
if ex.errno == 2: # does not exist
pass
else:
raise
class DictCache(APICache):
def __init__(self):
self._dict = {}
def get(self, key):
return self._dict.get(key, None)
def put(self, key, value):
self._dict[key] = value
def invalidate(self, key):
self._dict.pop(key, None)
class RequestsLimiter:
'''Simple requests per second limiter
'''
def __init__(self, requests_per_second=30):
self.requests_per_second=requests_per_second
self._pool=0.0
self._last_activation=time.perf_counter()
def _update_pool(self):
current_time=time.perf_counter()
delta=current_time-self._last_activation
self._last_activation=current_time
self._pool=max(0, self._pool+1-(delta*self.requests_per_second))
return (self._pool-self.requests_per_second)/self.requests_per_second
def sleep(self):
'''Sleep only if limit exceeded.
'''
wait_time=self._update_pool()
if(wait_time>0):
logger.debug('request limit, sleep for: %f',wait_time)
time.sleep(wait_time)
class APIConnection(object):
def __init__(self, additional_headers=None, user_agent=None, cache_dir=None):
# Set up a Requests Session
session = requests.Session()
if additional_headers is None:
additional_headers = {}
if user_agent is None:
user_agent = "PyCrest/{0}".format(version)
session.headers.update({
"User-Agent": user_agent,
"Accept": "application/json",
})
session.headers.update(additional_headers)
session.mount('https://public-crest.eveonline.com',
WeakCiphersAdapter())
self._session = session
self.cache_dir = cache_dir
if self.cache_dir:
self.cache = FileCache(self.cache_dir)
else:
self.cache = DictCache()
# Create a request limiter object. Generally, the CREST requests limit is 30/s
self._requests_limiter=RequestsLimiter(requests_per_second=30)
def get(self, resource, params=None):
logger.debug('Getting resource %s', resource)
if params is None:
params = {}
# remove params from resource URI (needed for paginated stuff)
parsed_uri = urlparse(resource)
qs = parsed_uri.query
resource = urlunparse(parsed_uri._replace(query=''))
prms = {}
for tup in parse_qsl(qs):
prms[tup[0]] = tup[1]
# params supplied to self.get() override parsed params
for key in params:
prms[key] = params[key]
# check cache
''' TODO: check how to differentiate between clients. Current cache control does not work if the auth token is updated.
Going out on a limb here and assuming the secret key will be the equivalent of the current api key, named
as api_key in the pycrest implementation
'''
key = frozenset({'resource':resource, 'key':self.client_id}.items()).union(prms.items())
cached = self.cache.get(key)
if cached and cached['expires'] > time.time():
logger.debug('Cache hit for resource %s (params=%s)', resource, prms)
return cached['payload']
elif cached:
logger.debug('Cache stale for resource %s (params=%s)', resource, prms)
self.cache.invalidate(key)
else:
logger.debug('Cache miss for resource %s (params=%s)', resource, prms)
logger.debug('Getting resource %s (params=%s)', resource, prms)
#limit the requests per second after cache check.
self._requests_limiter.sleep()
res = self._session.get(resource, params=prms)
if res.status_code != 200:
raise APIException("Got unexpected status code from server: %i (%s)" % (res.status_code, res.reason))
ret = res.json()
# cache result
key = frozenset({'resource':resource, 'key':self.client_id}.items()).union(prms.items())
expires = self._get_expires(res)
if expires > 0:
self.cache.put(key, {'expires': time.time() + expires, 'payload': ret})
return ret
def _get_expires(self, response):
if 'Cache-Control' not in response.headers:
return 0
if any([s in response.headers['Cache-Control'] for s in ['no-cache', 'no-store']]):
return 0
match = cache_re.search(response.headers['Cache-Control'])
if match:
logger.debug('Cache resource for %s', int(match.group(1)))
return int(match.group(1))
return 0
class EVE(APIConnection):
def __init__(self, **kwargs):
self.api_key = kwargs.pop('api_key', None)
self.client_id = kwargs.pop('client_id', None)
self.redirect_uri = kwargs.pop('redirect_uri', None)
if kwargs.pop('testing', False):
self._public_endpoint = "http://public-crest-sisi.testeveonline.com/"
self._authed_endpoint = "https://api-sisi.testeveonline.com/"
self._image_server = "https://image.testeveonline.com/"
self._oauth_endpoint = "https://sisilogin.testeveonline.com/oauth"
else:
self._public_endpoint = "https://public-crest.eveonline.com/"
self._authed_endpoint = "https://crest-tq.eveonline.com/"
self._image_server = "https://image.eveonline.com/"
self._oauth_endpoint = "https://login.eveonline.com/oauth"
self._endpoint = self._public_endpoint
self._cache = {}
self._data = None
APIConnection.__init__(self, cache_dir=kwargs.pop('cache_dir', None), **kwargs)
def __call__(self):
if not self._data:
self._data = APIObject(self.get(self._endpoint), self)
return self._data
def __getattr__(self, item):
return self._data.__getattr__(item)
def auth_uri(self, scopes=None, state=None):
s = [] if not scopes else scopes
return "%s/authorize?response_type=code&redirect_uri=%s&client_id=%s%s%s" % (
self._oauth_endpoint,
quote(self.redirect_uri, safe=''),
self.client_id,
"&scope=%s" % ','.join(s) if scopes else '',
"&state=%s" % state if state else ''
)
def _authorize(self, params):
auth = text_(base64.b64encode(bytes_("%s:%s" % (self.client_id, self.api_key))))
headers = {"Authorization": "Basic %s" % auth}
res = self._session.post("%s/token" % self._oauth_endpoint, params=params, headers=headers)
if res.status_code != 200:
raise APIException("Got unexpected status code from API: %i" % res.status_code)
return res.json()
def authorize(self, code):
res = self._authorize(params={"grant_type": "authorization_code", "code": code})
return AuthedConnection(res,
self._authed_endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache_dir=self.cache_dir)
def refr_authorize(self, refresh_token):
res = self._authorize(params={"grant_type": "refresh_token", "refresh_token": refresh_token})
return AuthedConnection({'access_token': res['access_token'],
'refresh_token': refresh_token,
'expires_in': res['expires_in']},
self._authed_endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache_dir=self.cache_dir)
def temptoken_authorize(self, access_token, expires_in, refresh_token):
return AuthedConnection({'access_token': access_token,
'refresh_token': refresh_token,
'expires_in': expires_in},
self._authed_endpoint,
self._oauth_endpoint,
self.client_id,
self.api_key,
cache_dir=self.cache_dir)
class AuthedConnection(EVE):
def __init__(self, res, endpoint, oauth_endpoint, client_id=None, api_key=None, **kwargs):
EVE.__init__(self, **kwargs)
self.client_id = client_id
self.api_key = api_key
self.token = res['access_token']
self.refresh_token = res['refresh_token']
self.expires = int(time.time()) + res['expires_in']
self._oauth_endpoint = oauth_endpoint
self._endpoint = endpoint
self._session.headers.update({"Authorization": "Bearer %s" % self.token})
def __call__(self):
if not self._data:
self._data = APIObject(self.get(self._endpoint), self)
return self._data
def get(self, resource, params=None):
if int(time.time()) >= self.expires:
self.refresh()
return super(self.__class__, self).get(resource, params)
def whoami(self):
if 'whoami' not in self._cache:
self._cache['whoami'] = self.get("https://login.eveonline.com/oauth/verify")
return self._cache['whoami']
def refresh(self):
res = self._authorize(params={"grant_type": "refresh_token", "refresh_token": self.refresh_token})
self.token = res['access_token']
self.expires = int(time.time()) + res['expires_in']
self._session.headers.update({"Authorization": "Bearer %s" % self.token})
return self # for backwards compatibility
class APIObject(object):
def __init__(self, parent, connection):
self._dict = {}
self.connection = connection
for k, v in parent.items():
if type(v) is dict:
self._dict[k] = APIObject(v, connection)
elif type(v) is list:
self._dict[k] = self._wrap_list(v)
else:
self._dict[k] = v
def _wrap_list(self, list_):
new = []
for item in list_:
if type(item) is dict:
new.append(APIObject(item, self.connection))
elif type(item) is list:
new.append(self._wrap_list(item))
else:
new.append(item)
return new
def __getattr__(self, item):
if item in self._dict:
return self._dict[item]
raise AttributeError(item)
def __call__(self, **kwargs):
# Caching is now handled by APIConnection
if 'href' in self._dict:
return APIObject(self.connection.get(self._dict['href'], params=kwargs), self.connection)
else:
return self
def __str__(self): # pragma: no cover
return self._dict.__str__()
def __repr__(self): # pragma: no cover
return self._dict.__repr__()
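# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of eve.py): unauthenticated access to the
# public CREST root. Attribute names on the returned APIObject depend on the
# live API and are not assumed here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    eve = EVE()
    root_resource = eve()     # fetch and wrap the CREST root document
    print(root_resource)      # APIObject wrapping the root resource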
| lodex/PyCrest | pycrest/eve.py | Python | mit | 13,675 |
import cPickle as pickle
import unittest
from .dataflow import *
from .node import Node
from .state import State
n1 = Node("n1")
n2_1 = Node("n2_1", depends = [n1])
n2_2 = Node("n2_2", depends = [n1])
n3 = Node("n3", depends = [n2_1, n2_2])
n4 = Node("n4")
# Can't be a lambda function
def Compute2_1(o1): return ["2_1"] + o1
def Compute2_2(o1): return ["2_2"] + o1
def Compute3(o2_1, o2_2): return ["3"] + o2_1 + o2_2
class MyFlow(DataFlow):
@node_builder(n2_1)
def BuildN2_1(self, d1):
return Compute2_1(d1)
@node_builder(n2_2)
def BuildN2_2(self, d1):
return Compute2_2(d1)
@node_builder(n3)
def BuildN3(self, d2_1, d2_2):
return Compute3(d2_1, d2_2)
class DataFlowTest(unittest.TestCase):
def testDecorator(self):
d = MyFlow()
s = BuildNode(d, n3, State({n1 : ["1"]}))
for node in (n1, n2_1, n2_2, n3):
self.assertIn(node, s)
self.assertEqual(s[n3], ["3", "2_1", "1", "2_2", "1"])
def testBuildNode_multipleNodes(self):
# pass multiple nodes, check that each are built
d = MyFlow()
s = BuildNode(d, (n2_1, n2_2), State({n1 : ["1"]}))
for node in (n1, n2_1, n2_2):
self.assertIn(node, s)
self.assertEqual(s[n2_1], ["2_1", "1"])
self.assertEqual(s[n2_2], ["2_2", "1"])
def testBuildNode_badNode(self):
d = MyFlow()
with self.assertRaises(DependencyError):
BuildNode(d, n4, State())
def testEndToEnd(self):
d = DataFlow()
d.Register(n2_1, Compute2_1)
d.Register(n2_2, Compute2_2)
d.Register(n3, Compute3)
s = BuildNode(d, n3, State({n1 : ["1"]}))
for node in (n1, n2_1, n2_2, n3):
self.assertIn(node, s)
def testBottomNodeGenerator(self):
d = DataFlow()
d.Register(n1, lambda: 1) # callback returns constant value
s = BuildNode(d, n1, State())
self.assertIn(n1, s)
self.assertEqual(s[n1], 1)
def testSerializable_noDecorator(self):
d = DataFlow()
d.Register(n2_1, Compute2_1)
d.Register(n2_2, Compute2_2)
d.Register(n3, Compute3)
d2 = pickle.loads(pickle.dumps(d, protocol = 2))
self.assertSequenceEqual(d._callbacks.keys(), d2._callbacks.keys())
def testSerializable_withDecorator(self):
d1 = MyFlow()
d2 = pickle.loads(pickle.dumps(d1, protocol = 2))
# The same set of nodes should be in the flow:
self.assertSetEqual(set(d1._callbacks.keys()), set(d2._callbacks.keys()))
# However, callbacks won't be equal, since they'll be bound to different
# objects.
for k in d1._callbacks.keys():
f1 = d1._callbacks[k].f
f2 = d2._callbacks[k].f
# Just compare unbound method.
self.assertEqual(f1.im_func, f2.im_func)
def testRegister_badCallback(self):
d = DataFlow()
with self.assertRaises(ValueError):
d.Register(n2_1, toofew_callback)
with self.assertRaises(ValueError):
d.Register(n2_1, toomany_callback)
def toofew_callback(): return "exval"
def toomany_callback(x, y): return "exval"
if __name__ == '__main__':
unittest.main()
| mthomure/glimpse-project | glimpse/util/dataflow/dataflow_test.py | Python | mit | 3,002 |
import numpy as np
a = np.array([0, 1, 2])
b = np.array([2, 0, 6])
print(np.minimum(a, b))
# [0 0 2]
print(np.fmin(a, b))
# [0 0 2]
a_2d = np.arange(6).reshape(2, 3)
print(a_2d)
# [[0 1 2]
# [3 4 5]]
print(np.minimum(a_2d, b))
# [[0 0 2]
# [2 0 5]]
print(np.fmin(a_2d, b))
# [[0 0 2]
# [2 0 5]]
print(np.minimum(a_2d, 2))
# [[0 1 2]
# [2 2 2]]
print(np.fmin(a_2d, 2))
# [[0 1 2]
# [2 2 2]]
print(np.minimum([np.nan, np.nan], [np.inf, 0]))
# [nan nan]
print(np.fmin([np.nan, np.nan], [np.inf, 0]))
# [inf 0.]
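# (Added sketch, not in the original snippet.) The element-wise minimum of
# more than two same-shaped arrays can be taken with the ufunc's reduce method.
print(np.minimum.reduce([a, b, np.array([1, 1, 1])]))
# [0 0 1]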
| nkmk/python-snippets | notebook/numpy_minimum_fmin.py | Python | mit | 524 |
# -*- coding: utf-8 -*-
"""One Million.
Usage:
onemillion <host> [--no-cache | --no-update | (-l <cache> | --cache_location=<cache>)]
onemillion (-h | --help)
onemillion --version
Options:
-h --help Show this screen.
--version Show version.
--no-cache Don't cache the top million domain lists
--no-update Don't update any cached domain lists
-l <cache>, --cache_location=<cache> Specify a cache location
"""
from docopt import docopt
from .__init__ import __version__ as VERSION
from .onemillion import OneMillion
def main(args=None):
"""Console script for onemillion"""
arguments = docopt(__doc__, version=VERSION)
# if there is a cache location, pass it into onemillion
if arguments['--cache_location'] is not None:
one_million = OneMillion(cache=(not arguments['--no-cache']), update=(not arguments['--no-update']), cache_location=arguments['--cache_location'])
else:
# if there is no cache location, use the default one and pass in the other values
one_million = OneMillion(cache=(not arguments['--no-cache']), update=(not arguments['--no-update']))
print(one_million.domain_in_million(arguments['<host>']))
if __name__ == "__main__":
main()
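# ---------------------------------------------------------------------------
# Hedged library-level sketch (not part of the original CLI): the same call
# the CLI makes, using only keyword arguments exercised above. The domain is
# illustrative; the function is defined here but never invoked.
# ---------------------------------------------------------------------------
def _library_usage_example():
    one_million = OneMillion(cache=True, update=True)
    return one_million.domain_in_million("example.com")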
| fhightower/onemillion | onemillion/cli.py | Python | mit | 1,255 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('images_metadata', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='imagethesaurus',
name='region',
field=models.ManyToManyField(to='defcdb.DC_region', blank=True),
),
]
| acdh-oeaw/defc-app | images_metadata/migrations/0002_auto_20160314_1343.py | Python | mit | 433 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import HybridComputeManagementClientConfiguration
from .operations import MachinesOperations
from .operations import MachineExtensionsOperations
from .operations import Operations
from .operations import PrivateLinkScopesOperations
from .operations import PrivateLinkResourcesOperations
from .operations import PrivateEndpointConnectionsOperations
from .. import models
class HybridComputeManagementClient(object):
"""The Hybrid Compute Management Client.
:ivar machines: MachinesOperations operations
:vartype machines: azure.mgmt.hybridcompute.aio.operations.MachinesOperations
:ivar machine_extensions: MachineExtensionsOperations operations
:vartype machine_extensions: azure.mgmt.hybridcompute.aio.operations.MachineExtensionsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.hybridcompute.aio.operations.Operations
:ivar private_link_scopes: PrivateLinkScopesOperations operations
:vartype private_link_scopes: azure.mgmt.hybridcompute.aio.operations.PrivateLinkScopesOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.hybridcompute.aio.operations.PrivateLinkResourcesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.hybridcompute.aio.operations.PrivateEndpointConnectionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = HybridComputeManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.machines = MachinesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.machine_extensions = MachineExtensionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_scopes = PrivateLinkScopesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "HybridComputeManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
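# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file). The azure-identity
# import and the `machines.list_by_subscription` operation name are
# assumptions; the subscription id is a placeholder.
# ---------------------------------------------------------------------------
# import asyncio
# from azure.identity.aio import DefaultAzureCredential
#
# async def _list_machines() -> None:
#     async with HybridComputeManagementClient(
#         credential=DefaultAzureCredential(),
#         subscription_id="<subscription-id>",
#     ) as client:
#         async for machine in client.machines.list_by_subscription():
#             print(machine.name)
#
# if __name__ == "__main__":
#     asyncio.run(_list_machines())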
| Azure/azure-sdk-for-python | sdk/hybridcompute/azure-mgmt-hybridcompute/azure/mgmt/hybridcompute/aio/_hybrid_compute_management_client.py | Python | mit | 5,609 |
# The MIT License (MIT)
# Copyright (c) 2021 Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Internal class for query execution endpoint component implementation in the
Azure Cosmos database service.
"""
import numbers
import copy
import hashlib
import json
from azure.cosmos._execution_context.aggregators import (
_AverageAggregator,
_CountAggregator,
_MaxAggregator,
_MinAggregator,
_SumAggregator,
)
class _QueryExecutionEndpointComponent(object):
def __init__(self, execution_context):
self._execution_context = execution_context
async def __aiter__(self):
return self
async def __anext__(self):
# supports python 3 iterator
return await self._execution_context.__anext__()
class _QueryExecutionOrderByEndpointComponent(_QueryExecutionEndpointComponent):
"""Represents an endpoint in handling an order by query.
For each processed orderby result it returns the 'payload' item of the result.
"""
async def __anext__(self):
payload = await self._execution_context.__anext__()
return payload["payload"]
class _QueryExecutionTopEndpointComponent(_QueryExecutionEndpointComponent):
"""Represents an endpoint in handling top query.
It returns at most as many results as the top argument specifies.
"""
def __init__(self, execution_context, top_count):
super(_QueryExecutionTopEndpointComponent, self).__init__(execution_context)
self._top_count = top_count
async def __anext__(self):
if self._top_count > 0:
res = await self._execution_context.__anext__()
self._top_count -= 1
return res
raise StopAsyncIteration
class _QueryExecutionDistinctOrderedEndpointComponent(_QueryExecutionEndpointComponent):
"""Represents an endpoint in handling distinct query.
It returns only those values not already returned.
"""
def __init__(self, execution_context):
super(_QueryExecutionDistinctOrderedEndpointComponent, self).__init__(execution_context)
self.last_result = None
async def __anext__(self):
res = await self._execution_context.__anext__()
while self.last_result == res:
res = await self._execution_context.__anext__()
self.last_result = res
return res
class _QueryExecutionDistinctUnorderedEndpointComponent(_QueryExecutionEndpointComponent):
"""Represents an endpoint in handling distinct query.
It returns only those values not already returned.
"""
def __init__(self, execution_context):
super(_QueryExecutionDistinctUnorderedEndpointComponent, self).__init__(execution_context)
self.last_result = set()
def make_hash(self, value):
if isinstance(value, (set, tuple, list)):
return tuple([self.make_hash(v) for v in value])
if not isinstance(value, dict):
if isinstance(value, numbers.Number):
return float(value)
return value
new_value = copy.deepcopy(value)
for k, v in new_value.items():
new_value[k] = self.make_hash(v)
return tuple(frozenset(sorted(new_value.items())))
async def __anext__(self):
res = await self._execution_context.__anext__()
json_repr = json.dumps(self.make_hash(res)).encode("utf-8")
hash_object = hashlib.sha1(json_repr) # nosec
hashed_result = hash_object.hexdigest()
while hashed_result in self.last_result:
res = await self._execution_context.__anext__()
json_repr = json.dumps(self.make_hash(res)).encode("utf-8")
hash_object = hashlib.sha1(json_repr) # nosec
hashed_result = hash_object.hexdigest()
self.last_result.add(hashed_result)
return res
class _QueryExecutionOffsetEndpointComponent(_QueryExecutionEndpointComponent):
"""Represents an endpoint in handling offset query.
It skips as many leading results as the offset argument specifies before returning the rest.
"""
def __init__(self, execution_context, offset_count):
super(_QueryExecutionOffsetEndpointComponent, self).__init__(execution_context)
self._offset_count = offset_count
async def __anext__(self):
while self._offset_count > 0:
res = await self._execution_context.__anext__()
if res is not None:
self._offset_count -= 1
else:
raise StopAsyncIteration
return await self._execution_context.__anext__()
class _QueryExecutionAggregateEndpointComponent(_QueryExecutionEndpointComponent):
"""Represents an endpoint in handling aggregate query.
It returns only aggregated values.
"""
def __init__(self, execution_context, aggregate_operators):
super(_QueryExecutionAggregateEndpointComponent, self).__init__(execution_context)
self._local_aggregators = []
self._results = None
self._result_index = 0
for operator in aggregate_operators:
if operator == "Average":
self._local_aggregators.append(_AverageAggregator())
elif operator == "Count":
self._local_aggregators.append(_CountAggregator())
elif operator == "Max":
self._local_aggregators.append(_MaxAggregator())
elif operator == "Min":
self._local_aggregators.append(_MinAggregator())
elif operator == "Sum":
self._local_aggregators.append(_SumAggregator())
async def __anext__(self):
async for res in self._execution_context:
for item in res: #TODO check on this being an async loop
for operator in self._local_aggregators:
if isinstance(item, dict) and item:
operator.aggregate(item["item"])
elif isinstance(item, numbers.Number):
operator.aggregate(item)
if self._results is None:
self._results = []
for operator in self._local_aggregators:
self._results.append(operator.get_result())
if self._result_index < len(self._results):
res = self._results[self._result_index]
self._result_index += 1
return res
raise StopAsyncIteration
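# ---------------------------------------------------------------------------
# Hedged illustration (not part of the SDK): the components above only need an
# object exposing __anext__, so a tiny fake execution context is enough to see
# the "top N" behaviour. Names prefixed with _Fake/_demo are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import asyncio

    class _FakeExecutionContext(object):
        """Async-iterator stand-in yielding a fixed list of items."""
        def __init__(self, items):
            self._items = iter(items)

        async def __anext__(self):
            try:
                return next(self._items)
            except StopIteration:
                raise StopAsyncIteration

    async def _demo():
        top = _QueryExecutionTopEndpointComponent(_FakeExecutionContext([1, 2, 3, 4]), 2)
        seen = []
        try:
            while True:
                seen.append(await top.__anext__())
        except StopAsyncIteration:
            pass
        print(seen)  # expected output: [1, 2]

    asyncio.run(_demo())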
| Azure/azure-sdk-for-python | sdk/cosmos/azure-cosmos/azure/cosmos/_execution_context/aio/endpoint_component.py | Python | mit | 7,370 |
# -*- coding: utf-8 -*-
import os
import codecs
import sys
import urllib
import json
from wekeypedia.wikipedia_page import WikipediaPage as Page, url2title, url2lang
from wekeypedia.wikipedia_network import WikipediaNetwork
from wekeypedia.exporter.nx_json import NetworkxJson
from multiprocessing.dummy import Pool as ThreadPool
sources_file = "sources.txt"
pages_file = "pagenames.txt"
def sources_pages(sources):
pages = []
for s in sources:
print s.strip()
w = Page()
w.fetch_from_api_title(s.strip())
pages.extend(w.get_links())
return list(set(pages))
sources = codecs.open(sources_file,"r", "utf-8-sig").readlines()
pages = sources_pages(sources)
# remove special pages
pages = [ p for p in pages if not(":" in p) ]
print len(pages)
pages = sorted(pages)
with codecs.open(pages_file,"w", "utf-8-sig") as f:
for p in pages:
f.write("%s\n" % (p))
f.close()
# exit()
pages = codecs.open("pagenames.txt","r", "utf-8-sig").readlines()
pages = map(lambda x: x.strip(), pages)
def fetch_page(source):
if os.path.exists("pages/%s.json" % (source)) == True:
return
print "📄 fetching: %s" % source.encode('utf-8-sig')
p = Page()
r = p.fetch_from_api_title(source.strip(), { "redirects":"true", "rvparse" : "true", "prop": "info|revisions", "inprop": "url", "rvprop": "content" })
with codecs.open("pages/%s.json" % (source), "w", "utf-8-sig") as f:
json.dump(r, f)
def fetch_revisions(source):
if os.path.exists("revisions/%s.json" % (source)) == True:
return
print "📄 fetching revisions: %s" % source.encode('utf-8-sig')
p = Page()
p.fetch_from_api_title(source.strip())
r = p.get_all_editors()
with codecs.open("revisions/%s.json" % (source), "w", "utf-8-sig") as f:
json.dump(r, f)
def fetch_pageviews(source):
if os.path.exists("pageviews/%s.json" % (source)) == False:
print "📄 fetching pageviews: %s" % source.encode('utf-8-sig')
p = Page()
p.fetch_from_api_title(source.strip())
r = p.get_pageviews()
with codecs.open("pageviews/%s.json" % (source), "w", "utf-8-sig") as f:
json.dump(r, f)
# print pages
pool = ThreadPool(8)
for p in pages:
# title = p["pagename"]
title = p
# fetch_page(p["pagename"])
# fetch_revisions(p["pagename"])
# fetch_pageviews(p["pagename"])
pool.apply_async(fetch_page, args=(title,))
pool.apply_async(fetch_revisions, args=(title,))
# this one is particularly slow. skip it for demo purposes
# pool.apply_async(fetch_pageviews, args=(title,))
pool.close()
pool.join()
| WeKeyPedia/toolkit-python | examples/analysis-data.py | Python | mit | 2,562 |
from planner import feedback as fb
from extend import RRG as rrg
from helper import visualize as vis
import matplotlib.image as mpimg
from env_classes.States import *
import sys
from time import clock
from math import floor
from helper.iClap_helpers import *
from helper.logging import *
import helper.bvp as bvp
def main():
global robots, robotNum, env, graphs, goals, prevTime, curTime, iterations
maxIterations = int(raw_input("How many iterations? [default = 1500]") or "1500")
env = str(raw_input("What is the environment file? [default = env.png]") or "./env.png")
x = int(raw_input("What is the robot's x coord? [default = 10]") or "10")
y = int(raw_input("What is the robot's y coord? [default = 10]") or "10")
gx = int(raw_input("What is the goal's x coord? [default = 90]") or "90")
gy = int(raw_input("What is the goal's y coord? [default = 90]") or "90")
setup(maxIterations=maxIterations, robotPos={'x':x, 'y':y}, goalPos={'x':gx, 'y':gy})
run_loop()
cleanup()
vis.run(robots[0], env)
def cleanup():
global robots, robotNum, env, graphs, goals, prevTime, curTime, iterations, logg
imgOutput = []
for robot in robots:
if robot['finished']:
robot['path'] = fb.pathFinding(robot['graph'], robot['root'], robot['goal'])
logg.write_path(iterations, robot['path'])
logg.write_states(robot['graph'])
else:
logg.ERROR("Robot did not finish")
logg.close()
def output(i):
global robots, robotNum, env, graphs, goals, prevTime, curTime, iterations, logg
prevTime = curTime
curTime = clock()
if floor(curTime)-floor(prevTime) >= 1:
logg.OUTPUT("Iteration: "+str(i))
logg.OUTPUT("Time elapsed (s): %f" % clock(), lvl=2)
logg.VERBOUT("- Framerate: %f" % (1/(curTime-prevTime)))
def run_loop():
global robots, robotNum, env, graphs, goals, prevTime, curTime, iterations
for i in range(iterations):
for robot in robots:
# Run RRG
robot['graph'] = rrg.run(robot['graph'], robot['goal'])
# Check if finished
if not robot['finished']:
robot = finishCheck(robot)
else:
# Run feedback loop
robot['graph'] = fb.run(robot['graph'], robot['goal'])
robot['path'] = fb.pathFinding(robot['graph'], robot['root'], robot['goal'])
logg.write_path(i, robot['path'])
output(i)
def setup(maxIterations=1000, robotPos=None, goalPos=None):
global robots, robotNum, env, graphs, goals, prevTime, curTime, iterations, logg
logg = logger()
for arg in sys.argv[-2:]:
if arg == "-v" or arg == "--verbose":
logg.verbose = True
if arg == "-d" or arg == "--debug":
logg.debug = True
fb.LOG = logg
env = mpimg.imread(env)
robots = []
robotNum = 1
graphs = []
goals = []
prevTime = clock()
curTime = clock()
iterations = maxIterations
rrg.initRRG(env, dyType='point', goalRadius=5, minRadius=5, logg=logg)
for i in range(robotNum):
robots.append({})
robots[i]['finished'] = False
logg.VERBOUT("Initializing robot")
if robotPos == None:
robots[i]['graph'] = rrg.initGraph()
robots[i]['root'] = next(iter(robots[i]['graph'].keys()))
else:
x = floor(robotPos['x'])
y = floor(robotPos['y'])
robots[i]['root'] = State(x, y)
robots[i]['graph'] = {robots[i]['root']:[]}
logg.VERBOUT("Initial state: "+str(robots[i]['root']), lvl=2)
logg.VERBOUT("Initializing goal")
if goalPos == None:
robots[i]['goal'] = rrg.sample(env)
else:
x = floor(goalPos['x'])
y = floor(goalPos['y'])
robots[i]['goal'] = State(x, y)
logg.VERBOUT("Initial goal: "+str(robots[i]['goal']), lvl=2)
robots[i]['path'] = {}
robots[i]['goal'].cost = -100
if __name__ == '__main__':
main()
| quentunahelper/game-theoretic-feedback-loop | RRG/iCLAP.py | Python | mit | 3,595 |
#! /usr/bin/env python
import logging
import random
import pickle
import os
import sys
import getopt
from lib.common import LOW_SCORE, finished_flag, visited_flag, result_flag, error_flag
from lib.common import touch, deepcopy
from lib.common import setup_logging
# Import pdfrw later for controlling the logging format.
# Note: the original pdfrw should be used in parsing the repacked seeds for efficiency.
# No, we have to use the modified version, due to the additional trace issue.
class GPPdf:
def __init__(self,
job_dir,
seed_sha1,
seed_file_path,
logger,
random_state_file_path,
ext_genome,
success_traces_path,
promising_traces_path,
gp_params,
fitness_function,
):
self.logger = logger
self.job_dir = job_dir
self.seed_sha1 = seed_sha1
# Load the pre-defined random state for reproducing the existing results.
if random_state_file_path:
try:
random_state = pickle.load(open(random_state_file_path, 'rb'))
random.setstate(random_state)
logger.debug("Loaded a random state from %s" % random_state_file_path)
except:
logger.warning("Failed to load random state from %s" % random_state_file_path)
# Save random state for reproducing results in the future.
random_state_file = os.path.join(self.job_dir, "random_state.pickle")
random_state = random.getstate()
pickle.dump(random_state, open(random_state_file, 'wb'))
self.fitness_func = fitness_function
# Load the seed.
self.seed_file_path = seed_file_path
self.seed_fitness = self.fitness([self.seed_file_path], self.seed_sha1)[0]
self.seed_root = PdfGenome.load_genome(seed_file_path)
self.logger.info("Loaded %s as PDF seed, fitness %.2f." % (seed_file_path, self.seed_fitness))
# Load the external genome.
self.ext_genome = ext_genome
# Load traces.
self.success_traces_path = success_traces_path
self.success_traces = Trace.load_traces(self.success_traces_path)
self.promising_traces_path = promising_traces_path
self.promising_traces = Trace.load_traces(self.promising_traces_path)
# Initiate some parameters.
self.gp_params = gp_params
self.pop_size = gp_params['pop_size']
self.max_gen = gp_params['max_gen']
self.mut_rate = gp_params['mut_rate']
self.xover_rate = gp_params['xover_rate']
self.fitness_threshold = gp_params['fitness_threshold']
def save_variants_to_files(self):
folder = "./variants/generation_%d" % (self.generation)
folder = os.path.join(self.job_dir, folder)
if not os.path.isdir(folder):
os.makedirs(folder)
file_paths = []
for j in range(len(self.popul)):
path = "./variants/generation_%d/%d.pdf" % (self.generation, j)
path = os.path.join(self.job_dir, path)
file_paths.append(path)
PdfGenome.save_to_file(self.popul[j], path)
return file_paths
def load_variant(self, gen, vid):
path = "./variants/generation_%d/%d.pdf" % (gen, vid)
path = os.path.join(self.job_dir, path)
pdf_obj = PdfGenome.load_genome(path)
return pdf_obj
def load_variant_trace(self, gen, vid):
path = "./variants/generation_%d/%d.pdf" % (gen, vid)
path = os.path.join(self.job_dir, path)
trace = PdfGenome.load_trace(path)
return trace
def fitness(self, *args):
return self.fitness_func(*args)
def run(self):
self.logger.info("Start a gp task with %s" % (self.gp_params))
score_file_name = os.path.join(self.job_dir, "fitness_scores.pickle")
self.fitness_scores = {}
self.popul = self.initial_population()
self.generation = 1
while self.generation <= self.max_gen:
self.logger.info("There're %d variants in population at generation %d." % (len(self.popul), self.generation))
file_paths = self.save_variants_to_files()
scores = self.fitness(file_paths, self.seed_sha1)
# Introduce a fake score for testing tracing.
# scores = [0.1, 0.2] * (self.pop_size/2)
self.fitness_scores[self.generation] = scores
pickle.dump(self.fitness_scores, open(score_file_name, 'wb'))
self.logger.info("Fitness scores: %s" % scores)
self.logger.info("Sorted fitness: %s" % sorted(scores, reverse=True))
if max(scores) > self.fitness_threshold:
self.best_score = max(scores)
self.logger.info("Already got a high score [%.2f]>%.2f variant, break the GP process." % (max(scores), self.fitness_threshold))
# Store the success traces.
for i in range(len(scores)):
score = scores[i]
if score > self.fitness_threshold:
success_trace = self.popul[i].active_trace
self.success_traces.append(success_trace)
# Dump the new generated traces.
# We assume no concurrent GP tasks depending on the traces.
Trace.dump_traces(self.success_traces, self.success_traces_path)
touch(os.path.join(self.job_dir, finished_flag))
break
elif self.generation == self.max_gen:
self.logger.info("Failed at max generation.")
if max(scores) >= self.seed_fitness:
best_gen, best_vid, self.best_score = self.get_best_variant(1, self.generation)
promising_trace = self.load_variant_trace(best_gen, best_vid)
self.logger.info("Save the promising trace %.2f of %d:%d" % (best_score, best_gen, best_vid))
self.promising_traces.append(promising_trace)
Trace.dump_traces(self.promising_traces, self.promising_traces_path, exclude_traces=self.success_traces)
break
# Crossover
if self.xover_rate > 0:
self.popul = self.select(self.popul, scores, self.pop_size/2)
self.logger.debug("After selecting goods and replacing bads, we have %d variants in population." % len(self.popul))
for p1,p2 in zip(self.popul[0::2], self.popul[1::2]):
c1, c2 = PdfGenome.crossover(p1, p2)
self.popul.append(c1)
self.popul.append(c2)
self.logger.debug("After crossover, we have %d variants in population." % len(self.popul))
else: # No Crossover
self.popul = self.select(self.popul, scores, self.pop_size)
self.logger.debug("After selecting goods and replacing bads, we have %d variants in population." % len(self.popul))
# Mutation
for i in range(len(self.popul)):
if i not in self.vid_from_trace:
self.logger.debug("Generating %d:%d variant" % (self.generation+1, i))
self.popul[i] = PdfGenome.mutation(self.popul[i], self.mut_rate, self.ext_genome)
else:
self.logger.debug("Keep %d:%d variant from trace." % (self.generation+1, i))
self.generation = self.generation + 1
self.logger.info("Stopped the GP process with max fitness %.2f." % self.best_score)
touch(os.path.join(self.job_dir, result_flag % self.best_score))
return True
def initial_population(self):
logger = self.logger
logger.info("Getting initial population from existing mutation traces (success: %d, promising: %d)." \
% (len(self.success_traces), len(self.promising_traces)))
popul = []
traces = self.success_traces + self.promising_traces
traces = Trace.get_distinct_traces(traces)
logger.info("Got %d distinct traces" % len(traces))
self.traces = traces
self.remaining_traces_id = range(len(traces))
if 0 < len(self.remaining_traces_id) <= self.pop_size:
tid_picked = self.remaining_traces_id
elif len(self.remaining_traces_id) > self.pop_size:
tid_picked = random.sample(self.remaining_traces_id, self.pop_size)
tid_picked.sort()
else:
tid_picked = []
# generate_variants_from_traces
for i in tid_picked:
self.remaining_traces_id.remove(i)
logger.debug("Generating %d variant from existing trace." % i)
trace = traces[i]
variant_root = Trace.generate_variant_from_trace(self.seed_root, trace, self.ext_genome)
popul.append(variant_root)
if len(popul) < int(self.pop_size):
logger.info("Getting %d more variants in initial population by random mutation." \
% (int(self.pop_size) - len(popul)))
while len(popul) < int(self.pop_size):
i = len(popul)
logger.debug("Getting variant %d in initial population." % i)
root = deepcopy(self.seed_root)
root = PdfGenome.mutation(root, self.mut_rate, self.ext_genome)
popul.append(root)
return popul
def get_best_variant(self, start_gen, end_gen):
best_gen = 1
best_vid = 0
best_score = LOW_SCORE
for gen in range(start_gen, end_gen+1):
scores = self.fitness_scores[gen]
if max(scores) > best_score:
best_score = max(scores)
best_gen = gen
best_vid = scores.index(best_score)
return best_gen, best_vid, best_score
def select(self, orig_list, scores, sel_size):
# when reverse==False, select variants with lower score, otherwise select higher scores.
sorted_indices = [i[0] for i in sorted(enumerate(scores), key=lambda x:x[1], reverse=True)]
ret = []
self.vid_from_trace = []
for i in sorted_indices[:sel_size]:
if scores[i] == LOW_SCORE:
if len(self.remaining_traces_id) > 0:
# TODO: need to label these, not to mutate in next generation.
self.vid_from_trace.append(i)
tid_picked = random.choice(self.remaining_traces_id)
self.remaining_traces_id.remove(tid_picked)
self.logger.info("Ignored a variant with low score, generating from existing trace %d" % tid_picked)
trace = self.traces[tid_picked]
new_variant = Trace.generate_variant_from_trace(self.seed_root, trace, self.ext_genome)
ret.append(new_variant)
elif self.generation == 1:
self.logger.info("Ignored a variant with low score, replace with original seed.")
ret.append(deepcopy(self.seed_root))
else:
choice = random.choice(['seed', 'last_gen_best', 'historic_best'])
if choice == "seed":
self.logger.info("Ignored a variant with low score, replace with original seed.")
ret.append(deepcopy(self.seed_root))
elif choice == "last_gen_best":
best_gen, best_vid, best_score = self.get_best_variant(self.generation-1, self.generation-1)
best_root = self.load_variant(best_gen, best_vid)
ret.append(best_root)
self.logger.info("Ignored a variant with low score, replace with best variant in last generation[%d, %d]." % (best_gen, best_vid))
elif choice == "historic_best":
best_gen, best_vid, best_score = self.get_best_variant(1, self.generation-1)
best_root = self.load_variant(best_gen, best_vid)
ret.append(best_root)
self.logger.info("Ignored a variant with low score, replace with best variant in historic generation[%d, %d]." % (best_gen, best_vid))
else:
self.logger.info("Selected a file with score %.2f" % scores[i])
ret.append(orig_list[i])
return ret
def get_opt(argv):
classifier_name = None
start_file = None
ext_genome_folder = None
pop_size = None
max_gen = None
mut_rate = None
xover_rate = 0
stop_fitness = None
random_state_file_path = None
token = None
round_id = 1
help_msg = "gp.py -c <classifier name> -o <oracle name> \
-s <start file location> -e <external genome folder> \
-p <population size> -g <max generation> -m <mutation rate> \
-x <crossover rate> -r <random_state_file_path> -t <task_token>\
--round <round_id>\
-f <stop criterion in fitness score>"
if len(argv) < 2:
print help_msg
sys.exit(2)
try:
opts, args = getopt.getopt(argv[1:],"hc:s:e:p:g:m:f:x:r:t:",["classifier=",
"sfile=",
"extgenome=",
"popu=",
"gen=",
"mut=",
"fitness=",
"crossover=",
"random_state=",
"token=",
"round=",
])
except getopt.GetoptError:
print help_msg
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print help_msg
sys.exit()
elif opt in ("-c", "--classifier"):
classifier_name = arg
elif opt in ("-s", "--sfile"):
start_file = arg
elif opt in ("-e", "--extgenome"):
ext_genome_folder = arg
elif opt in ("-p", "--popu"):
pop_size = int(arg)
elif opt in ("-g", "--gen"):
max_gen = int(arg)
elif opt in ("-m", "--mut"):
mut_rate = float(arg)
elif opt in ("-x", "--crossover"):
xover_rate = float(arg)
elif opt in ("-f", "--fitness"):
stop_fitness = float(arg)
elif opt in ("-r", "--random_state"):
random_state_file_path = arg
elif opt in ("-t", "--token"):
token = arg
elif opt in("--round"):
round_id = int(arg)
if xover_rate != 0 and pop_size % 4 != 0:
print "The population size should be times of 4."
sys.exit(2)
print classifier_name, start_file, ext_genome_folder, \
pop_size, max_gen, mut_rate, xover_rate, \
stop_fitness, random_state_file_path, token, round_id
return classifier_name, start_file, ext_genome_folder, \
pop_size, max_gen, mut_rate, xover_rate, \
stop_fitness, random_state_file_path, token, round_id
if __name__ == "__main__":
classifier_name, start_file_path, \
ext_genome_folder, pop_size, max_gen, mut_rate, \
xover_rate, stop_fitness, random_state_file_path, \
task_token, round_id = get_opt(sys.argv)
start_hash = os.path.basename(start_file_path).split('.')[0]
for rid in range(1, round_id + 1):
job_dir = "./results/%s/log_r%d/classifier=%s,mut=%.1f,xover=%.1f,popsz=%d,maxgen=%d,stopfit=%.2f,start=%s" \
% (task_token, rid, classifier_name, mut_rate, xover_rate, pop_size, max_gen, stop_fitness, start_hash)
if not os.path.isdir(job_dir):
os.makedirs(job_dir)
# skip the succeeded tasks in previous rounds.
# skip all the visited tasks in the current round.
if os.path.exists(os.path.join(job_dir, finished_flag)):
sys.exit(0)
if rid == round_id and os.path.exists(os.path.join(job_dir, visited_flag)):
sys.exit(0)
traces_dir = "./results/%s/trace/" % task_token
if not os.path.isdir(traces_dir):
os.makedirs(traces_dir)
success_traces_path = traces_dir + "success_traces.pickle"
promising_traces_path = traces_dir + "promising_traces.pickle"
log_file_path = os.path.join(job_dir, visited_flag)
setup_logging(log_file_path)
logger = logging.getLogger('gp.core')
logger.info("Starting logging for a GP process...")
# Because logging is called inside pdfrw, these modules have to be imported after logging's basicConfig.
# Otherwise, the basicConfig above would be overridden.
from lib.pdf_genome import PdfGenome
from lib.trace import Trace
if classifier_name == 'pdfrate':
from lib.fitness import fitness_pdfrate as fitness_func
elif classifier_name == 'hidost':
from lib.fitness import fitness_hidost as fitness_func
elif classifier_name == "hidost_pdfrate":
from lib.fitness import fitness_hidost_pdfrate as fitness_func
elif classifier_name == "hidost_pdfrate_mean":
from lib.fitness import fitness_hidost_pdfrate_mean as fitness_func
elif classifier_name == "hidost_pdfrate_sigmoid":
from lib.fitness import fitness_hidost_pdfrate_sigmoid as fitness_func
gp_params = {'pop_size': pop_size, 'max_gen': max_gen, \
'mut_rate': mut_rate, 'xover_rate': xover_rate, \
'fitness_threshold': stop_fitness}
ext_genome = PdfGenome.load_external_genome(ext_genome_folder)
try:
gp = GPPdf( job_dir = job_dir,
seed_sha1 = start_hash,
seed_file_path = start_file_path,
logger = logger,
random_state_file_path = random_state_file_path,
ext_genome = ext_genome,
success_traces_path = success_traces_path,
promising_traces_path = promising_traces_path,
gp_params = gp_params,
fitness_function = fitness_func,
)
gp.run()
except Exception, e:
touch(os.path.join(job_dir, error_flag))
logger.exception(e)
sys.exit(1)
|
uvasrg/EvadeML
|
gp.py
|
Python
|
mit
| 18,807
|
print "-- This shows that the environment file is loaded --"
|
abbotao/harmonious
|
test/input_data/environment.py
|
Python
|
mit
| 61
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import unittest
class ExemploDeTeste(unittest.TestCase):
def test_adicao(self):
resultado_obtido=1+2
self.assertEqual(3,resultado_obtido)
|
renzon/livrogae
|
backend/test/exemplo_de_teste.py
|
Python
|
mit
| 240
|
'''
Distributed under the MIT License, see accompanying file LICENSE.txt
'''
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.locale
import os.path
from ConfigParser import SafeConfigParser
from tornado.options import define, options
#api
from handlers.ApiHandler import BlockAfterHandler
from handlers.ApiHandler import LastBlockHandler
from handlers.ApiHandler import AccountHandler
from handlers.ApiHandler import TransfersHandler
from handlers.ApiHandler import FromToBlocksHandler
from handlers.ApiHandler import SearchBlockByHashHandler
from handlers.ApiHandler import SearchTxByHashHandler
from handlers.ApiHandler import SearchHandler
from handlers.ApiHandler import FromToTxHandler
from handlers.ApiHandler import BlockChartHandlerCustom
from handlers.ApiHandler import NxtBlockChartHandlerCustom
from handlers.ApiHandler import HarvesterStatsHandler
from handlers.ApiHandler import CheckNis
from handlers.ApiHandler import NodeListHandler
from handlers.ApiHandler import TestAccountHandler
#sockets
from handlers.SocketHandler import LatestBlockSocket
from handlers.SocketHandler import LatestTxSocket
parser = SafeConfigParser()
parser.read("settings.INI")
define("port", default=parser.get("blockexplorer", "port"), help="run on the given port", type=int)
if __name__ == '__main__':
tornado.options.parse_command_line()
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"static_path" : os.path.join(os.path.dirname(__file__), 'static'),
"cookie_secret": "doEx8QhSQv+CUoZjKDevtL/5VODeEkUFgbWyv7PO0O4", #define your own here !
"xsrf_cookies": True,
"debug": False,
"gzip":True,
'pycket': {
'engine': 'redis',
'storage': {
'host': 'localhost',
'port': 6379,
'db_sessions': 10,
'db_notifications': 11,
'max_connections': 2 ** 31,
},
},
}
#define the url endpoints
app = tornado.web.Application(
[
#main page stuff
#(r'/', FromToBlocksHandlerTemp),
#(r'/blocks', FromToBlocksHandlerTemp),
#apis
#blocks
(r'/api/block-after', BlockAfterHandler),
(r'/api/last-block', LastBlockHandler),
(r'/api/blocks', FromToBlocksHandler),
#account
(r'/api/account', AccountHandler),
(r'/api/transfers', TransfersHandler),
(r'/api/testAcc', TestAccountHandler),
#txs
(r'/api/txs', FromToTxHandler),
#search
(r'/api/tx', SearchTxByHashHandler),
(r'/api/block', SearchBlockByHashHandler),
(r'/api/search', SearchHandler),
#stats
(r'/api/stats/v2/blocktimes', BlockChartHandlerCustom),
(r'/api/stats/harvesters', HarvesterStatsHandler),
(r'/api/stats/nodes', NodeListHandler),
(r'/api/stats/nxttimes', NxtBlockChartHandlerCustom),
#sockets
#blocks
(r'/socket/last-block', LatestBlockSocket),
#txs
(r'/socket/last-tx', LatestTxSocket),
#extras
(r'/api/extras/checknis', CheckNis),
],
**settings
)
#load translations
translationsPath = os.path.join(os.path.dirname(__file__), "locale")
tornado.locale.load_translations(translationsPath)
server = tornado.httpserver.HTTPServer(app, xheaders=True)
server.bind(options.port, '127.0.0.1')
print "port: ", options.port
server.start()
tornado.ioloop.IOLoop.instance().start()
|
NewEconomyMovement/blockexplorer
|
NEMBEX.py
|
Python
|
mit
| 3,700
|
import sys
def synced_impl(dependencies, python):
import subprocess
from ast import literal_eval
from packaging.requirements import Requirement
from ...dep.core import dependencies_in_sync
sys_path = None
if python:
output = subprocess.check_output([python, '-c', 'import sys;print([path for path in sys.path if path])'])
sys_path = literal_eval(output.strip().decode('utf-8'))
sys.exit(0 if dependencies_in_sync(map(Requirement, dependencies), sys_path) else 1)
def synced_command(subparsers, defaults):
parser = subparsers.add_parser('synced')
parser.add_argument('dependencies', nargs='+')
parser.add_argument('-p', '--python', dest='python', **defaults)
parser.set_defaults(func=synced_impl)
def dep_command(subparsers, defaults):
parser = subparsers.add_parser('dep')
subparsers = parser.add_subparsers()
synced_command(subparsers, defaults)
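# Rough usage sketch, assuming these parsers are mounted under the `hatchling`
# console entry point (the exact entry point wiring is an assumption, not shown here):
#
#   hatchling dep synced 'packaging>=21.3' -p /usr/bin/python3
#
# The process exits 0 when every listed requirement is in sync for the given
# interpreter's sys.path, and 1 otherwise.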
|
ofek/hatch
|
backend/src/hatchling/cli/dep/__init__.py
|
Python
|
mit
| 930
|
input1="""Some
text."""
input2="""package main
import "fmt"
func main() {
queue := make(chan string, 2)
queue <- "one"
queue <- "twoO"
close(queue)
for elem := range queue {
fmt.Println(elem)
}
}"""
def transpose(text):
lines = text.splitlines()
lens = [len(line) for line in lines]
longest = max(lens)
for i in xrange(len(lines)):
spaces = longest-lens[i]
if spaces > 0:
lines[i]+=' '*(spaces)
output=''
curindex=0
for i in xrange(longest):
for line in lines:
output+=line[curindex]
if curindex<longest-1:
output+='\n'
curindex+=1
return output
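# Worked example: transpose("ab\ncd") pads nothing (both lines are already the
# same length) and then emits column by column, yielding "ac\nbd"; shorter lines
# are right-padded with spaces first so every column has a character to read.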
if __name__ == "__main__":
print transpose(input1)
print transpose(input2)
|
jamtot/DailyChallenge
|
transpose_text (06jun2016)/transpose.py
|
Python
|
mit
| 778
|
import pytest
from pytest_bdd import (
scenarios,
then,
when,
)
from . import browsersteps
pytestmark = [
pytest.mark.bdd,
pytest.mark.usefixtures('workbook', 'admin_user'),
]
scenarios(
'title.feature',
'select_variant.feature',
'variant_curation_tabs.feature',
'generics.feature',
'create_gene_disease.feature',
'curation_central.feature',
'gdm.feature',
)
# https://github.com/pytest-dev/pytest-bdd/issues/124
@when('I visit "/<item_type>/"')
def i_visit_the_collection_for_item_type(browser, base_url, item_type):
url = '/{}/'.format(item_type)
browsersteps.when_i_visit_url(browser, base_url, url)
@when('I click the link with text that contains "<link_text>"')
def click_link_with_text_that_contains_link_text(browser, link_text):
browsersteps.click_link_with_text_that_contains(browser, link_text)
@then('I should see an element with the css selector ".view-item.type-<item_type>"')
def should_see_element_with_css_item_type(browser, item_type):
css = ".view-item.type-{}".format(item_type)
browsersteps.should_see_element_with_css(browser, css)
|
ClinGen/clincoded
|
src/clincoded/tests/features/test_generics.py
|
Python
|
mit
| 1,131
|
from plumbum import local
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.settings import CFG
from benchbuild.source import HTTP
from benchbuild.utils import path
from benchbuild.utils.cmd import make, tar
from benchbuild.utils.settings import get_number_of_jobs
class MCrypt(bb.Project):
""" MCrypt benchmark """
NAME = 'mcrypt'
DOMAIN = 'encryption'
GROUP = 'benchbuild'
SOURCE = [
HTTP(
remote={
'2.6.8': (
'http://sourceforge.net/projects/mcrypt/files/MCrypt/'
'2.6.8/mcrypt-2.6.8.tar.gz'
)
},
local='mcrypt.tar.gz'
),
HTTP(
remote={
'2.5.8': (
'http://sourceforge.net/projects/mcrypt/files/Libmcrypt/'
'2.5.8/libmcrypt-2.5.8.tar.gz'
)
},
local='libmcrypt.tar.gz'
),
HTTP(
remote={
'0.9.9.9': (
'http://sourceforge.net/projects/mhash/files/mhash/'
'0.9.9.9/mhash-0.9.9.9.tar.gz'
)
},
local='mhash.tar.gz'
)
]
CONTAINER = ContainerImage().from_('benchbuild:alpine')
libmcrypt_dir = "libmcrypt-2.5.8"
libmcrypt_file = libmcrypt_dir + ".tar.gz"
mhash_dir = "mhash-0.9.9.9"
mhash_file = mhash_dir + ".tar.gz"
def compile(self):
mcrypt_source = local.path(self.source_of('mcrypt.tar.gz'))
libmcrypt_source = local.path(self.source_of('libmcrypt.tar.gz'))
mhash_source = local.path(self.source_of('mhash.tar.gz'))
tar('xfz', mcrypt_source)
tar('xfz', libmcrypt_source)
tar('xfz', mhash_source)
builddir = local.path(self.builddir)
mcrypt_dir = builddir / "mcrypt-2.6.8"
mhash_dir = builddir / self.mhash_dir
libmcrypt_dir = builddir / self.libmcrypt_dir
_cc = bb.compiler.cc(self)
_cxx = bb.compiler.cxx(self)
_make = bb.watch(make)
# Build mhash dependency
with local.cwd(mhash_dir):
configure = local["./configure"]
_configure = bb.watch(configure)
with local.env(CC=_cc, CXX=_cxx):
_configure("--prefix=" + builddir)
_make("-j", get_number_of_jobs(CFG), "install")
        # Build libmcrypt dependency
with local.cwd(libmcrypt_dir):
configure = local["./configure"]
_configure = bb.watch(configure)
with local.env(CC=_cc, CXX=_cxx):
_configure("--prefix=" + builddir)
_make("-j", CFG["jobs"], "install")
with local.cwd(mcrypt_dir):
configure = local["./configure"]
_configure = bb.watch(configure)
lib_dir = builddir / "lib"
inc_dir = builddir / "include"
env = CFG["env"].value
mod_env = dict(
CC=_cc,
CXX=_cxx,
LD_LIBRARY_PATH=path.
list_to_path([str(lib_dir)] + env.get("LD_LIBRARY_PATH", [])),
LDFLAGS="-L" + str(lib_dir),
CFLAGS="-I" + str(inc_dir)
)
env.update(mod_env)
with local.env(**env):
_configure(
"--disable-dependency-tracking", "--disable-shared",
"--with-libmcrypt=" + builddir,
"--with-libmhash=" + builddir
)
_make("-j", get_number_of_jobs(CFG))
def run_tests(self):
mcrypt_dir = local.path(self.builddir) / "mcrypt-2.6.8"
mcrypt_libs = mcrypt_dir / "src" / ".libs"
aestest = bb.wrap(mcrypt_libs / "lt-aestest", self)
_aestest = bb.watch(aestest)
_aestest()
ciphertest = bb.wrap(mcrypt_libs / "lt-ciphertest", self)
_ciphertest = bb.watch(ciphertest)
_ciphertest()
|
PolyJIT/benchbuild
|
benchbuild/projects/benchbuild/mcrypt.py
|
Python
|
mit
| 4,034
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import swaggyjenkins
from swaggyjenkins.model.input_step_impl import InputStepImpl
from swaggyjenkins.model.pipeline_step_impllinks import PipelineStepImpllinks
globals()['InputStepImpl'] = InputStepImpl
globals()['PipelineStepImpllinks'] = PipelineStepImpllinks
from swaggyjenkins.model.pipeline_step_impl import PipelineStepImpl
class TestPipelineStepImpl(unittest.TestCase):
"""PipelineStepImpl unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPipelineStepImpl(self):
"""Test PipelineStepImpl"""
# FIXME: construct object with mandatory attributes with example values
# model = PipelineStepImpl() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
cliffano/swaggy-jenkins
|
clients/python/generated/test/test_pipeline_step_impl.py
|
Python
|
mit
| 1,040
|
#!/usr/bin/python
"""
Assembles raw cuts into the final video: adds titles, tweaks the audio, and encodes to the formats used for upload.
"""
import datetime
import os
from pprint import pprint
import sys
import subprocess
import xml.etree.ElementTree
import pycaption
from mk_mlt import mk_mlt
from process import process
from django.db import connection
from main.models import Client, Show, Location, Episode, Raw_File, Cut_List
class enc(process):
ready_state = 2
def mk_title_svg(self, raw_svg, texts):
"""
Make a title slide by filling in a pre-made svg with name/authors.
return: svg
"""
tree = xml.etree.ElementTree.XMLID(raw_svg)
for key in texts:
if self.options.verbose:
print("looking for:", key)
            # tolerate a template where tokens have been removed
if key in tree[1]:
if key == "license":
# CC license image
if self.options.verbose:
print("found in svg:", tree[1][key])
print("replacing with:", texts[key])
t = tree[1][key]
# import code; code.interact(local=locals())
if texts[key] is None:
# del(tree[1][key])
# print tree[1].has_key(key)
tree[1][key].clear()
else:
t.set('{http://www.w3.org/1999/xlink}href', texts[key])
else:
if self.options.verbose:
print("found in svg:", tree[1][key].text)
print("replacing with:", texts[key]) # .encode()
tree[1][key].text = texts[key]
# cooked_svg = xml.etree.ElementTree.tostring(tree[0])
# print "testing...", "license" in cooked_svg
if 'presenternames' in tree[1]:
# some people like to add spiffy text near the presenter name(s)
if texts['authors']:
# prefix = u"Featuring" if "," in texts['authors'] else "By"
# tree[1]['presenternames'].text=u"%s %s" % (prefix,texts['authors'])
tree[1]['presenternames'].text = texts['authors']
else:
                # remove the text (there is a placeholder to make editing sane)
tree[1]['presenternames'].text = ""
cooked_svg = xml.etree.ElementTree.tostring(tree[0]).decode('ascii')
return cooked_svg
def get_title_text(self, episode):
# lets try putting (stuff) on a new line
title = episode.name
authors = episode.authors
if episode.show.slug == 'write_docs_na_2016':
title = title.upper()
authors = authors.upper()
# non breaking hyphen
# it's wider?!!!
# title = title.replace('-','—')
if False and episode.show.slug != 'pygotham_2015' and len(title) > 80: # crazy long titles need all the lines
title2 = ''
elif episode.id in [13741, ]: # black list - don't touch this.
title2 = ''
elif ": " in title: # the space keeps 9:00 from breaking
pos = title.index(":") + 1
title1, title2 = title[:pos], title[pos:].strip()
elif " - " in title:
# error if there is more than 1.
# title, title2 = title.split(' - ')
t1, t2 = title.split(' - ',1)
if t1[-1].isdigit() and t2[0].isdigit():
title1 = title
title2=''
else:
title1, title2 = t1, t2
elif " -- " in title:
# error if there is more than 1.
title1, title2 = title.split(' -- ')
elif (" (" in title) and (title.index(" (") > 10):
pos = title.index(" (")
# +1 skip space in " ("
title1, title2 = title[:pos], title[pos + 1:]
elif ", " in title:
pos = title.index(", ")
# +1 include the comma, + 2 skip space after it
title1, title2 = title[:pos+1], title[pos + 2:]
elif (") " in title):
pos = title.index(") ")
# +1 include the ), + 2 skip space in ") "
title1, title2 = title[:pos+1], title[pos+ 2:]
elif " # " in title:
pos = title.index(" # ")
title1, title2 = title[:pos], title[pos+1:].strip()
elif False and " using " in title:
pos = title.index(" using ")
title1, title2 = title[:pos], title[pos + 1:]
elif ";" in title:
pos = title.index(";") + 1
title1, title2 = title[:pos], title[pos:].strip()
elif "? " in title: # ?(space) to not break on 'can you?'
pos = title.index("?") + 1
title1, title2 = title[:pos], title[pos:].strip()
elif ". " in title:
pos = title.index(". ") + 1
title1, title2 = title[:pos], title[pos:].strip()
else:
title1 = episode.name
title2 = ""
        # replace the last space with a nbsp to prevent orphan words.
try:
i = title1.rindex(' ')
title1 = title1[:i] + chr(160) + title1[i+1:]
except ValueError: pass
try:
i = title2.rindex(' ')
title2 = title2[:i] + chr(160) + title2[i+1:]
except ValueError: pass
if episode.license:
license = "cc/{}.svg".format(episode.license.lower())
else:
license = None
if episode.tags:
tags = episode.tags.split(',')
tag1 = tags[0]
else:
tags = []
tag1 = ''
# split authors over three objects
# parse into list
# strip the spaces
        # pad to 3 items
l = [a.strip() for a in authors.split(',')]
authors = " and ".join(l)
l += [''] * (3-len(l))
author1, author2, author3 = l
# World date format
# date = episode.start.strftime("%Y-%m-%-d")
# US dumb format
date = episode.start.strftime("%B %-dth, %Y")
texts = {
'client': episode.show.client.name,
'show': episode.show.name,
'title': title,
'title1': title1,
'title2': title2,
'track': tag1,
'authors': authors,
'author1': author1,
'author2': author2,
'author3': author3,
'presentertitle': "",
'twitter_id': episode.twitter_id,
'date': date,
'time': episode.start.strftime("%H:%M"),
'license': license,
'room': episode.location.name,
}
return texts
def svg2png(self, svg_name, png_name, episode):
"""
Make a title slide png file.
melt uses librsvg which doesn't support flow,
        which is needed for long titles, so render it to a .png using inkscape
"""
# create png file
# inkscape does not return an error code on failure
# so clean up previous run and
        # check for the existence of a new png
if os.path.exists(png_name):
os.remove(png_name)
cmd = ["inkscape", svg_name,
"--export-png", png_name,
# "--export-width", "720",
]
ret = self.run_cmds(episode, [cmd])
ret = os.path.exists(png_name)
# if self.options.verbose: print cooked_svg
if self.options.verbose:
print(png_name)
if not ret:
print("svg:", svg_name)
png_name = None
return png_name
def mk_title(self, episode):
# make a title slide
# if we find titles/custom/(slug).svg, use that
        # else make one from the template
custom_svg_name = os.path.join( "..",
"custom", "titles", episode.slug + ".svg")
if self.options.verbose: print("custom:", custom_svg_name)
abs_path = os.path.join( self.show_dir, "tmp", custom_svg_name )
if self.options.verbose: print("abs:", abs_path)
if os.path.exists(abs_path):
cooked_svg_name = abs_path
else:
svg_name = episode.show.client.title_svg
# print(svg_name)
template = os.path.join(
self.show_dir,
"assets", "titles",
svg_name)
raw_svg = open(template).read()
# happy_filename = episode.slug.encode('utf-8')
happy_filename = episode.slug
# happy_filename = ''.join([c for c in happy_filename if c.isalpha()])
# title_base = os.path.join(self.show_dir, "titles", happy_filename)
title_base = os.path.join("..", "titles", happy_filename)
texts = self.get_title_text(episode)
cooked_svg = self.mk_title_svg(raw_svg, texts)
# save svg to a file
# strip 'broken' chars because inkscape can't handle the truth
# output_base=''.join([ c for c in output_base if c.isalpha()])
# output_base=''.join([ c for c in output_base if ord(c)<128])
# output_base=output_base.encode('utf-8','ignore')
cooked_svg_name = os.path.join(
self.show_dir, "titles", '{}.svg'.format(episode.slug))
open(cooked_svg_name, 'w').write(cooked_svg)
png_name = os.path.join( "..",
"titles", '{}.png'.format(episode.slug))
abs_path = os.path.join( self.show_dir, "tmp", png_name )
title_img = self.svg2png(cooked_svg_name, abs_path, episode)
if title_img is None:
print("missing title png")
return False
return png_name
def get_params(self, episode, rfs, cls):
"""
assemble a dict of params to send to mk_mlt
mlt template, title screen image,
filter parameters (currently just audio)
and cutlist+raw filenames
"""
def get_title(episode):
# if we find show_dir/custom/titles/(slug).svg, use that
            # else make one from the template
custom_png_name = os.path.join(
self.show_dir, "custom", "titles", episode.slug + ".png")
if os.path.exists(custom_png_name):
print("found custom:", custom_png_name)
title_img = custom_png_name
else:
title_img = self.mk_title(episode)
return title_img
def get_foot(episode):
credits_img = episode.show.client.credits
credits_pathname = os.path.join("..",
"assets", "credits", credits_img )
return credits_pathname
def get_clips(rfs, ep):
"""
return list of possible input files
            this may get the files and store them locally.
start/end segments are under get_cuts.
ps. this is not used for encoding,
just shows in ShotCut for easy dragging onto the timeline.
"""
clips = []
for rf in rfs:
clip = {'id': rf.id }
# if rf.filename.startswith('\\'):
# rawpathname = rf.filename
# else:
raw_pathname = os.path.join( "../dv",
rf.location.slug, rf.filename)
# self.episode_dir, rf.filename)
# check for missing input file
# typically due to incorrect fs mount
abs_path = os.path.join(
self.show_dir, "tmp", raw_pathname)
if not os.path.exists(abs_path):
print(( 'raw_pathname not found: "{}"'.format(
abs_path)))
return False
clip['filename']=raw_pathname
# trim start/end based on episode start/end
if rf.start < ep.start < rf.end:
                    # if the ep start falls during this clip,
# trim it
d = ep.start - rf.start
clip['in']="00:00:{}".format(d.total_seconds())
else:
clip['in']=None
# if "mkv" in rf.filename:
# import code; code.interact(local=locals())
if rf.start < ep.end < rf.end:
                    # if the ep end falls during this clip,
d = ep.end - rf.start
clip['out']="00:00:{}".format(d.total_seconds())
else:
clip['out']=None
# pprint.pprint(clip)
clips.append(clip)
return clips
def get_cuts(cls):
"""
gets the list of cuts.
input file, start, end, filters
ps, does not reference the clips above.
"""
def hms_to_clock(hms):
"""
Converts what media players show h:m:s
to the mlt time format h:m:s.s
for more on this:
http://mltframework.blogspot.com/2012/04/time-properties.html
"""
if not hms:
return None
if ":" not in hms:
hms = "0:" + hms
if "." not in hms:
hms = hms + ".0"
return hms
cuts = []
for cl in cls:
cut = {}
cut['id'] = cl.id
rawpathname = os.path.join( "../dv",
cl.raw_file.location.slug, cl.raw_file.filename)
# self.episode_dir, cl.raw_file.filename)
# print(rawpathname)
cut['filename'] = rawpathname
# set start/end on the clips if they are set in the db
# else None
cut['in']=hms_to_clock(cl.start)
cut['out']=hms_to_clock(cl.end)
cut['length'] = cl.duration()
if cl.episode.channelcopy:
cut['channelcopy'] = cl.episode.channelcopy
elif cl.episode.location.channelcopy:
cut['channelcopy'] = cl.episode.location.channelcopy
else:
cut['channelcopy']='01'
if cl.comment.startswith('channelcopy'):
channelcopy = cl.comment.split('\n')[0].split('=')[1].strip()
cut['channelcopy']=channelcopy
if cl.episode.normalise:
cut['normalize'] = cl.episode.normalise
else:
# cut['normalize']='-12.0'
cut['normalize']='0'
if cl.episode.comment.startswith('delay'):
delay = cl.episode.comment.split('\n')[0].split('=')[1].strip()
cut['video_delay']=delay
else:
if cl.episode.show.slug == "pytx19":
cut['video_delay']='0.8'
elif cl.episode.show.slug == "kicon_2019":
cut['video_delay']='0.000'
else:
cut['video_delay']='0.0'
cuts.append(cut)
return cuts
def get_transcriptions(cls):
"""
loop over the cuts because that is where the data is now.
"""
transcriptions = []
video_time = 0
for cl in cls:
for c in cl.comment.split('\n'):
if c.startswith('TS'):
kv=c.split('=',1)[1].strip().split(' ',1)
transcription = {}
transcription['start']={
'timestamp':kv[0],
'text': kv[1] if len(kv)>1 else None,
'wallclock': cl.get_start_wall(),
'video_time': video_time,
}
if c.startswith('TE'):
kv=c.split('=',1)[1].strip().split(' ',1)
transcription['end']={
'timestamp':kv[0],
'text': kv[1] if len(kv)>1 else None,
'wallclock': cl.get_end_wall(),
}
transcriptions.append(transcription)
transcription = None
video_time += cl.duration()
# print("vt: {}".format(video_time))
return transcriptions
params = {}
params['title_img'] = get_title(episode)
params['foot_img'] = get_foot(episode)
params['clips'] = get_clips(rfs, episode)
params['cuts'] = get_cuts(cls)
params['transcriptions'] = get_transcriptions(cls)
return params
def mk_subs(self, transcriptions, sub_pathname ):
"""
Create a subtitle file for this video.
        It is currently a huge hack, but it works well enough.
transcriptions: list of start/end 'pointers' into the source
sub_pathname: full path to output file
"""
transcript_filename = '12022017 NBPY SCC.scc'
# dt = transcript_filename[:8]
transcript_pathname = os.path.join( self.show_dir,
"assets", "transcripts", transcript_filename )
# transcript_start = datetime.datetime.strptime(
# dt + " 10:06:56", '%m%d%Y %H:%M:%S' ) - \
# datetime.timedelta(0, 2, 158933)
caps = open(transcript_pathname, encoding='iso-8859-1').read()
transcript = pycaption.SCCReader().read(caps)
language = transcript.get_languages()[0] # ['en-US']
captions = transcript.get_captions(language)
out_captions = pycaption.CaptionList()
for transcription in transcriptions:
state = 0
for c in captions:
if c.format_start() == \
transcription['start']['timestamp']:
state=1
offset = c.start - transcription['start']['video_time'] * 1000000
c.nodes[0].content=transcription['start']['text']
if state==1:
if c.format_start() == \
transcription['end']['timestamp']:
c.nodes[0].content=\
transcription['end']['text']
state = 0
c.start -= offset
c.end -= offset
out_captions.append(c)
transcript.set_captions(language, out_captions)
# writer = pycaption.DFXPWriter()
writer = pycaption.SRTWriter()
open(sub_pathname, 'wt').write(writer.write(transcript))
return
def enc_all(self, mlt_pathname, episode):
def enc_one(ext):
out_pathname = os.path.join(
self.show_dir, ext, "%s.%s" % (episode.slug, ext))
if ext == 'webm':
parms = {
'profile': self.options.mlt_profile,
'mlt': mlt_pathname,
'out': out_pathname,
'threads': self.options.threads,
'test': '',
}
cmds = ["melt -verbose -profile {profile} {mlt} -consumer avformat:{out} progress=1 threads=4 acodec=libvorbis ab=256k vb=2000k quality=good cpu-used=0 vcodec=libvpx".format( **parms ) ]
if ext == 'flv':
cmds = [
"melt %(mlt)s -progress -profile {profile} -consumer avformat:%(out)s progressive=1 acodec=libfaac ab=96k ar=44100 vcodec=libx264 b=110k vpre=/usr/share/ffmpeg/libx264-hq.ffpreset" % parms]
if ext == 'flac':
# 16kHz/mono
cmds = ["melt -verbose -progress %s -consumer avformat:%s ar=16000" %
(mlt_pathname, out_pathname)]
if ext == 'mp3':
cmds = ["melt -verbose -progress %s -consumer avformat:%s" %
(mlt_pathname, out_pathname)]
if ext == 'mp4':
# High Quality
parms = {
'profile': self.options.mlt_profile,
'mlt': mlt_pathname,
'out': out_pathname,
'threads': self.options.threads,
'test': '',
}
cmd = """
melt -verbose -progress
-profile {profile}
field_order=progressive
{mlt}
-consumer avformat:{out}
threads={threads}
movflags="+faststart"
properties="x264-medium"
acodec="aac"
ab="384k"
ar="48000"
channels="2"
vcodec="libx264"
cabac=1
vb=5000k
aq-mode=0
subme=0
trellis=1
bframes=2
colorspace=709
progressive=1
""".format(**parms)
cmd = cmd.split()
# 2 pass causes no video track, so dumping this.
# need to figure out how to switch between good and fast
if False:
cmds = [cmd + ['pass=1'],
cmd + ['pass=2']]
if True: # even faster!
cmds[0].append('fastfirstpass=1')
else:
cmds = [cmd]
# cmds.append( ["qt-faststart", tmp_pathname, out_pathname] )
if self.options.rm_temp:
cmds.append(["rm", tmp_pathname])
if ext == 'm4v':
# iPhone
tmp_pathname = os.path.join(
self.tmp_dir, "%s.%s" % (episode.slug, ext))
# combine settings from 2 files
ffpreset = open(
'/usr/share/ffmpeg/libx264-default.ffpreset').read().split('\n')
ffpreset.extend(
open('/usr/share/ffmpeg/libx264-ipod640.ffpreset').read().split('\n'))
ffpreset = [i for i in ffpreset if i]
cmd = "melt %(mlt)s -progress -profile {profile} -consumer avformat:%(tmp)s s=432x320 aspect=@4/3 progressive=1 acodec=libfaac ar=44100 ab=128k vcodec=libx264 b=70k" % parms
cmd = cmd.split()
cmd.extend(ffpreset)
cmds = [cmd]
cmds.append(["qt-faststart", tmp_pathname, out_pathname])
if self.options.rm_temp:
cmds.append(["rm", tmp_pathname])
if ext == 'dv':
out_pathname = os.path.join(
self.tmp_dir, "%s.%s" % (episode.slug, ext))
cmds = ["melt -verbose -progress %s -consumer avformat:%s pix_fmt=yuv411p progressive=1" %
(mlt_pathname, out_pathname)]
if ext == 'ogv':
# melt/ffmpeg ogv encoder is loopy,
# so make a .dv and pass it to ffmpeg2theora
ret = enc_one("dv")
if ret:
dv_pathname = os.path.join(
self.tmp_dir, "%s.dv" % (episode.slug,))
cmds = [
"ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --channels 1 %s -o %s" % (dv_pathname, out_pathname)]
if self.options.rm_temp:
cmds.append(["rm", dv_pathname])
else:
return ret
if self.options.noencode:
print("sorce files generated, skipping encode.")
if self.options.melt:
self.run_cmd(['melt', mlt_pathname])
ret = False
else:
# run encoder:
ret = self.run_cmds(episode, cmds, )
file_size = os.stat( out_pathname ).st_size
print( out_pathname, file_size )
# check results
if ret and not os.path.exists(out_pathname):
print("melt returned %ret, but no output: %s" % \
(ret, out_pathname))
ret = False
return ret
ret = True
# create all the formats for uploading
for ext in self.options.upload_formats:
print("encoding to %s" % (ext,))
ret = enc_one(ext) and ret
"""
if self.options.enc_script:
cmd = [self.options.enc_script,
self.show_dir, episode.slug]
ret = ret and self.run_cmds(episode, [cmd])
"""
return ret
def dv2theora(self, episode, dv_path_name, cls, rfs):
"""
Not used any more.
transcode dv to ogv
"""
oggpathname = os.path.join(
self.show_dir, "ogv", "%s.ogv" % episode.slug)
# cmd="ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --speedlevel 0 --optimize --keyint 256 --channels 1".split()
cmd = "ffmpeg2theora --videoquality 5 -V 600 --audioquality 5 --keyint 256 --channels 1".split()
cmd += ['--output', oggpathname]
cmd += [dv_path_name]
return cmd
def process_ep(self, episode):
self.whoami(episode.slug)
ret = False
cls = Cut_List.objects.filter(
episode=episode, apply=True).order_by('sequence')
if cls:
# get list of raw footage for this episode
rfs = Raw_File.objects. \
filter(cut_list__episode=episode).\
exclude(trash=True).distinct()
# get a .mlt file for this episode (mlt_pathname)
# look for custom/slug.mlt and just use it,
# else build one from client.template_mlt
mlt_pathname = os.path.join(
self.show_dir, "custom",
"{}.mlt".format(episode.slug))
if os.path.exists(mlt_pathname):
print(("found custom/slug.mlt:\n{}".format( mlt_pathname )))
ret = True
else:
template_mlt = os.path.join(self.show_dir,
"assets", "mlt", episode.show.client.template_mlt )
mlt_pathname = os.path.join(self.show_dir,
"mlt", "%s.mlt" % episode.slug)
params = self.get_params(episode, rfs, cls )
if self.options.verbose:
pprint(params)
# print((2, mlt_pathname))
ret = mk_mlt( template_mlt, mlt_pathname, params )
if params['transcriptions']:
# create the slug.srt file for this video
sub_pathname = os.path.join(
self.show_dir,
"transcripts", "{}.srt".format(episode.slug) )
subs = self.mk_subs(
params['transcriptions'], sub_pathname)
if not ret:
episode.state = 0
episode.comment += "\nenc.py mlt = self.mkmlt_1 failed.\n"
episode.save()
return False
# do the final encoding:
# using melt
ret = self.enc_all(mlt_pathname, episode)
if self.options.load_temp and self.options.rm_temp:
cmds = []
for rf in rfs:
dst_path = os.path.join(
self.tmp_dir, episode.slug, os.path.dirname(rf.filename))
rawpathname = os.path.join(
self.tmp_dir, episode.slug, rf.filename)
cmds.append(['rm', rawpathname])
cmds.append(['rmdir', dst_path])
dst_path = os.path.join(self.tmp_dir, episode.slug)
cmds.append(['rmdir', dst_path])
self.run_cmds(episode, cmds)
connection.connection.close()
connection.connection=None
else:
err_msg = "No cutlist found."
episode.state = 0
episode.comment += "\nenc error: %s\n" % (err_msg,)
episode.save()
print(err_msg)
return False
if self.options.test:
ret = False
# save the episode so the test suite can get the slug
self.episode = episode
return ret
def add_more_options(self, parser):
parser.add_option('--mlt-profile',
help="melt --profile profile")
parser.add_option('--enc-script',
help='encode shell script')
parser.add_option('--noencode', action="store_true",
help="don't encode, just make svg, png, mlt")
parser.add_option('--melt', action="store_true",
help="play with melt slug.melt (only w/noencode)")
parser.add_option('--load-temp', action="store_true",
help='copy raw to local temp files')
parser.add_option('--rm-temp',
help='remove large temp files')
parser.add_option('--threads',
help='thread parameter passed to encoder')
def add_more_option_defaults(self, parser):
parser.set_defaults(mlt_profile="atsc_720p_30")
parser.set_defaults(threads=0)
if __name__ == '__main__':
p = enc()
p.main()
|
CarlFK/veyepar
|
dj/scripts/enc.py
|
Python
|
mit
| 29,456
|
'''
Example of how to transform a corpus into the streamcorpus format.
This uses the John Smith corpus as a test data set for illustration.
The John Smith corpus is 197 articles from the New York Times gathered
by Amit Bagga and Breck Baldwin "Entity-Based Cross-Document
Coreferencing Using the Vector Space Model"
http://acl.ldc.upenn.edu/P/P98/P98-1012.pdf
The corpus consists of 35 directories with files inside each
directory. The documents in each directory all refer to the same
entity named John Smith, so the directory names are document-level
labels. First, we store these doc-level labels and then later, when
we have reader output from LingPipe or Stanford CoreNLP, we coerce
these doc-level labels into labels on individual in-doc coref chains
that contain 'john' and 'smith' as substrings.
The original data is stored in data/john-smith/original and the output
of this file is in data/john-smith/john-smith.sc
This software is released under an MIT/X11 open source license.
Copyright 2012-2013 Diffeo, Inc.
'''
## this assumes that streamcorpus has been installed
import streamcorpus
from streamcorpus_pipeline._exceptions import PipelineBaseException
from streamcorpus_pipeline.stages import Configured
import os
import hashlib
class john_smith(Configured):
config_name = 'john_smith'
# no other config
def __call__(self, i_str):
'''
Returns a kba.pipeline "reader" that generates a single
streamcorpus.Chunk file containing the John Smith corpus.
:returns function:
'''
return generate_john_smith_chunk(i_str)
def generate_john_smith_chunk(path_to_original):
'''
This _looks_ like a Chunk only in that it generates StreamItem
instances when iterated upon.
'''
## Every StreamItem has a stream_time property. It usually comes
## from the document creation time. Here, we assume the JS corpus
## was created at one moment at the end of 1998:
creation_time = '1998-12-31T23:59:59.999999Z'
correct_time = 915148799
if not os.path.isabs(path_to_original):
path_to_original = os.path.join(os.getcwd(), path_to_original)
## iterate over the files in the 35 input directories
for label_id in range(35):
dir_path = os.path.join(path_to_original, str(label_id))
fnames = os.listdir(dir_path)
fnames.sort()
for fname in fnames:
stream_item = streamcorpus.make_stream_item(
creation_time,
## make up an abs_url
os.path.join(
'john-smith-corpus', str(label_id), fname))
if int(stream_item.stream_time.epoch_ticks) != correct_time:
raise PipelineBaseException('wrong stream_time construction: %r-->%r != %r'\
% (creation_time, stream_item.stream_time.epoch_ticks,
correct_time))
## These docs came from the authors of the paper cited above.
stream_item.source = 'bagga-and-baldwin'
## build a ContentItem for the body
body = streamcorpus.ContentItem()
raw_string = open(os.path.join(dir_path, fname)).read()
## We know that this is already clean and has nothing
## tricky in it, because we manually cleansed it. To
## illustrate how we stick all strings into thrift, we
## convert this to unicode (which introduces no changes)
## and then encode it as utf-8, which also introduces no
## changes. Thrift stores strings as 8-bit character
## strings.
# http://www.mail-archive.com/thrift-user@incubator.apache.org/msg00210.html
body.clean_visible = unicode(raw_string).encode('utf8')
## attach the content_item to the stream_item
stream_item.body = body
stream_item.body.language = streamcorpus.Language(code='en', name='ENGLISH')
## The authors also annotated the corpus
anno = streamcorpus.Annotator()
anno.annotator_id = 'bagga-and-baldwin'
anno.annotation_time = stream_item.stream_time
## build a Label for the doc-level label:
rating = streamcorpus.Rating()
rating.annotator = anno
rating.target = streamcorpus.Target(target_id = str(label_id)) # must be string
rating.contains_mention = True
rating.mentions = ['john', 'smith']
## put this one label in the array of labels
streamcorpus.add_annotation(stream_item, rating)
## provide this stream_item to the pipeline
yield stream_item
if __name__ == '__main__':
## this is a simple test of this reader stage
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'input_dir',
help='path to a directory containing the original John Smith corpus.')
args = parser.parse_args()
john_smith_reader_stage = john_smith({})
for si in john_smith_reader_stage( args.input_dir ):
print len(si.body.clean_visible), si.stream_id
|
trec-kba/streamcorpus-pipeline
|
streamcorpus_pipeline/_john_smith.py
|
Python
|
mit
| 5,197
|
import tornado.web
from tornado import gen
from engines.verify import EmailVerificationEngine
class IndexPageHandler(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
class VerifyPageHandler(tornado.web.RequestHandler):
@gen.coroutine
def get(self, payload):
# 1. Activate user account and create new session (login)
engine = EmailVerificationEngine(self.settings)
result = yield engine.execute(payload)
# 2. Store session details in cookie
self.set_secure_cookie('sessionId', str(result.sessionId),
expires_days = result.lifespan,
httponly = True) # TODO: secure=True
self.redirect('/')
|
rmoritz/chessrank
|
chessrank/server/requesthandlers/__init__.py
|
Python
|
mit
| 747
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for willchaterr.
This file was generated with PyScaffold 2.4.4, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
sphinx = ['sphinx'] if needs_sphinx else []
setup(setup_requires=['six', 'pyscaffold>=2.4rc1,<2.5a0'] + sphinx,
tests_require=['pytest_cov', 'pytest'],
use_pyscaffold=True)
if __name__ == "__main__":
setup_package()
|
totalgood/willchatterr
|
setup.py
|
Python
|
mit
| 654
|
import os
import uuid
import zipfile
from flask import Flask, request, render_template, jsonify, redirect, url_for
from constants import CONTRIBUTION_LINK, DEFAULT_ERROR_MESSAGE
from utils import get_parsed_file, empty_directory
app = Flask(__name__)
IS_PROD = os.environ.get("IS_PROD", False)
def allowed_file(filename):
allowed_filetypes = ['txt', 'json', 'zip']
return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed_filetypes
@app.route('/parse-file', methods=['POST'])
def parse_file():
empty_directory("static/chat")
file_req = request.files
if len(file_req) == 0:
response = {
"success": False,
"error_message": "Please upload a file to proceed.",
}
return jsonify(response), 200
file = file_req['0']
if not allowed_file(file.filename):
response = {
"success": False,
"error_message": "Please upload a valid file!",
}
else:
attachment_flag = False
filename, file_extension = os.path.splitext(file.filename)
filename = str(uuid.uuid4())
tmp_filepath = os.path.join("conversations", filename + file_extension)
file.save(tmp_filepath)
if '.zip' == file_extension:
with zipfile.ZipFile(tmp_filepath, 'r') as zip_ref:
zip_ref.extractall("static/chat")
os.remove(tmp_filepath)
# Assumption that needs to be proven
filename = '_chat'
file_extension = '.txt'
tmp_filepath = os.path.join("static/chat", filename + file_extension)
attachment_flag = True
try:
parsed_items, persons_list = get_parsed_file(tmp_filepath, is_media_available=attachment_flag)
response = {
"success": True,
"chat": parsed_items,
"users": persons_list,
"attachments": attachment_flag
}
except Exception as e:
response = {
"success": False,
"error_message": str(e)
}
# clears out attachments and conversations
empty_directory("conversations")
return jsonify(response), 200
@app.route('/', methods=['GET'])
def main():
empty_directory("static/chat")
ctx = {
'is_prod': IS_PROD,
'contribution_link': CONTRIBUTION_LINK,
'default_error_message': DEFAULT_ERROR_MESSAGE,
}
if request.args.get('redirect'):
message = "Sorry, we couldn't find the page"
return render_template("index.html", data=ctx, error_message=message)
else:
return render_template("index.html", data=ctx)
@app.errorhandler(404)
def not_found(e):
return redirect(url_for('main', redirect='home'))
if __name__ == "__main__":
app.run(debug=not IS_PROD, host="0.0.0.0", threaded=True)
|
prabhakar267/whatsapp-reader
|
app.py
|
Python
|
mit
| 2,888
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(
self, plotly_name="title", parent_name="densitymapbox.colorbar", **kwargs
):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
""",
),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/densitymapbox/colorbar/_title.py
|
Python
|
mit
| 1,252
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-19 13:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('caffe', '0004_auto_20160619_0004'),
]
operations = [
migrations.AlterField(
model_name='caffe',
name='creator',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='my_caffe', to=settings.AUTH_USER_MODEL),
),
]
|
VirrageS/io-kawiarnie
|
caffe/caffe/migrations/0005_auto_20160619_1552.py
|
Python
|
mit
| 624
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic.base import RedirectView, TemplateView
from django.contrib.auth.forms import AuthenticationForm
from registration.forms import RegistrationForm
class HomeView(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
login_form = AuthenticationForm()
registration_form = RegistrationForm()
context = super(HomeView, self).get_context_data(**kwargs)
context['login_form'] = login_form
context['registration_form'] = registration_form
return context
urlpatterns = patterns('',
url(r'^$', HomeView.as_view(), name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/register/complete/$',
RedirectView.as_view(url="/"),
name='registration_complete'),
url(r'^accounts/password/reset/complete/$',
RedirectView.as_view(url="/"),
name='auth_password_reset_complete'),
(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^polls/', include('ballots.polls_urls', namespace="polls")),
url(r'^ballots/', include('ballots.urls', namespace="ballots")),
)
|
cpsimpson/pollcat
|
pollcat/urls.py
|
Python
|
mit
| 1,230
|
import threading
import inspect
import re
from collections import defaultdict
from difflib import unified_diff
from StringIO import StringIO
from os.path import dirname, join, basename, splitext
NEWLINE = '\n'
SPACE = ' '
PREFIX = 'test_'
NEWLINE_PATTERN = re.compile(r'\r?\n')
def _lines(text):
return NEWLINE_PATTERN.split(text)
def _less(name):
return name[len(PREFIX):] if name.startswith(PREFIX) else name
def _dig(depth, frame):
for _ in range(depth):
frame = frame.f_back
return frame
def _function_name(depth):
frame = _dig(depth, inspect.currentframe())
return _less(frame.f_code.co_name)
def _dirname(depth):
frame = _dig(depth, inspect.currentframe())
return dirname(frame.f_code.co_filename)
def _filename(depth):
frame = _dig(depth, inspect.currentframe())
filename = basename(frame.f_code.co_filename)
root, _ = splitext(filename)
return _less(root)
def _approved_file(name, distance=2):
filename = '%s.%s.ok' % (_filename(distance), _less(name))
return join(_dirname(distance), filename)
def _received_file(name, distance=2):
filename = '%s.%s.nok' % (_filename(distance), _less(name))
return join(_dirname(distance), filename)
def _both_paths(name):
return _approved_file(name, 4), _received_file(name, 4)
def _read(filename):
with open(filename) as source:
return _lines(source.read())
def _write(filename, content):
with open(filename, 'w+') as target:
target.write(content)
def _as_tagset(tagset):
return tuple(sorted(tagset if isinstance(tagset, set) else {tagset}))
class _StreamLogger(object):
def __init__(self, stream):
self._atomic = threading.RLock()
self._stream = stream
def is_valid(self):
return True
def log(self, message):
with self._atomic:
self._stream.write(message)
if not message.endswith(NEWLINE):
self._stream.write(NEWLINE)
self._stream.flush()
class _Hold(object):
strategy = None
class _Space(object):
def __init__(self):
self._atomic = threading.RLock()
self._dimension = defaultdict(_Hold)
def into_stream(self, tagsets, stream):
strategy = _StreamLogger(stream)
with self._atomic:
for tagset in tagsets:
self._dimension[tagset].strategy = strategy
def log(self, tagsets, form, args):
with self._atomic:
strategies = set(self._dimension[x].strategy for x in tagsets)
loggers = filter(lambda x: x, strategies)
if loggers:
prefix = SPACE.join(tagsets[0])
message = "%s: %s" % (prefix, (form % args))
for logger in loggers:
logger.log(message)
_REGISTRY = _Space()
class At(object):
def __init__(self, *tagsets):
self._tagsets = tuple(_as_tagset(x) for x in tagsets)
def __call__(self, form, *args):
_REGISTRY.log(self._tagsets, form, args)
return self
log = __call__ # just alias for calling directly; do not remove
def returns(self, *values):
caller = _function_name(2)
self.log("%s returns %r", caller, values)
return values if len(values) > 1 else values[0]
def into(self, stream):
_REGISTRY.into_stream(self._tagsets, stream)
return self
class Approve(object):
def __init__(self, *tagsets):
self._tagsets = tuple(_as_tagset(x) for x in tagsets)
self._stream = None
self._approved, self._received = _both_paths(_function_name(2))
def __enter__(self):
self._stream = StringIO()
_REGISTRY.into_stream(self._tagsets, self._stream)
return self
def __exit__(self, kind, _value, _trace):
if kind:
return False
try:
golden_master = _read(self._approved)
except IOError as err:
if err.errno != 2:
raise
_write(self._received, self._stream.getvalue())
raise AssertionError, "Missing %s file!" % self._approved
else:
sut_content = self._stream.getvalue()
changed = NEWLINE.join(unified_diff(golden_master,
_lines(sut_content), fromfile='Expected', tofile='Actual',
n=1))
if changed:
_write(self._received, sut_content)
raise AssertionError, "Content does not match:\n%s" % changed
return False
def __call__(self, fn):
self._approved, self._received = _both_paths(fn.func_name)
def wrapper(*args, **kvargs):
with self:
return fn(*args, **kvargs)
return wrapper
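# Minimal usage sketch (hypothetical test module, not part of this file): At()
# routes tagged log lines to a stream, and Approve() captures them and compares
# the result against a golden-master .ok file named after the test module and
# test function (with any 'test_' prefixes stripped), writing a .nok file and
# raising AssertionError when the master is missing or the output differs.
#
#   trace = At('db')
#
#   @Approve('db')
#   def test_saves_row():
#       trace('inserting %d rows', 3)   # captured as "db: inserting 3 rows"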
|
vjmp/texpectpy
|
texpect.py
|
Python
|
mit
| 4,707
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import base64
import socket
import struct
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
from tchannel.zipkin.thrift import ttypes
try:
import ujson as json
except ImportError: # pragma: nocover
import json
def hex_str(n):
return '%0.16x' % (n,)
def int_or_none(val):
if val is None:
return None
return int(val, 16)
def json_formatter(traces, *json_args, **json_kwargs):
json_traces = []
for (trace, annotations) in traces:
json_trace = {
'trace_id': hex_str(trace.trace_id),
'span_id': hex_str(trace.span_id),
'name': trace.name,
'annotations': []
}
if trace.parent_span_id:
json_trace['parent_span_id'] = hex_str(trace.parent_span_id)
for annotation in annotations:
json_annotation = {
'key': annotation.name,
'value': annotation.value,
'type': annotation.annotation_type
}
endpoint = annotation.endpoint or trace.endpoint
if endpoint:
json_annotation['host'] = {
'ipv4': endpoint.ipv4,
'port': endpoint.port,
'service_name': endpoint.service_name
}
json_trace['annotations'].append(json_annotation)
json_traces.append(json_trace)
return json.dumps(json_traces, *json_args, **json_kwargs)
def ipv4_to_int(ipv4):
if ipv4 == 'localhost':
ipv4 = '127.0.0.1'
return struct.unpack('!i', socket.inet_aton(ipv4))[0]
def base64_thrift(thrift_obj):
trans = TTransport.TMemoryBuffer()
tbp = TBinaryProtocol.TBinaryProtocol(trans)
thrift_obj.write(tbp)
return base64.b64encode(trans.getvalue())
def binary_annotation_formatter(annotation):
annotation_types = {
'string': ttypes.AnnotationType.STRING,
'bytes': ttypes.AnnotationType.BYTES,
}
annotation_type = annotation_types[annotation.annotation_type]
value = annotation.value
if isinstance(value, unicode):
value = value.encode('utf-8')
return ttypes.BinaryAnnotation(
key=annotation.name,
stringValue=value,
annotationType=annotation_type
)
def i64_to_string(data):
return struct.pack('>q', data)
def i64_to_base64(data):
return base64.b64encode(i64_to_string(data))
def thrift_formatter(trace, annotations, isbased64=False):
thrift_annotations = []
binary_annotations = []
host = None
for annotation in annotations:
endpoint = annotation.endpoint or trace.endpoint
if endpoint and not host:
host = ttypes.Endpoint(
ipv4=ipv4_to_int(endpoint.ipv4),
port=endpoint.port,
serviceName=endpoint.service_name,
)
if annotation.annotation_type == 'timestamp':
thrift_annotations.append(ttypes.Annotation(
timestamp=annotation.value,
value=annotation.name))
else:
binary_annotations.append(
binary_annotation_formatter(annotation))
thrift_trace = ttypes.Span(
traceId=i64_to_string(trace.trace_id),
name=trace.name,
id=i64_to_string(trace.span_id),
host=host,
parentId=i64_to_string(trace.parent_span_id),
annotations=thrift_annotations,
binaryAnnotations=binary_annotations
)
if isbased64:
return base64_thrift(thrift_trace)
else:
return thrift_trace
|
Willyham/tchannel-python
|
tchannel/zipkin/formatters.py
|
Python
|
mit
| 5,332
|
import numpy as np
from lazyflow.rtype import SubRegion
from .abcs import OpTrain
from .abcs import OpPredict
from tsdl.tools import Classification
from tsdl.tools import Regression
class OpStateTrain(OpTrain, Classification):
def execute(self, slot, subindex, roi, result):
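        # Strategy: concatenate train and valid data, take the argmax of the
        # one-hot targets as integer labels, and pick the single input column
        # whose values have the smallest summed squared error against those
        # labels; that column index is the whole "trained" state.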
assert len(self.Train) == 2, "need data and target"
assert len(self.Valid) == 2, "need data and target"
assert roi.start[0] == 0
assert roi.stop[0] == 1
train = self.Train[0][...].wait()
valid = self.Valid[0][...].wait()
X = np.concatenate((train, valid), axis=0)
X = X.view(np.ndarray)
assert len(self.Train[1].meta.shape) == 2,\
"target needs to be a matrix"
assert len(self.Valid[1].meta.shape) == 2,\
"target needs to be a matrix"
train = self.Train[1][...].wait()
valid = self.Valid[1][...].wait()
y = np.concatenate((train, valid), axis=0)
y = np.argmax(y.view(np.ndarray), axis=1)[:, np.newaxis]
sse = np.square(X-y).sum(axis=0)
idx = np.argmin(sse)
result[0] = idx
class OpStatePredict(OpPredict, Classification):
def execute(self, slot, subindex, roi, result):
a = roi.start[0]
b = roi.stop[0]
c = 0
d = self.Input.meta.shape[1]
new_roi = SubRegion(self.Input, start=(a, c), stop=(b, d))
X = self.Input.get(new_roi).wait()
idx = self.Classifier[...].wait()[0]
classes = np.round(X[:, idx]).astype(np.int)
for i, c in enumerate(range(roi.start[1], roi.stop[1])):
result[:, i] = classes == c
class OpStateTrainRegression(OpTrain, Regression):
@classmethod
def get_default_config(cls):
config = super(OpStateTrainRegression, cls).get_default_config()
config["index"] = 0
return config
def execute(self, slot, subindex, roi, result):
result[0] = self._index
class OpStatePredictRegression(OpPredict, Regression):
def execute(self, slot, subindex, roi, result):
a = roi.start[0]
b = roi.stop[0]
idx = self.Classifier[...].wait()[0]
new_roi = SubRegion(self.Input, start=(a, idx), stop=(b, idx+1))
req = self.Input.get(new_roi)
req.writeInto(result)
req.block()
|
burgerdev/hostload
|
tsdl/classifiers/state.py
|
Python
|
mit
| 2,316
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import HourLocator, DateFormatter
import datetime
import sys
from collections import Counter
import math
import re
import numpy as np
months = HourLocator()
hourFmt = DateFormatter('%H')
filepath = str(sys.argv[1])
mtagged = Counter()
tagged = Counter()
untagged = Counter()
total = Counter()
rt = Counter()
k = 0
with open(filepath) as f:
for tweet in f:
if math.fmod(k,100000) == 0 :
print(k)
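        # The findall below splits a CSV-like line of quoted fields; each match
        # stops just before the next '","' separator, so every '"..."' field in
        # a line such as '"f0","f1","f2"' becomes one list element.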
tweet = re.findall('"((?:(?!(?:",")).)*)"', tweet)
td = datetime.datetime.strptime(tweet[1], '%Y-%m-%d %H:%M:%S').strftime('%H')
tags = tweet[10].split(" ")
tags = sorted([t.lower() for t in tags])
if len(tags) == 0 or set(tags).issubset(set([''])) :
untagged[str(td)] += 1
if set(tags).issubset(set(['ferguson','mikebrown'])) :
mtagged[str(td)] += 1
else :
tagged[str(td)] += 1
if int(tweet[8]) == 1 :
rt[str(td)] += 1
total[str(td)] += 1
k = k + 1
tv = sorted([datetime.datetime.strptime(str(t),'%H') for t in tagged])
def process(serie) :
serie = sorted(serie.items(), key=lambda k: int(k[0]))
return [t[1] for t in serie]
counts_t = process(tagged)
counts_m = process(mtagged)
counts_u = process(untagged)
total = process(total)
rt = process(rt)
plt.plot_date(tv,counts_t,'-',label="tagged")
plt.plot_date(tv,counts_m,'-', label="min tagged")
plt.plot_date(tv,counts_u,'-',label="no tagged")
plt.plot_date(tv,total,'--',label="total")
plt.plot_date(tv,rt,'--',label="retweet")
plt.legend()
plt.gcf().autofmt_xdate()
plt.savefig(str(sys.argv[2]),dpi=200)
|
jwheatp/twitter-riots
|
analysis/plotfreq.py
|
Python
|
mit
| 1,668
|
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.db.models import Avg, Count, Max, ExpressionWrapper, F, CharField
import json
import datetime
from apps.base.views import (BaseView, LoginRequiredMixin)
from apps.ip.models import IP
from .models import Transaction
class TransactionStatusAPI(BaseView):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get(self, *args, **kwargs):
result = {
"data": [
]
}
try:
your_ip = IP.objects.values_list('address', flat=True).filter(user_profile=self.request.user.profile)
your_id_transaction = Transaction.objects.filter(address__in=set(your_ip)).values('address').annotate(id=Max('id')).values_list("id", flat=True)
your_transaction = Transaction.objects.filter(id__in=set(your_id_transaction))
#print("your_transaction: ", your_transaction)
for transaction in your_transaction:
arr_data = []
arr_data.append(transaction.address)
arr_data.append(transaction.agent_ping_time.strftime('%m/%d/%Y %H:%M:%S'))
if transaction.time_avg == 999:
arr_data.append('Offline')
else:
arr_data.append(transaction.time_avg)
result['data'].append(arr_data)
except Exception as error:
print("Something error: ", error)
return HttpResponse(json.dumps(result))
class TransactionAPI(BaseView):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get(self, *args, **kwargs):
result = {
"data": [
]
}
try:
your_ip = IP.objects.values_list('address', flat=True).filter(user_profile=self.request.user.profile)
your_transaction = Transaction.objects.filter(address__in=set(your_ip)).order_by('-id')
for transaction in your_transaction:
arr_data = []
arr_data.append(transaction.address)
arr_data.append(transaction.agent_ping_time.strftime('%m/%d/%Y %H:%M:%S'))
arr_data.append(str(transaction.time_avg) + ' ms')
result['data'].append(arr_data)
        except Exception as error:
            print("Something went wrong:", error)
return HttpResponse(json.dumps(result))
def post(self, *args, **kwargs):
js_data = self.request.body.decode('utf-8')
if self._insert_data(js_data):
result = {'status': True}
else:
result = {'status': False}
return HttpResponse(json.dumps(result))
def _insert_data(self, receive):
# {
# "data": b '[{"i": "192.168.1.1", "t": 77, "c": "2016-05-17 07:32:42"}, {"i": "192.168.1.2", "t": 22, "c": "2016-05-17 07:32:42"}, {"i": "192.168.1.3", "t": 97, "c": "2016-05-17 07:32:42"}]',
# 'vhost': 'default'
# }
try:
receive = json.loads(receive)
list_data = receive['data']
print("list_data: ", list_data)
for item in list_data:
print("item: ", item)
transaction = Transaction()
transaction.address = item['ip']
transaction.vhost = receive['vhost']
transaction.time_avg = float(str(item['t']))
transaction.agent_ping_time = datetime.datetime.strptime(item['c'], "%Y-%m-%d %H:%M:%S")
transaction.save()
return True
except Exception as error:
print("Loi me no roi ", error)
return False
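# Illustrative client sketch (comments only): _insert_data() expects a JSON body whose
# "data" key is a list of readings. Note that the sample payload in the comment above
# uses the key "i" while the code reads item['ip']; this sketch follows the code. The
# endpoint URL is an assumption, not defined in this module.
#   import json, requests
#   body = {
#       "vhost": "default",
#       "data": [{"ip": "192.168.1.1", "t": 77, "c": "2016-05-17 07:32:42"}],
#   }
#   requests.post("http://nms-host/transaction/api/", data=json.dumps(body))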
|
pythonvietnam/nms
|
apps/transaction/views.py
|
Python
|
mit
| 3,975
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'sensuconfigs.views.home', name='home'),
# url(r'^sensuconfigs/', include('sensuconfigs.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
Numerical-Brass/sensu-configs
|
sensuconfigs/urls.py
|
Python
|
mit
| 566
|
#!/usr/bin/env python
#coding=utf-8
from __future__ import absolute_import
from celery import Celery
app = Celery('lib', include=['lib.tasks'])
app.config_from_object('lib.config')
if __name__ == '__main__':
app.start()
|
youqingkui/zhihufav
|
lib/celery_app.py
|
Python
|
mit
| 230
|
def execute(options, arguments):
print('Running test command')
|
manhg/matsumi
|
task/test.py
|
Python
|
mit
| 66
|
#!/usr/bin/env python
from counters import __version__
sdict = {
'name' : 'counters',
'version' : __version__,
'description' : 'A Python port of https://github.com/francois/counters',
'long_description' : 'Provides an easy interface to count any kind of performance metrics within a system',
'url': 'http://github.com/francois/pycounters',
'download_url' : 'http://cloud.github.com/downloads/francois/pycounters/counters-%s.tar.gz' % __version__,
'author' : 'François Beausoleil',
'author_email' : 'francois@teksol.info',
'maintainer' : 'François Beausoleil',
'maintainer_email' : 'francois@teksol.info',
'keywords' : ['Redis', 'key-value store'],
'license' : 'MIT',
'packages' : ['counters'],
'test_suite' : 'tests.all_tests',
'classifiers' : [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
}
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(**sdict)
|
francois/pycounters
|
setup.py
|
Python
|
mit
| 1,199
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import logging
import requests
from indra.config import get_config
from indra.literature import pubmed_client
# Python3
try:
from functools import lru_cache
# Python2
except ImportError:
from functools32 import lru_cache
logger = logging.getLogger(__name__)
crossref_url = 'http://api.crossref.org/'
crossref_search_url = 'http://search.crossref.org/dois'
# http://clickthroughsupport.crossref.org/click-through-service-for-researchers/
def get_api_key():
return get_config('CROSSREF_CLICKTHROUGH_KEY')
@lru_cache(maxsize=100)
def get_metadata(doi):
"""Returns the metadata of an article given its DOI from CrossRef
as a JSON dict"""
url = crossref_url + 'works/' + doi
res = requests.get(url)
if res.status_code != 200:
logger.info('Could not get CrossRef metadata for DOI %s, code %d' %
(doi, res.status_code))
return None
raw_message = res.json()
metadata = raw_message.get('message')
return metadata
def get_fulltext_links(doi):
"""Return a list of links to the full text of an article given its DOI.
Each list entry is a dictionary with keys:
- URL: the URL to the full text
- content-type: e.g. text/xml or text/plain
- content-version
- intended-application: e.g. text-mining
"""
metadata = get_metadata(doi)
if metadata is None:
return None
links = metadata.get('link')
return links
def get_publisher(doi):
metadata = get_metadata(doi)
if metadata is None:
return None
publisher = metadata.get('publisher')
return publisher
def get_url(doi):
metadata = get_metadata(doi)
if metadata is None:
return None
url = metadata.get('URL')
return url
def get_license_links(doi):
metadata = get_metadata(doi)
if metadata is None:
return None
licenses = metadata.get('license')
if licenses is None:
return None
urls = [l.get('URL') for l in licenses]
return urls
def doi_query(pmid, search_limit=10):
"""Get the DOI for a PMID by matching CrossRef and Pubmed metadata.
Searches CrossRef using the article title and then accepts search hits only
if they have a matching journal ISSN and page number with what is obtained
from the Pubmed database.
"""
# Get article metadata from PubMed
pubmed_meta_dict = pubmed_client.get_metadata_for_ids(
[pmid], get_issns_from_nlm=True)
if pubmed_meta_dict is None or pubmed_meta_dict.get(pmid) is None:
logger.warning('No metadata found in Pubmed for PMID%s' % pmid)
return None
# The test above ensures we've got this now
pubmed_meta = pubmed_meta_dict[pmid]
# Check if we already got a DOI from Pubmed itself!
if pubmed_meta.get('doi'):
return pubmed_meta.get('doi')
# Check for the title, which we'll need for the CrossRef search
pm_article_title = pubmed_meta.get('title')
if pm_article_title is None:
logger.warning('No article title found in Pubmed for PMID%s' % pmid)
return None
# Get the ISSN list
pm_issn_list = pubmed_meta.get('issn_list')
if not pm_issn_list:
logger.warning('No ISSNs found in Pubmed for PMID%s' % pmid)
return None
# Get the page number
pm_page = pubmed_meta.get('page')
if not pm_page:
logger.debug('No page number found in Pubmed for PMID%s' % pmid)
return None
# Now query CrossRef using the title we've got
url = crossref_search_url
params = {'q': pm_article_title, 'sort': 'score'}
try:
res = requests.get(crossref_search_url, params)
except requests.exceptions.ConnectionError as e:
logger.error('CrossRef service could not be reached.')
logger.error(e)
return None
except Exception as e:
logger.error('Error accessing CrossRef service: %s' % str(e))
return None
if res.status_code != 200:
logger.info('PMID%s: no search results from CrossRef, code %d' %
(pmid, res.status_code))
return None
raw_message = res.json()
mapped_doi = None
# Iterate over the search results, looking up XREF metadata
for result_ix, result in enumerate(raw_message):
if result_ix > search_limit:
logger.info('PMID%s: No match found within first %s results, '
'giving up!' % (pmid, search_limit))
break
xref_doi = result['doi']
# Get the XREF metadata using the DOI
xref_meta = get_metadata(xref_doi)
if xref_meta is None:
continue
xref_issn_list = xref_meta.get('ISSN')
xref_page = xref_meta.get('page')
# If there's no ISSN info for this article, skip to the next result
if not xref_issn_list:
logger.debug('No ISSN found for DOI %s, skipping' % xref_doi)
continue
# If there's no page info for this article, skip to the next result
if not xref_page:
logger.debug('No page number found for DOI %s, skipping' %
xref_doi)
continue
# Now check for an ISSN match by looking for the set intersection
# between the Pubmed ISSN list and the CrossRef ISSN list.
matching_issns = set(pm_issn_list).intersection(set(xref_issn_list))
# Before comparing page numbers, regularize the page numbers a bit.
# Note that we only compare the first page number, since frequently
# the final page number will simply be missing in one of the data
# sources. We also canonicalize page numbers of the form '14E' to
# 'E14' (which is the format used by Pubmed).
pm_start_page = pm_page.split('-')[0].upper()
xr_start_page = xref_page.split('-')[0].upper()
if xr_start_page.endswith('E'):
xr_start_page = 'E' + xr_start_page[:-1]
# Now compare the ISSN list and page numbers
if matching_issns and pm_start_page == xr_start_page:
# We found a match!
mapped_doi = xref_doi
break
# Otherwise, keep looking through the results...
# Return a DOI, or None if we didn't find one that met our matching
# criteria
return mapped_doi
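# A minimal usage sketch, assuming network access and a working INDRA install; the
# PMID below is a placeholder chosen purely for illustration, not part of this module.
if __name__ == '__main__':
    example_pmid = '27095720'
    doi = doi_query(example_pmid)
    print('DOI for PMID%s: %s' % (example_pmid, doi))
    if doi is not None:
        print('Publisher: %s' % get_publisher(doi))
        print('Full text links: %s' % get_fulltext_links(doi))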
|
johnbachman/belpy
|
indra/literature/crossref_client.py
|
Python
|
mit
| 6,381
|
try:
import pandas as pd
except ImportError:
pd = None
if pd is not None:
from tdda.constraints.pd.constraints import (discover_df,
verify_df,
detect_df)
from tdda.constraints.db.constraints import (discover_db_table,
verify_db_table,
detect_db_table)
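# Usage sketch (comments only): these re-exports support the typical tdda flow of
# discovering constraints from one DataFrame and verifying another against them.
# The exact call pattern below is an assumption about the public API, not something
# defined in this file.
#   constraints = discover_df(df)
#   verification = verify_df(df, constraints_path)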
|
tdda/tdda
|
tdda/constraints/__init__.py
|
Python
|
mit
| 455
|
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
if bytes == str: # python2
iseq = lambda s: map(ord, s)
bseq = lambda s: ''.join(map(chr, s))
buffer = lambda s: s
else: # python3
iseq = lambda s: s
bseq = bytes
buffer = lambda s: s.buffer
def b58encode(input):
'''Encode a string using Base58'''
origlen = len(input)
input = input.lstrip(b'\0')
newlen = len(input)
p, acc = 1, 0
for c in iseq(input[::-1]):
acc += p * c
p = p << 8
result = ''
while acc > 0:
acc, mod = divmod(acc, 58)
result += alphabet[mod]
return (result + alphabet[0] * (origlen - newlen))[::-1]
def b58decode(input):
'''Decode a Base58 encoded string'''
if not isinstance(input, str):
input = input.decode('ascii')
origlen = len(input)
input = input.lstrip(alphabet[0])
newlen = len(input)
p, acc = 1, 0
for c in input[::-1]:
acc += p * alphabet.index(c)
p *= 58
result = []
while acc > 0:
acc, mod = divmod(acc, 256)
result.append(mod)
return (bseq(result) + b'\0' * (origlen - newlen))[::-1]
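# A small self-check sketch: round-trip a byte string (with a leading zero byte)
# through the encoder and decoder defined above.
if __name__ == '__main__':
    payload = b'\x00hello base58'
    encoded = b58encode(payload)
    assert b58decode(encoded) == payload
    print(encoded)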
|
d1ffeq/ecp
|
ecp/base58.py
|
Python
|
mit
| 1,183
|
from import_string.base import import_string
class Config(dict):
def apply(self, cfg):
obj = import_string(cfg)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
class BaseConfig(object):
DEBUG = False
APP_ID = 0
IMPLICIT = True
USER_LOGIN = 'test@vk.com'
USER_PASSWORD = 'password'
LOG_TO_FILE = True
LOG_FILENAME = 'vk_bot.log'
PLUGINS_PATH = 'plugins'
STORAGE_PATH = 'storage/db.db'
COMMAND_SYMBOL = '!'
PREFIX = '[BOT] '
ADMINS = []
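# Usage sketch (comments only): Config.apply() copies every UPPERCASE attribute of the
# object resolved from a dotted import string into the dict. The dotted path below
# assumes this module is importable as vk_bot.config and that import_string accepts
# such a path.
#   cfg = Config()
#   cfg.apply('vk_bot.config.BaseConfig')
#   cfg['DEBUG']           # -> False
#   cfg['COMMAND_SYMBOL']  # -> '!'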
|
roman901/vk_bot
|
vk_bot/config.py
|
Python
|
mit
| 561
|
import sys
import numpy as np
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
from spherecluster import SphericalKMeans
from spherecluster import VonMisesFisherMixture
from spherecluster import sample_vMF
plt.ion()
'''
Implements "small-mix" example from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
Provides a basic smell test that the algoriths are performing as intended.
'''
def r_input(val=None):
val = val or ''
if sys.version_info[0] >= 3:
        # input() is the Python 3 counterpart of raw_input(); no eval() is needed here
        return input(val)
return raw_input(val)
###############################################################################
# Generate small-mix dataset
mu_0 = np.array([-0.251, -0.968])
mu_1 = np.array([0.399, 0.917])
mus = [mu_0, mu_1]
kappa_0 = 8 # concentration parameter
kappa_1 = 2 # concentration parameter
kappas = [kappa_0, kappa_1]
num_points_per_class = 100
X_0 = sample_vMF(mu_0, kappa_0, num_points_per_class)
X_1 = sample_vMF(mu_1, kappa_1, num_points_per_class)
X = np.zeros((2 * num_points_per_class, 2))
X[:num_points_per_class, :] = X_0
X[num_points_per_class:, :] = X_1
labels = np.zeros((2 * num_points_per_class, ))
labels[num_points_per_class:] = 1
###############################################################################
# K-Means clustering
km = KMeans(n_clusters=2, init='k-means++', n_init=10)
km.fit(X)
cdists = []
for center in km.cluster_centers_:
cdists.append(np.linalg.norm(mus[0] - center))
km_mu_0_idx = np.argmin(cdists)
km_mu_1_idx = 1 - km_mu_0_idx
km_mu_0_error = np.linalg.norm(mus[0] - km.cluster_centers_[km_mu_0_idx])
km_mu_1_error = np.linalg.norm(mus[1] - km.cluster_centers_[km_mu_1_idx])
km_mu_0_error_norm = np.linalg.norm(mus[0] - km.cluster_centers_[km_mu_0_idx] / np.linalg.norm(km.cluster_centers_[km_mu_0_idx]))
km_mu_1_error_norm = np.linalg.norm(mus[1] - km.cluster_centers_[km_mu_1_idx] / np.linalg.norm(km.cluster_centers_[km_mu_1_idx]))
###############################################################################
# Spherical K-Means clustering
skm = SphericalKMeans(n_clusters=2, init='k-means++', n_init=20)
skm.fit(X)
cdists = []
for center in skm.cluster_centers_:
cdists.append(np.linalg.norm(mus[0] - center))
skm_mu_0_idx = np.argmin(cdists)
skm_mu_1_idx = 1 - skm_mu_0_idx
skm_mu_0_error = np.linalg.norm(mus[0] - skm.cluster_centers_[skm_mu_0_idx])
skm_mu_1_error = np.linalg.norm(mus[1] - skm.cluster_centers_[skm_mu_1_idx])
###############################################################################
# Mixture of von Mises Fisher clustering (soft)
vmf_soft = VonMisesFisherMixture(n_clusters=2, posterior_type='soft', n_init=20)
vmf_soft.fit(X)
cdists = []
for center in vmf_soft.cluster_centers_:
cdists.append(np.linalg.norm(mus[0] - center))
vmf_soft_mu_0_idx = np.argmin(cdists)
vmf_soft_mu_1_idx = 1 - vmf_soft_mu_0_idx
vmf_soft_mu_0_error = np.linalg.norm(
mus[0] - vmf_soft.cluster_centers_[vmf_soft_mu_0_idx])
vmf_soft_mu_1_error = np.linalg.norm(
mus[1] - vmf_soft.cluster_centers_[vmf_soft_mu_1_idx])
###############################################################################
# Mixture of von Mises Fisher clustering (hard)
vmf_hard = VonMisesFisherMixture(n_clusters=2, posterior_type='hard', n_init=20,
init='random-orthonormal')
vmf_hard.fit(X)
cdists = []
for center in vmf_hard.cluster_centers_:
cdists.append(np.linalg.norm(mus[0] - center))
vmf_hard_mu_0_idx = np.argmin(cdists)
vmf_hard_mu_1_idx = 1 - vmf_hard_mu_0_idx
vmf_hard_mu_0_error = np.linalg.norm(
mus[0] - vmf_hard.cluster_centers_[vmf_hard_mu_0_idx])
vmf_hard_mu_1_error = np.linalg.norm(
mus[1] - vmf_hard.cluster_centers_[vmf_hard_mu_1_idx])
###############################################################################
# Show results
plt.figure()
# Original data
ax = plt.subplot(1, 5, 1, aspect='equal', adjustable='box-forced',
xlim=[-1.1, 1.1], ylim=[-1.1, 1.1])
for ex in X_0:
plt.plot(ex[0], ex[1], 'r+')
for ex in X_1:
plt.plot(ex[0], ex[1], 'b+')
ax.set_aspect('equal')
plt.title('Original data')
plt.show()
# K-means labels
ax = plt.subplot(1, 5, 2, aspect='equal', adjustable='box-forced',
xlim=[-1.1, 1.1], ylim=[-1.1, 1.1])
for ex, label in zip(X, km.labels_):
if label == km_mu_0_idx:
plt.plot(ex[0], ex[1], 'r+')
else:
plt.plot(ex[0], ex[1], 'b+')
ax.set_aspect('equal')
plt.title('K-means clustering')
plt.show()
# Spherical K-means labels
ax = plt.subplot(1, 5, 3, aspect='equal', adjustable='box-forced',
xlim=[-1.1, 1.1], ylim=[-1.1, 1.1])
for ex, label in zip(X, skm.labels_):
if label == skm_mu_0_idx:
plt.plot(ex[0], ex[1], 'r+')
else:
plt.plot(ex[0], ex[1], 'b+')
ax.set_aspect('equal')
plt.title('Spherical K-means clustering')
plt.show()
# von Mises Fisher soft labels
ax = plt.subplot(1, 5, 4, aspect='equal', adjustable='box-forced',
xlim=[-1.1, 1.1], ylim=[-1.1, 1.1])
for ex, label in zip(X, vmf_soft.labels_):
if label == vmf_soft_mu_0_idx:
plt.plot(ex[0], ex[1], 'r+')
else:
plt.plot(ex[0], ex[1], 'b+')
ax.set_aspect('equal')
plt.title('soft-movMF clustering')
plt.show()
# von Mises Fisher hard labels
ax = plt.subplot(1, 5, 5, aspect='equal', adjustable='box-forced',
xlim=[-1.1, 1.1], ylim=[-1.1, 1.1])
for ex, label in zip(X, vmf_hard.labels_):
if label == vmf_hard_mu_0_idx:
plt.plot(ex[0], ex[1], 'r+')
else:
plt.plot(ex[0], ex[1], 'b+')
ax.set_aspect('equal')
plt.title('hard-movMF clustering')
plt.show()
print('mu 0: {}'.format(mu_0))
print('mu 0: {} (kmeans), error={} ({})'.format(km.cluster_centers_[km_mu_0_idx], km_mu_0_error, km_mu_0_error_norm))
print('mu 0: {} (spherical kmeans), error={}'.format(skm.cluster_centers_[skm_mu_0_idx], skm_mu_0_error))
print('mu 0: {} (vmf-soft), error={}'.format(vmf_soft.cluster_centers_[vmf_soft_mu_0_idx], vmf_soft_mu_0_error))
print('mu 0: {} (vmf-hard), error={}'.format(vmf_hard.cluster_centers_[vmf_hard_mu_0_idx], vmf_hard_mu_0_error))
print('---')
print('mu 1: {}'.format(mu_1))
print('mu 1: {} (kmeans), error={} ({})'.format(km.cluster_centers_[km_mu_1_idx], km_mu_1_error, km_mu_1_error_norm))
print('mu 1: {} (spherical kmeans), error={}'.format(skm.cluster_centers_[skm_mu_1_idx], skm_mu_1_error))
print('mu 1: {} (vmf-soft), error={}'.format(vmf_soft.cluster_centers_[vmf_soft_mu_1_idx], vmf_soft_mu_1_error))
print('mu 1: {} (vmf-hard), error={}'.format(vmf_hard.cluster_centers_[vmf_hard_mu_1_idx], vmf_hard_mu_1_error))
print('---')
print('true kappas {}'.format(kappas))
print('vmf-soft kappas {}'.format(vmf_soft.concentrations_[[vmf_soft_mu_0_idx, vmf_soft_mu_1_idx]]))
print('vmf-hard kappas {}'.format(vmf_hard.concentrations_[[vmf_hard_mu_0_idx, vmf_hard_mu_1_idx]]))
print('---')
print('vmf-soft weights {}'.format(vmf_soft.weights_[[vmf_soft_mu_0_idx, vmf_soft_mu_1_idx]]))
print('vmf-hard weights {}'.format(vmf_hard.weights_[[vmf_hard_mu_0_idx, vmf_hard_mu_1_idx]]))
print('---')
print("Homogeneity: %0.3f (k-means)" % metrics.homogeneity_score(labels, km.labels_))
print("Homogeneity: %0.3f (spherical k-means)" % metrics.homogeneity_score(labels, skm.labels_))
print("Homogeneity: %0.3f (vmf-soft)" % metrics.homogeneity_score(labels, vmf_soft.labels_))
print("Homogeneity: %0.3f (vmf-hard)" % metrics.homogeneity_score(labels, vmf_hard.labels_))
print('---')
print("Completeness: %0.3f (k-means)" % metrics.completeness_score(labels, km.labels_))
print("Completeness: %0.3f (spherical k-means)" % metrics.completeness_score(labels, skm.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, vmf_soft.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, vmf_hard.labels_))
print('---')
print("V-measure: %0.3f (k-means)" % metrics.v_measure_score(labels, km.labels_))
print("V-measure: %0.3f (spherical k-means)" % metrics.v_measure_score(labels, skm.labels_))
print("V-measure: %0.3f (vmf-soft)" % metrics.v_measure_score(labels, vmf_soft.labels_))
print("V-measure: %0.3f (vmf-hard)" % metrics.v_measure_score(labels, vmf_hard.labels_))
r_input()
|
clara-labs/spherecluster
|
examples/small_mix.py
|
Python
|
mit
| 8,179
|
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.oauth2.token
'''
from fantastico.utils.dictionary_object import DictionaryObject
class Token(DictionaryObject):
'''This class provides a token model which can be built from a generic dictionary. All dictionary keys become token
members.'''
|
rcosnita/fantastico
|
fantastico/oauth2/token.py
|
Python
|
mit
| 1,400
|
from Hangman import Hangman
def test_check3():
word = "Hello"
attempts = 5
game = Hangman(word, attempts)
for elem in word[:-1]:
game.attempt(elem)
game.attempt("a")
game.check_status()
assert game.status == 0
assert game.number_of_mistakes == 1
game.attempt("a")
assert game.number_of_mistakes == 2
game.attempt("b")
assert game.number_of_mistakes == 3
game.attempt("c")
assert game.number_of_mistakes == 4
game.attempt("d")
assert game.number_of_mistakes == 5
game.check_status()
assert game.status == -1
|
glazastyi/Hangman
|
tests/test_check3.py
|
Python
|
mit
| 589
|
from django.shortcuts import render
def page(req):
return render(req, 'en/public/connection.html')
|
addisaden/django-tutorials
|
TasksManager/views/connection.py
|
Python
|
mit
| 104
|
#!/usr/bin/env nix-shell
#!nix-shell --pure -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"
import json
import re
import requests
import sys
releases = ("openjdk8", "openjdk11", "openjdk13", "openjdk14", "openjdk15", "openjdk16")
oses = ("mac", "linux")
types = ("jre", "jdk")
impls = ("hotspot", "openj9")
arch_to_nixos = {
"x64": ("x86_64",),
"aarch64": ("aarch64",),
"arm": ("armv6l", "armv7l"),
}
def get_sha256(url):
resp = requests.get(url)
if resp.status_code != 200:
print("error: could not fetch checksum from url {}: code {}".format(url, resp.code), file=sys.stderr)
sys.exit(1)
return resp.text.strip().split(" ")[0]
def generate_sources(release, assets):
out = {}
for asset in assets:
if asset["os"] not in oses: continue
if asset["binary_type"] not in types: continue
if asset["openjdk_impl"] not in impls: continue
if asset["heap_size"] != "normal": continue
if asset["architecture"] not in arch_to_nixos: continue
# examples: 11.0.1+13, 8.0.222+10
version, build = asset["version_data"]["semver"].split("+")
type_map = out.setdefault(asset["os"], {})
impl_map = type_map.setdefault(asset["binary_type"], {})
arch_map = impl_map.setdefault(asset["openjdk_impl"], {
"packageType": asset["binary_type"],
"vmType": asset["openjdk_impl"],
})
for nixos_arch in arch_to_nixos[asset["architecture"]]:
arch_map[nixos_arch] = {
"url": asset["binary_link"],
"sha256": get_sha256(asset["checksum_link"]),
"version": version,
"build": build,
}
return out
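# For reference, generate_sources() returns a nested mapping shaped roughly like the
# sketch below (placeholder values, not real URLs or checksums):
#   {
#     "linux": {
#       "jdk": {
#         "hotspot": {
#           "packageType": "jdk",
#           "vmType": "hotspot",
#           "x86_64": {"url": "...", "sha256": "...", "version": "11.0.1", "build": "13"}
#         }
#       }
#     }
#   }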
out = {}
for release in releases:
resp = requests.get("https://api.adoptopenjdk.net/v2/latestAssets/releases/" + release)
if resp.status_code != 200:
print("error: could not fetch data for release {} (code {})".format(release, resp.code), file=sys.stderr)
sys.exit(1)
out[release] = generate_sources(release, resp.json())
with open("sources.json", "w") as f:
json.dump(out, f, indent=2, sort_keys=True)
f.write('\n')
|
NixOS/nixpkgs
|
pkgs/development/compilers/adoptopenjdk-bin/generate-sources.py
|
Python
|
mit
| 2,198
|
from typing import Dict, List, Union, cast
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model
from .cache import element_cache
GROUP_DEFAULT_PK = 1 # This is the hard coded pk for the default group.
GROUP_ADMIN_PK = 2 # This is the hard coded pk for the admin group.
# Hard coded collection string for users and groups
group_collection_string = "users/group"
user_collection_string = "users/user"
def get_group_model() -> Model:
"""
Return the Group model that is active in this project.
"""
try:
return apps.get_model(settings.AUTH_GROUP_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured(
"AUTH_GROUP_MODEL must be of the form 'app_label.model_name'"
)
except LookupError:
raise ImproperlyConfigured(
f"AUTH_GROUP_MODEL refers to model '{settings.AUTH_GROUP_MODEL}' that has not been installed"
)
def has_perm(user_id: int, perm: str) -> bool:
"""
Checks that user has a specific permission.
user_id 0 means anonymous user.
"""
# Convert user to right type
# TODO: Remove this and make use, that user has always the right type
user_id = user_to_user_id(user_id)
return async_to_sync(async_has_perm)(user_id, perm)
async def async_has_perm(user_id: int, perm: str) -> bool:
"""
Checks that user has a specific permission.
user_id 0 means anonymous user.
"""
if not user_id and not await async_anonymous_is_enabled():
has_perm = False
elif not user_id:
# Use the permissions from the default group.
default_group = await element_cache.get_element_full_data(
group_collection_string, GROUP_DEFAULT_PK
)
if default_group is None:
raise RuntimeError("Default Group does not exist.")
has_perm = perm in default_group["permissions"]
else:
user_data = await element_cache.get_element_full_data(
user_collection_string, user_id
)
if user_data is None:
raise RuntimeError(f"User with id {user_id} does not exist.")
if GROUP_ADMIN_PK in user_data["groups_id"]:
# User in admin group (pk 2) grants all permissions.
has_perm = True
else:
# Get all groups of the user and then see, if one group has the required
# permission. If the user has no groups, then use the default group.
group_ids = user_data["groups_id"] or [GROUP_DEFAULT_PK]
for group_id in group_ids:
group = await element_cache.get_element_full_data(
group_collection_string, group_id
)
if group is None:
raise RuntimeError(
f"User is in non existing group with id {group_id}."
)
if perm in group["permissions"]:
has_perm = True
break
else:
has_perm = False
return has_perm
def in_some_groups(user_id: int, groups: List[int]) -> bool:
"""
Checks that user is in at least one given group. Groups can be given as a list
of ids or group instances. If the user is in the admin group (pk = 2) the result
is always true.
user_id 0 means anonymous user.
"""
if len(groups) == 0:
return False # early end here, if no groups are given.
# Convert user to right type
# TODO: Remove this and make use, that user has always the right type
user_id = user_to_user_id(user_id)
return async_to_sync(async_in_some_groups)(user_id, groups)
async def async_in_some_groups(user_id: int, groups: List[int]) -> bool:
"""
Checks that user is in at least one given group. Groups can be given as a list
of ids. If the user is in the admin group (pk = 2) the result
is always true.
user_id 0 means anonymous user.
"""
if not len(groups):
return False # early end here, if no groups are given.
if not user_id and not await async_anonymous_is_enabled():
in_some_groups = False
elif not user_id:
# Use the permissions from the default group.
in_some_groups = GROUP_DEFAULT_PK in groups
else:
user_data = await element_cache.get_element_full_data(
user_collection_string, user_id
)
if user_data is None:
raise RuntimeError(f"User with id {user_id} does not exist.")
if GROUP_ADMIN_PK in user_data["groups_id"]:
# User in admin group (pk 2) grants all permissions.
in_some_groups = True
else:
# Get all groups of the user and then see, if one group has the required
# permission. If the user has no groups, then use the default group.
group_ids = user_data["groups_id"] or [GROUP_DEFAULT_PK]
for group_id in group_ids:
if group_id in groups:
in_some_groups = True
break
else:
in_some_groups = False
return in_some_groups
def anonymous_is_enabled() -> bool:
"""
Returns True if the anonymous user is enabled in the settings.
"""
from ..core.config import config
return config["general_system_enable_anonymous"]
async def async_anonymous_is_enabled() -> bool:
"""
Like anonymous_is_enabled but async.
"""
from ..core.config import config
if config.key_to_id is None:
await config.build_key_to_id()
config.key_to_id = cast(Dict[str, int], config.key_to_id)
element = await element_cache.get_element_full_data(
config.get_collection_string(),
config.key_to_id["general_system_enable_anonymous"],
)
return False if element is None else element["value"]
AnyUser = Union[Model, int, AnonymousUser, None]
def user_to_user_id(user: AnyUser) -> int:
"""
Takes an object, that represents a user returns its user_id.
user_id 0 means anonymous user.
User can be
* an user object,
* an user id or
* an anonymous user.
Raises an TypeError, if the given user object can not be converted.
"""
User = get_user_model()
if user is None:
user_id = 0
elif isinstance(user, int):
# Nothing to do
user_id = user
elif isinstance(user, AnonymousUser):
user_id = 0
elif isinstance(user, User):
user_id = user.pk
else:
raise TypeError(
f"Unsupported type for user. User {user} has type {type(user)}."
)
return user_id
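# Quick illustration of user_to_user_id() (comments only; the results follow directly
# from the branches above):
#   user_to_user_id(None)             -> 0
#   user_to_user_id(42)               -> 42
#   user_to_user_id(AnonymousUser())  -> 0
#   user_to_user_id(some_user)        -> some_user.pk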
|
boehlke/OpenSlides
|
openslides/utils/auth.py
|
Python
|
mit
| 6,857
|
# -*- coding: utf-8 -*-
"""
.. module:: organizations
"""
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils.text import slugify
from django.views.generic import View
from apps.volontulo.forms import VolounteerToOrganizationContactForm
from apps.volontulo.lib.email import send_mail
from apps.volontulo.models import Offer
from apps.volontulo.models import Organization
from apps.volontulo.models import UserProfile
from apps.volontulo.utils import correct_slug
def organizations_list(request):
"""View responsible for listing all organizations.
:param request: WSGIRequest instance
"""
organizations = Organization.objects.all()
return render(
request,
"organizations/list.html",
{'organizations': organizations},
)
class OrganizationsCreate(View):
"""Class view supporting creation of new organization."""
@staticmethod
@login_required
def get(request):
"""Method responsible for rendering form for new organization."""
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
@staticmethod
@login_required
def post(request):
"""Method responsible for saving new organization."""
if not (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
messages.error(
request,
"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
organization = Organization(
name=request.POST.get('name'),
address=request.POST.get('address'),
description=request.POST.get('description'),
)
organization.save()
request.user.userprofile.organizations.add(organization)
messages.success(
request,
"Organizacja została dodana."
)
return redirect(
'organization_view',
slug=slugify(organization.name),
id_=organization.id,
)
@correct_slug(Organization, 'organization_form', 'name')
@login_required
def organization_form(request, slug, id_): # pylint: disable=unused-argument
"""View responsible for editing organization.
Edition will only work, if logged user has been registered as organization.
"""
org = Organization.objects.get(pk=id_)
users = [profile.user.email for profile in org.userprofiles.all()]
if (
request.user.is_authenticated() and
request.user.email not in users
):
messages.error(
request,
"Nie masz uprawnień do edycji tej organizacji."
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
if not (
request.user.is_authenticated() and
UserProfile.objects.get(user=request.user).organizations
):
return redirect('homepage')
if request.method == 'POST':
if (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
org.name = request.POST.get('name')
org.address = request.POST.get('address')
org.description = request.POST.get('description')
org.save()
messages.success(
request,
"Oferta została dodana/zmieniona."
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
else:
messages.error(
request,
"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': org},
)
@correct_slug(Organization, 'organization_view', 'name')
def organization_view(request, slug, id_): # pylint: disable=unused-argument
"""View responsible for viewing organization."""
org = get_object_or_404(Organization, id=id_)
offers = Offer.objects.filter(organization_id=id_)
allow_contact = True
allow_edit = False
allow_offer_create = False
if (
request.user.is_authenticated() and
request.user.userprofile in org.userprofiles.all()
):
allow_contact = False
allow_edit = True
allow_offer_create = True
if request.method == 'POST':
form = VolounteerToOrganizationContactForm(request.POST)
if form.is_valid():
send_mail(
request,
'volunteer_to_organisation',
[
userprofile.user.email
for userprofile in org.userprofiles.all()
],
{k: v for k, v in request.POST.items()},
)
messages.success(request, "Email został wysłany.")
else:
messages.error(
request,
"Formularz zawiera nieprawidłowe dane: {errors}".format(
errors=form.errors
)
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': form,
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
},
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': VolounteerToOrganizationContactForm(),
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
}
)
|
mlipa/volontulo
|
backend/apps/volontulo/views/organizations.py
|
Python
|
mit
| 6,545
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('articles', '0005_auto_20160407_2257'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='add_date',
field=models.DateTimeField(default=datetime.datetime(2016, 4, 8, 15, 32, 8, 270806)),
),
]
|
davogler/POSTv3
|
articles/migrations/0006_auto_20160408_1532.py
|
Python
|
mit
| 469
|
"""
Settings.
"""
import importlib
def import_from_string(val):
"""
Attempt to import a class from a string representation.
"""
try:
parts = val.split('.')
module_path, class_name = '.'.join(parts[:-1]), parts[-1]
m = importlib.import_module(module_path)
return getattr(m, class_name)
except (ImportError, AttributeError) as e:
msg = 'Could not import {}, {}, {}'.format(val, e.__class__.__name__, e)
raise ImportError(msg)
DEFAULTS = {
'OBTAIN_JWT_ALLOWED_FAIL_ATTEMPTS': 3,
'CACHE_BACKEND_CLASS': 'drf_requests_jwt.backends.file_cache.FileCacheBackend'
}
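# A quick illustrative check of import_from_string(); 'collections.OrderedDict' is just
# a stdlib example target and has nothing to do with the DEFAULTS above.
if __name__ == '__main__':
    example_cls = import_from_string('collections.OrderedDict')
    print(example_cls)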
|
sensidev/drf-requests-jwt
|
drf_requests_jwt/settings.py
|
Python
|
mit
| 637
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class MeasurementValueSource(IdentifiedObject):
"""MeasurementValueSource describes the alternative sources updating a MeasurementValue. User conventions for how to use the MeasurementValueSource attributes are described in the introduction to IEC 61970-301.
"""
def __init__(self, MeasurementValues=None, *args, **kw_args):
"""Initialises a new 'MeasurementValueSource' instance.
@param MeasurementValues: The MeasurementValues updated by the source
"""
self._MeasurementValues = []
self.MeasurementValues = [] if MeasurementValues is None else MeasurementValues
super(MeasurementValueSource, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["MeasurementValues"]
_many_refs = ["MeasurementValues"]
def getMeasurementValues(self):
"""The MeasurementValues updated by the source
"""
return self._MeasurementValues
def setMeasurementValues(self, value):
for x in self._MeasurementValues:
x.MeasurementValueSource = None
for y in value:
y._MeasurementValueSource = self
self._MeasurementValues = value
MeasurementValues = property(getMeasurementValues, setMeasurementValues)
def addMeasurementValues(self, *MeasurementValues):
for obj in MeasurementValues:
obj.MeasurementValueSource = self
def removeMeasurementValues(self, *MeasurementValues):
for obj in MeasurementValues:
obj.MeasurementValueSource = None
|
rwl/PyCIM
|
CIM14/ENTSOE/Equipment/Meas/MeasurementValueSource.py
|
Python
|
mit
| 2,757
|
from random import random
import Queue
from shapes import Point
from spatial import convexHull
def randomPoint(k=None):
if k:
return Point(int(k * random()), int(k * random()))
return Point(random(), random())
def randomConvexPolygon(sample, k=None, n=3):
hull = convexHull([randomPoint(k=k) for i in range(sample)])
while hull.n < n:
hull = convexHull([randomPoint(k=k) for i in range(sample)])
return hull
def randomTiling(polygon, n, CONCAVE=False):
"""Generates a random concave tiling of a convex region."""
class PolygonWithArea(object):
def __init__(self, polygon):
self.polygon = polygon
self.area = polygon.area()
def __cmp__(self, that):
return -cmp(self.area, that.area)
# Start with initial convex region
initial = PolygonWithArea(polygon)
# Place in PQ to pop by area
pq = Queue.PriorityQueue(maxsize=n + 1)
pq.put(initial)
# Create some concave regions
triangles = []
for i in range(n):
# Split up largest polygon
polygon = pq.get().polygon
for polygon in polygon.split(INTERIOR=CONCAVE):
if polygon.n == 3:
triangles.append(polygon)
else:
pq.put(PolygonWithArea(polygon))
polygons = triangles
while pq.qsize():
polygons.append(pq.get().polygon)
return polygons
def randomConcaveTiling(polygon, n=10):
return randomTiling(polygon, n=n, CONCAVE=True)
def randomConvexTiling(polygon, n=10):
return randomTiling(polygon, n)
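# A usage sketch, assuming the shapes/spatial modules from this repository are
# importable: build a random convex hull from 30 integer points in a 100x100 box,
# then cut it into roughly 10 concave pieces.
if __name__ == '__main__':
    hull = randomConvexPolygon(30, k=100, n=5)
    pieces = randomConcaveTiling(hull, n=10)
    print('generated %d pieces' % len(pieces))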
|
crm416/point-location
|
geo/generator.py
|
Python
|
mit
| 1,590
|
from shape import Shape
import random
import numpy
"""
Author: Thomas Elgin (https://github.com/telgin)
"""
class Square(Shape):
"""
Square implementation.
"""
def randomizePoints(self):
"""
Randomizes the points, essentially creating a new small shape
somewhere within the bounds of the image
"""
self.points = numpy.uint32(numpy.zeros([4, 2]))
startsize = 5
# pick a random point on the image (upper left of square)
self.points[0] = [random.randint(0, self.imageBounds[0]-(startsize+1)),
random.randint(0, self.imageBounds[1]-(startsize+1))]
#upper right
self.points[1] = [self.points[0][0], self.points[0][1]+startsize]
#lower right
self.points[2] = [self.points[0][0]+startsize, self.points[0][1]+startsize]
#lower left
self.points[3] = [self.points[0][0]+startsize, self.points[0][1]]
def mutate(self, heat=10):
"""
Redefine mutate so we're not modifying individual vertices. The definition of a
square would not allow for that. This will translate or scale the vertices randomly.
:param heat: The length of the range of the random number. The range
is centered on the current number.
"""
# must have this in order to allow undoMutate
self.oldPoints = numpy.copy(self.points)
# randomly choose translate or scale
if random.random() > .5: # scale
center = numpy.average(self.points, axis=0)
# (python do-while) init points to something out of bounds, try different
# scale operations until you get one where all points are within the bounds
points = numpy.array([[-1,-1], [-1, -1], [-1, -1], [-1, -1]])
while not self.pointsBounded(points):
# calculate scale factor from heat
rand = (float(random.randint(0, heat)) / 100) / 2
if random.random() > .5:
scale = 1 + (rand * 2)
else:
scale = 1 - rand
# apply scale factor
points = numpy.copy(self.points)
points = numpy.transpose([points[:, 0] - center[0], points[:, 1] - center[1]])
points *= scale
points = numpy.transpose([points[:, 0] + center[0], points[:, 1] + center[1]])
points = numpy.round(points).astype(numpy.int)
self.points = points
else: # translate
# decide how much to translate based on heat
xmod = random.randint(0,heat)-(heat//2)
ymod = random.randint(0,heat)-(heat//2)
# compute min/max for x/y
maxx, maxy = numpy.max(self.points, 0).astype(numpy.int)
minx, miny = numpy.min(self.points, 0).astype(numpy.int)
# make sure translation does not result in any points outside bounds
xmod = max(xmod, -minx)
xmod = min(xmod, (self.imageBounds[0]-1)-maxx)
ymod = max(ymod, -miny)
ymod = min(ymod, (self.imageBounds[1]-1)-maxy)
self.points = numpy.transpose([self.points[:, 0] + xmod, self.points[:, 1] + ymod])
|
telgin/PolygonCompositionImages
|
square.py
|
Python
|
mit
| 3,359
|
from rx.disposable import Disposable, SingleAssignmentDisposable
from rx.observable import Producer
from rx.subject import Subject
from threading import RLock
class EventProducer(Producer):
def __init__(self, scheduler):
self.scheduler = scheduler
self.gate = RLock()
self.session = None
def getHandler(self, onNext):
raise NotImplementedError()
def addHandler(self, handler):
raise NotImplementedError()
def run(self, observer, cancel, setSink):
connection = None
with self.gate:
#
# A session object holds on to a single handler to the underlying event, feeding
# into a subject. It also ref counts the number of connections to the subject.
#
# When the ref count goes back to zero, the event handler is unregistered, and
# the session will reach out to reset the _session field to null under the _gate
# lock. Future subscriptions will cause a new session to be created.
#
if self.session == None:
self.session = self.Session(self)
connection = self.session.connect(observer)
return connection
class Session(object):
def __init__(self, parent):
self.parent = parent
self.removeHandler = None
self.subject = Subject()
self.count = 0
def connect(self, observer):
#
# We connect the given observer to the subject first, before performing any kind
# of initialization which will register an event handler. This is done to ensure
# we don't have a time gap between adding the handler and connecting the user's
# subject, e.g. when the ImmediateScheduler is used.
#
# [OK] Use of unsafe Subscribe: called on a known subject implementation.
#
connection = self.subject.subscribe(observer)
self.count += 1
if self.count == 1:
try:
self.initialize()
except Exception as e:
self.count -= 1
connection.dispose()
observer.onError(e)
return Disposable.empty()
def dispose():
connection.dispose()
with self.parent.gate:
self.count -=1
if self.count == 0:
self.parent.scheduler.schedule(self.removeHandler.dispose)
self.parent.session = None
return Disposable.create(dispose)
def initialize(self):
#
# When the ref count goes to zero, no-one should be able to perform operations on
# the session object anymore, because it gets nulled out.
#
assert self.removeHandler == None
self.removeHandler = SingleAssignmentDisposable()
#
# Conversion code is supposed to be a pure function and shouldn't be run on the
# scheduler, but the add handler call should. Notice the scheduler can be the
# ImmediateScheduler, causing synchronous invocation. This is the default when
# no SynchronizationContext is found (see QueryLanguage.Events.cs and search for
# the GetSchedulerForCurrentContext method).
#
onNext = self.parent.getHandler(self.subject.onNext)
self.parent.scheduler.scheduleWithState(onNext, self.addHandler)
def addHandler(self, scheduler, onNext):
try:
removeHandler = self.parent.addHandler(onNext)
except Exception as e:
self.subject.onError(e)
else:
self.removeHandler.disposable = removeHandler
#
# We don't propagate the exception to the OnError channel upon Dispose. This is
# not possible at this stage, because we've already auto-detached in the base
# class Producer implementation. Even if we would switch the OnError and auto-
# detach calls, it wouldn't work because the remove handler logic is scheduled
# on the given scheduler, causing asynchrony. We can't block waiting for the
# remove handler to run on the scheduler.
#
return Disposable.empty()
#end Session
class ClassicEventProducer(EventProducer):
def __init__(self, addHandler, removeHandler, scheduler):
super(ClassicEventProducer, self).__init__(scheduler)
self.addHandlerAction = addHandler
self.removeHandlerAction = removeHandler
def addHandler(self, handler):
self.addHandlerAction(handler)
return Disposable.create(lambda: self.removeHandlerAction(handler))
class FromEvent(ClassicEventProducer):
def __init__(self, addHandler, removeHandler, scheduler):
super(FromEvent, self).__init__(addHandler, removeHandler, scheduler)
def getHandler(self, onNext):
return onNext
|
akuendig/RxPython
|
rx/linq/fromEvent.py
|
Python
|
mit
| 4,544
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The core data store and collection logic for beets.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import sys
import unicodedata
import time
import re
from unidecode import unidecode
from beets import logging
from beets.mediafile import MediaFile, MutagenError, UnreadableFileError
from beets import plugins
from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile
from beets.util.functemplate import Template
from beets import dbcore
from beets.dbcore import types
import beets
log = logging.getLogger('beets')
# Library-specific query types.
class PathQuery(dbcore.FieldQuery):
"""A query that matches all items under a given path.
Matching can either be case-insensitive or case-sensitive. By
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
escape_re = re.compile(r'[\\_%]')
escape_char = b'\\'
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a
file or a directory.
`case_sensitive` can be a bool or `None`, indicating that the
behavior should depend on the filesystem.
"""
super(PathQuery, self).__init__(field, pattern, fast)
# By default, the case sensitivity depends on the filesystem
# that the query path is located on.
if case_sensitive is None:
path = util.bytestring_path(util.normpath(pattern))
case_sensitive = beets.util.case_sensitive(path)
self.case_sensitive = case_sensitive
# Use a normalized-case pattern for case-insensitive matches.
if not case_sensitive:
pattern = pattern.lower()
# Match the path as a single file.
self.file_path = util.bytestring_path(util.normpath(pattern))
# As a directory (prefix).
self.dir_path = util.bytestring_path(os.path.join(self.file_path, b''))
@classmethod
def is_path_query(cls, query_part):
"""Try to guess whether a unicode query part is a path query.
Condition: separator precedes colon and the file exists.
"""
colon = query_part.find(':')
if colon != -1:
query_part = query_part[:colon]
return (os.sep in query_part
and os.path.exists(syspath(normpath(query_part))))
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self):
if self.case_sensitive:
file_blob = buffer(self.file_path)
dir_blob = buffer(self.dir_path)
return '({0} = ?) || (substr({0}, 1, ?) = ?)'.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
escape = lambda m: self.escape_char + m.group(0)
dir_pattern = self.escape_re.sub(escape, self.dir_path)
dir_blob = buffer(dir_pattern + b'%')
file_pattern = self.escape_re.sub(escape, self.file_path)
file_blob = buffer(file_pattern)
return '({0} LIKE ? ESCAPE ?) || ({0} LIKE ? ESCAPE ?)'.format(
self.field), (file_blob, self.escape_char, dir_blob,
self.escape_char)
# Library-specific field types.
class DateType(types.Float):
# TODO representation should be `datetime` object
# TODO distinguish between date and time types
query = dbcore.query.DateQuery
def format(self, value):
return time.strftime(beets.config['time_format'].get(unicode),
time.localtime(value or 0))
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string, beets.config['time_format'].get(unicode))
)
except ValueError:
# Fall back to a plain timestamp number.
try:
return float(string)
except ValueError:
return self.null
class PathType(types.Type):
sql = u'BLOB'
query = PathQuery
model_type = bytes
def format(self, value):
return util.displayable_path(value)
def parse(self, string):
return normpath(bytestring_path(string))
def normalize(self, value):
if isinstance(value, unicode):
# Paths stored internally as encoded bytes.
return bytestring_path(value)
elif isinstance(value, buffer):
# SQLite must store bytestings as buffers to avoid decoding.
# We unwrap buffers to bytes.
return bytes(value)
else:
return value
def from_sql(self, sql_value):
return self.normalize(sql_value)
def to_sql(self, value):
if isinstance(value, bytes):
value = buffer(value)
return value
class MusicalKey(types.String):
"""String representing the musical key of a song.
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
r'db': 'c#',
r'eb': 'd#',
r'gb': 'f#',
r'ab': 'g#',
r'bb': 'a#',
}
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
key = re.sub(flat, sharp, key)
key = re.sub(r'[\W\s]+minor', 'm', key)
key = re.sub(r'[\W\s]+major', '', key)
return key.capitalize()
def normalize(self, key):
if key is None:
return None
else:
return self.parse(key)
class DurationType(types.Float):
"""Human-friendly (M:SS) representation of a time interval."""
query = dbcore.query.DurationQuery
def format(self, value):
if not beets.config['format_raw_length'].get(bool):
return beets.ui.human_seconds_short(value or 0.0)
else:
return value
def parse(self, string):
try:
# Try to format back hh:ss to seconds.
return util.raw_seconds_short(string)
except ValueError:
# Fall back to a plain float.
try:
return float(string)
except ValueError:
return self.null
# Library-specific sort types.
class SmartArtistSort(dbcore.query.Sort):
"""Sort by artist (either album artist or track artist),
prioritizing the sort field over the raw field.
"""
def __init__(self, model_cls, ascending=True, case_insensitive=True):
self.album = model_cls is Album
self.ascending = ascending
self.case_insensitive = case_insensitive
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
field = 'albumartist' if self.album else 'artist'
collate = 'COLLATE NOCASE' if self.case_insensitive else ''
return ('(CASE {0}_sort WHEN NULL THEN {0} '
'WHEN "" THEN {0} '
'ELSE {0}_sort END) {1} {2}').format(field, collate, order)
def sort(self, objs):
if self.album:
field = lambda a: a.albumartist_sort or a.albumartist
else:
field = lambda i: i.artist_sort or i.artist
if self.case_insensitive:
key = lambda x: field(x).lower()
else:
key = field
return sorted(objs, key=key, reverse=not self.ascending)
# Special path format key.
PF_KEY_DEFAULT = 'default'
# Exceptions.
class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
error, and an unhandled Mutagen exception.
"""
def __init__(self, path, reason):
"""Create an exception describing an operation on the file at
`path` with the underlying (chained) exception `reason`.
"""
super(FileOperationError, self).__init__(path, reason)
self.path = path
self.reason = reason
def __unicode__(self):
"""Get a string representing the error. Describes both the
underlying reason and the file path in question.
"""
return u'{0}: {1}'.format(
util.displayable_path(self.path),
unicode(self.reason)
)
def __str__(self):
return unicode(self).encode('utf8')
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`).
"""
def __unicode__(self):
return u'error reading ' + super(ReadError, self).__unicode__()
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`).
"""
def __unicode__(self):
return u'error writing ' + super(WriteError, self).__unicode__()
# Item and Album model classes.
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
_format_config_key = None
"""Config key that specifies how an instance should be formatted.
"""
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
funcs.update(plugins.template_funcs())
return funcs
def store(self):
super(LibModel, self).store()
plugins.send('database_change', lib=self._db, model=self)
def remove(self):
super(LibModel, self).remove()
plugins.send('database_change', lib=self._db, model=self)
def add(self, lib=None):
super(LibModel, self).add(lib)
plugins.send('database_change', lib=self._db, model=self)
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].get(unicode)
result = self.evaluate_template(spec)
if isinstance(spec, bytes):
# if spec is a byte string then we must return a one as well
return result.encode('utf8')
else:
return result
def __str__(self):
return format(self).encode('utf8')
def __unicode__(self):
return format(self)
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
Album-level fields take precedence if `for_path` is true.
"""
def __init__(self, item, for_path=False):
super(FormattedItemMapping, self).__init__(item, for_path)
self.album = item.get_album()
self.album_keys = []
if self.album:
for key in self.album.keys(True):
if key in Album.item_keys or key not in item._fields.keys():
self.album_keys.append(key)
self.all_keys = set(self.model_keys).union(self.album_keys)
def _get(self, key):
"""Get the value for a key, either from the album or the item.
Raise a KeyError for invalid keys.
"""
if self.for_path and key in self.album_keys:
return self._get_formatted(self.album, key)
elif key in self.model_keys:
return self._get_formatted(self.model, key)
elif key in self.album_keys:
return self._get_formatted(self.album, key)
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get the value for a key. Certain unset values are remapped.
"""
value = self._get(key)
# `artist` and `albumartist` fields fall back to one another.
# This is helpful in path formats when the album artist is unset
# on as-is imports.
if key == 'artist' and not value:
return self._get('albumartist')
elif key == 'albumartist' and not value:
return self._get('artist')
else:
return value
def __iter__(self):
return iter(self.all_keys)
def __len__(self):
return len(self.all_keys)
class Item(LibModel):
_table = 'items'
_flex_table = 'item_attributes'
_fields = {
'id': types.PRIMARY_ID,
'path': PathType(),
'album_id': types.FOREIGN_ID,
'title': types.STRING,
'artist': types.STRING,
'artist_sort': types.STRING,
'artist_credit': types.STRING,
'album': types.STRING,
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'genre': types.STRING,
'composer': types.STRING,
'grouping': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'track': types.PaddedInt(2),
'tracktotal': types.PaddedInt(2),
'disc': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'lyrics': types.STRING,
'comments': types.STRING,
'bpm': types.INTEGER,
'comp': types.BOOLEAN,
'mb_trackid': types.STRING,
'mb_albumid': types.STRING,
'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
'acoustid_id': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'media': types.STRING,
'albumdisambig': types.STRING,
'disctitle': types.STRING,
'encoder': types.STRING,
'rg_track_gain': types.NULL_FLOAT,
'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
'initial_key': MusicalKey(),
'length': DurationType(),
'bitrate': types.ScaledInt(1000, u'kbps'),
'format': types.STRING,
'samplerate': types.ScaledInt(1000, u'kHz'),
'bitdepth': types.INTEGER,
'channels': types.INTEGER,
'mtime': DateType(),
'added': DateType(),
}
_search_fields = ('artist', 'title', 'comments',
'album', 'albumartist', 'genre')
_types = {
'data_source': types.STRING,
}
_media_fields = set(MediaFile.readable_fields()) \
.intersection(_fields.keys())
"""Set of item fields that are backed by `MediaFile` fields.
Any kind of field (fixed, flexible, and computed) may be a media
field. Only these fields are read from disk in `read` and written in
`write`.
"""
_media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys())
"""Set of item fields that are backed by *writable* `MediaFile` tag
fields.
This excludes fields that represent audio data, such as `bitrate` or
`length`.
"""
_formatter = FormattedItemMapping
_sorts = {'artist': SmartArtistSort}
_format_config_key = 'format_item'
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
getters['singleton'] = lambda i: i.album_id is None
getters['filesize'] = Item.try_filesize # In bytes.
return getters
@classmethod
def from_path(cls, path):
"""Creates a new item from the media file at the specified path.
"""
# Initiate with values that aren't read from files.
i = cls(album_id=None)
i.read(path)
i.mtime = i.current_mtime() # Initial mtime.
return i
def __setitem__(self, key, value):
"""Set the item's value for a standard field or a flexattr.
"""
# Encode unicode paths and read buffers.
if key == 'path':
if isinstance(value, unicode):
value = bytestring_path(value)
elif isinstance(value, buffer):
value = bytes(value)
if key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
super(Item, self).__setitem__(key, value)
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
specified, it is not reset (as it might otherwise be).
"""
super(Item, self).update(values)
if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime']
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
"""
if not self._db:
return None
return self._db.get_album(self)
# Interaction with file metadata.
def read(self, read_path=None):
"""Read the metadata from the associated file.
If `read_path` is specified, read metadata from that file
instead. Updates all the properties in `_media_fields`
from the media file.
Raises a `ReadError` if the file could not be read.
"""
if read_path is None:
read_path = self.path
else:
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except (OSError, IOError, UnreadableFileError) as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
if isinstance(value, (int, long)):
if value.bit_length() > 63:
value = 0
self[key] = value
# Database's mtime should now reflect the on-disk value.
if read_path == self.path:
self.mtime = self.current_mtime()
self.path = read_path
def write(self, path=None, tags=None):
"""Write the item's metadata to a media file.
All fields in `_media_fields` are written to disk according to
the values on this object.
`path` is the path of the mediafile to write the data to. It
defaults to the item's path.
        `tags` is a dictionary of additional metadata that should be
written to the file. (These tags need not be in `_media_fields`.)
Can raise either a `ReadError` or a `WriteError`.
"""
if path is None:
path = self.path
else:
path = normpath(path)
# Get the data to write to the file.
item_tags = dict(self)
item_tags = {k: v for k, v in item_tags.items()
if k in self._media_fields} # Only write media fields.
if tags is not None:
item_tags.update(tags)
plugins.send('write', item=self, path=path, tags=item_tags)
# Open the file.
try:
mediafile = MediaFile(syspath(path),
id3v23=beets.config['id3v23'].get(bool))
except (OSError, IOError, UnreadableFileError) as exc:
raise ReadError(self.path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except (OSError, IOError, MutagenError) as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
if path == self.path:
self.mtime = self.current_mtime()
plugins.send('after_write', item=self, path=path)
def try_write(self, path=None, tags=None):
"""Calls `write()` but catches and logs `FileOperationError`
exceptions.
        Returns `False` if an exception was caught and `True` otherwise.
"""
try:
self.write(path, tags)
return True
except FileOperationError as exc:
log.error("{0}", exc)
return False
def try_sync(self, write=None):
"""Synchronize the item with the database and the media file
tags, updating them with this object's current state.
By default, the current `path` for the item is used to write
tags. If `write` is `False`, no tags are written. If `write` is
a path, tags are written to that file instead.
Similar to calling :meth:`write` and :meth:`store`.
"""
if write is True:
write = None
if write is not False:
self.try_write(path=write)
self.store()
# Files themselves.
def move_file(self, dest, copy=False, link=False):
"""Moves or copies the item's file, updating the path value if
        the move succeeds. If a file already exists at ``dest``, the
        destination path is slightly modified to be unique.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if copy:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif link:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
else:
plugins.send("before_item_moved", item=self, source=self.path,
destination=dest)
util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path,
destination=dest)
# Either copying or moving succeeded, so update the stored path.
self.path = dest
def current_mtime(self):
"""Returns the current mtime of the file, rounded to the nearest
integer.
"""
return int(os.path.getmtime(syspath(self.path)))
def try_filesize(self):
"""Get the size of the underlying file in bytes.
If the file is missing, return 0 (and log a warning).
"""
try:
return os.path.getsize(syspath(self.path))
except (OSError, Exception) as exc:
log.warning(u'could not get filesize: {0}', exc)
return 0
# Model methods.
def remove(self, delete=False, with_album=True):
"""Removes the item. If `delete`, then the associated file is
removed from disk. If `with_album`, then the item's album (if
        any) is removed if the item was the last one in the album.
"""
super(Item, self).remove()
# Remove the album if it is empty.
if with_album:
album = self.get_album()
if album and not album.items():
album.remove(delete, False)
# Send a 'item_removed' signal to plugins
plugins.send('item_removed', item=self)
# Delete the associated file.
if delete:
util.remove(self.path)
util.prune_dirs(os.path.dirname(self.path), self._db.directory)
self._db._memotable = {}
def move(self, copy=False, link=False, basedir=None, with_album=True):
"""Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location.
        If `copy` is true, the file is copied rather than moved.
Similarly, `link` creates a symlink instead.
basedir overrides the library base directory for the
destination.
If the item is in an album, the album is given an opportunity to
move its art. (This can be disabled by passing
with_album=False.)
The item is stored to the database if it is in the database, so
any dirty fields prior to the move() call will be written as a
side effect. You probably want to call save() to commit the DB
transaction.
"""
self._check_db()
dest = self.destination(basedir=basedir)
# Create necessary ancestry for the move.
util.mkdirall(dest)
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, copy, link)
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(copy)
album.store()
# Prune vacated directory.
if not copy:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating.
def destination(self, fragment=False, basedir=None, platform=None,
path_formats=None):
"""Returns the path in the library directory designated for the
item (i.e., where the file ought to be). fragment makes this
method return just the path fragment underneath the root library
directory; the path is also returned as Unicode instead of
encoded as a bytestring. basedir can override the library's base
directory for the destination.
"""
self._check_db()
platform = platform or sys.platform
basedir = basedir or self._db.directory
path_formats = path_formats or self._db.path_formats
# Use a path format based on a query, falling back on the
# default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
break
else:
# No query matched; fall back to default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
break
else:
assert False, "no default path format"
if isinstance(path_format, Template):
subpath_tmpl = path_format
else:
subpath_tmpl = Template(path_format)
# Evaluate the selected template.
subpath = self.evaluate_template(subpath_tmpl, True)
# Prepare path for output: normalize Unicode characters.
if platform == 'darwin':
subpath = unicodedata.normalize('NFD', subpath)
else:
subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']:
subpath = unidecode(subpath)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
# When zero, try to determine from filesystem.
maxlen = util.max_filename_length(self._db.directory)
subpath, fellback = util.legalize_path(
subpath, self._db.replacements, maxlen,
os.path.splitext(self.path)[1], fragment
)
if fellback:
# Print an error message if legalization fell back to
# default replacements because of the maximum length.
log.warning('Fell back to default replacements when naming '
'file {}. Configure replacements to avoid lengthening '
'the filename.', subpath)
if fragment:
return subpath
else:
return normpath(os.path.join(basedir, subpath))
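# Illustrative sketch of the Item lifecycle defined above (the path and the
# `lib` Library instance are placeholders, not part of this module):
#
#     item = Item.from_path('/music/incoming/song.mp3')  # read tags from disk
#     lib.add(item)                 # attach the item to a Library
#     item.move()                   # relocate it under lib.directory via destination()
#     item.try_sync(write=True)     # write tags and store() in one call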
class Album(LibModel):
"""Provides access to information about albums stored in a
library. Reflects the library's "albums" table, including album
art.
"""
_table = 'albums'
_flex_table = 'album_attributes'
_always_dirty = True
_fields = {
'id': types.PRIMARY_ID,
'artpath': PathType(),
'added': DateType(),
'albumartist': types.STRING,
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'album': types.STRING,
'genre': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
'day': types.PaddedInt(2),
'disctotal': types.PaddedInt(2),
'comp': types.BOOLEAN,
'mb_albumid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
'catalognum': types.STRING,
'script': types.STRING,
'language': types.STRING,
'country': types.STRING,
'albumstatus': types.STRING,
'albumdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
}
_search_fields = ('album', 'albumartist', 'genre')
_types = {
'path': PathType(),
'data_source': types.STRING,
}
_sorts = {
'albumartist': SmartArtistSort,
'artist': SmartArtistSort,
}
item_keys = [
'added',
'albumartist',
'albumartist_sort',
'albumartist_credit',
'album',
'genre',
'year',
'month',
'day',
'disctotal',
'comp',
'mb_albumid',
'mb_albumartistid',
'albumtype',
'label',
'mb_releasegroupid',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'rg_album_gain',
'rg_album_peak',
'original_year',
'original_month',
'original_day',
]
"""List of keys that are set on an album's items.
"""
_format_config_key = 'format_album'
@classmethod
def _getters(cls):
# In addition to plugin-provided computed fields, also expose
# the album's directory as `path`.
getters = plugins.album_field_getters()
getters['path'] = Album.item_dir
getters['albumtotal'] = Album._albumtotal
return getters
def items(self):
"""Returns an iterable over the items associated with this
album.
"""
return self._db.items(dbcore.MatchQuery('album_id', self.id))
def remove(self, delete=False, with_items=True):
"""Removes this album and all its associated items from the
library. If delete, then the items' files are also deleted
from disk, along with any album art. The directories
containing the album are also removed (recursively) if empty.
Set with_items to False to avoid removing the album's items.
"""
super(Album, self).remove()
# Delete art file.
if delete:
artpath = self.artpath
if artpath:
util.remove(artpath)
# Remove (and possibly delete) the constituent items.
if with_items:
for item in self.items():
item.remove(delete, False)
def move_art(self, copy=False, link=False):
"""Move or copy any existing album art so that it remains in the
same directory as the items.
"""
old_art = self.artpath
if not old_art:
return
new_art = self.art_destination(old_art)
if new_art == old_art:
return
new_art = util.unique_path(new_art)
log.debug(u'moving album art {0} to {1}',
util.displayable_path(old_art),
util.displayable_path(new_art))
if copy:
util.copy(old_art, new_art)
elif link:
util.link(old_art, new_art)
else:
util.move(old_art, new_art)
self.artpath = new_art
# Prune old path when moving.
if not copy:
util.prune_dirs(os.path.dirname(old_art),
self._db.directory)
def move(self, copy=False, link=False, basedir=None):
"""Moves (or copies) all items to their destination. Any album
art moves along with them. basedir overrides the library base
directory for the destination. The album is stored to the
database, persisting any modifications to its metadata.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(copy, link, basedir=basedir, with_album=False)
# Move art.
self.move_art(copy, link)
self.store()
def item_dir(self):
"""Returns the directory containing the album's first item,
provided that such an item exists.
"""
item = self.items().get()
if not item:
raise ValueError('empty album')
return os.path.dirname(item.path)
def _albumtotal(self):
"""Return the total number of tracks on all discs on the album
"""
if self.disctotal == 1 or not beets.config['per_disc_numbering']:
return self.items()[0].tracktotal
counted = []
total = 0
for item in self.items():
if item.disc in counted:
continue
total += item.tracktotal
counted.append(item.disc)
if len(counted) == self.disctotal:
break
return total
def art_destination(self, image, item_dir=None):
"""Returns a path to the destination for the album art image
for the album. `image` is the path of the image that will be
moved there (used for its extension).
The path construction uses the existing path of the album's
items, so the album must contain at least one item or
item_dir must be provided.
"""
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = Template(beets.config['art_filename'].get(unicode))
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']:
subpath = unidecode(subpath)
subpath = util.sanitize_path(subpath,
replacements=self._db.replacements)
subpath = bytestring_path(subpath)
_, ext = os.path.splitext(image)
dest = os.path.join(item_dir, subpath + ext)
return bytestring_path(dest)
def set_art(self, path, copy=True):
"""Sets the album's cover art to the image at the given path.
The image is copied (or moved) into place, replacing any
existing art.
Sends an 'art_set' event with `self` as the sole argument.
"""
path = bytestring_path(path)
oldart = self.artpath
artdest = self.art_destination(path)
if oldart and samefile(path, oldart):
# Art already set.
return
elif samefile(path, artdest):
# Art already in place.
self.artpath = path
return
# Normal operation.
if oldart == artdest:
util.remove(oldart)
artdest = util.unique_path(artdest)
if copy:
util.copy(path, artdest)
else:
util.move(path, artdest)
self.artpath = artdest
plugins.send('art_set', album=self)
def store(self):
"""Update the database with the album information. The album's
tracks are also updated.
"""
# Get modified track fields.
track_updates = {}
for key in self.item_keys:
if key in self._dirty:
track_updates[key] = self[key]
with self._db.transaction():
super(Album, self).store()
if track_updates:
for item in self.items():
for key, value in track_updates.items():
item[key] = value
item.store()
def try_sync(self, write=True):
"""Synchronize the album and its items with the database and
their files by updating them with this object's current state.
`write` indicates whether to write tags to the item files.
"""
self.store()
for item in self.items():
item.try_sync(bool(write))
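# Illustrative sketch of the Album API above (assumes `lib` is a Library and
# `items` is a non-empty list of Item objects; names are placeholders):
#
#     album = lib.add_album(items)        # create an album from existing items
#     album.albumartist = u'Various Artists'
#     album.store()                       # item_keys changes propagate to the items
#     album.move()                        # move the items and the album art together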
# Query construction helpers.
def parse_query_parts(parts, model_cls):
"""Given a beets query string as a list of components, return the
`Query` and `Sort` they represent.
Like `dbcore.parse_sorted_query`, with beets query prefixes and
special path query detection.
"""
# Get query types and their prefix characters.
prefixes = {':': dbcore.query.RegexpQuery}
prefixes.update(plugins.queries())
# Special-case path-like queries, which are non-field queries
# containing path separators (/).
path_parts = []
non_path_parts = []
for s in parts:
if PathQuery.is_path_query(s):
path_parts.append(s)
else:
non_path_parts.append(s)
query, sort = dbcore.parse_sorted_query(
model_cls, non_path_parts, prefixes
)
# Add path queries to aggregate query.
# Match field / flexattr depending on whether the model has the path field
fast_path_query = 'path' in model_cls._fields
query.subqueries += [PathQuery('path', s, fast_path_query)
for s in path_parts]
return query, sort
def parse_query_string(s, model_cls):
"""Given a beets query string, return the `Query` and `Sort` they
represent.
The string is split into components using shell-like syntax.
"""
assert isinstance(s, unicode), "Query is not unicode: {0!r}".format(s)
try:
parts = util.shlex_split(s)
except ValueError as exc:
raise dbcore.InvalidQueryError(s, exc)
return parse_query_parts(parts, model_cls)
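# Example use of the query helpers above (query strings are illustrative;
# `lib` is assumed to be a Library instance):
#
#     query, sort = parse_query_string(u'artist:beatles year:1969', Item)
#     results = lib.items(query)
#
# Terms that look like paths (contain a separator) are turned into PathQuery
# objects by parse_query_parts() rather than ordinary field queries.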
# The Library: interface to the database.
class Library(dbcore.Database):
"""A database of music containing songs and albums.
"""
_models = (Item, Album)
def __init__(self, path='library.blb',
directory='~/Music',
path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),),
replacements=None):
if path != ':memory:':
self.path = bytestring_path(normpath(path))
super(Library, self).__init__(path)
self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats
self.replacements = replacements
self._memotable = {} # Used for template substitution performance.
# Adding objects to the database.
def add(self, obj):
"""Add the :class:`Item` or :class:`Album` object to the library
database. Return the object's new id.
"""
obj.add(self)
self._memotable = {}
return obj.id
def add_album(self, items):
"""Create a new album consisting of a list of items.
The items are added to the database if they don't yet have an
        ID. Return a new :class:`Album` object. The list of items must not
be empty.
"""
if not items:
raise ValueError(u'need at least one item')
# Create the album structure using metadata from the first item.
values = dict((key, items[0][key]) for key in Album.item_keys)
album = Album(self, **values)
# Add the album structure and set the items' album_id fields.
# Store or add the items.
with self.transaction():
album.add(self)
for item in items:
item.album_id = album.id
if item.id is None:
item.add(self)
else:
item.store()
return album
# Querying.
def _fetch(self, model_cls, query, sort=None):
"""Parse a query and fetch. If a order specification is present
in the query string the `sort` argument is ignored.
"""
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, basestring):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentTypeError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
# provided sort.
if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
sort = parsed_sort
return super(Library, self)._fetch(
model_cls, query, sort
)
@staticmethod
def get_default_album_sort():
"""Get a :class:`Sort` object for albums from the config option.
"""
return dbcore.sort_from_strings(
Album, beets.config['sort_album'].as_str_seq())
@staticmethod
def get_default_item_sort():
"""Get a :class:`Sort` object for items from the config option.
"""
return dbcore.sort_from_strings(
Item, beets.config['sort_item'].as_str_seq())
def albums(self, query=None, sort=None):
"""Get :class:`Album` objects matching the query.
"""
return self._fetch(Album, query, sort or self.get_default_album_sort())
def items(self, query=None, sort=None):
"""Get :class:`Item` objects matching the query.
"""
return self._fetch(Item, query, sort or self.get_default_item_sort())
# Convenience accessors.
def get_item(self, id):
"""Fetch an :class:`Item` by its ID. Returns `None` if no match is
found.
"""
return self._get(Item, id)
def get_album(self, item_or_id):
"""Given an album ID or an item associated with an album, return
an :class:`Album` object for the album. If no such album exists,
returns `None`.
"""
if isinstance(item_or_id, int):
album_id = item_or_id
else:
album_id = item_or_id.album_id
if album_id is None:
return None
return self._get(Album, album_id)
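# Minimal usage sketch for the Library defined above (paths and queries are
# placeholders):
#
#     lib = Library('library.db', directory='/music')
#     lib.add(item)                                # store a single Item
#     for item in lib.items(u'artist:radiohead'):  # string queries are parsed
#         print(item.title)
#     albums = lib.albums(u'year:1991')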
# Default path template resources.
def _int_arg(s):
"""Convert a string argument to an integer for use in a template
function. May raise a ValueError.
"""
return int(s.strip())
class DefaultTemplateFunctions(object):
"""A container class for the default functions provided to path
templates. These functions are contained in an object to provide
additional context to the functions -- specifically, the Item being
evaluated.
"""
_prefix = b'tmpl_'
def __init__(self, item=None, lib=None):
"""Parametrize the functions. If `item` or `lib` is None, then
some functions (namely, ``aunique``) will always evaluate to the
empty string.
"""
self.item = item
self.lib = lib
def functions(self):
"""Returns a dictionary containing the functions defined in this
object. The keys are function names (as exposed in templates)
and the values are Python functions.
"""
out = {}
for key in self._func_names:
out[key[len(self._prefix):]] = getattr(self, key)
return out
@staticmethod
def tmpl_lower(s):
"""Convert a string to lower case."""
return s.lower()
@staticmethod
def tmpl_upper(s):
"""Covert a string to upper case."""
return s.upper()
@staticmethod
def tmpl_title(s):
"""Convert a string to title case."""
return s.title()
@staticmethod
def tmpl_left(s, chars):
"""Get the leftmost characters of a string."""
return s[0:_int_arg(chars)]
@staticmethod
def tmpl_right(s, chars):
"""Get the rightmost characters of a string."""
return s[-_int_arg(chars):]
@staticmethod
def tmpl_if(condition, trueval, falseval=u''):
"""If ``condition`` is nonempty and nonzero, emit ``trueval``;
otherwise, emit ``falseval`` (if provided).
"""
try:
int_condition = _int_arg(condition)
except ValueError:
if condition.lower() == "false":
return falseval
else:
condition = int_condition
if condition:
return trueval
else:
return falseval
@staticmethod
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents.
"""
return unidecode(s)
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`.
"""
cur_fmt = beets.config['time_format'].get(unicode)
return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None):
"""Generate a string that is guaranteed to be unique among all
        albums in the library that share the same set of keys. A field
        from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
return u''
if self.item.album_id is None:
return u''
memokey = ('aunique', keys, disam, self.item.album_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
keys = keys or 'albumartist album'
disam = disam or 'albumtype year label catalognum albumdisambig'
keys = keys.split()
disam = disam.split()
album = self.lib.get_album(self.item)
if not album:
# Do nothing for singletons.
self.lib._memotable[memokey] = u''
return u''
# Find matching albums to disambiguate with.
subqueries = []
for key in keys:
value = album.get(key, '')
subqueries.append(dbcore.MatchQuery(key, value))
albums = self.lib.albums(dbcore.AndQuery(subqueries))
        # If there's only one album matching these details, then do
# nothing.
if len(albums) == 1:
self.lib._memotable[memokey] = u''
return u''
# Find the first disambiguator that distinguishes the albums.
for disambiguator in disam:
# Get the value for each album for the current field.
disam_values = set([a.get(disambiguator, '') for a in albums])
# If the set of unique values is equal to the number of
# albums in the disambiguation set, we're done -- this is
# sufficient disambiguation.
if len(disam_values) == len(albums):
break
else:
# No disambiguator distinguished all fields.
res = u' {0}'.format(album.id)
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = album.formatted(True).get(disambiguator)
res = u' [{0}]'.format(disam_value)
self.lib._memotable[memokey] = res
return res
# Get the names of the tmpl_* functions in the above class.
DefaultTemplateFunctions._func_names = \
[s for s in dir(DefaultTemplateFunctions)
if s.startswith(DefaultTemplateFunctions._prefix)]
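# Illustrative use of the template functions above in a path format (the
# format string is an example, not the configured default):
#
#     $albumartist/$album%aunique{}/$track $title
#
# Here %aunique{} appends a disambiguator such as " [2010]" when two albums
# share the same artist and title; %if{...}, %left{...}, %upper{...} and the
# rest map to the tmpl_* methods via DefaultTemplateFunctions.functions().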
|
LordSputnik/beets
|
beets/library.py
|
Python
|
mit
| 49,018
|
# lesson 2.1 - data types
year = 2013 # integer number
age = 13.75 # decimal number
name = "John" # string
# now print them out to screen
#print (year)
#print (age)
#print (name)
year = int(input("Enter the Year "))
age = float(input("Enter your age as a decimal "))
name = input("Enter your name ")
# place a comma (,) before and after each variable to join it to the string
print ("Your name is ",name, " you were born in ",year," and are ",age," years old <Commas only>")
# or STR() function will convert a number to a string data type
print ("Your name is ",name, " you were born in "+ str(year)+" and are "+str(age)+" years old <STR functiomn>")
|
paulcockram7/paulcockram7.github.io
|
10python/l02/Ex3AddInput.py
|
Python
|
mit
| 660
|
from igraph import *
from random import sample,random,choice
from core import Algorithm
from egraphs import FBEgoGraph
class AlbatrossSampling(Algorithm):
def update_graph(self, start_node, new_node):
g = self.sampled_graph
start_id = g.vs['name'].index(start_node)
if new_node['name'] not in g.vs['name']:
g.add_vertex(**new_node)
index = g.vs['name'].index(new_node['name'])
g.add_edge(start_id,index)
else:
index = g.vs['name'].index(new_node['name'])
if g.get_eid(start_id, index, directed=False, error=False) == -1:
g.add_edge(start_id,index)
def run(self,k,p_jump=0.02):
start_node = choice(self.sampled_graph.vs['name'])
n_attribute = len(self.sampled_graph.vertex_attributes())-2
i = 0
while i < k:
query_result = self.egraph.query_node(start_node,n_attribute)
new_node = choice(query_result)
self.update_graph(start_node,new_node)
if random() < p_jump:
start_node = choice(self.sampled_graph.vs['name'])
elif random() < float(self.sampled_graph.degree(start_node))/self.sampled_graph.degree(new_node['name']):
start_node = new_node['name']
i += 1
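# Note on AlbatrossSampling.run above (descriptive comment, not from the
# original author): with probability p_jump the walker teleports to a random
# already-sampled node; otherwise the step to new_node is accepted with
# probability deg(start)/deg(new), a Metropolis-Hastings-style correction
# that counteracts the random walk's bias toward high-degree nodes.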
if __name__ == "__main__":
fbego_graph = FBEgoGraph('data/egofb.txt')
    sampler = AlbatrossSampling(fbego_graph)
    print sampler.validate()
|
ryaninhust/sampling
|
albatross_sampling.py
|
Python
|
mit
| 1,462
|
#!/usr/bin/env python
# Filename:DBCompiler.py
# Edited by chow 2013.09.06
# ChangeList:
# fix a bug which may cause the script can't find the config.xml
import logging
import re
import os
import sys
import getopt
import xml.etree.ElementTree as et
pathList={}
class Compiler:
def main(self,argv):
try:
opts,args=getopt.getopt(argv[1:],"hl:f:d:")
except getopt.GetoptError:
sys.exit()
#print(os.path.split(argv[0])[0]);
for opt,arg in opts:
if opt=='-h':
self.usage()
sys.exit()
elif opt=='-l':
pathList['configFilePath']=arg
elif opt=='-f':
pathList['filePath']=arg
elif opt=='-d':
pathList['dirPath']=arg
else:
assert False,"unhandled option"
if 'configFilePath' in pathList:
self.loadTheConfig(pathList['configFilePath'])
else:
#use the default path to find the config.xml
self.loadTheConfig(os.path.join(os.path.split(argv[0])[0],"config.xml"))
#print(pathList)
self.prepareToCompile(pathList)
#end of main
def loadTheConfig(self,configFile='config.xml'):
root=et.parse(configFile).getroot()
for child in root:
if child.tag in pathList:
pass
else:
pathList[child.tag]=child.text
def prepareToCompile(self,pathList):
if 'filePath' in pathList:
if os.path.isfile(pathList['filePath']):
dirPath,filename=os.path.split(pathList['filePath'])
pathList['dirPath']=dirPath
logging.basicConfig(filename = os.path.join(pathList['dirPath'], 'log.txt'), level = logging.DEBUG,filemode = 'w', format = '%(asctime)s - %(levelname)s: %(message)s')
self.beforCompile(filename,pathList)
else:
self.logTofile("Not supported file,only .as file is supported")
elif 'dirPath' in pathList:
if os.path.isdir(pathList['dirPath']):
logging.basicConfig(filename = os.path.join(pathList['dirPath'], 'log.txt'), level = logging.DEBUG,filemode = 'w', format = '%(asctime)s - %(levelname)s: %(message)s')
files=os.listdir(pathList['dirPath'])
for f in files:
self.beforCompile(f,pathList)
else:
self.logTofile("Error in file(Dir) path")
#end of prepareToCompile
def beforCompile(self,f,pathList):
file=os.path.join(pathList['dirPath'],f)
if (f.split('.')[-1]=='as'):
self.compileFile(file,pathList)
else:
self.logTofile("Not supported file,only .as file is supported")
def compileFile(self,f,pathList):
command = os.path.join(pathList["flexSDKPath"],"mxmlc ")+f
#print command
#print_command="mxmlc "+os.path.basename(f)
if ("libraryPath" in pathList):
command =command + " -library-path+="+pathList["libraryPath"]
#print_command=print_command+" -library-path+="
#for libpath in pathList["libraryPath"].split(";"):
#print_command=print_command+os.path.basename(libpath)+";"
command =command + " -static-link-runtime-shared-libraries=" +pathList["staticLinkRuntimeSharedLibraries"]
#print_command=print_command+" -static-link-runtime-shared-libraries=" +pathList["staticLinkRuntimeSharedLibraries"]
#self.logTofile("Compile command:"+print_command)
command =command +">/dev/null 2>"+os.path.join(pathList['dirPath'],"errlog.log")
#result=commands.getstatus(command)
result= os.popen(command).read()
for line in result.split('\n'):
if line.split():
self.logTofile(line)
if ("outputPath" in pathList ):
if not os.path.exists(pathList["outputPath"]):
os.mkdir(pathList["outputPath"])
if 'nt'==os.name:
result=os.popen("move /y " +f.split('.')[0]+".swf "+ pathList["outputPath"]).read()
else:
result=os.popen("mv -f "+f.split('.')[0]+".swf "+ pathList["outputPath"]).read()
for line in result.split('\n'):
if line.split():
self.logTofile(line,True)
self.logTofile("Finish {} compilation".format(f))
#end of compileFile
def logTofile(self,msg='\n',toFile=True):
if msg=='\n':
return
if (toFile):
m=re.search('error',msg,re.IGNORECASE)
if bool(m):
logging.error(msg)
else:
logging.info(msg)
#text=re.sub(r'C:\\',' ',msg)
print(msg)
#End of logTofile
def usage(self):
print("""DBAutoCompiler [-h|-l|-f|-d]
Options and arguments:
-h : Help
-l : Load the config file
-f : The file which is need to compile
        -d : The directory of files to compile
""")
#End of usage
if __name__=='__main__':
Compiler().main(sys.argv)
|
EffectHub/effecthub
|
scripts/DBCompiler.py
|
Python
|
mit
| 6,562
|
"""General utilities, such as exception classes."""
import typing
# Titanic-specific exceptions
class TitanicError(Exception):
"""Base Titanic error."""
class RoundingError(TitanicError):
"""Rounding error, such as attempting to round NaN."""
class PrecisionError(RoundingError):
"""Insufficient precision to perform rounding."""
# some common data structures
class ImmutableDict(dict):
def __delitem__(self, key):
raise ValueError('ImmutableDict cannot be modified: attempt to delete {}'
.format(repr(key)))
def __setitem__(self, key, value):
raise ValueError('ImmutableDict cannot be modified: attempt to assign [{}] = {}'
.format(repr(key), repr(value)))
def clear(self):
raise ValueError('ImmutableDict cannot be modified: attempt to clear')
def pop(self, key, *args):
raise ValueError('ImmutableDict cannot be modified: attempt to pop {}'
.format(repr(key)))
def popitem(self):
raise ValueError('ImmutableDict cannot be modified: attempt to popitem')
def setdefault(self, key, default=None):
raise ValueError('ImmutableDict cannot be modified: attempt to setdefault {}, default={}'
.format(repr(key), repr(default)))
def update(self, *args, **kwargs):
raise ValueError('ImmutableDict cannot be modified: attempt to update')
@classmethod
def fromkeys(cls, *args):
return cls(dict.fromkeys(*args))
# Useful things
def bitmask(n: int) -> int:
"""Produces a bitmask of n 1s if n is positive, or n 0s if n is negative."""
if n >= 0:
return (1 << n) - 1
else:
return -1 << -n
def maskbits(x: int, n:int) -> int:
"""Mask x & bitmask(n)"""
if n >= 0:
return x & ((1 << n) - 1)
else:
return x & (-1 << -n)
def is_even_for_rounding(c, exp):
"""General-purpose tiebreak used when rounding to even.
If the significand is less than two bits,
decide evenness based on the representation of the exponent.
"""
if c.bit_length() > 1:
return c & 1 == 0
else:
return exp & 1 == 0
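# A few concrete sanity checks for the helpers above (added illustration,
# guarded so they only run when the module is executed directly):
if __name__ == '__main__':
    assert bitmask(3) == 0b111            # three 1s for positive n
    assert bitmask(-3) == -8              # ...11111000, i.e. three trailing 0s
    assert maskbits(0b10110, 3) == 0b110  # keep only the low three bits
    assert is_even_for_rounding(0b10, 5)  # significand has >1 bit: 2 is even
    assert is_even_for_rounding(0b1, 4)   # short significand: exponent 4 is even
    print('utils sanity checks passed')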
|
billzorn/fpunreal
|
titanfp/titanic/utils.py
|
Python
|
mit
| 2,194
|
import logging
logging.basicConfig()
from serial_handler import SerialHandler, serial_wrapper
BAUDRATE = 9600
class TagHeuer520(SerialHandler):
def __init__(self, port=None):
super(TagHeuer520,self).__init__(port,BAUDRATE)
self.line_buffer = ""
@serial_wrapper
def read(self):
""" Read and parse time from serial port """
c = self.serial.read(1)
while c != '':
if c == '\r':
logging.debug(repr(self.line_buffer))
result = None
# TODO add ability to handle other types of time events eg. T-, T+, etc.
if self.line_buffer.startswith('T ') and len(self.line_buffer) == 30:
channel = self.line_buffer[12:14].strip()
time_ms = 0
try:
if self.line_buffer[15:17].strip() != '':
time_ms += (60*60*1000) * int(self.line_buffer[15:17])
if self.line_buffer[18:20].strip() != '':
time_ms += (60*1000) * int(self.line_buffer[18:20])
time_ms += 1000 * int(self.line_buffer[21:23])
time_ms += int(self.line_buffer[24:27])
except ValueError:
self.line_buffer = ""
return None
result = (channel, time_ms)
else:
logging.debug('bad time event format')
self.line_buffer = ""
return result
else:
self.line_buffer += c
c = self.serial.read(1)
return None
if __name__ == "__main__":
import sys
timer = TagHeuer520(sys.argv[1])
while True:
t = timer.read()
if t:
print t
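# Note on the line format parsed by TagHeuer520.read above (inferred from the
# slices, so treat as illustrative): a record starts with 'T ', is exactly 30
# characters long, carries the channel in columns 12-14 and hours/minutes/
# seconds/milliseconds in columns 15-17/18-20/21-23/24-27, and is returned as
# a (channel, time_in_ms) tuple.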
|
LateralGs/rallyx_timing_scoring
|
software/tag_heuer_520.py
|
Python
|
mit
| 1,551
|
# from collections import Counter # for my original solution
class Solution(object):
@staticmethod
def singleNumber(nums):
"""
single_number == PEP8 (forced mixedCase by LeetCode)
:type nums: List[int]
:rtype: int
"""
# cnt = Counter(nums)
# return next(k for k, v in cnt.iteritems() if v == 1)
return reduce(lambda a, b: a ^ b, nums)
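# Illustrative check (not part of the original solution): XOR cancels the
# paired values, so
#
#     Solution.singleNumber([4, 1, 2, 1, 2]) == 4
#
# Note that `reduce` is a builtin only on Python 2; on Python 3 it would need
# `from functools import reduce`.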
|
the-zebulan/LeetCode
|
Easy/single_number.py
|
Python
|
mit
| 410
|
#!/usr/bin/env python
import pytest
from pytest import fixture
from circuits import handler, Event, Component
from circuits.net.events import read, write
from circuits.protocols.irc import IRC
from circuits.protocols.irc import strip, joinprefix, parseprefix
from circuits.protocols.irc import (
PASS, USER, NICK, PONG, QUIT,
JOIN, PART, PRIVMSG, NOTICE, AWAY,
KICK, TOPIC, MODE, INVITE, NAMES, WHOIS
)
from circuits.six import u
class App(Component):
def init(self):
IRC().register(self)
self.data = []
self.events = []
@handler(False)
def reset(self):
self.data = []
self.events = []
@handler()
def _on_event(self, event, *args, **kwargs):
self.events.append(event)
def request(self, message):
self.fire(write(bytes(message)))
def write(self, data):
self.data.append(data)
@fixture(scope="function")
def app(request):
app = App()
while len(app):
app.flush()
return app
def test_strip():
s = ":\x01\x02test\x02\x01"
s = strip(s)
assert s == "\x01\x02test\x02\x01"
s = ":\x01\x02test\x02\x01"
s = strip(s, color=True)
assert s == "test"
def test_joinprefix():
nick, ident, host = "test", "foo", "localhost"
s = joinprefix(nick, ident, host)
assert s == "test!foo@localhost"
def test_parseprefix():
s = "test!foo@localhost"
nick, ident, host = parseprefix(s)
assert nick == "test"
assert ident == "foo"
assert host == "localhost"
s = "test"
nick, ident, host = parseprefix(s)
assert nick == "test"
assert ident is None
assert host is None
@pytest.mark.parametrize("event,data", [
(PASS("secret"), b"PASS secret\r\n"),
(
USER("foo", "localhost", "localhost", "Test Client"),
b"USER foo localhost localhost :Test Client\r\n"
),
(NICK("test"), b"NICK test\r\n"),
(PONG("localhost"), b"PONG :localhost\r\n"),
(QUIT(), b"QUIT Leaving\r\n"),
(QUIT("Test"), b"QUIT Test\r\n"),
(QUIT("Test Message"), b"QUIT :Test Message\r\n"),
(JOIN("#test"), b"JOIN #test\r\n"),
(JOIN("#test", "secret"), b"JOIN #test secret\r\n"),
(PART("#test"), b"PART #test\r\n"),
(PRIVMSG("test", "Hello"), b"PRIVMSG test Hello\r\n"),
(PRIVMSG("test", "Hello World"), b"PRIVMSG test :Hello World\r\n"),
(NOTICE("test", "Hello"), b"NOTICE test Hello\r\n"),
(NOTICE("test", "Hello World"), b"NOTICE test :Hello World\r\n"),
(KICK("#test", "test"), b"KICK #test test :\r\n"),
(KICK("#test", "test", "Bye"), b"KICK #test test Bye\r\n"),
(KICK("#test", "test", "Good Bye!"), b"KICK #test test :Good Bye!\r\n"),
(TOPIC("#test", "Hello World!"), b"TOPIC #test :Hello World!\r\n"),
(MODE("+i"), b"MODE +i\r\n"),
(MODE("#test", "+o", "test"), b"MODE #test +o test\r\n"),
(INVITE("test", "#test"), b"INVITE test #test\r\n"),
(NAMES(), b"NAMES\r\n"),
(NAMES("#test"), b"NAMES #test\r\n"),
(AWAY("I am away."), b"AWAY :I am away.\r\n"),
(WHOIS("somenick"), b"WHOIS :somenick\r\n"),
])
def test_commands(event, data):
message = event.args[0]
    assert bytes(message) == data
@pytest.mark.parametrize("data,event", [
(
b":localhost NOTICE * :*** Looking up your hostname...\r\n",
Event.create(
"notice", (u("localhost"), None, None), u("*"),
u("*** Looking up your hostname..."),
)
),
])
def test_responses(app, data, event):
app.reset()
app.fire(read(data))
while len(app):
app.flush()
e = app.events[-1]
assert event.name == e.name
assert event.args == e.args
assert event.kwargs == e.kwargs
|
eriol/circuits
|
tests/protocols/test_irc.py
|
Python
|
mit
| 3,696
|
"""
Unittest for GeometryType class.
"""
import unittest
from crystalpy.diffraction.GeometryType import BraggDiffraction, LaueDiffraction, \
BraggTransmission, LaueTransmission
from crystalpy.diffraction.GeometryType import GeometryType
class GeometryTypeTest(unittest.TestCase):
def testConstructor(self):
geometry_type_description = "a geometry type"
geometry_type = GeometryType(geometry_type_description)
self.assertEqual(geometry_type.description(),
geometry_type_description)
def testDescription(self):
geometry_type_description = "a geometry type"
geometry_type = GeometryType(geometry_type_description)
self.assertEqual(geometry_type.description(),
geometry_type_description)
def testEqualOperator(self):
geometry_type_one = GeometryType("type one")
geometry_type_two = GeometryType("type two")
self.assertEqual(geometry_type_one,geometry_type_one)
self.assertEqual(geometry_type_two,geometry_type_two)
self.assertNotEqual(geometry_type_one,geometry_type_two)
def testAllGeometryTypes(self):
all_geometries = GeometryType.allGeometryTypes()
self.assertIn(BraggDiffraction(), all_geometries)
self.assertIn(LaueDiffraction(), all_geometries)
self.assertIn(BraggTransmission(), all_geometries)
self.assertIn(LaueTransmission(), all_geometries)
class BraggDiffractionTest(unittest.TestCase):
def testConstructor(self):
bragg_diffraction = BraggDiffraction()
self.assertEqual(bragg_diffraction.description(),
"Bragg diffraction")
class LaueDiffractionTest(unittest.TestCase):
def testConstructor(self):
laue_diffraction = LaueDiffraction()
self.assertEqual(laue_diffraction.description(),
"Laue diffraction")
class BraggTransmissionTest(unittest.TestCase):
def testConstructor(self):
bragg_transmission = BraggTransmission()
self.assertEqual(bragg_transmission.description(),
"Bragg transmission")
class LaueTransmissionTest(unittest.TestCase):
def testConstructor(self):
laue_transmission = LaueTransmission()
self.assertEqual(laue_transmission.description(),
"Laue transmission")
|
edocappelli/crystalpy
|
crystalpy/tests/diffraction/GeometryTypeTest.py
|
Python
|
mit
| 2,412
|
from rest_framework import serializers
from rest_framework.reverse import reverse
from .models import Post, Comment
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class PostSerializer(serializers.ModelSerializer):
comments = CommentSerializer(many=True, read_only=True)
links = serializers.SerializerMethodField()
def get_links(self, obj):
request = self.context['request']
return {
'self': reverse('post-detail',
kwargs={'pk': obj.pk},
request=request),
'list': reverse('post-list',
request=request),
'apply': reverse('post-apply',
kwargs={'pk': obj.pk},
request=request),
}
class Meta:
model = Post
fields = '__all__'
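# Illustrative shape of the serialized Post payload (field values are
# placeholders): alongside the model fields and the nested read-only
# `comments`, get_links() above adds a `links` object with absolute URLs
# built from the 'post-detail', 'post-list' and 'post-apply' route names.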
|
chatcaos-org/django_rest_framework
|
src/hdson_rest/blog/serializers.py
|
Python
|
mit
| 939
|
from PyQt4 import QtGui
class BooksSearcherWidget(QtGui.QWidget):
def __init__(self, label):
super(BooksSearcherWidget, self).__init__()
# Create searcher widgets
self.book_searcher_label = QtGui.QLabel(label)
self.book_searcher_line_edit = QtGui.QLineEdit()
self.book_search_button = QtGui.QPushButton("Search")
# Create searcher layout
self.book_searcher_box = QtGui.QHBoxLayout()
self.book_searcher_box.addWidget(self.book_searcher_line_edit)
self.book_searcher_box.addWidget(self.book_search_button)
# Create main layout
self.main_layout = QtGui.QVBoxLayout()
self.main_layout.addWidget(self.book_searcher_label)
self.main_layout.addLayout(self.book_searcher_box)
# Set the window layout
self.setLayout(self.main_layout)
def get_text(self):
return self.book_searcher_line_edit.text()
|
franramirez688/Taric-Challange
|
taric_challange/gui/widgets/books_searcher.py
|
Python
|
mit
| 932
|
"""Simple FTP Server"""
import argparse
import os
import sys
import threading
import time
import logging
_stash = globals()["_stash"]
try:
import pyftpdlib
except ImportError:
print("Installing pyftpdlib...")
_stash("pip install pyftpdlib")
es = os.getenv("?")
if es != 0:
print(_stash.text_color("Failed to install pyftpdlib!", "red"))
sys.exit(1)
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.servers import FTPServer
from pyftpdlib.handlers import FTPHandler
def run(ns):
"""starts the server."""
auth = DummyAuthorizer()
if ns.user is not None:
auth.add_user(ns.user, ns.pswd, ns.path, perm=ns.perm)
else:
auth.add_anonymous(ns.path, perm=ns.perm)
handler = FTPHandler
handler.authorizer = auth
handler.banner = "StaSh v{v} FTP-Server".format(v=_stash.__version__)
address = ("0.0.0.0", ns.port)
server = FTPServer(address, handler)
server.max_cons = 128
server.max_cons_per_ip = 128
# setup logging
logger = logging.getLogger("pyftpdlib")
logger.setLevel(logging.CRITICAL)
logger.propagate = False
# server needs to run in a thread to be killable
thr = threading.Thread(
name="FTP-Server Thread", target=server.serve_forever
)
thr.daemon = True
thr.start()
print("FTP-Server started on {h}:{p}".format(h=address[0], p=str(address[1])))
try:
while True:
time.sleep(0.2)
except KeyboardInterrupt:
print("Stopping Server...")
server.close_all()
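# Example invocation (illustrative; the flags correspond to the argparse
# options defined below):
#
#     ftpserver.py -p 2121 -u alice --pswd secret --path ~/Documents
#
# Without -u/--pswd the server accepts anonymous logins, with permissions
# taken from --perm.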
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-p", "--port", action="store", type=int,
default=21, dest="port", help="port to listen on"
)
parser.add_argument(
"-u", "--user", action="store", default=None, dest="user",
help="username (default: anonymous)"
)
parser.add_argument(
"--pswd", action="store", default=None, dest="pswd",
help="password"
)
parser.add_argument(
"--perm", action="store", default="elradfmwM", dest="perm",
help="permissions of the user"
)
parser.add_argument(
"--path", action="store", default=os.getcwd(), dest="path",
help="path to serve"
)
ns = parser.parse_args()
if (ns.user is not None) and (ns.pswd is None):
print(
_stash.text_color(
"Error: If user is given, pswd must also be given!", "red"
)
)
sys.exit(1)
if (ns.pswd is not None) and (ns.user is None):
print(
_stash.text_color(
"Error: If pswd is given, user must also be given!", "red"
)
)
sys.exit(1)
run(ns)
|
cclauss/stash
|
bin/ftpserver.py
|
Python
|
mit
| 2,452
|
# -*- coding: utf-8 -*-
from time import sleep
from datetime import datetime
import boto
import boto.dynamodb2
from boto.dynamodb2.table import Table
conn = boto.dynamodb2.layer1.DynamoDBConnection()
# tbl = Table("test")
def t():
return conn.query(
"cl-hp-votes",
{
"q_id": {
"ComparisonOperator": "EQ",
"AttributeValueList": [{"N":"1"}]
},
"o_id": {
"ComparisonOperator": "EQ",
"AttributeValueList": [{"N":"1"}]
}
},
index_name="q_id-o_id-index",
select="COUNT"
)
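# Descriptive note on t() above: it queries the "cl-hp-votes" table through
# the q_id-o_id-index GSI for items with q_id == 1 and o_id == 1, and
# select="COUNT" makes DynamoDB return only the matching count, which the
# loop below polls and prints with a timestamp.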
while True:
try:
print("%s: %s" % (datetime.now().isoformat(), t()))
except KeyboardInterrupt:
break
|
clifflu/headless-poller
|
test_scripts/gsi_read.py
|
Python
|
mit
| 756
|
# apis_v1/test_views_organization_suggestion_tasks.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.core.urlresolvers import reverse
from django.test import TestCase
import json
from follow.models import UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW
from twitter.models import TwitterWhoIFollow, TwitterLinkToOrganization
from voter.models import Voter, VoterDeviceLink
class WeVoteAPIsV1TestsOrganizationSuggestionTasks(TestCase):
def setUp(self):
self.generate_voter_device_id_url = reverse("apis_v1:deviceIdGenerateView")
self.organization_suggestion_tasks_url = reverse("apis_v1:organizationSuggestionTasksView")
def test_organization_suggestion_tasks_with_no_voter_device_id(self):
#######################################
# Make sure the correct errors are thrown when no one is signed in
response01 = self.client.get(self.organization_suggestion_tasks_url)
json_data01 = json.loads(response01.content.decode())
self.assertEqual('status' in json_data01, True, "'status' expected in the json response, and not found")
self.assertEqual('success' in json_data01, True, "'success' expected in the json response, and not found")
self.assertEqual('voter_device_id' in json_data01, True,
"'voter_device_id' expected in the json response, and not found")
self.assertEqual('kind_of_suggestion_task' in json_data01, True,
"'kind_of_suggestion_task' expected in the json response, and not found")
self.assertEqual('kind_of_follow_task' in json_data01, True,
"'kind_of_follow_task' expected in the json response, and not found")
self.assertEqual('organization_suggestion_task_saved' in json_data01, True,
"'organization_suggestion_task_saved' expected in the json response, and not found")
self.assertEqual('organization_suggestion_list' in json_data01, True,
"'organization_suggestion_list' expected in the json response, and not found")
self.assertEqual(
json_data01['status'], 'VALID_VOTER_DEVICE_ID_MISSING',
"status: {status} (VALID_VOTER_DEVICE_ID_MISSING expected), voter_device_id: {voter_device_id}".format(
status=json_data01['status'], voter_device_id=json_data01['voter_device_id']))
self.assertEqual(json_data01['success'], False, "success 'False' expected, True returned")
self.assertEqual(json_data01['voter_device_id'], '',
"voter_device_id == '' expected, voter_device_id: {voter_device_id} returned".format(
voter_device_id=json_data01['voter_device_id']))
self.assertEqual(json_data01['kind_of_suggestion_task'], UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW,
"kind_of_suggestion_task == UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW expected, "
"kind_of_suggestion_task: {kind_of_suggestion_task} returned".format(
kind_of_suggestion_task=json_data01['kind_of_suggestion_task']))
self.assertEqual(json_data01['kind_of_follow_task'], '',
"kind_of_follow_task == [] expected, "
"kind_of_follow_task: {kind_of_follow_task} returned".format(
kind_of_follow_task=json_data01['kind_of_follow_task']))
self.assertEqual(json_data01['organization_suggestion_task_saved'], False,
"organization_suggestion_task_saved == False expected, organization_suggestion_task_saved: "
"{organization_suggestion_task_saved} returned".format(
organization_suggestion_task_saved=json_data01['organization_suggestion_task_saved']))
self.assertEqual(json_data01['organization_suggestion_list'], [],
"organization_suggestion_list == [] expected, organization_suggestion_list: "
"{organization_suggestion_list} returned".format(
organization_suggestion_list=json_data01['organization_suggestion_list']))
def test_organization_suggestion_tasks_with_voter_device_id(self):
#######################################
# Generate the voter_device_id cookie
response10 = self.client.get(self.generate_voter_device_id_url)
json_data10 = json.loads(response10.content.decode())
# Make sure we got back a voter_device_id we can use
self.assertEqual('voter_device_id' in json_data10, True,
"voter_device_id expected in the deviceIdGenerateView json response")
# Now put the voter_device_id in a variable we can use below
voter_device_id = json_data10['voter_device_id'] if 'voter_device_id' in json_data10 else ''
#######################################
        # Make sure the correct errors are thrown when a kind_of_suggestion_task isn't passed in for a voter that
# does not exist
response11 = self.client.get(self.organization_suggestion_tasks_url, {'voter_device_id': voter_device_id})
json_data11 = json.loads(response11.content.decode())
self.assertEqual('status' in json_data11, True, "'status' expected in the json response, and not found")
self.assertEqual('success' in json_data11, True, "'success' expected in the json response, and not found")
self.assertEqual('voter_device_id' in json_data11, True,
"'voter_device_id' expected in the json response, and not found")
self.assertEqual('organization_suggestion_task_saved' in json_data11, True,
"'organization_suggestion_task_saved' expected in the json response, and not found")
self.assertEqual('organization_suggestion_list' in json_data11, True,
"'organization_suggestion_list' expected in the json response, and not found")
self.assertEqual(
json_data11['status'], 'VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID',
"status: {status} (VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID expected), "
"voter_device_id: {voter_device_id}".format(
status=json_data11['status'], voter_device_id=json_data11['voter_device_id']))
self.assertEqual(json_data11['success'], False, "success 'False' expected, True returned")
self.assertEqual(json_data11['organization_suggestion_task_saved'], False,
"organization_suggestion_task_saved == False expected, organization_suggestion_task_saved: "
"{organization_suggestion_task_saved} returned".format(
organization_suggestion_task_saved=json_data11['organization_suggestion_task_saved']))
self.assertEqual(json_data11['organization_suggestion_list'], [],
"organization_suggestion_list == [] expected, organization_suggestion_list: "
"{organization_suggestion_list} returned".format(
organization_suggestion_list=json_data11['organization_suggestion_list']))
#######################################
# Add a voter and twitter ids i follow but do not create twitter link to organization
# so we can test no organization suggestions to follow
voter, created = Voter.objects.update_or_create(we_vote_id='wvt3voter1',
linked_organization_we_vote_id='wvt3org1',
first_name='WeVote',
twitter_id=39868320, twitter_name='We Vote',
twitter_screen_name='wevote')
VoterDeviceLink.objects.update_or_create(voter_device_id=voter_device_id, voter_id=voter.id)
TwitterWhoIFollow.objects.update_or_create(twitter_id_of_me=39868320, twitter_id_i_follow=41521318)
TwitterWhoIFollow.objects.update_or_create(twitter_id_of_me=39868320, twitter_id_i_follow=16535694)
#######################################
# Make sure the correct errors are thrown when twitter link to organization is not created
# for twitter ids i follow
response12 = self.client.get(self.organization_suggestion_tasks_url, {'voter_device_id': voter_device_id})
json_data12 = json.loads(response12.content.decode())
self.assertEqual('status' in json_data12, True, "'status' expected in the json response, and not found")
self.assertEqual('success' in json_data12, True, "'success' expected in the json response, and not found")
self.assertEqual('voter_device_id' in json_data12, True,
"'voter_device_id' expected in the json response, and not found")
self.assertEqual('organization_suggestion_task_saved' in json_data12, True,
"'organization_suggestion_task_saved' expected in the json response, and not found")
self.assertEqual('organization_suggestion_list' in json_data12, True,
"'organization_suggestion_list' expected in the json response, and not found")
self.assertEqual(
json_data12['status'], ' TWITTER_WHO_I_FOLLOW_LIST_RETRIEVED FAILED retrieve_twitter_link_to_organization'
' FAILED retrieve_twitter_link_to_organization',
"status: {status} (TWITTER_WHO_I_FOLLOW_LIST_RETRIEVED FAILED retrieve_twitter_link_to_organization FAILED"
" retrieve_twitter_link_to_organization expected), voter_device_id: {voter_device_id}".format
(status=json_data12['status'], voter_device_id=json_data12['voter_device_id']))
self.assertEqual(json_data12['success'], False, "success 'False' expected, True returned")
self.assertEqual(json_data12['organization_suggestion_task_saved'], False,
"organization_suggestion_task_saved == False expected, organization_suggestion_task_saved: "
"{organization_suggestion_task_saved} returned".format(
organization_suggestion_task_saved=json_data12['organization_suggestion_task_saved']))
self.assertEqual(json_data12['organization_suggestion_list'], [],
"organization_suggestion_list == [] expected, organization_suggestion_list: "
"{organization_suggestion_list} returned".format(
organization_suggestion_list=json_data12['organization_suggestion_list']))
#######################################
# Create two twitter link to organization so we can test all suggestions of twitter organizations to follow
TwitterLinkToOrganization.objects.create(twitter_id=41521318, organization_we_vote_id='wv02org1397')
TwitterLinkToOrganization.objects.create(twitter_id=16535694, organization_we_vote_id='wv02org1456')
#######################################
# Make sure the correct results are given when voter and twitter link to organizations created successfully
response13 = self.client.get(self.organization_suggestion_tasks_url, {'voter_device_id': voter_device_id})
json_data13 = json.loads(response13.content.decode())
self.assertEqual('status' in json_data13, True, "'status' expected in the json response, and not found")
self.assertEqual('success' in json_data13, True, "'success' expected in the json response, and not found")
self.assertEqual('voter_device_id' in json_data13, True,
"'voter_device_id' expected in the json response, and not found")
self.assertEqual('organization_suggestion_task_saved' in json_data13, True,
"'organization_suggestion_task_saved' expected in the json response, and not found")
self.assertEqual('organization_suggestion_list' in json_data13, True,
"'organization_suggestion_list' expected in the json response, and not found")
self.assertEqual(
json_data13['status'], ' TWITTER_WHO_I_FOLLOW_LIST_RETRIEVED '
' RETRIEVE_TWITTER_LINK_TO_ORGANIZATION_FOUND_BY_TWITTER_USER_ID '
'SUGGESTED_ORGANIZATION_TO_FOLLOW_UPDATED '
'RETRIEVE_TWITTER_LINK_TO_ORGANIZATION_FOUND_BY_TWITTER_USER_ID '
'SUGGESTED_ORGANIZATION_TO_FOLLOW_UPDATED',
"status: {status} ( TWITTER_WHO_I_FOLLOW_LIST_RETRIEVED "
" RETRIEVE_TWITTER_LINK_TO_ORGANIZATION_FOUND_BY_TWITTER_USER_ID SUGGESTED_ORGANIZATION_TO_FOLLOW_UPDATED "
"RETRIEVE_TWITTER_LINK_TO_ORGANIZATION_FOUND_BY_TWITTER_USER_ID SUGGESTED_ORGANIZATION_TO_FOLLOW_UPDATED"
"expected), voter_device_id: {voter_device_id}".format
(status=json_data13['status'], voter_device_id=json_data13['voter_device_id']))
self.assertEqual(json_data13['success'], True, "success 'True' expected, True returned")
self.assertEqual(json_data13['organization_suggestion_task_saved'], True,
"organization_suggestion_task_saved == True expected, organization_suggestion_task_saved: "
"{organization_suggestion_task_saved} returned".format(
organization_suggestion_task_saved=json_data13['organization_suggestion_task_saved']))
self.assertEqual(json_data13['organization_suggestion_list'], [{'organization_we_vote_id': 'wv02org1397'},
{'organization_we_vote_id': 'wv02org1456'}],
"organization_suggestion_list == [('organization_we_vote_id': 'wv02org1397'), "
"('organization_we_vote_id': 'wv02org1456')] expected, organization_suggestion_list:"
"{organization_suggestion_list} returned".format
(organization_suggestion_list=json_data13['organization_suggestion_list']))
|
jainanisha90/WeVoteServer
|
apis_v1/tests/test_views_organization_suggestion_tasks.py
|
Python
|
mit
| 14,255
|
#!/usr/bin/python2
from shell_command import ShellCommand
from source import Directory, File
import os
class Target(object):
def __init__(self, name, sources=None, includes=None):
self.name = name
self.sources = [] if sources is None else sources
self.cflags = []
self.ldflags = []
self.compiled_objects = []
if isinstance(includes, str):
self.includes = [includes]
elif isinstance(includes, list):
self.includes = includes
elif includes is None:
self.includes = []
else:
raise ValueError(str(includes))
def add_ldflag(self, flag):
if not flag in self.ldflags:
self.ldflags += [flag]
def remove_ldflag(self, flag):
if flag in self.ldflags:
self.ldflags.remove(flag)
def add_cflag(self, flag):
if not flag in self.cflags:
self.cflags += [flag]
def remove_cflag(self, flag):
if flag in self.cflags:
self.cflags.remove(flag)
def _gen_include_flags(self):
flags = map(os.path.abspath, self.includes)
flags = ' -I'.join(flags)
if flags:
return ('-I' + flags).split(' ')
return []
def compile_object(self, builder, source, flags=None):
compiler = builder.toolchain.compiler
obj = os.path.join(builder.tmpdir, source.objectfile)
if source.is_newer(obj) is False:
return {'source': source, 'status': 'skipped'}
flags = [] if flags is None else flags
include = self._gen_include_flags()
flags = [source.path, '-c'] + include + self.cflags + flags + ['-o', obj]
cmd = ShellCommand(compiler, flags)
code, output = cmd.run(verbose=builder.verbose)
if 0 == code:
status = 'success'
self.compiled_objects += [obj]
else:
status = 'failed'
return {'source': source, 'status': status, 'output': output}
def final(self, builder):
raise NotImplementedError()
class Executable(Target):
def __init__(self, name, sources=None, includes=None):
super(self.__class__, self).__init__(name, sources, includes)
self.objects = []
def gather_objects(self, builder):
self.objects += Directory(builder.tmpdir, exts=['o']).discover()
def link(self, builder):
if len(self.objects) == 0:
return False
compiler = builder.toolchain.compiler
target = builder.output_file(self)
flags = map(lambda x: x.path, self.objects) + self.ldflags + ['-o', target]
cmd = ShellCommand(compiler, flags)
builder.print_msg('LD', target)
code, output = cmd.run(verbose=builder.verbose)
print output.strip()
return code == 0
def final(self, builder):
self.gather_objects(builder)
self.link(builder)
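# --- usage sketch (not part of the original module) -----------------------
# A minimal illustration of how Target accumulates flags and include paths.
# It only exercises the pure-Python parts of the class, so no toolchain or
# builder is required. Note that _gen_include_flags round-trips through
# ' -I'.join(...).split(' '), so it assumes include paths contain no spaces.
if __name__ == '__main__':
    demo = Target('demo', includes=['include', 'src'])
    demo.add_cflag('-Wall')
    demo.add_cflag('-Wall')  # duplicates are ignored
    demo.add_ldflag('-lm')
    print(demo._gen_include_flags())  # e.g. ['-I/abs/path/include', '-I/abs/path/src']
    print(demo.cflags + demo.ldflags)  # ['-Wall', '-lm']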
|
goniz/buildscript
|
build_system/target.py
|
Python
|
mit
| 2,918
|
#!/usr/bin/python
# -*- coding: utf8 -*-
"""히든커멘드지롱"""
import re
from botlib import BotLib
from util.util import enum
Type = enum(
Nico = 1,
Kkamo = 2,
Japan = 3,
)
def input_to_type(text):
if re.findall(ur"니코", text): return Type.Nico
if re.findall(ur"까모", text): return Type.Kkamo
if text == '!japan': return Type.Japan
return None
#def get_argument(text):
# return
def on_message(msg, server):
    text = msg.get("text", "")
    msgtype = input_to_type(text)
    if msgtype == Type.Nico:
        BotLib.say(msg['channel'], u"오타쿠 기분나빠...")  # "Otaku creep me out..."
    if msgtype == Type.Kkamo:
        BotLib.say(msg['channel'], u"까모오......")  # drawn-out "Kkamooo......"
    if msgtype == Type.Japan:
        BotLib.say(msg['channel'], u"일본 또가?")  # "Going to Japan again?"
|
storyhe/playWithBot
|
plugins/say.py
|
Python
|
mit
| 789
|
"""
You are given the root node of a binary search tree (BST) and a value to insert into the tree. Return the root node of the BST after the insertion. It is guaranteed that the new value does not exist in the original BST.
Notice that there may exist multiple valid ways for the insertion, as long as the tree remains a BST after insertion. You can return any of them.
Example 1:
[img1](https://assets.leetcode.com/uploads/2020/10/05/insertbst.jpg)
Input: root = [4,2,7,1,3], val = 5
Output: [4,2,7,1,3,5]
Explanation: Another accepted tree is:
[img3](https://assets.leetcode.com/uploads/2020/10/05/bst.jpg)
Example 2:
Input: root = [40,20,60,10,30,50,70], val = 25
Output: [40,20,60,10,30,50,70,null,null,25]
Example 3:
Input: root = [4,2,7,1,3,null,null,null,null,null,null], val = 5
Output: [4,2,7,1,3,5]
Constraints:
The number of nodes in the tree will be in the range [0, 10^4].
-10^8 <= Node.val <= 10^8
All the values Node.val are unique.
-10^8 <= val <= 10^8
It's guaranteed that val does not exist in the original BST.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
if root is None:
return TreeNode(val)
if root.val > val:
root.left = self.insertIntoBST(root.left, val)
else:
root.right = self.insertIntoBST(root.right, val)
return root
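# --- usage sketch (not part of the original solution) ---------------------
# On LeetCode the judge supplies TreeNode; the minimal definition below only
# exists so this self-check is self-contained (to run the whole file locally,
# restore the commented-out TreeNode definition above as well). It inserts 5
# into the tree from Example 1 and verifies an in-order walk stays sorted.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
    def inorder(node):
        return inorder(node.left) + [node.val] + inorder(node.right) if node else []
    root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(7))
    root = Solution().insertIntoBST(root, 5)
    print(inorder(root))  # [1, 2, 3, 4, 5, 7]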
|
franklingu/leetcode-solutions
|
questions/insert-into-a-binary-search-tree/Solution.py
|
Python
|
mit
| 1,627
|
"""Main package API entry point.
Import core objects here.
"""
from .__pkg__ import (
__description__,
__url__,
__version__,
__author__,
__email__,
__license__
)
from .model import ModelBase, make_declarative_base
from .query import Query, QueryModel
from .manager import Manager, ManagerMixin
from .session import Session
__all__ = [
'ModelBase',
'make_declarative_base',
'Query',
'QueryModel',
'Manager',
'ManagerMixin',
'Session'
]
|
dgilland/alchy
|
alchy/__init__.py
|
Python
|
mit
| 491
|
# copyright 2015 by mike lodato (zvxryb@gmail.com)
# this work is subject to the terms of the MIT license
import math
import pyglet
class GrayCode(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(GrayCode, self).__init__(*args, **kwargs)
y0 = 0
y1 = self.height
self.i = 0
self.frames = []
n = int(math.ceil(math.log(self.width, 2)))
for i in range(n):
indices = []
vertices = []
colors = []
m = 0
dx = 2 ** i
x0 = 0
for j in range(self.width // dx + 1):
x1 = x0 + dx
if (j ^ (j >> 1)) & 1 > 0:
indices += (x + m for x in [0, 1, 2, 2, 3, 0])
vertices += (x0, y0, x1, y0, x1, y1, x0, y1)
colors += 12 * (255,)
m += 4
x0 = x1
self.frames.append(pyglet.graphics.vertex_list_indexed(
m, indices, ('v2i', vertices), ('c3B', colors)))
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.ESCAPE:
pyglet.app.exit()
def on_draw(self):
pyglet.gl.glClearColor(1.0, 0.0, 0.0, 1.0)
self.clear()
self.i -= 1
if self.i < 0:
self.i = len(self.frames) - 1
self.frames[self.i].draw(pyglet.gl.GL_TRIANGLES)
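# --- sketch: the bit trick behind the stripe patterns ----------------------
# Frame i lights a column when bit i of the binary-reflected Gray code of its
# x position is set; with j = x // 2**i that bit is (j ^ (j >> 1)) & 1, the
# expression used above. Adjacent columns differ in exactly one bit across
# all frames, which is what makes the projected patterns robust to decode.
if __name__ == '__main__':
    gray = [j ^ (j >> 1) for j in range(8)]
    print(gray)  # [0, 1, 3, 2, 6, 7, 5, 4]
    print([format(g, '03b') for g in gray])  # neighbouring codes differ by one bit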
|
zvxryb/3d-scanner
|
draw.py
|
Python
|
mit
| 1,140
|
#!/usr/bin/env python
from spider import *
class CaltechSpider(Spider):
def __init__(self):
Spider.__init__(self)
self.school = 'caltech'
self.subject = 'eecs'
def doWork(self):
print "downloading caltech course info"
r = requests.get('http://www.cms.caltech.edu/academics/course_desc')
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(self.subject, self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
print "processing html and write data to file..."
for li in soup.find_all('li'):
data = li.text.strip()
if data == 'Graduate Programs':
break
course_num = ''
title = ''
description = 'description:'
instructors = ''
prereq = ''
link = ''
i = 0
if li.strong != None and li.strong.a != None:
link = li.strong.a['href']
for item in data.split('.'):
i += 1
if i == 1:
course_num = item.strip().replace(' ', '')
elif i == 2:
title = item.strip()
elif item.strip().startswith('Instructor'):
instructors = item.strip().replace('Instructor: ', 'instructors:').replace('Instructors: ', 'instructors:')
elif item.strip().startswith('Prerequisites'):
prereq = item.strip().replace('Prerequisites: ', 'prereq:')
else:
description += item.strip() + ' '
print course_num + ' ' + title + ' ' + link
if prereq != '':
description = prereq + ' ' + description
if instructors != '':
description = instructors + ' ' + description
self.count += 1
self.write_db(f, course_num, title, link, description)
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
start = CaltechSpider()
start.doWork()
|
roscopecoltran/scraper
|
.staging/meta-engines/xlinkBook/update/update_caltech.py
|
Python
|
mit
| 2,382
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import json
import logging
from functools import partial
from tempfile import NamedTemporaryFile
from subprocess import check_output, CalledProcessError, PIPE
from matplotlib import rcParams
from matplotlib.figure import Figure
from matplotlib.backends.backend_pdf import PdfPages
# Optional write dependencies:
try:
from PIL import Image, PngImagePlugin
except ImportError:
Image = None
try:
from PyPDF2 import PdfFileReader
except ImportError:
PdfFileReader = None
# Python 3
try:
basestring
except NameError:
basestring = (str, bytes)
__all__ = ["savefig"]
# Save a reference to the matplotlib savefig implementation.
mpl_savefig = Figure.savefig
def get_git_info(include_diff=False):
# Check the status to see if there are any uncommitted changes.
if include_diff:
try:
diff = check_output("git diff", shell=True, stderr=PIPE).decode()
except CalledProcessError:
return None
# Get the commit information.
cmd = "git log -1 --date=iso8601 --format=\"format:%H || %ad || %an\""
try:
result = check_output(cmd, shell=True, stderr=PIPE).decode()
except CalledProcessError:
return None
# Build the results dictionary and include changes if there are any.
ret = dict(zip(["git-hash", "git-date", "git-author"],
result.split(" || ")))
if include_diff and len(diff):
ret["git-diff"] = diff
return ret
def savefig_png(self, fn, *args, **kwargs):
include_diff = kwargs.pop("include_diff", False)
# This is a hack to deal with filenames without extensions. Not sure why
# this is necessary.
fn = os.path.splitext(fn)[0] + ".png"
# We'll start by saving the figure because the metadata is going to be
# inserted after the fact.
ret = mpl_savefig(self, fn, *args, **kwargs)
# If PIL isn't installed, we'll just call the standard savefig.
if Image is None:
logging.warn(
"PIL or pillow must be installed to add metadata to PNG files.")
return ret
# Get the git commit information.
git_info = get_git_info(include_diff=include_diff)
if git_info is None:
return ret
# Inject the git info into the figure as metadata.
img = Image.open(fn)
meta = PngImagePlugin.PngInfo()
for k, v in git_info.items():
meta.add_text(k, v)
img.save(fn, "png", pnginfo=meta)
return ret
def savefig_pdf(self, fn, *args, **kwargs):
include_diff = kwargs.pop("include_diff", False)
# Get the git commit information.
git_info = get_git_info(include_diff=include_diff)
if git_info is None:
return mpl_savefig(self, fn, *args, **kwargs)
# Build the PDF object that will take the metadata.
fn = os.path.splitext(fn)[0] + ".pdf"
kwargs["format"] = "pdf"
fig = PdfPages(fn)
# Save the figure.
ret = mpl_savefig(self, fig, *args, **kwargs)
# Add the metadata.
metadata = fig.infodict()
metadata["Keywords"] = json.dumps(git_info, sort_keys=True)
# Commit the changes.
fig.close()
return ret
def savefig(self, fn, *args, **kwargs):
if not isinstance(fn, basestring):
logging.warn("The savefig module only supports filenames.")
return mpl_savefig(self, fn, *args, **kwargs)
# Figure out the format.
ext = os.path.splitext(fn)[1]
fmt = kwargs.get("format", None)
fmt = (fmt if fmt is not None
else ext[1:] if len(ext)
else rcParams["savefig.format"]).lower()
# Deal with the different formats.
if fmt == "png":
return savefig_png(self, fn, *args, **kwargs)
if fmt == "pdf":
return savefig_pdf(self, fn, *args, **kwargs)
# Fall back on the standard savefig if we don't know how to deal with the
# format.
logging.warn("Unsupported savefig format: '{0}'".format(fmt))
return mpl_savefig(self, fn, *args, **kwargs)
def monkey_patch(include_diff=False):
# Monkey patch matplotlib to call our savefig instead of the standard
# version.
def sf(*args, **kwargs):
kwargs["include_diff"] = kwargs.get("include_diff", include_diff)
return savefig(*args, **kwargs)
sf.__doc__ = mpl_savefig.__doc__
Figure.savefig = sf
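# Typical use (sketch): call monkey_patch() once, early in a script, and every
# later Figure.savefig / pyplot.savefig call embeds the current git hash, date
# and author in the saved PNG/PDF:
#
#     import savefig
#     savefig.monkey_patch(include_diff=True)
#     ...
#     plt.savefig("figure.png")  # now carries git metadata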
def get_file_info(fn):
"""
Get the metadata stored in an image file returning ``None`` on failure.
"""
ext = os.path.splitext(fn)[1].lower()
if ext == ".png":
if Image is None:
raise ImportError("PIL or pillow must be installed to read "
"metadata from PNG files.")
img = Image.open(fn)
return img.info
if ext == ".pdf":
if PdfFileReader is None:
raise ImportError("PyPDF2 must be installed to read "
"metadata from PDF files.")
with open(fn, "rb") as f:
pdf = PdfFileReader(f)
di = pdf.getDocumentInfo()
if "/Keywords" not in di:
return None
try:
return json.loads(di["/Keywords"])
except ValueError:
return None
return None
def test_png():
monkey_patch()
import matplotlib.pyplot as pl
# Get the current git info.
git_info = get_git_info()
# Save an empty figure to a temporary file and check that the git info
# gets stored correctly.
with NamedTemporaryFile(suffix=".png") as f:
fn = f.name
pl.savefig(fn)
info = get_file_info(fn)
assert all([v == info[k] for k, v in git_info.items()])
# Now try without a file extension.
with NamedTemporaryFile(suffix=".png") as f:
fn = f.name
pl.savefig(os.path.splitext(fn)[0], format="png")
info = get_file_info(fn)
assert all([v == info[k] for k, v in git_info.items()])
# If the default file-type is PNG, test that too.
if not rcParams["savefig.format"].lower() == "png":
return
with NamedTemporaryFile(suffix=".png") as f:
fn = f.name
pl.savefig(os.path.splitext(fn)[0])
info = get_file_info(fn)
assert all([v == info[k] for k, v in git_info.items()])
def test_pdf():
monkey_patch()
import matplotlib.pyplot as pl
# Get the current git info.
git_info = get_git_info()
# Save an empty figure to a temporary file and check that the git info
# gets stored correctly.
try:
with NamedTemporaryFile(suffix=".pdf", delete=False) as f:
fn = f.name
pl.savefig(fn)
info = get_file_info(fn)
assert all([v == info[k] for k, v in git_info.items()])
finally:
os.unlink(fn)
# Now try without a file extension.
try:
with NamedTemporaryFile(suffix=".pdf", delete=False) as f:
fn = f.name
pl.savefig(os.path.splitext(fn)[0], format="pdf")
info = get_file_info(fn)
assert all([v == info[k] for k, v in git_info.items()])
finally:
os.unlink(fn)
# If the default file-type is PNG, test that too.
if not rcParams["savefig.format"].lower() == "pdf":
return
try:
with NamedTemporaryFile(suffix=".pdf", delete=False) as f:
fn = f.name
pl.savefig(os.path.splitext(fn)[0])
info = get_file_info(fn)
assert all([v == info[k] for k, v in git_info.items()])
finally:
os.unlink(fn)
if __name__ == "__main__":
import sys
import argparse
# Testing.
if "--test" in sys.argv:
print("Testing PNG support...")
test_png()
print("Testing PDF support...")
test_pdf()
sys.exit(0)
# Parse the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="The file to inspect")
parser.add_argument("-d", "--diff", action="store_true",
help="Get the diff.")
args = parser.parse_args()
# Get the file info.
info = get_file_info(args.filename)
if info is None:
print("Couldn't get info from file: {0}".format(args.filename))
sys.exit(0)
# Show the diff if that was requested.
if args.diff:
if "git-diff" in info:
print(info["git-diff"])
sys.exit(0)
print("No diff found.")
# Print the summary.
keys = ["git-hash", "git-date", "git-author"]
for k in keys:
v = info.get(k, None)
if v is None:
print("Missing key: '{0}'".format(k))
else:
print("{0}: {1}".format(k, v))
|
dfm/savefig
|
savefig.py
|
Python
|
mit
| 8,667
|
import lassie
from .base import LassieBaseTestCase
class LassieTwitterCardTestCase(LassieBaseTestCase):
def test_twitter_all_properties(self):
url = 'http://lassie.it/twitter_card/all_properties.html'
data = lassie.fetch(url)
self.assertEqual(data['url'], 'http://www.youtube.com/watch?v=fWNaR-rxAic')
self.assertEqual(data['title'], 'Carly Rae Jepsen - Call Me Maybe')
self.assertEqual(data['description'], 'Buy Now! http://smarturl.it/CallMeMaybe Music video by Carly Rae Jepsen performing Call Me Maybe. (C) 2011 604 Records Inc. #VEVOCertified on June 8, 2012. h...')
self.assertEqual(len(data['images']), 1)
image = data['images'][0]
self.assertEqual(image['src'], 'http://i1.ytimg.com/vi/fWNaR-rxAic/maxresdefault.jpg')
self.assertEqual(len(data['videos']), 1)
video = data['videos'][0]
self.assertEqual(video['src'], 'https://www.youtube.com/embed/fWNaR-rxAic')
self.assertEqual(video['width'], 1920)
self.assertEqual(video['height'], 1080)
def test_twitter_no_og_title_use_twitter_title(self):
url = 'http://lassie.it/twitter_card/no_og_title_use_twitter_title.html'
data = lassie.fetch(url)
self.assertEqual(data['description'], 'A test case for Lassie!')
self.assertEqual(data['title'], 'Lassie Twitter Test | no_og_title_use_twitter_title')
|
michaelhelmick/lassie
|
tests/test_twitter_card.py
|
Python
|
mit
| 1,403
|
# coding=utf8
"""
Module docstrings before __future__ imports can break things...
"""
from __future__ import division
import ast
import json
import os
import random
import re
import sys
import unittest
import weakref
from collections import namedtuple
from copy import copy
from functools import partial
from importlib import import_module
from multiprocessing.dummy import Pool as ThreadPool
from time import sleep
from unittest import skipUnless
from bs4 import BeautifulSoup
from cheap_repr import register_repr
from littleutils import file_to_json, string_to_file, only
from birdseye import eye
from birdseye.bird import NodeValue, is_interesting_expression, is_obvious_builtin
from birdseye.utils import PY2, PY3, PYPY
from tests.utils import SharedCounter
Session = eye.db.Session
Call = eye.db.Call
try:
from collections.abc import Set, Mapping
except ImportError:
from collections import Set, Mapping
@eye
def bar():
pass
# noinspection PyStatementEffect
@eye()
def foo():
x = 1
y = 2
if x + y > 5:
1 / 0
else:
x * y
try:
bar(x + x, 2 / 0, y + y)
foo
except ZeroDivisionError:
x - y
for i in [1, 2]:
for j in [3, 4]:
i + j
for k in [5]:
k
z = 0
while z < 2:
z += 1
z ** z
bar()
{'list': [n for n in [1, 2]]}
try:
error()
except ValueError:
pass
[1, 2, 3][:2]
@eye
def error():
raise ValueError()
class NormalClass(object):
def __init__(self):
self.x = 1
def __repr__(self):
return '<A>'
class SlotClass(object):
__slots__ = ('slot1',)
def __init__(self):
self.slot1 = 3
def __repr__(self):
return '<B>'
call_id = SharedCounter()
def call_id_mock(*_):
return 'test_id_%s' % call_id.increment()
eye._call_id = call_id_mock
def get_call_ids(func):
start_id = call_id.value + 1
func()
end_id = call_id.value + 1
return ['test_id_%s' % i for i in range(start_id, end_id)]
def hydrate(call):
str(call.function.name)
return copy(call)
# Do this here to make call ids consistent
golden_calls = {
name: [hydrate(Session().query(Call).filter_by(id=c_id).one())
for c_id in get_call_ids(lambda: import_module('test_scripts.' + name))]
for name in ('gold', 'traced')
}
CallStuff = namedtuple('CallStuff', 'call, soup, call_data, func_data')
@eye.db.provide_session
def get_call_stuff(sess, c_id):
call = sess.query(Call).filter_by(id=c_id).one()
# <pre> makes it preserve whitespace
soup = BeautifulSoup('<pre>' + call.function.html_body + '</pre>', 'html.parser')
call_data = normalise_call_data(call.data)
func_data = json.loads(call.function.data)
return CallStuff(copy(call), soup, call_data, func_data)
def byteify(x):
"""
This converts unicode objects to plain str so that the diffs in test failures
aren't filled with false differences where there's a u prefix.
"""
if PY3:
return x
# noinspection PyUnresolvedReferences
if isinstance(x, dict):
return dict((byteify(key), byteify(value)) for key, value in x.items())
elif isinstance(x, list):
return [byteify(element) for element in x]
elif isinstance(x, unicode):
return x.encode('utf-8')
else:
return x
def normalise_call_data(call_data):
"""
Replace type indices with type names.
Sort type_names.
:type call_data: str
:rtype: dict
"""
data = byteify(json.loads(call_data))
types = data['type_names']
def fix(x):
if isinstance(x, dict):
return dict((key, fix(value)) for key, value in x.items())
elif isinstance(x, list):
result = [x[0]]
type_index = x[1]
if type_index < 0:
assert type_index in (-1, -2)
result.append(type_index)
else:
result.append(types[type_index])
result.append(x[2])
for y in x[3:]:
y[1] = fix(y[1])
result.append(y)
return result
else:
return x
    data['node_values'] = fix(data['node_values'])
data['type_names'].sort()
return data
class TestBirdsEye(unittest.TestCase):
maxDiff = None
def test_stuff(self):
call_ids = get_call_ids(foo)
call, soup, call_data, func_data = get_call_stuff(call_ids[0])
node_values = call_data['node_values']
actual_values = {'expr': {}, 'stmt': {}, 'loop': {}}
loops = {}
actual_node_loops = {}
for span in soup('span'):
index = span['data-index']
if index not in node_values:
continue
if 'loop' in span['class']:
data_type = 'loop'
elif 'stmt' in span['class']:
data_type = 'stmt'
else:
data_type = 'expr'
text = span.text.strip()
actual_values[data_type][text] = node_values[index]
if data_type == 'loop':
loops[text.split()[1]] = index
this_node_loops = func_data['node_loops'].get(index)
if this_node_loops:
actual_node_loops[text] = [str(x) for x in this_node_loops]
def func_value(f):
result = [repr(f), 'function', {}] # type: list
if PY3:
result.append(['__wrapped__', [repr(f.__wrapped__), 'function', {}]])
return result
s = ['', -2, {}]
expected_values = {
'expr': {
'x': ['1', 'int', {}],
'y': ['2', 'int', {}],
'x + y': ['3', 'int', {}],
'x + y > 5': ['False', 'bool', {}],
'x * y': ['2', 'int', {}],
'2 / 0': ['ZeroDivisionError: division by zero', -1, {}],
'bar': func_value(bar),
'error': func_value(error),
'bar()': ['None', 'NoneType', {'inner_calls': [call_ids[1]]}],
'x + x': ['2', 'int', {}],
'x - y': ['-1', 'int', {}],
'i': {'0': {'0': ['1', 'int', {}],
'1': ['1', 'int', {}]},
'1': {'0': ['2', 'int', {}],
'1': ['2', 'int', {}]}},
'i + j': {'0': {'0': ['4', 'int', {}],
'1': ['5', 'int', {}]},
'1': {'0': ['5', 'int', {}],
'1': ['6', 'int', {}]}},
'j': {'0': {'0': ['3', 'int', {}],
'1': ['4', 'int', {}]},
'1': {'0': ['3', 'int', {}],
'1': ['4', 'int', {}]}},
'k': {'0': {'0': ['5', 'int', {}]},
'1': {'0': ['5', 'int', {}]}},
'[1, 2, 3][:2]': ['[1, 2]',
'list',
{'len': 2},
['0', ['1', 'int', {}]],
['1', ['2', 'int', {}]]],
# These are the values of z as in z ** z, not z < 2
'z': {'0': ['1', 'int', {}],
'1': ['2', 'int', {}]},
'z ** z': {'0': ['1', 'int', {}],
'1': ['4', 'int', {}]},
'z < 2': {'0': ['True', 'bool', {}],
'1': ['True', 'bool', {}],
'2': ['False', 'bool', {}]},
'[n for n in [1, 2]]': ['[1, 2]', 'list',
{'len': 2},
['0', ['1', 'int', {}]],
['1', ['2', 'int', {}]]],
'n': {'0': ['1', 'int', {}],
'1': ['2', 'int', {}]},
"{'list': [n for n in [1, 2]]}":
["{'list': [1, 2]}", 'dict',
{'len': 1},
["'list'",
['[1, 2]', 'list',
{'len': 2},
['0', ['1', 'int', {}]],
['1', ['2', 'int', {}]]]]],
'error()': ['ValueError', -1, {'inner_calls': [call_ids[2]]}],
},
'stmt': {
'x = 1': s,
'y = 2': s,
'[1, 2, 3][:2]': s,
'''
if x + y > 5:
1 / 0
else:
x * y
'''.strip(): s,
'x * y': s,
'''
try:
bar(x + x, 2 / 0, y + y)
foo
except ZeroDivisionError:
x - y
'''.strip(): s,
'bar(x + x, 2 / 0, y + y)': s,
'x - y': s,
'i + j': {'0': {'0': s, '1': s}, '1': {'0': s, '1': s}},
'k': {'0': {'0': s}, '1': {'0': s}},
'bar()': s,
"{'list': [n for n in [1, 2]]}": s,
'error()': s,
'''
try:
error()
except ValueError:
pass
'''.strip(): s,
'pass': s,
'z ** z': {'0': s, '1': s},
'z += 1': {'0': s, '1': s},
'z = 0': s,
},
'loop': {
'''
for i in [1, 2]:
for j in [3, 4]:
i + j
for k in [5]:
k
'''.strip(): s,
'''
for j in [3, 4]:
i + j
'''.strip(): {'0': s, '1': s},
'''
for k in [5]:
k
'''.strip(): {'0': s, '1': s},
'for n in [1, 2]': s,
'''
while z < 2:
z += 1
z ** z
'''.strip(): s,
}
}
self.assertEqual(byteify(actual_values), expected_values)
expected_node_loops = {
'z': [loops['z']],
'z ** z': [loops['z']],
'z += 1': [loops['z']],
'z < 2': [loops['z']],
'i + j': [loops['i'], loops['j']],
'i': [loops['i'], loops['j']],
'j': [loops['i'], loops['j']],
'k': [loops['i'], loops['k']],
'''
for j in [3, 4]:
i + j
'''.strip(): [loops['i']],
'''
for k in [5]:
k
'''.strip(): [loops['i']],
'n': [loops['n']]
}
self.assertEqual(actual_node_loops, expected_node_loops)
def test_comprehension_loops(self):
# noinspection PyUnusedLocal
@eye
def f():
# @formatter:off
for i in ([([x for x in [] for y in []], [x for x in [] for y in []]) for x in [x for x in [] for y in []] for y in [x for x in [] for y in []]], [([x for x in [] for y in []], [x for x in [] for y in []]) for x in [x for x in [] for y in []] for y in [x for x in [] for y in []]]):
pass
# @formatter:on
soup = get_call_stuff(get_call_ids(f)[0]).soup
for line in str(soup).splitlines():
self.assertTrue(line.count('for') in (0, 1))
def test_expansion(self):
@eye
def f():
x = {'t': [(7, 8, [9, 10]), NormalClass(), SlotClass(), "Hello World!" * 50]}
len(x)
stuff = get_call_stuff(get_call_ids(f)[0])
value = [x for x in stuff.call_data['node_values'].values()
if isinstance(x, list) and
"'t': " in x[0]
][0]
self.assertEqual(
value,
["{'t': [(7, 8, [...]), <A>, <B>, 'Hello World!H...d!Hello World!']}",
'dict', {'len': 1},
["'t'", ["[(7, 8, [9, 10]), <A>, <B>, 'Hello World!H...d!Hello World!']",
'list', {'len': 4},
['0', ['(7, 8, [9, 10])',
'tuple', {'len': 3},
['0', ['7', 'int', {}]],
['1', ['8', 'int', {}]],
['2', ['[9, 10]', 'list', {'len': 2}]]]],
['1', ['<A>', 'NormalClass', {},
['x', ['1', 'int', {}]]]],
['2', ['<B>', 'SlotClass', {},
['slot1', ['3', 'int', {}]]]],
['3', ["'Hello World!H...d!Hello World!'",
'str', {'len': 600}]]]]])
def test_against_files(self):
@register_repr(weakref.ref)
def repr_weakref(*_):
return '<weakref>'
def normalise_addresses(string):
return re.sub(r'at 0x\w+>', 'at 0xABC>', string)
for name, calls in golden_calls.items():
data = [dict(
arguments=byteify(json.loads(normalise_addresses(call.arguments))),
return_value=byteify(normalise_addresses(str(call.return_value))),
exception=call.exception,
traceback=call.traceback,
data=normalise_call_data(normalise_addresses(call.data)),
function=dict(
name=byteify(call.function.name),
html_body=byteify(call.function.html_body),
lineno=call.function.lineno,
data=byteify(json.loads(call.function.data)),
),
) for call in calls]
version = PYPY * 'pypy' + sys.version[:3]
path = os.path.join(os.path.dirname(__file__), 'golden-files', version, name + '.json')
if os.getenv("FIX_TESTS"):
with open(path, 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
else:
self.assertEqual(data, byteify(file_to_json(path)))
def test_decorate_class(self):
with self.assertRaises(TypeError) as e:
# noinspection PyUnusedLocal
@eye
class Testclass(object):
def barz(self):
return 'class decorator test'
self.assertEqual(str(e.exception),
'Decorating classes is no longer supported')
@skipUnless(PY2, 'Nested arguments are only possible in Python 2')
def test_nested_arguments(self):
# Python 3 sees nested arguments as a syntax error, so I can't
# define the function here normally
# birdseye requires a source file so I can't just use exec
# The file can't just live there because then the test runner imports it
path = os.path.join(os.path.dirname(__file__),
'nested_arguments.py')
string_to_file(
"""
def f((x, y), z):
return x, y, z
""",
path)
try:
from tests.nested_arguments import f
f = eye(f)
call = get_call_stuff(get_call_ids(lambda: f((1, 2), 3))[0]).call
self.assertEqual(call.arguments, '[["x", "1"], ["y", "2"], ["z", "3"]]')
self.assertEqual(call.result, "(1, 2, 3)")
finally:
os.remove(path)
@skipUnless(PY2, 'Division future import only changes things in Python 2')
def test_future_imports(self):
from tests.future_tests import with_future, without_future
self.assertEqual(with_future.foo(), eye(with_future.foo)())
self.assertEqual(without_future.foo(), eye(without_future.foo)())
def test_expand_exceptions(self):
expand = partial(NodeValue.expression, eye.num_samples)
class A(object):
def __len__(self):
assert 0
with self.assertRaises(AssertionError):
len(A())
self.assertIsNone(expand(A(), 1).meta)
self.assertEqual(expand([4, 4, 4], 1).meta['len'], 3)
class FakeSet(Set):
def __len__(self):
return 0
def __iter__(self):
pass
def __contains__(self, x):
pass
class B(FakeSet):
def __iter__(self):
assert 0
with self.assertRaises(AssertionError):
list(B())
self.assertIsNone(expand(B(), 1).children)
class C(FakeSet):
def __iter__(self):
yield 1
yield 2
assert 0
def children_keys(cls):
return [k for k, _ in expand(cls(), 1).children]
with self.assertRaises(AssertionError):
list(C())
self.assertEqual(children_keys(C), ['<0>', '<1>'])
class D(object):
def __init__(self):
self.x = 3
self.y = 4
def __getattribute__(self, item):
assert item not in ['x', 'y']
return object.__getattribute__(self, item)
with self.assertRaises(AssertionError):
str(D().x)
# expand goes through __dict__ so x and y are reachable
self.assertEqual(sorted(children_keys(D)), ['x', 'y'])
class E(Mapping):
def __len__(self):
return 0
def __getitem__(self, key):
assert 0
def __iter__(self):
yield 4
with self.assertRaises(AssertionError):
list(E().items())
self.assertIsNone(expand(E(), 1).children)
def test_is_interesting_expression(self):
def check(s):
return is_interesting_expression(ast.parse(s, mode='eval').body)
self.assertFalse(check('1'))
self.assertFalse(check('-1'))
self.assertTrue(check('-1-3'))
self.assertFalse(check('"abc"'))
self.assertTrue(check('abc'))
self.assertFalse(check('[]'))
self.assertFalse(check('[1, 2]'))
self.assertFalse(check('[1, 2, "abc"]'))
self.assertFalse(check('[[[]]]'))
self.assertFalse(check('{}'))
self.assertFalse(check('{1:2}'))
self.assertFalse(check('["abc", 1, [2, {7:3}, {}, {3:[5, ["lkj"]]}]]'))
self.assertTrue(check('["abc", 1+3, [2, {7:3}, {}, {3:[5, ["lkj"]]}]]'))
def test_is_obvious_builtin(self):
def check(s, value):
return is_obvious_builtin(ast.parse(s, mode='eval').body, value)
self.assertTrue(check('len', len))
self.assertTrue(check('max', max))
self.assertTrue(check('True', True))
self.assertTrue(check('False', False))
self.assertTrue(check('None', None))
self.assertFalse(check('len', max))
self.assertFalse(check('max', len))
self.assertFalse(check('0', False))
self.assertFalse(check('not True', False))
if PY2:
self.assertFalse(check('None', False))
def test_tracing_magic_methods(self):
class A(object):
@eye
def __repr__(self):
return '%s(label=%r)' % (self.__class__.__name__, self.label)
@eye
def __getattr__(self, item):
return item
@eye
def __getattribute__(self, item):
return object.__getattribute__(self, item)
@eye
def __len__(self):
return self.length
a = A()
a.label = 'hello'
a.length = 3
@eye
def test_A():
self.assertEqual(a.label, 'hello')
self.assertEqual(a.length, 3)
self.assertEqual(a.thing, 'thing')
self.assertEqual(repr(a), "A(label='hello')")
test_A()
def test_unicode(self):
@eye
def f():
return u'é'
self.assertEqual(f(), u'é')
def test_optional_eye(self):
@eye(optional=True)
def f(x):
return x * 3
call_stuff = get_call_stuff(get_call_ids(lambda: f(2, trace_call=True))[0])
self.assertEqual(call_stuff.call.result, '6')
call = eye.enter_call
eye.enter_call = lambda *args, **kwargs: 1 / 0
try:
self.assertEqual(f(3, trace_call=False), 9)
self.assertEqual(f(4), 12)
finally:
eye.enter_call = call
def test_first_check(self):
def deco(f):
f.attr = 3
return f
# Correct order, everything fine
@deco
@eye
def baz():
pass
with self.assertRaises(ValueError):
# @eye should notice it was applied second
# because of __wrapped__ attribute
@eye
@deco
def baz():
pass
def test_concurrency(self):
ids = get_call_ids(lambda: ThreadPool(5).map(sleepy, range(25)))
results = [int(get_call_stuff(i).call.result) for i in ids]
self.assertEqual(sorted(results), list(range(0, 50, 2)))
def test_middle_iterations(self):
@eye
def f():
for i in range(20):
for j in range(20):
if i == 10 and j >= 12:
str(i + 1)
stuff = get_call_stuff(get_call_ids(f)[0])
iteration_list = only(stuff.call_data['loop_iterations'].values())
indexes = [i['index'] for i in iteration_list]
self.assertEqual(indexes, [0, 1, 2, 10, 17, 18, 19])
iteration_list = only(iteration_list[3]['loops'].values())
indexes = [i['index'] for i in iteration_list]
self.assertEqual(indexes, [0, 1, 2, 12, 13, 17, 18, 19])
@classmethod
def tearDownClass(cls):
assert not eye.stack, eye.stack
assert not eye.main_to_secondary_frames, eye.main_to_secondary_frames
assert not eye.secondary_to_main_frames, eye.secondary_to_main_frames
@eye
def sleepy(x):
sleep(random.random())
return x * 2
if __name__ == '__main__':
unittest.main()
|
alexmojaki/birdseye
|
tests/test_birdseye.py
|
Python
|
mit
| 21,909
|
import json
from copy import copy
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_text
from restify.http import status
class PostInBodyMiddleware(MiddlewareMixin):
def _is_json_body_request(self, request):
return len(request.body) and\
'application/json' in request.META['CONTENT_TYPE']
def process_request(self, request):
if not self._is_json_body_request(request):
return
try:
body = json.loads(force_text(request.body))
            # Merge the parsed JSON body into a copy of request.POST so that
            # any existing form data is preserved alongside the body fields.
            post = copy(request.POST)
            post.update(body)
            request.POST = post
except ValueError:
resp = HttpResponse()
resp.status_code = status.HTTP_400_BAD_REQUEST
return resp
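# --- usage sketch (assumed project settings, not part of this module) ------
# The middleware is enabled like any other Django middleware; the dotted path
# follows this file's location (restify/middleware.py):
#
#     MIDDLEWARE = [
#         # ... Django's default middleware ...
#         'restify.middleware.PostInBodyMiddleware',
#     ]
#
# With it in place, a POST whose Content-Type is application/json gets its
# JSON body merged into request.POST, and malformed JSON yields a 400.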
|
lovasb/django-restify
|
restify/middleware.py
|
Python
|
mit
| 862
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1302010047.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
biomodels/MODEL1302010047
|
MODEL1302010047/model.py
|
Python
|
cc0-1.0
| 427
|
import numpy as np
import numpy.ma as ma
from armor import pattern
def gaussianFilter(a, sigma=20, newCopy=False):
"""
#adapted from armor.pattern
returns a dbz object
2014-03-07
"""
from scipy import ndimage
a1 = a.copy()
a1.matrix = ndimage.filters.gaussian_filter(a.matrix, sigma)
a1.matrix = ma.array(a1.matrix, fill_value=-999.)
a1.matrix.mask = np.zeros(a1.matrix.shape)
a1.name = a.name + "gaussian-sigma" + str(sigma)
a1.imagePath = a.imagePath[:-4] + "gaussian-sigma" + str(sigma) + a.imagePath[-4:] #hack
a1.outputPath = a.outputPath[:-4] + "gaussian-sigma" + str(sigma) + a.outputPath[-4:] #hack
mx = a1.matrix.max()
mn = a1.matrix.min()
#a1.vmax = mx + (mx-mn)*0.2 # to avoid red top # replaced by lines below 2014-02-20
#a1.vmin = mn
a1.matrix.mask = (a1.matrix< a.missingDataThreshold)
a1.vmax = a.vmax
a1.vmin = a.vmin
if newCopy:
return a1
else:
a.matrix = a1.matrix
return a
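# --- sketch of the underlying operation (no armor.pattern object required) --
# gaussianFilter is a thin wrapper around scipy.ndimage's gaussian_filter that
# re-masks values below the missing-data threshold; the core step is just:
if __name__ == "__main__":
    import numpy as np
    import numpy.ma as ma
    from scipy import ndimage
    field = np.random.rand(100, 100) * 60.0  # stand-in for a radar dBZ grid
    smoothed = ndimage.gaussian_filter(field, sigma=20)
    smoothed = ma.array(smoothed, fill_value=-999.)
    smoothed.mask = smoothed < 10.0  # assumed missing-data threshold
    print(smoothed.min())
    print(smoothed.max())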
|
yaukwankiu/armor
|
filter/filters.py
|
Python
|
cc0-1.0
| 1,054
|
#!/usr/bin/env python3
'''
Weather function
Gives a the text weather forecast. Defaults to today if no argument given. So "tomorrow" is really the only useful argument.
example input: weather_function("tomorrow"):
output: Saturday: Mainly sunny. High 86F. Winds WSW at 5 to 10 mph. Saturday Night: Clear. Low 63F. Winds N at 5 to 10 mph.
to do:
save api key elsewhere...
requires:
python3
'''
import re
import urllib.request, json
file = open("weatherapikey.txt")
api_key = file.read()
file.close()
# The chopping block takes off parts that make it too long. Ideally shortening it to a single text.
def chopping_block(text):
if len(text) > 55:
# winds are pretty local so this is the first to be removed for brevity...
text = re.sub(" Winds.+?\.", '', text)
if len(text) > 55:
# the first sentence always talks about clouds, which aren't too important to me either.
text = re.sub("^.+?\. ", '', text)
return text
# The weather function gives today's weather or tomorrow's if you specify "tom"
# returns specified day's weather and gives current temp.
def weather_function(input_str):
day = "today"
if len(input_str.split(" ")) > 1:
day = input_str.split(" ")[1]
if day[:3] == "tom":
start = 2
else:
start = 0
weather_url = "http://api.wunderground.com/api/"+api_key+"/forecast10day/q/WA/Seattle.json"
current_temp_url = "http://api.wunderground.com/api/"+api_key+"/conditions/q/WA/Seattle.json"
with urllib.request.urlopen(weather_url) as url:
parsed_json = json.loads(url.read().decode())
title = day.capitalize()
txt = parsed_json['forecast']['txt_forecast']['forecastday'][start]['fcttext']
try:
title2 = parsed_json['forecast']['txt_forecast']['forecastday'][start+1]['title'].split(" ")[1]
except:
title2 = parsed_json['forecast']['txt_forecast']['forecastday'][start+1]['title']
txt2 = parsed_json['forecast']['txt_forecast']['forecastday'][start+1]['fcttext']
with urllib.request.urlopen(current_temp_url) as url2:
parsed_json2 = json.loads(url2.read().decode())
current_temp = parsed_json2['current_observation']['temp_f']
current_temp = int(current_temp)
txt = chopping_block(txt)
txt2 = chopping_block(txt2)
full_forecast = "{}f. {}: {}\n{}: {}".format(current_temp, title, txt, title2, txt2)
return full_forecast
|
HarryMaher/dumbphone
|
weather.py
|
Python
|
epl-1.0
| 2,454
|
"""
Library for the robot based system test tool of the OpenDaylight project.
Authors: Baohua Yang@IBM, Denghui Huang@IBM
Updated: 2013-11-14
"""
import collections
'''
Common constants and functions for the robot framework.
'''
def collection_should_contain(collection, *members):
"""
Fail if not every members is in the collection.
"""
if not isinstance(collection, collections.Iterable):
return False
for m in members:
if m not in collection:
return False
else:
return True
def combine_strings(*strings):
"""
Combines the given `strings` together and returns the result.
The given strings are not altered by this keyword.
"""
result = ''
for s in strings:
if isinstance(s,str) or isinstance(s,unicode):
result += s
if result == '':
return None
else:
return result
if __name__ == '__main__':
pass
|
yeasy/robot_tool
|
libraries/Common.py
|
Python
|
epl-1.0
| 935
|
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# dpms.py - Manage DPMS settings for X displays
# -----------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2007 Dirk Meyer, et al.
#
# First Edition: Dirk Meyer <https://github.com/Dischi>
# Maintainer: Dirk Meyer <https://github.com/Dischi>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ----------------------------------------------------------------------- */
# python imports
import os
# kaa imports
import kaa.utils
import kaa
# freevo imports
from .. import core as freevo
# blanking modes
OFF, AUTO, USER = range(3)
class PluginInterface(freevo.Plugin):
def plugin_activate(self, level):
if not os.environ.get('DISPLAY') or not kaa.utils.which('xset'):
return
# get xset process to call
self.xset = kaa.Process(kaa.utils.which('xset')).start
self.counter = 0
self._mode = OFF
        # Timer to poll and increase counter. It will be started when the
        # menu is shown.
self.timer = kaa.Timer(self.poll)
# register to all events
kaa.EventHandler(self.eventhandler).register()
# turn on dpms on shutdown
kaa.main.signals['shutdown'].connect(self.xset, '+dpms')
# register to application changes
freevo.signals['application-change'].connect(self.application_changed)
# turn off dpms
self.xset('-dpms s off')
def poll(self):
"""
Poll function called every minute to check for timeout.
"""
self.counter += 1
if self.counter == freevo.config.plugin.dpms.timeout:
# timeout, force dpms and turn off the monitor
self._mode = AUTO
self.xset('dpms force off')
# stop poll timer
self.timer.stop()
def application_changed(self, app):
"""
Callback on application changes.
"""
if app.name == 'menu' and self._mode == OFF:
# menu is shown, start timer
self.timer.start(60)
self.counter = 0
else:
# something else is shown, stop timer
self.timer.stop()
def eventhandler(self, event):
"""
Handle events from Freevo.
"""
if event.source == 'user':
# user generated event (key/button), reset timeout counter
self.counter = 0
if (event.source == 'user' and self._mode == AUTO) or \
(self._mode == USER and event == freevo.DPMS_BLANK_SCREEN):
# screen is blank right now, restore it
self._mode = OFF
self.xset('dpms force on s reset')
kaa.OneShotTimer(self.xset, '-dpms s off').start(1)
self.timer.start(60)
return
if event == freevo.DPMS_BLANK_SCREEN:
# event to turn off the monitor
self._mode = USER
self.xset('dpms force off')
self.timer.stop()
|
freevo/freevo2
|
src/plugins/dpms.py
|
Python
|
gpl-2.0
| 3,882
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Comment'
db.create_table(u'article_comment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('body', self.gf('django.db.models.fields.TextField')()),
('pub_date', self.gf('django.db.models.fields.DateTimeField')()),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['article.Article'])),
))
db.send_create_signal(u'article', ['Comment'])
def backwards(self, orm):
# Deleting model 'Comment'
db.delete_table(u'article_comment')
models = {
u'article.article': {
'Meta': {'object_name': 'Article'},
'body': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'article.comment': {
'Meta': {'object_name': 'Comment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['article.Article']"}),
'body': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['article']
|
websbydrew/django-drew
|
django_demosite/article/migrations/0002_auto__add_comment.py
|
Python
|
gpl-2.0
| 2,064
|
import json
import listenbrainz.db.user as db_user
import listenbrainz.db.feedback as db_feedback
from redis import Redis
from flask import url_for, current_app
from listenbrainz.db.model.feedback import Feedback
from listenbrainz.tests.integration import IntegrationTestCase
class FeedbackAPITestCase(IntegrationTestCase):
def setUp(self):
super(FeedbackAPITestCase, self).setUp()
self.user = db_user.get_or_create(1, "testuserpleaseignore")
self.user2 = db_user.get_or_create(2, "anothertestuserpleaseignore")
def tearDown(self):
r = Redis(host=current_app.config['REDIS_HOST'], port=current_app.config['REDIS_PORT'])
r.flushall()
super(FeedbackAPITestCase, self).tearDown()
def insert_test_data(self, user_id):
sample_feedback = [
{
"recording_msid": "d23f4719-9212-49f0-ad08-ddbfbfc50d6f",
"score": 1
},
{
"recording_msid": "222eb00d-9ead-42de-aec9-8f8c1509413d",
"score": -1
}
]
for fb in sample_feedback:
db_feedback.insert(
Feedback(
user_id=user_id,
recording_msid=fb["recording_msid"],
score=fb["score"]
)
)
return sample_feedback
def test_recording_feedback(self):
""" Test for submission of valid feedback """
feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 1
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert200(response)
self.assertEqual(response.json["status"], "ok")
def test_recording_feedback_unauthorised_submission(self):
""" Test for checking that unauthorized submissions return 401 """
feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 1
}
# request with no authorization header
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(feedback),
content_type="application/json"
)
self.assert401(response)
self.assertEqual(response.json["code"], 401)
# request with invalid authorization header
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(feedback),
headers={"Authorization": "Token testtokenplsignore"},
content_type="application/json"
)
self.assert401(response)
self.assertEqual(response.json["code"], 401)
def test_recording_feedback_json_with_missing_keys(self):
""" Test for checking that submitting JSON with missing keys returns 400 """
# submit a feedback without recording_msid key
incomplete_feedback = {
"score": 1
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(incomplete_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["error"], "JSON document must contain recording_msid and "
"score top level keys")
# submit a feedback without score key
incomplete_feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(incomplete_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["error"], "JSON document must contain recording_msid and "
"score top level keys")
# submit an empty feedback
empty_feedback = {}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(empty_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["error"], "JSON document must contain recording_msid and "
"score top level keys")
def test_recording_feedback_json_with_extra_keys(self):
""" Test to check submitting JSON with extra keys returns 400 """
invalid_feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 1,
"extra_key": "testvalueplsignore"
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(invalid_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["error"], "JSON document may only contain recording_msid and "
"score top level keys")
def test_recording_feedback_invalid_values(self):
""" Test to check submitting invalid values in JSON returns 400 """
# submit feedback with invalid recording_msid
invalid_feedback = {
"recording_msid": "invalid_recording_msid",
"score": 1
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(invalid_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["code"], 400)
# submit feedback with invalid score
invalid_feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 5
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(invalid_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["code"], 400)
# submit feedback with invalid recording_msid and score
invalid_feedback = {
"recording_msid": "invalid_recording_msid",
"score": 5
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(invalid_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert400(response)
self.assertEqual(response.json["code"], 400)
def test_recording_feedback_update_score(self):
"""
Test to check that score gets updated when a user changes feedback score for a recording_msid
i.e love to hate or vice-versa
"""
# submit a feedback with score = 1
feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 1
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert200(response)
self.assertEqual(response.json["status"], "ok")
result = db_feedback.get_feedback_for_user(self.user["id"], limit=25, offset=0)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].user_id, self.user["id"])
self.assertEqual(result[0].recording_msid, feedback["recording_msid"])
self.assertEqual(result[0].score, feedback["score"])
# submit an updated feedback for the same recording_msid with new score = -1
updated_feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": -1
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(updated_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert200(response)
self.assertEqual(response.json["status"], "ok")
# check that the record gets updated
result = db_feedback.get_feedback_for_user(self.user["id"], limit=25, offset=0)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].user_id, self.user["id"])
self.assertEqual(result[0].recording_msid, updated_feedback["recording_msid"])
self.assertEqual(result[0].score, updated_feedback["score"])
def test_recording_feedback_delete_when_score_is_zero(self):
"""
Test to check that the feedback record gets deleted when a user removes feedback for a recording_msid
by submitting a score = 0
"""
# submit a feedback with score = 1
feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 1
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert200(response)
self.assertEqual(response.json["status"], "ok")
result = db_feedback.get_feedback_for_user(self.user["id"], limit=25, offset=0)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].user_id, self.user["id"])
self.assertEqual(result[0].recording_msid, feedback["recording_msid"])
self.assertEqual(result[0].score, feedback["score"])
# submit an updated feedback for the same recording_msid with new score = 0
updated_feedback = {
"recording_msid": "7babc9be-ca2b-4544-b932-7c9ab38770d6",
"score": 0
}
response = self.client.post(
url_for("feedback_api_v1.recording_feedback"),
data=json.dumps(updated_feedback),
headers={"Authorization": "Token {}".format(self.user["auth_token"])},
content_type="application/json"
)
self.assert200(response)
self.assertEqual(response.json["status"], "ok")
# check that the record gets deleted
result = db_feedback.get_feedback_for_user(self.user["id"], limit=25, offset=0)
self.assertEqual(len(result), 0)
def test_get_feedback_for_user(self):
""" Test to make sure valid response is received """
inserted_rows = self.insert_test_data(self.user["id"])
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user", user_name=self.user["musicbrainz_id"]))
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 2)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 2)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], inserted_rows[1]["recording_msid"])
self.assertEqual(feedback[0]["score"], inserted_rows[1]["score"])
self.assertEqual(feedback[1]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[1]["recording_msid"], inserted_rows[0]["recording_msid"])
self.assertEqual(feedback[1]["score"], inserted_rows[0]["score"])
def test_get_feedback_for_user_invalid_user(self):
""" Test to make sure that the API sends 404 if user does not exist. """
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user", user_name="nouser"))
self.assert404(response)
self.assertEqual(response.json["error"], "Cannot find user: nouser")
def test_get_feedback_for_user_with_score_param(self):
""" Test to make sure valid response is received when score param is passed """
inserted_rows = self.insert_test_data(self.user["id"])
# pass score = 1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"score": 1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 1)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 1)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], inserted_rows[0]["recording_msid"])
self.assertEqual(feedback[0]["score"], 1)
# pass score = -1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"score": -1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 1)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 1)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], inserted_rows[1]["recording_msid"])
self.assertEqual(feedback[0]["score"], -1)
def test_get_feedback_for_user_with_invalid_score_param(self):
""" Test to make sure 400 response is received if score argument is not valid """
inserted_rows = self.insert_test_data(self.user["id"])
# pass non-int value to score
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"score": "invalid_score"})
self.assert400(response)
self.assertEqual(response.json["error"], "Invalid score argument: invalid_score")
# pass invalid int value to score
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"score": 10})
self.assert400(response)
self.assertEqual(response.json["error"], "Score can have a value of 1 or -1.")
def test_get_feedback_for_user_with_count_param(self):
""" Test to make sure valid response is received when count param is passed """
inserted_rows = self.insert_test_data(self.user["id"])
# pass count = 1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"count": 1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 1)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 1)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], inserted_rows[1]["recording_msid"])
self.assertEqual(feedback[0]["score"], inserted_rows[1]["score"])
def test_get_feedback_for_user_with_invalid_count_param(self):
""" Test to make sure 400 response is received if count argument is not valid """
inserted_rows = self.insert_test_data(self.user["id"])
# pass non-int value to count
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"count": "invalid_count"})
self.assert400(response)
self.assertEqual(response.json["error"], "'count' should be a non-negative integer")
# pass negative int value to count
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"count": -1})
self.assert400(response)
self.assertEqual(response.json["error"], "'count' should be a non-negative integer")
def test_get_feedback_for_user_with_offset_param(self):
""" Test to make sure valid response is received when offset param is passed """
inserted_rows = self.insert_test_data(self.user["id"])
        # pass offset = 1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"offset": 1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 1)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 1)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 1)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], inserted_rows[0]["recording_msid"])
self.assertEqual(feedback[0]["score"], inserted_rows[0]["score"])
def test_get_feedback_for_user_with_invalid_offset_param(self):
""" Test to make sure 400 response is received if offset argument is not valid """
inserted_rows = self.insert_test_data(self.user["id"])
# pass non-int value to offset
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"offset": "invalid_offset"})
self.assert400(response)
self.assertEqual(response.json["error"], "'offset' should be a non-negative integer")
# pass negative int value to offset
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"offset": -1})
self.assert400(response)
self.assertEqual(response.json["error"], "'offset' should be a non-negative integer")
def test_get_feedback_for_recording(self):
""" Test to make sure valid response is received """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1))
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 2)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 2)
self.assertEqual(feedback[0]["user_id"], self.user2["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], rec_msid_1)
self.assertEqual(feedback[0]["score"], inserted_rows[0]["score"])
self.assertEqual(feedback[1]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[1]["recording_msid"], rec_msid_1)
        self.assertEqual(feedback[1]["score"], inserted_rows[0]["score"])
def test_get_feedback_for_recording_invalid_recording_msid(self):
""" Test to make sure that the API sends 404 if recording_msid is invalid. """
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording", recording_msid="invalid_recording_msid"))
self.assert400(response)
self.assertEqual(response.json["error"], "invalid_recording_msid MSID format invalid.")
def test_get_feedback_for_recording_with_score_param(self):
""" Test to make sure valid response is received when score param is passed """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
# pass score = 1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"score": 1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 2)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 2)
self.assertEqual(feedback[0]["user_id"], self.user2["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], rec_msid_1)
self.assertEqual(feedback[0]["score"], inserted_rows[0]["score"])
self.assertEqual(feedback[1]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[1]["recording_msid"], rec_msid_1)
        self.assertEqual(feedback[1]["score"], inserted_rows[0]["score"])
# pass score = -1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"score": -1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 0)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 0)
def test_get_feedback_for_recording_with_invalid_score_param(self):
""" Test to make sure 400 response is received if score argument is not valid """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
# pass non-int value to score
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"score": "invalid_score"})
self.assert400(response)
self.assertEqual(response.json["error"], "Invalid score argument: invalid_score")
# pass invalid int value to score
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"score": 10})
self.assert400(response)
self.assertEqual(response.json["error"], "Score can have a value of 1 or -1.")
def test_get_feedback_for_recording_with_count_param(self):
""" Test to make sure valid response is received when count param is passed """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
# pass count = 1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"count": 1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 1)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 0)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 1)
self.assertEqual(feedback[0]["user_id"], self.user2["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], rec_msid_1)
self.assertEqual(feedback[0]["score"], inserted_rows[0]["score"])
def test_get_feedback_for_recording_with_invalid_count_param(self):
""" Test to make sure 400 response is received if count argument is not valid """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
# pass non-int value to count
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"count": "invalid_count"})
self.assert400(response)
self.assertEqual(response.json["error"], "'count' should be a non-negative integer")
# pass negative int value to count
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"count": -1})
self.assert400(response)
self.assertEqual(response.json["error"], "'count' should be a non-negative integer")
def test_get_feedback_for_recording_with_offset_param(self):
""" Test to make sure valid response is received when offset param is passed """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
        # pass offset = 1
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"offset": 1})
self.assert200(response)
data = json.loads(response.data)
self.assertEqual(data["count"], 1)
self.assertEqual(data["total_count"], len(inserted_rows))
self.assertEqual(data["offset"], 1)
feedback = data["feedback"] # sorted in descending order of their creation
self.assertEqual(len(feedback), 1)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], rec_msid_1)
self.assertEqual(feedback[0]["score"], inserted_rows[0]["score"])
def test_get_feedback_for_recording_with_invalid_offset_param(self):
""" Test to make sure 400 response is received if offset argument is not valid """
inserted_rows = self.insert_test_data(self.user["id"])
inserted_rows = self.insert_test_data(self.user2["id"])
rec_msid_1 = inserted_rows[0]["recording_msid"]
# pass non-int value to offset
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"offset": "invalid_offset"})
self.assert400(response)
self.assertEqual(response.json["error"], "'offset' should be a non-negative integer")
# pass negative int value to offset
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recording",
recording_msid=rec_msid_1), query_string={"offset": -1})
self.assert400(response)
self.assertEqual(response.json["error"], "'offset' should be a non-negative integer")
def test_get_feedback_for_recordings_for_user(self):
""" Test to make sure valid response is received """
inserted_rows = self.insert_test_data(self.user["id"])
recordings = ""
# recording_msids for which feedback records are inserted
for row in inserted_rows:
recordings += row["recording_msid"] + ","
# recording_msid for which feedback record doesn't exist
non_existing_rec_msid = "b83fd3c3-449c-49be-a874-31d7cf26d946"
recordings += non_existing_rec_msid
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recordings_for_user",
user_name=self.user["musicbrainz_id"]), query_string={"recordings": recordings})
self.assert200(response)
data = json.loads(response.data)
feedback = data["feedback"]
self.assertEqual(len(feedback), 3)
self.assertEqual(feedback[0]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[0]["recording_msid"], inserted_rows[0]["recording_msid"])
self.assertEqual(feedback[0]["score"], inserted_rows[0]["score"])
self.assertEqual(feedback[1]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[1]["recording_msid"], inserted_rows[1]["recording_msid"])
self.assertEqual(feedback[1]["score"], inserted_rows[1]["score"])
self.assertEqual(feedback[2]["user_id"], self.user["musicbrainz_id"])
self.assertEqual(feedback[2]["recording_msid"], non_existing_rec_msid)
self.assertEqual(feedback[2]["score"], 0)
def test_get_feedback_for_recordings_for_user_invalid_user(self):
""" Test to make sure that the API sends 404 if user does not exist. """
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_user", user_name="nouser"))
self.assert404(response)
self.assertEqual(response.json["error"], "Cannot find user: nouser")
def test_get_feedback_for_recordings_for_user_no_recordings(self):
""" Test to make sure that the API sends 400 if param recordings is not passed or is empty. """
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recordings_for_user",
user_name=self.user["musicbrainz_id"])) # missing recordings param
self.assert400(response)
self.assertEqual(response.json["error"], "'recordings' has no valid recording MSID.")
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recordings_for_user",
user_name=self.user["musicbrainz_id"]),
query_string={"recordings": ""}) # empty string
self.assert400(response)
self.assertEqual(response.json["error"], "'recordings' has no valid recording MSID.")
def test_get_feedback_for_recordings_for_user_invalid_recording(self):
""" Test to make sure that the API sends 400 if params recordings has invalid recording_msid. """
inserted_rows = self.insert_test_data(self.user["id"])
recordings = ""
# recording_msids for which feedback records are inserted
for row in inserted_rows:
recordings += row["recording_msid"] + ","
# invalid recording_msid
invalid_rec_msid = "invalid_recording_msid"
recordings += invalid_rec_msid
response = self.client.get(url_for("feedback_api_v1.get_feedback_for_recordings_for_user",
user_name=self.user["musicbrainz_id"]),
query_string={"recordings": recordings}) # recordings has invalid recording_msid
self.assert400(response)
self.assertEqual(response.json["code"], 400)
|
Freso/listenbrainz-server
|
listenbrainz/tests/integration/test_feedback_api.py
|
Python
|
gpl-2.0
| 32,317
|
#!/usr/bin/env python3
import sys
import plistlib
if __name__ == "__main__":
assert sys.version_info[0] == 3
# "advene" "3.4.0"
app_id, app_version = sys.argv[1:]
app_id_lower = app_id.lower()
plist = dict(
CFBundleExecutable=app_id_lower,
CFBundleIconFile="%s.icns" % app_id_lower,
CFBundleIdentifier="org.advene.%s" % app_id,
CFBundleInfoDictionaryVersion="6.0",
CFBundlePackageType="APPL",
CFBundleShortVersionString=app_version,
CFBundleVersion=app_version,
LSMinimumSystemVersion="10.9",
AppleMagnifiedMode=False,
NSHighResolutionCapable=True,
CFBundleSignature="????",
)
plist.update(dict(
CFBundleName="Advene",
CFBundleDocumentTypes=[
dict(
CFBundleTypeExtensions=[
"3g2", "3gp", "3gp2", "669", "aac", "adif",
"adts", "aif", "aifc", "aiff", "amf", "ams",
"ape", "asf", "dsf", "dsm", "far", "flac", "gdm",
"it", "m4a", "m4v", "med", "mid", "mod", "mp+",
"mp1", "mp2", "mp3", "mp4", "mpc", "mpeg",
"mpg", "mt2", "mtm", "oga", "ogg", "oggflac",
"ogv", "okt", "opus", "s3m", "spc", "spx",
"stm", "tta", "ult", "vgm", "wav", "wma",
"wmv", "wv", "xm",
],
CFBundleTypeRole="Viewer",
CFBundleTypeIconFile="advene.icns",
),
],
))
print(plistlib.dumps(plist).decode("utf-8"))
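# Example invocation (illustrative; writing the output to Info.plist is an
# assumption about how the bundle build uses it, not something this script
# enforces):
#
#   python3 create_info.py advene 3.4.0 > Info.plist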
|
oaubert/advene
|
dev/osx_bundle/misc/create_info.py
|
Python
|
gpl-2.0
| 1,676
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('scout_group', '__first__'),
]
operations = [
migrations.CreateModel(
name='RegistrationProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activation_key', models.CharField(max_length=40, verbose_name='Chave de Ativa\xe7\xe3o')),
('scout_group', models.ForeignKey(verbose_name='Grupo Escoteiro', blank=True, to='scout_group.ScoutGroup', null=True)),
('user', models.ForeignKey(verbose_name='user', to=settings.AUTH_USER_MODEL, unique=True)),
],
options={
'verbose_name': 'Perfil',
'verbose_name_plural': 'Perfis',
},
bases=(models.Model,),
),
]
|
roberzguerra/scout
|
registration/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,089
|
import multiprocessing
import argparse
import os
import perceptron
import codecs
import sys
import StringIO
import tparser
import time
import json
def one_process(model_file_name,g_perceptron,q,q_out,parser_config,no_avg):
"""
g_perceptron - instance of generalized perceptron (not state)
q - queue with examples
job_counter - synchronizes the jobs for printing
parser_config - holds information about the beam size used during training
no_avg - do not use averaged weight vector (so, test_time=False)
"""
if no_avg:
parser=tparser.Parser(model_file_name,gp=g_perceptron,beam_size=parser_config["beam_size"],test_time=False)
else:
parser=tparser.Parser(model_file_name,gp=g_perceptron,beam_size=parser_config["beam_size"],test_time=True)
while True:
        next_job=q.get() #This will be either a (job_number,data) tuple, or None to signal there is no more work
if next_job==None:
break #We're done
job_number,data=next_job
buffIN=StringIO.StringIO(data) #Make the input look like an open file reading unicode
buffOUT=StringIO.StringIO()
parser.parse(buffIN,buffOUT)
#Done, push out
q_out.put((job_number,buffOUT.getvalue().encode("utf-8")))
q_out.put(None) #Signal downstream that we're done
def assemble_results(qout,parsers_alive):
cache={} #{jobid:txt}
counter=0
while True:
if parsers_alive>0:
next_job=qout.get()
if next_job==None:
parsers_alive-=1 #Another one done
else:
job_number,data=next_job
assert job_number not in cache
cache[job_number]=data
#Print everything you can
while counter in cache:
print cache[counter]
del cache[counter]
counter+=1
        if parsers_alive==0 and not cache: #Done
return
def feed_queue(q,inp,max_sent=0):
"""iteration_progress -> progress through the total number of iterations, will be passed on to the parser"""
if inp==None: #Do stdin
data=codecs.getreader("utf-8")(sys.stdin)
else:
data=codecs.open(inp,"rt","utf-8")
job_counter=0
counter=0
### WARNING: comments are not correctly paired with sentences -> if you communicate metadata through comments, this will need to be fixed
current=[] #List of lines waiting to be scheduled
for line in data:
if line.startswith(u"1\t"):
counter+=1
            if counter%5==0: #Split the input into batches of 5 sentences per job
q.put((job_counter,u"".join(current)))
job_counter+=1
current=[]
if max_sent!=0 and counter>=max_sent:
break
current.append(line)
else:
if current:
q.put((job_counter,u"".join(current)))
if inp!=None:
data.close() #Close what you opened
def launch_instances(args):
"""
main() to launch everything
"""
#1) Create the Shared State for perceptron
# TODO: maybe I could have a flag with which I'd check the model exists and load instead?
# ...will overwrite by default anyway
sh_state=perceptron.PerceptronSharedState.load(args.model[0],retrainable=True)
# now load parser configuration to get correct beam size
if not os.path.exists(args.model[0]):
raise ValueError(args.model[0]+": no such model")
with open(os.path.join(args.model[0],"parser_config.json"),"r") as f:
d=json.load(f) #dictionary with parameters
q=multiprocessing.Queue(20) #Queue to pass pieces of the training data to the processes
q_out=multiprocessing.Queue(20) #Queue to pass parsed data to the process which assembles the parsed output
procs=[] #List of running processes
for _ in range(args.processes):
gp=perceptron.GPerceptron.from_shared_state(sh_state) #Fork a new perceptron
p=multiprocessing.Process(target=one_process, args=(os.path.join(args.model[0],"model.pkl"),gp,q,q_out,d,args.no_avg))
p.start()
procs.append(p)
p=multiprocessing.Process(target=assemble_results,args=(q_out,args.processes))
p.start()
procs.append(p)
#All processes started
#...feed the queue with data
feed_queue(q,args.input,args.max_sent)
#Signal end of work to all processes (Thanks @radimrehurek for this neat trick!)
for _ in range(args.processes):
q.put(None)
for p in procs:
p.join() #Wait for the processes to quit
#...and we should be done
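# A minimal sketch of the queue-sentinel shutdown trick used in launch_instances()
# above (illustrative only; the names below are hypothetical and not part of this
# parser): each worker loops on q.get() and stops when it receives None, so the
# feeder simply puts one None per worker once the input is exhausted.
#
#   def _worker(q):
#       while True:
#           item = q.get()
#           if item is None:   # sentinel: no more work for this worker
#               break
#           do_work(item)
#
#   for _ in range(number_of_workers):
#       q.put(None)            # one sentinel per worker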
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Runs the parser in a multi-core setting. Outputs to stdout.')
parser.add_argument('model', nargs=1, help='Name of the model file.')
    parser.add_argument('input', nargs='?', help='Input file name, or nothing for parsing from stdin')
parser.add_argument('-p', '--processes', type=int, default=4, help='How many parsing workers to run? (default %(default)d)')
parser.add_argument('--max_sent', type=int, default=0, help='How many sentences to parse from the input? 0 for all. (default %(default)d)')
parser.add_argument('--no_avg', default=False, action="store_true", help='Do not use the averaged perceptron but the original weight vector (default %(default)s)')
args = parser.parse_args()
launch_instances(args)
|
jmnybl/Turku-Dependency-Parser
|
parse_parallel.py
|
Python
|
gpl-2.0
| 5,441
|
#!/usr/bin/env python
# srv_thread.py
# RP(Y)C factory object for QKIT, written by HR,JB@KIT 2016
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import time
import sys
import logging
#import numpy
import rpyc
from rpyc.utils.server import ThreadedServer
DEBUG = False
def logstr(logstring):
if DEBUG:
print(str(logstring))
#global wants_abort
wants_abort = False
# the following trick allows to pass an Object (DATA) to a factory class (rpc.Service)
# witout overriding the __init__() function. Basically a closure defines the namespace
# and passes the object.
def CustomizedService(DATA):
class Plasma1Service(rpyc.Service):
def on_connect(self):
# code that runs when a connection is created
print 'New connection with %s initiated.'%str(self._conn._config['endpoints'][1])
def on_disconnect(self):
# code that runs when the connection has already closed
print 'Connection to %s closed.'%str(self._conn._config['endpoints'][1])
def exposed_get_history(self,p,range): #range in hours
if DATA.debug: print 'history request from client %s'%str(self._conn._config['endpoints'][1])
return getattr(DATA,str(p)).get_history(range)
def exposed_get_last_value(self,p):
if DATA.debug: print 'value request'
return [getattr(DATA,str(p)).get_timestamp(),getattr(DATA,str(p)).get_last_value()]
return Plasma1Service
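# A stripped-down sketch of the closure-factory pattern used above (illustrative
# only; not part of the qsurveilkit API): the enclosing function captures the
# shared object, so the returned class can use it without overriding __init__.
def _closure_factory_sketch(shared):
    class _Service(object):
        def exposed_value(self):
            return shared   # 'shared' is taken from the enclosing scope
    return _Service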
def remote_service_operator(DATA):
"""
starts the (threaded) service which exposes the
RPYC_service factory class to remote RPYC clients.
This is the function which should be called in the main function,
there is no need for hiding it in a class.
"""
CS = CustomizedService(DATA)
t = ThreadedServer(CS, port = DATA.localhost.port)#, protocol_config = {"allow_public_attrs" : True})
t.start()
class MyTestObject(object):
def __init__(self):
self.a = 'string'
if __name__ == "__main__":
"""
logic of operation:
* init DATA
* start remote service operator
* start data_srv
"""
o = MyTestObject()
o.test=lambda: 1
rso = remote_service_operator(o)
|
qkitgroup/qkit
|
qkit/services/qsurveilkit/srv_thread.py
|
Python
|
gpl-2.0
| 2,961
|
#!/usr/bin/python
import sys
import importlib
# RedBrick resource wrapper
from rb_setup import RedBrick
testfiles = {
"responsiveness": None,
"remove_all": None,
"stream": None,
"availability": None,
"common": None,
"colormotion": None,
"scenes": None,
}
for name in testfiles:
testfiles[name] = importlib.import_module(name)
def usage_exit(prog):
print "Usage 1 (on RedBrick):", prog, "rb-uid testfilename", \
"[host = localhost [port = 4223]]"
print "Usage 0 (local):", prog, "- testfilename"
if __name__ == '__main__':
host = "localhost"
port = 4223
prog = None
if len(sys.argv) < 3:
usage_exit(sys.argv[0])
filename = sys.argv[2]
if sys.argv[1] != '-':
uid = sys.argv[1]
try:
prog = testfiles[filename]
host = sys.argv[3] if len(sys.argv) > 3 else host
port = int(sys.argv[4]) if len(sys.argv) > 4 else port
except KeyError, e:
print "No such file", filename + ".py or not registered"
exit(-1)
with RedBrick(uid, host, port) as rb:
prog.run(rb)
else:
try:
prog = testfiles[filename]
except KeyError, e:
print "No such file", filename + ".py or not registered"
exit(-1)
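        # NOTE: Lib is not imported in this file; it is assumed to be provided
        # by the surrounding test environment (the local, non-RedBrick wrapper).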
with Lib() as lib:
prog.run(lib)
|
philkroos/tinkervision
|
src/test/red-brick/scripts/main.py
|
Python
|
gpl-2.0
| 1,384
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
StandBrowserDockWidget
A QGIS plugin
Browse forests stand
-------------------
begin : 2017-02-18
git sha : $Format:%H$
copyright : (C) 2017 by Magnus Homann
email : magnus@homann.se
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt4 import QtGui, uic
from PyQt4.QtCore import pyqtSignal
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'stand_browser_dockwidget_base.ui'))
class StandBrowserDockWidget(QtGui.QDockWidget, FORM_CLASS):
closingPlugin = pyqtSignal()
def __init__(self, parent=None):
"""Constructor."""
super(StandBrowserDockWidget, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
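        # For example, an auto-connected slot for a hypothetical push button
        # named "refreshButton" in the .ui file would look like (illustrative,
        # not part of this plugin):
        #
        #   def on_refreshButton_clicked(self):
        #       ...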
def closeEvent(self, event):
self.closingPlugin.emit()
event.accept()
|
homann/stand-browser
|
stand_browser_dockwidget.py
|
Python
|
gpl-2.0
| 1,979
|
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
import os
import time
import urllib
import uuid
from xml.dom import minidom
from xml.parsers import expat
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import netutils
from nova.virt.xenapi import agent
from nova.virt.xenapi.image import utils as image_utils
LOG = logging.getLogger(__name__)
xenapi_vm_utils_opts = [
cfg.StrOpt('cache_images',
default='all',
choices=('all', 'some', 'none'),
help='Cache glance images locally. `all` will cache all'
' images, `some` will only cache images that have the'
' image_property `cache_in_nova=True`, and `none` turns'
' off caching entirely'),
cfg.IntOpt('image_compression_level',
min=1,
max=9,
help='Compression level for images, e.g., 9 for gzip -9.'
' Range is 1-9, 9 being most compressed but most CPU'
' intensive on dom0.'),
cfg.StrOpt('default_os_type',
default='linux',
help='Default OS type'),
cfg.IntOpt('block_device_creation_timeout',
default=10,
help='Time to wait for a block device to be created'),
cfg.IntOpt('max_kernel_ramdisk_size',
default=16 * units.Mi,
help='Maximum size in bytes of kernel or ramdisk images'),
cfg.StrOpt('sr_matching_filter',
default='default-sr:true',
help='Filter for finding the SR to be used to install guest '
'instances on. To use the Local Storage in default '
'XenServer/XCP installations set this flag to '
'other-config:i18n-key=local-storage. To select an SR '
'with a different matching criteria, you could set it to '
'other-config:my_favorite_sr=true. On the other hand, to '
'fall back on the Default SR, as displayed by XenCenter, '
'set this flag to: default-sr:true'),
cfg.BoolOpt('sparse_copy',
default=True,
help='Whether to use sparse_copy for copying data on a '
'resize down (False will use standard dd). This speeds '
'up resizes down considerably since large runs of zeros '
'won\'t have to be rsynced'),
cfg.IntOpt('num_vbd_unplug_retries',
default=10,
help='Maximum number of retries to unplug VBD. if <=0, '
'should try once and no retry'),
cfg.StrOpt('torrent_images',
default='none',
choices=('all', 'some', 'none'),
help='Whether or not to download images via Bit Torrent.'),
cfg.StrOpt('ipxe_network_name',
help='Name of network to use for booting iPXE ISOs'),
cfg.StrOpt('ipxe_boot_menu_url',
help='URL to the iPXE boot menu'),
cfg.StrOpt('ipxe_mkisofs_cmd',
default='mkisofs',
help='Name and optionally path of the tool used for '
'ISO image creation'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vm_utils_opts, 'xenserver')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
'Paused': power_state.PAUSED,
'Suspended': power_state.SUSPENDED,
'Crashed': power_state.CRASHED}
SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
KERNEL_DIR = '/boot/guest'
MAX_VDI_CHAIN_SIZE = 16
PROGRESS_INTERVAL_SECONDS = 300
# Fudge factor to allow for the VHD chain to be slightly larger than
# the partitioned space. Otherwise, legitimate images near their
# maximum allowed size can fail on build with FlavorDiskSmallerThanImage.
VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10
class ImageType(object):
"""Enumeration class for distinguishing different image types
| 0 - kernel image (goes on dom0's filesystem)
| 1 - ramdisk image (goes on dom0's filesystem)
| 2 - disk image (local SR, partitioned by objectstore plugin)
| 3 - raw disk image (local SR, NOT partitioned by plugin)
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
| 6 - config drive
"""
KERNEL = 0
RAMDISK = 1
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
DISK_CONFIGDRIVE = 6
_ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "root"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
return dict(zip(cls._ids, ImageType._strs)).get(image_type)
@classmethod
def get_role(cls, image_type_id):
"""Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
cls.DISK_ISO: 'iso',
cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
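# For illustration, the mappings defined above give, for example:
#   ImageType.to_string(ImageType.DISK_VHD) -> "vhd"
#   ImageType.get_role(ImageType.DISK_VHD)  -> "root"
#   ImageType.to_string(ImageType.DISK_RAW) -> "os_raw"
#   ImageType.get_role(ImageType.DISK_ISO)  -> "iso"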
def get_vm_device_id(session, image_meta):
# NOTE: device_id should be 2 for windows VMs which run new xentools
# (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more
# information.
device_id = image_meta.properties.get('hw_device_id')
# The device_id is required to be set for hypervisor version 6.1 and above
if device_id:
hypervisor_version = session.product_version
if _hypervisor_supports_device_id(hypervisor_version):
return device_id
else:
msg = _("Device id %(id)s specified is not supported by "
"hypervisor version %(version)s") % {'id': device_id,
'version': hypervisor_version}
raise exception.NovaException(msg)
def _hypervisor_supports_device_id(version):
version_as_string = '.'.join(str(v) for v in version)
return(versionutils.is_compatible('6.1', version_as_string))
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False, device_id=None):
"""Create a VM record. Returns new VM reference.
the use_pv_kernel flag indicates whether the guest is HVM or PV
There are 3 scenarios:
1. Using paravirtualization, kernel passed in
2. Using paravirtualization, kernel within the image
3. Using hardware virtualization
"""
flavor = instance.get_flavor()
mem = str(int(flavor.memory_mb) * units.Mi)
vcpus = str(flavor.vcpus)
vcpu_weight = flavor.vcpu_weight
vcpu_params = {}
if vcpu_weight is not None:
# NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means
# we need to specify both weight and cap for either to apply
vcpu_params = {"weight": str(vcpu_weight), "cap": "0"}
cpu_mask_list = hardware.get_vcpu_pin_set()
if cpu_mask_list:
cpu_mask = hardware.format_cpu_spec(cpu_mask_list,
allow_ranges=False)
vcpu_params["mask"] = cpu_mask
viridian = 'true' if instance['os_type'] == 'windows' else 'false'
rec = {
'actions_after_crash': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_shutdown': 'destroy',
'affinity': '',
'blocked_operations': {},
'ha_always_run': False,
'ha_restart_priority': '',
'HVM_boot_params': {},
'HVM_boot_policy': '',
'is_a_template': False,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
'memory_static_min': '0',
'memory_static_max': mem,
'memory_target': mem,
'name_description': '',
'name_label': name_label,
'other_config': {'nova_uuid': str(instance['uuid'])},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': viridian, 'timeoffset': '0'},
'PV_args': '',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '',
'PV_legacy_args': '',
'PV_ramdisk': '',
'recommendations': '',
'tags': [],
'user_version': '0',
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': vcpu_params,
'xenstore_data': {'vm-data/allowvssprovider': 'false'}}
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
rec['platform']['nx'] = 'false'
if instance['kernel_id']:
# 1. Kernel explicitly passed in, use that
rec['PV_args'] = 'root=/dev/xvda1'
rec['PV_kernel'] = kernel
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
rec['platform']['nx'] = 'true'
rec['HVM_boot_params'] = {'order': 'dc'}
rec['HVM_boot_policy'] = 'BIOS order'
if device_id:
rec['platform']['device_id'] = str(device_id).zfill(4)
vm_ref = session.VM.create(rec)
LOG.debug('Created VM', instance=instance)
return vm_ref
def destroy_vm(session, instance, vm_ref):
"""Destroys a VM record."""
try:
session.VM.destroy(vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Destroy VM failed'))
return
LOG.debug("VM destroyed", instance=instance)
def clean_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug("Shutting down VM (cleanly)", instance=instance)
try:
session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (cleanly) failed.'))
return False
return True
def hard_shutdown_vm(session, instance, vm_ref):
if is_vm_shutdown(session, vm_ref):
LOG.warning(_LW("VM already halted, skipping shutdown..."),
instance=instance)
return True
LOG.debug("Shutting down VM (hard)", instance=instance)
try:
session.call_xenapi('VM.hard_shutdown', vm_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Shutting down VM (hard) failed'))
return False
return True
def is_vm_shutdown(session, vm_ref):
state = get_power_state(session, vm_ref)
if state == power_state.SHUTDOWN:
return True
return False
def is_enough_free_mem(session, instance):
flavor = instance.get_flavor()
mem = int(flavor.memory_mb) * units.Mi
host_free_mem = int(session.call_xenapi("host.compute_free_memory",
session.host_ref))
return host_free_mem >= mem
def _should_retry_unplug_vbd(err):
# Retry if unplug failed with DEVICE_DETACH_REJECTED
# For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
# Since XenServer 6.2, we also need to retry if we get
# INTERNAL_ERROR, as that error goes away when you retry.
return (err == 'DEVICE_DETACH_REJECTED'
or
err == 'INTERNAL_ERROR')
def unplug_vbd(session, vbd_ref, this_vm_ref):
# make sure that perform at least once
max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1
for num_attempt in range(1, max_attempts + 1):
try:
if num_attempt > 1:
greenthread.sleep(1)
session.VBD.unplug(vbd_ref, this_vm_ref)
return
except session.XenAPI.Failure as exc:
err = len(exc.details) > 0 and exc.details[0]
if err == 'DEVICE_ALREADY_DETACHED':
LOG.info(_LI('VBD %s already detached'), vbd_ref)
return
elif _should_retry_unplug_vbd(err):
                LOG.info(_LI('VBD %(vbd_ref)s unplug failed with "%(err)s", '
'attempt %(num_attempt)d/%(max_attempts)d'),
{'vbd_ref': vbd_ref, 'num_attempt': num_attempt,
'max_attempts': max_attempts, 'err': err})
else:
LOG.exception(_LE('Unable to unplug VBD'))
raise exception.StorageError(
reason=_('Unable to unplug VBD %s') % vbd_ref)
raise exception.StorageError(
reason=_('Reached maximum number of retries '
'trying to unplug VBD %s')
% vbd_ref)
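# For example (illustrative): with num_vbd_unplug_retries = 10 the loop above
# makes up to 11 attempts in total; with a value <= 0 it still makes exactly
# one attempt.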
def destroy_vbd(session, vbd_ref):
"""Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure:
LOG.exception(_LE('Unable to destroy VBD'))
raise exception.StorageError(
reason=_('Unable to destroy VBD %s') % vbd_ref)
def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
read_only=False, bootable=False, osvol=False,
empty=False, unpluggable=True):
"""Create a VBD record and returns its reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
if vdi_ref is None:
vdi_ref = 'OpaqueRef:NULL'
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
vbd_rec['bootable'] = bootable
vbd_rec['mode'] = read_only and 'RO' or 'RW'
vbd_rec['type'] = vbd_type
vbd_rec['unpluggable'] = unpluggable
vbd_rec['empty'] = empty
vbd_rec['other_config'] = {}
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ',
{'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.',
{'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref})
if osvol:
# set osvol=True in other-config to indicate this is an
# attached nova (or cinder) volume
session.call_xenapi('VBD.add_to_other_config',
vbd_ref, 'osvol', 'True')
return vbd_ref
def attach_cd(session, vm_ref, vdi_ref, userdevice):
"""Create an empty VBD, then insert the CD."""
vbd_ref = create_vbd(session, vm_ref, None, userdevice,
vbd_type='cd', read_only=True,
bootable=True, empty=True,
unpluggable=False)
session.call_xenapi('VBD.insert', vbd_ref, vdi_ref)
return vbd_ref
def destroy_vdi(session, vdi_ref):
try:
session.call_xenapi('VDI.destroy', vdi_ref)
except session.XenAPI.Failure:
msg = "Unable to destroy VDI %s" % vdi_ref
LOG.debug(msg, exc_info=True)
msg = _("Unable to destroy VDI %s") % vdi_ref
LOG.error(msg)
raise exception.StorageError(reason=msg)
def safe_destroy_vdis(session, vdi_refs):
"""Tries to destroy the requested VDIs, but ignores any errors."""
for vdi_ref in vdi_refs:
try:
destroy_vdi(session, vdi_ref)
except exception.StorageError:
msg = "Ignoring error while destroying VDI: %s" % vdi_ref
LOG.debug(msg)
def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
read_only=False):
"""Create a VDI record and returns its reference."""
vdi_ref = session.call_xenapi("VDI.create",
{'name_label': name_label,
'name_description': disk_type,
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
'other_config': _get_vdi_other_config(disk_type, instance=instance),
'sm_config': {},
'tags': []})
LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,'
' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.',
{'vdi_ref': vdi_ref, 'name_label': name_label,
'virtual_size': virtual_size, 'read_only': read_only,
'sr_ref': sr_ref})
return vdi_ref
@contextlib.contextmanager
def _dummy_vm(session, instance, vdi_ref):
"""This creates a temporary VM so that we can snapshot a VDI.
VDI's can't be snapshotted directly since the API expects a `vm_ref`. To
work around this, we need to create a temporary VM and then map the VDI to
the VM using a temporary VBD.
"""
name_label = "dummy"
vm_ref = create_vm(session, instance, name_label, None, None)
try:
vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
read_only=True)
try:
yield vm_ref
finally:
try:
destroy_vbd(session, vbd_ref)
except exception.StorageError:
# destroy_vbd() will log error
pass
finally:
destroy_vm(session, instance, vm_ref)
def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
"""Copy a VDI and return the new VDIs reference.
This function differs from the XenAPI `VDI.copy` call in that the copy is
atomic and isolated, meaning we don't see half-downloaded images. It
accomplishes this by copying the VDI's into a temporary directory and then
atomically renaming them into the SR when the copy is completed.
The correct long term solution is to fix `VDI.copy` so that it is atomic
and isolated.
"""
with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
label = "snapshot"
with snapshot_attached_here(
session, instance, vm_ref, label) as vdi_uuids:
imported_vhds = session.call_plugin_serialized(
'workarounds', 'safe_copy_vdis',
sr_path=get_sr_path(session, sr_ref=sr_ref),
vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack())
root_uuid = imported_vhds['root']['uuid']
# rescan to discover new VHDs
scan_default_sr(session)
vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
return vdi_ref
def _clone_vdi(session, vdi_to_clone_ref):
"""Clones a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref)
LOG.debug('Cloned VDI %(vdi_ref)s from VDI '
'%(vdi_to_clone_ref)s',
{'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref})
return vdi_ref
def _get_vdi_other_config(disk_type, instance=None):
"""Return metadata to store in VDI's other_config attribute.
`nova_instance_uuid` is used to associate a VDI with a particular instance
so that, if it becomes orphaned from an unclean shutdown of a
compute-worker, we can safely detach it.
"""
other_config = {'nova_disk_type': disk_type}
# create_vdi may be called simply while creating a volume
# hence information about instance may or may not be present
if instance:
other_config['nova_instance_uuid'] = instance['uuid']
return other_config
def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description,
instance):
existing_other_config = session.call_xenapi('VDI.get_other_config',
vdi_ref)
session.call_xenapi('VDI.set_name_label', vdi_ref, name_label)
session.call_xenapi('VDI.set_name_description', vdi_ref, description)
other_config = _get_vdi_other_config(vdi_type, instance=instance)
for key, value in six.iteritems(other_config):
if key not in existing_other_config:
session.call_xenapi(
"VDI.add_to_other_config", vdi_ref, key, value)
def _vm_get_vbd_refs(session, vm_ref):
return session.call_xenapi("VM.get_VBDs", vm_ref)
def _vbd_get_rec(session, vbd_ref):
return session.call_xenapi("VBD.get_record", vbd_ref)
def _vdi_get_rec(session, vdi_ref):
return session.call_xenapi("VDI.get_record", vdi_ref)
def _vdi_get_uuid(session, vdi_ref):
return session.call_xenapi("VDI.get_uuid", vdi_ref)
def _vdi_snapshot(session, vdi_ref):
return session.call_xenapi("VDI.snapshot", vdi_ref, {})
def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'):
"""Retrieves the primary VDI for a VM."""
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
# Convention dictates the primary VDI will be userdevice 0
if vbd_rec['userdevice'] == userdevice:
vdi_ref = vbd_rec['VDI']
vdi_rec = _vdi_get_rec(session, vdi_ref)
return vdi_ref, vdi_rec
raise exception.NovaException(_("No primary VDI found for %s") % vm_ref)
def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0):
vbd_refs = _vm_get_vbd_refs(session, vm_ref)
for vbd_ref in vbd_refs:
vbd_rec = _vbd_get_rec(session, vbd_ref)
if int(vbd_rec['userdevice']) >= min_userdevice:
vdi_ref = vbd_rec['VDI']
yield _vdi_get_uuid(session, vdi_ref)
def _try_strip_base_mirror_from_vdi(session, vdi_ref):
try:
session.call_xenapi("VDI.remove_from_sm_config", vdi_ref,
"base_mirror")
except session.XenAPI.Failure:
LOG.debug("Error while removing sm_config", exc_info=True)
def strip_base_mirror_from_vdis(session, vm_ref):
# NOTE(johngarbutt) part of workaround for XenServer bug CA-98606
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_ref in vbd_refs:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
_try_strip_base_mirror_from_vdi(session, vdi_ref)
def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref):
possible_snapshot_parents = vdi_uuid_chain[1:]
if len(possible_snapshot_parents) == 0:
LOG.debug("No VHD chain.", instance=instance)
return
snapshot_uuids = _child_vhds(session, sr_ref, possible_snapshot_parents,
old_snapshots_only=True)
number_of_snapshots = len(snapshot_uuids)
if number_of_snapshots <= 0:
LOG.debug("No snapshots to remove.", instance=instance)
return
vdi_refs = [session.VDI.get_by_uuid(vdi_uuid)
for vdi_uuid in snapshot_uuids]
safe_destroy_vdis(session, vdi_refs)
# ensure garbage collector has been run
_scan_sr(session, sr_ref)
LOG.info(_LI("Deleted %s snapshots.") % number_of_snapshots,
instance=instance)
def remove_old_snapshots(session, instance, vm_ref):
"""See if there is an snapshot present that should be removed."""
LOG.debug("Starting remove_old_snapshots for VM", instance=instance)
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
sr_ref = vm_vdi_rec["SR"]
_delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
post_snapshot_callback=None):
# impl method allow easier patching for tests
return _snapshot_attached_here_impl(session, instance, vm_ref, label,
userdevice, post_snapshot_callback)
def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice,
post_snapshot_callback):
"""Snapshot the root disk only. Return a list of uuids for the vhds
in the chain.
"""
LOG.debug("Starting snapshot for VM", instance=instance)
# Memorize the VDI chain so we can poll for coalesce
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref,
userdevice)
chain = _walk_vdi_chain(session, vm_vdi_rec['uuid'])
vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain]
sr_ref = vm_vdi_rec["SR"]
# clean up after any interrupted snapshot attempts
_delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref)
snapshot_ref = _vdi_snapshot(session, vm_vdi_ref)
if post_snapshot_callback is not None:
post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD)
try:
# When the VDI snapshot is taken a new parent is introduced.
# If we have taken a snapshot before, the new parent can be coalesced.
# We need to wait for this to happen before trying to copy the chain.
_wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref,
vdi_uuid_chain)
snapshot_uuid = _vdi_get_uuid(session, snapshot_ref)
chain = _walk_vdi_chain(session, snapshot_uuid)
vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain]
yield vdi_uuids
finally:
safe_destroy_vdis(session, [snapshot_ref])
# TODO(johngarbut) we need to check the snapshot has been coalesced
# now its associated VDI has been deleted.
def get_sr_path(session, sr_ref=None):
"""Return the path to our storage repository
This is used when we're dealing with VHDs directly, either by taking
snapshots or by restoring an image in the DISK_VHD format.
"""
if sr_ref is None:
sr_ref = safe_find_sr(session)
pbd_rec = session.call_xenapi("PBD.get_all_records_where",
'field "host"="%s" and '
'field "SR"="%s"' %
(session.host_ref, sr_ref))
# NOTE(bobball): There can only be one PBD for a host/SR pair, but path is
# not always present - older versions of XS do not set it.
pbd_ref = list(pbd_rec.keys())[0]
device_config = pbd_rec[pbd_ref]['device_config']
if 'path' in device_config:
return device_config['path']
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
if sr_rec["type"] not in ["ext", "nfs"]:
raise exception.NovaException(
_("Only file-based SRs (ext/NFS) are supported by this feature."
" SR %(uuid)s is of type %(type)s") %
{"uuid": sr_uuid, "type": sr_rec["type"]})
return os.path.join(CONF.xenserver.sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
"""Destroy used or unused cached images.
A cached image that is being used by at least one VM is said to be 'used'.
In the case of an 'unused' image, the cached image will be the only
descendent of the base-copy. So when we delete the cached-image, the
refcount will drop to zero and XenServer will automatically destroy the
base-copy for us.
The default behavior of this function is to destroy only 'unused' cached
images. To destroy all cached images, use the `all_cached=True` kwarg.
"""
cached_images = _find_cached_images(session, sr_ref)
destroyed = set()
def destroy_cached_vdi(vdi_uuid, vdi_ref):
LOG.debug("Destroying cached VDI '%(vdi_uuid)s'")
if not dry_run:
destroy_vdi(session, vdi_ref)
destroyed.add(vdi_uuid)
for vdi_ref in cached_images.values():
vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)
if all_cached:
destroy_cached_vdi(vdi_uuid, vdi_ref)
continue
# Unused-Only: Search for siblings
# Chain length greater than two implies a VM must be holding a ref to
# the base-copy (otherwise it would have coalesced), so consider this
# cached image used.
chain = list(_walk_vdi_chain(session, vdi_uuid))
if len(chain) > 2:
continue
elif len(chain) == 2:
# Siblings imply cached image is used
root_vdi_rec = chain[-1]
children = _child_vhds(session, sr_ref, [root_vdi_rec['uuid']])
if len(children) > 1:
continue
destroy_cached_vdi(vdi_uuid, vdi_ref)
return destroyed
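# Illustrative call (the arguments shown are just an example): report which
# cached images *would* be removed, without actually destroying anything:
#   would_remove = destroy_cached_images(session, sr_ref, all_cached=True,
#                                        dry_run=True)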
def _find_cached_images(session, sr_ref):
"""Return a dict(uuid=vdi_ref) representing all cached images."""
cached_images = {}
for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
try:
image_id = vdi_rec['other_config']['image-id']
except KeyError:
continue
cached_images[image_id] = vdi_ref
return cached_images
def _find_cached_image(session, image_id, sr_ref):
"""Returns the vdi-ref of the cached image."""
name_label = _get_image_vdi_label(image_id)
recs = session.call_xenapi("VDI.get_all_records_where",
'field "name__label"="%s"'
% name_label)
number_found = len(recs)
if number_found > 0:
if number_found > 1:
LOG.warning(_LW("Multiple base images for image: %s"), image_id)
return list(recs.keys())[0]
def _get_resize_func_name(session):
brand = session.product_brand
version = session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if version and brand:
xcp = brand == 'XCP'
r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
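# For example, the checks above resolve to (illustrative):
#   XCP 1.1        -> 'VDI.resize_online'
#   XCP 1.6        -> 'VDI.resize'
#   XenServer 5.6  -> 'VDI.resize_online'
#   XenServer 6.2  -> 'VDI.resize'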
def _vdi_get_virtual_size(session, vdi_ref):
size = session.call_xenapi('VDI.get_virtual_size', vdi_ref)
return int(size)
def _vdi_resize(session, vdi_ref, new_size):
resize_func_name = _get_resize_func_name(session)
session.call_xenapi(resize_func_name, vdi_ref, str(new_size))
def update_vdi_virtual_size(session, instance, vdi_ref, new_gb):
virtual_size = _vdi_get_virtual_size(session, vdi_ref)
new_disk_size = new_gb * units.Gi
msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d "
"to %(new_disk_size)d")
LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size},
instance=instance)
if virtual_size < new_disk_size:
# For resize up. Simple VDI resize will do the trick
_vdi_resize(session, vdi_ref, new_disk_size)
elif virtual_size == new_disk_size:
LOG.debug("No need to change vdi virtual size.",
instance=instance)
else:
# NOTE(johngarbutt): we should never get here
# but if we don't raise an exception, a user might be able to use
# more storage than allowed by their chosen instance flavor
msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger "
"than flavor size of %(new_disk_size)d bytes.")
msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size,
'new_disk_size': new_disk_size}
LOG.debug(msg, instance=instance)
raise exception.ResizeError(reason=msg)
def resize_disk(session, instance, vdi_ref, flavor):
size_gb = flavor.root_gb
if size_gb == 0:
reason = _("Can't resize a disk to 0 GB.")
raise exception.ResizeError(reason=reason)
sr_ref = safe_find_sr(session)
clone_ref = _clone_vdi(session, vdi_ref)
try:
# Resize partition and filesystem down
_auto_configure_disk(session, clone_ref, size_gb)
# Create new VDI
vdi_size = size_gb * units.Gi
# NOTE(johannes): No resizing allowed for rescue instances, so
# using instance['name'] is safe here
new_ref = create_vdi(session, sr_ref, instance, instance['name'],
'root', vdi_size)
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
# Manually copy contents over
virtual_size = size_gb * units.Gi
_copy_partition(session, clone_ref, new_ref, 1, virtual_size)
return new_ref, new_uuid
finally:
destroy_vdi(session, clone_ref)
def _auto_configure_disk(session, vdi_ref, new_gb):
"""Partition and resize FS to match the size specified by
flavors.root_gb.
This is a fail-safe to prevent accidentally destroying data on a disk
erroneously marked as auto_disk_config=True.
The criteria for allowing resize are:
1. 'auto_disk_config' must be true for the instance (and image).
(If we've made it here, then auto_disk_config=True.)
2. The disk must have only one partition.
3. The file-system on the one partition must be ext3 or ext4.
"""
if new_gb == 0:
LOG.debug("Skipping auto_config_disk as destination size is 0GB")
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
partitions = _get_partitions(dev)
if len(partitions) != 1:
reason = _('Disk must have only one partition.')
raise exception.CannotResizeDisk(reason=reason)
num, start, old_sectors, fstype, name, flags = partitions[0]
if fstype not in ('ext3', 'ext4'):
reason = _('Disk contains a filesystem '
'we are unable to resize: %s')
raise exception.CannotResizeDisk(reason=(reason % fstype))
if num != 1:
reason = _('The only partition should be partition 1.')
raise exception.CannotResizeDisk(reason=reason)
new_sectors = new_gb * units.Gi / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors, flags)
def try_auto_configure_disk(session, vdi_ref, new_gb):
try:
_auto_configure_disk(session, vdi_ref, new_gb)
except exception.CannotResizeDisk as e:
msg = _LW('Attempted auto_configure_disk failed because: %s')
LOG.warn(msg % e)
def _make_partition(session, dev, partition_start, partition_end):
dev_path = utils.make_dev_path(dev)
# NOTE(bobball) If this runs in Dom0, parted will error trying
# to re-read the partition table and return a generic error
utils.execute('parted', '--script', dev_path,
'mklabel', 'msdos', run_as_root=True,
check_exit_code=not session.is_local_connection)
utils.execute('parted', '--script', dev_path, '--',
'mkpart', 'primary',
partition_start,
partition_end,
run_as_root=True,
check_exit_code=not session.is_local_connection)
partition_path = utils.make_dev_path(dev, partition=1)
if session.is_local_connection:
# Need to refresh the partitions
utils.trycmd('kpartx', '-a', dev_path,
run_as_root=True,
discard_warnings=True)
# Sometimes the partition gets created under /dev/mapper, depending
# on the setup in dom0.
mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
if os.path.exists(mapper_path):
return mapper_path
return partition_path
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
disk_type, size_mb, fs_type):
"""Steps to programmatically generate a disk:
1. Create VDI of desired size
2. Attach VDI to compute worker
3. Create partition
4. Create VBD between instance VM and VDI
"""
# 1. Create VDI
sr_ref = safe_find_sr(session)
ONE_MEG = units.Mi
virtual_size = size_mb * ONE_MEG
vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type,
virtual_size)
try:
# 2. Attach VDI to compute worker (VBD hotplug)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
# 3. Create partition
partition_start = "2048s"
partition_end = "-0"
partition_path = _make_partition(session, dev,
partition_start, partition_end)
if fs_type == 'linux-swap':
utils.execute('mkswap', partition_path, run_as_root=True)
elif fs_type is not None:
utils.execute('mkfs', '-t', fs_type, partition_path,
run_as_root=True)
# 4. Create VBD between instance VM and VDI
if vm_ref:
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while generating disk number: %s" % userdevice
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
return vdi_ref
def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
# partition because that is what parted supports.
is_windows = instance['os_type'] == "windows"
fs_type = "vfat" if is_windows else "linux-swap"
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'swap', swap_mb, fs_type)
def get_ephemeral_disk_sizes(total_size_gb):
if not total_size_gb:
return
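    # Split the requested space into chunks no larger than a single VHD can
    # hold (VHDs top out around 2043GB, see the note in generate_ephemeral
    # below); totals that divide evenly by 1024GB use 1024GB chunks instead.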
max_size_gb = 2000
if total_size_gb % 1024 == 0:
max_size_gb = 1024
left_to_allocate = total_size_gb
while left_to_allocate > 0:
size_gb = min(max_size_gb, left_to_allocate)
yield size_gb
left_to_allocate -= size_gb
def generate_single_ephemeral(session, instance, vm_ref, userdevice,
size_gb, instance_name_label=None):
if instance_name_label is None:
instance_name_label = instance["name"]
name_label = "%s ephemeral" % instance_name_label
# TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
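    # The first ephemeral disk is attached at userdevice 4, so label numbering
    # starts at 0 and only the second and later disks get a numeric suffix.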
label_number = int(userdevice) - 4
if label_number > 0:
name_label = "%s (%d)" % (name_label, label_number)
return _generate_disk(session, instance, vm_ref, str(userdevice),
name_label, 'ephemeral', size_gb * 1024,
CONF.default_ephemeral_format)
def generate_ephemeral(session, instance, vm_ref, first_userdevice,
instance_name_label, total_size_gb):
# NOTE(johngarbutt): max possible size of a VHD disk is 2043GB
sizes = get_ephemeral_disk_sizes(total_size_gb)
first_userdevice = int(first_userdevice)
vdi_refs = []
try:
for userdevice, size_gb in enumerate(sizes, start=first_userdevice):
ref = generate_single_ephemeral(session, instance, vm_ref,
userdevice, size_gb,
instance_name_label)
vdi_refs.append(ref)
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.debug("Error when generating ephemeral disk. "
"Device: %(userdevice)s Size GB: %(size_gb)s "
"Error: %(exc)s", {
'userdevice': userdevice,
'size_gb': size_gb,
'exc': exc})
safe_destroy_vdis(session, vdi_refs)
def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
name_label, size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'user', size_gb * 1024, CONF.default_ephemeral_format)
def generate_configdrive(session, instance, vm_ref, userdevice,
network_info, admin_password=None, files=None):
sr_ref = safe_find_sr(session)
vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
try:
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md,
network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive')
cdb.make_drive(tmp_file)
dev_path = utils.make_dev_path(dev)
utils.execute('dd',
'if=%s' % tmp_file,
'of=%s' % dev_path,
'oflag=direct,sync',
run_as_root=True)
create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
read_only=True)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while generating config drive"
LOG.debug(msg, instance=instance, exc_info=True)
safe_destroy_vdis(session, [vdi_ref])
def _create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
If the image is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
filename = ""
if CONF.xenserver.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args)
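    # The plugin returns an empty filename when the image is not in the dom0
    # cache; in that case fall back to streaming the image from glance.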
if filename == "":
return _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
else:
vdi_type = ImageType.to_string(image_type)
return {vdi_type: dict(uuid=None, file=filename)}
def create_kernel_and_ramdisk(context, session, instance, name_label):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['kernel_id'],
ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = _create_kernel_image(context, session,
instance, name_label, instance['ramdisk_id'],
ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
return kernel_file, ramdisk_file
def destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
args = {}
if kernel:
args['kernel-file'] = kernel
if ramdisk:
args['ramdisk-file'] = ramdisk
if args:
LOG.debug("Removing kernel/ramdisk files from dom0",
instance=instance)
session.call_plugin('kernel', 'remove_kernel_ramdisk', args)
def _get_image_vdi_label(image_id):
return 'Glance Image %s' % image_id
def _create_cached_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = safe_find_sr(session)
sr_type = session.call_xenapi('SR.get_type', sr_ref)
if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_LW("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %s. Ignoring the cow flag."), sr_type)
@utils.synchronized('xenapi-image-cache' + image_id)
def _create_cached_image_impl(context, session, instance, name_label,
image_id, image_type, sr_ref):
cache_vdi_ref = _find_cached_image(session, image_id, sr_ref)
downloaded = False
if cache_vdi_ref is None:
downloaded = True
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
cache_vdi_ref = session.call_xenapi(
'VDI.get_by_uuid', vdis['root']['uuid'])
session.call_xenapi('VDI.set_name_label', cache_vdi_ref,
_get_image_vdi_label(image_id))
session.call_xenapi('VDI.set_name_description', cache_vdi_ref,
'root')
session.call_xenapi('VDI.add_to_other_config',
cache_vdi_ref, 'image-id', str(image_id))
if CONF.use_cow_images:
new_vdi_ref = _clone_vdi(session, cache_vdi_ref)
elif sr_type == 'ext':
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance,
cache_vdi_ref)
else:
new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref,
sr_ref)
session.call_xenapi('VDI.set_name_label', new_vdi_ref, '')
session.call_xenapi('VDI.set_name_description', new_vdi_ref, '')
session.call_xenapi('VDI.remove_from_other_config',
new_vdi_ref, 'image-id')
vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
return downloaded, vdi_uuid
downloaded, vdi_uuid = _create_cached_image_impl(context, session,
instance, name_label,
image_id, image_type,
sr_ref)
vdis = {}
vdi_type = ImageType.get_role(image_type)
vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
return downloaded, vdis
def create_image(context, session, instance, name_label, image_id,
image_type):
"""Creates VDI from the image stored in the local cache. If the image
is not present in the cache, it streams it from glance.
Returns: A list of dictionaries that describe VDIs
"""
cache_images = CONF.xenserver.cache_images.lower()
# Determine if the image is cacheable
if image_type == ImageType.DISK_ISO:
cache = False
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
except KeyError:
cache = False
elif cache_images == 'none':
cache = False
else:
LOG.warning(_LW("Unrecognized cache_images value '%s', defaulting to"
" True"), CONF.xenserver.cache_images)
cache = True
# Fetch (and cache) the image
start_time = timeutils.utcnow()
if cache:
downloaded, vdis = _create_cached_image(context, session, instance,
name_label, image_id,
image_type)
else:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
downloaded = True
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
LOG.info(_LI("Image creation data, cacheable: %(cache)s, "
"downloaded: %(downloaded)s duration: %(duration).2f secs "
"for image %(image_id)s"),
{'image_id': image_id, 'cache': cache, 'downloaded': downloaded,
'duration': duration})
for vdi_type, vdi in six.iteritems(vdis):
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid'])
_set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type,
instance)
return vdis
def _fetch_image(context, session, instance, name_label, image_id, image_type):
"""Fetch image from glance based on image type.
Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
vdis = _fetch_vhd_image(context, session, instance, image_id)
else:
vdis = _fetch_disk_image(context, session, instance, name_label,
image_id, image_type)
for vdi_type, vdi in six.iteritems(vdis):
vdi_uuid = vdi['uuid']
LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID"
" '%(vdi_uuid)s'",
{'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid},
instance=instance)
return vdis
def _make_uuid_stack():
# NOTE(sirp): The XenAPI plugins run under Python 2.4
# which does not have the `uuid` module. To work around this,
# we generate the uuids here (under Python 2.6+) and
# pass them as arguments
return [str(uuid.uuid4()) for i in range(MAX_VDI_CHAIN_SIZE)]
def _image_uses_bittorrent(context, instance):
bittorrent = False
torrent_images = CONF.xenserver.torrent_images.lower()
if torrent_images == 'all':
bittorrent = True
elif torrent_images == 'some':
sys_meta = utils.instance_sys_meta(instance)
try:
bittorrent = strutils.bool_from_string(
sys_meta['image_bittorrent'])
except KeyError:
pass
elif torrent_images == 'none':
pass
else:
LOG.warning(_LW("Invalid value '%s' for torrent_images"),
torrent_images)
return bittorrent
def _default_download_handler():
# TODO(sirp): This should be configurable like upload_handler
return importutils.import_object(
'nova.virt.xenapi.image.glance.GlanceStore')
def _choose_download_handler(context, instance):
if _image_uses_bittorrent(context, instance):
return importutils.import_object(
'nova.virt.xenapi.image.bittorrent.BittorrentStore')
else:
return _default_download_handler()
def get_compression_level():
level = CONF.xenserver.image_compression_level
if level is not None and (level < 1 or level > 9):
LOG.warning(_LW("Invalid value '%d' for image_compression_level"),
level)
return None
return level
def _fetch_vhd_image(context, session, instance, image_id):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
LOG.debug("Asking xapi to fetch vhd image %s", image_id,
instance=instance)
handler = _choose_download_handler(context, instance)
try:
vdis = handler.download_image(context, session, instance, image_id)
except Exception:
default_handler = _default_download_handler()
# Using type() instead of isinstance() so instance of subclass doesn't
# test as equivalent
if type(handler) == type(default_handler):
raise
LOG.exception(_LE("Download handler '%(handler)s' raised an"
" exception, falling back to default handler"
" '%(default_handler)s'"),
{'handler': handler,
'default_handler': default_handler})
vdis = default_handler.download_image(
context, session, instance, image_id)
# Ensure we can see the import VHDs as VDIs
scan_default_sr(session)
vdi_uuid = vdis['root']['uuid']
try:
_check_vdi_size(context, session, instance, vdi_uuid)
except Exception:
with excutils.save_and_reraise_exception():
msg = "Error while checking vdi size"
LOG.debug(msg, instance=instance, exc_info=True)
for vdi in vdis.values():
vdi_uuid = vdi['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
safe_destroy_vdis(session, [vdi_ref])
return vdis
def _get_vdi_chain_size(session, vdi_uuid):
"""Compute the total size of a VDI chain, starting with the specified
VDI UUID.
This will walk the VDI chain to the root, add the size of each VDI into
the total.
"""
size_bytes = 0
for vdi_rec in _walk_vdi_chain(session, vdi_uuid):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
'%(vdi_size_bytes)d',
{'cur_vdi_uuid': cur_vdi_uuid,
'vdi_size_bytes': vdi_size_bytes})
size_bytes += vdi_size_bytes
return size_bytes
def _check_vdi_size(context, session, instance, vdi_uuid):
flavor = instance.get_flavor()
allowed_size = (flavor.root_gb +
VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi
if not flavor.root_gb:
# root_gb=0 indicates that we're disabling size checks
return
size = _get_vdi_chain_size(session, vdi_uuid)
if size > allowed_size:
LOG.error(_LE("Image size %(size)d exceeded flavor "
"allowed size %(allowed_size)d"),
{'size': size, 'allowed_size': allowed_size},
instance=instance)
raise exception.FlavorDiskSmallerThanImage(
flavor_size=(flavor.root_gb * units.Gi),
            # 'size' is already in bytes (sum of physical_utilisation)
            image_size=size)
def _fetch_disk_image(context, session, instance, name_label, image_id,
image_type):
"""Fetch the image from Glance
NOTE:
Unlike _fetch_vhd_image, this method does not use the Glance
plugin; instead, it streams the disks through domU to the VDI
directly.
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
image_type_str = ImageType.to_string(image_type)
LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s",
{'image_id': image_id, 'image_type_str': image_type_str},
instance=instance)
if image_type == ImageType.DISK_ISO:
sr_ref = _safe_find_iso_sr(session)
else:
sr_ref = safe_find_sr(session)
glance_image = image_utils.GlanceImage(context, image_id)
if glance_image.is_raw_tgz():
image = image_utils.RawTGZImage(glance_image)
else:
image = image_utils.RawImage(glance_image)
virtual_size = image.get_size()
vdi_size = virtual_size
LOG.debug("Size for image %(image_id)s: %(virtual_size)d",
{'image_id': image_id, 'virtual_size': virtual_size},
instance=instance)
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
vdi_size > CONF.xenserver.max_kernel_ramdisk_size):
max_size = CONF.xenserver.max_kernel_ramdisk_size
raise exception.NovaException(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") %
{'vdi_size': vdi_size, 'max_size': max_size})
vdi_ref = create_vdi(session, sr_ref, instance, name_label,
image_type_str, vdi_size)
# From this point we have a VDI on Xen host;
# If anything goes wrong, we need to remember its uuid.
try:
filename = None
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_stream_disk(
session, image.stream_to, image_type, virtual_size, dev)
if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
# We need to invoke a plugin for copying the
# content of the VDI into the proper path.
LOG.debug("Copying VDI %s to /boot/guest on dom0",
vdi_ref, instance=instance)
args = {}
args['vdi-ref'] = vdi_ref
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
if CONF.xenserver.cache_images:
args['cached-image'] = image_id
filename = session.call_plugin('kernel', 'copy_vdi', args)
# Remove the VDI as it is not needed anymore.
destroy_vdi(session, vdi_ref)
LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref,
instance=instance)
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=None, file=filename)}
else:
vdi_role = ImageType.get_role(image_type)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures.
LOG.exception(_LE("Failed to fetch glance image"),
instance=instance)
e.args = e.args + ([dict(type=ImageType.to_string(image_type),
uuid=vdi_uuid,
file=filename)],)
raise
def determine_disk_image_type(image_meta):
"""Disk Image Types are used to determine where the kernel will reside
within an image. To figure out which type we're dealing with, we use
the following rules:
1. If we're using Glance, we can use the image_type field to
determine the image_type
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
if not image_meta.obj_attr_is_set("disk_format"):
return None
disk_format_map = {
'ami': ImageType.DISK,
'aki': ImageType.KERNEL,
'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD,
'iso': ImageType.DISK_ISO,
}
try:
image_type = disk_format_map[image_meta.disk_format]
except KeyError:
raise exception.InvalidDiskFormat(disk_format=image_meta.disk_format)
LOG.debug("Detected %(type)s format for image %(image)s",
{'type': ImageType.to_string(image_type),
'image': image_meta})
return image_type
def determine_vm_mode(instance, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
os_type = instance['os_type']
if os_type == "linux":
return vm_mode.XEN
if os_type == "windows":
return vm_mode.HVM
# disk_image_type specific default for backwards compatibility
if disk_image_type == ImageType.DISK_VHD or \
disk_image_type == ImageType.DISK:
return vm_mode.XEN
# most images run OK as HVM
return vm_mode.HVM
def set_vm_name_label(session, vm_ref, name_label):
session.call_xenapi("VM.set_name_label", vm_ref, name_label)
def list_vms(session):
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="false" and '
'field "is_a_template"="false" and '
'field "resident_on"="%s"' % session.host_ref)
for vm_ref in vms.keys():
yield vm_ref, vms[vm_ref]
def lookup_vm_vdis(session, vm_ref):
"""Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
vdi_refs = []
if vbd_refs:
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
# Test valid VDI
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
LOG.debug('VDI %s is still available', vdi_uuid)
vbd_other_config = session.call_xenapi("VBD.get_other_config",
vbd_ref)
if not vbd_other_config.get('osvol'):
# This is not an attached volume
vdi_refs.append(vdi_ref)
except session.XenAPI.Failure:
                LOG.exception(_LE('Looking up the VDIs failed'))
return vdi_refs
def lookup(session, name_label, check_rescue=False):
"""Look the instance up and return it if available.
:param:check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
if check_rescue:
result = lookup(session, name_label + '-rescue', False)
if result:
return result
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
return None
elif n > 1:
raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
key = str(instance['key_data'])
net = netutils.get_injected_network_template(network_info)
metadata = instance['metadata']
# As mounting the image VDI is expensive, we only want do it once,
# if at all, so determine whether it's required first, and then do
# everything
mount_required = key or net or metadata
if not mount_required:
return
with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
_mounted_processing(dev, key, net, metadata)
def lookup_kernel_ramdisk(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
else:
return (None, None)
def is_snapshot(session, vm):
vm_rec = session.call_xenapi("VM.get_record", vm)
if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec:
return vm_rec['is_a_template'] and vm_rec['is_a_snapshot']
else:
return False
def get_power_state(session, vm_ref):
xapi_state = session.call_xenapi("VM.get_power_state", vm_ref)
return XENAPI_POWER_STATE[xapi_state]
def compile_info(session, vm_ref):
"""Fill record with VM status information."""
power_state = get_power_state(session, vm_ref)
max_mem = session.call_xenapi("VM.get_memory_static_max", vm_ref)
mem = session.call_xenapi("VM.get_memory_dynamic_max", vm_ref)
num_cpu = session.call_xenapi("VM.get_VCPUs_max", vm_ref)
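    # XenAPI reports memory in bytes; shifting right by 10 converts to KiB.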
return hardware.InstanceInfo(state=power_state,
max_mem_kb=int(max_mem) >> 10,
mem_kb=int(mem) >> 10,
num_cpu=num_cpu)
def compile_instance_diagnostics(instance, vm_rec):
vm_power_state_int = XENAPI_POWER_STATE[vm_rec['power_state']]
vm_power_state = power_state.STATE_MAP[vm_power_state_int]
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=vm_power_state,
driver='xenapi',
config_drive=config_drive)
for cpu_num in range(0, int(vm_rec['VCPUs_max'])):
diags.add_cpu()
for vif in vm_rec['VIFs']:
diags.add_nic()
for vbd in vm_rec['VBDs']:
diags.add_disk()
max_mem_bytes = int(vm_rec['memory_dynamic_max'])
diags.memory_details.maximum = max_mem_bytes / units.Mi
return diags
def compile_diagnostics(vm_rec):
"""Compile VM diagnostics data."""
try:
keys = []
diags = {}
vm_uuid = vm_rec["uuid"]
xml = _get_rrd(_get_rrd_server(), vm_uuid)
if xml:
rrd = minidom.parseString(xml)
for i, node in enumerate(rrd.firstChild.childNodes):
# Provide the last update of the information
if node.localName == 'lastupdate':
diags['last_update'] = node.firstChild.data
# Create a list of the diagnostic keys (in their order)
if node.localName == 'ds':
ref = node.childNodes
# Name and Value
if len(ref) > 6:
keys.append(ref[0].firstChild.data)
# Read the last row of the first RRA to get the latest info
if node.localName == 'rra':
rows = node.childNodes[4].childNodes
last_row = rows[rows.length - 1].childNodes
for j, value in enumerate(last_row):
diags[keys[j]] = value.firstChild.data
break
return diags
except expat.ExpatError as e:
LOG.exception(_LE('Unable to parse rrd of %s'), e)
return {"Unable to retrieve diagnostics": e}
def fetch_bandwidth(session):
bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth')
return bw
def _scan_sr(session, sr_ref=None, max_attempts=4):
if sr_ref:
# NOTE(johngarbutt) xenapi will collapse any duplicate requests
# for SR.scan if there is already a scan in progress.
# However, we don't want that, because the scan may have started
# before we modified the underlying VHDs on disk through a plugin.
# Using our own mutex will reduce cases where our periodic SR scan
# in host.update_status starts racing the sr.scan after a plugin call.
@utils.synchronized('sr-scan-' + sr_ref)
def do_scan(sr_ref):
LOG.debug("Scanning SR %s", sr_ref)
attempt = 1
while True:
try:
return session.call_xenapi('SR.scan', sr_ref)
except session.XenAPI.Failure as exc:
with excutils.save_and_reraise_exception() as ctxt:
if exc.details[0] == 'SR_BACKEND_FAILURE_40':
if attempt < max_attempts:
ctxt.reraise = False
LOG.warning(_LW("Retry SR scan due to error: "
"%s"), exc)
greenthread.sleep(2 ** attempt)
attempt += 1
do_scan(sr_ref)
def scan_default_sr(session):
"""Looks for the system default SR and triggers a re-scan."""
sr_ref = safe_find_sr(session)
_scan_sr(session, sr_ref)
return sr_ref
def safe_find_sr(session):
"""Same as _find_sr except raises a NotFound exception if SR cannot be
determined
"""
sr_ref = _find_sr(session)
if sr_ref is None:
raise exception.StorageRepositoryNotFound()
return sr_ref
def _find_sr(session):
"""Return the storage repository to hold VM images."""
host = session.host_ref
try:
tokens = CONF.xenserver.sr_matching_filter.split(':')
filter_criteria = tokens[0]
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_LW("Flag sr_matching_filter '%s' does not respect "
"formatting convention"),
CONF.xenserver.sr_matching_filter)
return None
if filter_criteria == 'other-config':
key, value = filter_pattern.split('=', 1)
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
if not (key in sr_rec['other_config'] and
sr_rec['other_config'][key] == value):
continue
for pbd_ref in sr_rec['PBDs']:
pbd_rec = session.get_rec('PBD', pbd_ref)
if pbd_rec and pbd_rec['host'] == host:
return sr_ref
elif filter_criteria == 'default-sr' and filter_pattern == 'true':
pool_ref = session.call_xenapi('pool.get_all')[0]
sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref)
if sr_ref:
return sr_ref
# No SR found!
LOG.error(_LE("XenAPI is unable to find a Storage Repository to "
"install guest instances on. Please check your "
"configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."))
return None
def _safe_find_iso_sr(session):
"""Same as _find_iso_sr except raises a NotFound exception if SR
cannot be determined
"""
sr_ref = _find_iso_sr(session)
if sr_ref is None:
raise exception.NotFound(_('Cannot find SR of content-type ISO'))
return sr_ref
def _find_iso_sr(session):
"""Return the storage repository to hold ISO images."""
host = session.host_ref
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug("ISO: looking at SR %s", sr_rec)
if not sr_rec['content_type'] == 'iso':
LOG.debug("ISO: not iso content")
continue
if 'i18n-key' not in sr_rec['other_config']:
LOG.debug("ISO: iso content_type, no 'i18n-key' key")
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
LOG.debug("ISO: iso content_type, i18n-key value not "
"'local-storage-iso'")
continue
        LOG.debug("ISO: SR matches our criteria")
for pbd_ref in sr_rec['PBDs']:
LOG.debug("ISO: ISO, looking to see if it is host local")
pbd_rec = session.get_rec('PBD', pbd_ref)
if not pbd_rec:
LOG.debug("ISO: PBD %s disappeared", pbd_ref)
continue
pbd_rec_host = pbd_rec['host']
LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s",
{'pbd_rec': pbd_rec, 'host': host})
if pbd_rec_host == host:
LOG.debug("ISO: SR with local PBD")
return sr_ref
return None
def _get_rrd_server():
"""Return server's scheme and address to use for retrieving RRD XMLs."""
xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
return [xs_url.scheme, xs_url.netloc]
def _get_rrd(server, vm_uuid):
"""Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
CONF.xenserver.connection_username,
CONF.xenserver.connection_password,
server[1],
vm_uuid))
return xml.read()
except IOError:
LOG.exception(_LE('Unable to obtain RRD XML for VM %(vm_uuid)s with '
'server details: %(server)s.'),
{'vm_uuid': vm_uuid, 'server': server})
return None
def _get_all_vdis_in_sr(session, sr_ref):
for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref):
vdi_rec = session.get_rec('VDI', vdi_ref)
# Check to make sure the record still exists. It may have
# been deleted between the get_all call and get_rec call
if vdi_rec:
yield vdi_ref, vdi_rec
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
"""Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref):
yield vdi_ref
except session.XenAPI.Failure:
continue
def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None):
if vdi_rec is None:
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
if 'vhd-parent' not in vdi_rec['sm_config']:
return None
parent_uuid = vdi_rec['sm_config']['vhd-parent']
vdi_uuid = vdi_rec['uuid']
LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s',
{'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid})
return parent_uuid
def _walk_vdi_chain(session, vdi_uuid):
"""Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
yield vdi_rec
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec)
if not parent_uuid:
break
vdi_uuid = parent_uuid
def _is_vdi_a_snapshot(vdi_rec):
"""Ensure VDI is a snapshot, and not cached image."""
is_a_snapshot = vdi_rec['is_a_snapshot']
image_id = vdi_rec['other_config'].get('image-id')
return is_a_snapshot and not image_id
def _child_vhds(session, sr_ref, vdi_uuid_list, old_snapshots_only=False):
"""Return the immediate children of a given VHD.
This is not recursive, only the immediate children are returned.
"""
children = set()
for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
rec_uuid = rec['uuid']
if rec_uuid in vdi_uuid_list:
continue
parent_uuid = _get_vhd_parent_uuid(session, ref, rec)
if parent_uuid not in vdi_uuid_list:
continue
if old_snapshots_only and not _is_vdi_a_snapshot(rec):
continue
children.add(rec_uuid)
return list(children)
def _count_children(session, parent_vdi_uuid, sr_ref):
# Search for any other vdi which has the same parent as us to work out
# whether we have siblings and therefore if coalesce is possible
children = 0
for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
if (rec['sm_config'].get('vhd-parent') == parent_vdi_uuid):
children = children + 1
return children
def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
vdi_uuid_list):
"""Spin until the parent VHD is coalesced into one of the VDIs in the list
vdi_uuid_list is a list of acceptable final parent VDIs for vdi_ref; once
the parent of vdi_ref is in vdi_uuid_chain we consider the coalesce over.
The use case is there are any number of VDIs between those in
vdi_uuid_list and vdi_ref that we expect to be coalesced, but any of those
in vdi_uuid_list may also be coalesced (except the base UUID - which is
guaranteed to remain)
"""
# If the base disk was a leaf node, there will be no coalescing
# after a VDI snapshot.
if len(vdi_uuid_list) == 1:
LOG.debug("Old chain is single VHD, coalesce not possible.",
instance=instance)
return
# If the parent of the original disk has other children,
# there will be no coalesce because of the VDI snapshot.
# For example, the first snapshot for an instance that has been
# spawned from a cached image, will not coalesce, because of this rule.
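    # vdi_uuid_list[0] is the leaf VDI that was snapshotted (see "Remove the
    # leaf node" below), so index 1 is its immediate parent.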
parent_vdi_uuid = vdi_uuid_list[1]
if _count_children(session, parent_vdi_uuid, sr_ref) > 1:
LOG.debug("Parent has other children, coalesce is unlikely.",
instance=instance)
return
# When the VDI snapshot is taken, a new parent is created.
# Assuming it is not one of the above cases, that new parent
# can be coalesced, so we need to wait for that to happen.
max_attempts = CONF.xenserver.vhd_coalesce_max_attempts
# Remove the leaf node from list, to get possible good parents
# when the coalesce has completed.
# Its possible that other coalesce operation happen, so we need
# to consider the full chain, rather than just the most recent parent.
good_parent_uuids = vdi_uuid_list[1:]
for i in range(max_attempts):
# NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
# matches the underlying VHDs.
# This can also kick XenServer into performing a pending coalesce.
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
if parent_uuid and (parent_uuid not in good_parent_uuids):
LOG.debug("Parent %(parent_uuid)s not yet in parent list"
" %(good_parent_uuids)s, waiting for coalesce...",
{'parent_uuid': parent_uuid,
'good_parent_uuids': good_parent_uuids},
instance=instance)
else:
LOG.debug("Coalesce detected, because parent is: %s" % parent_uuid,
instance=instance)
return
greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval)
msg = (_("VHD coalesce attempts exceeded (%d)"
", giving up...") % max_attempts)
raise exception.NovaException(msg)
def _remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
fixed in future versions:
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
should_remap = CONF.xenserver.remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
new_prefix = CONF.xenserver.remap_vbd_dev_prefix
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
def _wait_for_device(dev):
"""Wait for device node to appear."""
for i in range(0, CONF.xenserver.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return
time.sleep(1)
raise exception.StorageError(
reason=_('Timeout waiting for device %s to be created') % dev)
def cleanup_attached_vdis(session):
"""Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
for vbd_ref in vbd_refs:
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
except session.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
continue
if 'nova_instance_uuid' in vdi_rec['other_config']:
# Belongs to an instance and probably left over after an
# unclean restart
LOG.info(_LI('Disconnecting stale VDI %s from compute domU'),
vdi_rec['uuid'])
unplug_vbd(session, vbd_ref, this_vm_ref)
destroy_vbd(session, vbd_ref)
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
this_vm_ref = _get_this_vm_ref(session)
vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect',
read_only=read_only, bootable=False)
try:
LOG.debug('Plugging VBD %s ... ', vbd_ref)
session.VBD.plug(vbd_ref, this_vm_ref)
try:
LOG.debug('Plugging VBD %s done.', vbd_ref)
orig_dev = session.call_xenapi("VBD.get_device", vbd_ref)
LOG.debug('VBD %(vbd_ref)s plugged as %(orig_dev)s',
{'vbd_ref': vbd_ref, 'orig_dev': orig_dev})
dev = _remap_vbd_dev(orig_dev)
if dev != orig_dev:
LOG.debug('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s',
{'vbd_ref': vbd_ref, 'dev': dev})
_wait_for_device(dev)
yield dev
finally:
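            # Flush pending writes so the VDI contents are consistent
            # before the VBD is unplugged.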
utils.execute('sync', run_as_root=True)
LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
unplug_vbd(session, vbd_ref, this_vm_ref)
finally:
try:
destroy_vbd(session, vbd_ref)
except exception.StorageError:
# destroy_vbd() will log error
pass
LOG.debug('Destroying VBD for VDI %s done.', vdi_ref)
def _get_sys_hypervisor_uuid():
with file('/sys/hypervisor/uuid') as f:
return f.readline().strip()
def get_this_vm_uuid(session):
if session and session.is_local_connection:
# UUID is the control domain running on this host
vms = session.call_xenapi("VM.get_all_records_where",
'field "is_control_domain"="true" and '
'field "resident_on"="%s"' %
session.host_ref)
return vms[list(vms.keys())[0]]['uuid']
try:
return _get_sys_hypervisor_uuid()
except IOError:
# Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
# cannot read from uuid after a reboot. Fall back to trying xenstore.
# See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True)
vm_key, _ = utils.execute('xenstore-read',
'/local/domain/%s/vm' % domid.strip(),
run_as_root=True)
return vm_key.strip()[4:]
def _get_this_vm_ref(session):
return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session))
def _get_partitions(dev):
"""Return partition information (num, size, type) for a device."""
dev_path = utils.make_dev_path(dev)
out, _err = utils.execute('parted', '--script', '--machine',
dev_path, 'unit s', 'print',
run_as_root=True)
lines = [line for line in out.split('\n') if line]
partitions = []
LOG.debug("Partitions:")
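    # 'parted --machine' prints two header lines (unit marker and device
    # summary) before the colon-separated, ';'-terminated partition rows.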
for line in lines[2:]:
line = line.rstrip(';')
num, start, end, size, fstype, name, flags = line.split(':')
num = int(num)
start = int(start.rstrip('s'))
end = int(end.rstrip('s'))
size = int(size.rstrip('s'))
LOG.debug(" %(num)s: %(fstype)s %(size)d sectors",
{'num': num, 'fstype': fstype, 'size': size})
partitions.append((num, start, size, fstype, name, flags))
return partitions
def _stream_disk(session, image_service_func, image_type, virtual_size, dev):
offset = 0
if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(session, virtual_size, dev)
dev_path = utils.make_dev_path(dev)
with utils.temporary_chown(dev_path):
with open(dev_path, 'wb') as f:
f.seek(offset)
image_service_func(f)
def _write_partition(session, virtual_size, dev):
dev_path = utils.make_dev_path(dev)
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
LOG.debug('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dev_path)s...',
{'primary_first': primary_first, 'primary_last': primary_last,
'dev_path': dev_path})
_make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
LOG.debug('Writing partition table %s done.', dev_path)
def _repair_filesystem(partition_path):
# Exit Code 1 = File system errors corrected
# 2 = File system errors corrected, system needs a reboot
utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True,
check_exit_code=[0, 1, 2])
def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags):
"""Resize partition and fileystem.
This assumes we are dealing with a single primary partition and using
ext3 or ext4.
"""
size = new_sectors - start
end = new_sectors - 1
dev_path = utils.make_dev_path(dev)
partition_path = utils.make_dev_path(dev, partition=1)
# Replay journal if FS wasn't cleanly unmounted
_repair_filesystem(partition_path)
# Remove ext3 journal (making it ext2)
utils.execute('tune2fs', '-O ^has_journal', partition_path,
run_as_root=True)
if new_sectors < old_sectors:
# Resizing down, resize filesystem before partition resize
try:
utils.execute('resize2fs', partition_path, '%ds' % size,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.error(six.text_type(exc))
reason = _("Shrinking the filesystem down with resize2fs "
"has failed, please check if you have "
"enough free space on your disk.")
raise exception.ResizeError(reason=reason)
utils.execute('parted', '--script', dev_path, 'rm', '1',
run_as_root=True)
utils.execute('parted', '--script', dev_path, 'mkpart',
'primary',
'%ds' % start,
'%ds' % end,
run_as_root=True)
if "boot" in flags.lower():
utils.execute('parted', '--script', dev_path,
'set', '1', 'boot', 'on',
run_as_root=True)
if new_sectors > old_sectors:
# Resizing up, resize filesystem after partition resize
utils.execute('resize2fs', partition_path, run_as_root=True)
# Add back journal
utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
def _log_progress_if_required(left, last_log_time, virtual_size):
if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS):
last_log_time = timeutils.utcnow()
complete_pct = float(virtual_size - left) / virtual_size * 100
LOG.debug("Sparse copy in progress, "
"%(complete_pct).2f%% complete. "
"%(left)s bytes left to copy",
{"complete_pct": complete_pct, "left": left})
return last_log_time
def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
"""Copy data, skipping long runs of zeros to create a sparse file."""
start_time = last_log_time = timeutils.utcnow()
EMPTY_BLOCK = '\0' * block_size
bytes_read = 0
skipped_bytes = 0
left = virtual_size
LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d",
{'src_path': src_path, 'dst_path': dst_path,
'virtual_size': virtual_size, 'block_size': block_size})
# NOTE(sirp): we need read/write access to the devices; since we don't have
# the luxury of shelling out to a sudo'd command, we temporarily take
# ownership of the devices.
with utils.temporary_chown(src_path):
with utils.temporary_chown(dst_path):
with open(src_path, "r") as src:
with open(dst_path, "w") as dst:
data = src.read(min(block_size, left))
while data:
if data == EMPTY_BLOCK:
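                            # Seek past runs of zeros instead of writing them;
                            # the resulting hole keeps the destination sparse.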
dst.seek(block_size, os.SEEK_CUR)
left -= block_size
bytes_read += block_size
skipped_bytes += block_size
else:
dst.write(data)
data_len = len(data)
left -= data_len
bytes_read += data_len
if left <= 0:
break
data = src.read(min(block_size, left))
greenthread.sleep(0)
last_log_time = _log_progress_if_required(
left, last_log_time, virtual_size)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
compression_pct = float(skipped_bytes) / bytes_read * 100
LOG.debug("Finished sparse_copy in %(duration).2f secs, "
"%(compression_pct).2f%% reduction in size",
{'duration': duration, 'compression_pct': compression_pct})
def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
# Part of disk taken up by MBR
virtual_size -= MBR_SIZE_BYTES
with vdi_attached_here(session, src_ref, read_only=True) as src:
src_path = utils.make_dev_path(src, partition=partition)
with vdi_attached_here(session, dst_ref, read_only=False) as dst:
dst_path = utils.make_dev_path(dst, partition=partition)
_write_partition(session, virtual_size, dst)
if CONF.xenserver.sparse_copy:
_sparse_copy(src_path, dst_path, virtual_size)
else:
num_blocks = virtual_size / SECTOR_SIZE
utils.execute('dd',
'if=%s' % src_path,
'of=%s' % dst_path,
'count=%d' % num_blocks,
'iflag=direct,sync',
'oflag=direct,sync',
run_as_root=True)
def _mount_filesystem(dev_path, dir):
"""mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
dev_path, dir, run_as_root=True)
except processutils.ProcessExecutionError as e:
err = six.text_type(e)
return err
def _mounted_processing(device, key, net, metadata):
"""Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
# Mount only Linux filesystems, to avoid disturbing NTFS images
err = _mount_filesystem(dev_path, tmpdir)
if not err:
try:
# This try block ensures that the umount occurs
if not agent.find_guest_agent(tmpdir):
# TODO(berrange) passing in a None filename is
# rather dubious. We shouldn't be re-implementing
# the mount/unmount logic here either, when the
# VFSLocalFS impl has direct support for mount
# and unmount handling if it were passed a
# non-None filename
vfs = vfsimpl.VFSLocalFS(
imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW),
imgdir=tmpdir)
LOG.info(_LI('Manipulating interface files directly'))
# for xenapi, we don't 'inject' admin_password here,
# it's handled at instance startup time, nor do we
# support injecting arbitrary files here.
disk.inject_data_into_fs(vfs,
key, net, metadata, None, None)
finally:
utils.execute('umount', dev_path, run_as_root=True)
else:
LOG.info(_LI('Failed to mount filesystem (expected for '
'non-linux instances): %s'), err)
def ensure_correct_host(session):
"""Ensure we're connected to the host we're running on. This is the
required configuration for anything that uses vdi_attached_here.
"""
this_vm_uuid = get_this_vm_uuid(session)
try:
session.call_xenapi('VM.get_by_uuid', this_vm_uuid)
except session.XenAPI.Failure as exc:
if exc.details[0] != 'UUID_INVALID':
raise
raise Exception(_('This domU must be running on the host '
'specified by connection_url'))
def import_all_migrated_disks(session, instance, import_root=True):
root_vdi = None
if import_root:
root_vdi = _import_migrated_root_disk(session, instance)
eph_vdis = _import_migrate_ephemeral_disks(session, instance)
return {'root': root_vdi, 'ephemerals': eph_vdis}
def _import_migrated_root_disk(session, instance):
chain_label = instance['uuid']
vdi_label = instance['name']
return _import_migrated_vhds(session, instance, chain_label, "root",
vdi_label)
def _import_migrate_ephemeral_disks(session, instance):
ephemeral_vdis = {}
instance_uuid = instance['uuid']
ephemeral_gb = instance.old_flavor.ephemeral_gb
disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb)
for chain_number, _size in enumerate(disk_sizes, start=1):
chain_label = instance_uuid + "_ephemeral_%d" % chain_number
vdi_label = "%(name)s ephemeral (%(number)d)" % dict(
name=instance['name'], number=chain_number)
ephemeral_vdi = _import_migrated_vhds(session, instance,
chain_label, "ephemeral",
vdi_label)
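        # chain_number starts at 1, so the first imported ephemeral disk
        # ends up at userdevice 4.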
userdevice = 3 + chain_number
ephemeral_vdis[str(userdevice)] = ephemeral_vdi
return ephemeral_vdis
def _import_migrated_vhds(session, instance, chain_label, disk_type,
vdi_label):
"""Move and possibly link VHDs via the XAPI plugin."""
# TODO(johngarbutt) tidy up plugin params
imported_vhds = session.call_plugin_serialized(
'migration', 'move_vhds_into_sr', instance_uuid=chain_label,
sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack())
# Now we rescan the SR so we find the VHDs
scan_default_sr(session)
vdi_uuid = imported_vhds['root']['uuid']
vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
# Set name-label so we can find if we need to clean up a failed migration
_set_vdi_info(session, vdi_ref, disk_type, vdi_label,
disk_type, instance)
return {'uuid': vdi_uuid, 'ref': vdi_ref}
def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num,
ephemeral_number=0):
LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d",
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
chain_label = instance['uuid']
if ephemeral_number:
chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number
try:
# TODO(johngarbutt) tidy up plugin params
session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except session.XenAPI.Failure:
msg = "Failed to transfer vhd to new host"
LOG.debug(msg, instance=instance, exc_info=True)
raise exception.MigrationError(reason=msg)
def vm_ref_or_raise(session, instance_name):
vm_ref = lookup(session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
return vm_ref
def handle_ipxe_iso(session, instance, cd_vdi, network_info):
"""iPXE ISOs are a mechanism to allow the customer to roll their own
image.
To use this feature, a service provider needs to configure the
appropriate Nova flags, roll an iPXE ISO, then distribute that image
to customers via Glance.
NOTE: `mkisofs` is not present by default in the Dom0, so the service
provider can either add that package manually to Dom0 or include the
`mkisofs` binary in the image itself.
"""
boot_menu_url = CONF.xenserver.ipxe_boot_menu_url
if not boot_menu_url:
LOG.warning(_LW('ipxe_boot_menu_url not set, user will have to'
' enter URL manually...'), instance=instance)
return
network_name = CONF.xenserver.ipxe_network_name
if not network_name:
LOG.warning(_LW('ipxe_network_name not set, user will have to'
' enter IP manually...'), instance=instance)
return
network = None
for vif in network_info:
if vif['network']['label'] == network_name:
network = vif['network']
break
if not network:
LOG.warning(_LW("Unable to find network matching '%(network_name)s', "
"user will have to enter IP manually..."),
{'network_name': network_name}, instance=instance)
return
sr_path = get_sr_path(session)
# Unpack IPv4 network info
subnet = [sn for sn in network['subnets']
if sn['version'] == 4][0]
ip = subnet['ips'][0]
ip_address = ip['address']
netmask = network_model.get_netmask(ip, subnet)
gateway = subnet['gateway']['address']
dns = subnet['dns'][0]['address']
try:
session.call_plugin_serialized("ipxe", "inject", sr_path,
cd_vdi['uuid'], boot_menu_url, ip_address, netmask,
gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd)
except session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'CommandNotFound':
LOG.warning(_LW("ISO creation tool '%s' does not exist."),
CONF.xenserver.ipxe_mkisofs_cmd, instance=instance)
else:
raise
def set_other_config_pci(session, vm_ref, params):
"""Set the pci key of other-config parameter to params."""
other_config = session.call_xenapi("VM.get_other_config", vm_ref)
other_config['pci'] = params
session.call_xenapi("VM.set_other_config", vm_ref, other_config)
|
windskyer/nova
|
nova/virt/xenapi/vm_utils.py
|
Python
|
gpl-2.0
| 99,845
|
from bs4 import BeautifulSoup
import urllib2
from animapy.helpers.common import functions
class anitube(functions):
'''
thread function
gets:
offset: point in the list
items: search item list
parent: function caller
position: where to set the result in a list owned by the caller
does:
sets the complete data of an episode in a list owned by the caller
'''
def getVideos(self, offset, items, parent, position):
episodes = None
# in case the result is lower than the desired offset returns None
if len(items) > offset:
metaData = self.__getMetadata(items[offset])
links = self.__getVideoLinks(metaData['link'])
episodes = self.createObject(metaData['title'], metaData['image'], links['normal'], links['hd'])
        if episodes is not None:
parent.setResult(episodes, position)
parent.count = parent.count + 1
'''
gets:
link: page link where the video plays
returns:
the episode normal and hd link
'''
def getVideoFromLink(self, link):
        links = self.__getVideoLinks(link)
        episode = self.createObject(normal=links['normal'], hd=links['hd'])
return episode
'''
gets:
items: search item list
quant: quantity of items to get the metadata
returns:
a list with episodes metadata
'''
def getAnimesMetadata(self, items, quant):
if len(items) < quant:
quant = len(items)
result = []
        # collect metadata for the first 'quant' search results
for i in range(quant):
            metaData = self.__getMetadata(items[i])
            episode = self.createObject(metaData['title'], metaData['image'], link=metaData['link'])
            if episode is not None:
result.append(episode)
return result
'''
gets:
anime: name of the anime to search
order: orders the search
returns:
item list
'''
def getSearchItems(self, anime, order):
# gets the correct URL
if order == 'date':
url = 'http://www.anitube.se/search/basic/1/?sort=addate&search_type=&search_id=' + anime
elif order == 'title':
url = 'http://anitube.xpg.uol.com.br/search/basic/1/?sort=title&search_type=&search_id=' + anime
elif order == 'viewnum':
url = 'http://anitube.xpg.uol.com.br/search/basic/1/?sort=viewnum&search_type=&search_id=' + anime
elif order == 'rate':
url = 'http://anitube.xpg.uol.com.br/search/basic/1/?sort=rate&search_type=&search_id=' + anime
else:
url = 'http://www.anitube.se/search/?search_id=' + anime
content = self.calUrl(url)
soup = BeautifulSoup(content)
# returns all the items
return soup.findAll('li', { "class" : 'mainList' })
'''
gets:
tag: html tag
returns:
the content
'''
def __getContent(self, tag):
return tag.contents[0].encode('ascii','ignore')
'''
gets:
tag: html tag
returns:
the src attribute
'''
def __getSrc(self, tag):
return tag.find('img').get('src').encode('ascii','ignore')
'''
gets:
link: page link where the video plays
returns:
the episode normal and hd link
'''
def __getVideoLinks(self, link):
hd = ''
normal = ''
# calls to get the movie url
content = self.calUrl(link)
newSoup = BeautifulSoup(content)
data = newSoup.find(id="videoPlayer").findAll('script')[2].get('src')
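        # The player embeds an external javascript file (third <script> tag)
        # whose lines contain the direct video URLs; fetch and scan it below.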
response = urllib2.urlopen(data)
        # loop through the javascript lines to pick out the movie links
for line in response:
if ('cdn.anitu.be' in line) or ('vid.anitu.be' in line):
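                # Matching lines are javascript assignments; the slicing below
                # presumably strips the fixed prefix and the trailing
                # quote/semicolon so that only the bare URL remains.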
if '_hd' in line:
hd = line.rstrip()[9:-2]
else:
normal = line.rstrip()[9:-2]
return {'hd': hd, 'normal': normal}
'''
gets:
item: episode
returns:
the episode metadata
'''
def __getMetadata(self, item):
aTag = item.find('div', { "class" : 'videoTitle' }).a
title = self.__getContent(aTag)
image = self.__getSrc(item)
link = aTag.get('href')
return {'title': title, 'image': image, 'link': link.encode('ascii','ignore')}
|
JWebCoder/animapy
|
animapy/sources/anitube.py
|
Python
|
gpl-2.0
| 4,766
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import os
from PIL import ImageTk
from Tkinter import Frame, Label, BOTH, TOP, X, BOTTOM, Button, RIGHT, LEFT, SUNKEN
from ttk import Notebook, Style
########################################################################
class About(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.parent = master
self.parent.geometry("640x480")
self.parent.title(os.getenv("NAME") + " - About")
self.master.configure(padx=10, pady=10)
self.name_version = os.getenv("NAME")+" "+os.getenv("VERSION")+"-"+os.getenv("SUBVERSION")
icon = os.path.join("tkgui", "resources", "art", "pinguino11.png")
self.image_pinguino = ImageTk.PhotoImage(file=icon)
self.style_text = {"font": "inherit 20",}
self.build_home()
#----------------------------------------------------------------------
def build_home(self):
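        # Hide the other panes (if they were built already) before laying
        # out this one.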
if getattr(self, "credits", False): self.credits.pack_forget()
if getattr(self, "license", False): self.license.pack_forget()
self.home = Frame(self.parent)
self.home.pack(expand=True, fill=BOTH)
Label(self.home, text=self.name_version, **self.style_text).pack(side=TOP, expand=True, fill=X)
image = Label(self.home, image=self.image_pinguino)
image.photo = self.image_pinguino
image.pack(side=TOP, expand=True, fill=BOTH)
description = "Pinguino is an Open Software and Open Hardware\nArduino-like project.\
Boards are based on 8 or 32-bit USB built-in\nMicrochip microcontrollers. The main goal\
is to build a real\nUSB system without USB to serial converter."
Label(self.home, text=description).pack(side=TOP, expand=True, fill=X)
self.panel_buttons = Frame(self.home)
self.panel_buttons.pack(side=BOTTOM, fill=BOTH, expand=True)
Button(self.panel_buttons, text="Close", command=self.quit).pack(side=RIGHT, fill=X, expand=True)
Button(self.panel_buttons, text="Credits", command=self.build_credits).pack(side=LEFT, fill=X, expand=True)
#----------------------------------------------------------------------
def build_credits(self):
if getattr(self, "home", False): self.home.pack_forget()
if getattr(self, "license", False): self.license.pack_forget()
self.credits = Frame(self.parent)
self.credits.pack(expand=True, fill=BOTH)
Label(self.credits, text="Credits", **self.style_text).pack(side=TOP, expand=True, fill=X)
style = Style()
style.configure("BW.TNotebook", background=self.parent.cget("bg"), borderwidth=1, relief=SUNKEN, highlightthickness=1)
notebook = Notebook(self.credits, style="BW.TNotebook")
write = ("Jean-Pierre Mandon",
"Régis Blanchot",
"Marcus Fazzi",
"Jesus Carmona Esteban",
"Alfred Broda",
"Yeison Cardona",
"Henk Van Beek",
"Björn Pfeiffer",
"Alexis Sánchez",
)
label_write = Label(self.credits, text="\n\n".join(write))
label_write.pack(side=TOP, expand=True, fill=BOTH)
        notebook.add(label_write, text="Written by")
doc = ("Benoit Espinola",
"Sebastien Koechlin",
"Ivan Ricondo",
"Jesus Carmona Esteban",
"Marcus Fazzi",
"Régis Blanchot",
)
label_doc = Label(self.credits, text="\n\n".join(doc))
label_doc.pack(side=TOP, expand=True, fill=BOTH)
notebook.add(label_doc, text="Documented by")
trans = ("Joan Espinoza",
"Alexis Sánchez",
"Régis Blanchot",
"Moreno Manzini",
"Yeison Cardona",
"\"Avrin\"",
)
label_trans = Label(self.credits, text="\n\n".join(trans))
label_trans.pack(side=TOP, expand=True, fill=BOTH)
notebook.add(label_trans, text="Translated by")
art = ("France Cadet",
"Laurent Cos--tes",
"Daniel Rodriguez",
)
label_art = Label(self.credits, text="\n\n".join(art))
label_art.pack(side=TOP, expand=True, fill=BOTH)
notebook.add(label_art, text="Art by")
notebook.pack(side=TOP, fill=BOTH, expand=True)
self.panel_buttons = Frame(self.credits)
self.panel_buttons.pack(side=BOTTOM, fill=BOTH, expand=True)
Button(self.panel_buttons, text="Close", command=self.quit).pack(side=RIGHT, fill=X, expand=True)
Button(self.panel_buttons, text="License", command=self.build_license).pack(side=LEFT, fill=X, expand=True)
#----------------------------------------------------------------------
def build_license(self):
if getattr(self, "home", False): self.home.pack_forget()
if getattr(self, "credits", False): self.credits.pack_forget()
self.license = Frame(self.parent)
self.license.pack(expand=True, fill=BOTH)
Label(self.license, text="License", **self.style_text).pack(side=TOP, expand=True, fill=BOTH)
lic = """Pinguino is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free Software Foundation;
either version 2 of the License, or (at your option) any later version.
Pinguino is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of
the GNU General Public License along with Pinguino; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA""" + "\n" * 10
Label(self.license, text=lic).pack(side=TOP, expand=True, fill=X)
self.panel_buttons = Frame(self.license)
self.panel_buttons.pack(side=BOTTOM, fill=BOTH, expand=True)
Button(self.panel_buttons, text="Close", command=self.quit).pack(side=RIGHT, fill=X, expand=True)
Button(self.panel_buttons, text="About", command=self.build_home).pack(side=LEFT, fill=X, expand=True)
#----------------------------------------------------------------------
def quit(self):
""""""
self.master.destroy()
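########################################################################
# Illustrative sketch (not part of the original module): running the About
# window stand-alone. It assumes the NAME/VERSION/SUBVERSION environment
# variables read above are set and that the pinguino11.png artwork is
# reachable from the working directory.
if __name__ == "__main__":
    from Tkinter import Tk
    os.environ.setdefault("NAME", "Pinguino")
    os.environ.setdefault("VERSION", "11")
    os.environ.setdefault("SUBVERSION", "0")
    root = Tk()
    About(master=root)
    root.mainloop()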
|
PinguinoIDE/pinguino-ide-tk
|
tkgui/ide/child_windows/about.py
|
Python
|
gpl-2.0
| 6,492
|
# Simple Last.fm API crawler to download listening events.
__author__ = 'mms'
# Load required modules
import os
import urllib
import csv
import json
import shutil
from os import listdir
from os.path import isfile, join
# Parameters
LASTFM_API_URL = "http://ws.audioscrobbler.com/2.0/"
LASTFM_API_KEY = "57ee3318536b23ee81d6b27e36997cde" # enter your API key
LASTFM_OUTPUT_FORMAT = "json"
MAX_PAGES = 5 # maximum number of pages per user
MAX_ARTISTS = 50 # maximum number of top artists to fetch
MAX_FANS = 10 # maximum number of fans per artist
MAX_EVENTS_PER_PAGE = 200 # maximum number of listening events to retrieve per page
MAX_LE = 500            # maximum number of users to fetch listening events for
GET_NEW_USERS = False # set to True if new users should be retrieved
USERS_FILE = "./seed_users.csv" # text file containing Last.fm user names
OUTPUT_DIRECTORY = "./" # directory to write output to
OUTPUT_FILE = "./users.txt" # file to write output
LE_FILE = "./LE.txt" # aggregated listening events
USE_EXISTING_LE = True # use already fetched LE from listening_events folder
# Simple function to read content of a text file into a list
def read_users(users_file):
users = [] # list to hold user names
with open(users_file, 'r') as f:
reader = csv.reader(f, delimiter='\t') # create reader
for row in reader:
users.append(row[0])
return users
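# Illustrative sketch (not part of the original script; the file and user
# names are hypothetical): the seed file is tab-separated with the Last.fm
# user name in the first column. The helper below is never called; it only
# documents the format read_users() expects.
def _example_read_users():
    with open("example_seed_users.csv", "w") as f:
        f.write("SomeUser\tignored-extra-column\n")
        f.write("AnotherUser\n")
    return read_users("example_seed_users.csv")  # -> ['SomeUser', 'AnotherUser']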
# Function to call Last.fm API: user.getRecentTracks
def lastfm_api_call_getLEs(user, output_dir):
content_merged = [] # empty list
# Ensure that output directory structure exists
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Retrieve content from URL
query_quoted = urllib.quote(user)
# Loop over number of pages to retrieve
for p in range(0, MAX_PAGES):
# Construct API call
url = LASTFM_API_URL + "?method=user.getrecenttracks&user=" + query_quoted + \
"&format=" + LASTFM_OUTPUT_FORMAT + \
"&api_key=" + LASTFM_API_KEY + \
"&limit=" + str(MAX_EVENTS_PER_PAGE) + \
"&page=" + str(p+1)
print "Retrieving page #" + str(p+1)
content = urllib.urlopen(url).read()
# Add retrieved content of current page to merged content variable
content_merged.append(content)
# Write content to local file
output_file = output_dir + "/" + user + "_" + str(p+1) + "." + LASTFM_OUTPUT_FORMAT
file_out = open(output_file, 'w')
file_out.write(content)
file_out.close()
# Return all content retrieved for given user
return content_merged
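# Illustrative example (not part of the original script; the user name is
# hypothetical and the key is elided): for user "SomeUser" and page 1 the URL
# built above has the form
#   http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&user=SomeUser&format=json&api_key=<LASTFM_API_KEY>&limit=200&page=1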
# ADDED THIS NEW FUNCTION
# Function to call Last.fm API: Chart.getTopArtists
def lastfm_api_call_getTopArtists():
content_merged = [] # empty list
# Construct API call
url = LASTFM_API_URL + "?method=chart.gettopartists" + \
"&format=" + LASTFM_OUTPUT_FORMAT + \
"&api_key=" + LASTFM_API_KEY + \
"&limit=" + str(MAX_ARTISTS)
content = urllib.urlopen(url).read()
# Add retrieved content of current page to merged content variable
content_merged.append(content)
json_content = json.loads(content)
artist_list = []
for _artist in range(0, MAX_ARTISTS):
artist_list.append((json_content["artists"]["artist"][_artist]["name"]).encode("utf-8"))
# Write content to local file
# output_file = "./topartist.txt"
# file_out = open(output_file, 'w')
# file_out.write(artist_list)
# file_out.close()
return artist_list
# ADDED THIS NEW FUNCTION
# Function to call Last.fm API: Artist.getTopFans
def lastfm_api_call_getTopFans(artist_list):
content_merged = [] # empty list
user_list = ""
# Construct API call
for _artist in range(0, MAX_ARTISTS):
url = LASTFM_API_URL + "?method=artist.gettopfans" + \
"&api_key=" + LASTFM_API_KEY + \
"&artist=" + artist_list[_artist] + \
"&format=" + LASTFM_OUTPUT_FORMAT
_content = urllib.urlopen(url).read()
# Add retrieved content of current page to merged content variable
content_merged.append(_content)
json_content = json.loads(_content)
for _user in range(0, MAX_FANS):
user_list += (json_content["topfans"]["user"][_user]["name"]).encode("utf-8") + '\n'
# Write content to local file
output_file = "./users.txt"
file_out = open(output_file, 'w')
    file_out.write(user_list)
file_out.close()
# ADDED THIS NEW FUNCTION
# Function to call Last.fm API: user.getFriends
def lastfm_api_call_getFriends(user):
content_merged = [] # empty list
friend_list = [] # empty list
# Construct API call
url = LASTFM_API_URL + "?method=user.getfriends" + \
"&api_key=" + LASTFM_API_KEY + \
"&user=" + str(user) + \
"&format=" + LASTFM_OUTPUT_FORMAT
_content = urllib.urlopen(url).read()
# Add retrieved content of current page to merged content variable
content_merged.append(_content)
json_content = json.loads(_content)
if "friends" in json_content.keys():
for _friend in json_content["friends"]["user"]:
friend_list.append(_friend["name"].encode("utf-8"))
return friend_list
def retrieve_listening_events(LEs, users):
# For all users, retrieve listening events
for u in range(0, MAX_LE):
print 'Fetching listening events for user #' + str(u+1) + ': ' + users[u] + ' ...'
content = lastfm_api_call_getLEs(users[u], OUTPUT_DIRECTORY + "/listening_events/")
# Parse retrieved JSON content
try:
# For all retrieved JSON pages of current user
for page in range(0, len(content)):
listening_events = json.loads(content[page])
# Get number of listening events in current JSON
no_items = len(listening_events["recenttracks"]["track"])
# Read artist, track names and time stamp for each listening event
for item in range(0, no_items):
artist = listening_events["recenttracks"]["track"][item]["artist"]["#text"]
track = listening_events["recenttracks"]["track"][item]["name"]
time = listening_events["recenttracks"]["track"][item]["date"]["uts"]
# print users[u], artist, track, time
# Add listening event to aggregated list of LEs
LEs.append([users[u], artist.encode('utf8'), track.encode('utf8'), str(time)])
except KeyError: # JSON tag not found
print "JSON tag not found!"
continue
return LEs
def retrieve_listening_events_existing(LEs):
# get all listening event files
path = OUTPUT_DIRECTORY + "/listening_events/"
files = [ f for f in listdir(path) if isfile(join(path,f)) ]
    # prepare a dictionary mapping each user to that user's listening-event files
    # (e.g. {'SomeUser': ['SomeUser_1.json', 'SomeUser_2.json']})
userFiles = dict()
for file in files:
user = str(file[:file.rfind("_")])
if user not in userFiles:
userFiles[user] = []
userFiles[user].append(file)
# get merged LE for every user
for user in userFiles:
content = []
for userFile in userFiles[user]:
with open(path + userFile, 'r') as lefile:
listening_events = json.loads(lefile.read())
try:
# Get number of listening events in current JSON
no_items = len(listening_events["recenttracks"]["track"])
# Read artist, track names and time stamp for each listening event
for item in range(0, no_items):
artist = listening_events["recenttracks"]["track"][item]["artist"]["#text"]
track = listening_events["recenttracks"]["track"][item]["name"]
time = listening_events["recenttracks"]["track"][item]["date"]["uts"]
print user, artist, track, time
# Add listening event to aggregated list of LEs
LEs.append([user, artist.encode('utf8'), track.encode('utf8'), str(time)])
except KeyError: # JSON tag not found
print "JSON tag not found!"
continue
return LEs
# Main program
if __name__ == '__main__':
# Create output directory if non-existent
if not os.path.exists(OUTPUT_DIRECTORY):
os.makedirs(OUTPUT_DIRECTORY)
# Read users from provided file
users = read_users(USERS_FILE)
user_list = users
data = ""
if (not os.path.exists(OUTPUT_FILE)) or GET_NEW_USERS: # if you want to retrieve new users
# Find friends from existing users to receive more than 500 users
for _user in users:
print "fetching friends of " + _user.encode("utf-8")
user_list = lastfm_api_call_getFriends(_user)
for u in user_list:
data += u.encode("utf-8") + "\n"
print "finished " + _user.encode("utf-8")
if os.path.exists(OUTPUT_FILE):
            os.remove(OUTPUT_FILE)  # remove the stale users file before rewriting it
# Write content to local file
file_out = open(OUTPUT_FILE, 'w')
file_out.write(data)
file_out.close()
users = read_users(OUTPUT_FILE)
else:
users = read_users(OUTPUT_FILE)
print "\n"
# Create list to hold all listening events
LEs = []
if USE_EXISTING_LE:
LEs = retrieve_listening_events_existing(LEs)
pass
else:
LEs = retrieve_listening_events(LEs, users)
pass
# Write retrieved listening events to text file
with open(LE_FILE, 'w') as outfile: # "a" to append
outfile.write('user\tartist\ttrack\ttime\n')
for le in LEs: # For all listening events
outfile.write(le[0] + "\t" + le[1] + "\t" + le[2] + "\t" + le[3] + "\n")
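# Illustrative example (not part of the original script; the second row is
# hypothetical): LE.txt is tab-separated with one listening event per line,
# for instance
#   user<TAB>artist<TAB>track<TAB>time
#   SomeUser<TAB>Some Artist<TAB>Some Track<TAB>1433116800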
|
hawk23/music-recommender
|
Lastfm_LE_Fetcher.py
|
Python
|
gpl-2.0
| 10,562
|
import multiprocessing
import time
import logging
import queue
import os
import fnmatch
import pprint
import fss.constants
import fss.config
import fss.config.workers
import fss.workers.controller_base
import fss.workers.worker_base
_LOGGER = logging.getLogger(__name__)
_LOGGER_FILTER = logging.getLogger(__name__ + '.-filter-')
_IS_FILTER_DEBUG = bool(int(os.environ.get('FSS_FILTER_DEBUG', '0')))
if _IS_FILTER_DEBUG is True:
_LOGGER_FILTER.setLevel(logging.DEBUG)
else:
_LOGGER_FILTER.setLevel(logging.WARNING)
class GeneratorWorker(fss.workers.worker_base.WorkerBase):
"""This class knows how to recursively traverse a path to produce a list of
file-paths.
"""
def __init__(self, filter_rules_raw, *args):
super(GeneratorWorker, self).__init__(*args)
_LOGGER.info("Creating generator.")
# Set after we've popped the first item off the queue.
self.__processed_first = False
self.__filter_rules = None
self.__load_filter_rules(filter_rules_raw)
self.__local_input_q = queue.Queue()
def __load_filter_rules(self, filter_rules_raw):
_LOGGER.debug("Loading filter-rules.")
        # We expect this to be a list of 3-tuples:
#
# (entry-type, filter-type, pattern)
self.__filter_rules = {
fss.constants.FT_DIR: {
fss.constants.FILTER_INCLUDE: [],
fss.constants.FILTER_EXCLUDE: [],
},
fss.constants.FT_FILE: {
fss.constants.FILTER_INCLUDE: [],
fss.constants.FILTER_EXCLUDE: [],
},
}
for (entry_type, filter_type, pattern) in filter_rules_raw:
self.__filter_rules[entry_type][filter_type].append(pattern)
# If an include filter was given for DIRECTORIES, add an exclude filter
        # for "*". Since we check the include rules first, we'll simply not be
# implicitly including anything else.
rules = self.__filter_rules[fss.constants.FT_DIR]
if rules[fss.constants.FILTER_INCLUDE]:
rules[fss.constants.FILTER_EXCLUDE].append('*')
# If an include filter was given for FILES, add an exclude filter for
        # "*". Since we check the include rules first, we'll simply not be
# implicitly including anything else.
rules = self.__filter_rules[fss.constants.FT_FILE]
if rules[fss.constants.FILTER_INCLUDE]:
rules[fss.constants.FILTER_EXCLUDE].append('*')
if fss.config.IS_DEBUG is True:
_LOGGER.debug("Final rules:\n%s",
pprint.pformat(self.__filter_rules))
def __check_to_permit(self, entry_type, entry_filename):
"""Applying the filter rules."""
rules = self.__filter_rules[entry_type]
# Should explicitly include?
for pattern in rules[fss.constants.FILTER_INCLUDE]:
if fnmatch.fnmatch(entry_filename, pattern):
_LOGGER_FILTER.debug("Entry explicitly INCLUDED: [%s] [%s] "
"[%s]",
entry_type, pattern, entry_filename)
return True
# Should explicitly exclude?
for pattern in rules[fss.constants.FILTER_EXCLUDE]:
if fnmatch.fnmatch(entry_filename, pattern):
_LOGGER_FILTER.debug("Entry explicitly EXCLUDED: [%s] [%s] "
"[%s]",
entry_type, pattern, entry_filename)
return False
# Implicitly include.
_LOGGER_FILTER.debug("Entry IMPLICITLY included: [%s] [%s]",
entry_type, entry_filename)
return True
def get_next_item(self):
"""Override the default functionality to not only try to pull things
off the external input-queue, but to first try to pull things from a
local input-queue that we'll primarily depend on. We'll only use the
external input-queue to get the initial root-path (we could reuse it to
do the recursion, but it's more costly and prone to delay).
"""
# Try to pop something off the local input-queue.
try:
return self.__local_input_q.get(block=False)
except queue.Empty:
pass
# Try to pop something off the external input-queue.
return self.input_q.get(block=False)
def process_item(self, entry_path):
_LOGGER.debug("Processing: [%s]", entry_path)
entry_filename = os.path.basename(entry_path)
# The first item in the queue is the root-directory to be scanned. It's
# not subject to the filter-rules.
if self.__processed_first is True:
if self.__check_to_permit(
fss.constants.FT_DIR,
entry_filename) is False:
# Skip.
return True
else:
self.__processed_first = True
try:
entries = os.listdir(entry_path)
except:
_LOGGER.exception("Skipping unreadable directory: [%s]",
entry_path)
else:
for filename in entries:
if self.check_quit() is True:
_LOGGER.warning("Generator has been told to quit before "
"finishing. WITHIN=[%s]", entry_path)
return False
filepath = os.path.join(entry_path, filename)
is_dir = os.path.isdir(filepath)
file_type = fss.constants.FT_DIR \
if is_dir is True \
else fss.constants.FT_FILE
if self.__check_to_permit(file_type, filename) is False:
continue
if self.tick_count % \
fss.config.workers.PROGRESS_LOG_TICK_INTERVAL == 0:
self.log(
logging.DEBUG,
"Generator progress: (%d)",
self.tick_count)
# We'll populate our own input-queue with downstream paths.
if is_dir:
self.push_to_output((fss.constants.FT_DIR, filepath))
_LOGGER.debug("Pushing directory to local input-queue: "
"[%s]", filepath)
self.__local_input_q.put(filepath)
else:
self.push_to_output((fss.constants.FT_FILE, filepath))
self.increment_tick()
def get_component_name(self):
return fss.constants.PC_GENERATOR
@property
def terminate_on_idle(self):
return True
class GeneratorController(fss.workers.controller_base.ControllerBase):
def __init__(self, filter_rules_raw, *args, **kwargs):
super(GeneratorController, self).__init__(*args, **kwargs)
args = (
filter_rules_raw,
self.pipeline_state,
self.input_q,
self.output_q,
self.log_q,
self.quit_ev
)
self.__p = multiprocessing.Process(target=_boot, args=args)
def start(self):
_LOGGER.info("Starting generator.")
self.__p.start()
def stop(self):
_LOGGER.info("Stopping generator.")
self.quit_ev.set()
# TODO(dustin): Audit for a period of time, and then stop it.
self.__p.join()
@property
def output_queue_size(self):
return fss.config.workers.GENERATOR_MAX_OUTPUT_QUEUE_SIZE
def _boot(filter_rules_raw, pipeline_state, input_q, output_q, log_q, quit_ev):
_LOGGER.info("Booting generator worker.")
g = GeneratorWorker(
filter_rules_raw,
pipeline_state,
input_q,
output_q,
log_q,
quit_ev)
g.run()
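# Illustrative sketch (not part of the original module; the patterns are
# hypothetical): filter_rules_raw is a list of (entry-type, filter-type,
# pattern) tuples built from the constants referenced above, e.g. to emit
# only Python files while skipping "build" directories:
_EXAMPLE_FILTER_RULES = [
    (fss.constants.FT_FILE, fss.constants.FILTER_INCLUDE, '*.py'),
    (fss.constants.FT_DIR, fss.constants.FILTER_EXCLUDE, 'build'),
]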
|
dsoprea/PathScan
|
fss/workers/generator.py
|
Python
|
gpl-2.0
| 7,985
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Manage access to LilyPond documentation.
"""
from __future__ import unicode_literals
import os
from PyQt4.QtCore import QSettings, QUrl
import app
import util
import signals
import qsettings
from . import documentation
# cache the LilyPond Documentation instances
_documentations = None
allLoaded = signals.Signal()
def docs():
"""Returns the list of Documentation instances that are found."""
global _documentations
if _documentations is None:
_documentations = [documentation.Documentation(url) for url in urls()]
_sort_docs()
# check whether they need to fully load their version number yet
_check_doc_versions()
return list(_documentations)
def clear():
"""Clears the cached documentation instances."""
global _documentations
_documentations = None
app.settingsChanged.connect(clear, -100)
def loaded():
"""Returns True if all Documentation are loaded (i.e. know their version).
If this function returns False, you can connect to the allLoaded signal
to get a notification when all Documentation instances have loaded their
version information. This signal will only be emitted once, after that all
connections will be removed from the signal.
"""
for d in docs():
if d.versionString() is None:
return False
return True
def _check_doc_versions():
"""Checks if all documentation instances have their version loaded.
    Emits the allLoaded signal when all are loaded, and then sorts the
    documentation instances on local/remote and then version number.
"""
for d in _documentations:
if d.versionString() is None:
def makefunc(doc):
def func():
doc.versionLoaded.disconnect(func)
_check_doc_versions()
return func
d.versionLoaded.connect(makefunc(d))
return
_sort_docs()
allLoaded.emit()
allLoaded.clear()
def _sort_docs():
"""Sorts all documentation instances on local/remote and then version."""
_documentations.sort(key = lambda d: (not d.isLocal(), d.version() or ()))
def urls():
"""Returns a list of QUrls where documentation can be found.
Remote urls (from the users settings) are not checked but simply returned.
For user-set local directories, if the directory itself does not contain
LilyPond documentation, all directories one level deep are searched.
This makes it possible to set one directory for local documentation and
put there multiple sets of documentation in subdirectories (e.g. with the
version number in the path name).
The paths in the settings are read, and also the usual system directories
are scanned.
"""
user_paths = qsettings.get_string_list(QSettings(), "documentation/paths")
system_prefixes = [p for p in (
'/usr',
'/usr/local',
'/usr/share/doc',
'/usr/doc',
) if os.path.isdir(p)]
# split in local and non-local ones (local are preferred)
user_prefixes = []
local = []
remote = []
for p in user_paths:
user_prefixes.append(p) if os.path.isdir(p) else remote.append(p)
remote.sort(key=util.naturalsort)
# now find all instances of LilyPond documentation in the local paths
def paths(path):
"""Yields possible places where LilyPond documentation could live."""
yield path
path = os.path.join(path, 'share', 'doc', 'lilypond', 'html')
yield path
yield os.path.join(path, 'offline-root')
def find(path):
"""Finds LilyPond documentation."""
for p in paths(path):
if os.path.isdir(os.path.join(p, 'Documentation')):
return p
# search in the user-set directories, if no docs, scan one level deeper
for p in user_prefixes:
n = find(p)
if n:
local.append(n)
elif p not in system_prefixes:
for name, dirs, files in os.walk(p):
for d in sorted(dirs, key=util.naturalsort):
n = find(os.path.join(p, d))
if n:
local.append(n)
break
# now add the system directories if documentation is found there
for p in system_prefixes:
if p not in user_prefixes:
n = find(p)
if n:
local.append(n)
urls = []
urls.extend(map(QUrl.fromLocalFile, local))
urls.extend(map(QUrl, remote))
if not urls:
urls.append(QUrl("http://lilypond.org/doc/stable"))
return urls
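# Illustrative sketch (not part of the original module; the function name is
# hypothetical): client code that needs fully loaded version information can
# combine loaded() with the allLoaded signal like this. The helper is defined
# only as documentation and is never called here.
def _example_when_all_loaded(callback):
    """Run callback() once every Documentation instance knows its version."""
    if loaded():
        callback()
    else:
        allLoaded.connect(callback)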
|
shimpe/frescobaldi
|
frescobaldi_app/lilydoc/manager.py
|
Python
|
gpl-2.0
| 5,554
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#local imports
from modules.page_mount import Principal
page = Principal()
page.mount(
page='traceability',
category='main',
js=('form.default','feedback', 'traceability'),
css=('traceability', 'default.form','default.lists', 'default.detail')
)
|
cria/microSICol
|
py/traceability.py
|
Python
|
gpl-2.0
| 310
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 16:49:53 2015
@author: gideon
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 12 16:28:29 2015
@author: gideon
### TREE_STATS ###
Description:
This script defines a function that collects some statistics from a tree
directory. The directory needs to be passed to the function.
-total tree length
-minimum/maximum branch length
-number of leaf nodes
-number of different species
-number of paralogs
(that is number of leaf nodes - number of different species)
-number of human sequences
It also stores them in a tabular file which can then be accessed.
Instructions:
Run the script. It will create a function.
It needs to be passed a directory with the locations of
the tree files.
Optional arguments are the output file name and the directory
in which the results should be put.
"""
"""
REWORK
fix the human-sequence count:
the human sequence identifier is not just "ENS" (human Ensembl IDs start with e.g. "ENSP")
"""
## INPUT
# directory = "/home/gideon/Documents/mphil_internship/Trees/*/*"
# It will save the file in the current directory
# filename = "tree_stats_output.csv"
## PACKAGES
# to change directories etc..
import os
# Package to access the files on the server.
import glob
# import regular expressions
import re
# import Tree module
from ete2 import Tree
# for writing to file
import csv
# for creating command line interface
import argparse
#-----------------------------------------------------------------------------#
### THE FUNCTION ###
def tree_stats(directory, filename="tree_stats_output.csv",
output_directory = os.getcwd()):
## LOOP PREPARATION
# match all the sub-directories in the directory
# directory = ''.join([directory + "*/*.nh"])
directory = ''.join([directory + "*"])
# create a regexp to match for later
ens_RE = re.compile("[A-Z]*")
# tree list to hold the final output.
tree_list = list()
#-----------------------------------------------------------------------------#
## LOOP
for p in glob.glob(directory):
# list for that particular tree
tree = list()
        # access the path of the tree file
current_tree_directory = p
# create ete tree object
current_tree = Tree(newick = current_tree_directory)
# Add tree directory for identification
tree.append(current_tree_directory)
## TREE LENGTH + MAX/MIN BRANCH LENGTH
max_dist = 0.0
tree_length = 0.0
for n in current_tree.traverse():
tree_length += n.dist
if n.dist > max_dist:
max_dist = n.dist
# add tree length
tree.append(tree_length)
# add max branch length
tree.append(max_dist)
# calculate min dist
min_dist = 10000.0
for n in current_tree.traverse():
if n.dist < min_dist:
min_dist = n.dist
# add minimum branch length
tree.append(min_dist)
## MAX/MIN BRANCH LENGTHS FROM ROOT
# max length
max_leaf = current_tree.get_farthest_leaf()
#add to list
tree.append(max_leaf[0])
tree.append(max_leaf[1])
# min length
min_leaf = current_tree.get_closest_leaf()
# add to list
tree.append(min_leaf[0])
tree.append(min_leaf[1])
# NUMBER OF LEAVES
# calculate number of leaves
no_leaves = len(current_tree.get_leaves())
# add info to tree list
tree.append(no_leaves)
# NUMBER OF DIFFERENT SPECIES
# save all the names in an object
leaf_names = current_tree.get_leaf_names()
# use regexp to extract only species ids
species_ids = [ ens_RE.match(s).group(0) for s in leaf_names ]
unique_species_ids = list(set(species_ids))
no_species = len(unique_species_ids)
# add to list
tree.append(no_species)
## NUMBER OF PARALOGS
        # paralogs are number of leaves - number of species
no_paralogs = no_leaves - no_species
# add to list
tree.append(no_paralogs)
## NUMBER OF HUMAN SEQUENCES
# count the number of only ENS for human seqs
human_seqs = species_ids.count("ENSP")
if human_seqs == 0:
no_human_seqs = 0
tree.append(no_human_seqs)
else:
no_human_seqs = human_seqs
tree.append(no_human_seqs)
## ADD TREE TO TREE LIST
tree_list.append(tree)
## Show progress
print("Current file:" + current_tree_directory)
## END OF LOOP
#-----------------------------------------------------------------------------#
## WRITE TO FILE
# create the column names
colnames = ["Directory", "Length", "Biggest Branch", "Smallest Branch",
"Farthest Leaf Name", "Distance of farthest leaf",
"Closest Leaf Name", "Distance of closest leaf",
"# of Leaves", "# of Species", "# of Paralogs",
"# of Human seqs"]
output_file = ''.join([output_directory + "/" + filename ])
    # write it to a file
with open(output_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=",", dialect="excel")
writer.writerow(colnames)
writer.writerows(tree_list)
## END OF FUNCTION
#-----------------------------------------------------------------------------#
# create the argparse so that it can be run from the command-line.
parser = argparse.ArgumentParser(prog="tree_stats",
description='Calculates tree statistics')
# add directory argument, must be given
parser.add_argument("--directory", metavar="Tree Directory", type=str)
# give output file name, optional
parser.add_argument("--outname", metavar="Output file name", action="store",
default="tree_stats_output.csv", required=False,
dest="filename")
# add optional argument for output directory
parser.add_argument("--outdir", metavar="Output Directory", action="store",
default=os.getcwd(), required=False,
dest="output_directory")
args = parser.parse_args()
## CALL THE FUNCTION
tree_stats(args.directory, args.filename, args.output_directory)
## TELL THE USER WE ARE DONE
print("Finished, the output file is in your current or specified output directory")
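## EXAMPLE INVOCATION
# Illustrative only (not part of the original script; paths are hypothetical).
# Note that the directory value is globbed with a trailing "*", so it should
# end with a path separator:
#   python tree_stats.py --directory /path/to/Trees/ --outname my_tree_stats.csv --outdir /tmp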
|
jergosh/slr_pipeline
|
bin/tree_stats.py
|
Python
|
gpl-2.0
| 6,921
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='linmix',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.0.dev1',
description='linmix.py',
# long_description=long_description,
# The project's main homepage.
url='https://github.com/jmeyers314/linmix_py',
# Author details
author='Joshua E. Meyers, Brandon C. Kelly',
author_email='jmeyers314@gmail.com, bcharleskelly@gmail.com',
# Choose your license
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 2 - Pre-Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7'
],
# What does your project relate to?
keywords='fit bayesian statistics',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
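# Illustrative usage sketch (not part of the original file): from the
# directory containing this setup.py, the package can be installed in
# editable mode with
#   pip install -e .
# or a source distribution can be built with
#   python setup.py sdist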
|
martadesimone/Protoplanetarydisks
|
linmix/setup.py
|
Python
|
gpl-2.0
| 3,674
|