Dataset columns: repo_name (string, length 5 to 92), path (string, length 4 to 221), copies (string, 19 classes), size (string, length 4 to 6), content (string, length 766 to 896k), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51 to 99.9), line_max (int64, 32 to 997), alpha_frac (float64, 0.25 to 0.96), autogenerated (bool, 1 class), ratio (float64, 1.5 to 13.6), config_test (bool, 2 classes), has_no_keywords (bool, 2 classes), few_assignments (bool, 1 class)

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
yiwen-luo/LeetCode
|
Python/design-log-storage-system.py
|
1
|
1067
|
# Time: put: O(1)
# retrieve: O(n + dlogd), n is the size of the total logs, d is the size of the found logs
# Space: O(n)
class LogSystem(object):
def __init__(self):
self.__logs = []
self.__granularity = {'Year': 4, 'Month': 7, 'Day': 10, \
'Hour': 13, 'Minute': 16, 'Second': 19}
def put(self, id, timestamp):
"""
:type id: int
:type timestamp: str
:rtype: void
"""
self.__logs.append((id, timestamp))
def retrieve(self, s, e, gra):
"""
:type s: str
:type e: str
:type gra: str
:rtype: List[int]
"""
i = self.__granularity[gra]
begin = s[:i]
end = e[:i]
return sorted(id for id, timestamp in self.__logs \
if begin <= timestamp[:i] <= end)
# Your LogSystem object will be instantiated and called as such:
# obj = LogSystem()
# obj.put(id,timestamp)
# param_2 = obj.retrieve(s,e,gra)
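# Illustrative sketch (not part of the original solution), assuming the
# "Year:Month:Day:Hour:Minute:Second" timestamp format from the problem:
# obj = LogSystem()
# obj.put(1, "2017:01:01:23:59:59")
# obj.put(2, "2017:01:02:23:59:59")
# obj.retrieve("2017:01:01:00:00:00", "2017:01:02:23:59:59", "Day")    # -> [1, 2]
# obj.retrieve("2017:01:01:00:00:00", "2017:01:01:23:59:59", "Second") # -> [1]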
|
mit
| 8,674,042,364,039,677,000
| 25.675
| 69
| 0.469541
| false
| 3.60473
| false
| false
| false
|
lutris/website
|
scripts/import_steam_linux_games.py
|
1
|
2485
|
# pylint: disable=missing-docstring
import logging
import requests
from games.models import Game, Genre
from games.util.steam import get_store_info, create_steam_installer
from platforms.models import Platform
from common.util import slugify
LOGGER = logging.getLogger(__name__)
def run():
response = requests.get(
"https://raw.githubusercontent.com/SteamDatabase/SteamLinux/master/GAMES.json"
)
linux_games = response.json()
for game_id in linux_games:
if linux_games[game_id] is not True:
LOGGER.debug(
"Game %s likely has problems, skipping. "
"This game should be added manually if appropriate.",
game_id
)
continue
if Game.objects.filter(steamid=game_id).count():
# LOGGER.debug("Game %s is already in Lutris", game_id)
continue
store_info = get_store_info(game_id)
if not store_info:
LOGGER.warning("No store info for game %s", game_id)
continue
if store_info["type"] != "game":
LOGGER.warning("%s: %s is not a game (type: %s)",
game_id, store_info["name"], store_info["type"])
continue
slug = slugify(store_info["name"])
if Game.objects.filter(slug=slug).count():
LOGGER.warning("Game %s already in Lutris but does not have a Steam ID", game_id)
continue
game = Game.objects.create(
name=store_info["name"],
slug=slug,
steamid=game_id,
description=store_info["short_description"],
website=store_info["website"] or "",
is_public=True,
)
game.set_logo_from_steam()
LOGGER.debug("%s created", game)
if store_info["platforms"]["linux"]:
platform = Platform.objects.get(slug='linux')
LOGGER.info("Creating installer for %s", game)
create_steam_installer(game)
else:
platform = Platform.objects.get(slug='windows')
game.platforms.add(platform)
for steam_genre in store_info["genres"]:
genre, created = Genre.objects.get_or_create(slug=slugify(steam_genre["description"]))
if created:
genre.name = steam_genre["description"]
LOGGER.info("Created genre %s", genre.name)
genre.save()
game.genres.add(genre)
game.save()
|
agpl-3.0
| 2,682,142,902,040,070,000
| 36.651515
| 98
| 0.57666
| false
| 4.047231
| false
| false
| false
|
pelme/vasa
|
vasa/http/endpoints.py
|
1
|
1052
|
import asyncio
import mimetypes
from pathlib import Path
from .response import DataResponse, ResponseNotFound
@asyncio.coroutine
def index(request, writer, settings):
full_path = (Path(settings.webapp_root) / 'index.html').resolve()
with full_path.open('rb') as f:
return DataResponse(writer, data=f.read(), content_type='text/html')
@asyncio.coroutine
def webapp_files(request, writer, settings, path):
try:
full_path = (Path(settings.webapp_root) / path).resolve()
except FileNotFoundError:
return ResponseNotFound(writer)
if not str(full_path).startswith(settings.webapp_root):
return ResponseNotFound(writer)
try:
with full_path.open('rb') as f:
contents = f.read()
except FileNotFoundError:
return ResponseNotFound(writer)
else:
(content_type, encoding) = mimetypes.guess_type(str(full_path))
content_type = content_type or 'application/octet-stream'
return DataResponse(writer, data=contents, content_type=content_type)
|
mit
| -6,921,504,578,001,710,000
| 28.222222
| 77
| 0.689163
| false
| 3.954887
| false
| false
| false
|
ezralanglois/arachnid
|
arachnid/core/parallel/process_tasks.py
|
1
|
12031
|
''' Common parallel/serial design patterns
This module defines a set of common tasks that can be performed in parallel or serial.
.. Created on Jun 23, 2012
.. codeauthor:: Robert Langlois <rl2528@columbia.edu>
'''
import process_queue
import logging
import numpy.ctypeslib
import multiprocessing.sharedctypes
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
def process_mp(process, vals, worker_count, init_process=None, ignored_errors=None, **extra):
''' Generator that runs a process functor in parallel (or serial if worker_count
is less than 2) over a list of given data values and returns the result
:Parameters:
process : function
Functor to be run in parallel (or serial if worker_count is less than 2)
vals : list
List of items to process in parallel
worker_count : int
Number of processes to run in parallel
init_process : function
Initialize the parameters for the child process
ignored_errors : list
Single element list with counter for ignored errors
extra : dict
Unused keyword arguments
:Returns:
val : object
Return value of process functor
'''
#_logger.error("worker_count1=%d"%worker_count)
if len(vals) < worker_count: worker_count = len(vals)
#_logger.error("worker_count2=%d"%worker_count)
if worker_count > 1:
def process_helper(val, **extra):
try:
return process(val, **extra)
except:
if ignored_errors is not None and len(ignored_errors) > 0:ignored_errors[0]+=1
if _logger.getEffectiveLevel()==logging.DEBUG or 1 == 1:
_logger.exception("Unexpected error in process - report this problem to the developer")
else:
_logger.warn("nexpected error in process - report this problem to the developer")
return extra.get('process_number', 0), val
qout = process_queue.start_workers_with_output(vals, process_helper, worker_count, init_process, ignore_error=True, **extra)
index = 0
while index < len(vals):
val = process_queue.safe_get(qout.get)
if isinstance(val, process_queue.ProcessException):
index = 0
while index < worker_count:
if process_queue.safe_get(qout.get) is None:
index += 1;
raise val
if val is None: continue
index += 1
yield val
else:
#_logger.error("worker_count3=%d"%worker_count)
logging.debug("Running with single process: %d"%len(vals))
for i, val in enumerate(vals):
try:
f = process(val, **extra)
except:
if ignored_errors is not None and len(ignored_errors) > 0: ignored_errors[0]+=1
if _logger.getEffectiveLevel()==logging.DEBUG or 1 == 1:
_logger.exception("Unexpected error in process - report this problem to the developer")
else:
_logger.warn("nexpected error in process - report this problem to the developer")
yield i, val
continue
yield i, f
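# Hedged usage sketch (not part of the original module); the worker function
# and input list below are hypothetical. With worker_count < 2 the generator
# runs serially and yields (index, result) pairs:
#
# def square(x, **extra):
#     return x * x
#
# for index, result in process_mp(square, [1, 2, 3, 4], worker_count=1):
#     print index, result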
def iterate_map(for_func, worker, thread_count, queue_limit=None, **extra):
''' Iterate over the input value and reduce after finished processing
'''
if thread_count < 2:
for val in worker(enumerate(for_func), process_number=0, **extra):
yield val
return
def queue_iterator(qin, process_number):
try:
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
yield val
finally: pass
#_logger.error("queue-done")
def iterate_map_worker(qin, qout, process_number, process_limit, extra):
val = None
try:
val = worker(queue_iterator(qin, process_number), process_number=process_number, **extra)
except:
_logger.exception("Error in child process")
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
finally:
qout.put(val)
#process_queue.safe_get(qin.get)
if queue_limit is None: queue_limit = thread_count*8
else: queue_limit *= thread_count
qin, qout = process_queue.start_raw_enum_workers(iterate_map_worker, thread_count, queue_limit, 1, extra)
try:
for val in enumerate(for_func):
qin.put(val)
except:
_logger.error("for_func=%s"%str(for_func))
raise
for i in xrange(thread_count): qin.put(None)
#qin.join()
for i in xrange(thread_count):
val = process_queue.safe_get(qout.get)
#qin.put(None)
if val is None: raise ValueError, "Exception in child process"
yield val
def iterate_reduce(for_func, worker, thread_count, queue_limit=None, shmem_array_info=None, **extra):
''' Iterate over the input value and reduce after finished processing
'''
if thread_count < 2:
yield worker(enumerate(for_func), process_number=0, **extra)
return
shmem_map=None
shmem_map_base=None
if shmem_array_info is not None:
shmem_map=[]
shmem_map_base=[]
for i in xrange(thread_count):
base = {}
arr = {}
for key in shmem_array_info.iterkeys():
ar = shmem_array_info[key]
if ar.dtype.str[1]=='c':
typestr = ar.dtype.str[0]+'f'+str(int(ar.dtype.str[2:])/2)
ar = ar.view(numpy.dtype(typestr))
if ar.dtype == numpy.dtype(numpy.float64):
typecode="d"
elif ar.dtype == numpy.dtype(numpy.float32):
typecode="f"
else: raise ValueError, "dtype not supported: %s"%str(ar.dtype)
base[key] = multiprocessing.sharedctypes.RawArray(typecode, ar.ravel().shape[0])
arr[key] = numpy.ctypeslib.as_array(base[key])
arr[key] = arr[key].view(shmem_array_info[key].dtype).reshape(shmem_array_info[key].shape)
shmem_map.append(arr)
shmem_map_base.append(base)
del shmem_array_info
def queue_iterator(qin, process_number):
try:
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
yield val
finally: pass
def iterate_reduce_worker(qin, qout, process_number, process_limit, extra, shmem_map_base=None):#=shmem_map):
val = None
try:
if shmem_map_base is not None:
ar = shmem_map_base[process_number]
ar_map={}
for key in ar.iterkeys():
ar_map[key] = numpy.ctypeslib.as_array(ar[key])
ar_map[key] = ar_map[key].view(shmem_map[process_number][key].dtype).reshape(shmem_map[process_number][key].shape)
extra.update(ar_map)
val = worker(queue_iterator(qin, process_number), process_number=process_number, **extra)
except:
_logger.exception("Error in child process")
while True:
val = process_queue.safe_get(qin.get)
if val is None: break
finally:
if shmem_map_base is not None:
qout.put(process_number)
else:
qout.put(val)
if queue_limit is None: queue_limit = thread_count*8
else: queue_limit *= thread_count
qin, qout = process_queue.start_raw_enum_workers(iterate_reduce_worker, thread_count, queue_limit, 1, extra, shmem_map_base)
try:
for val in enumerate(for_func):
qin.put(val)
except:
_logger.error("for_func=%s"%str(for_func))
raise
for i in xrange(thread_count): qin.put(None)
#qin.join()
for i in xrange(thread_count):
val = process_queue.safe_get(qout.get)
if shmem_map is not None:
val = shmem_map[val]
#qin.put(None)
if val is None: raise ValueError, "Exception in child process"
yield val
def for_process_mp(for_func, worker, shape, thread_count=0, queue_limit=None, **extra):
''' Generator to process collection of arrays in parallel
:Parameters:
for_func : func
Generate a list of data
work : function
Function to preprocess the images
thread_count : int
Number of threads
shape : int
Shape of worker result array
extra : dict
Unused keyword arguments
:Returns:
index : int
Yields index of output array
out : array
Yields output array of worker
'''
if thread_count < 2:
for i, val in enumerate(for_func):
res = worker(val, i, **extra)
yield i, res
else:
if queue_limit is None: queue_limit = thread_count*8
else: queue_limit *= thread_count
qin, qout = process_queue.start_raw_enum_workers(process_worker2, thread_count, queue_limit, -1, worker, extra)
try:
total = 0
for i, val in enumerate(for_func):
if i >= thread_count:
pos = process_queue.safe_get(qout.get) #if i > thread_count else i
if pos is None or pos == -1: raise ValueError, "Error occurred in process: %d"%pos
res, idx = pos
yield idx, res
else:
pos = i
total += 1
qin.put((val,i))
for i in xrange(total):
pos = process_queue.safe_get(qout.get)
if pos is None or pos == -1: raise ValueError, "Error occurred in process: %d"%pos
res, idx = pos
yield idx, res
finally:
#_logger.error("Terminating %d workers"%(thread_count))
for i in xrange(thread_count):
qin.put((-1, -1))
pos = process_queue.safe_get(qout.get)
if pos != -1:
_logger.error("Wrong return value: %s"%str(pos))
assert(pos==-1)
raise StopIteration
def process_worker2(qin, qout, process_number, process_limit, worker, extra):
''' Worker in each process that preprocesses the images
:Parameters:
qin : multiprocessing.Queue
Queue with index for input images in shared array
qout : multiprocessing.Queue
Queue with index and offset for the output images in shared array
process_number : int
Process number
process_limit : int
Number of processes
worker : function
Function to preprocess the images
shmem_img : multiprocessing.RawArray
Shared memory image array
shape : tuple
Dimensions of the shared memory array
extra : dict
Keyword arguments
'''
_logger.debug("Worker %d of %d - started"%(process_number, process_limit))
try:
while True:
pos = process_queue.safe_get(qin.get)
if pos is None or not hasattr(pos[0], 'ndim'): break
res, idx = pos
val = worker(res, idx, **extra)
qout.put((val, idx))
_logger.debug("Worker %d of %d - ending ..."%(process_number, process_limit))
qout.put(-1)
except:
_logger.exception("Finished with error")
qout.put(None)
else:
_logger.debug("Worker %d of %d - finished"%(process_number, process_limit))
|
gpl-2.0
| 7,339,000,769,296,700,000
| 36.596875
| 134
| 0.555232
| false
| 4.118795
| false
| false
| false
|
penguintutor/networking-quiz
|
src/quizstrings.py
|
1
|
2597
|
# Text has been moved to this class, potential to add different languages in future
# Note that this is not complete, some text (eg. buttons) has not been changed
# The corresponding json file must be consistent by having all entries for all pages
# If an entry is not required (or is updated using a different method - eg. quiz options)
# then it should be added as empty quotes ""
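# Hedged sketch of the expected quizstrings.json layout, inferred from load()
# below; the app title, page name and values are placeholders:
# {
#     "App Title": [
#         {"intro": {"title": "", "details1": "", "details2": "", "details3": "",
#                    "details4": "", "details5": "", "details6": "",
#                    "option1": "", "option2": "", "option3": "", "option4": "",
#                    "image": "", "left_button": "", "right_button": ""}}
#     ]
# }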
import json
class QuizStrings():
filename = "quizstrings.json"
# pages contains a dict (indexed by page / screen name), then includes a dictionary which may contain lists (eg. details)
pages = {}
# Returns as a hash dictionary - useful for a full page update
def getPage(self, page_name):
return self.pages[page_name]
def getTitle(self):
return self.title
def load(self):
##todo - possibly add error checking - Not so important as it should
# fail anyway and not necessarily in a user friendly way (user should not be editing
# the strings file and if it's missing then it's as bad as a missing .py file)
with open(self.filename) as json_file:
json_data = json.load(json_file)
# Get title of the app from the root key
root_keys = list(json_data.keys())
self.title = root_keys[0]
# Json file is then broken down into relevant screens (referred to as pages)
for this_page in json_data[self.title]:
page = list(this_page.keys())[0]
page_title = this_page[page]["title"]
page_details = [
this_page[page]["details1"],
this_page[page]["details2"],
this_page[page]["details3"],
this_page[page]["details4"],
this_page[page]["details5"],
this_page[page]["details6"]
]
page_options = [
this_page[page]["option1"],
this_page[page]["option2"],
this_page[page]["option3"],
this_page[page]["option4"]
]
page_image = this_page[page]["image"]
page_left_button = this_page[page]["left_button"]
page_right_button = this_page[page]["right_button"]
self.pages[page]={"title" : page_title, "details": page_details, "options" : page_options, "image" : page_image, "left_button" : page_left_button, "right_button" : page_right_button}
|
gpl-3.0
| 1,300,464,390,233,187,300
| 42.3
| 199
| 0.562187
| false
| 4.342809
| false
| false
| false
|
RoboJackets/robocup-software
|
soccer/gameplay/tactics/positions/celebration.py
|
1
|
2691
|
import behavior
import robocup
import constants
import single_robot_composite_behavior
import main
import enum
import skills
import random
import time
class Celebration(
single_robot_composite_behavior.SingleRobotCompositeBehavior):
MaxSpinAngle = 360
SpinPerTick = 1
class State(enum.Enum):
run_around = 0
spin = 1
def __init__(self):
super().__init__(continuous=True)
for s in Celebration.State:
self.add_state(s, behavior.Behavior.State.running)
self.add_transition(behavior.Behavior.State.start,
Celebration.State.run_around, lambda: True,
'immediately')
self.add_transition(Celebration.State.run_around,
Celebration.State.spin, lambda: self.spin_time,
'time to yeet')
self.add_transition(
Celebration.State.spin,
Celebration.State.run_around, lambda: self.im_dizzy(),
'time to go running')
self.spin_angle = 0
self.spin_time = False
r = constants.Robot.Radius
self.corners = [
robocup.Point(-constants.Field.Width / 2 + r, r),
robocup.Point(constants.Field.Width / 2 - r, r), robocup.Point(
constants.Field.Width / 2 - r, constants.Field.Length - r),
robocup.Point(-constants.Field.Width / 2 + r,
constants.Field.Length - r),
robocup.Point(0, constants.Field.Length / 2)
]
self.current_corner = 0
self.start_time = time.time()
def on_enter_run_around(self):
self.current_corner = random.randint(0, 4)
self.robot.move_to_direct(self.corners[0])
def execute_run_around(self):
if (self.robot.pos - self.corners[self.current_corner]).mag() <= .05:
if (self.current_corner == 4):
self.spin_time = True
self.current_corner = random.randint(0, 3)
else:
self.current_corner = random.randint(0, 4)
if (self.current_corner < 5):
self.robot.move_to_direct(self.corners[self.current_corner])
def on_enter_spin(self):
self.start_time = time.time()
def execute_spin(self):
angle = self.robot.angle
facing_point = robocup.Point.direction(angle) + self.robot.pos
facing_point.rotate(self.robot.pos, Celebration.SpinPerTick)
self.spin_angle += Celebration.SpinPerTick
self.robot.face(facing_point)
def im_dizzy(self):
return time.time() - self.start_time > 8 and time.time(
) - self.start_time < 20
|
apache-2.0
| -893,343,985,385,578,400
| 31.421687
| 77
| 0.586771
| false
| 3.701513
| false
| false
| false
|
ncadou/proctor
|
proctor/tor.py
|
1
|
10805
|
from datetime import datetime
from itertools import chain, cycle
from os import path
from threading import Event, Lock, Thread
from time import sleep
import socks
from desub import desub
from proctor.socket import InstrumentedSocket
import logging
log = logging.getLogger(__name__)
class TorProcess(Thread):
""" Runs and manages a Tor process in a thread.
This class takes care of starting and stopping a Tor process, as well as
monitoring connection times and the error rate and restarting the process
when unhealthy.
"""
def __init__(self, name, socks_port, control_port, base_work_dir,
boot_time_max=30, errors_max=10, conn_time_avg_max=2,
grace_time=30, sockets_max=None, resurrections_max=10):
super(TorProcess, self).__init__()
self.name = name
self.socks_port = socks_port
self.control_port = control_port
self.base_work_dir = base_work_dir
self.boot_time_max = boot_time_max
self.errors_max = errors_max
self.conn_time_avg_max = conn_time_avg_max
self.grace_time = grace_time
self.sockets_max = sockets_max
self.resurrections_max = resurrections_max
self._connected = Event()
self._exclusive_access = Lock()
self._ref_count = 0
self._ref_count_lock = Lock()
self._socket_count = 0
self._socket_count_lock = Lock()
self._stats_lock = Lock()
self._stats_window = 200
self._stoprequest = Event()
self._terminated = False
def run(self):
""" Run and supervise the Tor process. """
args = dict(CookieAuthentication=0, HashedControlPassword='',
ControlPort=self.control_port, PidFile=self.pid_file,
SocksPort=self.socks_port, DataDirectory=self.work_dir)
args = map(str, chain(*(('--' + k, v) for k, v in args.iteritems())))
tor = desub.join(['tor'] + args)
self._start(tor)
resurrections = 0
while not self._stoprequest.is_set():
if not tor.is_running():
if resurrections >= self.resurrections_max:
log.error('Resurrected %s %s times, giving up.'
% (self.name, resurrections))
self._terminated = True
break
resurrections += 1
self._restart(tor, died=True)
else:
log.info('Started %s' % self.name)
self.monitor(tor)
def monitor(self, tor):
""" Make sure Tor starts and stops when appropriate. """
while tor.is_running():
# Stop nicely when asked nicely.
if self._stoprequest.wait(1):
tor.stop()
log.debug('Stopped %s' % self.name)
# Check health and restart when appropriate.
elif self._connected.is_set():
errors, timing_avg, samples = self.get_stats()
too_many_errors = errors > self.errors_max
too_slow = timing_avg > self.conn_time_avg_max
max_use_reached = (self.sockets_max
and self._socket_count >= self.sockets_max)
needs_restart = too_many_errors or too_slow or max_use_reached
if self.age > self.grace_time and needs_restart:
self._restart(tor)
else:
out = tor.stdout.read()
# Check for successful connection.
if 'Bootstrapped 100%: Done.' in out:
self._connected.set()
log.info('%s is connected' % self.name)
self._start_time = datetime.utcnow()
else:
# Check if initialization takes too long.
if self.time_since_boot > self.boot_time_max:
self._restart(tor, failed_boot=True)
# Check for socket binding failures.
else:
for port in [self.socks_port, self.control_port]:
if 'Could not bind to 127.0.0.1:%s' % port in out:
error = ('Could not bind %s to 127.0.0.1:%s'
% (self.name, port))
log.warn(error)
self._terminated = True
break
def stop(self):
""" Signal the thread to stop itself. """
self._stoprequest.set()
@property
def work_dir(self):
return path.join(self.base_work_dir, self.name)
@property
def pid_file(self):
return path.join(self.work_dir, 'pid')
@property
def connected(self):
return self._connected.is_set()
@property
def age(self):
""" Return the number of seconds since the Tor circuit is usable. """
return (datetime.utcnow() - self._start_time).total_seconds()
@property
def terminated(self):
return self._terminated
@property
def time_since_boot(self):
""" Return the number of seconds since the last Tor process start. """
return (datetime.utcnow() - self._boot_time).total_seconds()
def _start(self, tor):
""" Start a Tor process. """
with self._stats_lock:
self._boot_time = datetime.utcnow()
self._socket_count = 0
self._stats_errors = list()
self._stats_timing = list()
tor.start()
def _restart(self, tor, failed_boot=False, died=False):
""" Safely replace a Tor instance with a fresh one. """
with self._exclusive_access: # Prevent creating sockets.
# Wait until all sockets have finished.
wait_start = datetime.utcnow()
while self._ref_count > 0:
if (datetime.utcnow() - wait_start).total_seconds() > 30:
log.error('Likely got a ref_count accounting error in %s'
% self.name)
self._ref_count = 0
break
sleep(1)
self._connected.clear()
if failed_boot:
log.warn('Restarting %s (did not initialize in time)'
% self.name)
elif died:
log.warn('Resurrected %s' % self.name)
else:
errors, timing_avg, samples = self.get_stats()
log.warn(('Restarting %s '
'(errors: %s, avg time: %s, count: %s, age: %s)')
% (self.name, errors, timing_avg, self._socket_count,
int(self.age)))
tor.stop()
self._start(tor)
def _inc_socket_count(self):
""" Increment the internal socket counter. """
with self._socket_count_lock:
self._socket_count += 1
def _inc_ref_count(self):
""" Increment the internal reference counter. """
with self._ref_count_lock:
self._ref_count += 1
def _dec_ref_count(self):
""" Decrement the internal reference counter. """
with self._ref_count_lock:
self._ref_count -= 1
def _receive_stats(self, timing, errors):
""" Maintain connection statistics over time. """
with self._stats_lock:
self._stats_errors.append(errors)
self._stats_timing.append(timing)
if len(self._stats_errors) > self._stats_window:
self._stats_errors = self._stats_errors[-self._stats_window:]
self._stats_timing = self._stats_timing[-self._stats_window:]
# We consider the socket at end of life when it sends the stats.
self._dec_ref_count()
def get_stats(self):
""" Return current statistics. """
with self._stats_lock:
samples = len(self._stats_timing)
errors = sum(self._stats_errors)
timing_avg = sum(self._stats_timing) / (samples or 1)
return errors, timing_avg, samples
def create_socket(self, suppress_errors=False, *args, **kwargs):
""" Return an InstrumentedSocket that will connect through Tor. """
if self.connected:
if not self._exclusive_access.acquire(False):
return None
try:
sock = InstrumentedSocket(self._receive_stats, *args, **kwargs)
args = (socks.PROXY_TYPE_SOCKS4, 'localhost', self.socks_port,
True, None, None) # rdns, username, password
sock.setproxy(*args)
# Keep track of how many sockets are using this Tor instance.
self._inc_ref_count()
self._inc_socket_count()
return sock
finally:
self._exclusive_access.release()
elif suppress_errors:
sleep(0.1) # Prevent fast spinning (in the proxy code) caused by
# a race condition when Tor restarts.
return None
else:
raise RuntimeError('%s not yet connected.' % self.name)
class TorSwarm(object):
""" Manages a number of Tor processes. """
def __init__(self, base_socks_port, base_control_port, work_dir,
sockets_max, **kwargs):
self.base_socks_port = base_socks_port
self.base_control_port = base_control_port
self.work_dir = work_dir
self.sockets_max = sockets_max
self.kwargs = kwargs
self._instances = list()
def instances(self):
""" Return an infinite generator cycling through Tor instances. """
for instance in cycle(self._instances):
if instance.terminated:
alive = list(i for i in self._instances if not i.terminated)
if len(alive) == 0:
log.critical('No alive Tor instance left. Bailing out.')
return
yield instance
def start(self, num_instances):
""" Start and return the Tor processes. """
log.info('Starting Tor swarm with %d instances...' % num_instances)
self._instances = list()
for i in range(num_instances):
tor = TorProcess('tor-%d' % i, self.base_socks_port + i,
self.base_control_port + i, self.work_dir,
sockets_max=self.sockets_max, **self.kwargs)
self._instances.append(tor)
tor.start()
sleep(0.1)
return self._instances
def stop(self):
""" Stop the Tor processes and wait for their completion. """
for tor in self._instances:
tor.stop()
tor.join()
|
bsd-3-clause
| -6,406,841,245,967,044,000
| 38.870849
| 79
| 0.537436
| false
| 4.287698
| false
| false
| false
|
qiyuangong/Basic_Mondrian
|
basic_mondrain_test.py
|
1
|
1879
|
import unittest
from mondrian import mondrian
# from utils.read_data import read_data, read_tree
from models.gentree import GenTree
from models.numrange import NumRange
import random
import pdb
# Build a GenTree object
ATT_TREE = []
def init():
global ATT_TREE
ATT_TREE = []
tree_temp = {}
tree = GenTree('*')
tree_temp['*'] = tree
lt = GenTree('1,5', tree)
tree_temp['1,5'] = lt
rt = GenTree('6,10', tree)
tree_temp['6,10'] = rt
for i in range(1, 11):
if i <= 5:
t = GenTree(str(i), lt, True)
else:
t = GenTree(str(i), rt, True)
tree_temp[str(i)] = t
numrange = NumRange(['1', '2', '3', '4', '5',
'6', '7', '8', '9', '10'], dict())
ATT_TREE.append(tree_temp)
ATT_TREE.append(numrange)
class functionTest(unittest.TestCase):
def test1_mondrian(self):
init()
data = [['6', '1', 'haha'],
['6', '1', 'test'],
['8', '2', 'haha'],
['8', '2', 'test'],
['4', '1', 'hha'],
['4', '2', 'hha'],
['4', '3', 'hha'],
['4', '4', 'hha']]
result, eval_r = mondrian(ATT_TREE, data, 2)
# print result
# print eval_r
self.assertTrue(abs(eval_r[0] - 100.0 / 36) < 0.05)
def test2_mondrian(self):
init()
data = [['6', '1', 'haha'],
['6', '1', 'test'],
['8', '2', 'haha'],
['8', '2', 'test'],
['4', '1', 'hha'],
['4', '1', 'hha'],
['1', '1', 'hha'],
['2', '1', 'hha']]
result, eval_r = mondrian(ATT_TREE, data, 2)
# print result
# print eval_r
self.assertTrue(abs(eval_r[0] - 100.0 / 8) < 0.05)
if __name__ == '__main__':
unittest.main()
|
mit
| 86,076,437,654,644,900
| 26.632353
| 59
| 0.432145
| false
| 3.10066
| true
| false
| false
|
KanoComputing/terminal-quest
|
linux_story/story/challenges/challenge_10.py
|
1
|
6406
|
# challenge_10.py
#
# Copyright (C) 2014-2016 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# A chapter of the story
from linux_story.StepTemplate import StepTemplate
from linux_story.step_helper_functions import unblock_commands_with_cd_hint
from linux_story.story.terminals.terminal_cd import TerminalCd
class StepTemplateCd(StepTemplate):
TerminalClass = TerminalCd
# ----------------------------------------------------------------------------------------
class Step1(StepTemplateCd):
story = [
_("You're in your house. You appear to be alone."),
_("Use {{yb:cat}} to {{lb:examine}} some of the objects around you.\n")
]
allowed_commands = [
"cat banana",
"cat cake",
"cat croissant",
"cat grapes",
"cat milk",
"cat newspaper",
"cat oven",
"cat pie",
"cat sandwich",
"cat table"
]
start_dir = "~/my-house/kitchen"
end_dir = "~/my-house/kitchen"
counter = 0
deleted_items = ["~/my-house/kitchen/note"]
file_list = [
{"path": "~/town/.hidden-shelter/Eleanor"},
{"path": "~/town/.hidden-shelter/Edward"},
{"path": "~/town/.hidden-shelter/Edith"},
{"path": "~/town/.hidden-shelter/apple"},
{"path": "~/town/.hidden-shelter/dog"},
{"path": "~/town/.hidden-shelter/basket/empty-bottle"},
{"path": "~/town/.hidden-shelter/.tiny-chest/MV"},
]
first_time = True
def check_command(self, line):
if line in self.allowed_commands:
self.counter += 1
self.allowed_commands.remove(line)
hint = _("{{gb:Well done! Just look at one more item.}}")
else:
if self.first_time:
hint = _("{{rb:Use}} {{yb:cat}} {{rb:to look at two of the " +\
"objects around you.}}")
else:
hint = _("{{rb:Use the command}} {{yb:%s}} {{rb:to progress.}}")\
% self.allowed_commands[0]
level_up = (self.counter >= 2)
if not level_up:
self.send_hint(hint)
self.first_time = False
else:
return level_up
def next(self):
return 10, 2
class Step2(StepTemplateCd):
story = [
_("There doesn't seem to be anything here but loads of food."),
_("See if you can find something back in {{bb:town}}.\n"),
_("First, use {{yb:cd ..}} to {{lb:leave}} the {{bb:kitchen}}.\n")
]
start_dir = "~/my-house/kitchen"
end_dir = "~/town"
commands = [
"cd ~/town",
"cd ~/town/",
"cd ..",
"cd ../",
"cd town",
"cd town/",
"cd ../..",
"cd ../../",
"cd"
]
num_turns_in_home_dir = 0
def block_command(self, line):
return unblock_commands_with_cd_hint(line, self.commands)
def check_command(self, line):
if self.get_fake_path() == self.end_dir:
return True
hint = ""
# decide command needed to get to next part of town
if self.get_fake_path() == '~/my-house/kitchen' or self.get_fake_path() == '~/my-house':
# If the last command the user used was to get here
# then congratulate them
if line == "cd .." or line == 'cd ../':
hint = _("{{gb:Good work! Now replay the last command using " +\
"the}} {{ob:UP}} {{gb:arrow on your keyboard.}}")
# Otherwise, give them a hint
else:
hint = _("{{rb:Use}} {{yb:cd ..}} {{rb:to make your way to town.}}")
elif self.get_fake_path() == '~':
# If they have only just got to the home directory,
# then they used an appropriate command
if self.num_turns_in_home_dir == 0:
hint = _("{{gb:Cool! Now use}} {{yb:cd town}} {{gb:to head to town.}}")
# Otherwise give them a hint
else:
hint = _("{{rb:Use}} {{yb:cd town}} {{rb:to go into town.}}")
# So we can keep track of the number of turns they've been in the
# home directory
self.num_turns_in_home_dir += 1
# print the hint
self.send_hint(hint)
def next(self):
return 10, 3
class Step3(StepTemplateCd):
story = [
_("Use {{yb:ls}} to {{lb:look around}}.\n"),
]
start_dir = "~/town"
end_dir = "~/town"
commands = "ls"
hints = [_("{{rb:Use}} {{yb:ls}} {{rb:to have a look around the town.}}")]
def next(self):
return 10, 4
class Step4(StepTemplateCd):
story = [
_("The place appears to be deserted."),
_("However, you think you hear whispers."),
# TODO make this writing small
_("\n{{wb:?:}} {{Bn:\".....if they use}} {{yb:ls -a}}{{Bn:, they'll see us...\"}}"),
_("{{wb:?:}} {{Bn:\"..Shhh! ...might hear....\"}}\n")
]
start_dir = "~/town"
end_dir = "~/town"
commands = "ls -a"
hints = [
_("{{rb:You heard whispers referring to}} {{yb:ls -a}}" +\
"{{rb:, try using it!}}"),
]
def next(self):
return 10, 5
class Step5(StepTemplateCd):
story = [
_("You see a {{bb:.hidden-shelter}} that you didn't notice before.\n"),
_("{{gb:Something that starts with . is normally hidden from view.\n}}"),
_("It sounds like the whispers are coming from there. Try going in.\n")
]
start_dir = "~/town"
end_dir = "~/town/.hidden-shelter"
commands = [
"cd .hidden-shelter",
"cd .hidden-shelter/"
]
hints = [
_("{{rb:Try going inside the}} {{lb:.hidden-shelter}} {{rb:using }}" +\
"{{yb:cd}}{{rb:.}}"),
_("{{rb:Use the command}} {{yb:cd .hidden-shelter }}" +\
"{{rb:to go inside.}}")
]
def block_command(self, line):
return unblock_commands_with_cd_hint(line, self.commands)
def next(self):
return 10, 6
class Step6(StepTemplateCd):
story = [
_("Is anyone there? Have a {{lb:look around}}.\n")
]
start_dir = "~/town/.hidden-shelter"
end_dir = "~/town/.hidden-shelter"
commands = [
"ls",
"ls -a"
]
hints = [
_("{{rb:Use}} {{yb:ls}} {{rb:to have a look around you.}}")
]
def next(self):
return 11, 1
|
gpl-2.0
| 7,936,211,760,984,238,000
| 28.657407
| 96
| 0.503278
| false
| 3.468327
| false
| false
| false
|
GoteoFoundation/goteo-api
|
goteoapi/ratelimit.py
|
1
|
1974
|
# -*- coding: utf-8 -*-
import time
from functools import update_wrapper
from flask import request, g
from flask_redis import FlaskRedis
from .helpers import bad_request
from . import app
#
# REDIS RATE LIMITER
# ==================
redis = False
if app.config['REDIS_URL']:
redis = FlaskRedis(app)
class RateLimit(object):
expiration_window = 10
def __init__(self, key_prefix, limit, per):
self.reset = (int(time.time()) // per) * per + per
self.key = key_prefix + str(self.reset)
self.limit = limit
self.per = per
p = redis.pipeline()
p.incr(self.key)
p.expireat(self.key, self.reset + self.expiration_window)
self.current = min(p.execute()[0], limit)
remaining = property(lambda x: x.limit - x.current)
over_limit = property(lambda x: x.current >= x.limit)
def get_view_rate_limit():
return getattr(g, '_view_rate_limit', None)
def on_over_limit(limit):
resp = bad_request('Too many requests', 429)
return resp
def ratelimit(limit=app.config['REQUESTS_LIMIT'],
per=app.config['REQUESTS_TIME'],
over_limit=on_over_limit):
def decorator(f):
def rate_limited(*args, **kwargs):
if not app.config['REQUESTS_LIMIT'] or not redis:
return f(*args, **kwargs)
if app.config['AUTH_ENABLED'] and request.authorization:
key = 'rate-limit/%s/' % request.authorization.username
else:
remote_ip = request.environ.get('HTTP_X_REAL_IP',
request.remote_addr)
key = 'rate-limit/%s/' % remote_ip
rlimit = RateLimit(key, limit, per)
g._view_rate_limit = rlimit
if over_limit is not None and rlimit.over_limit:
return over_limit(rlimit)
return f(*args, **kwargs)
return update_wrapper(rate_limited, f)
return decorator
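# Hedged usage sketch (not part of this module); the route below is
# hypothetical. The decorator wraps a Flask view function and returns a
# 429 bad_request response once the per-window request limit is exceeded:
#
# @app.route('/api/projects/')
# @ratelimit(limit=300, per=900)
# def projects_list():
#     return 'ok'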
|
agpl-3.0
| 721,773,584,731,300,600
| 28.462687
| 71
| 0.581054
| false
| 3.724528
| false
| false
| false
|
pombredanne/openaire
|
bibsched/lib/bibsched_tasklets/bst_openaire_check_rights.py
|
1
|
2572
|
#!/usr/bin/env python
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Tasklets to update the list of OpenAIRE keywords to match any edits
made in the records.
"""
from invenio.bibdocfile import BibRecDocs
from invenio.bibtask import write_message, task_update_progress, \
task_sleep_now_if_required
from invenio.openaire_deposit_config import CFG_ACCESS_RIGHTS_KEYS
from invenio.search_engine import search_pattern, get_fieldvalues
def bst_openaire_check_rights():
"""
Tasklet to verify access rights consistency.
"""
restrictions = {
'cc0' : '',
'openAccess' : '',
'closedAccess' : 'status: closedAccess',
'restrictedAccess' : 'status: restrictedAccess',
'embargoedAccess' : 'firerole: deny until "%(date)s"\nallow any',
}
errors = []
for access_rights in CFG_ACCESS_RIGHTS_KEYS:
write_message("Checking records with access rights '%s'" % access_rights)
recids = search_pattern(p=access_rights, f="542__l")
for r in recids:
date = ''
if access_rights == 'embargoedAccess':
try:
date = get_fieldvalues(r, "942__a")[0]
except IndexError:
raise Exception("Embargoed record %s is missing embargo date in 942__a" % r)
expected_status = restrictions[access_rights] % { 'date' : date }
brd = BibRecDocs(r)
for d in brd.list_bibdocs():
real_status = d.get_status()
if real_status != expected_status:
d.set_status(expected_status)
write_message("Fixed record %s with wrong status. From: %s To: %s" % (r, real_status, expected_status))
for e in errors:
write_message(e)
if __name__ == '__main__':
bst_openaire_check_rights()
|
gpl-2.0
| 5,037,854,944,358,963,000
| 36.823529
| 123
| 0.641135
| false
| 3.885196
| false
| false
| false
|
chetan/cherokee
|
admin/consts.py
|
1
|
7437
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
AVAILABLE_LANGUAGES = [
('en', N_('English')),
('es', N_('Spanish')),
('de', N_('German')),
('fr', N_('French')),
('it', N_('Italian')),
('nl', N_('Dutch')),
('pl', N_('Polish')),
('sv_SE', N_('Swedish')),
('po_BR', N_('Brazilian Portuguese')),
('zh_CN', N_('Chinese Simplified')),
('ca', N_('Catalan')),
('gl', N_('Galician'))
]
PRODUCT_TOKENS = [
('', N_('Default')),
('product', N_('Product only')),
('minor', N_('Product + Minor version')),
('minimal', N_('Product + Minimal version')),
('os', N_('Product + Platform')),
('full', N_('Full Server string'))
]
HANDLERS = [
('', N_('None')),
('common', N_('List & Send')),
('file', N_('Static Content')),
('dirlist', N_('Only Listing')),
('redir', N_('Redirection')),
('fcgi', N_('FastCGI')),
('scgi', N_('SCGI')),
('uwsgi', N_('uWSGI')),
('proxy', N_('HTTP Reverse Proxy')),
('post_report', N_('Upload Reporting')),
('streaming', N_('Audio/Video Streaming')),
('cgi', N_('CGI')),
('ssi', N_('Server Side Includes')),
('secdownload', N_('Hidden Downloads')),
('server_info', N_('Server Info')),
('dbslayer', N_('MySQL Bridge')),
('custom_error', N_('HTTP Error')),
('admin', N_('Remote Administration')),
('empty_gif', N_('1x1 Transparent GIF'))
]
ERROR_HANDLERS = [
('', N_('Default errors')),
('error_redir', N_('Custom redirections')),
('error_nn', N_('Closest match'))
]
VALIDATORS = [
('', N_('None')),
('plain', N_('Plain text file')),
('htpasswd', N_('Htpasswd file')),
('htdigest', N_('Htdigest file')),
('ldap', N_('LDAP server')),
('mysql', N_('MySQL server')),
('pam', N_('PAM')),
('authlist', N_('Fixed list'))
]
VALIDATOR_METHODS = [
('basic', N_('Basic')),
('digest', N_('Digest')),
('basic,digest', N_('Basic or Digest'))
]
LOGGERS = [
('', N_('None')),
('combined', N_('Apache compatible')),
('ncsa', N_('NCSA')),
('custom', N_('Custom'))
]
LOGGER_WRITERS = [
('file', N_('File')),
('syslog', N_('System logger')),
('stderr', N_('Standard Error')),
('exec', N_('Execute program'))
]
BALANCERS = [
('', N_('None')),
('round_robin', N_("Round Robin")),
('ip_hash', N_("IP Hash")),
('failover', N_("Failover"))
]
SOURCE_TYPES = [
('interpreter', N_('Local interpreter')),
('host', N_('Remote host'))
]
ENCODERS = [
('gzip', N_('GZip')),
('deflate', N_('Deflate'))
]
THREAD_POLICY = [
('', N_('Default')),
('fifo', N_('FIFO')),
('rr', N_('Round-robin')),
('other', N_('Dynamic'))
]
POLL_METHODS = [
('', N_('Automatic')),
('epoll', 'epoll() - Linux >= 2.6'),
('kqueue', 'kqueue() - BSD, OS X'),
('ports', 'Solaris ports - >= 10'),
('poll', 'poll()'),
('select', 'select()'),
('win32', 'Win32')
]
REDIR_SHOW = [
('1', N_('External')),
('0', N_('Internal'))
]
ERROR_CODES = [
('400', '400 Bad Request'),
('401', '401 Unauthorized'),
('402', '402 Payment Required'),
('403', '403 Forbidden'),
('404', '404 Not Found'),
('405', '405 Method Not Allowed'),
('406', '406 Not Acceptable'),
('407', '407 Proxy Auth Required'),
('408', '408 Request Timeout'),
('409', '409 Conflict'),
('410', '410 Gone'),
('411', '411 Length Required'),
('412', '412 Precondition Failed'),
('413', '413 Request Entity too large'),
('414', '414 Request-URI too long'),
('415', '415 Unsupported Media Type'),
('416', '416 Requested range not satisfiable'),
('417', '417 Expectation Failed'),
('422', '422 Unprocessable Entity'),
('423', '423 Locked'),
('424', '424 Failed Dependency'),
('425', '425 Unordered Collection'),
('426', '426 Upgrade Required'),
('449', '449 Retry With'),
('500', '500 Internal Server Error'),
('501', '501 Not Implemented'),
('502', '502 Bad gateway'),
('503', '503 Service Unavailable'),
('504', '504 Gateway Timeout'),
('505', '505 HTTP Version Not Supported'),
('506', '506 Variant Also Negotiates'),
('507', '507 Insufficient Storage'),
('509', '509 Bandwidth Limit Exceeded'),
('510', '510 Not Extended')
]
RULES = [
('directory', N_('Directory')),
('extensions', N_('Extensions')),
('request', N_('Regular Expression')),
('header', N_('Header')),
('exists', N_('File Exists')),
('method', N_('HTTP Method')),
('bind', N_('Incoming IP/Port')),
('tls', N_('SSL / TLS')),
('fullpath', N_('Full Path')),
('from', N_('Connected from')),
('url_arg', N_('URL Argument')),
('geoip', N_('GeoIP'))
]
VRULES = [
('', N_('Match Nickname')),
('wildcard', N_('Wildcards')),
('rehost', N_('Regular Expressions')),
('target_ip', N_('Server IP'))
]
EXPIRATION_TYPE = [
('', N_('Not set')),
('epoch', N_('Already expired on 1970')),
('max', N_('Do not expire until 2038')),
('time', N_('Custom value'))
]
CRYPTORS = [
('', N_('No TLS/SSL')),
('libssl', N_('OpenSSL / libssl'))
]
EVHOSTS = [
('', N_('Off')),
('evhost', N_('Enhanced Virtual Hosting'))
]
CLIENT_CERTS = [
('', N_('Skip')),
('accept', N_('Accept')),
('required', N_('Require'))
]
COLLECTORS = [
('', N_('Disabled')),
('rrd', N_('RRDtool graphs'))
]
UTC_TIME = [
('', N_('Local time')),
('1', N_('UTC: Coordinated Universal Time'))
]
DWRITER_LANGS = [
('json', N_('JSON')),
('python', N_('Python')),
('php', N_('PHP')),
('ruby', N_('Ruby'))
]
POST_TRACKERS = [
('', N_('Disabled')),
('post_track', N_('POST tracker'))
]
CACHING_OPTIONS = [
('', N_('Not set')),
('public', N_('Public')),
('private', N_('Private')),
('no-cache', N_('No Cache'))
]
COMPRESSION_LEVELS = [
('', N_('Default')),
('0', N_('0 - No compression')),
('1', N_('1')),
('2', N_('2')),
('3', N_('3')),
('4', N_('4')),
('5', N_('5')),
('6', N_('6')),
('7', N_('7')),
('8', N_('8')),
('9', N_('9 - Max compression'))
]
|
gpl-2.0
| -8,224,613,567,037,564,000
| 26.544444
| 67
| 0.477746
| false
| 3.430351
| false
| false
| false
|
hassy/informatics-explorer
|
utils.py
|
1
|
1514
|
# author: Hasan Veldstra <hasan.veldstra@gmail.com>
# license: MIT
import os
import fnmatch
try:
import simplejson as json
except:
import json
def locate(pattern, root=os.getcwd()):
"""
Generate of all files in a directory that match the pattern.
"""
for path, dirs, files in os.walk(root):
for filename in [os.path.abspath(os.path.join(path, filename)) for filename in files if fnmatch.fnmatch(filename, pattern)]:
yield filename
def sort_by_value(d):
"""
Sort dict by numeric value.
"""
return sorted(d.iteritems(), key=lambda (k, v): (v, k), reverse=True)
def flatten(x):
"""
Return flat list of items from all sub-sequences in list x.
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
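# Illustrative example (not in the original file): nested sequences are fully
# flattened while strings are kept whole.
# flatten([1, [2, [3, 4]], "ab"])  # -> [1, 2, 3, 4, "ab"]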
def slurp(fn):
"""
Read text file into a string.
"""
f = open(fn)
s = f.read()
f.close()
return s
def write(fn, data):
"""
Write string to a file.
"""
f = open(fn, "w")
f.write(data)
f.close()
return True
def load_json(filename):
"""
Return datastructure from JSON data in a file.
"""
return json.loads(slurp(filename))
def dump_as_json(filename, datastructure):
"""
Writes datastructure as JSON into a file.
"""
write(filename, json.dumps(datastructure, sort_keys=True, indent=4))
|
mit
| 741,321,748,822,468,600
| 21.954545
| 132
| 0.599736
| false
| 3.570755
| false
| false
| false
|
rchuppala/usc_agent
|
src/usc-agent-dev/common/source/pyang/pyang/translators/schemanode.py
|
1
|
10232
|
# Copyright (c) 2013 by Ladislav Lhotka, CZ.NIC <lhotka@nic.cz>
#
# Python class representing a node in a RELAX NG schema.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from xml.sax.saxutils import escape
class SchemaNode(object):
"""This class represents a node in a RELAX NG schema.
The details are tailored to the specific features of the hybrid
DSDL schema generated from YANG modules, but the class may be
reasonably used for representing any other RELAX NG schema.
Specific types of nodes are created using class methods below.
Instance variables:
* `self.attr` - dictionary of XML attributes. Keys are attribute
names and values attribute values.
* `self.children` - list of child nodes.
* `self.default` - default value (only for "element" nodes)
* `self.interleave` - boolean flag determining the interleave
status. If True, the children of `self` will end up inside
<interleave>.
* `self.keys` - list of QNames of YANG list keys (only for "_list_"
nodes having children).
* `self.keymap` - dictionary of key nodes (only for "_list_" nodes
having children). The keys of the dictionary are the QNames of
YANG list keys.
* `self.minEl` - minimum number of items (only for "_list_" nodes).
* `self.maxEl` - maximum number of items (only for "_list_" nodes).
* `self.name` - name of the schema node (XML element name).
* `self.occur` - specifies the occurrence status using integer
values: 0=optional, 1=implicit, 2=mandatory, 3=presence.
* `self.parent` - parent node.
* `self.text` - text content.
"""
def element(cls, name, parent=None, interleave=None, occur=0):
"""Create an element node."""
node = cls("element", parent, interleave=interleave)
node.attr["name"] = name
node.occur = occur
return node
element = classmethod(element)
def leaf_list(cls, name, parent=None, interleave=None):
"""Create _list_ node for a leaf-list."""
node = cls("_list_", parent, interleave=interleave)
node.attr["name"] = name
node.keys = None
node.minEl = "0"
node.maxEl = None
node.occur = 3
return node
leaf_list = classmethod(leaf_list)
def list(cls, name, parent=None, interleave=None):
"""Create _list_ node for a list."""
node = cls.leaf_list(name, parent, interleave=interleave)
node.keys = []
node.keymap = {}
return node
list = classmethod(list)
def choice(cls, parent=None, occur=0):
"""Create choice node."""
node = cls("choice", parent)
node.occur = occur
node.default_case = None
return node
choice = classmethod(choice)
def case(cls, parent=None):
"""Create case node."""
node = cls("case", parent)
node.occur = 0
return node
case = classmethod(case)
def define(cls, name, parent=None, interleave=False):
"""Create define node."""
node = cls("define", parent, interleave=interleave)
node.occur = 0
node.attr["name"] = name
return node
define = classmethod(define)
def __init__(self, name, parent=None, text="", interleave=None):
"""Initialize the object under `parent`.
"""
self.name = name
self.parent = parent
if parent is not None: parent.children.append(self)
self.text = text
self.adjust_interleave(interleave)
self.children = []
self.annots = []
self.attr = {}
def serialize_children(self):
"""Return serialization of receiver's children.
"""
return ''.join([ch.serialize() for ch in self.children])
def serialize_annots(self):
"""Return serialization of receiver's annotation elements.
"""
return ''.join([ch.serialize() for ch in self.annots])
def adjust_interleave(self, interleave):
"""Inherit interleave status from parent if undefined."""
if interleave == None and self.parent:
self.interleave = self.parent.interleave
else:
self.interleave = interleave
def subnode(self, node):
"""Make `node` receiver's child."""
self.children.append(node)
node.parent = self
node.adjust_interleave(None)
def annot(self, node):
"""Add `node` as an annotation of the receiver."""
self.annots.append(node)
node.parent = self
def set_attr(self, key, value):
"""Set attribute `key` to `value` and return the receiver."""
self.attr[key] = value
return self
def start_tag(self, alt=None, empty=False):
"""Return XML start tag for the receiver."""
if alt:
name = alt
else:
name = self.name
result = "<" + name
for it in self.attr:
result += ' %s="%s"' % (it, escape(self.attr[it], {'"': "&quot;", '%': "%%"}))
if empty:
return result + "/>%s"
else:
return result + ">"
def end_tag(self, alt=None):
"""Return XML end tag for the receiver."""
if alt:
name = alt
else:
name = self.name
return "</" + name + ">"
def serialize(self, occur=None):
"""Return RELAX NG representation of the receiver and subtree.
"""
fmt = self.ser_format.get(self.name, SchemaNode._default_format)
return fmt(self, occur) % (escape(self.text) +
self.serialize_children())
def _default_format(self, occur):
"""Return the default serialization format."""
if self.text or self.children:
return self.start_tag() + "%s" + self.end_tag()
return self.start_tag(empty=True)
def _wrapper_format(self, occur):
"""Return the serializatiopn format for <start>."""
return self.start_tag() + self._chorder() + self.end_tag()
def _define_format(self, occur):
"""Return the serialization format for a define node."""
if hasattr(self, "default"):
self.attr["nma:default"] = self.default
middle = self._chorder() if self.children else "<empty/>%s"
return self.start_tag() + middle + self.end_tag()
def _element_format(self, occur):
"""Return the serialization format for an element node."""
if occur:
occ = occur
else:
occ = self.occur
if occ == 1:
if hasattr(self, "default"):
self.attr["nma:default"] = self.default
else:
self.attr["nma:implicit"] = "true"
middle = self._chorder() if self.children else "<empty/>%s"
fmt = self.start_tag() + self.serialize_annots() + middle + self.end_tag()
if (occ == 2 or self.parent.name == "choice"
or self.parent.name == "case" and len(self.parent.children) == 1):
return fmt
else:
return "<optional>" + fmt + "</optional>"
def _chorder(self):
"""Add <interleave> if child order is arbitrary."""
if (self.interleave and len(self.children) > 1):
return "<interleave>%s</interleave>"
return "%s"
def _list_format(self, occur):
"""Return the serialization format for a _list_ node."""
if self.keys:
self.attr["nma:key"] = " ".join(self.keys)
keys = ''.join([self.keymap[k].serialize(occur=2)
for k in self.keys])
else:
keys = ""
if self.maxEl:
self.attr["nma:max-elements"] = self.maxEl
if int(self.minEl) == 0:
ord_ = "zeroOrMore"
else:
ord_ = "oneOrMore"
if int(self.minEl) > 1:
self.attr["nma:min-elements"] = self.minEl
middle = self._chorder() if self.children else "<empty/>%s"
return ("<" + ord_ + ">" + self.start_tag("element") +
self.serialize_annots() + keys +
middle + self.end_tag("element") + "</" + ord_ + ">")
def _choice_format(self, occur):
"""Return the serialization format for a choice node."""
middle = "%s" if self.children else "<empty/>%s"
fmt = self.start_tag() + middle + self.end_tag()
if self.occur != 2:
return "<optional>" + fmt + "</optional>"
else:
return fmt
def _case_format(self, occur):
"""Return the serialization format for a case node."""
if self.occur == 1:
self.attr["nma:implicit"] = "true"
ccnt = len(self.children)
if ccnt == 0: return "<empty/>%s"
if ccnt == 1 or not self.interleave:
return self.start_tag("group") + "%s" + self.end_tag("group")
return (self.start_tag("interleave") + "%s" +
self.end_tag("interleave"))
ser_format = { "nma:data": _wrapper_format,
"nma:input": _wrapper_format,
"nma:notification": _wrapper_format,
"nma:output": _wrapper_format,
"element": _element_format,
"_list_": _list_format,
"choice": _choice_format,
"case": _case_format,
"define": _define_format,
}
"""Class variable - dictionary of methods returning string
serialization formats. Keys are node names."""
|
gpl-2.0
| 3,778,500,691,622,639,600
| 35.412811
| 89
| 0.57672
| false
| 4.026761
| false
| false
| false
|
spotify/crtauth
|
test/roundtrip_test.py
|
1
|
14010
|
# Copyright (c) 2011-2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
from crtauth import server
from crtauth import key_provider
from crtauth import rsa
from crtauth import protocol
from crtauth import ssh
from crtauth import exceptions
from crtauth import msgpack_protocol
from crtauth.client import create_response
from crtauth.server import create_response as server_create_response
inner_s = ("AAAAB3NzaC1yc2EAAAABIwAAAQEArt7xdaxlbzzGlgLhqpLuE5x9d+so0M"
"JiqQSmiUJojuK+v1cxnYCnQQPF0BkAhw2hiFiDvLLVogIu8m2wCV9XAGxrz38NLHVq"
"ke+EAduJAfiiD1iwvSLbFBOMVRYfzUoiuPIudwZqmLuCpln1RUE6O/ujmYNyoPS4fq"
"a1svaiZ4C77tLMi2ztMIX97SN2o0EntrhOonJ1nk+7JLYvkhsT8rX20bg6Mlu909iO"
"vtTbElnypKzmjFZyBvzZhocRo4yfrekP3s2QyKSIB5ARGenoSoQa43cD93tqbLGK4o"
"JSkkfxc9HFPo0t+deDorZmelNNFvEn5KeqP0HJvw/jm2U1PQ==")
s = ("ssh-rsa %s noa@vader.local" % inner_s)
t_pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDK0wNhgGlFZf"
"BoRBS+M8wGoyOOVunYYjeaoRXKFKfhx288ZIo87WMfN6i5KnUTH3A/mYlVnK4bh"
"chS6dUFisaXcURvFgY46pUSGuLTZxTe9anIIR/iT+V+8MRDHXffRGOCLEQUl0le"
"YTht0dc7rxaW42d83yC7uuCISbgWqOANvMkZYqZjaejOOGVpkApxLGG8K8RvNBB"
"M8TYqE3DQHSyRVU6S9HWLbWF+i8W2h4CLX2Quodf0c1dcqlftClHjdIyed/zQKh"
"Ao+FDcJrN+2ZDJ0mkYLVlJDZuLk/K/vSOwD3wXhby3cdHCsxnRfy2Ylnt31VF0a"
"VtlhW4IJ+5mMzmz noa@date.office.spotify.net")
test_priv_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAytMDYYBpRWXwaEQUvjPMBqMjjlbp2GI3mqEVyhSn4cdvPGSK
PO1jHzeouSp1Ex9wP5mJVZyuG4XIUunVBYrGl3FEbxYGOOqVEhri02cU3vWpyCEf
4k/lfvDEQx1330RjgixEFJdJXmE4bdHXO68WluNnfN8gu7rgiEm4FqjgDbzJGWKm
Y2nozjhlaZAKcSxhvCvEbzQQTPE2KhNw0B0skVVOkvR1i21hfovFtoeAi19kLqHX
9HNXXKpX7QpR43SMnnf80CoQKPhQ3CazftmQydJpGC1ZSQ2bi5Pyv70jsA98F4W8
t3HRwrMZ0X8tmJZ7d9VRdGlbZYVuCCfuZjM5swIDAQABAoIBADtnoHbfQHYGDGrN
ffHTg+9xuslG5YjuA3EzuwkMEbvMSOU8YUzFDqInEDDjoZSvQZYvJw0/LbN79Jds
S2srIU1b7HpIzhu/gVfjLgpTB8bh1w95vDfxxLrwU9uAdwqaojaPNoV9ZgzRltB7
hHnDp28cPcRSKekyK+9fAB8K6Uy8N00hojBDwtwXM8C4PpQKod38Vd0Adp9dEdX6
Ro9suYb+d+qFalYbKIbjKWkll+ZiiGJjF1HSQCTwlzS2haPXUlbk57HnN+8ar+a3
ITTc2gbNuTqBRD1V/gCaD9F0npVI3mQ34eUADNVVGS0xw0pN4j++Da8KXP+pyn/G
DU/n8SECgYEA/KN4BTrg/LB7cGrzkMQmW26NA++htjiWHK3WTsQBKBDFyReJBn67
o9kMTHBP35352RfuJ3xEEJ0/ddqGEY/SzNk3HMTlxBbR5Xq8ye102dxfEO3eijJ/
F4VRSf9sFgdRoLvE62qLudytK4Ku9nnKoIqrMxFweTpwxzf2jjIKDbECgYEAzYXe
QxT1A/bfs5Qd6xoCVOAb4T/ALqFo95iJu4EtFt7nvt7avqL+Vsdxu5uBkTeEUHzh
1q47LFoFdGm+MesIIiPSSrbfZJ6ht9kw8EbF8Py85X4LBXey67JlzzUq+ewFEP91
do7uGQAY+BRwXtzzPqaVBVa94YOxdq/AGutrIqMCgYBr+cnQImwKU7tOPse+tbbX
GRa3+fEZmnG97CZOH8OGxjRiT+bGmd/ElX2GJfJdVn10ZZ/pzFii6TI4Qp9OXjPw
TV4as6Sn/EDVXXHWs+BfRKp059VXJ2HeQaKOh9ZAS/x9QANXwn/ZfhGdKQtyWHdb
yiiFeQyjI3EUFD0SZRya4QKBgA1QvQOvmeg12Gx0DjQrLTd+hY/kZ3kd8AUKlvHU
/qzaqD0PhzCOstfAeDflbVGRPTtRu/gCtca71lqidzYYuiAsHfXFP1fvhx64LZmD
nFNurHZZ4jDqfmcS2dHA6hXjGrjtNBkITZjFDtkTyev7eK74b/M2mXrA44CDBnk4
A2rtAoGAMv92fqI+B5taxlZhTLAIaGVFbzoASHTRl3eQJbc4zc38U3Zbiy4deMEH
3QTXq7nxWpE4YwHbgXAeJUGfUpE+nEZGMolj1Q0ueKuSstQg5p1nwhQIxej8EJW+
7siqmOTZDKzieik7KVzaJ/U02Q186smezKIuAOYtT8VCf9UksJ4=
-----END RSA PRIVATE KEY-----"""
class RoundtripTest(unittest.TestCase):
def test_read_base64_key(self):
key = rsa.RSAPublicKey(s)
self.assertEqual(key.fingerprint(), "\xfb\xa1\xeao\xd3y")
self.assertEqual(key.decoded, inner_s)
self.assertEqual(key.encoded[:15], "\x00\x00\x00\x07ssh-rsa"
"\x00\x00\x00\x01")
def test_read_binary_key(self):
key = rsa.RSAPublicKey(ssh.base64url_decode(s.split(" ")[1]))
self.assertEqual(key.fingerprint(), "\xfb\xa1\xeao\xd3y")
self.assertEqual(key.decoded, inner_s)
self.assertEqual(key.encoded[:15], "\x00\x00\x00\x07ssh-rsa"
"\x00\x00\x00\x01")
def test_create_challenge(self):
auth_server = server.AuthServer("gurka", DummyKeyProvider(),
"server.name")
s = auth_server.create_challenge("noa")
cb = ssh.base64url_decode(s)
verifiable_payload = protocol.VerifiablePayload.deserialize(cb)
challenge = protocol.Challenge.deserialize(verifiable_payload.payload)
self.assertEquals("\xfb\xa1\xeao\xd3y", challenge.fingerprint)
def test_create_challenge_v1(self):
auth_server = server.AuthServer("secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("noa", 1)
cb = ssh.base64url_decode(challenge)
decoded_challenge = msgpack_protocol.Challenge.deserialize(cb)
self.assertEquals("\xfb\xa1\xeao\xd3y", decoded_challenge.fingerprint)
def test_create_challenge_no_legacy_support(self):
auth_server = server.AuthServer("secret", DummyKeyProvider(),
"server.name",
lowest_supported_version=1)
self.assertRaises(exceptions.ProtocolVersionError,
auth_server.create_challenge, "noa")
def test_create_challenge_v1_another(self):
auth_server = server.AuthServer("secret", DummyKeyProvider(),
"server.name",
lowest_supported_version=1)
challenge = auth_server.create_challenge("noa", 1)
cb = ssh.base64url_decode(challenge)
decoded_challenge = msgpack_protocol.Challenge.deserialize(cb)
self.assertEquals("\xfb\xa1\xeao\xd3y", decoded_challenge.fingerprint)
def test_authentication_roundtrip(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server.create_token(response)
self.assertTrue(auth_server.validate_token(token))
def test_authentication_roundtrip_v1(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("test", 1)
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server.create_token(response)
self.assertTrue(auth_server.validate_token(token))
def test_authentication_roundtrip_mitm1(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("test")
try:
create_response(challenge, "another.server",
ssh.SingleKeySigner(test_priv_key))
self.fail("Should have gotten InvalidInputException")
except exceptions.InvalidInputException:
pass
def test_authentication_roundtrip_mitm2(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"another.server")
try:
auth_server_b.create_token(response)
self.fail("should have thrown exception")
except exceptions.InvalidInputException:
pass
def test_create_token_too_new(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() - 1000)
try:
auth_server_b.create_token(response)
self.fail("Should have issued InvalidInputException, "
"challenge too new")
except exceptions.InvalidInputException:
pass
def test_create_token_invalid_duration(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
token = auth_server._make_token("some_user", int(time.time()) + 3600)
self.assertRaises(exceptions.InvalidInputException,
auth_server.validate_token, token)
def test_create_token_too_old(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() + 1000)
try:
auth_server_b.create_token(response)
self.fail("Should have issued InvalidInputException, "
"challenge too old")
except exceptions.InvalidInputException:
pass
def test_create_token_invalid_input(self):
auth_server = server.AuthServer("gurka", DummyKeyProvider(),
"server.name")
for t in ("2tYneWsOm88qu_Trzahw2r6ZLg37oepv03mykGS-HdcnWJLuUMDOmfVI"
"Wl5n3U6qt6Fub2E", "random"):
try:
auth_server.create_token(t)
self.fail("Input is invalid, should have thrown exception")
except exceptions.ProtocolError:
pass
def test_validate_token_too_old(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server_a.create_token(response)
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() + 1000)
try:
auth_server_b.validate_token(token)
self.fail("Should have issued TokenExpiredException, "
"token too old")
except exceptions.TokenExpiredException:
pass
def test_validate_token_too_new(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server_a.create_token(response)
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() - 1000)
try:
auth_server_b.validate_token(token)
self.fail("Should have issued TokenExpiredException, "
"token too new")
except exceptions.TokenExpiredException:
pass
def test_validate_token_wrong_secret(self):
token = "dgAAAJgtmNoqST9RaxayI7UP5-GLviUDAAAAFHQAAABUJYr_VCWLPQAAAAR0ZXN0"
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: 1411746561.058992)
auth_server.validate_token(token)
auth_server = server.AuthServer("wrong_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: 1411746561.058992)
try:
auth_server.validate_token(token)
self.fail("Should have gotten InvalidInputException")
except exceptions.InvalidInputException:
pass
def test_b64_roundtrip(self):
l = ["a", "ab", "abc", "abcd"]
for i in l:
self.assertEquals(ssh.base64url_decode(ssh.base64url_encode(i)), i)
def test_compatibility_create_response(self):
self.assertEqual(server_create_response, create_response)
class DummyKeyProvider(key_provider.KeyProvider):
def get_key(self, username):
if username == 'noa':
return rsa.RSAPublicKey(s)
elif username == 'test':
return rsa.RSAPublicKey(t_pubkey)
else:
raise exceptions.CrtAuthError("Unknown username: %s" % username)
|
apache-2.0
| 1,554,741,149,084,993,500
| 47.310345
| 82
| 0.635261
| false
| 3.176871
| true
| false
| false
|
kosklain/CausalityCompetition
|
CausalityTrainer.py
|
1
|
2257
|
import data_io
import CausalityFeatureFunctions as f
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
class CausalityTrainer:
def __init__(self, directionForward=True):
self.directionForward = directionForward
def getFeatureExtractor(self, features):
combined = f.FeatureMapper(features)
return combined
def getPipeline(self, feat):
features = self.getFeatureExtractor(feat)
steps = [("extract_features", features),
("classify", RandomForestRegressor(compute_importances=True, n_estimators=500,
verbose=2, n_jobs=1, min_samples_split=10,
random_state=0))]
return Pipeline(steps)
def getTrainingDataset(self):
print "Reading in the training data"
train = data_io.read_train_pairs()
print "Reading the information about the training data"
train2 = data_io.read_train_info()
train["A type"] = train2["A type"]
train["B type"] = train2["B type"]
return train
def run(self):
features = f.features
train = self.getTrainingDataset()
print "Reading preprocessed features"
if f.preprocessedFeatures != []:
intermediate = data_io.read_intermediate_train()
for i in f.preprocessedFeatures:
train[i] = intermediate[i]
for i in features:
if i[0] in f.preprocessedFeatures:
i[1] = i[0]
i[2] = f.SimpleTransform(transformer=f.ff.identity)
print "Reading targets"
target = data_io.read_train_target()
print "Extracting features and training model"
classifier = self.getPipeline(features)
        if self.directionForward:
            # collapse the ternary causality target {-1, 0, 1} into a
            # regression target for the direction currently being modelled
            finalTarget = [ x*(x+1)/2 for x in target.Target]
        else:
            finalTarget = [ -x*(x-1)/2 for x in target.Target]
classifier.fit(train, finalTarget)
print classifier.steps[-1][1].feature_importances_
print "Saving the classifier"
data_io.save_model(classifier)
if __name__=="__main__":
    ct = CausalityTrainer()
    ct.run()
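    # For the reverse (B -> A) direction one would presumably construct the
    # trainer with directionForward=False, e.g.:
    #   ct_backward = CausalityTrainer(directionForward=False)
    #   ct_backward.run()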
|
gpl-2.0
| 6,256,355,404,492,569,000
| 38.614035
| 96
| 0.594152
| false
| 4.340385
| false
| false
| false
|
dgnorth/DriftUe4Plugin
|
Scripts/publish.py
|
1
|
13837
|
"""Build and upload script to make UE4 client and server builds available on S3.
"""
import sys
import os
import threading
import time
import json
from datetime import datetime
import mimetypes
import argparse
import re
import getpass
import operator
from dateutil.parser import parse
from tabulate import tabulate
import boto3
from boto3.s3.transfer import S3Transfer, TransferConfig
def get_archives_in_folder(path):
ret = []
for filename in os.listdir(path):
if filename.endswith(".zip"):
full_filename = os.path.join(path, filename)
ret.append(full_filename)
return ret
def delete_archives(path):
archives = get_archives_in_folder(path)
for full_filename in archives:
print "Deleting old archive '%s'" % full_filename
os.remove(full_filename)
def get_script_path():
return os.path.abspath(os.path.split(__file__)[0])
def get_config():
config_filename = os.path.join(get_script_path(), "publish.cfg")
ret = {}
try:
with open(config_filename, 'r') as f:
ret = json.load(f)
except:
print "No config file. All configuration must come from command-line"
return ret
def get_project_file():
# assume the project file is one level above this script
path = os.path.abspath(os.path.join(get_script_path(), "..\\")).replace('\\', '/')
project_name = path.split('/')[-1]
ret = os.path.join(path, project_name) + ".uproject"
if not os.path.exists(ret):
raise RuntimeError("Project file '%s' not found" % ret)
return project_name, ret
config = get_config()
index_file = None
def get_index_path(s3path):
return "{path}/index.json".format(path=s3path)
def get_index(s3region, s3bucket, s3path):
global index_file
if index_file:
return index_file
key_name = get_index_path(s3path)
try:
response = boto3.client('s3', s3region).get_object(Bucket=s3bucket, Key=key_name)
except Exception as e:
if 'NoSuchKey' not in str(e):
raise
index_file = {
'repository': None,
'next_build_number': 10000,
'refs': [],
}
else:
index_file = json.load(response['Body'])
return index_file
def download_manifest(s3region, s3bucket, manifest_key_name):
s3 = boto3.client('s3', s3region)
resp = s3.get_object(Bucket=s3bucket, Key=manifest_key_name)
ret = json.load(resp['Body'])
return ret
def get_staging_directory(project_file, config):
project_root, _ = os.path.split(project_file)
project_root = os.path.abspath(project_root)
return os.path.join(project_root, 'Saved', 'StagedBuilds', config)
def create_build_manifest(build_number, repository, ref, project, target_platform, config, version_string=None, executable_path=None):
# Gather info for build manifest
build_manifest = {
'repository': repository,
'ref': ref,
'project': project,
'target_platform': target_platform,
'config': config,
'timestamp': datetime.utcnow().isoformat(),
'commit_id': None,
'build_number': build_number,
'built_by': getpass.getuser(),
'version_string': version_string,
'executable_path': executable_path,
}
# Generate a canonical name for the build archive (excluding extension)
canonical_ref = '{}/{}'.format(ref, build_number).replace('/', '.')
canonical_buildname = '{project}-{target_platform}-{config}-{canonical_ref}'.format(
canonical_ref=canonical_ref, **build_manifest
)
build_manifest['build'] = canonical_buildname
return build_manifest
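# For illustration (all values hypothetical): publishing project 'MyGame' under
# ref 'master' with build number 10042 for Win64/Development gives
# canonical_ref 'master.10042' and the canonical build name
# 'MyGame-Win64-Development-master.10042'.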
def transfer_build_to_s3(archive_name, key_name):
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
self._start_time = time.time()
self._last_time = time.time()
self._megseg = 0.0
@property
def archive_info(self):
return {
"filename": self._filename,
"size": long(self._size),
"upload_time_sec": long(self._last_time - self._start_time)
}
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
if time.time() - self._last_time > 0.02:
self._last_time = time.time()
elapsed = time.time() - self._start_time
self._megseg = (self._seen_so_far / 1024.0 / 1024.0) / elapsed
sys.stdout.write("Upload progress: %s kb / %s kb (%.2f%%) %.1f mb/s\r" % (self._seen_so_far // 1024, self._size // 1024, percentage, self._megseg))
sys.stdout.flush()
transfer_config = TransferConfig(
multipart_threshold=4 * 1024 * 1024,
max_concurrency=30
)
client = boto3.client('s3', s3region)
transfer = S3Transfer(client, transfer_config)
mimetype, encoding = mimetypes.guess_type(archive_name)
if mimetype is None:
print "Can't figure out mimetype for:", archive_name
sys.exit(1)
print " Archive filename: ", archive_name
print " S3 Bucket: ", s3bucket
print " S3 Key Name: ", key_name
print " Key Mime Type: ", mimetype
cb = ProgressPercentage(archive_name)
transfer.upload_file(
archive_name, s3bucket, key_name,
extra_args={'ContentType': mimetype},
callback=cb,
)
return cb
def publish_build(zippathname, build_manifest, s3region, s3bucket, s3path):
client = boto3.client('s3', s3region)
# The archive and manifest must be stored in the correct subfolder, so we append
# the UE4 build folder root and repository name.
base_name = "{}/{}/{}".format(
s3path,
build_manifest['target_platform'],
build_manifest['build']
)
zipname, zipext = os.path.splitext(zippathname)
archive_key_name = base_name + zipext
manifest_key_name = base_name + '.json'
# Upload the build to S3
progress = transfer_build_to_s3(zippathname, archive_key_name)
# Update manifest information
build_manifest['archive_info'] = progress.archive_info
build_manifest['archive'] = archive_key_name
# Get a permalink to the build
build_manifest['archive_url'] = client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': s3bucket,
'Key': archive_key_name,
},
ExpiresIn=60*60*24*365,
HttpMethod='GET'
)
# Upload build manifest. Use the same name as the archive, but .json.
response = client.put_object(
Bucket=s3bucket,
Key=manifest_key_name,
Body=json.dumps(build_manifest, indent=4),
ContentType='application/json'
)
print "build_manifest:", json.dumps(build_manifest, indent=4)
# Index file update
print "Updating index file"
index_file = get_index(s3region, s3bucket, s3path)
if index_file['next_build_number'] != build_manifest['build_number']:
print "ATTENTION! Build number counter and build number don't match!"
index_file['next_build_number'] += 1
ref = build_manifest['ref']
target_platform = build_manifest['target_platform']
for ref_item in index_file['refs']:
if ref_item['ref'] == ref and ref_item['target_platform'] == target_platform:
break
else:
ref_item = {
'ref': ref,
'target_platform': target_platform,
}
index_file['refs'].append(ref_item)
# Add a reference to the manifest file
ref_item['build_manifest'] = manifest_key_name
key_name = get_index_path(s3path)
response = client.put_object(
Bucket=s3bucket,
Key=key_name,
Body=json.dumps(index_file, indent=4),
ContentType='application/json'
)
print "Publishing build succeeded"
def list_builds(s3region, s3bucket, s3path):
sys.stdout.write('Fetching build information...')
index_file = get_index(s3region, s3bucket, s3path)
results = []
for entry in index_file['refs']:
sys.stdout.write('.')
manifest = download_manifest(s3region, s3bucket, entry['build_manifest'])
dt = manifest['timestamp']
dt = parse(dt).replace(tzinfo=None).strftime("%Y-%m-%d %H:%M")
sz = int(manifest['archive_info']['size'])/1024/1024
results.append([entry['ref'], dt, manifest['built_by'], manifest['config'], entry['target_platform'], manifest['build_number'], sz])
results.sort(key=operator.itemgetter(5), reverse=True)
print
print tabulate(results, headers=['ref', 'timestamp', 'built by', 'config', 'platform', 'build number', 'size [mb]'])
if __name__ == "__main__":
start_time = time.time()
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='sub-command help', dest="cmd")
parser_list = subparsers.add_parser('list', help='List published builds')
parser_list.add_argument('--s3bucket', default=config.get('bucket'), help="S3 Bucket name (default: %s)" % config.get('bucket'))
parser_list.add_argument('--s3region', default=config.get('region'), help="S3 Region name (default: %s)" % config.get('region'))
parser_list.add_argument('--s3path', default=config.get('path'), help="S3 Path (default: %s)" % config.get('path'))
parser_publish = subparsers.add_parser('publish', help='Publish a build to the cloud')
parser_publish.add_argument("-r", "--ref", required=True, help='Ref to publish this build under (required)')
parser_publish.add_argument("-c", "--config", default="Development", help='Build configuration that was built (default: Development)')
parser_publish.add_argument('-a', '--archive', help="Path to archive file to upload to S3. If not specified all .zip archives from staging folder will be published.")
parser_publish.add_argument('-v', '--version-string', help="A canonical version string of the build (optional).")
parser_publish.add_argument('-p', '--platform', default="Win64", help="Platform of the build (default: Win64)")
parser_publish.add_argument('--s3bucket', default=config.get('bucket'), help="S3 Bucket name (default: %s)" % config.get('bucket'))
parser_publish.add_argument('--s3region', default=config.get('region'), help="S3 Region name (default: %s)" % config.get('region'))
parser_publish.add_argument('--s3path', default=config.get('path'), help="S3 Path (default: %s)" % config.get('path'))
args = parser.parse_args()
tp_archives = {} # Key is target platform, value is archive folder, zip file name.
build_manifests = []
project_name, project_file = get_project_file()
s3region = args.s3region
s3bucket = args.s3bucket
s3path = args.s3path
if not all([s3region, s3bucket, s3path]):
print "Missing required parameters. Please run command with --help for details"
sys.exit(1)
if args.cmd == 'publish':
server_platform = args.platform
executable_path = "{project_name}\\Binaries\\{server_platform}\\{project_name}Server.exe".format(project_name=project_name,
server_platform=server_platform)
config_name = args.config
ref = args.ref
REF_MAX_LEN = 16
if len(ref) > REF_MAX_LEN:
print "ref can be at most %s characters" % REF_MAX_LEN
sys.exit(2)
re1 = re.compile(r"[\w.-]*$")
if not re1.match(ref):
print "ref cannot contain any special characters other than . and -"
sys.exit(2)
if args.archive:
archives = args.archive.split(",")
else:
staging_directory = get_staging_directory(project_file, config_name)
archives = get_archives_in_folder(staging_directory)
if len(archives) == 0:
print "No archives found in folder '%s'. Nothing to publish!" % staging_directory
sys.exit(2)
index_file = get_index(s3region, s3bucket, s3path)
for archive in archives:
if not os.path.exists(archive):
print "Archive '%s' not found. Cannot publish" % archive
sys.exit(1)
for archive in archives:
target_platform = archive.replace("\\", ".").split(".")[-2]
print "Publishing target platform '%s'" % target_platform
build_manifest = create_build_manifest(
build_number=index_file['next_build_number'],
repository=s3path,
ref=args.ref,
project=project_name,
target_platform=target_platform,
config=config_name,
version_string=args.version_string,
executable_path=executable_path,
)
publish_build(archive, build_manifest, s3region, s3bucket, s3path)
build_manifests.append(build_manifest)
if build_manifests:
print "Build manifests:"
for build_manifest in build_manifests:
print json.dumps(build_manifest, indent=4)
print
for build_manifest in build_manifests:
print "Archive URL: %s" % build_manifest['archive_url']
elif args.cmd == 'list':
list_builds(s3region, s3bucket, s3path)
|
mit
| 7,270,144,388,517,457,000
| 36.702997
| 170
| 0.610176
| false
| 3.811846
| true
| false
| false
|
brianloveswords/django-badger
|
setup.py
|
1
|
1111
|
from setuptools import setup
setup(
name='django-badger',
version='0.0.1',
    description='Django app for managing and awarding badges',
long_description=open('README.rst').read(),
author='Leslie Michael Orchard',
author_email='me@lmorchard.com',
url='http://github.com/lmorchard/django-badger',
license='BSD',
packages=['badger', 'badger.templatetags', 'badger.management', 'badger.management.commands', 'badger.migrations'],
package_data={'badger': ['fixtures/*', 'templates/badger_playdoh/*.html', 'templates/badger_playdoh/includes/*.html', 'templates/badger_vanilla/*.html', 'templates/badger_vanilla/includes/*.html']},
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
# I don't know what exactly this means, but why not?
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
bsd-3-clause
| 3,426,159,240,017,291,300
| 41.730769
| 202
| 0.656166
| false
| 3.844291
| false
| false
| false
|
paulsbrookes/cqed_sims_qutip
|
spectroscopy/spec_anim.py
|
1
|
6723
|
import numpy as np
from qutip import *
from pylab import *
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import yaml
from scipy.interpolate import interp1d
class parameters:
def __init__(self, wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels):
self.wc = wc
self.wq = wq
self.eps = eps
self.g = g
self.chi = chi
self.gamma = gamma
self.kappa = kappa
self.t_levels = t_levels
self.c_levels = c_levels
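# The Hamiltonian below is the driven cavity-transmon model written in the
# frame rotating at the drive frequency wd: detuned cavity and qubit terms,
# a transmon anharmonicity term proportional to chi, a Jaynes-Cummings
# coupling g and a coherent cavity drive of amplitude eps.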
def hamiltonian(params, wd):
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
H = - (params.wc - wd) * a.dag() * a - (params.wq - wd) * sm.dag() * sm \
+ params.chi * sm.dag() * sm * (sm.dag() * sm - 1) + params.g * (a.dag() * sm + a * sm.dag()) \
+ params.eps * (a + a.dag())
return H
def transmission_calc_array(params, wd_points):
transmissions = parallel_map(transmission_calc, wd_points, (params,), num_cpus = 10)
transmissions = np.array(transmissions)
return transmissions
def transmission_calc(wd, params):
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
c_ops = []
c_ops.append(np.sqrt(params.kappa) * a)
c_ops.append(np.sqrt(params.gamma) * sm)
H = hamiltonian(params, wd)
rho_ss = steadystate(H, c_ops)
transmission = expect(a, rho_ss)
return transmission
def new_points(wd_points, transmissions, threshold):
metric_vector = curvature_vector(wd_points, transmissions)
indices = np.array([index for index, metric in enumerate(metric_vector) if metric > threshold]) + 1
new_wd_points = generate_points(wd_points, indices)
return new_wd_points
def generate_points(wd_points, indices):
n_points = 6
new_wd_points = np.array([])
for index in indices:
multi_section = np.linspace(wd_points[index - 1], wd_points[index + 1], n_points)
new_wd_points = np.concatenate((new_wd_points, multi_section))
unique_set = set(new_wd_points) - set(wd_points)
new_wd_points_unique = np.array(list(unique_set))
return new_wd_points_unique
def curvature_vector(wd_points, transmissions):
is_ordered = all([wd_points[i] <= wd_points[i + 1] for i in xrange(len(wd_points) - 1)])
assert is_ordered, "Vector of wd_points is not ordered."
assert len(wd_points) == len(transmissions), "Vectors of wd_points and transmissions are not of equal length."
metric_vector = []
for index in range(len(wd_points) - 2):
metric = curvature(wd_points[index:index + 3], transmissions[index:index + 3])
metric_vector.append(metric)
return metric_vector
def curvature(wd_triplet, transmissions_triplet):
    wd_are_floats = all([isinstance(wd_triplet[i], float) for i in xrange(len(wd_triplet))])
    assert wd_are_floats, "The vector wd_triplet contains numbers which are not floats."
    transmissions_are_floats = all([isinstance(transmissions_triplet[i], float) \
                                    for i in xrange(len(transmissions_triplet))])
assert transmissions_are_floats, "The vector transmissions_triplet contains numbers which are not floats."
wd_delta_0 = wd_triplet[1] - wd_triplet[0]
wd_delta_1 = wd_triplet[2] - wd_triplet[1]
transmissions_delta_0 = transmissions_triplet[1] - transmissions_triplet[0]
transmissions_delta_1 = transmissions_triplet[2] - transmissions_triplet[1]
metric = 2 * (wd_delta_1 * transmissions_delta_1 - wd_delta_0 * transmissions_delta_0) / (wd_delta_0 + wd_delta_1)
abs_normalised_metric = np.absolute(metric / transmissions_triplet[1])
return abs_normalised_metric
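# Together, curvature() and new_points() implement a simple adaptive sweep:
# wherever the normalised second-difference metric of the transmission exceeds
# `threshold`, generate_points() inserts n_points extra drive frequencies around
# that sample, so the scan is refined near sharp features such as resonances,
# while flat regions (metric close to zero) are left alone.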
def y_lim_calc(y_points):
buffer_fraction = 0.1
y_max = np.amax(y_points)
y_min = np.amin(y_points)
    span = y_max - y_min  # renamed from `range` to avoid shadowing the built-in
    y_lim_u = y_max + buffer_fraction * span
    y_lim_l = y_min - buffer_fraction * span
return np.array([y_lim_l, y_lim_u])
def sweep(eps, wd_lower, wd_upper, params, fidelity):
params.eps = eps
save = 1
wd_points = np.linspace(wd_lower, wd_upper, 10)
transmissions = transmission_calc_array(params, wd_points)
abs_transmissions = np.absolute(transmissions)
new_wd_points = new_points(wd_points, abs_transmissions, fidelity)
fig, ax = plt.subplots(1, 1)
ax.set_xlim(wd_lower, wd_upper)
y_limits = y_lim_calc(abs_transmissions)
ax.set_ylim(y_limits[0], y_limits[1])
ax.set_xlabel('Cavity drive frequency (GHz)')
ax.set_ylabel('|<a>|')
ax.hold(True)
plt.show(False)
plt.draw()
background = fig.canvas.copy_from_bbox(ax.bbox)
points = ax.plot(wd_points, abs_transmissions, 'o')[0]
while (len(new_wd_points) > 0):
new_transmissions = transmission_calc_array(params, new_wd_points)
new_abs_transmissions = np.absolute(new_transmissions)
wd_points = np.concatenate([wd_points, new_wd_points])
        transmissions = np.concatenate([transmissions, new_transmissions])
        abs_transmissions = np.concatenate([abs_transmissions, new_abs_transmissions])
sort_indices = np.argsort(wd_points)
wd_points = wd_points[sort_indices]
transmissions = transmissions[sort_indices]
abs_transmissions = abs_transmissions[sort_indices]
new_wd_points = new_points(wd_points, abs_transmissions, fidelity)
points.set_data(wd_points, abs_transmissions)
fig.canvas.restore_region(background)
ax.draw_artist(points)
fig.canvas.blit(ax.bbox)
y_limits = y_lim_calc(abs_transmissions)
ax.set_ylim(y_limits[0], y_limits[1])
if save == 1:
np.savetxt('results/abs_transmissions.csv', abs_transmissions, delimiter=',')
np.savetxt('results/drive_frequencies.csv', wd_points, delimiter=',')
params_dic = {'f_c': params.wc,
'f_q': params.wq,
'epsilon': params.eps,
'g': params.g,
'kappa': params.kappa,
'gamma': params.gamma,
'transmon_levels': params.t_levels,
'cavity_levels': params.c_levels}
with open('results/parameters.yml', 'w') as outfile: yaml.dump(params_dic, outfile, default_flow_style = True)
plt.scatter(wd_points, abs_transmissions)
plt.show()
if __name__ == '__main__':
#wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels
params = parameters(10.4262, 9.4914, 0.0001, 0.275, -0.097, 0.00146, 0.000833, 2, 10)
eps = 0.0001
fidelity = 0.05
wd_lower = 10.4
wd_upper = 10.55
sweep(eps, wd_lower, wd_upper, params, fidelity)
|
apache-2.0
| 1,119,140,199,306,191,900
| 37.861272
| 118
| 0.638554
| false
| 3.171226
| false
| false
| false
|
marpaia/chef-osx
|
cookbooks/python/files/default/ipython/profile_default/ipython_config.py
|
1
|
16895
|
# Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.InteractiveShellApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# Whether to display a banner upon starting IPython.
c.TerminalIPythonApp.display_banner = False
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.TerminalIPythonApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.TerminalIPythonApp.pylab_import_all = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u'/Users/marpaia/.ipython'
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s] %(message)s'
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vim'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.5 (default, Jun 1 2013, 01:36:25) \nType "copyright", "credits" or "license" for more information.\n\nIPython 0.13.2 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
#
# c.HistoryManager.db_log_output = False
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
#
# c.HistoryManager.db_cache_size = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
|
apache-2.0
| 5,318,918,276,306,134,000
| 35.333333
| 418
| 0.671974
| false
| 3.89555
| true
| false
| false
|
sevazhidkov/leonard
|
jobs/return_messages.py
|
1
|
3006
|
import os
import random
import logging
import arrow
import telegram
from telegram.error import Unauthorized
from leonard import Leonard
from modules.menu import GREETING_PHRASES
from libs.timezone import local_time
from libs.utils import FakeMessage
telegram_client = telegram.Bot(os.environ['BOT_TOKEN'])
bot = Leonard(telegram_client)
bot.collect_plugins()
RETURN_MESSAGE_HOURS = list(range(11, 20))
RETURN_MESSAGE = '{} {}\n{}'
HOUR_MESSAGES = [(range(11, 17), 'Have a nice day ❤️'),
(range(17, 20), 'Good evening!')]
ASSIST_MESSAGES = ['By the way, if you have problems with me, you can write my developer @sevazhidkov',
'You can unsubscribe from such messages using Subscriptions 📬']
def main():
    # NOTE: this unconditional return disables the whole job; remove it to
    # re-enable sending return messages.
    return
for key in bot.redis.scan_iter(match='user:*:registered'):
if bot.redis.get(key).decode('utf-8') != '1':
# TODO: Add reminder about registration
continue
_, u_id, _ = key.decode('utf-8').split(':')
status = bot.user_get(u_id, 'notifications:returns:messages')
if status == '0':
continue
time = local_time(bot, int(u_id))
if time.hour not in RETURN_MESSAGE_HOURS:
continue
if bot.user_get(u_id, 'return_sent'):
continue
return_hour = bot.user_get(u_id, 'return_hour')
if return_hour and time.hour != int(return_hour):
continue
elif not return_hour:
# Choose hour for return message
hour = random.choice(RETURN_MESSAGE_HOURS)
bot.user_set(u_id, 'return_hour', hour, ex=len(RETURN_MESSAGE_HOURS) * 60 * 60)
if hour != time.hour:
continue
last_interaction = arrow.get(bot.user_get(u_id, 'last_interaction') or time)
interaction_delta = time - last_interaction
if interaction_delta and last_interaction.replace(hours=+1) > time:
continue
bot.logger.info('Checking return message to: {}, where list: {}'.format(
u_id, ([0] * round(interaction_delta.days / 2) + [0]) + [1, 1]
))
        # The longer the user has been inactive, the more zeros in this list,
        # so the chance of actually sending a return message goes down.
        result = random.choice(([0] * round(interaction_delta.days / 2) + [0]) + [1, 1])
bot.user_set(u_id, 'return_sent', time.timestamp, ex=len(RETURN_MESSAGE_HOURS) * 60 * 60)
if result != 1:
continue
m = FakeMessage()
m.u_id = u_id
for interval, message in HOUR_MESSAGES:
if time.hour in interval:
hour_message = message
try:
bot.call_handler(m, 'main-menu', phrase=RETURN_MESSAGE.format(
hour_message, random.choice(GREETING_PHRASES), random.choice(ASSIST_MESSAGES)
))
except Unauthorized:
bot.logger.warning('Unauthorized for {}'.format(u_id))
except Exception as error:
bot.logger.error(error)
if __name__ == '__main__':
try:
main()
except Exception as e:
bot.logger.error(e)
|
mit
| 8,447,620,987,240,022,000
| 31.956044
| 103
| 0.592197
| false
| 3.604567
| false
| false
| false
|
espdev/readthedocs.org
|
readthedocs/projects/signals.py
|
1
|
1499
|
"""Project signals"""
import logging
import django.dispatch
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from readthedocs.oauth.services import registry
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
project_import = django.dispatch.Signal(providing_args=["project"])
log = logging.getLogger(__name__)
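# A minimal sketch (receiver name is hypothetical) of how other apps can hook
# these signals, mirroring the handle_project_import receiver below:
#
#     @receiver(before_build)
#     def log_build_start(sender, version, **kwargs):
#         log.info('Build starting: %s', version)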
@receiver(project_import)
def handle_project_import(sender, **kwargs):
"""Add post-commit hook on project import"""
project = sender
request = kwargs.get('request')
_set = False
_service = None
for service_cls in registry:
if service_cls.is_project_service(project):
for service in service_cls.for_user(request.user):
_service = service
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
_set = True
else:
messages.error(request, _('Webhook configuration failed'))
if not _set and _service:
messages.error(
request,
_('No accounts available to set webhook on. '
'Please connect your %s account.' % _service.get_adapter()().get_provider().name)
)
|
mit
| 8,835,816,446,431,787,000
| 30.893617
| 95
| 0.651768
| false
| 4.258523
| false
| false
| false
|
alexis-roche/niseg
|
examples/partial_volume_estimation.py
|
1
|
1753
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Script example of partial volume estimation
"""
from argparse import ArgumentParser
import numpy as np
import nibabel as nb
from niseg import BrainT1PVE
# Parse command line
description = 'Estimate brain tissue concentrations of CSF, GM and WM from a \
skull-stripped T1 image. If no mask image is provided, the mask is defined \
by thresholding the input image above zero (strictly).'
parser = ArgumentParser(description=description)
parser.add_argument('img', metavar='img', nargs='+', help='input image')
parser.add_argument('--mask', dest='mask', help='mask image')
parser.add_argument('--niters', dest='niters', type=int,
                    help='number of iterations (default=%d)' % 25)
parser.add_argument('--beta', dest='beta', type=float,
                    help='Spatial smoothness beta parameter (default=%f)' % 0.5)
parser.add_argument('--ngb_size', dest='ngb_size', type=int,
                    help='Grid neighborhood system (default=%d)' % 6)
args = parser.parse_args()
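# Example invocation (file names are hypothetical):
#   python partial_volume_estimation.py t1_skullstripped.nii.gz \
#       --mask brain_mask.nii.gz --niters 30 --beta 0.5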
def get_argument(dest, default):
val = args.__getattribute__(dest)
    if val is None:
return default
else:
return val
# Input image
img = nb.load(args.img[0])
# Input mask image
mask_img = get_argument('mask', None)
if mask_img is None:
mask_img = img
else:
mask_img = nb.load(mask_img)
mask = mask_img.get_data() > 0
# Other optional arguments
niters = get_argument('niters', 25)
beta = get_argument('beta', None)
ngb_size = get_argument('ngb_size', 6)
# Perform tissue classification
PV = BrainT1PVE(img, mask=mask, beta=beta, ngb_size=ngb_size)
PV.run(niters=niters, print_parameters=True)
# Save tissue concentration maps
PV.save('temp')
|
bsd-3-clause
| 9,000,726,666,906,467,000
| 28.711864
| 88
| 0.697661
| false
| 3.187273
| false
| false
| false
|
ShrimpingIt/tableaux
|
depictions/umbrella/main.py
|
1
|
2106
|
from os import urandom
from time import sleep
from neopixel import NeoPixel
from machine import Pin
from math import floor
dataPin = Pin(13)
ledCount = 27
np = NeoPixel(dataPin, ledCount)
def blank():
for pos in range(ledCount):
np[pos]=(0,0,0)
    np.write()
def visitAll():
for pos in range(ledCount):
blank()
np[pos]=(0,0,255)
np.write()
print(pos)
input('...')
def log2_approx(val):
    # Return the bit length of floor(val): the index of its highest set bit
    # plus one (roughly floor(log2(val)) + 1), or 0 when val is 0.
    val = floor(val)
approx = 0
while val != 0:
val &= ~ (1<<approx)
approx = approx + 1
return approx
def rand_int(bound):
    byteCount = (log2_approx(bound) // 8) + 1  # each byte of urandom supplies 8 bits
val = 0
for idx, entry in enumerate(bytearray(urandom(byteCount))):
val |= entry << (idx * 8)
return val % bound
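# For illustration: rand_int(10) draws one byte from urandom and reduces it
# modulo 10; the slight modulo bias is acceptable for this animation.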
blue = (0,0,255)
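# Each entry in `sequences` is the ordered list of NeoPixel indices that one
# raindrop column visits as it falls.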
sequences = [
[0],
[17,1],
[18,2,16],
[19,3,15],
[20,4,14,12],
[21,5,13,11],
[23,6,10],
[24,7,9],
[25,8],
[26],
]
positions = [-1 for entry in sequences]  # -1 means no active drop in that column
# LED indices presumably lying under the umbrella canopy; a drop reaching a
# sheltered position is retired early (sheltering is currently disabled below).
under = [
3, 15,
4, 14, 12,
5, 13, 11,
6, 10
]
#sheltered = under
sheltered = []
d0 = Pin(16, Pin.OUT)
d1 = Pin(5, Pin.OUT)
d0.high()
d1.high()
'''
d1 = Pin(5, Pin.OUT)
pwm1 = PWM(d1)
pwm1.freq(1000)
pwm1.duty(256)
ramp = range(0,1024, 8)
while True:
for duty in ramp:
pwm1.duty(duty)
sleep(0.05)
for duty in reversed(ramp):
pwm1.duty(duty)
sleep(0.05)
'''
def run():
while True:
blank()
for index, sequence in enumerate(sequences):
# retrieve activity for this drop
position = positions[index]
if position == -1:
# inactive drops sometimes become active (at 0)
if rand_int(2) == 0:
position = 0
else:
position = position + 1 # previously active drops fall one more step
if position == len(sequence): # drops falling off the bottom become inactive
position = -1
elif sequence[position] in sheltered: # drops going into sheltered area become inactive
position = -1
# light any active lights
if position != -1:
pixel = sequence[position]
np[pixel] = blue
# store activity for this drop for next time round loop
positions[index] = position
np.write()
sleep(0.05)
run()
|
agpl-3.0
| 2,675,218,914,456,920,600
| 16.697479
| 91
| 0.625356
| false
| 2.603214
| false
| false
| false
|
fullphat/redsquare
|
support/blink1.py
|
1
|
8924
|
#!/usr/bin/env python
import sys
import time
import re
import sys
import uuid
debugimport=False
use_pyusb=False
try:
print "[blink1]: trying blink1_pyusb..."
from blink1_pyusb import Blink1 as Blink1_pyusb
print "[blink1]: using blink1_pyusb"
use_pyusb = True
#sys.modules['Blink1'] = blink1_pyusb
except ImportError:
try:
print "[blink1]: couldn't load blink1_pyusb, trying blink1_ctypes..."
from blink1_ctypes import Blink1 as Blink1_ctypes
#sys.modules['Blink1'] = blink1_ctypes
print "[blink1]: using blink1_ctypes"
except ImportError:
print "[blink1]: Failed to load blink1_pyusb or blink1_ctypes"
print "[blink1]: Try installing pyusb using 'sudo pip install pyusb'"
sys.exit(1)
hostid = uuid.uuid4().hex[:8]
class Blink1:
'''
Object wrapper class.
    This is a wrapper for objects. It is initialised with the object to wrap
and then proxies the unhandled getattribute methods to it.
Other classes are to inherit from it.
'''
def __init__(self, unit=0):
'''
Wrapper constructor.
'''
# wrap the object
if use_pyusb :
blink1 = Blink1_pyusb(unit)
else :
blink1 = Blink1_ctypes()
self._wrapped_obj = blink1
def __getattr__(self, attr):
# see if this object has attr
# NOTE do not use hasattr, it goes into
        # infinite recursion
if attr in self.__dict__:
# this object has it
return getattr(self, attr)
# proxy to the wrapped object
try :
return getattr(self._wrapped_obj, attr)
except Exception:
print "****** error!"
return None
# FIXME: can't overload methods?
# def fade_to_rgb(self, millis, colorstr):
# rgb = Blink1.parse_color_string(colorstr)
# self._wrapped_obj.fade_to_rgb(millis, rgb[0], rgb[1], rgb[2])
def get_hostid(self): # FIXME
return hostid
def get_blink1id(self):
return self.get_hostid() + self.get_serialnumber()
@classmethod
def parse_color_string(cls,rgbstr):
        '''
        Parse a color string such as '#ffcc00' or '255,0,0' (decimal or hex
        triplets) into an (r, g, b) tuple; return None if it cannot be parsed.
        '''
rgbstr = rgbstr.lower()
rgb = None
# match hex color code "#FFcc00"
m = re.search(r"#([0-9a-f]{6})", rgbstr)
if m:
rgb = tuple(ord(c) for c in m.group(1).decode('hex'))
else:
# match color triplets like "255,0,0" and "0xff,0xcc,0x33"
m = re.search(r"(0x[\da-f]+|\d+),(0x[\da-f]+|\d+),(0x[\da-f]+|\d+)",
rgbstr)
if m:
rgb = tuple(int(c,0) for c in m.groups())
return rgb
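        # For illustration:
        #   Blink1.parse_color_string('#ff0000')        -> (255, 0, 0)
        #   Blink1.parse_color_string('0xff,0xcc,0x33') -> (255, 204, 51)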
class Blink1Pattern:
def __init__(self):
self.repeats = 0
self.colors = []
self.times = []
def __repr__(self):
return "{ 'repeats': "+ repr(self.repeats) +", 'colors': "+repr(self.colors)+", 'times':"+repr(self.times) +" }"
def __str__(self):
#strtimes = "".join( str(n) for n in self.times )
return "{ 'repeats': "+ str(self.repeats) +", 'colors': "+str(self.colors)+", 'times': [" + ", ".join( str(n) for n in self.times ) +"] }"
@classmethod
def parse_pattern_string(cls, pattstr):
'''
parse color patterns in the format: '3, #ff00ff, 1.5 ,#000000, 1.0'
'''
print "parse_pattern_string:"+pattstr
vals = pattstr.split(',')
if( len(vals) % 2 == 0 ) : # even is bad, must be odd
print "bad patternstr: "+pattstr
else:
patt = Blink1Pattern()
            patt.repeats = int(vals[0])
            # colors are every other element starting at position 1,
            # times are every other element starting at position 2
            patt.colors = map( Blink1.parse_color_string, vals[1::2])
            patt.times = [float(m) for m in vals[2::2]]
return patt
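        # For illustration, parse_pattern_string('2,#ff0000,0.5,#000000,0.5')
        # gives repeats=2, colors=[(255, 0, 0), (0, 0, 0)] and times=[0.5, 0.5].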
def demo(blink1):
    '''Cycle the blink(1) through a short demo color sequence.'''
print "blink1 version: "+ blink1.get_version()
democolors = [ (255, 0, 0), # red
( 0,255, 0), # grn
( 0, 0,255), # blu
(255,255, 0), # yellow
( 0,255,255), # cyan
(255, 0,255), # magenta
( 0, 0, 0), # off
]
demo_millis = 200
for rgb in democolors:
(r,g,b) = rgb
print "fading to %3i,%3i,%3i" % (r,g,b)
blink1.fade_to_rgbn( demo_millis/2, r,g,b, 0 )
time.sleep( demo_millis/1000.0 )
blink1.fade_to_rgbn( demo_millis/2, 0,0,0, 0 )
time.sleep( demo_millis/1000.0 )
def main():
    '''Command-line entry point.'''
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--demo',
action='store_const', dest='cmd',const='demo',
help='run simple demo')
parser.add_option('--version',
action='store_const', dest='cmd',const='version',
help='return firmware version')
parser.add_option('--hostid',
action='store_const', dest='cmd',const='hostid',
help='return hostid')
parser.add_option('--blink',
dest='blink',default=0, type='int',
help='blink specified number of times')
parser.add_option('--play',
dest='play',default=0, type='string',
help='play built-in light sequence')
parser.add_option('--patt',
dest='patt',default=0, type='string',
help='play specified color pattern')
parser.add_option('--rgb', default='',
dest='rgb',
help="the RGB color to use")
parser.add_option('-l', '--led', default=0, type='int',
dest='ledn',
help="which LED to use (default=both)")
parser.add_option('-m', '--millis', default=300, type='int',
dest='fade_millis',
help="fade millis for color commands")
parser.add_option('-t', '--delay', default=500, type='int',
dest='delay_millis',
help="millis between commands like blink, random, etc.")
parser.add_option('--debug',action="store_true", dest='debug' )
parser.add_option('--on', action="store_const",dest='rgb',const="#FFFFFF")
parser.add_option('--off', action="store_const",dest='rgb',const="#000000")
parser.add_option('--red', action="store_const",dest='rgb',const="#FF0000")
parser.add_option('--green',action="store_const",dest='rgb',const="#00FF00")
parser.add_option('--blue', action="store_const",dest='rgb',const="#0000FF")
(options, args) = parser.parse_args()
rgbstr = options.rgb
fade_millis = options.fade_millis
ledn = options.ledn
rgb = Blink1.parse_color_string( rgbstr )
debug_rw = options.debug
#print "rgbval:%s millis:%i ledn:%i " % (repr(rgb),fade_millis,ledn)
#
blink1 = Blink1()
if blink1.dev == None :
print("no blink1 found")
# blink command (takes an argument of number of blinks)
if options.blink :
if not rgb : rgb = (255,255,255)
for i in range(0,options.blink):
blink1.fade_to_rgbn( fade_millis, rgb[0],rgb[1],rgb[2], ledn)
time.sleep( options.delay_millis / 1000.0 )
blink1.fade_to_rgbn( fade_millis, 0,0,0, ledn)
time.sleep( options.delay_millis / 1000.0 )
elif options.play :
play = map(int, options.play.split(',')) # convert str list to int list
#print "play: "+repr(options.play) + ','+repr(play)
play.extend( [0] * (4 - len(play)) ) # make list fixed size, seems dumb
blink1.playloop( play[0], play[1], play[2], play[3] )
elif options.patt :
blink1patt = Blink1Pattern.parse_pattern_string(options.patt)
print "playing pattern: "+ str(blink1patt)
for i in range(blink1patt.repeats):
for j in range(len(blink1patt.colors)):
color = blink1patt.colors[j]
millis = int( blink1patt.times[j] * 1000 )
print "color: "+str(color) +", millis: "+ str(millis)
blink1.fade_to_rgb( millis/2, color[0], color[1], color[2])
time.sleep( millis / 1000.0 )
elif options.cmd == 'version':
print "version: "+ blink1.get_version()
elif options.cmd == 'hostid':
print "hostid: "+ blink1.get_hostid()
elif options.cmd == 'demo' :
demo(blink1)
elif options.cmd == None and rgb :
print "fading to #%02x%02x%02x" % (rgb) + " in %d msec" % fade_millis
blink1.fade_to_rgbn( fade_millis, rgb[0],rgb[1],rgb[2], ledn)
else:
parser.print_help()
if __name__ == "__main__":
sys.exit(main())
|
mit
| -756,769,589,801,679,400
| 32.051852
| 146
| 0.532721
| false
| 3.42572
| false
| false
| false
|
AddonScriptorDE/plugin.video.trailerseite_de
|
default.py
|
1
|
11540
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import socket
import re
import sys
import xbmcplugin
import xbmcaddon
import xbmcgui
import xbmc
socket.setdefaulttimeout(30)
pluginhandle = int(sys.argv[1])
addonId = 'plugin.video.trailerseite_de'
addon = xbmcaddon.Addon(id=addonId)
translation = addon.getLocalizedString
xbox = xbmc.getCondVisibility("System.Platform.xbox")
maxVideoQuality = str(addon.getSetting("maxVideoQuality"))
showAllTrailers = addon.getSetting("showAllTrailers") == "true"
forceViewMode = addon.getSetting("forceViewMode") == "true"
viewMode = str(addon.getSetting("viewMode"))
baseUrl = "http://www.trailerseite.de"
def index():
addDir(translation(30001), baseUrl+"/kino/neustarts-film-trailer.html", 'listMoviesMain', "")
addDir(translation(30002), baseUrl+"/kino/film-trailer-vorschau.html", 'listMoviesMain', "")
addDir(translation(30003), baseUrl+"/kino/highlights-film-trailer.html", 'listMoviesMain', "")
addDir(translation(30004), baseUrl+"/kino/arthouse-film-trailer.html", 'listMoviesMain', "")
addDir(translation(30005), baseUrl+"/kino/charts/deutsche-kino-top-10.html", 'listMoviesMain', "")
addDir(translation(30006), baseUrl+"/kino/charts/us-kino-top-10.html", 'listMoviesMain', "")
addDir(translation(30007), baseUrl+"/kino/charts/arthouse-kino-top-10.html", 'listMoviesMain', "")
addDir(translation(30015), "http://feeds.feedburner.com/updates?format=xml", 'listLastTrailer', "")
addDir(translation(30016), "http://feeds.feedburner.com/updates?format=xml", 'listLastVideos', "")
addDir(translation(30014), baseUrl+"/kino/starttermine-kinofilme-24075.html", 'listMoviesDate', "")
addDir(translation(30008), baseUrl+"/kino/film-trailer-a-z.html", 'listMoviesAZ', "")
addDir(translation(30009), baseUrl+"/trailer-dvd/neustarts/", 'listMoviesMain', "")
addDir(translation(30010), baseUrl+"/trailer-dvd/dvd-vorschau.html", 'listMoviesMain', "")
addDir(translation(30011), baseUrl+"/trailer-dvd/dvd-top-10.html", 'listMoviesMain', "")
addDir(translation(30012), baseUrl+"/filmkritiken/16007-film-specials.html", 'listMoviesMain', "")
addDir("Der ehrliche Dennis", baseUrl+"/der-ehrliche-dennis/index.html", 'listMoviesMain', "")
xbmcplugin.endOfDirectory(pluginhandle)
def listMoviesMain(url):
content = getUrl(url)
spl = content.split('<div class="expoteaser">')
listMovies(url, spl)
spl = content.split('<div class="teasermultiple">')
listMovies(url, spl)
spl = content.split('<div class="rightteaser">')
listMovies(url, spl)
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode:
xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listMovies(mainUrl, spl):
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
url = baseUrl+match[0]
match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumb = baseUrl+"/"+match[0]
thumbNew = thumb.replace("-expo.jpg", ".jpg").replace("-right.jpg", ".jpg").replace(".jpg", "-right.jpg")
req = urllib2.Request(thumbNew)
try:
urllib2.urlopen(req)
thumb = thumbNew
except:
thumbNew = thumb.replace("-expo.jpg", ".jpg").replace("-right.jpg", ".jpg").replace(".jpg", "-expo.jpg")
req = urllib2.Request(thumbNew)
try:
urllib2.urlopen(req)
thumb = thumbNew
except:
pass
if showAllTrailers and mainUrl not in [baseUrl+"/der-ehrliche-dennis/index.html", baseUrl+"/filmkritiken/16007-film-specials.html"]:
addDir(title, url, 'listTrailers', thumb)
else:
addLink(title, url, 'playVideo', thumb, "")
def listTrailers(url, name, thumb):
content = getUrl(url)
spl = content.split('<div class="extraplayer">')
addLink(name+" Trailer", url, 'playVideo', thumb, "")
for i in range(1, len(spl), 1):
entry = spl[i]
if 'class="aFLVPlayer"' not in entry:
entry = entry[entry.find("<a href=")+1:]
match = re.compile('<a href="(.+?)">(.+?)</a>', re.DOTALL).findall(entry)
url = match[0][0]
title = match[0][1]
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumb = match[0]
addLink(title, url, 'playVideo', thumb, "")
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode:
xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listLastTrailer(url):
content = getUrl(url)
spl = content.split('<item>')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('<link>(.+?)</link>', re.DOTALL).findall(entry)
url = match[0]
match = re.compile('<title>(.+?)</title>', re.DOTALL).findall(entry)
title = match[0]
match = re.compile('<date>(.+?)-(.+?)-(.+?) ', re.DOTALL).findall(entry)
month = match[0][1]
day = match[0][2]
title = day+"."+month+" - "+title
if '/film/' in url and "Trailer" in title:
addLink(title, url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def listLastVideos(url):
content = getUrl(url)
spl = content.split('<item>')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('<link>(.+?)</link>', re.DOTALL).findall(entry)
url = match[0]
match = re.compile('<title>(.+?)</title>', re.DOTALL).findall(entry)
title = match[0]
match = re.compile('<date>(.+?)-(.+?)-(.+?) ', re.DOTALL).findall(entry)
month = match[0][1]
day = match[0][2]
title = day+"."+month+" - "+title
if '/film/' in url and "Trailer" not in title:
addLink(title, url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def listMoviesAZ(url):
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
content = getUrl(url)
content = content[content.find('<div class="abhaken">'):]
content = content[:content.find('</table>'):]
match = re.compile('<a href="(.+?)" title=".+?" >(.+?)</a>', re.DOTALL).findall(content)
for url, title in match:
match2 = re.compile('<a href=".+?" title="(.+?)"', re.DOTALL).findall(title)
if match2:
title = cleanTitle(match2[0][0])
else:
title = cleanTitle(title)
if showAllTrailers:
addDir(title, baseUrl+url, 'listTrailers', "")
else:
addLink(title, baseUrl+url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def listMoviesDate(url):
content = getUrl(url)
spl = content.split('<div class="textbox-white">')
for i in range(1, len(spl), 1):
entry = spl[i].replace('">', '" title="TEST" >')
entry = entry[:entry.find("</tr>")]
match = re.compile('<h3>Ab (.+?).20', re.DOTALL).findall(entry)
date = match[0]
match = re.compile('<a href="(.+?)" title=".+?" >(.+?)</a>', re.DOTALL).findall(entry)
for url, title in match:
title = date+" - "+cleanTitle(title)
if showAllTrailers:
addDir(title, baseUrl+url, 'listTrailers', "")
else:
addLink(title, baseUrl+url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def playVideo(url):
content = getUrl(url)
matchDM = re.compile('src="http://www.dailymotion.com/embed/video/(.+?)\\?', re.DOTALL).findall(content)
content = content[content.find('<div class="flashplayer">'):]
matchSD = re.compile('href="(.+?)"', re.DOTALL).findall(content)
matchHD = re.compile('<a class="aFLVPlayer" href="(.+?)"></a>', re.DOTALL).findall(content)
streamUrl = ""
if matchHD and maxVideoQuality == "1":
streamUrl = matchHD[0]
elif matchSD:
streamUrl = matchSD[0]
elif matchDM:
streamUrl = getDailyMotionUrl(matchDM[0])
listitem = xbmcgui.ListItem(path=streamUrl)
xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
def queueVideo(url, name):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
listitem = xbmcgui.ListItem(name)
playlist.add(url, listitem)
def getDailyMotionUrl(id):
if xbox:
url = "plugin://video/DailyMotion.com/?url="+id+"&mode=playVideo"
else:
url = "plugin://plugin.video.dailymotion_com/?url="+id+"&mode=playVideo"
return url
def cleanTitle(title):
title = title.replace("<", "<").replace(">", ">").replace("&", "&").replace("&", "&").replace("'", "'")
title = title.replace("'", "'").replace("–", "-").replace("“", "-").replace("”", "-").replace("’", "'")
title = title.replace(""", "\"").replace("ü", "ü").replace("ä", "ä").replace("ö", "ö")
title = title.replace("Trailer", "").strip()
return title
def getUrl(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:19.0) Gecko/20100101 Firefox/19.0')
response = urllib2.urlopen(req)
link = response.read()
response.close()
return link
def parameters_string_to_dict(parameters):
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
def addLink(name, url, mode, iconimage, desc):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+urllib.quote_plus(mode)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": desc})
liz.setProperty('IsPlayable', 'true')
liz.addContextMenuItems([(translation(30013), 'RunPlugin(plugin://'+addonId+'/?mode=queueVideo&url='+urllib.quote_plus(u)+'&name='+urllib.quote_plus(name)+')',)])
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
def addDir(name, url, mode, iconimage):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+urllib.quote_plus(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={"Title": name})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
name = urllib.unquote_plus(params.get('name', ''))
thumb = urllib.unquote_plus(params.get('thumb', ''))
if mode == 'listMoviesMain':
listMoviesMain(url)
elif mode == 'listLastTrailer':
listLastTrailer(url)
elif mode == 'listLastVideos':
listLastVideos(url)
elif mode == 'listVideosCharts':
listVideosCharts(url)
elif mode == 'listMoviesAZ':
listMoviesAZ(url)
elif mode == 'listMoviesDate':
listMoviesDate(url)
elif mode == 'listTrailers':
listTrailers(url, name, thumb)
elif mode == 'playVideo':
playVideo(url)
elif mode == 'queueVideo':
queueVideo(url, name)
else:
index()
|
gpl-2.0
| -1,058,085,944,360,981,600
| 39.911348
| 166
| 0.623039
| false
| 3.307626
| false
| false
| false
|
bmmalone/as-auto-sklearn
|
as_asl/train_oasc_models.py
|
1
|
2559
|
#! /usr/bin/env python3
import argparse
import misc.automl_utils as automl_utils
import misc.parallel as parallel
import as_asl.as_asl_command_line_utils as clu
import as_asl.as_asl_filenames as filenames
import as_asl.as_asl_utils as as_asl_utils
from as_asl.as_asl_ensemble import ASaslPipeline
import logging
import misc.logging_utils as logging_utils
logger = logging.getLogger(__name__)
def _log_info(msg, scenario_name, fold):
msg = "[{}, fold {}]: {}".format(scenario_name, fold, msg)
logger.info(msg)
def _outer_cv(fold, args, config):
msg = "loading the scenario"
_log_info(msg, args.scenario, fold)
scenario_name, scenario = automl_utils.load_scenario(args.scenario)
msg = "extracting fold training data"
_log_info(msg, scenario_name, fold)
testing, training = scenario.get_split(fold)
msg = "constructing and fitting the pipeline"
_log_info(msg, scenario_name, fold)
pipeline = ASaslPipeline(args)
pipeline_fit = pipeline.fit(scenario=training)
msg = "writing pipeline to disk"
_log_info(msg, scenario_name, fold)
model_type = scenario.scenario
model_filename = filenames.get_model_filename(
config['base_path'],
model_type,
fold=fold,
note=config.get('note')
)
pipeline_fit.dump(model_filename)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Run the Bayesian optimization-based approach for "
"training models for algorithm selection.")
clu.add_config(parser)
clu.add_scenario(parser)
clu.add_simple_presolver_options(parser)
clu.add_num_cpus(parser)
clu.add_cv_options(parser)
automl_utils.add_automl_options(parser, default_total_training_time=20)
automl_utils.add_blas_options(parser)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
# see which folds to run
if len(args.folds) == 0:
args.folds = [f for f in range(args.num_folds)]
clu.validate_folds_options(args)
required_keys = ['base_path']
config = as_asl_utils.load_config(args.config, required_keys)
# check if we need to spawn a new process for blas
if automl_utils.spawn_for_blas(args):
return
pipeline = parallel.apply_parallel_iter(
args.folds,
args.num_cpus,
_outer_cv,
args,
config,
progress_bar=True
)
if __name__ == '__main__':
main()
|
mit
| -4,272,369,325,987,425,300
| 27.433333
| 92
| 0.66823
| false
| 3.421123
| true
| false
| false
|
moshthepitt/afya360
|
health_facilities/sitemaps.py
|
1
|
1083
|
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.core.paginator import Paginator
from .models import HealthFacility
class HealthFacilitySitemap(Sitemap):
changefreq = "monthly"
priority = 0.6
def items(self):
return HealthFacility.objects.all()
def lastmod(self, obj):
return obj.updated_on
def health_facility_sitemaps(chunk=1000):
"""
    Attempt to generate a number of sitemaps in chunks using Paginator and GenericSitemap.
"""
health_facility_sitemap = {}
health_facilities = HealthFacility.objects.all()
paginated_health_facilities = Paginator(health_facilities, chunk)
for this_page in paginated_health_facilities.page_range:
health_facility_dict = {
'queryset': paginated_health_facilities.page(this_page).object_list,
'date_field': 'updated_on',
}
health_facility_sitemap['health_facilitys_%s' % this_page] = GenericSitemap(
health_facility_dict, priority=0.6, changefreq='monthly')
return health_facility_sitemap
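# Illustrative wiring (hypothetical URL conf, not part of this module): the
# returned dict can be passed straight to Django's sitemap view, e.g. in urls.py:
#
#   from django.contrib.sitemaps.views import sitemap
#   from health_facilities.sitemaps import health_facility_sitemaps
#
#   urlpatterns = [
#       url(r'^sitemap\.xml$', sitemap,
#           {'sitemaps': health_facility_sitemaps()},
#           name='django.contrib.sitemaps.views.sitemap'),
#   ]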
|
mit
| -7,888,297,912,088,862,000
| 32.84375
| 100
| 0.698984
| false
| 3.734483
| false
| false
| false
|
datapythonista/pandas
|
scripts/sync_flake8_versions.py
|
3
|
5144
|
"""
Check that the flake8 (and pandas-dev-flaker) pins are the same in:
- environment.yml
- .pre-commit-config.yaml, in the flake8 hook
- .pre-commit-config.yaml, in the additional dependencies of the yesqa hook
The flake8 hook revision in .pre-commit-config.yaml is taken as the reference revision.
Usage: either
- ``python scripts/sync_flake8_versions.py``, or
- ``pre-commit run sync-flake8-versions --all-files``.
"""
from __future__ import annotations
from dataclasses import (
dataclass,
replace,
)
import sys
from typing import (
Any,
Mapping,
Sequence,
TypeVar,
)
import yaml
@dataclass
class Revision:
name: str
compare: str
version: str
@dataclass
class Revisions:
name: str
pre_commit: Revision | None = None
yesqa: Revision | None = None
environment: Revision | None = None
YamlMapping = Mapping[str, Any]
Repo = TypeVar("Repo", bound=YamlMapping)
COMPARE = ("<=", "==", ">=", "<", ">", "=")
def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> tuple[Repo, YamlMapping]:
for repo in repos:
for hook in repo["hooks"]:
if hook["id"] == hook_name:
return repo, hook
else: # pragma: no cover
raise RuntimeError(f"Repo with hook {hook_name} not found")
def _conda_to_pip_compat(dep):
if dep.compare == "=":
return replace(dep, compare="==")
else:
return dep
def _validate_additional_dependencies(
flake8_additional_dependencies,
yesqa_additional_dependencies,
environment_additional_dependencies,
) -> None:
for dep in flake8_additional_dependencies:
if dep not in yesqa_additional_dependencies:
sys.stdout.write(
f"Mismatch of '{dep.name}' version between 'flake8' "
"and 'yesqa' in '.pre-commit-config.yaml'\n"
)
sys.exit(1)
if dep not in environment_additional_dependencies:
sys.stdout.write(
f"Mismatch of '{dep.name}' version between 'enviroment.yml' "
"and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n"
)
sys.exit(1)
def _validate_revisions(revisions):
if revisions.environment != revisions.pre_commit:
sys.stdout.write(
f"{revisions.name} in 'environment.yml' does not "
"match in 'flake8' from 'pre-commit'\n"
)
sys.exit(1)
if revisions.yesqa != revisions.pre_commit:
sys.stdout.write(
f"{revisions.name} in 'yesqa' does not match "
"in 'flake8' from 'pre-commit'\n"
)
sys.exit(1)
def _process_dependencies(deps):
for dep in deps:
if isinstance(dep, str):
for compare in COMPARE:
if compare in dep:
pkg, rev = dep.split(compare, maxsplit=1)
yield _conda_to_pip_compat(Revision(pkg, compare, rev))
break
else:
yield from _process_dependencies(dep["pip"])
def get_revisions(
precommit_config: YamlMapping, environment: YamlMapping
) -> tuple[Revisions, Revisions]:
flake8_revisions = Revisions(name="flake8")
pandas_dev_flaker_revisions = Revisions(name="pandas-dev-flaker")
repos = precommit_config["repos"]
flake8_repo, flake8_hook = _get_repo_hook(repos, "flake8")
flake8_revisions.pre_commit = Revision("flake8", "==", flake8_repo["rev"])
flake8_additional_dependencies = []
for dep in _process_dependencies(flake8_hook.get("additional_dependencies", [])):
if dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.pre_commit = dep
else:
flake8_additional_dependencies.append(dep)
_, yesqa_hook = _get_repo_hook(repos, "yesqa")
yesqa_additional_dependencies = []
for dep in _process_dependencies(yesqa_hook.get("additional_dependencies", [])):
if dep.name == "flake8":
flake8_revisions.yesqa = dep
elif dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.yesqa = dep
else:
yesqa_additional_dependencies.append(dep)
environment_dependencies = environment["dependencies"]
environment_additional_dependencies = []
for dep in _process_dependencies(environment_dependencies):
if dep.name == "flake8":
flake8_revisions.environment = dep
elif dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.environment = dep
else:
environment_additional_dependencies.append(dep)
_validate_additional_dependencies(
flake8_additional_dependencies,
yesqa_additional_dependencies,
environment_additional_dependencies,
)
for revisions in flake8_revisions, pandas_dev_flaker_revisions:
_validate_revisions(revisions)
if __name__ == "__main__":
with open(".pre-commit-config.yaml") as fd:
precommit_config = yaml.safe_load(fd)
with open("environment.yml") as fd:
environment = yaml.safe_load(fd)
get_revisions(precommit_config, environment)
sys.exit(0)
|
bsd-3-clause
| -1,023,367,306,878,247,000
| 29.43787
| 88
| 0.625
| false
| 3.743814
| true
| false
| false
|
atakan/Fractal-Trails
|
trail_analyze.py
|
1
|
6139
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Mehmet Atakan Gürkan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (probably in a file named COPYING).
# If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import sys
import argparse
from trail_length_calc import trail_length_1d, trail_length_3d
parser = argparse.ArgumentParser(description='Analyzes a given trail')
parser.add_argument('-i', '--input-file',
metavar='<input file>',
type=argparse.FileType('r'), dest='infile',
default=None,
help='name(s) of the input file(s) (use \'-\' for stdin)')
# XXX accepting multiple file names is not implemented yet.
# (will use nargs?)
parser.add_argument('-t',
type=float, metavar='<float>', default=1.0,
help='duration of motion (default: 1.0)')
parser.add_argument('--first-column-time', dest='firstcol',
action='store_true',
help='first column in data file is time (overrides \'-t\'; default: time interval is determined by subdividing duration uniformly)')
parser.add_argument('--numpy', dest='inputformat', action='store_const',
const='numpy', default='undecided',
help='input in NumPy format (default: NumPy)')
parser.add_argument('--ascii', dest='inputformat', action='store_const',
const='numpy', default='undecided',
help='input in ASCII format (default: NumPy)')
parser.add_argument('--jump-check', dest='jumpcheck',
action='store_true',
help='check if there are discontinuities in the data, ie., points with same time but different coordinates')
args = parser.parse_args()
def br_pl(a1, a2, m1, m2, m3, b):
'''A function that returns a function that makes a broken powerlaw.
a1, a2 : x coordinates of the break points.
b : y intersect of the first power law (x<=a1)
m1, m2, m3: slopes for x<a1, a1<x<a2 and a2<x .'''
def ifelse(x, y, z) :
if x: return y
else: return z
k1 = a1*(m1-m2) + b
k2 = a2*(m2-m3) + k1
return lambda x: ifelse(x<a1, m1*x +b,
ifelse(x<a2, m2*x+k1,
m3*x+k2))
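# Illustrative check of br_pl (values chosen here for demonstration only):
#   f = br_pl(a1=1.0, a2=2.0, m1=0.5, m2=1.0, m3=2.0, b=0.0)
#   f(0.5) == 0.25   # first segment:  0.5*x + 0
#   f(1.5) == 1.0    # second segment: 1.0*x - 0.5, continuous at x=a1
#   f(3.0) == 3.5    # third segment:  2.0*x - 2.5, continuous at x=a2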
def set_ruler_lengths(rl_min, rl_max, tend) :
'''A function that creates an array of ruler lengths/sampling intervals.
All values returned are in the closed interval of [rl_min, rl_max]. They
are exact divisors of tend, which is the absolute maximum.'''
dummy_rl = [tend/1.0, tend/2.0, tend/3.0, tend/4.0,
tend/5.0, tend/6.0, tend/7.0, tend/8.0,
tend/10.0, tend/12.0, tend/14.0, tend/17.0,
tend/20.0, tend/24.0, tend/28.0, tend/33.0,
tend/40.0, tend/48.0, tend/56.0, tend/67.0]
for i in range(100) :
dummy_rl.append(dummy_rl[-4]/2.0)
rl = []
for drl in dummy_rl :
if drl <= rl_max and drl >= rl_min :
rl.append(drl)
rl.reverse()
return np.array(rl)
dn = np.loadtxt(args.infile)
if args.firstcol==True :
if np.size(np.shape(dn))==2 and np.shape(dn)[1]==4 :
tt = dn[:,0]
dd = dn[:,1:]
elif np.size(np.shape(dn))==2 and np.shape(dn)[1]==2 :
tt = dn[:,0]
dd = dn[:,1]
else :
print('input file is not 1D or 3D')
print(np.shape(dn))
sys.exit()
else :
tt = np.linspace(0, args.t, np.shape(dn)[0])
dd = dn
if args.jumpcheck == True :
same_ts = []
told = tt[0]
found_duplicate = False
duplicates = []
length_dup = 1
for i in range(1,len(tt)) :
tnow = tt[i]
if tnow == told :
found_duplicate = True
length_dup += 1
else :
if found_duplicate == True : # duplicate string ended
duplicates.append([i-length_dup, length_dup])
length_dup = 1
found_duplicate = False
told = tnow
if found_duplicate == True : # no more data
duplicates.append([i-length_dup+1, length_dup])
# print(tt)
# print(duplicates)
for i, k in duplicates :
if i == 0 : # special case 1, starting w/ dups
tprev = tt[0]
tnext = tt[i+k+1]
tdel = tnext-tprev
for j in range(k) :
tt[i+j] += tdel * 1e-4 * float(j)/k
elif i+k == len(tt) : # special case 2, ending w/ dups
tprev = tt[i-1]
tnext = tt[-1]
tdel = tnext-tprev
for j in range(k) :
tt[i+j] -= tdel * 1e-4 * float(k-j-1)/k
else :
tprev = tt[i-1]
tnext = tt[i+k+1]
for j in range(k) :
tdup = tt[i]
if j<k/2 :
tdel = tdup-tprev
else :
tdel = tnext-tdup
tt[i+j] += tdel * 1e-4 * float(j - k/2.0)/k
# print(tt)
# sys.exit(0)
tend = tt[-1]
period = 2.3e-4
rl_min = period/5.0
rl_max = tend/2.0
ruler_lengths = set_ruler_lengths(rl_min, rl_max, tend)
if np.size(np.shape(dd))==2 and np.shape(dd)[1]==3 :
# print('3d')
trail_lengths = trail_length_3d(ruler_lengths, tt, dd)
elif np.size(np.shape(dd))==1 :
# print('1d')
trail_lengths = trail_length_1d(ruler_lengths, tt, dd)
else :
print('input file is not 1D or 3D')
print(np.shape(dd))
sys.exit()
for i, rl in enumerate(ruler_lengths) :
print(rl, trail_lengths[i])
|
gpl-3.0
| -4,184,026,960,048,453,000
| 34.894737
| 152
| 0.558977
| false
| 3.213613
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/types/topic_view_service.py
|
1
|
1202
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetTopicViewRequest",},
)
class GetTopicViewRequest(proto.Message):
r"""Request message for
[TopicViewService.GetTopicView][google.ads.googleads.v8.services.TopicViewService.GetTopicView].
Attributes:
resource_name (str):
Required. The resource name of the topic view
to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
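# Illustrative usage (the resource name below is a made-up example):
#   request = GetTopicViewRequest(
#       resource_name="customers/1234567890/topicViews/111~222",
#   )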
|
apache-2.0
| 5,047,127,096,734,223,000
| 29.820513
| 100
| 0.710483
| false
| 3.915309
| false
| false
| false
|
bsmedberg/socorro
|
socorro/external/crash_data_base.py
|
1
|
3506
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.external import (
MissingArgumentError,
ResourceNotFound,
ResourceUnavailable,
ServiceUnavailable
)
from socorro.external.crashstorage_base import CrashIDNotFound
from socorro.lib import external_common
class CrashDataBase(object):
"""
Common implementation of the crash data service for all crashstorage
schemes. Any external service that wants to implement a CrashData service
may subclass from this service. All they'd have to do is implement the
'get_storage' method to return an appropriate instance of their own
crashstorage class.
"""
def __init__(self, *args, **kwargs):
super(CrashDataBase, self).__init__()
self.config = kwargs['config']
self.all_services = kwargs['all_services']
def get_storage(self):
"""derived classes must implement this method to return an instance
of their own crashstorage class"""
raise NotImplementedError
def get(self, **kwargs):
"""Return JSON data of a crash report, given its uuid. """
filters = [
('uuid', None, 'str'),
('datatype', None, 'str')
]
params = external_common.parse_arguments(filters, kwargs)
if not params.uuid:
raise MissingArgumentError('uuid')
if not params.datatype:
raise MissingArgumentError('datatype')
# get a generic crashstorage instance from whatever external resource
# is implementing this service.
store = self.get_storage()
datatype_method_mapping = {
'raw': 'get_raw_dump',
'meta': 'get_raw_crash',
'processed': 'get_processed',
'unredacted': 'get_unredacted_processed',
}
get = store.__getattribute__(datatype_method_mapping[params.datatype])
try:
if params.datatype == 'raw':
return (get(params.uuid), 'application/octet-stream')
else:
return get(params.uuid)
except CrashIDNotFound:
if params.datatype in ('processed', 'unredacted'):
# try to fetch a raw crash just to ensure that the raw crash
# exists. If this line fails, there's no reason to actually
# submit the priority job.
try:
store.get_raw_crash(params.uuid)
except CrashIDNotFound:
raise ResourceNotFound(params.uuid)
# search through the existing other services to find the
# Priorityjob service.
try:
priorityjob_service_impl = self.all_services[
'Priorityjobs'
]
except KeyError:
raise ServiceUnavailable('Priorityjobs')
# get the underlying implementation of the Priorityjob
# service and instantiate it.
priority_job_service = priorityjob_service_impl.cls(
config=self.config
)
# create the priority job for this crash_ids
priority_job_service.create(uuid=params.uuid)
raise ResourceUnavailable(params.uuid)
raise ResourceNotFound(params.uuid)
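# Illustrative sketch (hypothetical, not part of this module): a concrete
# crash-data service only has to supply its own crashstorage instance, e.g.
#
#   class CrashData(CrashDataBase):
#       def get_storage(self):
#           return self.config.crashstorage_class(self.config)
#
# The 'crashstorage_class' attribute above is a placeholder; real services
# build the instance from their own configuration.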
|
mpl-2.0
| 8,774,703,158,655,654,000
| 37.108696
| 78
| 0.595836
| false
| 4.776567
| false
| false
| false
|
EarToEarOak/RTLSDR-Scanner
|
rtlsdr_scanner/plot_spect.py
|
1
|
16014
|
#
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import threading
import time
from matplotlib import cm, patheffects
import matplotlib
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
from matplotlib.dates import DateFormatter
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
from matplotlib.text import Text
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
import numpy
from rtlsdr_scanner.constants import Markers, PlotFunc
from rtlsdr_scanner.events import EventThread, Event, post_event
from rtlsdr_scanner.misc import format_time, format_precision
from rtlsdr_scanner.spectrum import split_spectrum, Measure, smooth_spectrum, Extent, \
diff_spectrum, get_peaks
from rtlsdr_scanner.utils_mpl import utc_to_mpl
class Spectrogram(object):
def __init__(self, notify, figure, settings):
self.notify = notify
self.figure = figure
self.settings = settings
self.data = [[], [], []]
self.axes = None
self.plot = None
self.extent = None
self.bar = None
self.barBase = None
self.lines = {}
self.labels = {}
self.overflowLabels = {}
self.overflow = {'left': [],
'right': []}
self.threadPlot = None
self.__setup_plot()
self.set_grid(self.settings.grid)
def __setup_plot(self):
gs = GridSpec(1, 2, width_ratios=[9.5, 0.5])
self.axes = self.figure.add_subplot(gs[0],
facecolor=self.settings.background)
self.axes.set_xlabel("Frequency (MHz)")
self.axes.set_ylabel('Time')
numFormatter = ScalarFormatter(useOffset=False)
timeFormatter = DateFormatter("%H:%M:%S")
self.axes.xaxis.set_major_formatter(numFormatter)
self.axes.yaxis.set_major_formatter(timeFormatter)
self.axes.xaxis.set_minor_locator(AutoMinorLocator(10))
self.axes.yaxis.set_minor_locator(AutoMinorLocator(10))
self.axes.set_xlim(self.settings.start, self.settings.stop)
now = time.time()
self.axes.set_ylim(utc_to_mpl(now), utc_to_mpl(now - 10))
self.bar = self.figure.add_subplot(gs[1])
norm = Normalize(vmin=-50, vmax=0)
self.barBase = ColorbarBase(self.bar, norm=norm,
cmap=cm.get_cmap(self.settings.colourMap))
self.__setup_measure()
self.__setup_overflow()
self.hide_measure()
def __setup_measure(self):
dashesHalf = [1, 5, 5, 5, 5, 5]
self.lines[Markers.HFS] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='purple')
self.lines[Markers.HFE] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='purple')
self.lines[Markers.OFS] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='#996600')
self.lines[Markers.OFE] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='#996600')
if matplotlib.__version__ >= '1.3':
effect = patheffects.withStroke(linewidth=3, foreground="w",
alpha=0.75)
self.lines[Markers.HFS].set_path_effects([effect])
self.lines[Markers.HFE].set_path_effects([effect])
self.lines[Markers.OFS].set_path_effects([effect])
self.lines[Markers.OFE].set_path_effects([effect])
for line in self.lines.itervalues():
self.axes.add_line(line)
bbox = self.axes.bbox
box = dict(boxstyle='round', fc='white', ec='purple', clip_box=bbox)
self.labels[Markers.HFS] = Text(0, 0, '-3dB', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='purple')
self.labels[Markers.HFE] = Text(0, 0, '-3dB', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='purple')
box['ec'] = '#996600'
self.labels[Markers.OFS] = Text(0, 0, 'OBW', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='#996600')
self.labels[Markers.OFE] = Text(0, 0, 'OBW', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='#996600')
for label in self.labels.itervalues():
self.axes.add_artist(label)
def __setup_overflow(self):
bbox = self.axes.bbox
box = dict(boxstyle='round', fc='white', ec='black', alpha=0.5,
clip_box=bbox)
self.overflowLabels['left'] = Text(0, 0.9, '', fontsize='xx-small',
ha="left", va="top", bbox=box,
transform=self.axes.transAxes,
alpha=0.5)
self.overflowLabels['right'] = Text(1, 0.9, '', fontsize='xx-small',
ha="right", va="top", bbox=box,
transform=self.axes.transAxes,
alpha=0.5)
for label in self.overflowLabels.itervalues():
self.axes.add_artist(label)
def __clear_overflow(self):
for label in self.overflowLabels:
self.overflow[label] = []
def __draw_vline(self, marker, x):
line = self.lines[marker]
label = self.labels[marker]
yLim = self.axes.get_ylim()
xLim = self.axes.get_xlim()
if xLim[0] < x < xLim[1]:
line.set_visible(True)
line.set_xdata([x, x])
line.set_ydata([yLim[0], yLim[1]])
self.axes.draw_artist(line)
label.set_visible(True)
label.set_position((x, yLim[1]))
self.axes.draw_artist(label)
elif x is not None and x < xLim[0]:
self.overflow['left'].append(marker)
elif x is not None and x > xLim[1]:
self.overflow['right'].append(marker)
def __draw_overflow(self):
for pos, overflow in self.overflow.iteritems():
if len(overflow) > 0:
text = ''
for measure in overflow:
if len(text) > 0:
text += '\n'
text += self.labels[measure].get_text()
label = self.overflowLabels[pos]
if pos == 'left':
textMath = '$\\blacktriangleleft$\n' + text
elif pos == 'right':
textMath = '$\\blacktriangleright$\n' + text
label.set_text(textMath)
label.set_visible(True)
self.axes.draw_artist(label)
def draw_measure(self, measure, show):
if self.axes.get_renderer_cache() is None:
return
self.hide_measure()
self.__clear_overflow()
if show[Measure.HBW]:
xStart, xEnd, _y = measure.get_hpw()
self.__draw_vline(Markers.HFS, xStart)
self.__draw_vline(Markers.HFE, xEnd)
if show[Measure.OBW]:
xStart, xEnd, _y = measure.get_obw()
self.__draw_vline(Markers.OFS, xStart)
self.__draw_vline(Markers.OFE, xEnd)
self.__draw_overflow()
def hide_measure(self):
for line in self.lines.itervalues():
line.set_visible(False)
for label in self.labels.itervalues():
label.set_visible(False)
for label in self.overflowLabels.itervalues():
label.set_visible(False)
def scale_plot(self, force=False):
if self.figure is not None and self.plot is not None:
extent = self.plot.get_extent()
if self.settings.autoF or force:
if extent[0] == extent[1]:
extent[1] += 1
self.axes.set_xlim(extent[0], extent[1])
if self.settings.autoL or force:
vmin, vmax = self.plot.get_clim()
self.barBase.set_clim(vmin, vmax)
try:
self.barBase.draw_all()
except:
pass
if self.settings.autoT or force:
self.axes.set_ylim(extent[2], extent[3])
def redraw_plot(self):
if self.figure is not None:
post_event(self.notify, EventThread(Event.DRAW))
def get_axes(self):
return self.axes
def get_axes_bar(self):
return self.barBase.ax
def get_bar(self):
return self.barBase
def get_plot_thread(self):
return self.threadPlot
def set_title(self, title):
self.axes.set_title(title, fontsize='medium')
def set_plot(self, spectrum, extent, annotate=False):
self.extent = extent
self.threadPlot = ThreadPlot(self, self.settings,
self.axes,
spectrum,
self.extent,
self.barBase,
annotate)
self.threadPlot.start()
def clear_plots(self):
children = self.axes.get_children()
for child in children:
if child.get_gid() is not None:
if child.get_gid() in ['plot', 'peak', 'peakText',
'peakShadow', 'peakThres']:
child.remove()
def set_grid(self, on):
if on:
self.axes.grid(True, color='w')
else:
self.axes.grid(False)
self.redraw_plot()
def set_colourmap(self, colourMap):
if self.plot is not None:
self.plot.set_cmap(colourMap)
self.barBase.set_cmap(colourMap)
try:
self.barBase.draw_all()
except:
pass
def close(self):
self.figure.clear()
self.figure = None
class ThreadPlot(threading.Thread):
def __init__(self, parent, settings, axes, data, extent,
barBase, annotate):
threading.Thread.__init__(self)
self.name = "Plot"
self.parent = parent
self.settings = settings
self.axes = axes
self.data = data
self.extent = extent
self.barBase = barBase
self.annotate = annotate
def run(self):
if self.data is None:
self.parent.threadPlot = None
return
total = len(self.data)
if total > 0:
if self.settings.plotFunc == PlotFunc.NONE:
peakF, peakL, peakT = self.__plot(self.data)
elif self.settings.plotFunc == PlotFunc.SMOOTH:
peakF, peakL, peakT = self.__plot_smooth()
elif self.settings.plotFunc == PlotFunc.DIFF:
peakF, peakL, peakT = self.__plot_diff()
if self.annotate:
self.__plot_peak(peakF, peakL, peakT)
if self.settings.peaks:
self.__plot_peaks()
self.parent.scale_plot()
self.parent.redraw_plot()
self.parent.threadPlot = None
def __plot(self, spectrum):
width = len(spectrum[min(self.data)])
height = len(spectrum)
c = numpy.ma.masked_all((height, width))
self.parent.clear_plots()
j = height
for ys in reversed(spectrum):
j -= 1
_xs, zs = split_spectrum(spectrum[ys])
for i in range(len(zs)):
try:
c[j, i] = zs[i]
except IndexError:
continue
norm = None
if not self.settings.autoL:
minY, maxY = self.barBase.get_clim()
norm = Normalize(vmin=minY, vmax=maxY)
extent = self.extent.get_ft()
self.parent.plot = self.axes.imshow(c, aspect='auto',
extent=extent,
norm=norm,
cmap=cm.get_cmap(self.settings.colourMap),
interpolation='spline16',
gid="plot")
return self.extent.get_peak_flt()
def __plot_smooth(self):
data = smooth_spectrum(self.data,
self.settings.smoothFunc,
self.settings.smoothRatio)
self.extent = Extent(data)
return self.__plot(data)
def __plot_diff(self):
data = diff_spectrum(self.data)
self.extent = Extent(data)
self.parent.extent = self.extent
return self.__plot(data)
def __plot_peak(self, peakF, peakL, peakT):
self.__clear_markers()
y = utc_to_mpl(peakT)
start, stop = self.axes.get_xlim()
textX = ((stop - start) / 50.0) + peakF
when = format_time(peakT)
text = '{}\n{}\n{when}'.format(*format_precision(self.settings,
peakF, peakL,
fancyUnits=True),
when=when)
if matplotlib.__version__ < '1.3':
self.axes.annotate(text,
xy=(peakF, y), xytext=(textX, y),
ha='left', va='bottom', size='x-small',
color='w', gid='peakText')
self.axes.plot(peakF, y, marker='x', markersize=10, color='w',
mew=3, gid='peakShadow')
self.axes.plot(peakF, y, marker='x', markersize=10, color='r',
gid='peak')
else:
effect = patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)
self.axes.annotate(text,
xy=(peakF, y), xytext=(textX, y),
ha='left', va='bottom', size='x-small',
path_effects=[effect], gid='peakText')
self.axes.plot(peakF, y, marker='x', markersize=10, color='r',
path_effects=[effect], gid='peak')
def __plot_peaks(self):
sweep, indices = get_peaks(self.data, self.settings.peaksThres)
lastTime = utc_to_mpl(max(self.data))
for i in indices:
self.axes.plot(sweep.keys()[i], lastTime,
linestyle='None',
marker='+', markersize=10, color='r',
gid='peakThres')
def __clear_markers(self):
children = self.axes.get_children()
for child in children:
if child.get_gid() is not None:
if child.get_gid() in ['peak', 'peakText',
'peakShadow', 'peakThres']:
child.remove()
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
|
gpl-3.0
| 6,269,624,982,629,573,000
| 36.415888
| 87
| 0.513301
| false
| 4.025641
| false
| false
| false
|
jasongrout/jupyterlab-extension
|
setup.py
|
1
|
5340
|
# -*- coding: utf-8 -*-
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from setuptools import setup, find_packages, Command
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from subprocess import check_call
import os
import sys
import platform
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'jupyterlab_extension')
is_repo = os.path.exists(os.path.join(here, '.git'))
npm_path = os.pathsep.join([
os.path.join(node_root, 'node_modules', '.bin'),
os.environ.get('PATH', os.defpath),
])
from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
LONG_DESCRIPTION = 'A pre-alpha JupyterLab demo.'
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn('rebuilding js and css failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('rebuilding js and css failed (not a problem)')
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
"""update package_data to catch changes during setup"""
build_py = distribution.get_command_obj('build_py')
# distribution.package_data = find_package_data()
# re-init build_py options which load package_data
build_py.finalize_options()
class NPM(Command):
description = 'install package.json dependencies using npm'
user_options = []
node_modules = os.path.join(node_root, 'node_modules')
targets = [
os.path.join(here, 'jupyterlab_extension', 'build', 'bundle.js'),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def has_npm(self):
try:
check_call(['npm', '--version'])
return True
except:
return False
def should_run_npm_install(self):
package_json = os.path.join(node_root, 'package.json')
node_modules_exists = os.path.exists(self.node_modules)
return self.has_npm()
def run(self):
has_npm = self.has_npm()
if not has_npm:
log.error("`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo")
env = os.environ.copy()
env['PATH'] = npm_path
if self.should_run_npm_install():
log.info("Installing build dependencies with npm. This may take a while...")
check_call(['npm', 'install'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
check_call(['npm', 'run', 'build'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = 'Missing file: %s' % t
if not has_npm:
msg += '\nnpm is required to build a development version of widgetsnbextension'
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
version_ns = {}
with open(os.path.join(here, 'jupyterlab_extension', '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = {
'name': 'jupyterlab_extension',
'version': version_ns['__version__'],
'description': 'A pre-alpha Jupyter lab environment notebook server extension.',
'long_description': LONG_DESCRIPTION,
    'license': 'BSD',
'include_package_data': True,
'install_requires': ['notebook>=4.2.0'],
'packages': find_packages(),
'zip_safe': False,
'package_data': {'jupyterlab_extension': [
'build/*',
'lab.html'
]},
'cmdclass': {
'build_py': js_prerelease(build_py),
'egg_info': js_prerelease(egg_info),
'sdist': js_prerelease(sdist, strict=True),
'jsdeps': NPM,
},
'author': 'Jupyter Development Team',
'author_email': 'jupyter@googlegroups.com',
'url': 'http://jupyter.org',
'keywords': ['ipython', 'jupyter', 'Web'],
'classifiers': [
        'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
}
setup(**setup_args)
|
bsd-3-clause
| -5,766,849,120,819,335,000
| 32.797468
| 124
| 0.603371
| false
| 3.739496
| false
| false
| false
|
ox-it/talks.ox
|
talks/users/forms.py
|
1
|
3163
|
from __future__ import absolute_import
from django import forms
from django.core.exceptions import ValidationError
from django.db.models.query_utils import Q
from talks.events import typeahead, datasources
from django.contrib.auth.models import User
from talks.users.models import Collection, TalksUser, TalksUserCollection, DEFAULT_COLLECTION_NAME, COLLECTION_ROLES_EDITOR, COLLECTION_ROLES_READER, COLLECTION_ROLES_OWNER
from talks.contributors.forms import XMLFriendlyTextField
class CollectionForm(forms.ModelForm):
title = XMLFriendlyTextField(
max_length=250,
required=True
)
description = XMLFriendlyTextField(
widget=forms.Textarea(attrs={'rows': 8}),
required=False,
)
editor_set = forms.ModelMultipleChoiceField(
queryset=TalksUser.objects.filter().distinct(),
label="Other Editors",
help_text="Share editing with another Talks Editor by typing in their full email address",
required=False,
widget=typeahead.MultipleTypeahead(datasources.TALKSUSERS_EMAIL_EXACT_DATA_SOURCE),
)
class Meta:
model = Collection
fields = ('title', 'description', 'public', 'editor_set')
labels = {
'public': "Make this list public?"
}
help_texts = {
'title': "If you wish to make this list public please make sure the list has a distinctive title and description - e.g.: Recommended talks for 3rd Year Biology"
}
def save(self):
collection = super(CollectionForm, self).save(commit=False)
collection.save()
# clear the list of editors and repopulate with the contents of the form
collection.editor_set.through.objects.filter(role=COLLECTION_ROLES_EDITOR, collection=collection).delete()
if 'editor_set' in self.cleaned_data:
for user in self.cleaned_data['editor_set']:
if collection.user_collection_permission(user) == 'owner':
pass
else:
TalksUserCollection.objects.create(user=user,
collection=collection,
role=COLLECTION_ROLES_EDITOR)
collection.save()
return collection
def clean(self):
cleaned_data = self.cleaned_data
public = cleaned_data.get('public')
title = cleaned_data.get('title')
collection = super(CollectionForm, self).save(commit=False) # get the collection instance without saving the form
number_of_readers = collection.get_number_of_readers()
# If we're making the collection public, ensure that the collection title is not 'My Collection'
if public and (title == DEFAULT_COLLECTION_NAME):
raise ValidationError({'title': 'Please change the title of your list to something less generic before making your list public'})
if not public and (number_of_readers > 0):
raise ValidationError({'public': 'Unable to revoke public status - there are already ' + str(number_of_readers) + ' readers following this list.'})
|
apache-2.0
| 9,132,082,129,076,920,000
| 42.328767
| 172
| 0.653494
| false
| 4.411437
| false
| false
| false
|
sanguinariojoe/FreeCAD
|
src/Mod/Draft/draftguitools/gui_mirror.py
|
9
|
9314
|
# ***************************************************************************
# * (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * (c) 2020 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides GUI tools to create mirrored objects.
The mirror tool creates a `Part::Mirroring` object, which is the same
as the one created by the Part module.
Perhaps in the future a specific Draft `Mirror` object can be defined.
"""
## @package gui_mirror
# \ingroup draftguitools
# \brief Provides GUI tools to create mirrored objects.
## \addtogroup draftguitools
# @{
from PySide.QtCore import QT_TRANSLATE_NOOP
import FreeCAD as App
import FreeCADGui as Gui
import Draft_rc
import DraftVecUtils
import WorkingPlane
import draftguitools.gui_base_original as gui_base_original
import draftguitools.gui_tool_utils as gui_tool_utils
from draftutils.messages import _msg
from draftutils.translate import translate
# The module is used to prevent complaints from code checkers (flake8)
True if Draft_rc.__name__ else False
class Mirror(gui_base_original.Modifier):
"""Gui Command for the Mirror tool."""
def GetResources(self):
"""Set icon, menu and tooltip."""
return {'Pixmap': 'Draft_Mirror',
'Accel': "M, I",
'MenuText': QT_TRANSLATE_NOOP("Draft_Mirror", "Mirror"),
'ToolTip': QT_TRANSLATE_NOOP("Draft_Mirror", "Mirrors the selected objects along a line defined by two points.")}
def Activated(self):
"""Execute when the command is called."""
super(Mirror, self).Activated(name="Mirror")
self.ghost = None
if self.ui:
if not Gui.Selection.getSelection():
self.ui.selectUi(on_close_call=self.finish)
_msg(translate("draft", "Select an object to mirror"))
self.call = \
self.view.addEventCallback("SoEvent",
gui_tool_utils.selectObject)
else:
self.proceed()
def proceed(self):
"""Proceed with the command if one object was selected."""
if self.call:
self.view.removeEventCallback("SoEvent", self.call)
self.sel = Gui.Selection.getSelection()
self.ui.pointUi(title=translate("draft", self.featureName), icon="Draft_Mirror")
self.ui.modUi()
self.ui.xValue.setFocus()
self.ui.xValue.selectAll()
# self.ghost = trackers.ghostTracker(self.sel)
# TODO: solve this (see below)
self.call = self.view.addEventCallback("SoEvent", self.action)
_msg(translate("draft", "Pick start point of mirror line"))
self.ui.isCopy.hide()
def finish(self, closed=False, cont=False):
"""Terminate the operation of the tool."""
if self.ghost:
self.ghost.finalize()
super(Mirror, self).finish()
if cont and self.ui:
if self.ui.continueMode:
Gui.Selection.clearSelection()
self.Activated()
def mirror(self, p1, p2, copy=False):
"""Mirror the real shapes."""
sel = '['
for o in self.sel:
if len(sel) > 1:
sel += ', '
sel += 'FreeCAD.ActiveDocument.' + o.Name
sel += ']'
Gui.addModule("Draft")
_cmd = 'Draft.mirror'
_cmd += '('
_cmd += sel + ', '
_cmd += DraftVecUtils.toString(p1) + ', '
_cmd += DraftVecUtils.toString(p2)
_cmd += ')'
_cmd_list = ['m = ' + _cmd,
'FreeCAD.ActiveDocument.recompute()']
self.commit(translate("draft", "Mirror"),
_cmd_list)
def action(self, arg):
"""Handle the 3D scene events.
This is installed as an EventCallback in the Inventor view.
Parameters
----------
arg: dict
Dictionary with strings that indicates the type of event received
from the 3D view.
"""
if arg["Type"] == "SoKeyboardEvent":
if arg["Key"] == "ESCAPE":
self.finish()
elif arg["Type"] == "SoLocation2Event": # mouse movement detection
(self.point,
ctrlPoint, info) = gui_tool_utils.getPoint(self, arg)
if len(self.node) > 0:
last = self.node[-1]
if self.ghost:
if self.point != last:
# TODO: the following doesn't work at the moment
mu = self.point.sub(last).normalize()
# This part used to test for the GUI to obtain
# the camera view but this is unnecessary
# as this command is always launched in the GUI.
_view = Gui.ActiveDocument.ActiveView
mv = _view.getViewDirection().negative()
mw = mv.cross(mu)
_plane = WorkingPlane.plane(u=mu, v=mv, w=mw,
pos=last)
tm = _plane.getPlacement().toMatrix()
m = self.ghost.getMatrix()
m = m.multiply(tm.inverse())
m.scale(App.Vector(1, 1, -1))
m = m.multiply(tm)
m.scale(App.Vector(-1, 1, 1))
self.ghost.setMatrix(m)
if self.extendedCopy:
if not gui_tool_utils.hasMod(arg, gui_tool_utils.MODALT):
self.finish()
gui_tool_utils.redraw3DView()
elif arg["Type"] == "SoMouseButtonEvent":
if (arg["State"] == "DOWN") and (arg["Button"] == "BUTTON1"):
if self.point:
self.ui.redraw()
if (self.node == []):
self.node.append(self.point)
self.ui.isRelative.show()
if self.ghost:
self.ghost.on()
_msg(translate("draft",
"Pick end point of mirror line"))
if self.planetrack:
self.planetrack.set(self.point)
else:
last = self.node[0]
if (self.ui.isCopy.isChecked()
or gui_tool_utils.hasMod(arg, gui_tool_utils.MODALT)):
self.mirror(last, self.point, True)
else:
self.mirror(last, self.point)
if gui_tool_utils.hasMod(arg, gui_tool_utils.MODALT):
self.extendedCopy = True
else:
self.finish(cont=True)
def numericInput(self, numx, numy, numz):
"""Validate the entry fields in the user interface.
This function is called by the toolbar or taskpanel interface
when valid x, y, and z have been entered in the input fields.
"""
self.point = App.Vector(numx, numy, numz)
if not self.node:
self.node.append(self.point)
if self.ghost:
self.ghost.on()
_msg(translate("draft", "Pick end point of mirror line"))
else:
last = self.node[-1]
if self.ui.isCopy.isChecked():
self.mirror(last, self.point, True)
else:
self.mirror(last, self.point)
self.finish()
Gui.addCommand('Draft_Mirror', Mirror())
## @}
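# Hedged illustration (not used by the command above): the ghost-matrix steps in
# action() amount to reflecting geometry across the plane spanned by the mirror
# line and the current view direction. A minimal standalone version of that
# math, with plain numpy vectors and a hypothetical helper name, could look like
# this:
def _reflect_across_mirror_plane(point, p1, p2, view_dir):
    """Reflect 'point' across the plane through p1 spanned by (p2 - p1) and view_dir."""
    import numpy as np
    point, p1, p2, view_dir = (np.asarray(v, dtype=float) for v in (point, p1, p2, view_dir))
    mu = (p2 - p1) / np.linalg.norm(p2 - p1)      # direction of the mirror line
    n = np.cross(view_dir, mu)
    n = n / np.linalg.norm(n)                     # normal of the mirror plane
    offset = point - p1
    return point - 2.0 * np.dot(offset, n) * n    # x' = x - 2 (x . n) n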
|
lgpl-2.1
| -7,578,842,251,887,063,000
| 42.12037
| 129
| 0.492914
| false
| 4.362529
| false
| false
| false
|
nicodv/bgg
|
bgg/util/retry.py
|
1
|
3841
|
"""
Module that implements a retry decorator.
You can, for example, do this:
@retry(5)
def my_function():
...
And 'my_function', upon an exception, will be retried 4 more times until
a final exception is raised. 'retry' will wait a little bit longer after each
failure before retrying.
Very useful for, for example, retrying a download if timeouts occur frequently.
Customization of exceptions and exception handlers is possible.
"""
from time import sleep
from functools import wraps
def _warning_printer(func, exception, tries_remaining):
"""Simple exception handler that prints a warning.
:param exception: The exception instance which was raised
:param int tries_remaining: The number of tries remaining
"""
print("Caught '{0}' in {1}, {2} tries remaining.".format(
exception, func.__name__, tries_remaining))
def _error_printer(func, exception, tries):
"""Exception handler that prints an error.
:param exception: The exception instance which was raised
:param int tries: Total number of tries
"""
try:
print("{} failed (reason: {}), giving up after {} tries.".format(
func.__name__, exception.reason, int(tries)))
except AttributeError:
print("{} failed, giving up after {} tries.".format(
func.__name__, int(tries)))
def retry(max_tries, delay=1, backoff=2, exceptions=(Exception,),
on_retry=_warning_printer, on_fail=_error_printer):
"""Function decorator implementing retry logic.
The decorator will call the function up to max_tries times if it raises
an exception.
By default it catches instances of the Exception class and subclasses.
This will recover after all but the most fatal errors. You may specify a
custom tuple of exception classes with the 'exceptions' argument; the
function will only be retried if it raises one of the specified
exceptions.
Additionally you may specify a on_retry function which will be
called prior to retrying with the number of remaining tries and the
exception instance. This is primarily intended to give the opportunity to
log the failure. on_fail is another function called after failure if no
retries remain.
:param int max_tries: Maximum number of retries
:param int or float delay: Sleep this many seconds * backoff *
try number after failure
:param int or float backoff: Multiply delay by this after each failure
:param tuple exceptions: A tuple of exception classes; default (Exception,)
:param func on_retry: An on-retry exception handler function
(args should be: function, exception, tries_remaining)
:param func on_fail: A final exception handler function
(args should be: function, exception, tries)
"""
assert max_tries > 0
def dec(func):
# 'wraps' updates a wrapper function to look like the wrapped function
@wraps(func)
def f2(*args, **kwargs):
mydelay = delay
tries = reversed(range(max_tries))
for tries_remaining in tries:
try:
return func(*args, **kwargs)
except exceptions as e:
if tries_remaining > 0:
# call on_retry exception handler after an exception
if on_retry is not None:
on_retry(func, e, tries_remaining)
sleep(mydelay)
mydelay *= backoff
else:
# no more retries, call the on_fail exception handler
if on_fail is not None:
on_fail(func, e, max_tries)
else:
raise e
return f2
return dec
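# Hedged usage sketch (illustration only; nothing below is referenced elsewhere
# in this module): retrying a flaky operation with a custom exception tuple and
# a custom on_retry handler, as described in the retry() docstring above.
def _example_retry_usage():
    def log_retry(func, exc, tries_remaining):
        print("retrying {}, {} tries left: {}".format(
            func.__name__, tries_remaining, exc))

    @retry(max_tries=4, delay=0.5, backoff=2,
           exceptions=(IOError, OSError),
           on_retry=log_retry)
    def flaky_download(url):
        # stand-in for a real network call that may raise IOError/OSError
        raise IOError("timeout fetching {}".format(url))

    return flaky_download("http://example.com/")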
|
mit
| 5,816,004,726,221,605,000
| 36.656863
| 79
| 0.634731
| false
| 4.736128
| false
| false
| false
|
mifumagalli/mypython
|
redshifts/zfit.py
|
1
|
48275
|
"""
Gui to inspect spectra in 1/2D
"""
try:
import Tkinter as tkinter
import tkFont as tkfont
from Tkinter import Tk
import tkFileDialog as filedialog
except:
import tkinter
from tkinter import font as tkfont
from tkinter import Tk
from tkinter import filedialog
from astropy.io import fits
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import os
import numpy as np
import scipy
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy import interpolate
from scipy import signal
from astropy.io import fits
from astropy.table import Table
import sys, getopt
class zfitwin(tkinter.Tk):
""" The basic class of the widget """
def __init__(self,parent, startfile=None, z_start=0.0):
""" My constructor """
self.tk = Tk()
#set min and preferred size of main gui
self.minwinwidth=300
self.minwinheight=300
screen_width = self.winfo_screenwidth()
screen_height = self.winfo_screenheight()
self.preferwinwidth=int(screen_width*0.8)
self.preferwinheight=int(screen_height*0.8)
self.minsize(width=self.minwinwidth, height=self.minwinheight)
self.geometry("{}x{}".format(self.preferwinwidth,self.preferwinheight))
#tweak the aspect ratio of the menu and data gui
self.menuaspect=[1,0.24] #Ruari 24/05 fixes bug where different resolutions cause the menu to be cut off
self.dataaspect=[1,1-0.24] #Ruari 24/05 fixes bug where different resolutions cause the menu to be cut off
self.dpi=80
#find exec dir
self.execdir=__file__.split('zfit.py')[0]
if(len(self.execdir)==0):
self.execdir='./'
#Fiddle with font
default_font = tkfont.nametofont("TkDefaultFont")
scalefont = int(screen_height/1080.0*14)
default_font.configure(size=scalefont)
#init gui frame
self.initialize(startfile, z_start)
def initialize(self, startfile, z_start):
""" This init the basic gui """
#create a menu frame
self.menuframe=tkinter.Frame(self,width=int(self.preferwinwidth*self.menuaspect[0]),
height=int(self.preferwinheight*self.menuaspect[1]))
self.menuframe.grid_propagate(0)
self.menuframe.grid()
#create a data frame
self.dataframe=tkinter.Frame(self,width=int(self.preferwinwidth*self.dataaspect[0]),
height=int(self.preferwinheight*self.dataaspect[1]))
self.dataframe.grid_propagate(0)
self.dataframe.grid()
#stick the 2D image in a separate window
self.imgframe=tkinter.Toplevel(width=600,height=600)
#update for later use of units
self.update()
#now initialise the menu frame
self.init_menuframe()
#now initialise the data frame
self.init_dataframe(startfile)
#If zstart exists show the lines automatically
if z_start != 0.0:
self.displaylines()
self.shwlinstate.set(1)
self.redshiftline.set("{}".format(z_start))
def init_menuframe(self):
""" This init the menu specific part """
#exit button
self.menu_exit = tkinter.Button(self.menuframe,text=u"EXIT",command=self.OnExit)
self.menu_exit.grid(column=0,row=0)
#save button
self.menu_save = tkinter.Button(self.menuframe,text=u"Save",command=self.OnSave)
self.menu_save.grid(column=0,row=1)
#choice of spectra
self.menu_select = tkinter.Button(self.menuframe,text=u"Open Spectrum",
command=self.SelectFile)
self.menu_select.grid(column=0,row=2)
#current spectrum
self.currspec=tkinter.StringVar()
self.currspec.set('Spect: Demo')
self.current=tkinter.Label(self.menuframe,textvariable = self.currspec)
self.current.grid(column=0,row=3)
self.mouse_position=tkinter.StringVar()
self.mouse_position.set('Mouse:(None,None)')
self.mouse_position_w=tkinter.Label(self.menuframe,textvariable = self.mouse_position)
self.mouse_position_w.grid(column=0,row=4,columnspan=3)
#Message window
self.generic_message=tkinter.StringVar()
self.generic_message.set('zfit-> Ready to go!')
self.generic_message_w=tkinter.Label(self.menuframe,textvariable = self.generic_message)
self.generic_message_w.grid(column=5,row=3,columnspan=3)
#line control stuff
self.init_linecontrol()
#templates control stuff
self.init_templcontrol()
def init_dataframe(self, startfile):
""" This init the data specific part """
#Work out the geometry of the different data parts
#canvas for spectrum ...
self.pltspec_width=self.dataframe.winfo_width()
self.pltspec_height=int(self.dataframe.winfo_height()*0.6)
#canvas for twod spec
self.twodspc_width=self.dataframe.winfo_width()
self.twodspc_height=int((self.dataframe.winfo_height()-self.pltspec_height)*0.6)
#canvas for twod err
self.twoderr_width=self.dataframe.winfo_width()
self.twoderr_height=int((self.dataframe.winfo_height()-self.pltspec_height)*0.5)
#work out dimensions for twod image
self.twodimg_width=self.imgframe.winfo_width()
self.twodimg_height=self.imgframe.winfo_height()
#now open with default spectrum and plot
#self.filename=os.path.abspath(self.execdir)+"/test_spectrum.fits" RUari Jul 17 17
if startfile==None:
self.filename=os.path.abspath(self.execdir)+"/test_spectrum.fits"
else:
self.filename=startfile
self.currspec.set('Spect: '+startfile)
self.fits=fits.open(self.filename)
#unpack
self.fitwav1d=self.fits[2].data
self.fitspe1d=self.fits[0].data
self.fitspe1d_original=np.copy(self.fitspe1d)
self.fiterr1d=self.fits[1].data
self.fitspe2d=self.fits[4].data
self.fiterr2d=self.fits[5].data
self.fitimg=self.fits[6].data
#load sky model and normalise to source flux
skyspe=fits.open('{}/templates/sky/SKY_SPECTRUM_0001.fits'.format(self.execdir))
skycnt=fits.open('{}/templates/sky/SKY_CONTINUUM_0001.fits'.format(self.execdir))
#compute continuum subtracted sky model
self.wavesky=np.array(skyspe[1].data['LAMBDA'])
cont_resampled=interp1d(skycnt[1].data['LAMBDA'],skycnt[1].data['FLUX'],bounds_error=False,fill_value=0)(skyspe[1].data['LAMBDA'])
self.fluxsky=np.array(skyspe[1].data['DATA'])-cont_resampled
self.fluxsky=self.fluxsky/np.max(self.fluxsky)*0.5*np.max(self.fitspe1d)
self.drawdata()
#set tmpfitxcorr to None to avoid error or later init
self.tmpfitxcorr=None
#set smoothwindow
self.smooth=3
def init_linecontrol(self):
""" This controls operation with emission lines """
#just say what it is
linelabel=tkinter.Label(self.menuframe,text = "Emission lines")
linelabel.grid(column=1,row=0,columnspan=2)
#drop down menu to select emission lines
llab = tkinter.Label(self.menuframe, text="Select Lines: ")
llab.grid(column=1,row=1)
self.linelist = tkinter.StringVar(self.menuframe)
self.linelist.set("gal_vac") # default value
self.lineselect = tkinter.OptionMenu(self.menuframe, self.linelist,"gal_vac","gal_air","lbg","lls","tell")
self.lineselect.grid(column=2,row=1)
#set the linelist in trace state
self.linelist.trace("w",self.displaylines)
#line redshift window
zlab = tkinter.Label(self.menuframe, text="z = ")
zlab.grid(column=1,row=2)
self.redshiftline = tkinter.StringVar()
self.redlinecntr = tkinter.Entry(self.menuframe,textvariable=self.redshiftline)
self.redlinecntr.grid(column=2,row=2)
self.redshiftline.set("0.0000")
#set the redshift in a trace state
self.redshiftline.trace("w",self.displaylines)
#display lines
self.shwlinstate=tkinter.IntVar()
self.lineshow = tkinter.Checkbutton(self.menuframe, text="Show Lines",
variable=self.shwlinstate,command=self.displaylines)
self.lineshow.grid(column=1,row=3)
#fit lines
self.line_fit = tkinter.Button(self.menuframe,text=u"FitLines",command=self.fitlines)
self.line_fit.grid(column=2,row=3)
def init_templcontrol(self):
""" Control the options for template fitting """
#just say what it is
templabel=tkinter.Label(self.menuframe,text = "Templates")
templabel.grid(column=3,row=0,columnspan=4)
#drop down menu to select template family
llab = tkinter.Label(self.menuframe, text="Pick template: ")
llab.grid(column=3,row=1)
self.tempgroup= tkinter.StringVar(self.menuframe)
self.tempgroup.set("Select")
self.tempselect = tkinter.OptionMenu(self.menuframe,self.tempgroup,"kinney","lbgs","sdss")
self.tempselect.grid(column=4,row=1)
self.tempgroup.trace("w",self.loadtemplate)
#just say what it is
self.currenttemplate=tkinter.StringVar(self.menuframe)
self.currenttemplate.set("Current: None")
self.tempchoice=tkinter.Label(self.menuframe,textvariable = self.currenttemplate)
self.tempchoice.grid(column=5,row=1,columnspan=2)
#Do not use trace for template, as these are expensive to compute
#template redshift window
zlab = tkinter.Label(self.menuframe, text="z = ")
zlab.grid(column=3,row=2)
self.redshifttemp = tkinter.StringVar()
self.redtempcntr = tkinter.Entry(self.menuframe,textvariable=self.redshifttemp)
self.redtempcntr.grid(column=4,row=2)
self.redshifttemp.set("0.0000")
#rmag window
rmg = tkinter.Label(self.menuframe, text="flux = ")
rmg.grid(column=3,row=3)
self.magtemp = tkinter.StringVar()
self.magtemcntr = tkinter.Entry(self.menuframe,textvariable=self.magtemp)
self.magtemcntr.grid(column=4,row=3)
self.magtemp.set("1.00")
#display template
self.shwtempstate=tkinter.IntVar()
self.tempshow = tkinter.Button(self.menuframe,text="Show Template",command=self.displaytemplate)
self.tempshow.grid(column=3,row=4)
self.temphide = tkinter.Button(self.menuframe,text="Hide Template",command=self.hidetemplate)
self.temphide.grid(column=4,row=4)
#fit template
self.template_fit = tkinter.Button(self.menuframe,text=u"FitTemplate",command=self.fittemplate)
self.template_fit.grid(column=5,row=2)
#toggle sky
self.shwskystate=tkinter.IntVar()
self.template_sky=tkinter.Button(self.menuframe,text=u"Sky On/Off",command=self.togglesky)
self.template_sky.grid(column=5,row=4)
def OnExit(self):
""" Quit all on exit """
self.fits.close()
self.quit()
self.destroy()
def OnSave(self):
""" Save screen """
print('Placeholder')
def SelectFile(self):
""" Select and open file as one wishes """
#select file
self.filename=filedialog.askopenfilename(initialdir='./')
#update name
self.currspec.set("Spec: "+self.filename.split("/")[-1])
#close old and reopen
self.fits.close()
self.fits=fits.open(self.filename)
#unpack
self.fitwav1d=self.fits[2].data
self.fitspe1d=self.fits[0].data
self.fitspe1d_original=np.copy(self.fits[0].data)
self.fiterr1d=self.fits[1].data
self.fitspe2d=self.fits[4].data
self.fiterr2d=self.fits[5].data
self.fitimg=self.fits[6].data
#redraw
self.drawdata(refresh=True)
def drawdata(self,refresh=False):
"""
Once the spectrum is set, populate the data part of the gui
refresh -> True, wipe all canvas before redrawing
"""
if(refresh):
#now destroy all data canvas
self.twodimagePlot.get_tk_widget().destroy()
self.spectrumPlot.get_tk_widget().destroy()
self.twodspcPlot.get_tk_widget().destroy()
self.twoderrPlot.get_tk_widget().destroy()
#refresh 2D image
self.init_twodimage()
#refresh the spectrum
self.init_spectrum()
#refresh 2D spec
self.init_twodspec()
#refresh 2D err
self.init_twoderr()
def init_twodimage(self):
""" Draw the 2D image """
#create properties for this plot
self.twodimagePlot_prop={}
#figure stuff
self.twodimagePlot_prop["figure"] = Figure(figsize=(self.twodimg_width/self.dpi,self.twodimg_height/self.dpi),
dpi=self.dpi)
self.twodimagePlot_prop["axis"] = self.twodimagePlot_prop["figure"].add_subplot(111)
#call plotting routine
self.update_twodimage()
#send it to canvas - connect event
self.twodimagePlot = FigureCanvasTkAgg(self.twodimagePlot_prop["figure"],master=self.imgframe)
#Draw is required in matplotlib > 2.2, show is kept for legacy only
try:
self.twodimagePlot.draw()
except:
self.twodimagePlot.show()
#need to set tight layout after showing
self.twodimagePlot_prop["figure"].tight_layout()
#enable event on click
self.twodimagePlot.mpl_connect("button_press_event", self.pressbutton)
self.twodimagePlot.mpl_connect("key_press_event", self.presskey)
self.twodimagePlot.get_tk_widget().grid()
def update_twodimage(self,update=False):
"""
Code that updates the 2D image
Update = True, redraw
"""
self.twodimagePlot_prop["image"] =self.twodimagePlot_prop["axis"].imshow(self.fitimg,origin='lower',aspect='auto')
self.twodimagePlot_prop["image"].set_cmap('hot')
#self.twodimagePlot_prop["axis"].set_xlabel('Pix')
#self.twodimagePlot_prop["axis"].set_ylabel('Pix')
def init_spectrum(self):
""" Draw the spectrum """
#create properties for this plot
self.spectrumPlot_prop={}
self.spectrumPlot_prop["xmin"]=np.min(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["xmax"]=np.max(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["ymin"]=np.min(np.nan_to_num(self.fitspe1d))
self.spectrumPlot_prop["ymax"]=np.max(np.nan_to_num(self.fitspe1d))
#figure stuff
self.spectrumPlot_prop["figure"]= Figure(figsize=(0.99*self.pltspec_width/self.dpi,0.96*self.pltspec_height/self.dpi),
dpi=self.dpi)
self.spectrumPlot_prop["axis"]= self.spectrumPlot_prop["figure"].add_subplot(111)
#call plotting routine
self.update_spectrum()
#send it to canvas
self.spectrumPlot = FigureCanvasTkAgg(self.spectrumPlot_prop["figure"],master=self.dataframe)
try:
self.spectrumPlot.draw()
except:
self.spectrumPlot.show()
#enable event on click
self.spectrumPlot_prop["figure"].tight_layout()
self.spectrumPlot.mpl_connect("button_press_event", self.pressbutton)
self.spectrumPlot.mpl_connect("motion_notify_event", self.movemouse)
self.spectrumPlot.mpl_connect("key_press_event", self.presskey)
self.spectrumPlot.get_tk_widget().grid(column=0,row=0)
def update_spectrum(self,update=False):
"""
Code that updates the spectrum
Update = True, redraw
"""
if(update):
self.spectrumPlot_prop["axis"].cla()
#plot main data
self.spectrumPlot_prop["axis"].step(self.fitwav1d,self.fitspe1d,where='mid')
self.spectrumPlot_prop["axis"].step(self.fitwav1d,self.fiterr1d,color='red',\
linestyle='--',zorder=1,where='mid')
self.spectrumPlot_prop["axis"].set_xlim(self.spectrumPlot_prop["xmin"],self.spectrumPlot_prop["xmax"])
self.spectrumPlot_prop["axis"].set_ylim(self.spectrumPlot_prop["ymin"],self.spectrumPlot_prop["ymax"])
self.spectrumPlot_prop["axis"].set_xlabel('Wavelength')
#self.spectrumPlot_prop["axis"].set_ylabel('Flux')
#if needed plot sky
if(self.shwskystate.get()):
self.spectrumPlot_prop["axis"].step(self.wavesky,self.fluxsky,where='mid',color='black')
#if needed, plot lines
if(self.shwlinstate.get()):
#set redshift
try:
redsh=float(self.redshiftline.get())
except:
redsh=0.0
#loop over lines and draw
for lw,lnam in self.infoline:
#find the obs wave
lwplot=lw*(1+redsh)
if((lwplot > self.spectrumPlot_prop["xmin"]) & (lwplot < self.spectrumPlot_prop["xmax"])):
self.spectrumPlot_prop["axis"].axvline(lwplot, color='grey', linestyle='--')
self.spectrumPlot_prop["axis"].text(lwplot,self.spectrumPlot_prop["ymax"],lnam,
verticalalignment='top',rotation=90,fontsize=12)
#if needed, plot template
if(self.shwtempstate.get()):
self.spectrumPlot_prop["axis"].plot(self.fitwav1d,self.templatedata_current,color='black',zorder=3)
#plot zero line
self.spectrumPlot_prop["axis"].plot([self.spectrumPlot_prop["xmin"],self.spectrumPlot_prop["xmax"]],
[0,0],color='green',zorder=2,linestyle=':')
#finally draw
if(update):
self.spectrumPlot.draw()
def init_twodspec(self):
""" Draw the 2D spectrum """
#create properties for this plot
self.twodspcPlot_prop={}
#figure stuff
self.twodspcPlot_prop["figure"]= Figure(figsize=(0.99*self.twodspc_width/self.dpi,0.96*self.twodspc_height/self.dpi),
dpi=self.dpi)
self.twodspcPlot_prop["axis"] = self.twodspcPlot_prop["figure"].add_subplot(111)
#call plotting routine
self.update_twodspec()
#send it to canvas
self.twodspcPlot = FigureCanvasTkAgg(self.twodspcPlot_prop["figure"],master=self.dataframe)
try:
self.twodspcPlot.draw()
except:
self.twodspcPlot.show()
#enable event on click
self.twodspcPlot_prop["figure"].tight_layout()
self.twodspcPlot.mpl_connect("button_press_event", self.pressbutton)
self.twodspcPlot.mpl_connect("key_press_event", self.presskey)
self.twodspcPlot.mpl_connect("motion_notify_event", self.movemouse)
self.twodspcPlot.get_tk_widget().grid(column=0,row=1,sticky='NW')
def wavemap(self,x,pos):
""" Utility to map the pixel in 2D image to wavelegth """
#wavelength mapper
index=np.arange(0,len(self.fitwav1d))
wave=np.interp(x,index,self.fitwav1d)
'The two args are the value and tick position'
return "%.1f" % wave
def inv_wavemap(self,x):
""" Utility to map wavelegth to pixel in 2D mage """
#wavelength mapper
index=np.arange(0,len(self.fitwav1d))
pix=np.interp(x,self.fitwav1d,index,left=0,right=len(self.fitwav1d))
return pix
def update_twodspec(self,update=False):
"""
Code that updates the 2D spectrum
Update = True, redraw
"""
if(update):
self.twodspcPlot_prop["axis"].cla()
self.twodspcPlot_prop["image"]=self.twodspcPlot_prop["axis"].imshow(np.rot90(self.fitspe2d),origin='lower',aspect='auto')
self.twodspcPlot_prop["image"].set_cmap('hot')
#control level
medianlevel=np.median(np.nan_to_num(self.fitspe2d))
stdlevel=np.std(np.nan_to_num(self.fitspe2d))
self.twodspcPlot_prop["image"].set_clim(medianlevel-3.*stdlevel,medianlevel+3*stdlevel)
#wave mapper
self.twodspcPlot_prop["axis"].xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(self.wavemap))
#now set X axis as in 1d spectrum
xpixmin=self.inv_wavemap(self.spectrumPlot_prop["xmin"])
xpixmax=self.inv_wavemap(self.spectrumPlot_prop["xmax"])
#force minimum maximum
if(xpixmin == xpixmax):
xpixmin = xpixmax-1
if(xpixmax == 0):
xpixmax = 1
self.twodspcPlot_prop["axis"].set_xlim(xpixmin,xpixmax)
self.twodspcPlot_prop["axis"].set_xlabel('Wavelength')
if(update):
self.twodspcPlot.draw()
def init_twoderr(self):
""" Draw the 2D error """
#create properties for this plot
self.twoderrPlot_prop={}
#figure stuff
#self.twoderr.grid(column=1,row=2,sticky='NW')
self.twoderrPlot_prop['figure'] = Figure(figsize=(0.99*self.twoderr_width/self.dpi,0.96*self.twoderr_height/self.dpi),
dpi=self.dpi)
self.twoderrPlot_prop['axis'] = self.twoderrPlot_prop['figure'].add_subplot(111)
#call plotting routine
self.update_twoderr()
#send it to canvas
self.twoderrPlot = FigureCanvasTkAgg(self.twoderrPlot_prop['figure'],master=self.dataframe)
try:
self.twoderrPlot.draw()
except:
self.twoderrPlot.show()
#enable event on click
self.twoderrPlot_prop['figure'].tight_layout()
self.twoderrPlot.mpl_connect("button_press_event", self.pressbutton)
self.twoderrPlot.mpl_connect("key_press_event", self.presskey)
self.twoderrPlot.mpl_connect("motion_notify_event", self.movemouse)
self.twoderrPlot.get_tk_widget().grid(column=0,row=2,sticky='NW')
def update_twoderr(self,update=False):
"""
Code that updates the 2D error
Update = True, redraw
"""
if(update):
self.twoderrPlot_prop["axis"].cla()
self.twoderrPlot_prop['image'] =self.twoderrPlot_prop['axis'].imshow(np.rot90(self.fiterr2d),origin='lower',aspect='auto')
self.twoderrPlot_prop['image'].set_cmap('hot')
#control level
medianlevel=np.median(np.nan_to_num(self.fiterr2d))
stdlevel=np.std(np.nan_to_num(self.fiterr2d))
self.twoderrPlot_prop["image"].set_clim(medianlevel-3.*stdlevel,medianlevel+3*stdlevel)
#wave mapper
self.twoderrPlot_prop["axis"].xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(self.wavemap))
#now set X axis as in 1d spectrum
xpixmin=self.inv_wavemap(self.spectrumPlot_prop["xmin"])
xpixmax=self.inv_wavemap(self.spectrumPlot_prop["xmax"])
#force minimum maximum
if(xpixmin == xpixmax):
xpixmin = xpixmax-1
if(xpixmax == 0):
xpixmax = 1
self.twoderrPlot_prop["axis"].set_xlim(xpixmin,xpixmax)
self.twoderrPlot_prop["axis"].set_xlabel('Wavelength')
if(update):
self.twoderrPlot.draw()
def displaylines(self,*args):
""" Display the line list by refreshing plot in update state """
#first parse the line lists
linefile=self.execdir+"/lines/"+self.linelist.get()+".lst"
self.infoline = Table.read(linefile, format='ascii.basic')
#self.infoline=np.loadtxt(linefile, dtype={'names': ('wave', 'tag'),
# 'formats': ('f4', 'S4')})
#refresh plot
self.update_spectrum(update=True)
def loadtemplate(self,*args):
""" Load template from disk and preselect some
useful default
"""
#if so, start dialogue to pick the desired template
self.picktemplate=filedialog.askopenfilename(initialdir='{}/templates/{}'.format(self.execdir,self.tempgroup.get()))
#set current template
self.currenttemplate.set("Current: "+self.picktemplate.split("/")[-1])
#load current template
if('sdss' in self.tempgroup.get()):
#load fits
fitstemp=fits.open(self.picktemplate)
#grab flux
self.templatedata={'flux':fitstemp[0].data[0,:]}
#construct wave
waveinx=np.arange(0,len(self.templatedata['flux']),1)
wavevac=10**(waveinx*1.*fitstemp[0].header['COEFF1']+1.*fitstemp[0].header['COEFF0'])
##go to air
#self.templatedata['wave']= wavevac/(1.0+2.735182e-4+131.4182/wavevac**2+2.76249e8/wavevac**4)
#remain in vac
self.templatedata['wave']= wavevac
else:
#load text
#self.templatedata=np.loadtxt(self.picktemplate, dtype={'names': ('wave', 'flux'),
# 'formats': ('f10', 'f10')},usecols=(0,1))
self.templatedata = Table.read(self.picktemplate, format='ascii.basic')
#set sensible pick in redshift and adjust data as needed
if('lbg' in self.tempgroup.get()):
self.redshifttemp.set("3.000")
elif('kinney' in self.tempgroup.get()):
self.templatedata['flux']=self.templatedata['flux']/1e-14
elif('sdss' in self.tempgroup.get()):
self.templatedata['flux']=self.templatedata['flux']*100.
else:
self.redshifttemp.set("0.000")
def displaytemplate(self,*args):
""" Compute and display template """
self.shwtempstate.set(1)
#compute template given current values
self.adapttemplate()
#refresh plot
self.update_spectrum(update=True)
def hidetemplate(self,*args):
""" Hide template """
self.shwtempstate.set(0)
#refresh plot
self.update_spectrum(update=True)
def adapttemplate(self):
""" Interpolate a template over the data """
#redshift factor
redhfactor=(1+float(self.redshifttemp.get()))
#now construct interpolation
thisw=self.templatedata['wave']*redhfactor
thisf=self.templatedata['flux']
intflx = interp1d(thisw,thisf,kind='linear',bounds_error=False,fill_value=0.0)
#apply normalisation
self.templatedata_current=intflx(self.fitwav1d)*float(self.magtemp.get())
def togglesky(self,*args):
""" Switch on/off sky """
if(self.shwskystate.get()):
self.shwskystate.set(0)
else:
self.shwskystate.set(1)
#refresh plot
self.update_spectrum(update=True)
def fitlines(self):
""" Fit the line list """
#loop over lines inside spectrum
#launch a new window
self.lnfit=tkinter.Toplevel(self.tk)
#add a display
fig=Figure(figsize=(self.preferwinwidth/self.dpi,self.preferwinheight/self.dpi),dpi=self.dpi)
#pick z
try:
redsh=float(self.redshiftline.get())
except:
redsh=0.0
lines_good_wave_rest=[]
lines_good_wave_obs=[]
lines_good_name=[]
for lw,lnam in self.infoline:
lwplot=lw*(1+redsh)
if((lwplot > min(self.fitwav1d)+8) & (lwplot < max(self.fitwav1d)-8)):
#do a boxchart in 6A bin to see if line exists
inside=np.where((self.fitwav1d > lwplot-4)& (self.fitwav1d < lwplot+4))
continuum=np.where(((self.fitwav1d > lwplot-20)& (self.fitwav1d < lwplot-10)) |
((self.fitwav1d > lwplot+10)& (self.fitwav1d < lwplot+20)))
clevel=np.median(self.fitspe1d[continuum])
flux=np.sum((self.fitspe1d[inside]-clevel))
noise=np.sqrt(np.sum(self.fiterr1d[inside]**2))
#cut in SN
if(flux/noise > 2):
#stash
lines_good_wave_rest.append(lw)
lines_good_wave_obs.append(lwplot)
lines_good_name.append(lnam)
#generate a 4x? grid of plots
nlines=len(lines_good_wave_rest)
ncol=4
nraw=int(nlines/ncol)
if(nlines%ncol > 0):
nraw=nraw+1
czall=[]
#loop on good stuff for fits
for ii in range(nlines):
#select region to fit
fitwindow=np.where((self.fitwav1d > lines_good_wave_obs[ii]-10) & (self.fitwav1d < lines_good_wave_obs[ii]+10))
continuum=np.where(((self.fitwav1d > lines_good_wave_obs[ii]-20)& (self.fitwav1d < lines_good_wave_obs[ii]-10)) |
((self.fitwav1d > lines_good_wave_obs[ii]+10)& (self.fitwav1d < lines_good_wave_obs[ii]+20)))
clevel=np.median(self.fitspe1d[continuum])
p0=np.array([10.,1.*float(lines_good_wave_obs[ii]),2.,0.])
#fit a Gaussian
yval=np.nan_to_num(self.fitspe1d[fitwindow]-clevel)
yerr=np.nan_to_num(self.fiterr1d[fitwindow]*1.)
xval=np.nan_to_num(self.fitwav1d[fitwindow]*1.)
popt,pcov=curve_fit(self.gauss,xval,yval,p0=p0, sigma=yerr)
perr = np.sqrt(np.diag(pcov))
#eval fit
xg=np.arange(min(xval)-2,max(xval)+2,0.2)
fitg=self.gauss(xg,*popt)
#grab fits
czfit=popt[1]/lines_good_wave_rest[ii]-1.
czfiterr=perr[1]/lines_good_wave_rest[ii]
czall.append(czfit)
#display
ax = fig.add_subplot(nraw,ncol,ii+1)
ax.plot(xval,yval)
ax.plot(xval,yerr,color='red',linestyle="--",zorder=1)
ax.plot(xg,fitg,color='black',linestyle=":")
ax.set_title("{0}{1} z = {2:.6} +/- {3:.5}".format(lines_good_name[ii],int(lines_good_wave_rest[ii]),czfit,czfiterr))
#send message to user and reset redshift
bestz=np.median(np.array(czall))
bestez=np.std(np.array(czall))
self.generic_message.set(r'zfit-> Best fit is {:6.5f}+/-{:6.5f}'.format(bestz,bestez))
self.redshiftline.set(bestz)
#send figure to canvas
self.linefitplot = FigureCanvasTkAgg(fig,master=self.lnfit)
try:
self.linefitplot.draw()
except:
self.linefitplot.show()
#fig.tight_layout()
self.linefitplot.get_tk_widget().grid()
def fittemplate(self):
""" Fit the template """
#init the template correlation
realdata={'wave':self.fitwav1d,'flux':self.fitspe1d,'error':self.fiterr1d}
##Testing sequence
#realdata={'wave':self.templatedata['wave']*(1+0.4329),'flux':self.templatedata['flux'],
# 'error':self.templatedata['flux']}
print('Computing correlation... be patient!')
#find the wavelength range covering the min/max extent
absmin=np.min([np.min(self.templatedata['wave']),np.min(realdata['wave'])])
absmax=np.max([np.max(self.templatedata['wave']),np.max(realdata['wave'])])
#resample in log
deltal=5e-4
lnwave=np.arange(np.log(absmin),np.log(absmax),deltal)
#resample with spline (s controls the smoothing)
x=np.nan_to_num(self.templatedata['wave'])
y=np.nan_to_num(self.templatedata['flux'])
resamp_templ=interpolate.splrep(np.log(x),y,s=0)
x=np.nan_to_num(realdata['wave'])
y=np.nan_to_num(realdata['flux'])
resamp_real=interpolate.splrep(np.log(x),y,s=0)
#put everything on the same array - zero padding the extrapolation
flux_templ=interpolate.splev(lnwave,resamp_templ,der=0,ext=1)
flux_real=interpolate.splev(lnwave,resamp_real,der=0,ext=1)
#masking strong sky lines
mask=np.where((lnwave > np.log(5569.)) & (lnwave < np.log(5584.)))
flux_real[mask]=0
mask=np.where((lnwave > np.log(6292.)) & (lnwave < np.log(6308.)))
flux_real[mask]=0
mask=np.where((lnwave > np.log(6356.)) & (lnwave < np.log(6369.)))
flux_real[mask]=0
mask=np.where((lnwave > 8.6752) & (lnwave < 8.6860))
flux_real[mask]=0
mask=np.where((lnwave > 8.8274) & (lnwave < 8.8525))
flux_real[mask]=0
mask=np.where((lnwave > 8.8862) & (lnwave < np.log(12000.)))
flux_real[mask]=0
#correlate
xcorr=np.correlate(flux_real,flux_templ,mode='full')
#find the peak in the second half in units of redshift
indxmax=np.argmax(xcorr)-len(xcorr)//2
peakz=np.exp(indxmax*deltal)-1
#print peakz
#find the reshift axis
indxarr=np.arange(0,len(lnwave),1)
self.xcorr_redax=np.exp(indxarr*deltal)-1
self.xcorr_xcorr=xcorr[len(xcorr)//2:]
self.xcorr_redshift=peakz
#set the redshift in template window
self.redshifttemp.set("{}".format(self.xcorr_redshift))
#trigger display options
#launch a new window
self.tmlfit=tkinter.Toplevel(self.tk)
#add xcorr to display
#create properties for this plot
self.tmpfitxcorr_prop={}
self.tmpfitxcorr_prop["xmin"]=np.min(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["xmax"]=np.max(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["ymin"]=np.min(np.nan_to_num(self.xcorr_xcorr))
self.tmpfitxcorr_prop["ymax"]=np.max(np.nan_to_num(self.xcorr_xcorr))
self.tmpfitxcorr_prop["figure"]=Figure(figsize=(self.preferwinwidth/self.dpi*0.75,self.preferwinheight/self.dpi*0.75),dpi=self.dpi)
self.tmpfitxcorr_prop["axis"]= self.tmpfitxcorr_prop["figure"].add_subplot(111)
#call plotting routine
self.update_xcorrplot()
#send it to canvas
self.tmpfitxcorr = FigureCanvasTkAgg(self.tmpfitxcorr_prop["figure"],master=self.tmlfit)
#Draw is required in matplotlib > 2.2, show is kept for legacy only
try:
self.tmpfitxcorr.draw()
except:
self.tmpfitxcorr.show()
#enable event on click
self.tmpfitxcorr_prop["figure"].tight_layout()
self.tmpfitxcorr.mpl_connect("button_press_event", self.pressbutton)
self.tmpfitxcorr.mpl_connect("key_press_event", self.presskey)
self.tmpfitxcorr.get_tk_widget().grid(column=0,row=0)
def update_xcorrplot(self,update=False):
""" Update plot for xcorrplot """
if(update):
self.tmpfitxcorr_prop["axis"].cla()
#plot main data
self.tmpfitxcorr_prop["axis"].plot(self.xcorr_redax,self.xcorr_xcorr)
self.tmpfitxcorr_prop["axis"].axvline(self.xcorr_redshift, color='grey', linestyle='--')
self.tmpfitxcorr_prop["axis"].set_xlim(self.tmpfitxcorr_prop["xmin"],self.tmpfitxcorr_prop["xmax"])
self.tmpfitxcorr_prop["axis"].set_ylim(self.tmpfitxcorr_prop["ymin"],self.tmpfitxcorr_prop["ymax"])
self.tmpfitxcorr_prop["axis"].set_xlabel('Redshift')
self.tmpfitxcorr_prop["axis"].set_ylabel('XCORR')
#finally draw
if(update):
self.tmpfitxcorr.draw()
def movemouse(self,event):
""" Do stuff when mouse moves """
if(event.canvas == self.spectrumPlot):
self.mouse_position.set('Mouse:({},{})'.format(event.xdata,event.ydata))
elif(event.canvas == self.twodspcPlot):
try:
self.mouse_position.set('Mouse:({},{})'.format(self.wavemap(event.xdata,0.0),event.ydata))
except:
self.mouse_position.set('Mouse:(None,None)')
elif(event.canvas == self.twoderrPlot):
try:
self.mouse_position.set('Mouse:({},{})'.format(self.wavemap(event.xdata,0.0),event.ydata))
except:
self.mouse_position.set('Mouse:(None,None)')
def pressbutton(self,event):
""" Do stuff when data plot is pressed with mouse """
#this is how to redirect events
if(event.canvas == self.twoderrPlot):
#set focus
self.twoderrPlot.get_tk_widget().focus_set()
if(event.canvas == self.twodspcPlot):
#set focus
self.twodspcPlot.get_tk_widget().focus_set()
if(event.canvas == self.twodimagePlot):
#set focus
self.twodimagePlot.get_tk_widget().focus_set()
if(event.canvas == self.spectrumPlot):
#set focus
self.spectrumPlot.get_tk_widget().focus_set()
#for right click, trigger line selector
if(event.button == 3):
self.lineselectorwidget(event)
if(event.canvas == self.tmpfitxcorr):
#set focus
self.tmpfitxcorr.get_tk_widget().focus_set()
def presskey(self,event):
""" Do stuff when data plot is pressed with key """
#quit on q
if(event.key == "q"):
self.OnExit()
#keyboard event when focus on spectrum
if(event.canvas == self.spectrumPlot):
self.spectrumPlot_events(event)
#keyboard event when focus on xcorr
if(event.canvas == self.tmpfitxcorr):
self.tmpfitxcorr_events(event)
def tmpfitxcorr_events(self,event):
""" Handle events of xcorr plot """
#set bottom plot
if(event.key == "b"):
self.tmpfitxcorr_prop["ymin"]=event.ydata
self.update_xcorrplot(update=True)
#set top plot
if(event.key == "t"):
self.tmpfitxcorr_prop["ymax"]=event.ydata
self.update_xcorrplot(update=True)
#set left plot
if(event.key == "l"):
self.tmpfitxcorr_prop["xmin"]=event.xdata
self.update_xcorrplot(update=True)
#set right plot
if(event.key == "r"):
self.tmpfitxcorr_prop["xmax"]=event.xdata
self.update_xcorrplot(update=True)
#zoom in
if(event.key == "i"):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#zoom in by factor of 2
currentwidth=currentwidth*0.5
#zoom around selected wave
self.tmpfitxcorr_prop["xmin"]=event.xdata-currentwidth/2.
self.tmpfitxcorr_prop["xmax"]=event.xdata+currentwidth/2.
self.update_xcorrplot(update=True)
#zoom out
if(event.key == "o"):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#zoom out by factor of 2
currentwidth=currentwidth*2
#zoom around selected wave
self.tmpfitxcorr_prop["xmin"]=event.xdata-currentwidth/2.
self.tmpfitxcorr_prop["xmax"]=event.xdata+currentwidth/2.
self.update_xcorrplot(update=True)
#pan left
if(event.key == "["):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#pan left
self.tmpfitxcorr_prop["xmin"]=self.tmpfitxcorr_prop["xmin"]-currentwidth/2
self.tmpfitxcorr_prop["xmax"]=self.tmpfitxcorr_prop["xmax"]-currentwidth/2
self.update_xcorrplot(update=True)
#pan right
if(event.key == "]"):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#pan right
self.tmpfitxcorr_prop["xmin"]=self.tmpfitxcorr_prop["xmin"]+currentwidth/2
self.tmpfitxcorr_prop["xmax"]=self.tmpfitxcorr_prop["xmax"]+currentwidth/2
self.update_xcorrplot(update=True)
#set reset plot
if(event.key == "W"):
self.tmpfitxcorr_prop["xmin"]=np.min(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["xmax"]=np.max(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["ymin"]=np.min(np.nan_to_num(self.xcorr_xcorr))
self.tmpfitxcorr_prop["ymax"]=np.max(np.nan_to_num(self.xcorr_xcorr))
self.update_xcorrplot(update=True)
#mark new redshift
if(event.key == "z"):
#update relevent info
self.xcorr_redshift=event.xdata
self.redshifttemp.set("{}".format(self.xcorr_redshift))
#refresh plot
self.update_xcorrplot(update=True)
#display template
self.displaytemplate()
def spectrumPlot_events(self,event):
""" Handle events of spectrum plot """
#set bottom plot
if(event.key == "b"):
self.spectrumPlot_prop["ymin"]=event.ydata
self.update_spectrum(update=True)
#set top plot
if(event.key == "t"):
self.spectrumPlot_prop["ymax"]=event.ydata
self.update_spectrum(update=True)
#set left plot
if(event.key == "l"):
self.spectrumPlot_prop["xmin"]=event.xdata
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#set right plot
if(event.key == "r"):
self.spectrumPlot_prop["xmax"]=event.xdata
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#zoom in
if(event.key == "i"):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#zoom in by factor of 2
currentwidth=currentwidth*0.5
#zoom around selected wave
self.spectrumPlot_prop["xmin"]=event.xdata-currentwidth/2.
self.spectrumPlot_prop["xmax"]=event.xdata+currentwidth/2.
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#zoom out
if(event.key == "o"):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#zoom out by factor of 2
currentwidth=currentwidth*2
#zoom around selected wave
self.spectrumPlot_prop["xmin"]=event.xdata-currentwidth/2.
self.spectrumPlot_prop["xmax"]=event.xdata+currentwidth/2.
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#pan left
if(event.key == "["):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#pan left
self.spectrumPlot_prop["xmin"]=self.spectrumPlot_prop["xmin"]-currentwidth/2
self.spectrumPlot_prop["xmax"]=self.spectrumPlot_prop["xmax"]-currentwidth/2
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#pan right
if(event.key == "]"):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#pan right
self.spectrumPlot_prop["xmin"]=self.spectrumPlot_prop["xmin"]+currentwidth/2
self.spectrumPlot_prop["xmax"]=self.spectrumPlot_prop["xmax"]+currentwidth/2
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#set reset plot
if(event.key == "W"):
self.spectrumPlot_prop["xmin"]=np.min(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["xmax"]=np.max(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["ymin"]=np.min(np.nan_to_num(self.fitspe1d))
self.spectrumPlot_prop["ymax"]=np.max(np.nan_to_num(self.fitspe1d))
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#smooth plot
if(event.key == "S"):
self.fitspe1d=signal.medfilt(self.fitspe1d,self.smooth)
self.smooth=self.smooth+2
self.update_spectrum(update=True)
#unsmooth smooth
if(event.key == "U"):
self.fitspe1d=self.fitspe1d_original
self.smooth=3
self.update_spectrum(update=True)
def lineselectorwidget(self,event):
""" Control what happens when right-click on 1D spectrum
- trigger construction of line list selector
"""
#refresh lines as needed
self.displaylines()
#launch a new window
self.lnsel=tkinter.Toplevel(self.tk)
#pick z
try:
redsh=float(self.redshiftline.get())
except:
redsh=0.0
#create line buttons for those visibles
self.wlineselect = tkinter.DoubleVar()
self.wlinepos = event.xdata
i=0
for lw,lnam in self.infoline:
lwplot=lw*(1+redsh)
tkinter.Radiobutton(self.lnsel, text=lnam+"{}".format(int(lw)),
variable=self.wlineselect, value=lw,
command=self.pickedline).grid(row = i%30, column = i//30, sticky = "NWSE")
i=i+1
self.tk.wait_window(self.lnsel)
def pickedline(self):
""" If one pick a line, find redshift """
#find the redshift
redshift=self.wlinepos/self.wlineselect.get()-1
#set it - auto trigger refresh
self.shwlinstate.set(1)
self.redshiftline.set("{}".format(redshift))
#destroy window
self.lnsel.destroy()
def gauss(self,x, *p):
""" Gaussian model for line fit """
A, mu, sigma, zero = p
gg=A*np.exp(-1.*(x-mu)*(x-mu)/(2.*sigma*sigma))+zero
return gg
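# Hedged, standalone sketch (illustration only; not called by the GUI class
# above): the per-line measurement in fitlines() amounts to estimating a local
# continuum from sidebands, fitting a Gaussian to the line, and converting the
# fitted centroid into a redshift. Names and window widths below mirror the
# method above but are assumptions for this sketch.
def _fit_single_line_sketch(wave, flux, error, rest_wave, guess_z):
    """Return (z, z_err) for one emission line expected near rest_wave*(1+guess_z)."""
    obs_wave = rest_wave * (1.0 + guess_z)
    window = np.where((wave > obs_wave - 10) & (wave < obs_wave + 10))
    sideband = np.where(((wave > obs_wave - 20) & (wave < obs_wave - 10)) |
                        ((wave > obs_wave + 10) & (wave < obs_wave + 20)))
    continuum = np.median(flux[sideband])
    def gauss(x, A, mu, sigma, zero):
        # same model as zfitwin.gauss: amplitude, centre, width, zero level
        return A * np.exp(-(x - mu) ** 2 / (2.0 * sigma ** 2)) + zero
    p0 = [10.0, obs_wave, 2.0, 0.0]
    popt, pcov = curve_fit(gauss, wave[window], flux[window] - continuum,
                           p0=p0, sigma=error[window])
    perr = np.sqrt(np.diag(pcov))
    return popt[1] / rest_wave - 1.0, perr[1] / rest_wave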
def zfit(startfile=None, z_start=0.0):
""" Mains that runs the gui """
app = zfitwin(None, startfile=startfile, z_start=z_start)
app.title('Fit your redshift!')
app.mainloop()
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:],"i:z:",["ifile=","redshift="])
startfile = None
z_start = 0.0
#cube_range = None
for opt, arg in opts:
if opt in ("-i", "--ifile"):
startfile = arg
elif opt in ("-z", "--redshift"):
z_start = float(arg)
#elif opt in ("-c", "--cube"):
# cube = float(arg)
zfit(startfile=startfile, z_start=z_start)
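# Hedged sketch (illustration only, separate from the GUI): the core of
# fittemplate() is a cross-correlation on a common log-wavelength grid, where a
# shift of one pixel corresponds to a constant factor in (1 + z). The helper
# below redoes that estimate for plain arrays with linear interpolation instead
# of splines and no sky masking; names and the log step are assumptions.
def _xcorr_redshift_sketch(temp_wave, temp_flux, obs_wave, obs_flux, dlnl=5e-4):
    """Estimate z by cross-correlating template and observed spectra in ln(lambda)."""
    lo = min(np.min(temp_wave), np.min(obs_wave))
    hi = max(np.max(temp_wave), np.max(obs_wave))
    lnwave = np.arange(np.log(lo), np.log(hi), dlnl)
    # resample both spectra onto the common log grid, zero-padding outside
    f_temp = np.interp(lnwave, np.log(temp_wave), temp_flux, left=0.0, right=0.0)
    f_obs = np.interp(lnwave, np.log(obs_wave), obs_flux, left=0.0, right=0.0)
    xcorr = np.correlate(f_obs, f_temp, mode='full')
    # lag of the peak relative to zero shift, in log-pixels, gives the redshift
    lag = np.argmax(xcorr) - (len(f_temp) - 1)
    return np.exp(lag * dlnl) - 1.0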
|
gpl-2.0
| 7,933,736,069,385,149,000
| 36.833072
| 139
| 0.588938
| false
| 3.568525
| false
| false
| false
|
airbnb/streamalert
|
streamalert_cli/terraform/monitoring.py
|
1
|
3570
|
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared.logger import get_logger
from streamalert_cli.terraform.common import monitoring_topic_arn
LOGGER = get_logger(__name__)
def generate_monitoring(cluster_name, cluster_dict, config):
"""Add the CloudWatch Monitoring module to the Terraform cluster dict.
Example configuration:
"cloudwatch_monitoring": {
"enabled": true,
"kinesis_alarms_enabled": true,
"lambda_alarms_enabled": true,
"settings": {
"lambda_invocation_error_period": "600",
"kinesis_iterator_age_error_period": "600",
"kinesis_write_throughput_exceeded_threshold": "100"
}
}
Args:
cluster_name (str): The name of the currently generating cluster
cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
config (dict): The loaded config from the 'conf/' directory
Returns:
bool: Result of applying the cloudwatch_monitoring module
"""
prefix = config['global']['account']['prefix']
infrastructure_config = config['global'].get('infrastructure')
monitoring_config = config['clusters'][cluster_name]['modules']['cloudwatch_monitoring']
if not (infrastructure_config and 'monitoring' in infrastructure_config):
LOGGER.error('Invalid config: Make sure you declare global infrastructure options!')
return False
if not monitoring_config.get('enabled', False):
LOGGER.info('CloudWatch Monitoring not enabled, skipping...')
return True
sns_topic_arn = monitoring_topic_arn(config)
cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)] = {
'source': './modules/tf_monitoring',
'sns_topic_arn': sns_topic_arn,
'kinesis_alarms_enabled': False,
'lambda_alarms_enabled': False
}
if monitoring_config.get('lambda_alarms_enabled', True):
cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)].update({
'lambda_functions': ['{}_{}_streamalert_classifier'.format(prefix, cluster_name)],
'lambda_alarms_enabled': True
})
if monitoring_config.get('kinesis_alarms_enabled', True):
cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)].update({
'kinesis_stream': '${{module.kinesis_{}.stream_name}}'.format(cluster_name),
'kinesis_alarms_enabled': True
})
# Add support for custom settings for tweaking alarm thresholds, eval periods, and periods
# Note: This does not strictly check for proper variable names, since there are so many.
# Instead, Terraform will error out if an improper name is used.
# Also, every value in these settings should be a string, so cast for safety.
for setting_name, setting_value in monitoring_config.get('settings', {}).items():
cluster_dict['module']['cloudwatch_monitoring_{}'.format(
cluster_name)][setting_name] = str(setting_value)
return True
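# Hedged usage sketch (illustration only; the account values and the exact keys
# consumed by monitoring_topic_arn() are assumptions): driving
# generate_monitoring() with a minimal in-memory config populates
# cluster_dict['module']['cloudwatch_monitoring_<cluster>'] as described above.
def _example_generate_monitoring():
    from collections import defaultdict

    config = {
        'global': {
            'account': {
                'prefix': 'acme',
                'aws_account_id': '123456789012',
                'region': 'us-east-1',
            },
            'infrastructure': {
                'monitoring': {'sns_topic_name': 'acme_streamalert_monitoring'},
            },
        },
        'clusters': {
            'prod': {
                'modules': {
                    'cloudwatch_monitoring': {
                        'enabled': True,
                        'settings': {'lambda_invocation_error_period': '600'},
                    }
                }
            }
        },
    }
    cluster_dict = defaultdict(dict)
    generate_monitoring('prod', cluster_dict, config)
    return cluster_dict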
|
apache-2.0
| -359,748,981,042,700,000
| 40.034483
| 97
| 0.685154
| false
| 4.136732
| true
| false
| false
|
sarisabban/ProtVR
|
FlaskApp/ProtVR.py
|
1
|
1848
|
# Author: Sari Sabban
# Email: sari.sabban@gmail.com
# URL: https://github.com/sarisabban
#
# Created By: Sari Sabban
# Created Date: 20 March 2017
import urllib
def ProtVR(x):
lis=list()
filename=urllib.urlopen('http://files.rcsb.org/view/'+x+'.pdb')
lis.append('<script src="/static/aframe.min.js"></script>\n')
lis.append('<a-scene>\n')
lis.append('\t<a-sky color="#111111"></a-sky>\n')
for line in filename:
line=line.decode()
if line.startswith('ATOM'):
splitline=line.split()
try:
coordinates=(splitline[11],splitline[6],splitline[7],splitline[8])
except:
coordinates=(splitline[10],splitline[6],splitline[7],splitline[8])
if coordinates[0]=='N':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2D2DE1"></a-sphere>'
elif coordinates[0]=='C':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2DE12D"></a-sphere>'
elif coordinates[0]=='O':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#E14343"></a-sphere>'
elif coordinates[0]=='H':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBCBCB"></a-sphere>'
elif coordinates[0]=='S':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBAE38"></a-sphere>'
elif coordinates[0]=='I':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#830083"></a-sphere>'
else:
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#6F6F6F"></a-sphere>'
result=' '.join(js)
lis.append(result)
lis.append('</a-scene>')
final=' '.join(lis)
return(final)
#print(final)
#ProtVR('2HIU')
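# Hedged usage sketch (illustration only): ProtVR() returns a string of A-Frame
# markup, so a caller outside Flask could write it straight to an HTML file and
# open it in a WebVR-capable browser. The file name below is an assumption, and
# the generated page loads /static/aframe.min.js, so that script path may need
# adjusting outside the app.
def save_scene(pdb_id, path='ProtVR_scene.html'):
	# write the generated A-Frame scene to a standalone HTML file
	with open(path, 'w') as handle:
		handle.write(ProtVR(pdb_id))
#save_scene('2HIU')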
|
mit
| 2,022,517,550,096,471,300
| 39.173913
| 119
| 0.653139
| false
| 2.812785
| false
| false
| false
|
Einsteinish/PyTune3
|
apps/reader/views.py
|
1
|
103912
|
import datetime
import time
import boto
import redis
import requests
import random
import zlib
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.db import IntegrityError
from django.db.models import Q
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404, UnreadablePostError
from django.conf import settings
from django.core.mail import mail_admins
#from django.core.validators import email_re
from django.core.validators import EmailValidator
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.core.mail import EmailMultiAlternatives
from django.contrib.sites.models import Site
from django.utils import feedgenerator
from django.utils.encoding import smart_unicode
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.recommendations.models import RecommendedFeed
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
from apps.profile.models import Profile
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
from apps.reader.forms import SignupForm, LoginForm, FeatureForm
from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts
from apps.search.models import MUserSearch
from apps.statistics.models import MStatistics
# from apps.search.models import SearchStarredStory
try:
from apps.rss_feeds.models import Feed, MFeedPage, DuplicateFeed, MStory, MStarredStory
except:
pass
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices
from apps.social.models import MSocialSubscription, MActivity, MInteraction
from apps.categories.models import MCategory
from apps.social.views import load_social_page
from apps.rss_feeds.tasks import ScheduleImmediateFetches
from utils import json_functions as json
from utils.user_functions import get_user, ajax_login_required
from utils.feed_functions import relative_timesince
from utils.story_functions import format_story_link_date__short
from utils.story_functions import format_story_link_date__long
from utils.story_functions import strip_tags
from utils import log as logging
from utils.view_functions import get_argument_or_404, render_to, is_true
from utils.view_functions import required_params
from utils.ratelimit import ratelimit
from vendor.timezones.utilities import localtime_for_timezone
BANNED_URLS = [
"brentozar.com",
]
@never_cache
@render_to('reader/dashboard.xhtml')
def index(request, **kwargs):
if request.method == "GET" and request.subdomain and request.subdomain not in ['dev', 'www', 'debug']:
username = request.subdomain
if '.' in username:
username = username.split('.')[0]
user = User.objects.filter(username=username)
if not user:
user = User.objects.filter(username__iexact=username)
if user:
user = user[0]
if not user:
return HttpResponseRedirect('http://%s%s' % (
Site.objects.get_current().domain,
reverse('index')))
return load_social_page(request, user_id=user.pk, username=request.subdomain, **kwargs)
if request.user.is_anonymous():
return welcome(request, **kwargs)
else:
return dashboard(request, **kwargs)
def dashboard(request, **kwargs):
user = request.user
feed_count = UserSubscription.objects.filter(user=request.user).count()
recommended_feeds = RecommendedFeed.objects.filter(is_public=True,
approved_date__lte=datetime.datetime.now()
).select_related('feed')[:2]
unmoderated_feeds = []
if user.is_staff:
unmoderated_feeds = RecommendedFeed.objects.filter(is_public=False,
declined_date__isnull=True
).select_related('feed')[:2]
statistics = MStatistics.all()
social_profile = MSocialProfile.get_user(user.pk)
start_import_from_google_reader = request.session.get('import_from_google_reader', False)
if start_import_from_google_reader:
del request.session['import_from_google_reader']
if not user.is_active:
url = "https://%s%s" % (Site.objects.get_current().domain,
reverse('stripe-form'))
return HttpResponseRedirect(url)
logging.user(request, "~FBLoading dashboard")
return {
'user_profile' : user.profile,
'feed_count' : feed_count,
'account_images' : range(1, 4),
'recommended_feeds' : recommended_feeds,
'unmoderated_feeds' : unmoderated_feeds,
'statistics' : statistics,
'social_profile' : social_profile,
'start_import_from_google_reader': start_import_from_google_reader,
'debug' : settings.DEBUG,
}, "reader/dashboard.xhtml"
def welcome(request, **kwargs):
user = get_user(request)
statistics = MStatistics.all()
social_profile = MSocialProfile.get_user(user.pk)
if request.method == "POST":
if request.POST.get('submit', '').startswith('log'):
login_form = LoginForm(request.POST, prefix='login')
signup_form = SignupForm(prefix='signup')
else:
login_form = LoginForm(prefix='login')
signup_form = SignupForm(request.POST, prefix='signup')
else:
login_form = LoginForm(prefix='login')
signup_form = SignupForm(prefix='signup')
logging.user(request, "~FBLoading welcome")
return {
'user_profile' : hasattr(user, 'profile') and user.profile,
'login_form' : login_form,
'signup_form' : signup_form,
'statistics' : statistics,
'social_profile' : social_profile,
'post_request' : request.method == 'POST',
}, "reader/welcome.xhtml"
@never_cache
def login(request):
code = -1
message = ""
if request.method == "POST":
form = LoginForm(request.POST, prefix='login')
if form.is_valid():
login_user(request, form.get_user())
if request.POST.get('api'):
logging.user(form.get_user(), "~FG~BB~SKiPhone Login~FW")
code = 1
else:
logging.user(form.get_user(), "~FG~BBLogin~FW")
return HttpResponseRedirect(reverse('index'))
else:
message = form.errors.items()[0][1][0]
if request.POST.get('api'):
return HttpResponse(json.encode(dict(code=code, message=message)), content_type='application/json')
else:
return index(request)
@never_cache
def signup(request):
if request.method == "POST":
form = SignupForm(prefix='signup', data=request.POST)
if form.is_valid():
new_user = form.save()
login_user(request, new_user)
logging.user(new_user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % new_user.email)
if not new_user.is_active:
url = "https://%s%s" % (Site.objects.get_current().domain,
reverse('stripe-form'))
return HttpResponseRedirect(url)
return index(request)
@never_cache
def logout(request):
logging.user(request, "~FG~BBLogout~FW")
logout_user(request)
if request.GET.get('api'):
return HttpResponse(json.encode(dict(code=1)), content_type='application/json')
else:
return HttpResponseRedirect(reverse('index'))
def autologin(request, username, secret):
next = request.GET.get('next', '')
if not username or not secret:
return HttpResponseForbidden()
profile = Profile.objects.filter(user__username=username, secret_token=secret)
if not profile:
return HttpResponseForbidden()
user = profile[0].user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
logging.user(user, "~FG~BB~SKAuto-Login. Next stop: %s~FW" % (next if next else 'Homepage',))
if next and not next.startswith('/'):
next = '?next=' + next
return HttpResponseRedirect(reverse('index') + next)
elif next:
return HttpResponseRedirect(next)
else:
return HttpResponseRedirect(reverse('index'))
@ratelimit(minutes=1, requests=60)
@never_cache
@json.json_view
def load_feeds(request):
user = get_user(request)
feeds = {}
include_favicons = request.REQUEST.get('include_favicons', False)
flat = request.REQUEST.get('flat', False)
update_counts = request.REQUEST.get('update_counts', False)
version = int(request.REQUEST.get('v', 1))
if include_favicons == 'false': include_favicons = False
if update_counts == 'false': update_counts = False
if flat == 'false': flat = False
if flat: return load_feeds_flat(request)
try:
folders = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
data = dict(feeds=[], folders=[])
return data
except UserSubscriptionFolders.MultipleObjectsReturned:
UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
folders = UserSubscriptionFolders.objects.get(user=user)
user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
scheduled_feeds = []
for sub in user_subs:
pk = sub.feed_id
if update_counts and sub.needs_unread_recalc:
sub.calculate_feed_scores(silent=True)
feeds[pk] = sub.canonical(include_favicon=include_favicons)
if not sub.active: continue
if not sub.feed.active and not sub.feed.has_feed_exception:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.active_subscribers <= 0:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.next_scheduled_update < day_ago:
scheduled_feeds.append(sub.feed.pk)
if len(scheduled_feeds) > 0 and request.user.is_authenticated():
logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
len(scheduled_feeds))
ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=user.pk).count()
social_params = {
'user_id': user.pk,
'include_favicon': include_favicons,
'update_counts': update_counts,
}
social_feeds = MSocialSubscription.feeds(**social_params)
social_profile = MSocialProfile.profile(user.pk)
social_services = MSocialServices.profile(user.pk)
categories = None
if not user_subs:
categories = MCategory.serialize()
logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials%s" % (
len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))
data = {
'feeds': feeds.values() if version == 2 else feeds,
'social_feeds': social_feeds,
'social_profile': social_profile,
'social_services': social_services,
'user_profile': user.profile,
"is_staff": user.is_staff,
'user_id': user.pk,
'folders': json.decode(folders.folders),
'starred_count': starred_count,
'starred_counts': starred_counts,
'categories': categories
}
return data
@json.json_view
def load_feed_favicons(request):
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_ids')
if not feed_ids:
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
feed_ids = [sub['feed__pk'] for sub in user_subs.values('feed__pk')]
feed_icons = dict([(i.feed_id, i.data) for i in MFeedIcon.objects(feed_id__in=feed_ids)])
return feed_icons
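# Flat variant of load_feeds: returns feeds keyed by id plus flattened folder lists
# (optionally including inactive subscriptions), social feeds, and iOS client version info.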
def load_feeds_flat(request):
user = request.user
include_favicons = is_true(request.REQUEST.get('include_favicons', False))
update_counts = is_true(request.REQUEST.get('update_counts', True))
include_inactive = is_true(request.REQUEST.get('include_inactive', False))
feeds = {}
inactive_feeds = {}
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
scheduled_feeds = []
iphone_version = "2.1" # Preserved forever. Don't change.
latest_ios_build = "52"
latest_ios_version = "5.0.0b2"
if include_favicons == 'false': include_favicons = False
if update_counts == 'false': update_counts = False
if not user.is_authenticated():
return HttpResponseForbidden()
try:
folders = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
folders = []
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
if not user_subs and folders:
folders.auto_activate()
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
if include_inactive:
inactive_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=False)
for sub in user_subs:
if update_counts and sub.needs_unread_recalc:
sub.calculate_feed_scores(silent=True)
feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
if not sub.feed.active and not sub.feed.has_feed_exception:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.active_subscribers <= 0:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.next_scheduled_update < day_ago:
scheduled_feeds.append(sub.feed.pk)
if include_inactive:
for sub in inactive_subs:
inactive_feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
if len(scheduled_feeds) > 0 and request.user.is_authenticated():
logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
len(scheduled_feeds))
ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
flat_folders = []
flat_folders_with_inactive = []
if folders:
flat_folders = folders.flatten_folders(feeds=feeds)
flat_folders_with_inactive = folders.flatten_folders(feeds=feeds,
inactive_feeds=inactive_feeds)
social_params = {
'user_id': user.pk,
'include_favicon': include_favicons,
'update_counts': update_counts,
}
social_feeds = MSocialSubscription.feeds(**social_params)
social_profile = MSocialProfile.profile(user.pk)
social_services = MSocialServices.profile(user.pk)
starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=user.pk).count()
categories = None
if not user_subs:
categories = MCategory.serialize()
logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB/~FR%s~FB feeds/socials/inactive ~FMflat~FB%s" % (
len(feeds.keys()), len(social_feeds), len(inactive_feeds), '. ~FCUpdating counts.' if update_counts else ''))
data = {
"flat_folders": flat_folders,
"flat_folders_with_inactive": flat_folders_with_inactive,
"feeds": feeds if not include_inactive else {"0": "Don't include `include_inactive=true` if you want active feeds."},
"inactive_feeds": inactive_feeds if include_inactive else {"0": "Include `include_inactive=true`"},
"social_feeds": social_feeds,
"social_profile": social_profile,
"social_services": social_services,
"user": user.username,
"user_id": user.pk,
"is_staff": user.is_staff,
"user_profile": user.profile,
"iphone_version": iphone_version,
"latest_ios_build": latest_ios_build,
"latest_ios_version": latest_ios_version,
"categories": categories,
'starred_count': starred_count,
'starred_counts': starred_counts,
'share_ext_token': user.profile.secret_token,
}
return data
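# Recomputes unread counts for the requested feed and social feed ids (or all of them when
# none are given) and attaches favicon data for feeds the client reports as still fetching.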
@ratelimit(minutes=1, requests=10)
@never_cache
@json.json_view
def refresh_feeds(request):
start = datetime.datetime.now()
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_id')
check_fetch_status = request.REQUEST.get('check_fetch_status')
favicons_fetching = request.REQUEST.getlist('favicons_fetching')
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids or (not social_feed_ids and not feed_ids):
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids,
check_fetch_status=check_fetch_status)
checkpoint1 = datetime.datetime.now()
social_feeds = {}
if social_feed_ids or (not social_feed_ids and not feed_ids):
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
checkpoint2 = datetime.datetime.now()
favicons_fetching = [int(f) for f in favicons_fetching if f]
feed_icons = {}
if favicons_fetching:
feed_icons = dict([(i.feed_id, i) for i in MFeedIcon.objects(feed_id__in=favicons_fetching)])
for feed_id, feed in feeds.items():
if feed_id in favicons_fetching and feed_id in feed_icons:
feeds[feed_id]['favicon'] = feed_icons[feed_id].data
feeds[feed_id]['favicon_color'] = feed_icons[feed_id].color
feeds[feed_id]['favicon_fetching'] = feed.get('favicon_fetching')
user_subs = UserSubscription.objects.filter(user=user, active=True).only('feed')
sub_feed_ids = [s.feed_id for s in user_subs]
if favicons_fetching:
moved_feed_ids = [f for f in favicons_fetching if f not in sub_feed_ids]
for moved_feed_id in moved_feed_ids:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=moved_feed_id)
if duplicate_feeds and duplicate_feeds[0].feed.pk in feeds:
feeds[moved_feed_id] = feeds[duplicate_feeds[0].feed_id]
feeds[moved_feed_id]['dupe_feed_id'] = duplicate_feeds[0].feed_id
if check_fetch_status:
missing_feed_ids = list(set(feed_ids) - set(sub_feed_ids))
if missing_feed_ids:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id__in=missing_feed_ids)
for duplicate_feed in duplicate_feeds:
feeds[duplicate_feed.duplicate_feed_id] = {'id': duplicate_feed.feed_id}
interactions_count = MInteraction.user_unread_count(user.pk)
if True or settings.DEBUG or check_fetch_status:
end = datetime.datetime.now()
extra_fetch = ""
if check_fetch_status or favicons_fetching:
extra_fetch = "(%s/%s)" % (check_fetch_status, len(favicons_fetching))
logging.user(request, "~FBRefreshing %s+%s feeds %s (%.4s/%.4s/%.4s)" % (
len(feeds.keys()), len(social_feeds.keys()), extra_fetch,
(checkpoint1-start).total_seconds(),
(checkpoint2-start).total_seconds(),
(end-start).total_seconds(),
))
return {
'feeds': feeds,
'social_feeds': social_feeds,
'interactions_count': interactions_count,
}
@json.json_view
def interactions_count(request):
user = get_user(request)
interactions_count = MInteraction.user_unread_count(user.pk)
return {
'interactions_count': interactions_count,
}
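# Recalculates unread counts for an explicit list of feed and social feed ids.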
@never_cache
@ajax_login_required
@json.json_view
def feed_unread_count(request):
user = request.user
feed_ids = request.REQUEST.getlist('feed_id')
force = request.REQUEST.get('force', False)
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids:
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids, force=force)
social_feeds = {}
if social_feed_ids:
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
if len(feed_ids) == 1:
if settings.DEBUG:
feed_title = Feed.get_by_id(feed_ids[0]).feed_title
else:
feed_title = feed_ids[0]
elif len(social_feed_ids) == 1:
feed_title = MSocialProfile.objects.get(user_id=social_feed_ids[0].replace('social:', '')).username
else:
feed_title = "%s feeds" % (len(feeds) + len(social_feeds))
logging.user(request, "~FBUpdating unread count on: %s" % feed_title)
return {'feeds': feeds, 'social_feeds': social_feeds}
def refresh_feed(request, feed_id):
user = get_user(request)
feed = get_object_or_404(Feed, pk=feed_id)
feed = feed.update(force=True, compute_scores=False)
usersub = UserSubscription.objects.get(user=user, feed=feed)
usersub.calculate_feed_scores(silent=False)
logging.user(request, "~FBRefreshing feed: %s" % feed)
return load_single_feed(request, feed_id)
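# Loads one page of stories for a single feed, honoring the requested order, read filter and
# optional search query, and annotates each story with read/starred/shared state and
# intelligence classifier scores.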
@never_cache
@json.json_view
def load_single_feed(request, feed_id):
start = time.time()
user = get_user(request)
# offset = int(request.REQUEST.get('offset', 0))
# limit = int(request.REQUEST.get('limit', 6))
limit = 6
page = int(request.REQUEST.get('page', 1))
offset = limit * (page-1)
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'all')
query = request.REQUEST.get('query', '').strip()
include_story_content = is_true(request.REQUEST.get('include_story_content', True))
include_hidden = is_true(request.REQUEST.get('include_hidden', False))
message = None
user_search = None
dupe_feed_id = None
user_profiles = []
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
if not feed_id: raise Http404
feed_address = request.REQUEST.get('feed_address')
feed = Feed.get_by_id(feed_id, feed_address=feed_address)
if not feed:
raise Http404
try:
usersub = UserSubscription.objects.get(user=user, feed=feed)
except UserSubscription.DoesNotExist:
usersub = None
if feed.is_newsletter and not usersub:
# User must be subscribed to a newsletter in order to read it
raise Http404
if query:
if user.profile.is_premium:
user_search = MUserSearch.get_user(user.pk)
user_search.touch_search_date()
stories = feed.find_stories(query, order=order, offset=offset, limit=limit)
else:
stories = []
message = "You must be a premium subscriber to search."
elif read_filter == 'starred':
mstories = MStarredStory.objects(
user_id=user.pk,
story_feed_id=feed_id
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
elif usersub and (read_filter == 'unread' or order == 'oldest'):
stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit,
default_cutoff_date=user.profile.unread_cutoff)
else:
stories = feed.get_stories(offset, limit)
checkpoint1 = time.time()
try:
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
except redis.ConnectionError:
logging.user(request, "~BR~FK~SBRedis is unavailable for shared stories.")
checkpoint2 = time.time()
# Get intelligence classifier for user
if usersub and usersub.is_trained:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id, social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = get_classifiers_for_user(user, feed_id=feed_id,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
checkpoint3 = time.time()
unread_story_hashes = []
if stories:
if (read_filter == 'all' or query) and usersub:
unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread',
feed_ids=[usersub.feed_id],
usersubs=[usersub],
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
story_hashes = [story['story_hash'] for story in stories if story['story_hash']]
starred_stories = MStarredStory.objects(user_id=user.pk,
story_feed_id=feed.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'starred_date', 'user_tags')
shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
shared_stories = []
if shared_story_hashes:
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=shared_story_hashes)\
.only('story_hash', 'shared_date', 'comments')
starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
user_tags=story.user_tags))
for story in starred_stories])
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
checkpoint4 = time.time()
for story in stories:
if not include_story_content:
del story['story_content']
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
nowtz = localtime_for_timezone(now, user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if usersub:
story['read_status'] = 1
if story['story_date'] < user.profile.unread_cutoff:
story['read_status'] = 1
elif (read_filter == 'all' or query) and usersub:
story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0
elif read_filter == 'unread' and usersub:
story['read_status'] = 0
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
if story['story_hash'] in shared_stories:
story['shared'] = True
shared_date = localtime_for_timezone(shared_stories[story['story_hash']]['shared_date'],
user.profile.timezone)
story['shared_date'] = format_story_link_date__long(shared_date, now)
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
else:
story['read_status'] = 1
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, feed),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
# Intelligence
feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
if usersub:
usersub.feed_opens += 1
usersub.needs_unread_recalc = True
usersub.save(update_fields=['feed_opens', 'needs_unread_recalc'])
diff1 = checkpoint1-start
diff2 = checkpoint2-start
diff3 = checkpoint3-start
diff4 = checkpoint4-start
timediff = time.time()-start
last_update = relative_timesince(feed.last_update)
time_breakdown = ""
if timediff > 1 or settings.DEBUG:
time_breakdown = "~SN~FR(~SB%.4s/%.4s/%.4s/%.4s~SN)" % (
diff1, diff2, diff3, diff4)
search_log = "~SN~FG(~SB%s~SN) " % query if query else ""
logging.user(request, "~FYLoading feed: ~SB%s%s (%s/%s) %s%s" % (
feed.feed_title[:22], ('~SN/p%s' % page) if page > 1 else '', order, read_filter, search_log, time_breakdown))
if not include_hidden:
hidden_stories_removed = 0
new_stories = []
for story in stories:
if story['score'] >= 0:
new_stories.append(story)
else:
hidden_stories_removed += 1
stories = new_stories
data = dict(stories=stories,
user_profiles=user_profiles,
feed_tags=feed_tags,
feed_authors=feed_authors,
classifiers=classifiers,
updated=last_update,
user_search=user_search,
feed_id=feed.pk,
elapsed_time=round(float(timediff), 2),
message=message)
if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id
if not usersub:
data.update(feed.canonical())
# if not usersub and feed.num_subscribers <= 1:
# data = dict(code=-1, message="You must be subscribed to this feed.")
# if page <= 3:
# import random
# time.sleep(random.randint(2, 4))
# if page == 2:
# assert False
return data
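# Serves the stored "original page" HTML for a feed, proxying from the original page server
# or S3 when configured, and falling back to the stored MFeedPage copy.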
def load_feed_page(request, feed_id):
if not feed_id:
raise Http404
feed = Feed.get_by_id(feed_id)
if feed and feed.has_page and not feed.has_page_exception:
if settings.BACKED_BY_AWS.get('pages_on_node'):
url = "http://%s/original_page/%s" % (
settings.ORIGINAL_PAGE_SERVER,
feed.pk,
)
page_response = requests.get(url)
if page_response.status_code == 200:
response = HttpResponse(page_response.content, content_type="text/html; charset=utf-8")
response['Content-Encoding'] = 'gzip'
response['Last-Modified'] = page_response.headers.get('Last-modified')
response['Etag'] = page_response.headers.get('Etag')
response['Content-Length'] = str(len(page_response.content))
logging.user(request, "~FYLoading original page, proxied from node: ~SB%s bytes" %
(len(page_response.content)))
return response
if settings.BACKED_BY_AWS['pages_on_s3'] and feed.s3_page:
if settings.PROXY_S3_PAGES:
key = settings.S3_PAGES_BUCKET.get_key(feed.s3_pages_key)
if key:
compressed_data = key.get_contents_as_string()
response = HttpResponse(compressed_data, content_type="text/html; charset=utf-8")
response['Content-Encoding'] = 'gzip'
logging.user(request, "~FYLoading original page, proxied: ~SB%s bytes" %
(len(compressed_data)))
return response
else:
logging.user(request, "~FYLoading original page, non-proxied")
return HttpResponseRedirect('//%s/%s' % (settings.S3_PAGES_BUCKET_NAME,
feed.s3_pages_key))
data = MFeedPage.get_data(feed_id=feed_id)
if not data or not feed or not feed.has_page or feed.has_page_exception:
logging.user(request, "~FYLoading original page, ~FRmissing")
return render(request, 'static/404_original_page.xhtml', {},
content_type='text/html',
status=404)
logging.user(request, "~FYLoading original page, from the db")
return HttpResponse(data, content_type="text/html; charset=utf-8")
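# Returns a page of the user's saved (starred) stories, optionally filtered by a search
# query, a user tag, or explicit story hashes.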
@json.json_view
def load_starred_stories(request):
user = get_user(request)
offset = int(request.REQUEST.get('offset', 0))
limit = int(request.REQUEST.get('limit', 10))
page = int(request.REQUEST.get('page', 0))
query = request.REQUEST.get('query', '').strip()
order = request.REQUEST.get('order', 'newest')
tag = request.REQUEST.get('tag')
story_hashes = request.REQUEST.getlist('h')[:100]
version = int(request.REQUEST.get('v', 1))
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
message = None
order_by = '-' if order == "newest" else ""
if page: offset = limit * (page - 1)
if query:
# results = SearchStarredStory.query(user.pk, query)
# story_ids = [result.db_id for result in results]
if user.profile.is_premium:
stories = MStarredStory.find_stories(query, user.pk, tag=tag, offset=offset, limit=limit,
order=order)
else:
stories = []
message = "You must be a premium subscriber to search."
elif tag:
if user.profile.is_premium:
mstories = MStarredStory.objects(
user_id=user.pk,
user_tags__contains=tag
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
stories = []
message = "You must be a premium subscriber to read saved stories by tag."
elif story_hashes:
mstories = MStarredStory.objects(
user_id=user.pk,
story_hash__in=story_hashes
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
mstories = MStarredStory.objects(
user_id=user.pk
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
story_hashes = [story['story_hash'] for story in stories]
story_feed_ids = list(set(s['story_feed_id'] for s in stories))
usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
usersub_ids = [us['feed__pk'] for us in usersub_ids]
unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
unsub_feeds = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in unsub_feeds)
shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
shared_stories = []
if shared_story_hashes:
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=shared_story_hashes)\
.only('story_hash', 'shared_date', 'comments')
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
starred_date = localtime_for_timezone(story['starred_date'], user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, nowtz)
story['starred_timestamp'] = starred_date.strftime('%s')
story['read_status'] = 1
story['starred'] = True
story['intelligence'] = {
'feed': 1,
'author': 0,
'tags': 0,
'title': 0,
}
if story['story_hash'] in shared_stories:
story['shared'] = True
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
logging.user(request, "~FCLoading starred stories: ~SB%s stories %s" % (len(stories), search_log))
return {
"stories": stories,
"user_profiles": user_profiles,
'feeds': unsub_feeds.values() if version == 2 else unsub_feeds,
"message": message,
}
@json.json_view
def starred_story_hashes(request):
user = get_user(request)
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
mstories = MStarredStory.objects(
user_id=user.pk
).only('story_hash', 'starred_date').order_by('-starred_date')
if include_timestamps:
story_hashes = [(s.story_hash, s.starred_date.strftime("%s")) for s in mstories]
else:
story_hashes = [s.story_hash for s in mstories]
logging.user(request, "~FYLoading ~FCstarred story hashes~FY: %s story hashes" %
(len(story_hashes)))
return dict(starred_story_hashes=story_hashes)
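# Public RSS/Atom feed of a user's saved stories for a given tag, addressed by user id and
# secret token.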
def starred_stories_rss_feed(request, user_id, secret_token, tag_slug):
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
raise Http404
try:
tag_counts = MStarredStoryCounts.objects.get(user_id=user_id, slug=tag_slug)
except MStarredStoryCounts.MultipleObjectsReturned:
tag_counts = MStarredStoryCounts.objects(user_id=user_id, slug=tag_slug).first()
except MStarredStoryCounts.DoesNotExist:
raise Http404
data = {}
data['title'] = "Saved Stories - %s" % tag_counts.tag
data['link'] = "%s%s" % (
settings.PYTUNE_URL,
reverse('saved-stories-tag', kwargs=dict(tag_name=tag_slug)))
data['description'] = "Stories saved by %s on PyTune with the tag \"%s\"." % (user.username,
tag_counts.tag)
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'PyTune - %s' % settings.PYTUNE_URL
data['docs'] = None
data['author_name'] = user.username
data['feed_url'] = "%s%s" % (
settings.PYTUNE_URL,
reverse('starred-stories-rss-feed',
kwargs=dict(user_id=user_id, secret_token=secret_token, tag_slug=tag_slug)),
)
rss = feedgenerator.Atom1Feed(**data)
if not tag_counts.tag:
starred_stories = MStarredStory.objects(
user_id=user.pk
).order_by('-starred_date').limit(25)
else:
starred_stories = MStarredStory.objects(
user_id=user.pk,
user_tags__contains=tag_counts.tag
).order_by('-starred_date').limit(25)
for starred_story in starred_stories:
story_data = {
'title': starred_story.story_title,
'link': starred_story.story_permalink,
'description': (starred_story.story_content_z and
zlib.decompress(starred_story.story_content_z)),
'author_name': starred_story.story_author_name,
'categories': starred_story.story_tags,
'unique_id': starred_story.story_guid,
'pubdate': starred_story.starred_date,
}
rss.add_item(**story_data)
logging.user(request, "~FBGenerating ~SB%s~SN's saved story RSS feed (%s, %s stories): ~FM%s" % (
user.username,
tag_counts.tag,
tag_counts.count,
request.META.get('HTTP_USER_AGENT', "")[:24]
))
return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml')
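# Public RSS/Atom feed of a folder's unread (or focus) stories; stories are only included
# for premium accounts, with a notice item appended for everyone else.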
def folder_rss_feed(request, user_id, secret_token, unread_filter, folder_slug):
domain = Site.objects.get_current().domain
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
raise Http404
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=user)
feed_ids, folder_title = user_sub_folders.feed_ids_under_folder_slug(folder_slug)
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids)
if feed_ids and user.profile.is_premium:
params = {
"user_id": user.pk,
"feed_ids": feed_ids,
"offset": 0,
"limit": 20,
"order": 'newest',
"read_filter": 'all',
"cache_prefix": "RSS:"
}
story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params)
else:
story_hashes = []
unread_feed_story_hashes = []
mstories = MStory.objects(story_hash__in=story_hashes).order_by('-story_date')
stories = Feed.format_stories(mstories)
filtered_stories = []
found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
if found_trained_feed_ids:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids,
social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
for story in stories:
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
if unread_filter == 'focus' and story['score'] >= 1:
filtered_stories.append(story)
elif unread_filter == 'unread' and story['score'] >= 0:
filtered_stories.append(story)
stories = filtered_stories
data = {}
data['title'] = "%s from %s (%s sites)" % (folder_title, user.username, len(feed_ids))
data['link'] = "https://%s%s" % (
domain,
reverse('folder', kwargs=dict(folder_name=folder_title)))
data['description'] = "Unread stories in %s on PyTune. From %s's account and contains %s sites." % (
folder_title,
user.username,
len(feed_ids))
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'PyTune - %s' % settings.PYTUNE_URL
data['docs'] = None
data['author_name'] = user.username
data['feed_url'] = "https://%s%s" % (
domain,
reverse('folder-rss-feed',
kwargs=dict(user_id=user_id, secret_token=secret_token, unread_filter=unread_filter, folder_slug=folder_slug)),
)
rss = feedgenerator.Atom1Feed(**data)
for story in stories:
feed = Feed.get_by_id(story['story_feed_id'])
story_content = """<img src="//%s/rss_feeds/icon/%s"> %s <br><br> %s""" % (
Site.objects.get_current().domain,
story['story_feed_id'],
feed.feed_title if feed else "",
smart_unicode(story['story_content'])
)
story_data = {
'title': story['story_title'],
'link': story['story_permalink'],
'description': story_content,
'categories': story['story_tags'],
'unique_id': 'https://%s/site/%s/%s/' % (domain, story['story_feed_id'], story['guid_hash']),
'pubdate': localtime_for_timezone(story['story_date'], user.profile.timezone),
}
if story['story_authors']:
story_data['author_name'] = story['story_authors']
rss.add_item(**story_data)
if not user.profile.is_premium:
story_data = {
'title': "You must have a premium account on PyTune to have RSS feeds for folders.",
'link': "https://%s" % domain,
'description': "You must have a premium account on PyTune to have RSS feeds for folders.",
'unique_id': "https://%s/premium_only" % domain,
'pubdate': localtime_for_timezone(datetime.datetime.now(), user.profile.timezone),
}
rss.add_item(**story_data)
logging.user(request, "~FBGenerating ~SB%s~SN's folder RSS feed (%s, %s stories): ~FM%s" % (
user.username,
folder_title,
len(stories),
request.META.get('HTTP_USER_AGENT', "")[:24]
))
return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml')
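# Returns a page of stories the user has already read, in the requested order, with shared
# and starred annotations.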
@json.json_view
def load_read_stories(request):
user = get_user(request)
offset = int(request.REQUEST.get('offset', 0))
limit = int(request.REQUEST.get('limit', 10))
page = int(request.REQUEST.get('page', 0))
order = request.REQUEST.get('order', 'newest')
query = request.REQUEST.get('query', '').strip()
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
message = None
if page: offset = limit * (page - 1)
if query:
stories = []
message = "Not implemented yet."
# if user.profile.is_premium:
# stories = MStarredStory.find_stories(query, user.pk, offset=offset, limit=limit)
# else:
# stories = []
# message = "You must be a premium subscriber to search."
else:
story_hashes = RUserStory.get_read_stories(user.pk, offset=offset, limit=limit, order=order)
mstories = MStory.objects(story_hash__in=story_hashes)
stories = Feed.format_stories(mstories)
stories = sorted(stories, key=lambda story: story_hashes.index(story['story_hash']),
reverse=bool(order=="oldest"))
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
story_hashes = [story['story_hash'] for story in stories]
story_feed_ids = list(set(s['story_feed_id'] for s in stories))
usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
usersub_ids = [us['feed__pk'] for us in usersub_ids]
unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'shared_date', 'comments')
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
starred_stories = MStarredStory.objects(user_id=user.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'starred_date')
starred_stories = dict([(story.story_hash, story.starred_date)
for story in starred_stories])
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
story['read_status'] = 1
story['intelligence'] = {
'feed': 1,
'author': 0,
'tags': 0,
'title': 0,
}
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
if story['story_hash'] in shared_stories:
story['shared'] = True
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
logging.user(request, "~FCLoading read stories: ~SB%s stories %s" % (len(stories), search_log))
return {
"stories": stories,
"user_profiles": user_profiles,
"feeds": unsub_feeds,
"message": message,
}
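# The "river of news": merges stories across many feeds (or explicit story hashes), applies
# intelligence classifiers, and drops negative-scored stories unless include_hidden is set.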
@json.json_view
def load_river_stories__redis(request):
limit = 12
start = time.time()
user = get_user(request)
message = None
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feeds') if feed_id]
if not feed_ids:
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('f') if feed_id]
story_hashes = request.REQUEST.getlist('h')[:100]
original_feed_ids = list(feed_ids)
page = int(request.REQUEST.get('page', 1))
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'unread')
query = request.REQUEST.get('query', '').strip()
include_hidden = is_true(request.REQUEST.get('include_hidden', False))
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
usersubs = []
code = 1
user_search = None
offset = (page-1) * limit
limit = page * limit
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
if story_hashes:
unread_feed_story_hashes = None
read_filter = 'unread'
mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
stories = Feed.format_stories(mstories)
elif query:
if user.profile.is_premium:
user_search = MUserSearch.get_user(user.pk)
user_search.touch_search_date()
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
read_filter='all')
feed_ids = [sub.feed_id for sub in usersubs]
stories = Feed.find_feed_stories(feed_ids, query, order=order, offset=offset, limit=limit)
mstories = stories
unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
read_filter="unread", order=order,
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
else:
stories = []
mstories = []
message = "You must be a premium subscriber to search."
elif read_filter == 'starred':
mstories = MStarredStory.objects(
user_id=user.pk,
story_feed_id__in=feed_ids
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
read_filter=read_filter)
all_feed_ids = [f for f in feed_ids]
feed_ids = [sub.feed_id for sub in usersubs]
if feed_ids:
params = {
"user_id": user.pk,
"feed_ids": feed_ids,
"all_feed_ids": all_feed_ids,
"offset": offset,
"limit": limit,
"order": order,
"read_filter": read_filter,
"usersubs": usersubs,
"cutoff_date": user.profile.unread_cutoff,
}
story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params)
else:
story_hashes = []
unread_feed_story_hashes = []
mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
stories = Feed.format_stories(mstories)
found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
if not usersubs:
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=found_feed_ids,
read_filter=read_filter)
trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
# Find starred stories
if found_feed_ids:
if read_filter == 'starred':
starred_stories = mstories
else:
starred_stories = MStarredStory.objects(
user_id=user.pk,
story_feed_id__in=found_feed_ids
            ).only('story_hash', 'starred_date', 'user_tags')
starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
user_tags=story.user_tags))
for story in starred_stories])
else:
starred_stories = {}
# Intelligence classifiers for all feeds involved
if found_trained_feed_ids:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids,
social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
# Just need to format stories
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
if read_filter == 'starred':
story['read_status'] = 1
else:
story['read_status'] = 0
if read_filter == 'all' or query:
if (unread_feed_story_hashes is not None and
story['story_hash'] not in unread_feed_story_hashes):
story['read_status'] = 1
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
if not user.profile.is_premium:
message = "The full River of News is a premium feature."
code = 0
# if page > 1:
# stories = []
# else:
# stories = stories[:5]
diff = time.time() - start
timediff = round(float(diff), 2)
logging.user(request, "~FYLoading ~FCriver stories~FY: ~SBp%s~SN (%s/%s "
"stories, ~SN%s/%s/%s feeds, %s/%s)" %
(page, len(stories), len(mstories), len(found_feed_ids),
len(feed_ids), len(original_feed_ids), order, read_filter))
if not include_hidden:
hidden_stories_removed = 0
new_stories = []
for story in stories:
if story['score'] >= 0:
new_stories.append(story)
else:
hidden_stories_removed += 1
stories = new_stories
# if page <= 1:
# import random
# time.sleep(random.randint(0, 6))
data = dict(code=code,
message=message,
stories=stories,
classifiers=classifiers,
elapsed_time=timediff,
user_search=user_search,
user_profiles=user_profiles)
if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
return data
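# Older per-subscription implementation of unread story hashes; superseded by
# unread_story_hashes below.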
@json.json_view
def unread_story_hashes__old(request):
user = get_user(request)
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
usersubs = {}
if not feed_ids:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True)
feed_ids = [sub.feed_id for sub in usersubs]
else:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True, feed__in=feed_ids)
unread_feed_story_hashes = {}
story_hash_count = 0
usersubs = dict((sub.feed_id, sub) for sub in usersubs)
for feed_id in feed_ids:
if feed_id in usersubs:
us = usersubs[feed_id]
else:
continue
if not us.unread_count_neutral and not us.unread_count_positive:
continue
unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500,
withscores=include_timestamps,
hashes_only=True,
default_cutoff_date=user.profile.unread_cutoff)
story_hash_count += len(unread_feed_story_hashes[feed_id])
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
                 (len(feed_ids), story_hash_count))
return dict(unread_feed_story_hashes=unread_feed_story_hashes)
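# Returns unread story hashes for the requested feeds, optionally with read timestamps.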
@json.json_view
def unread_story_hashes(request):
user = get_user(request)
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'unread')
story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
order=order, read_filter=read_filter,
include_timestamps=include_timestamps,
cutoff_date=user.profile.unread_cutoff)
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
(len(feed_ids), len(story_hashes)))
return dict(unread_feed_story_hashes=story_hashes)
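# Marks every feed and social subscription as read, or only stories older than `days` days
# when a nonzero cutoff is given.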
@ajax_login_required
@json.json_view
def mark_all_as_read(request):
code = 1
try:
days = int(request.REQUEST.get('days', 0))
except ValueError:
return dict(code=-1, message="Days parameter must be an integer, not: %s" %
request.REQUEST.get('days'))
read_date = datetime.datetime.utcnow() - datetime.timedelta(days=days)
feeds = UserSubscription.objects.filter(user=request.user)
socialsubs = MSocialSubscription.objects.filter(user_id=request.user.pk)
for subtype in [feeds, socialsubs]:
for sub in subtype:
if days == 0:
sub.mark_feed_read()
else:
if sub.mark_read_date < read_date:
sub.needs_unread_recalc = True
sub.mark_read_date = read_date
sub.save()
logging.user(request, "~FMMarking all as read: ~SB%s days" % (days,))
return dict(code=code)
@ajax_login_required
@json.json_view
def mark_story_as_read(request):
story_ids = request.REQUEST.getlist('story_id')
try:
feed_id = int(get_argument_or_404(request, 'feed_id'))
except ValueError:
return dict(code=-1, errors=["You must pass a valid feed_id: %s" %
request.REQUEST.get('feed_id')])
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
feed_id = duplicate_feed[0].feed_id
try:
usersub = UserSubscription.objects.get(user=request.user,
feed=duplicate_feed[0].feed)
except (Feed.DoesNotExist):
return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
else:
return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
except UserSubscription.DoesNotExist:
usersub = None
if usersub:
data = usersub.mark_story_ids_as_read(story_ids, request=request)
else:
data = dict(code=-1, errors=["User is not subscribed to this feed."])
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'feed:%s' % feed_id)
return data
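# Marks individual stories as read by story hash, flags affected feed and social
# subscriptions for an unread recount, and notifies connected clients over Redis pub/sub.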
@ajax_login_required
@json.json_view
def mark_story_hashes_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
try:
story_hashes = request.REQUEST.getlist('story_hash')
except UnreadablePostError:
return dict(code=-1, message="Missing `story_hash` list parameter.")
feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
user_id=request.user.pk,
subscription_user_id__in=friend_ids)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc:
socialsub.needs_unread_recalc = True
socialsub.save()
r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)
# Also count on original subscription
for feed_id in feed_ids:
usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
r.publish(request.user.username, 'feed:%s' % feed_id)
hash_count = len(story_hashes)
logging.user(request, "~FYRead %s %s in feed/socialsubs: %s/%s" % (
hash_count, 'story' if hash_count == 1 else 'stories', feed_ids, friend_ids))
return dict(code=1, story_hashes=story_hashes,
feed_ids=feed_ids, friend_user_ids=friend_ids)
@ajax_login_required
@json.json_view
def mark_feed_stories_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feeds_stories = request.REQUEST.get('feeds_stories', "{}")
feeds_stories = json.decode(feeds_stories)
data = {
'code': -1,
'message': 'Nothing was marked as read'
}
for feed_id, story_ids in feeds_stories.items():
try:
feed_id = int(feed_id)
except ValueError:
continue
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
data = usersub.mark_story_ids_as_read(story_ids, request=request)
except UserSubscription.DoesNotExist:
return dict(code=-1, error="You are not subscribed to this feed_id: %d" % feed_id)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
try:
if not duplicate_feed: raise Feed.DoesNotExist
usersub = UserSubscription.objects.get(user=request.user,
feed=duplicate_feed[0].feed)
data = usersub.mark_story_ids_as_read(story_ids, request=request)
except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
return dict(code=-1, error="No feed exists for feed_id: %d" % feed_id)
r.publish(request.user.username, 'feed:%s' % feed_id)
return data
@ajax_login_required
@json.json_view
def mark_social_stories_as_read(request):
code = 1
errors = []
data = {}
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
users_feeds_stories = request.REQUEST.get('users_feeds_stories', "{}")
users_feeds_stories = json.decode(users_feeds_stories)
for social_user_id, feeds in users_feeds_stories.items():
for feed_id, story_ids in feeds.items():
feed_id = int(feed_id)
try:
socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=social_user_id)
data = socialsub.mark_story_ids_as_read(story_ids, feed_id, request=request)
except OperationError, e:
code = -1
errors.append("Already read story: %s" % e)
except MSocialSubscription.DoesNotExist:
MSocialSubscription.mark_unsub_story_ids_as_read(request.user.pk, social_user_id,
story_ids, feed_id,
request=request)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
try:
socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=social_user_id)
data = socialsub.mark_story_ids_as_read(story_ids, duplicate_feed[0].feed.pk, request=request)
except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
code = -1
errors.append("No feed exists for feed_id %d." % feed_id)
else:
continue
r.publish(request.user.username, 'feed:%s' % feed_id)
r.publish(request.user.username, 'social:%s' % social_user_id)
data.update(code=code, errors=errors)
return data
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_unread(request):
story_id = request.REQUEST.get('story_id', None)
feed_id = int(request.REQUEST.get('feed_id', 0))
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
feed = usersub.feed
except UserSubscription.DoesNotExist:
usersub = None
feed = Feed.get_by_id(feed_id)
if usersub and not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
data = dict(code=0, payload=dict(story_id=story_id))
story, found_original = MStory.find_story(feed_id, story_id)
if not story:
logging.user(request, "~FY~SBUnread~SN story in feed: %s (NOT FOUND)" % (feed))
return dict(code=-1, message="Story not found.")
if usersub:
data = usersub.invert_read_stories_after_unread_story(story, request)
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
if message:
data['code'] = -1
data['message'] = message
return data
social_subs = MSocialSubscription.mark_dirty_sharing_story(user_id=request.user.pk,
story_feed_id=feed_id,
story_guid_hash=story.guid_hash)
dirty_count = social_subs and social_subs.count()
dirty_count = ("(%s social_subs)" % dirty_count) if dirty_count else ""
RUserStory.mark_story_hash_unread(user_id=request.user.pk, story_hash=story.story_hash)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'feed:%s' % feed_id)
logging.user(request, "~FY~SBUnread~SN story in feed: %s %s" % (feed, dirty_count))
return data
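# Marks a single story unread by story hash, with the same recount and pub/sub bookkeeping
# as the read path.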
@ajax_login_required
@json.json_view
@required_params('story_hash')
def mark_story_hash_as_unread(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
story_hash = request.REQUEST.get('story_hash')
feed_id, _ = MStory.split_story_hash(story_hash)
story, _ = MStory.find_story(feed_id, story_hash)
if not story:
data = dict(code=-1, message="That story has been removed from the feed, no need to mark it unread.")
return data
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
if message:
data = dict(code=-1, message=message)
return data
# Also count on original subscription
usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
data = usersub.invert_read_stories_after_unread_story(story, request)
r.publish(request.user.username, 'feed:%s' % feed_id)
feed_id, friend_ids = RUserStory.mark_story_hash_unread(request.user.pk, story_hash)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
user_id=request.user.pk,
subscription_user_id__in=friend_ids)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc:
socialsub.needs_unread_recalc = True
socialsub.save()
r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)
logging.user(request, "~FYUnread story in feed/socialsubs: %s/%s" % (feed_id, friend_ids))
return dict(code=1, story_hash=story_hash, feed_id=feed_id, friend_user_ids=friend_ids)
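# Marks whole feeds (or social feeds) as read, optionally only stories older or newer than a
# cutoff timestamp depending on `direction`.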
@ajax_login_required
@json.json_view
def mark_feed_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feed_ids = request.REQUEST.getlist('feed_id')
cutoff_timestamp = int(request.REQUEST.get('cutoff_timestamp', 0))
direction = request.REQUEST.get('direction', 'older')
multiple = len(feed_ids) > 1
code = 1
errors = []
cutoff_date = datetime.datetime.fromtimestamp(cutoff_timestamp) if cutoff_timestamp else None
if cutoff_date:
logging.user(request, "~FMMark %s feeds read, %s - cutoff: %s/%s" %
(len(feed_ids), direction, cutoff_timestamp, cutoff_date))
for feed_id in feed_ids:
if 'social:' in feed_id:
user_id = int(feed_id.replace('social:', ''))
try:
sub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=user_id)
except MSocialSubscription.DoesNotExist:
logging.user(request, "~FRCouldn't find socialsub: %s" % user_id)
continue
if not multiple:
sub_user = User.objects.get(pk=sub.subscription_user_id)
logging.user(request, "~FMMarking social feed as read: ~SB%s" % (sub_user.username,))
else:
try:
feed = Feed.objects.get(id=feed_id)
sub = UserSubscription.objects.get(feed=feed, user=request.user)
if not multiple:
logging.user(request, "~FMMarking feed as read: ~SB%s" % (feed,))
except (Feed.DoesNotExist, UserSubscription.DoesNotExist), e:
errors.append("User not subscribed: %s" % e)
continue
except (ValueError), e:
errors.append("Invalid feed_id: %s" % e)
continue
if not sub:
errors.append("User not subscribed: %s" % feed_id)
continue
try:
if direction == "older":
marked_read = sub.mark_feed_read(cutoff_date=cutoff_date)
else:
marked_read = sub.mark_newer_stories_read(cutoff_date=cutoff_date)
if marked_read and not multiple:
r.publish(request.user.username, 'feed:%s' % feed_id)
except IntegrityError, e:
errors.append("Could not mark feed as read: %s" % e)
code = -1
if multiple:
logging.user(request, "~FMMarking ~SB%s~SN feeds as read" % len(feed_ids))
r.publish(request.user.username, 'refresh:%s' % ','.join(feed_ids))
if errors:
logging.user(request, "~FMMarking read had errors: ~FR%s" % errors)
return dict(code=code, errors=errors, cutoff_date=cutoff_date, direction=direction)
def _parse_user_info(user):
return {
'user_info': {
'is_anonymous': json.encode(user.is_anonymous()),
'is_authenticated': json.encode(user.is_authenticated()),
'username': json.encode(user.username if user.is_authenticated() else 'Anonymous')
}
}
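# Subscribes the user to a new feed by URL, optionally creating a folder first, and schedules
# the new feed for search indexing.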
@ajax_login_required
@json.json_view
def add_url(request):
code = 0
url = request.POST['url']
folder = request.POST.get('folder', '')
new_folder = request.POST.get('new_folder')
auto_active = is_true(request.POST.get('auto_active', 1))
skip_fetch = is_true(request.POST.get('skip_fetch', False))
feed = None
if not url:
code = -1
        message = 'Enter the website address or the feed URL.'
elif any([(banned_url in url) for banned_url in BANNED_URLS]):
code = -1
message = "The publisher of this website has banned PyTune."
else:
if new_folder:
usf, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
usf.add_folder(folder, new_folder)
folder = new_folder
code, message, us = UserSubscription.add_subscription(user=request.user, feed_address=url,
folder=folder, auto_active=auto_active,
skip_fetch=skip_fetch)
feed = us and us.feed
if feed:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:%s' % feed.pk)
MUserSearch.schedule_index_feeds_for_search(feed.pk, request.user.pk)
return dict(code=code, message=message, feed=feed)
@ajax_login_required
@json.json_view
def add_folder(request):
folder = request.POST['folder']
parent_folder = request.POST.get('parent_folder', '')
folders = None
logging.user(request, "~FRAdding Folder: ~SB%s (in %s)" % (folder, parent_folder))
if folder:
code = 1
message = ""
user_sub_folders_object, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
user_sub_folders_object.add_folder(parent_folder, folder)
folders = json.decode(user_sub_folders_object.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
else:
code = -1
message = "Gotta write in a folder name."
return dict(code=code, message=message, folders=folders)
@ajax_login_required
@json.json_view
def delete_feed(request):
feed_id = int(request.POST['feed_id'])
in_folder = request.POST.get('in_folder', None)
if not in_folder or in_folder == ' ':
in_folder = ""
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feed(feed_id, in_folder)
feed = Feed.objects.filter(pk=feed_id)
if feed:
feed[0].count_subscribers()
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, message="Removed %s from '%s'." % (feed, in_folder))
@ajax_login_required
@json.json_view
def delete_feed_by_url(request):
message = ""
code = 0
url = request.POST['url']
in_folder = request.POST.get('in_folder', '')
if in_folder == ' ':
in_folder = ""
logging.user(request.user, "~FBFinding feed (delete_feed_by_url): %s" % url)
feed = Feed.get_feed_from_url(url, create=False)
if feed:
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feed(feed.pk, in_folder)
code = 1
feed = Feed.objects.filter(pk=feed.pk)
if feed:
feed[0].count_subscribers()
else:
code = -1
message = "URL not found."
return dict(code=code, message=message)
@ajax_login_required
@json.json_view
def delete_folder(request):
folder_to_delete = request.POST.get('folder_name') or request.POST.get('folder_to_delete')
in_folder = request.POST.get('in_folder', None)
feed_ids_in_folder = [int(f) for f in request.REQUEST.getlist('feed_id') if f]
request.user.profile.send_opml_export_email(reason="You have deleted an entire folder of feeds, so here's a backup just in case.")
    # Works poorly with duplicate folder titles when they are both in the same parent folder.
    # Deletes all of them, but only within the same parent folder. But nobody should be doing that, right?
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_folder(folder_to_delete, in_folder, feed_ids_in_folder)
folders = json.decode(user_sub_folders.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=folders)
@required_params('feeds_by_folder')
@ajax_login_required
@json.json_view
def delete_feeds_by_folder(request):
feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
request.user.profile.send_opml_export_email(reason="You have deleted a number of feeds at once, so here's a backup just in case.")
    # Works poorly with duplicate folder titles when they are both in the same parent folder.
    # Deletes all of them, but only within the same parent folder. But nobody should be doing that, right?
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feeds_by_folder(feeds_by_folder)
folders = json.decode(user_sub_folders.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=folders)
@ajax_login_required
@json.json_view
def rename_feed(request):
feed = get_object_or_404(Feed, pk=int(request.POST['feed_id']))
user_sub = UserSubscription.objects.get(user=request.user, feed=feed)
feed_title = request.POST['feed_title']
logging.user(request, "~FRRenaming feed '~SB%s~SN' to: ~SB%s" % (
feed.feed_title, feed_title))
user_sub.user_title = feed_title
user_sub.save()
return dict(code=1)
@ajax_login_required
@json.json_view
def rename_folder(request):
folder_to_rename = request.POST.get('folder_name') or request.POST.get('folder_to_rename')
new_folder_name = request.POST['new_folder_name']
in_folder = request.POST.get('in_folder', '')
    if 'Top Level' in in_folder:
        in_folder = ''
code = 0
# Works piss poor with duplicate folder titles, if they are both in the same folder.
# renames all, but only in the same folder parent. But nobody should be doing that, right?
if folder_to_rename and new_folder_name:
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.rename_folder(folder_to_rename, new_folder_name, in_folder)
code = 1
else:
code = -1
return dict(code=code)
@ajax_login_required
@json.json_view
def move_feed_to_folders(request):
feed_id = int(request.POST['feed_id'])
in_folders = request.POST.getlist('in_folders', '')
to_folders = request.POST.getlist('to_folders', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_feed_to_folders(feed_id, in_folders=in_folders,
to_folders=to_folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@ajax_login_required
@json.json_view
def move_feed_to_folder(request):
feed_id = int(request.POST['feed_id'])
in_folder = request.POST.get('in_folder', '')
to_folder = request.POST.get('to_folder', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_feed_to_folder(feed_id, in_folder=in_folder,
to_folder=to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@ajax_login_required
@json.json_view
def move_folder_to_folder(request):
folder_name = request.POST['folder_name']
in_folder = request.POST.get('in_folder', '')
to_folder = request.POST.get('to_folder', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_folder_to_folder(folder_name, in_folder=in_folder, to_folder=to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@required_params('feeds_by_folder', 'to_folder')
@ajax_login_required
@json.json_view
def move_feeds_by_folder_to_folder(request):
feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
to_folder = request.POST['to_folder']
new_folder = request.POST.get('new_folder', None)
request.user.profile.send_opml_export_email(reason="You have moved a number of feeds at once, so here's a backup just in case.")
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
if new_folder:
user_sub_folders.add_folder(to_folder, new_folder)
to_folder = new_folder
user_sub_folders = user_sub_folders.move_feeds_by_folder_to_folder(feeds_by_folder, to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@login_required
def add_feature(request):
if not request.user.is_staff:
return HttpResponseForbidden()
code = -1
form = FeatureForm(request.POST)
if form.is_valid():
form.save()
code = 1
return HttpResponseRedirect(reverse('index'))
return dict(code=code)
@json.json_view
def load_features(request):
user = get_user(request)
page = max(int(request.REQUEST.get('page', 0)), 0)
if page > 1:
logging.user(request, "~FBBrowse features: ~SBPage #%s" % (page+1))
features = Feature.objects.all()[page*3:(page+1)*3+1].values()
features = [{
'description': f['description'],
'date': localtime_for_timezone(f['date'], user.profile.timezone).strftime("%b %d, %Y")
} for f in features]
return features
@ajax_login_required
@json.json_view
def save_feed_order(request):
folders = request.POST.get('folders')
if folders:
# Test that folders can be JSON decoded
folders_list = json.decode(folders)
assert folders_list is not None
logging.user(request, "~FBFeed re-ordering: ~SB%s folders/feeds" % (len(folders_list)))
user_sub_folders = UserSubscriptionFolders.objects.get(user=request.user)
user_sub_folders.folders = folders
user_sub_folders.save()
return {}
@json.json_view
def feeds_trainer(request):
classifiers = []
feed_id = request.REQUEST.get('feed_id')
user = get_user(request)
usersubs = UserSubscription.objects.filter(user=user, active=True)
if feed_id:
feed = get_object_or_404(Feed, pk=feed_id)
usersubs = usersubs.filter(feed=feed)
usersubs = usersubs.select_related('feed').order_by('-feed__stories_last_month')
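    # Offer only untrained feeds with stories in the last month, unless a
    # single feed was explicitly requested.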
for us in usersubs:
if (not us.is_trained and us.feed.stories_last_month > 0) or feed_id:
classifier = dict()
classifier['classifiers'] = get_classifiers_for_user(user, feed_id=us.feed.pk)
classifier['feed_id'] = us.feed_id
classifier['stories_last_month'] = us.feed.stories_last_month
classifier['num_subscribers'] = us.feed.num_subscribers
classifier['feed_tags'] = json.decode(us.feed.data.popular_tags) if us.feed.data.popular_tags else []
classifier['feed_authors'] = json.decode(us.feed.data.popular_authors) if us.feed.data.popular_authors else []
classifiers.append(classifier)
user.profile.has_trained_intelligence = True
user.profile.save()
logging.user(user, "~FGLoading Trainer: ~SB%s feeds" % (len(classifiers)))
return classifiers
@ajax_login_required
@json.json_view
def save_feed_chooser(request):
is_premium = request.user.profile.is_premium
approved_feeds = [int(feed_id) for feed_id in request.POST.getlist('approved_feeds') if feed_id]
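    # Free accounts are capped at 64 active feeds; premium accounts keep every approved feed.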
if not is_premium:
approved_feeds = approved_feeds[:64]
activated = 0
usersubs = UserSubscription.objects.filter(user=request.user)
for sub in usersubs:
try:
if sub.feed_id in approved_feeds:
activated += 1
if not sub.active:
sub.active = True
sub.save()
if sub.feed.active_subscribers <= 0:
sub.feed.count_subscribers()
elif sub.active:
sub.active = False
sub.save()
except Feed.DoesNotExist:
pass
UserSubscription.queue_new_feeds(request.user)
UserSubscription.refresh_stale_feeds(request.user, exclude_new=True)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
logging.user(request, "~BB~FW~SBFeed chooser: ~FC%s~SN/~SB%s" % (
activated,
usersubs.count()
))
return {'activated': activated}
@ajax_login_required
def retrain_all_sites(request):
for sub in UserSubscription.objects.filter(user=request.user):
sub.is_trained = False
sub.save()
return feeds_trainer(request)
@login_required
def activate_premium_account(request):
try:
usersubs = UserSubscription.objects.select_related('feed').filter(user=request.user)
for sub in usersubs:
sub.active = True
sub.save()
if sub.feed.premium_subscribers <= 0:
sub.feed.count_subscribers()
sub.feed.schedule_feed_fetch_immediately()
except Exception, e:
subject = "Premium activation failed"
message = "%s -- %s\n\n%s" % (request.user, usersubs, e)
mail_admins(subject, message, fail_silently=True)
request.user.profile.is_premium = True
request.user.profile.save()
return HttpResponseRedirect(reverse('index'))
@login_required
def login_as(request):
if not request.user.is_staff:
logging.user(request, "~SKNON-STAFF LOGGING IN AS ANOTHER USER!")
assert False
return HttpResponseForbidden()
username = request.GET['user']
user = get_object_or_404(User, username__iexact=username)
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
return HttpResponseRedirect(reverse('index'))
def iframe_buster(request):
logging.user(request, "~FB~SBiFrame bust!")
return HttpResponse(status=204)
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_starred(request):
return _mark_story_as_starred(request)
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_starred(request):
return _mark_story_as_starred(request)
def _mark_story_as_starred(request):
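    # Shared handler for the story-id and story-hash starring endpoints: it
    # copies the story's fields into an MStarredStory for this user and keeps
    # the per-feed and per-tag starred counts in sync.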
code = 1
feed_id = int(request.REQUEST.get('feed_id', 0))
story_id = request.REQUEST.get('story_id', None)
story_hash = request.REQUEST.get('story_hash', None)
user_tags = request.REQUEST.getlist('user_tags')
message = ""
if story_hash:
story, _ = MStory.find_story(story_hash=story_hash)
feed_id = story and story.story_feed_id
else:
story, _ = MStory.find_story(story_feed_id=feed_id, story_id=story_id)
if not story:
return {'code': -1, 'message': "Could not find story to save."}
story_db = dict([(k, v) for k, v in story._data.items()
if k is not None and v is not None])
story_db.pop('user_id', None)
story_db.pop('starred_date', None)
story_db.pop('id', None)
story_db.pop('user_tags', None)
now = datetime.datetime.now()
story_values = dict(starred_date=now, user_tags=user_tags, **story_db)
params = dict(story_guid=story.story_guid, user_id=request.user.pk)
starred_story = MStarredStory.objects(**params).limit(1)
created = False
removed_user_tags = []
if not starred_story:
params.update(story_values)
starred_story = MStarredStory.objects.create(**params)
created = True
MActivity.new_starred_story(user_id=request.user.pk,
story_title=story.story_title,
story_feed_id=feed_id,
story_id=starred_story.story_guid)
new_user_tags = user_tags
MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=1)
else:
starred_story = starred_story[0]
new_user_tags = list(set(user_tags) - set(starred_story.user_tags or []))
removed_user_tags = list(set(starred_story.user_tags or []) - set(user_tags))
starred_story.user_tags = user_tags
starred_story.save()
for tag in new_user_tags:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=1)
for tag in removed_user_tags:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
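    # Roughly 1% of the time, schedule a full tag recount to correct any drift
    # in the incrementally-maintained counters.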
if random.random() < 0.01:
MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=request.user.pk).count()
if created:
logging.user(request, "~FCStarring: ~SB%s (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
else:
logging.user(request, "~FCUpdating starred:~SN~FC ~SB%s~SN (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
return {'code': code, 'message': message, 'starred_count': starred_count, 'starred_counts': starred_counts}
@required_params('story_id')
@ajax_login_required
@json.json_view
def mark_story_as_unstarred(request):
return _mark_story_as_unstarred(request)
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_unstarred(request):
return _mark_story_as_unstarred(request)
def _mark_story_as_unstarred(request):
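    # Shared handler for the unstar endpoints: the story can be located by guid
    # or by story hash, and the per-feed and per-tag starred counts are decremented.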
code = 1
story_id = request.POST.get('story_id', None)
story_hash = request.REQUEST.get('story_hash', None)
starred_counts = None
starred_story = None
if story_id:
starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id)
if not story_id or not starred_story:
starred_story = MStarredStory.objects(user_id=request.user.pk, story_hash=story_hash or story_id)
if starred_story:
starred_story = starred_story[0]
logging.user(request, "~FCUnstarring: ~SB%s" % (starred_story.story_title[:50]))
user_tags = starred_story.user_tags
feed_id = starred_story.story_feed_id
MActivity.remove_starred_story(user_id=request.user.pk,
story_feed_id=starred_story.story_feed_id,
story_id=starred_story.story_guid)
starred_story.user_id = 0
try:
starred_story.save()
except NotUniqueError:
starred_story.delete()
MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=-1)
for tag in user_tags:
try:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
except MStarredStoryCounts.DoesNotExist:
pass
# MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
starred_counts = MStarredStoryCounts.user_counts(request.user.pk)
else:
code = -1
return {'code': code, 'starred_counts': starred_counts}
@ajax_login_required
@json.json_view
def send_story_email(request):
code = 1
message = 'OK'
story_id = request.POST['story_id']
feed_id = request.POST['feed_id']
    to_addresses = request.POST.get('to', '').replace(',', ' ').replace('  ', ' ').strip().split(' ')
from_name = request.POST['from_name']
from_email = request.POST['from_email']
email_cc = is_true(request.POST.get('email_cc', 'true'))
comments = request.POST['comments']
comments = comments[:2048] # Separated due to PyLint
from_address = 'share.pytune@gmail.com'
share_user_profile = MSocialProfile.get_user(request.user.pk)
if not to_addresses:
code = -1
message = 'Please provide at least one email address.'
    elif not all(validate_email(to_address) for to_address in to_addresses if to_address):
code = -1
message = 'You need to send the email to a valid email address.'
elif not validate_email(from_email):
code = -1
message = 'You need to provide your email address.'
elif not from_name:
code = -1
message = 'You need to provide your name.'
else:
story, _ = MStory.find_story(feed_id, story_id)
story = Feed.format_story(story, feed_id, text=True)
feed = Feed.get_by_id(story['story_feed_id'])
params = {
"to_addresses": to_addresses,
"from_name": from_name,
"from_email": from_email,
"email_cc": email_cc,
"comments": comments,
"from_address": from_address,
"story": story,
"feed": feed,
"share_user_profile": share_user_profile,
}
text = render_to_string('mail/email_story.txt', params)
html = render_to_string('mail/email_story.xhtml', params)
subject = '%s' % (story['story_title'])
cc = None
if email_cc:
cc = ['%s <%s>' % (from_name, from_email)]
subject = subject.replace('\n', ' ')
msg = EmailMultiAlternatives(subject, text,
from_email='PyTune <%s>' % from_address,
to=to_addresses,
cc=cc,
headers={'Reply-To': '%s <%s>' % (from_name, from_email)})
msg.attach_alternative(html, "text/html")
try:
msg.send()
except boto.ses.connection.ResponseError, e:
code = -1
message = "Email error: %s" % str(e)
logging.user(request, '~BMSharing story by email to %s recipient%s: ~FY~SB%s~SN~BM~FY/~SB%s' %
(len(to_addresses), '' if len(to_addresses) == 1 else 's',
story['story_title'][:50], feed and feed.feed_title[:50]))
return {'code': code, 'message': message}
@json.json_view
def load_tutorial(request):
if request.REQUEST.get('finished'):
logging.user(request, '~BY~FW~SBFinishing Tutorial')
return {}
else:
pytune_feed = Feed.objects.filter(feed_address__icontains='blog.pytune.com').order_by('-pk')[0]
logging.user(request, '~BY~FW~SBLoading Tutorial')
return {
'pytune_feed': pytune_feed.canonical()
}
|
mit
| -6,293,551,005,867,734,000
| 42.350855
| 137
| 0.595985
| false
| 3.733947
| false
| false
| false
|
clchiou/garage
|
py/garage/examples/asyncs/supervisors.py
|
1
|
1115
|
"""Supervisor tree example."""
import logging
import curio
from garage.asyncs import TaskStack
from garage.asyncs.queues import Closed, Queue
async def supervisor():
print('supervisor start')
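    # Spawn the consumer and producer under one TaskStack so they are supervised
    # together: if the supervisor is cancelled, both children go down with it.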
async with TaskStack() as stack:
queue = Queue()
        await stack.spawn(consumer(queue))
        await stack.spawn(producer(queue))
async for task in curio.TaskGroup(stack):
await task.join()
print('supervisor stop')
async def producer(queue):
print('producer start')
message = list('Hello world!')
while message:
await queue.put(message.pop(0))
queue.close()
print('producer stop')
async def consumer(queue):
print('consumer start')
try:
while True:
print('consume', repr(await queue.get()))
except Closed:
pass
finally:
print('consumer stop')
def main():
logging.basicConfig(level=logging.DEBUG)
print('main start')
try:
curio.run(supervisor())
except KeyboardInterrupt:
print('main quit')
print('main stop')
if __name__ == '__main__':
main()
|
mit
| 3,136,778,452,220,943,400
| 20.037736
| 53
| 0.620628
| false
| 3.996416
| false
| false
| false
|
PostRockFTW/ExcitingBike
|
src/screenscrolltest/__init__.py
|
1
|
2317
|
import sys
import pygame
from pygame.locals import *
def main():
pygame.init()
gameWidth = 460
gameHeight = 230
miniMapFactor = 8
mainSurface = pygame.display.set_mode((gameWidth, gameHeight))
mainClock = pygame.time.Clock()
FPS = 30
pygame.display.set_caption('Screen Scroll Test')
print "Move screen with left/right arrow keys"
print "Hold SHIFT to jump to edges"
backgroundSurface = pygame.image.load('background.png').convert()
miniMapSurface = pygame.Surface((backgroundSurface.get_width()/miniMapFactor, backgroundSurface.get_height()/miniMapFactor))
pygame.transform.scale(backgroundSurface, (miniMapSurface.get_width(), miniMapSurface.get_height()), miniMapSurface)
running = True
currOffset = 0
# Game loop
while running:
pygame.event.pump()
for event in pygame.event.get():
if ((event.type == QUIT) or
(event.type == KEYDOWN and event.key == K_ESCAPE)):
running = False
# Draw the current section of the background
mainSurface.blit(backgroundSurface, (-currOffset, 0))
miniMapLeft = mainSurface.get_width() - miniMapSurface.get_width()
mainSurface.blit(miniMapSurface, (miniMapLeft, 0))
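        # Outline the slice of the minimap that corresponds to the portion of the
        # background currently visible on screen, scaled by the same ratio.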
miniMapBorderRect = pygame.Rect(
miniMapLeft + currOffset * (float(miniMapSurface.get_width()) / backgroundSurface.get_width()),
0,
miniMapSurface.get_width() * (float(mainSurface.get_width()) / backgroundSurface.get_width()),
miniMapSurface.get_height()
)
pygame.draw.rect(mainSurface, pygame.color.Color('white'), miniMapBorderRect, 2)
pressedKeys = pygame.key.get_pressed()
shiftPressed = pressedKeys[K_LSHIFT] or pressedKeys[K_RSHIFT]
if pressedKeys[K_RIGHT]:
currOffset += 10
rightMost = (backgroundSurface.get_width() - mainSurface.get_width())
if (currOffset > rightMost) or shiftPressed:
currOffset = rightMost
elif pressedKeys[K_LEFT]:
currOffset -= 10
if (currOffset < 0) or shiftPressed:
currOffset = 0
pygame.display.update()
mainClock.tick(FPS)
pygame.quit()
sys.exit()
if __name__ == "__main__":
main()
|
gpl-2.0
| -1,759,312,278,505,376,500
| 30.739726
| 128
| 0.630988
| false
| 4.029565
| false
| false
| false
|
NaPs/Kolekto
|
kolekto/commands/edit.py
|
1
|
1031
|
import json
from kolekto.printer import printer
from kolekto.commands import Command
from kolekto.helpers import get_hash
class Edit(Command):
""" Edit a movie.
"""
help = 'edit a movie'
def prepare(self):
self.add_arg('input', metavar='movie-hash-or-file')
def run(self, args, config):
mdb = self.get_metadata_db(args.tree)
movie_hash = get_hash(args.input)
try:
movie = mdb.get(movie_hash)
except KeyError:
printer.p('Unknown movie hash.')
return
movie_json = json.dumps(movie, indent=4)
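        # Keep re-opening the editor until the JSON parses, or the user gives up.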
while True:
movie_json = printer.edit(movie_json)
try:
mdb.save(movie_hash, json.loads(movie_json))
except ValueError:
if printer.ask('Bad json data, would you like to try again?', default=True):
continue
else:
break
else:
printer.p('Saved.')
break
|
mit
| 4,217,550,169,612,711,400
| 22.976744
| 92
| 0.534433
| false
| 4.124
| false
| false
| false
|
synthesio/infra-ovh-ansible-module
|
plugins/modules/public_cloud_instance.py
|
1
|
3577
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: public_cloud_instance
short_description: Manage OVH API for public cloud instance creation
description:
    - This module manages the creation of an instance on OVH public Cloud
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
name:
required: true
description: The instance name to create
ssh_key_id:
required: false
description: The sshKey Id to add
flavor_id:
required: true
description: The id of the commercial name
image_id:
required: true
description: The id of the image/os to deploy on the instance
region:
required: true
description: The region where to deploy the instance
networks:
required: false
description: The network configuration.
Can be the full array of the network configuration
service_name:
required: true
description: The service_name
monthly_billing:
required: false
default: false
description: Enable or not the monthly billing
'''
EXAMPLES = '''
- name: run installation
synthesio.ovh.ovh_public_cloud_instance:
name: "{{ inventory_hostname }}"
ssh_key_id: "{{ sshKeyId }}"
service_name: "{{ service_name }}"
networks: "{{ networks }}"
flavor_id: "{{ flavor_id }}"
region: "{{ region }}"
image_id: "{{ image_id }}"
delegate_to: localhost
register: instance_metadata
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
name=dict(required=True),
flavor_id=dict(required=True),
image_id=dict(required=True),
service_name=dict(required=True),
ssh_key_id=dict(required=False, default=None),
region=dict(required=True),
networks=dict(required=False, default=[], type="list"),
monthly_billing=dict(required=False, default=False, type="bool")
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
name = module.params['name']
service_name = module.params['service_name']
flavor_id = module.params['flavor_id']
image_id = module.params['image_id']
ssh_key_id = module.params['ssh_key_id']
region = module.params['region']
networks = module.params['networks']
monthly_billing = module.params['monthly_billing']
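    # Create the instance through the OVH public cloud API; any API error is
    # reported back to Ansible as a module failure.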
try:
result = client.post('/cloud/project/%s/instance' % service_name,
flavorId=flavor_id,
imageId=image_id,
monthlyBilling=monthly_billing,
name=name,
region=region,
networks=networks,
sshKeyId=ssh_key_id
)
module.exit_json(changed=True, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
|
mit
| 4,153,331,261,659,755,000
| 27.846774
| 105
| 0.609729
| false
| 3.992188
| false
| false
| false
|
smlacombe/sageo
|
app/model/filters/filter_host_state.py
|
1
|
2263
|
#
# Copyright (C) 2013 Savoir-Faire Linux Inc.
#
# This file is part of Sageo
#
# Sageo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sageo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sageo. If not, see <http://www.gnu.org/licenses/>
from .filter import Filter
from builtin import FILTER_HOST_STATE
from sqlalchemy import *
from sqlalchemy.orm import *
STATE_CODES = { FILTER_HOST_STATE + '_up': 0 , FILTER_HOST_STATE + '_down': 1 , FILTER_HOST_STATE + '_unreach': 2, FILTER_HOST_STATE + '_pending': 3 }
class FilterHostState(Filter):
def __init__(self, name, title, descr):
Filter.__init__(self, name, title, descr)
self.column_names = ['host_has_been_checked', 'host_state']
def filter(self, states):
"""
Filter host states.
        states: dictionary mapping each host state name to a boolean value
"""
filter = "Filter: host_has_been_checked = 1\n"
state_code = 0
count = 0
for state, value in states.items():
if value:
state_code = STATE_CODES[state]
filter = filter + "Filter: host_state = " + str(state_code) + "\n"
count = count + 1
filter = filter + "Or: " + str(count) + "\n"
return filter
def get_col_def(self):
return [
Column(FILTER_HOST_STATE + '_up', Boolean, default=True, info={'label': 'UP'}),
Column(FILTER_HOST_STATE + '_down', Boolean, default=True, info={'label': 'DOWN'}),
Column(FILTER_HOST_STATE + '_unreach', Boolean, default=True, info={'label': 'UNREACHABLE'}),
Column(FILTER_HOST_STATE + '_pending', Boolean, default=True, info={'label': 'PENDING'})
]
|
gpl-3.0
| -3,413,612,498,410,856,400
| 40.145455
| 151
| 0.604949
| false
| 3.809764
| false
| false
| false
|
JDevlieghere/InTeXration
|
intexration/task.py
|
1
|
4809
|
import logging
import os
import shutil
import subprocess
import tempfile
from intexration.tools import create_dir, cd
from intexration.build import Identifier, Build
from intexration.document import Document
from intexration.parser import BuildParser
class Task():
def run(self):
pass
class CloneTask(Task):
def __init__(self, manager, request):
self.build_manager = manager
self.build_request = request
self.temp_directory = tempfile.mkdtemp()
self.clone_directory = self._create_dir()
def _create_dir(self):
return create_dir(os.path.join(self.temp_directory,
self.build_request.owner,
self.build_request.repository,
self.build_request.commit))
def _clone(self):
logging.info("Cloning to %s", self.clone_directory)
if subprocess.call(['git', 'clone', self.build_request.ssh_url(), self.clone_directory],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0:
return
if subprocess.call(['git', 'clone', self.build_request.https_url(), self.clone_directory],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0:
return
raise RuntimeError("Clone failed.")
def _submit_builds(self):
builds = dict()
build_parser = BuildParser(self.clone_directory)
for name in build_parser.names():
identifier = Identifier(self.build_request.owner,
self.build_request.repository,
name)
src_path = os.path.join(self.clone_directory, build_parser.dir(name))
dst_path = os.path.join(self.temp_directory, name)
shutil.copytree(src_path, dst_path)
build = Build(dst_path,
build_parser.tex(name),
build_parser.idx(name),
build_parser.bib(name))
builds[identifier] = build
self.build_manager.submit_builds(builds)
def _clean(self):
shutil.rmtree(self.clone_directory)
def run(self):
try:
self._clone()
self._submit_builds()
self._clean()
except RuntimeError as e:
logging.error(e)
except RuntimeWarning as e:
logging.warning(e)
class CompileTask(Task):
MAKEINDEX = 'makeindex'
BIBTEX = 'bibtex'
PDFLATEX = 'pdflatex'
def __init__(self, manager, identifier, build):
self.build_manager = manager
self.identifier = identifier
self.build = build
self.document_directory = self._create_dir()
def _create_dir(self):
return create_dir(os.path.join(self.build_manager.output,
self.identifier.owner,
self.identifier.repository))
def _makeindex(self):
"""Make index."""
with cd(self.build.path):
if subprocess.call([self.MAKEINDEX, self.build.idx],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
logging.warning("%s Makeindex failed for %s", self.identifier, self.build.idx)
def _bibtex(self):
"""Compile bibtex."""
with cd(self.build.path):
if subprocess.call([self.BIBTEX, self.build.bib],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
logging.warning("%s Bibtex failed for %s", self.identifier, self.build.bib)
def _compile(self):
"""Compile with pdflatex."""
with cd(self.build.path):
if subprocess.call([self.PDFLATEX, '-interaction=nonstopmode', self.build.tex],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
logging.warning("%s Compilation finished with errors for %s", self.identifier, self.build.tex)
def _submit_documents(self):
document = Document(self.identifier.name, self.build.path)
document.move_to(self.document_directory)
self.build_manager.submit_document(self.identifier, document)
self.build.finish()
def run(self):
try:
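            # pdflatex runs before and after makeindex/bibtex, plus a final pass,
            # so the index, bibliography and cross-references all resolve.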
self._compile()
self._makeindex()
self._bibtex()
self._compile()
self._compile()
self._submit_documents()
except RuntimeError as e:
logging.error(e)
except RuntimeWarning as e:
logging.warning(e)
|
apache-2.0
| 7,176,475,637,325,700,000
| 35.157895
| 110
| 0.550842
| false
| 4.456905
| false
| false
| false
|
maoy/zknova
|
nova/api/openstack/compute/contrib/extended_status.py
|
1
|
4009
|
# Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Status Admin API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'extended_status')
class ExtendedStatusController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedStatusController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_server(self, server, instance):
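        # Copy the instance's task, vm and power state into the server response
        # under the "OS-EXT-STS:" prefix.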
for state in ['task_state', 'vm_state', 'power_state']:
key = "%s:%s" % (Extended_status.alias, state)
server[key] = instance[state]
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(server, db_instance)
class Extended_status(extensions.ExtensionDescriptor):
"""Extended Status support."""
name = "ExtendedStatus"
alias = "OS-EXT-STS"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_status/api/v1.1")
updated = "2011-11-03T00:00:00+00:00"
def get_controller_extensions(self):
controller = ExtendedStatusController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def make_server(elem):
elem.set('{%s}task_state' % Extended_status.namespace,
'%s:task_state' % Extended_status.alias)
elem.set('{%s}power_state' % Extended_status.namespace,
'%s:power_state' % Extended_status.alias)
elem.set('{%s}vm_state' % Extended_status.namespace,
'%s:vm_state' % Extended_status.alias)
class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
class ExtendedStatusesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
|
apache-2.0
| 3,888,264,689,838,659,000
| 38.693069
| 79
| 0.65777
| false
| 4.041331
| false
| false
| false
|
Makeystreet/makeystreet
|
woot/apps/catalog/migrations/0020_auto__add_image__chg_field_documentation_url.py
|
1
|
21689
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Image'
db.create_table(u'catalog_image', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('large_url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('small_url', self.gf('django.db.models.fields.URLField')(max_length=1000, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='images', null=True, to=orm['django_facebook.FacebookCustomUser'])),
('added_time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'catalog', ['Image'])
# Changing field 'Documentation.url'
db.alter_column(u'catalog_documentation', 'url', self.gf('django.db.models.fields.URLField')(max_length=1000))
# Adding M2M table for field images on 'Makey'
m2m_table_name = db.shorten_name(u'catalog_makey_images')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('makey', models.ForeignKey(orm[u'catalog.makey'], null=False)),
('image', models.ForeignKey(orm[u'catalog.image'], null=False))
))
db.create_unique(m2m_table_name, ['makey_id', 'image_id'])
def backwards(self, orm):
# Deleting model 'Image'
db.delete_table(u'catalog_image')
# Changing field 'Documentation.url'
db.alter_column(u'catalog_documentation', 'url', self.gf('django.db.models.fields.URLField')(max_length=200))
# Removing M2M table for field images on 'Makey'
db.delete_table(db.shorten_name(u'catalog_makey_images'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.liketutorial': {
'Meta': {'object_name': 'LikeTutorial'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"})
},
u'catalog.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeydocumentations'", 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeyimages'", 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeynotes'", 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
u'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog']
|
apache-2.0
| 7,494,500,861,876,699,000
| 74.051903
| 260
| 0.55867
| false
| 3.66864
| false
| false
| false
|
afaheem88/tempest
|
tempest/thirdparty/boto/base.py
|
1
|
14745
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging as orig_logging
import re
import boto
from boto import exception
from boto import s3
from oslo_log import log as logging
import six
from six.moves.urllib import parse as urlparse
from tempest_lib import exceptions as lib_exc
import tempest.clients
from tempest.common.utils import file_utils
from tempest import config
from tempest import exceptions
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
def decision_maker():
S3_CAN_CONNECT_ERROR = None
secret_matcher = re.compile("[A-Za-z0-9+/]{32,}") # 40 in other system
id_matcher = re.compile("[A-Za-z0-9]{20,}")
def all_read(*args):
return all(map(file_utils.have_effective_read_access, args))
boto_logger = logging.getLogger('boto')
level = boto_logger.logger.level
# suppress logging for boto
boto_logger.logger.setLevel(orig_logging.CRITICAL)
def _cred_sub_check(connection_data):
if not id_matcher.match(connection_data["aws_access_key_id"]):
raise Exception("Invalid AWS access Key")
if not secret_matcher.match(connection_data["aws_secret_access_key"]):
raise Exception("Invalid AWS secret Key")
raise Exception("Unknown (Authentication?) Error")
# NOTE(andreaf) Setting up an extra manager here is redundant,
# and should be removed.
openstack = tempest.clients.Manager()
try:
if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
raise Exception("Failed to get hostname from the s3_url")
s3client = openstack.s3_client
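        # Probe S3 with an intentionally invalid bucket name: a 403 means the
        # endpoint answered, so only the credentials still need a sanity check.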
try:
s3client.get_bucket("^INVALID*#()@INVALID.")
except exception.BotoServerError as exc:
if exc.status == 403:
_cred_sub_check(s3client.connection_data)
except Exception as exc:
S3_CAN_CONNECT_ERROR = str(exc)
except lib_exc.Unauthorized:
S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
" failed to get them even by keystoneclient"
boto_logger.logger.setLevel(level)
return {'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR}
class BotoExceptionMatcher(object):
STATUS_RE = r'[45]\d\d'
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
""":returns: Returns with an error string if it does not match,
returns with None when it matches.
"""
if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
if re.match(self.STATUS_RE, str(exc.status)) is None:
return ("Status code (%s) does not match"
"the expected re pattern \"%s\""
% (exc.status, self.STATUS_RE))
if re.match(self.CODE_RE, str(exc.error_code)) is None:
return ("Error code (%s) does not match" +
"the expected re pattern \"%s\"") %\
(exc.error_code, self.CODE_RE)
return None
class ClientError(BotoExceptionMatcher):
STATUS_RE = r'4\d\d'
class ServerError(BotoExceptionMatcher):
STATUS_RE = r'5\d\d'
def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
"""
    Usable for adding ExceptionMatcher(s) into the exception tree.
    Non-leaf elements perform a wildcard match.
"""
# in error_code just literal and '.' characters expected
if not isinstance(error_data, six.string_types):
(error_code, status_code) = map(str, error_data)
else:
status_code = None
error_code = error_data
parts = error_code.split('.')
basematch = ""
num_parts = len(parts)
max_index = num_parts - 1
add_cls = error_cls
for i_part in six.moves.xrange(num_parts):
part = parts[i_part]
leaf = i_part == max_index
if not leaf:
match = basematch + part + "[.].*"
else:
match = basematch + part
basematch += part + "[.]"
if not hasattr(add_cls, part):
cls_dict = {"CODE_RE": match}
if leaf and status_code is not None:
cls_dict["STATUS_RE"] = status_code
cls = type(part, (base, ), cls_dict)
setattr(add_cls, part, cls())
add_cls = cls
elif leaf:
raise LookupError("Tries to redefine an error code \"%s\"" % part)
else:
add_cls = getattr(add_cls, part)
# TODO(afazekas): classmethod handling
def friendly_function_name_simple(call_able):
name = ""
if hasattr(call_able, "im_class"):
name += call_able.im_class.__name__ + "."
name += call_able.__name__
return name
def friendly_function_call_str(call_able, *args, **kwargs):
string = friendly_function_name_simple(call_able)
string += "(" + ", ".join(map(str, args))
if len(kwargs):
if len(args):
string += ", "
string += ", ".join("=".join(map(str, (key, value)))
for (key, value) in kwargs.items())
return string + ")"
class BotoTestCase(tempest.test.BaseTestCase):
"""Recommended to use as base class for boto related test."""
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BotoTestCase, cls).skip_checks()
if not CONF.compute_feature_enabled.ec2_api:
raise cls.skipException("The EC2 API is not available")
if not CONF.identity_feature_enabled.api_v2 or \
not CONF.identity.auth_version == 'v2':
raise cls.skipException("Identity v2 is not available")
@classmethod
def resource_setup(cls):
super(BotoTestCase, cls).resource_setup()
cls.conclusion = decision_maker()
        # The trash contains cleanup functions and parameters in tuples
# (function, *args, **kwargs)
cls._resource_trash_bin = {}
cls._sequence = -1
if (hasattr(cls, "S3") and
cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
raise cls.skipException("S3 " + cls.__name__ + ": " +
cls.conclusion['S3_CAN_CONNECT_ERROR'])
@classmethod
def addResourceCleanUp(cls, function, *args, **kwargs):
"""Adds CleanUp callable, used by tearDownClass.
Recommended to a use (deep)copy on the mutable args.
"""
cls._sequence = cls._sequence + 1
cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
return cls._sequence
@classmethod
def cancelResourceCleanUp(cls, key):
"""Cancel Clean up request."""
del cls._resource_trash_bin[key]
# TODO(afazekas): Add "with" context handling
def assertBotoError(self, excMatcher, callableObj,
*args, **kwargs):
"""Example usage:
self.assertBotoError(self.ec2_error_code.client.
InvalidKeyPair.Duplicate,
self.client.create_keypair,
key_name)
"""
try:
callableObj(*args, **kwargs)
except exception.BotoServerError as exc:
error_msg = excMatcher.match(exc)
if error_msg is not None:
raise self.failureException(error_msg)
else:
raise self.failureException("BotoServerError not raised")
@classmethod
def resource_cleanup(cls):
"""Calls the callables added by addResourceCleanUp,
when you overwrite this function don't forget to call this too.
"""
fail_count = 0
trash_keys = sorted(cls._resource_trash_bin, reverse=True)
for key in trash_keys:
(function, pos_args, kw_args) = cls._resource_trash_bin[key]
try:
func_name = friendly_function_call_str(function, *pos_args,
**kw_args)
LOG.debug("Cleaning up: %s" % func_name)
function(*pos_args, **kw_args)
except BaseException:
fail_count += 1
LOG.exception("Cleanup failed %s" % func_name)
finally:
del cls._resource_trash_bin[key]
super(BotoTestCase, cls).resource_cleanup()
        # NOTE(afazekas): let the super be called even on exceptions.
        # The real exceptions are already logged; if the super throws another,
        # it does not cause hidden issues.
if fail_count:
raise exceptions.TearDownException(num=fail_count)
s3_error_code = BotoExceptionMatcher()
s3_error_code.server = ServerError()
s3_error_code.client = ClientError()
gone_set = set(('_GONE',))
def assertReSearch(self, regexp, string):
if re.search(regexp, string) is None:
raise self.failureException("regexp: '%s' not found in '%s'" %
(regexp, string))
def assertNotReSearch(self, regexp, string):
if re.search(regexp, string) is not None:
raise self.failureException("regexp: '%s' found in '%s'" %
(regexp, string))
def assertReMatch(self, regexp, string):
if re.match(regexp, string) is None:
raise self.failureException("regexp: '%s' not matches on '%s'" %
(regexp, string))
def assertNotReMatch(self, regexp, string):
if re.match(regexp, string) is not None:
raise self.failureException("regexp: '%s' matches on '%s'" %
(regexp, string))
@classmethod
def destroy_bucket(cls, connection_data, bucket):
"""Destroys the bucket and its content, just for teardown."""
exc_num = 0
try:
with contextlib.closing(
boto.connect_s3(**connection_data)) as conn:
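                # The bucket may be passed either as a name or as a Bucket
                # object; resolve names to objects before deleting keys.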
if isinstance(bucket, basestring):
bucket = conn.lookup(bucket)
assert isinstance(bucket, s3.bucket.Bucket)
for obj in bucket.list():
try:
bucket.delete_key(obj.key)
obj.close()
except BaseException:
LOG.exception("Failed to delete key %s " % obj.key)
exc_num += 1
conn.delete_bucket(bucket)
except BaseException:
LOG.exception("Failed to destroy bucket %s " % bucket)
exc_num += 1
if exc_num:
raise exceptions.TearDownException(num=exc_num)
# you can specify tuples if you want to specify the status pattern
for code in (('AccessDenied', 403),
('AccountProblem', 403),
('AmbiguousGrantByEmailAddress', 400),
('BadDigest', 400),
('BucketAlreadyExists', 409),
('BucketAlreadyOwnedByYou', 409),
('BucketNotEmpty', 409),
('CredentialsNotSupported', 400),
('CrossLocationLoggingProhibited', 403),
('EntityTooSmall', 400),
('EntityTooLarge', 400),
('ExpiredToken', 400),
('IllegalVersioningConfigurationException', 400),
('IncompleteBody', 400),
('IncorrectNumberOfFilesInPostRequest', 400),
('InlineDataTooLarge', 400),
('InvalidAccessKeyId', 403),
'InvalidAddressingHeader',
('InvalidArgument', 400),
('InvalidBucketName', 400),
('InvalidBucketState', 409),
('InvalidDigest', 400),
('InvalidLocationConstraint', 400),
('InvalidPart', 400),
('InvalidPartOrder', 400),
('InvalidPayer', 403),
('InvalidPolicyDocument', 400),
('InvalidRange', 416),
('InvalidRequest', 400),
('InvalidSecurity', 403),
('InvalidSOAPRequest', 400),
('InvalidStorageClass', 400),
('InvalidTargetBucketForLogging', 400),
('InvalidToken', 400),
('InvalidURI', 400),
('KeyTooLong', 400),
('MalformedACLError', 400),
('MalformedPOSTRequest', 400),
('MalformedXML', 400),
('MaxMessageLengthExceeded', 400),
('MaxPostPreDataLengthExceededError', 400),
('MetadataTooLarge', 400),
('MethodNotAllowed', 405),
('MissingAttachment'),
('MissingContentLength', 411),
('MissingRequestBodyError', 400),
('MissingSecurityElement', 400),
('MissingSecurityHeader', 400),
('NoLoggingStatusForKey', 400),
('NoSuchBucket', 404),
('NoSuchKey', 404),
('NoSuchLifecycleConfiguration', 404),
('NoSuchUpload', 404),
('NoSuchVersion', 404),
('NotSignedUp', 403),
('NotSuchBucketPolicy', 404),
('OperationAborted', 409),
('PermanentRedirect', 301),
('PreconditionFailed', 412),
('Redirect', 307),
('RequestIsNotMultiPartContent', 400),
('RequestTimeout', 400),
('RequestTimeTooSkewed', 403),
('RequestTorrentOfBucketError', 400),
('SignatureDoesNotMatch', 403),
('TemporaryRedirect', 307),
('TokenRefreshRequired', 400),
('TooManyBuckets', 400),
('UnexpectedContent', 400),
('UnresolvableGrantByEmailAddress', 400),
('UserKeyMustBeSpecified', 400)):
_add_matcher_class(BotoTestCase.s3_error_code.client,
code, base=ClientError)
for code in (('InternalError', 500),
('NotImplemented', 501),
('ServiceUnavailable', 503),
('SlowDown', 503)):
_add_matcher_class(BotoTestCase.s3_error_code.server,
code, base=ServerError)
|
apache-2.0
| -9,187,748,402,305,624,000
| 37.498695
| 78
| 0.571448
| false
| 4.275152
| true
| false
| false
|
pberkes/persistent_locals
|
test_deco.py
|
1
|
2351
|
import unittest
import deco
class _TestException(Exception):
pass
@deco.persistent_locals
def _globalfunc(x):
z = 2*x
return z
_a = 2
@deco.persistent_locals
def _globaldependent(x):
z = x + _a
return z
@deco.persistent_locals
def _toberemoved(x):
z = 2*x
return z
class TestPersistLocals(unittest.TestCase):
def test_outer_scope(self):
_globalfunc(2)
self.assertEqual(_globalfunc.locals['x'], 2)
self.assertEqual(_globalfunc.locals['z'], 4)
def test_global_name_removed(self):
global _toberemoved
f = _toberemoved
f(2) # should pass
del _toberemoved
f(2) # might fail if 'f' looks for a global name '_toberemoved'
def test_globals_are_flexible(self):
global _a
self.assertEqual(_globaldependent(2), 4)
_a = 3
self.assertEqual(_globaldependent(2), 5)
def test_inner_scope(self):
@deco.persistent_locals
def is_sum_lt_prod(a,b,c):
sum = a+b+c
prod = a*b*c
return sum<prod
self.assertEqual(is_sum_lt_prod.locals, {})
is_sum_lt_prod(2,3,4)
self.assertEqual(set(is_sum_lt_prod.locals.keys()),
set(['a','b','c','sum','prod']))
self.assertEqual(is_sum_lt_prod.locals['sum'], 2+3+4)
self.assertEqual(is_sum_lt_prod.locals['prod'], 2*3*4)
def test_args(self):
@deco.persistent_locals
def f(x, *args):
return x, args
x, args = f(2,3,4)
self.assertEqual(x, 2)
self.assertEqual(args, (3,4))
self.assertEqual(f.locals['x'], 2)
self.assertEqual(f.locals['args'], (3,4))
def test_exception(self):
@deco.persistent_locals
def f(x):
y = 3
raise _TestException
z = 4 # this local variable is never initialized
self.assertRaises(_TestException, f, 0)
self.assertEqual(f.locals, {'x': 0, 'y': 3})
def test_late_return(self):
def g(a):
return a
@deco.persistent_locals
def f(x):
try:
return x
finally:
g(1)
f(0)
self.assertEqual(f.locals, {'x': 0, 'g': g})
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| 4,609,870,817,239,272,400
| 24.27957
| 71
| 0.535517
| false
| 3.417151
| true
| false
| false
|
Ecotrust/TEKDB
|
TEKDB/TEKDB/settings.py
|
1
|
5770
|
"""
Django settings for TEKDB project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lbgg^obk_vnj1o%s-u)vy+6@%=)uk4011d!!vub_5s40(^+mzp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
'localhost',
u'demo-tekdb.herokuapp.com',
]
# Application definition
INSTALLED_APPS = [
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
# 'registration',
'leaflet',
'nested_admin',
'ckeditor',
'explore',
'login',
'TEKDB',
'Lookup',
'Accounts',
'Relationships',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TEKDB.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'login/templates'),
os.path.join(BASE_DIR, 'explore/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TEKDB.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'tekdb',
'USER': 'postgres',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
#Registration
ACCOUNT_ACTIVATION_DAYS = 14
REGISTRATION_OPEN = False
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "explore", "static"),
# os.path.join(BASE_DIR, "TEKDB", "static"),
# ]
### DJANGO-REGISTRATION SETTINGS ###
REGISTRATION_OPEN = True
SEARCH_CATEGORIES = [
'all',
'places',
'resources',
'activities',
'citations',
'media',
]
#Locality? People?
AUTH_USER_MODEL = 'Accounts.Users'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Full',
},
'custom': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Format'],
['Bold', 'Italic', 'Underline','Strike','Subscript','Superscript'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink'],
['Image','Table','HorizontalRule','SpecialChar'],
[ 'TextColor','BGColor' ],
['Undo','Redo'],
['RemoveFormat', 'Source']
]
}
}
RECORD_ICONS = {
'activity': '/static/explore/img/activity.png',
'citation': '/static/explore/img/citation.png',
'place': '/static/explore/img/place.png',
'media': '/static/explore/img/media.png',
'event': '/static/explore/img/activity.png',
'resource': '/static/explore/img/resource.png',
}
# Set this in local_settings.py
DATABASE_GEOGRAPHY = {
'default_lon': -11131949.08,
'default_lat': 4865942.28,
'default_zoom': 3,
'map_template': 'gis/admin/ol2osm.html'
}
ADMIN_SITE_HEADER = 'TEK DB Admin'
from TEKDB.local_settings import *
### HEROKU SETTINGS (NOT FOR PRODUCTION!!!)
### Update database configuration with $DATABASE_URL.
#
# import dj_database_url
# db_from_env = dj_database_url.config(conn_max_age=500)
# DATABASES['default'].update(db_from_env)
#
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
mit
| -8,495,687,464,848,662,000
| 24.418502
| 140
| 0.64922
| false
| 3.316092
| false
| false
| false
|
gautelinga/BERNAISE
|
problems/porous.py
|
1
|
10258
|
import dolfin as df
import os
from . import *
from common.io import mpi_is_root, load_mesh
from common.bcs import Fixed, Pressure, Charged
from ufl import sign
import numpy as np
__author__ = "Gaute Linga"
class PeriodicBoundary(df.SubDomain):
# Left boundary is target domain
def __init__(self, Ly, grid_spacing):
self.Ly = Ly
self.grid_spacing = grid_spacing
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[1], -self.Ly/2) and on_boundary)
def map(self, x, y):
y[0] = x[0]
y[1] = x[1] - self.Ly
class Left(df.SubDomain):
def __init__(self, Lx):
self.Lx = Lx
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[0], -self.Lx/2) and on_boundary)
class Right(df.SubDomain):
def __init__(self, Lx):
self.Lx = Lx
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[0], self.Lx/2) and on_boundary)
class Obstacles(df.SubDomain):
def __init__(self, Lx, centroids, rad, grid_spacing):
self.Lx = Lx
self.centroids = centroids
self.rad = rad
self.grid_spacing = grid_spacing
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
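        # dx[i] is the vector from the point x to obstacle centroid i;
        # the point lies on an obstacle boundary if some dist[i] ~ rad.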
dx = self.centroids - np.outer(np.ones(len(self.centroids)), x)
dist = np.sqrt(dx[:, 0]**2 + dx[:, 1]**2)
return bool(on_boundary
and any(dist < self.rad + 0.1*self.grid_spacing))
def problem():
info_cyan("Intrusion of one fluid into another in a porous medium.")
# Define solutes
# Format: name, valency, diffusivity in phase 1, diffusivity in phase
# 2, beta in phase 1, beta in phase 2
solutes = [["c_p", 1, 1e-4, 1e-2, 4., 1.],
["c_m", -1, 1e-4, 1e-2, 4., 1.]]
# Format: name : (family, degree, is_vector)
base_elements = dict(u=["Lagrange", 2, True],
p=["Lagrange", 1, False],
phi=["Lagrange", 1, False],
g=["Lagrange", 1, False],
c=["Lagrange", 1, False],
V=["Lagrange", 1, False])
factor = 1./2.
sigma_e = -1.
# Default parameters to be loaded unless starting from checkpoint.
parameters = dict(
solver="basic",
folder="results_porous",
restart_folder=False,
enable_NS=True,
enable_PF=True,
enable_EC=True,
save_intv=5,
stats_intv=5,
checkpoint_intv=50,
tstep=0,
dt=0.08,
t_0=0.,
T=20.,
grid_spacing=0.05,
interface_thickness=factor*0.060,
solutes=solutes,
base_elements=base_elements,
Lx=4.,
Ly=3.,
rad_init=0.25,
#
surface_tension=24.5,
grav_const=0.0,
# inlet_velocity=0.1,
pressure_left=1000.,
pressure_right=0.,
V_left=0.,
V_right=0.,
surface_charge=sigma_e,
concentration_init=1.,
front_position_init=0.1, # percentage "filled" initially
solutes_in_oil=False,
#
pf_mobility_coeff=factor*0.000040,
density=[1000., 1000.],
viscosity=[100., 10.],
permittivity=[1., 1.],
#
initial_interface="flat",
#
use_iterative_solvers=False,
use_pressure_stabilization=False
)
return parameters
def constrained_domain(Ly, grid_spacing, **namespace):
return PeriodicBoundary(Ly, grid_spacing)
def mesh(Lx=4., Ly=3., grid_spacing=0.04, **namespace):
return load_mesh("meshes/periodic_porous_dx" + str(grid_spacing) + ".h5")
def initialize(Lx, Ly, rad_init,
interface_thickness, solutes, restart_folder,
field_to_subspace, solutes_in_oil, # inlet_velocity,
front_position_init, concentration_init,
pressure_left, pressure_right,
enable_NS, enable_PF, enable_EC, initial_interface,
**namespace):
""" Create the initial state.
The initial states are specified in a dict indexed by field. The format
should be
w_init_field[field] = 'df.Function(...)'.
The work dicts w_ and w_1 are automatically initialized from these
functions elsewhere in the code.
Note: You only need to specify the initial states that are nonzero.
"""
w_init_field = dict()
if not restart_folder:
# if enable_NS:
# try:
# subspace = field_to_subspace["u"].collapse()
# except:
# subspace = field_to_subspace["u"]
# w_init_field["u"] = initial_velocity(0.,
# subspace)
# Phase field
x_0 = -Lx/2 + Lx*front_position_init
if enable_PF:
w_init_field["phi"] = initial_phasefield(
x_0, Ly/2, rad_init, interface_thickness,
field_to_subspace["phi"].collapse(), shape=initial_interface)
if enable_EC:
for solute in solutes:
c_init = initial_phasefield(
x_0, Ly/2, rad_init, interface_thickness,
field_to_subspace[solute[0]].collapse(),
shape=initial_interface)
# Only have ions in phase 1 (phi=1)
if solutes_in_oil:
if bool(solutes[0][4] == solutes[1][4] or
solutes[0][5] == solutes[1][5]):
info_red("Warning! The beta values of the two "
"ions are different; not supported for "
"initialization")
exp_beta = np.exp(-solutes[0][4] + solutes[0][5])
c_init.vector().set_local(
concentration_init*((1-exp_beta)*0.5*(
1. - c_init.vector().get_local()) + exp_beta))
w_init_field[solute[0]] = c_init
else:
c_init.vector().set_local(
concentration_init*0.5*(
1.-c_init.vector().get_local()))
w_init_field[solute[0]] = c_init
return w_init_field
def create_bcs(Lx, Ly, grid_spacing, # inlet_velocity,
concentration_init, solutes,
surface_charge, V_left, V_right,
pressure_left, pressure_right,
enable_NS, enable_PF, enable_EC, **namespace):
""" The boundaries and boundary conditions are defined here. """
data = np.loadtxt("meshes/periodic_porous_dx" + str(grid_spacing) + ".dat")
centroids = data[:, :2]
rad = data[:, 2]
boundaries = dict(
right=[Right(Lx)],
left=[Left(Lx)],
obstacles=[Obstacles(Lx, centroids, rad, grid_spacing)]
)
# Allocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
for boundary in boundaries:
bcs[boundary] = dict()
# u_inlet = Fixed((inlet_velocity, 0.))
noslip = Fixed((0., 0.))
p_inlet = Pressure(pressure_left)
p_outlet = Pressure(pressure_right)
phi_inlet = Fixed(-1.0)
phi_outlet = Fixed(1.0)
if enable_NS:
# bcs["left"]["u"] = u_inlet
bcs["obstacles"]["u"] = noslip
bcs["right"]["p"] = p_outlet
bcs["left"]["p"] = p_inlet
# bcs_pointwise["p"] = (0., "x[0] < -{Lx}/2+DOLFIN_EPS && x[1] > {Ly}/2-DOLFIN_EPS".format(Lx=Lx, Ly=Ly))
if enable_PF:
bcs["left"]["phi"] = phi_inlet
bcs["right"]["phi"] = phi_outlet
if enable_EC:
for solute in solutes:
bcs["left"][solute[0]] = Fixed(concentration_init)
# bcs["right"][solute[0]] = Fixed(0.)
bcs["left"]["V"] = Fixed(V_left)
bcs["right"]["V"] = Fixed(V_right)
bcs["obstacles"]["V"] = Charged(surface_charge)
return boundaries, bcs, bcs_pointwise
def initial_phasefield(x0, y0, rad, eps, function_space, shape="flat"):
if shape == "flat":
expr_str = "tanh((x[0]-x0)/(sqrt(2)*eps))"
elif shape == "sine":
expr_str = "tanh((x[0]-x0-eps*sin(2*x[1]*pi))/(sqrt(2)*eps))"
elif shape == "circle":
expr_str = ("tanh(sqrt(2)*(sqrt(pow(x[0]-x0,2)" +
"+pow(x[1]-y0,2))-rad)/eps)")
else:
info_red("Unrecognized shape: " + shape)
exit()
phi_init_expr = df.Expression(expr_str, x0=x0, y0=y0, rad=rad,
eps=eps, degree=2)
phi_init = df.interpolate(phi_init_expr, function_space)
return phi_init
def initial_velocity(inlet_velocity, function_space):
#u_init_expr = df.Constant((inlet_velocity, 0.))
u_init_expr = df.Constant((0., 0.))
u_init = df.interpolate(u_init_expr, function_space)
return u_init
def tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,
field_to_subproblem, subproblems, w_,
enable_PF,
**namespace):
info_blue("Timestep = {}".format(tstep))
if enable_PF and stats_intv and tstep % stats_intv == 0:
# GL: Seems like a rather awkward way of doing this,
        # but any other way seems to break the simulation.
# Anyhow, a better idea could be to move some of this to a post-processing stage.
# GL: Move into common/utilities at a certain point.
subproblem_name, subproblem_i = field_to_subproblem["phi"]
phi = w_[subproblem_name].split(deepcopy=True)[subproblem_i]
bubble = 0.5*(1.-sign(phi))
mass = df.assemble(bubble*df.dx)
massy = df.assemble(
bubble*df.Expression("x[1]", degree=1)*df.dx)
if mpi_is_root():
with open(statsfile, "a") as outfile:
outfile.write("{} {} {} \n".format(t, mass, massy))
def pf_mobility(phi, gamma):
""" Phase field mobility function. """
# return gamma * (phi**2-1.)**2
# func = 1.-phi**2
# return 0.75 * gamma * 0.5 * (1. + df.sign(func)) * func
return gamma
def start_hook(newfolder, **namespace):
statsfile = os.path.join(newfolder, "Statistics/stats.dat")
return dict(statsfile=statsfile)
|
mit
| 7,689,344,559,994,583,000
| 32.854785
| 113
| 0.540456
| false
| 3.359974
| false
| false
| false
|
passy/glashammer-rdrei
|
glashammer/bundles/csrf.py
|
1
|
3865
|
# -*- coding: utf-8 -*-
"""
glashammer.bundles.middleware.csrf_protection
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Provides a simple middleware to protect against Cross-Site Request Forgery
    attacks by setting a cookie on every request and validating it on POST
    requests.
:copyright: 2010, The Glashammer Authors
:license: MIT
"""
from hashlib import sha1
from functools import wraps
from time import time
from glashammer.utils.wrappers import Request
from werkzeug.exceptions import Forbidden
import logging
log = logging.getLogger('glashammer.bundles.csrf')
class CSRFProtectionMiddleware(object):
"""
    Middleware that stores a random string in a cookie. This can be used
    to validate that the request comes from the expected origin.
Use :func:`setup_csrf_protection` and don't use this directly.
"""
def __init__(self, app, cookie_name):
self.app = app
self.cookie_name = cookie_name
app.connect_event('response-start', self.set_cookie)
def set_cookie(self, response):
"""Sets a unique string to the cookie."""
if not hasattr(response, 'no_csrf_cookie'):
response.set_cookie(self.cookie_name, self._generate_token())
def _generate_token(self):
"""Generate a new random string based on time and secret set in the
config."""
return sha1("%s#%s" % (time(),
self.app.cfg['sessions/secret'])).hexdigest()
def setup_csrf_protection(app, cookie_name='glashammer_csrf'):
"""Sets up the csrf protection middleware.
:param cookie_name: Cookie to store the secret key in. Remember that you
have to create a new ``require_csrf_token`` decorator, if you change this
value.
"""
# In case the session bundle is not activated.
app.add_config_var('sessions/secret', str, 'glashammer_secret')
middleware = CSRFProtectionMiddleware(app, cookie_name)
def require_csrf_token_factory(form_var='_csrf_token',
cookie_var='glashammer_csrf',
exception_type=Forbidden):
"""Create a new ``require_csrf_token`` decorator based on the options
submitted."""
def get_request(args):
"""Tries to retrieve the request object from a list of arguments.
Returns the first argument in the list that looks like a request
object.
This is used to make function-style views and method-style controllers
both work.
"""
for arg in args:
if isinstance(arg, Request):
return arg
raise TypeError("No request object found in function call!")
def require_csrf_token(func):
"""Raises a Forbidden by default if posted '_csrf_token' does
not match the cookie value."""
@wraps(func)
def decorator(*args, **kwargs):
req = get_request(args)
if form_var not in req.form or \
cookie_var not in req.cookies:
log.info("CSRF-Protection failed. Either cookie or post "
"value not found!")
raise exception_type("CSRF protection validation failed! "
"Form data missing!")
elif req.form[form_var] != req.cookies[cookie_var]:
log.info("CSRF-Protection failed. Expected %s, got %s.",
req.cookies[cookie_var], req.form[form_var])
raise exception_type("CSRF protection validation failed! "
"Form data invalid!")
else:
return func(*args, **kwargs)
return decorator
return require_csrf_token
# Default decorators.
require_csrf_token = require_csrf_token_factory()
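# Illustrative usage (assuming a function-style view that receives a Request):
#
#     @require_csrf_token
#     def post_comment(req):
#         ...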
__all__ = ('setup_csrf_protection', 'require_csrf_token',
'require_csrf_token_factory')
|
mit
| 3,107,017,994,892,241,400
| 32.034188
| 78
| 0.615783
| false
| 4.392045
| false
| false
| false
|
coderb0t/CouchPotatoServer
|
couchpotato/core/media/movie/providers/info/couchpotatoapi.py
|
1
|
4212
|
import base64
import time
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'CouchPotatoApi'
class CouchPotatoApi(MovieProvider):
urls = {
'validate': 'https://api.couchpota.to/validate/%s/',
'search': 'https://api.couchpota.to/search/%s/',
'info': 'https://api.couchpota.to/info/%s/',
'is_movie': 'https://api.couchpota.to/ismovie/%s/',
'eta': 'https://api.couchpota.to/eta/%s/',
'suggest': 'https://api.couchpota.to/suggest/',
'updater': 'https://raw.githubusercontent.com/%s/%s/%s/updater.json',
'messages': 'https://api.couchpota.to/messages/?%s',
}
http_time_between_calls = 0
api_version = 1
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info.release_date', self.getReleaseDate)
addEvent('info.search', self.search, priority = 2)
addEvent('movie.search', self.search, priority = 2)
addEvent('movie.suggest', self.getSuggestions)
addEvent('movie.is_movie', self.isMovie)
addEvent('release.validate', self.validate)
addEvent('cp.api_call', self.call)
addEvent('cp.source_url', self.getSourceUrl)
addEvent('cp.messages', self.getMessages)
def call(self, url, **kwargs):
return self.getJsonData(url, headers = self.getRequestHeaders(), **kwargs)
def getMessages(self, last_check = 0):
data = self.getJsonData(self.urls['messages'] % tryUrlencode({
'last_check': last_check,
}), headers = self.getRequestHeaders(), cache_timeout = 10)
return data
def getSourceUrl(self, repo = None, repo_name = None, branch = None):
return self.getJsonData(self.urls['updater'] % (repo, repo_name, branch), headers = self.getRequestHeaders())
def search(self, q, limit = 5):
return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders())
def validate(self, name = None):
if not name:
return
name_enc = base64.b64encode(ss(name))
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None, adding = False):
if not identifier:
return
url = self.urls['is_movie'] % identifier
url += '?adding=1' if adding else ''
data = self.getJsonData(url, headers = self.getRequestHeaders())
if data:
return data.get('is_movie', True)
return True
def getInfo(self, identifier = None, **kwargs):
if not identifier:
return
result = self.getJsonData(self.urls['info'] % identifier, headers = self.getRequestHeaders())
if result:
return dict((k, v) for k, v in result.items() if v)
return {}
def getReleaseDate(self, identifier = None):
if identifier is None: return {}
dates = self.getJsonData(self.urls['eta'] % identifier, headers = self.getRequestHeaders())
log.debug('Found ETA for %s: %s', (identifier, dates))
return dates
def getSuggestions(self, movies = None, ignore = None):
if not ignore: ignore = []
if not movies: movies = []
suggestions = self.getJsonData(self.urls['suggest'], data = {
'movies': ','.join(movies),
'ignore': ','.join(ignore),
}, headers = self.getRequestHeaders())
log.info('Found suggestions for %s movies, %s ignored', (len(movies), len(ignore)))
return suggestions
def getRequestHeaders(self):
return {
'X-CP-Version': fireEvent('app.version', single = True),
'X-CP-API': self.api_version,
'X-CP-Time': time.time(),
'X-CP-Identifier': '+%s' % Env.setting('api_key', 'core')[:10], # Use first 10 as identifier, so we don't need to use IP address in api stats
}
|
gpl-3.0
| 2,660,003,703,913,347,000
| 32.967742
| 154
| 0.616809
| false
| 3.618557
| false
| false
| false
|
timothyclemansinsea/smc
|
src/scripts/gce/pricing.py
|
1
|
1971
|
# https://cloud.google.com/compute/pricing
# all storage prices are per GB per month.
PRICING = {
'gcs-standard' : 0.026,
'gcs-reduced' : 0.02,
'gcs-nearline' : 0.01,
'snapshot' : 0.026,
'local-ssd' : 0.218,
'pd-ssd' : 0.17,
'pd-standard' : 0.04,
    'n1-standard-hour' : 0.05, # price for the -1 size, so multiply by the number of CPUs (the suffix)
'n1-standard-hour-pre' : 0.015,
'n1-standard-month': 0.035*30.5*24, # price for sustained use for a month
'n1-standard-ram' : 3.75, # amount in GB of base machine
'n1-highmem-hour' : 0.096/2,
'n1-highmem-hour-pre' : 0.035/2,
'n1-highmem-month' : 0.088*30.5*24/2,
'n1-highmem-ram' : 6.5,
'n1-highcpu-hour' : 0.076/2,
'n1-highcpu-hour-pre' : 0.02/2,
'n1-highcpu-month' : 0.053*30.5*24/2,
'n1-highcpu-ram' : 0.9,
'g1-small-hour' : 0.021,
'g1-small-hour-pre': 0.01,
'g1-small-month' : 0.019*30.5*24,
'g1-small-ram' : 1.7,
'f1-micro-hour' : 0.008,
'f1-micro-hour-pre': 0.005,
'f1-micro-month' : 0.0056*30.5*24,
'f1-micro-ram' : 0.60,
'europe' : 1.096,
'asia' : 1.096,
'us' : 1,
'egress' : 0.12,
'egress-china' : 0.23,
'egress-australia' : 0.19,
}
def cpu_cost(size='n1-standard-1', preemptible=False, region='us'):
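    # e.g. 'n1-standard-4' -> base 'n1-standard' with multiplier m=4;
    # sizes without a numeric suffix (e.g. 'g1-small') use m=1.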
if size.count('-') == 2:
i = size.rfind('-')
m = int(size[i+1:])
else:
i = len(size)
m = 1
if preemptible:
x = PRICING[size[:i] + '-hour-pre']*24*30.5*m
return [x, x]
else:
return [m*PRICING[size[:i] + '-month'], m*PRICING[size[:i] + '-hour']*24*30.5]
def disk_cost(disk_size=10, disk_type='pd-standard'):
x = PRICING[disk_type] * disk_size
return [x, x]
import locale
locale.setlocale( locale.LC_ALL, '' )
def money(s):
return locale.currency(s)
|
gpl-3.0
| 4,755,622,627,190,812,000
| 27.565217
| 107
| 0.515982
| false
| 2.469925
| false
| false
| false
|
rdevon/cortex
|
demos/demo_classifier.py
|
1
|
2354
|
'''Simple classifier model
'''
from cortex.main import run
from cortex.plugins import ModelPlugin
import torch
import torch.nn as nn
import torch.nn.functional as F
from cortex.built_ins.models.utils import update_encoder_args
class MyClassifier(ModelPlugin):
'''Basic image classifier.
Classifies images using standard convnets.
'''
defaults = dict(
data=dict(batch_size=128, inputs=dict(inputs='images')),
optimizer=dict(optimizer='Adam', learning_rate=1e-3),
train=dict(epochs=200, save_on_best='losses.classifier'))
def build(self, classifier_type='convnet',
classifier_args=dict(dropout=0.2)):
'''Builds a simple image classifier.
Args:
classifier_type (str): Network type for the classifier.
classifier_args: Classifier arguments. Can include dropout,
batch_norm, layer_norm, etc.
'''
classifier_args = classifier_args or {}
shape = self.get_dims('x', 'y', 'c')
dim_l = self.get_dims('labels')
Encoder, args = update_encoder_args(
shape, model_type=classifier_type, encoder_args=classifier_args)
args.update(**classifier_args)
classifier = Encoder(shape, dim_out=dim_l, **args)
self.nets.classifier = classifier
def routine(self, inputs, targets, criterion=nn.CrossEntropyLoss()):
'''
Args:
criterion: Classifier criterion.
'''
classifier = self.nets.classifier
outputs = classifier(inputs)
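        # torch.max(..., 1)[1] selects the argmax indices over the class
        # dimension, i.e. the predicted label for each sample.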
predicted = torch.max(F.log_softmax(outputs, dim=1).data, 1)[1]
loss = criterion(outputs, targets)
correct = 100. * predicted.eq(
targets.data).cpu().sum() / targets.size(0)
self.losses.classifier = loss
self.results.accuracy = correct
def predict(self, inputs):
classifier = self.nets.classifier
outputs = classifier(inputs)
predicted = torch.max(F.log_softmax(outputs, dim=1).data, 1)[1]
return predicted
def visualize(self, images, inputs, targets):
predicted = self.predict(inputs)
self.add_image(images.data, labels=(targets.data, predicted.data),
name='gt_pred')
if __name__ == '__main__':
classifier = MyClassifier()
run(model=classifier)
|
bsd-3-clause
| -8,868,400,450,535,550,000
| 26.057471
| 76
| 0.623195
| false
| 4.037736
| false
| false
| false
|
phborba/dsgtoolsop
|
auxiliar/geopy/geocoders/dot_us.py
|
1
|
5485
|
"""
:class:`GeocoderDotUS` geocoder.
"""
import csv
from base64 import b64encode
from geopy.compat import urlencode, py3k, Request
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT,
)
from geopy.location import Location
from geopy.exc import ConfigurationError
from geopy.util import logger, join_filter
__all__ = ("GeocoderDotUS", )
class GeocoderDotUS(Geocoder): # pylint: disable=W0223
"""
GeocoderDotUS geocoder, documentation at:
http://geocoder.us/
Note that GeocoderDotUS does not support SSL.
"""
def __init__(
self,
username=None,
password=None,
format_string=DEFAULT_FORMAT_STRING,
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None,
): # pylint: disable=R0913
"""
:param str username:
:param str password:
:param str format_string: String containing '%s' where the
string to geocode should be interpolated before querying the
geocoder. For example: '%s, Mountain View, CA'. The default
is just '%s'.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising an :class:`geopy.exc.GeocoderTimedOut`
exception.
.. versionadded:: 0.97
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
.. versionadded:: 0.96
:param str user_agent: Use a custom User-Agent header.
.. versionadded:: 1.12.0
"""
super(GeocoderDotUS, self).__init__(
format_string=format_string, timeout=timeout, proxies=proxies, user_agent=user_agent
)
if username or password:
if not (username and password):
raise ConfigurationError(
"Username and password must both specified"
)
self.authenticated = True
self.api = "http://geocoder.us/member/service/namedcsv"
else:
self.authenticated = False
self.api = "http://geocoder.us/service/namedcsv"
self.username = username
self.password = password
def geocode(self, query, exactly_one=True, timeout=None):
"""
Geocode a location query.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
"""
query_str = self.format_string % query
url = "?".join((self.api, urlencode({'address':query_str})))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
url = Request(url, headers=self._get_headers())
page = self._call_geocoder(url, timeout=timeout, raw=True)
content = page.read().decode("utf-8") if py3k else page.read() # pylint: disable=E1101,E1103
places = [
r for r in csv.reader(
[content, ] if not isinstance(content, list)
else content
)
]
if not len(places):
return None
if exactly_one:
return self._parse_result(places[0])
else:
result = [self._parse_result(res) for res in places]
if None in result: # todo
return None
return result
@staticmethod
def _parse_result(result):
"""
        Parse an individual result record into a Location.
"""
# turn x=y pairs ("lat=47.6", "long=-117.426")
# into dict key/value pairs:
place = dict(
[x.split('=') for x in result if len(x.split('=')) > 1]
)
if 'error' in place:
if "couldn't find" in place['error']:
return None
address = [
place.get('number', None),
place.get('prefix', None),
place.get('street', None),
place.get('type', None),
place.get('suffix', None)
]
city = place.get('city', None)
state = place.get('state', None)
zip_code = place.get('zip', None)
name = join_filter(", ", [
join_filter(" ", address),
city,
join_filter(" ", [state, zip_code])
])
latitude = place.get('lat', None)
longitude = place.get('long', None)
if latitude and longitude:
latlon = float(latitude), float(longitude)
else:
return None
return Location(name, latlon, place)
def _get_headers(self):
headers = {}
if self.authenticated:
username_password = ":".join((self.username, self.password))
auth = " ".join((
"Basic",
b64encode(username_password.encode('utf-8')).decode('utf-8')
))
headers["Authorization"] = auth
return headers
|
gpl-2.0
| 2,699,461,029,512,830,000
| 31.64881
| 100
| 0.558067
| false
| 4.248644
| false
| false
| false
|
anergictcell/SonosBar
|
sonosBar.py
|
1
|
13759
|
#!/usr/bin/env python -W ignore
# -*- coding: utf-8 -*-
"""
Control you Sonos system from you Mac Menu Bar
"""
# <bitbar.title>SonosBar</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author>Jonas Marcello</bitbar.author>
# <bitbar.author.github>anergictcell</bitbar.author.github>
# <bitbar.desc>Control your Sonos system from your Mac menu bar.</bitbar.desc>
# <bitbar.image>https://raw.githubusercontent.com/anergictcell/SonosBar/master/resources/SonosBar.png</bitbar.image>
# <bitbar.dependencies>python,SoCo</bitbar.dependencies>
# <bitbar.abouturl>https://github.com/anergictcell/SonosBar/</bitbar.abouturl>
import argparse
import socket
import os
import sys
try:
import soco
from soco.music_services import MusicService
from soco.data_structures import DidlItem, to_didl_string
except ImportError:
print("Error")
print("---")
print("You need to istall >>soco<< | href=https://github.com/SoCo/SoCo")
sys.exit(0)
def parse_ip(ip_string):
"""Parsing the user supplied IP address to use on the local subnet"""
host_ip = socket.gethostbyname(socket.gethostname())
subnets = host_ip.split(".")
sonos_subnets = ip_string.split(".")
new_ip = subnets[0:(4-len(sonos_subnets))] + sonos_subnets
return ".".join(new_ip)
def parse_cli_arguments():
"""Main function that parses command line arguments"""
parser = argparse.ArgumentParser(description='Control your Sonos')
player_args = parser.add_mutually_exclusive_group()
player_args.add_argument(
"-p", "--player",
metavar="SPEAKER_NAME",
type=str,
# default="Living Room",
help="The name of the player/zone")
player_args.add_argument(
"-i", "--ip",
metavar="IP_ADDRESS",
type=str,
help="The IP address of the player/zone")
control_args = parser.add_mutually_exclusive_group()
control_args.add_argument(
"-l", "--playlist",
metavar="PLAYLIST_NAME",
type=str,
help="The name of the playlist to play")
control_args.add_argument(
"-r", "--radio",
metavar="RADIO_STATION",
type=str,
help="The name of the radio station to play")
control_args.add_argument(
"-v", "--vol",
metavar="VOLUME",
type=int,
choices=range(0, 101),
help="0-100")
control_args.add_argument(
"-j", "--join",
metavar="SPEAKER_NAME",
type=str,
help="Name of the speaker to join")
control_args.add_argument(
"-k", "--ipjoin",
metavar="SPEAKER_IP",
type=str,
help="IP of the speaker to join")
control_args.add_argument(
"-u", "--unjoin",
action='store_const',
const=True,
help="Unjoin the player from all groups")
control_args.add_argument(
'action',
metavar='action',
nargs="?",
choices=["play", "pause", "next", "previous", "shuffle", "normal"],
help="""Action to take if non is set via flags.
Can be either: play, pause, next, previous, shuffle, normal""")
parser.add_argument(
"-g", "--group",
action='store_const',
const=True,
help="Apply the action to the whole group")
output = parser.add_mutually_exclusive_group()
output.add_argument(
"-o", "--verbose",
action='store_const',
const=True,
help="Display feedback about current actions")
output.add_argument(
"-b", "--bitbar",
action='store_const',
const=True,
help="Display bitbar controls")
args = parser.parse_args()
if args.ip:
args.ip = parse_ip(args.ip)
if args.ipjoin:
args.ipjoin = parse_ip(args.ipjoin)
return args
def output_for_bitbar(zones):
"""Prints the topology display"""
print("🔊Sonos")
print("---")
for zone in zones:
print_zone(zone)
def print_zone(zone):
"""Prints basic info about the zone and calls functions to
print more detailed info"""
print("---")
print("Zone:")
print("{0}: {1}".format(zone["kind"], zone["master"].player_name))
if zone["kind"] == "P":
print_single_player(zone["master"])
else:
print_group(zone["master"])
def print_single_player(player):
"""Controls printing of control elements for a single-player zone"""
print_music_controls(player, "--")
print_player_controls(player, "--")
print_top_level_controls(player, "")
def print_group(master):
"""Controls printing of control elements for a multi-player zone"""
print_music_controls(master, "--")
print_top_level_controls(master, "")
for player in master.group.members:
print("➤ {0}".format(player.player_name))
print_player_controls(player, "--")
print("--Volume")
print_volume_controls(player, "--")
def create_command(player, *params):
"""Creates the Bitbar specific command"""
string = "bash={0} param1=-i param2={1}"
i = 3
for param in params:
string += " param{0}={1}".format(i, param)
i += 1
string += " terminal=false refresh=true"
return string.format(PATH_TO_SCRIPT, player.ip_address)
def print_player_controls(player, indent):
"""Prints Player controls for Bitbar"""
print("{0}Join".format(indent))
for single_player in player.all_zones:
if single_player != player:
print("{0}--{1} | ".format(indent, single_player.player_name) +
create_command(player, "--ipjoin", single_player.ip_address)
)
print("{0}Unjoin | ".format(indent) +
create_command(player, "--unjoin")
)
def print_music_controls(player, indent):
"""Prints Music controls for Bitbar"""
print("{0}Playlists".format(indent))
for playlist in player.get_sonos_playlists():
print("{0}--{1} | ".format(indent, playlist.title) +
create_command(player, "-gl", '"' + playlist.title + '"')
)
print("{0}Radios".format(indent))
for station in player.get_favorite_radio_stations()["favorites"]:
print("{0}--{1} | ".format(indent, station["title"]) +
create_command(player, "-gr", '"' + station["uri"] + '"')
)
def print_top_level_controls(player, indent):
"""Prints the controls that are displayed on the base level for each
player / group"""
playing = player.get_current_transport_info()["current_transport_state"]
if playing == "PLAYING":
print("{0}├ Pause | ".format(indent) +
create_command(player, "pause", "-g"))
print("{0}├ Next | ".format(indent) +
create_command(player, "next", "-g"))
else:
print("{0}├ Play | ".format(indent) +
create_command(player, "play", "-g"))
print("{0}└ Volume | ".format(indent))
print_volume_controls(player, indent)
def print_volume_controls(player, indent):
"""Prints controls to adjust the volume"""
for vol in range(0, 11):
if (vol-1) * 10 < player.volume and vol*10 >= player.volume:
# print checkmark
print(("{0}--{1}{2}").format(indent, u'\u2713'.encode("utf-8"), vol))
else:
print("{0}--{1} | ".format(indent, vol) +
create_command(player, "--vol", vol*10)
)
PATH_TO_SCRIPT = os.path.realpath(__file__)
ARGUMENTS = parse_cli_arguments()
GROUP = ARGUMENTS.group
def get_player_by_name(name):
"""Returns a SoCo object for the given name (if it exists)"""
for device in soco.discover():
if device.player_name == name:
return device
def define_player(ip_address, name):
"""Returning a SoCo object of the chosen player"""
player = None
if ip_address:
player = soco.SoCo(ip_address)
if name:
player = get_player_by_name(name)
if player and GROUP:
# Change player to be the coordinator of the group
player = player.group.coordinator
return player
def find_random_player():
"""Searches the network for Sonos zones and picks one randomly"""
zones = soco.discover()
if zones:
# picking a random player
player = next(iter(zones))
return player
return None
def parse_zone_groups(player):
"""Creates a list of all Zones with attrbute
whether they are a group or a single player"""
all_zones = []
for group in player.all_groups:
if len(group.members) > 1:
all_zones.append({"kind":"G", "master":group.coordinator})
else:
all_zones.append({"kind":"P", "master":group.coordinator})
return all_zones
def verbose_output(string):
"""Printing the passed commands to stdout"""
if ARGUMENTS.verbose:
print("{0}: {1}".format(
("Group " if GROUP else "Player "), string))
def group_coordinate(function):
"""Wrapper function to ensure unjoining for single players"""
def inner_function(*arguments):
"""Inner function"""
if GROUP:
function(*arguments)
else:
# First argument always has to be the player SoCo object
arguments[0].unjoin()
function(*arguments)
return inner_function
def get_songs_from_playlist(player, playlist_name):
"""Returns a list of songs from the given playlist"""
lists = player.get_sonos_playlists()
for playlist in lists:
if playlist.title == playlist_name:
return player.music_library.browse(playlist)
@group_coordinate
def play_playlist(player, playlist_name):
"""Replaces the queue with the selected playlist"""
verbose_output("Play playlist {0}".format(playlist_name))
songs = get_songs_from_playlist(player, playlist_name)
player.clear_queue()
for song in songs:
player.add_to_queue(song)
player.play_from_queue(0)
@group_coordinate
def play_radio_station(player, uri):
"""Plays the selected radio station. The URI must be in the
format as it is currently returned from soco:
x-sonosapi-stream:s25111?sid=254&flags=32
"""
verbose_output("Switching to radio station {0}".format(uri))
service = MusicService('TuneIn')
didl = DidlItem(
title="DUMMY", parent_id="DUMMY", item_id="DUMMY", desc=service.desc)
meta = to_didl_string(didl)
player.avTransport.SetAVTransportURI(
[('InstanceID', 0), ('CurrentURI', uri), ('CurrentURIMetaData', meta)])
player.play()
@group_coordinate
def play(player):
"""Play the selected song"""
verbose_output("Play")
player.play()
@group_coordinate
def pause(player):
"""Pause the current playback"""
verbose_output("Pause")
player.pause()
@group_coordinate
def next_track(player):
"""Play the next track"""
verbose_output("Next track")
player.next()
@group_coordinate
def previous_track(player):
"""Play the previous track"""
verbose_output("Previous track")
player.previous()
@group_coordinate
def turn_on_shuffle(player):
"""Turn on shuffle"""
verbose_output("Shuffle ON")
player.play_mode = "SHUFFLE_NOREPEAT"
@group_coordinate
def turn_off_shuffle(player):
"""Turn off shuffle"""
verbose_output("Shuffle OFF")
player.play_mode = "NORMAL"
def set_volume(player, volume):
"""Sets the volume"""
verbose_output("Setting the volume to {0}".format(volume))
player.volume = volume
def join(source, target):
"""Joining another group"""
if target is None:
return invalid_command("Target to join is not known")
if GROUP:
for single_player in source.group.members:
single_player.join(target.group.coordinator)
else:
source.join(target.group.coordinator)
def invalid_command(err):
"""Handles errors and prints error messages"""
print("ERROR: {0}".format(err))
return
def main(args):
"""Main function"""
player = define_player(args.ip, args.player)
if player is None or args.bitbar:
player = player or find_random_player()
print_bitbar_controls(player)
return
if GROUP:
# Change player to the coordinator of the group
player = player.group.coordinator
if args.playlist:
return play_playlist(player, args.playlist)
if args.radio:
return play_radio_station(player, args.radio)
if args.vol is not None:
return set_volume(player, args.vol)
if args.join:
verbose_output("Joining {0}".format(args.join))
to_join = define_player(None, args.join)
return join(player, to_join)
if args.ipjoin:
verbose_output("Joining {0}".format(args.ipjoin))
to_join = define_player(args.ipjoin, None)
return join(player, to_join)
if args.unjoin:
verbose_output("Unjoin")
player.unjoin()
return
if args.action is None:
return
if args.action.lower() == "play":
play(player)
return
if args.action.lower() == "pause":
pause(player)
return
if args.action.lower() == "next":
next_track(player)
return
if args.action.lower() == "previous":
previous_track(player)
return
if args.action.lower() == "shuffle":
turn_on_shuffle(player)
return
if args.action.lower() == "normal":
turn_off_shuffle(player)
return
def print_bitbar_controls(player):
"""Prints the lines used for Bitbar to stdout"""
if player is None:
print("🔇 Sonos")
print("---")
print("No Sonos Zone present")
else:
output_for_bitbar(parse_zone_groups(player))
if __name__ == "__main__":
main(ARGUMENTS)
|
mit
| -7,581,023,862,845,307,000
| 28.941176
| 116
| 0.612821
| false
| 3.681489
| false
| false
| false
|
altair-viz/altair
|
altair/sphinxext/utils.py
|
1
|
5859
|
import ast
import hashlib
import itertools
import json
import re
def create_thumbnail(image_filename, thumb_filename, window_size=(280, 160)):
"""Create a thumbnail whose shortest dimension matches the window"""
from PIL import Image
im = Image.open(image_filename)
im_width, im_height = im.size
width, height = window_size
width_factor, height_factor = width / im_width, height / im_height
if width_factor > height_factor:
final_width = width
final_height = int(im_height * width_factor)
else:
final_height = height
final_width = int(im_width * height_factor)
thumb = im.resize((final_width, final_height), Image.ANTIALIAS)
thumb.save(thumb_filename)
def create_generic_image(filename, shape=(200, 300), gradient=True):
"""Create a generic image"""
from PIL import Image
import numpy as np
assert len(shape) == 2
arr = np.zeros((shape[0], shape[1], 3))
if gradient:
# gradient from gray to white
arr += np.linspace(128, 255, shape[1])[:, None]
im = Image.fromarray(arr.astype("uint8"))
im.save(filename)
SYNTAX_ERROR_DOCSTRING = """
SyntaxError
===========
Example script with invalid Python syntax
"""
def _parse_source_file(filename):
"""Parse source file into AST node
Parameters
----------
filename : str
File path
Returns
-------
node : AST node
content : utf-8 encoded string
Notes
-----
This function adapted from the sphinx-gallery project; license: BSD-3
https://github.com/sphinx-gallery/sphinx-gallery/
"""
with open(filename, "r", encoding="utf-8") as fid:
content = fid.read()
# change from Windows format to UNIX for uniformity
content = content.replace("\r\n", "\n")
try:
node = ast.parse(content)
except SyntaxError:
node = None
return node, content
def get_docstring_and_rest(filename):
"""Separate ``filename`` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Parameters
----------
filename: str
The path to the file containing the code to be read
Returns
-------
docstring: str
docstring of ``filename``
category: list
list of categories specified by the "# category:" comment
rest: str
``filename`` content without the docstring
lineno: int
the line number on which the code starts
Notes
-----
This function adapted from the sphinx-gallery project; license: BSD-3
https://github.com/sphinx-gallery/sphinx-gallery/
"""
node, content = _parse_source_file(filename)
# Find the category comment
find_category = re.compile(r"^#\s*category:\s*(.*)$", re.MULTILINE)
match = find_category.search(content)
if match is not None:
category = match.groups()[0]
# remove this comment from the content
content = find_category.sub("", content)
else:
category = None
if node is None:
return SYNTAX_ERROR_DOCSTRING, category, content, 1
if not isinstance(node, ast.Module):
raise TypeError(
"This function only supports modules. "
"You provided {}".format(node.__class__.__name__)
)
try:
# In python 3.7 module knows its docstring.
# Everything else will raise an attribute error
docstring = node.docstring
import tokenize
from io import BytesIO
        # tokenize.tokenize expects a bytes readline, so re-encode the source
        ts = tokenize.tokenize(BytesIO(content.encode("utf-8")).readline)
ds_lines = 0
        # find the first string according to the tokenizer and get
        # its end row
for tk in ts:
if tk.exact_type == 3:
ds_lines, _ = tk.end
break
# grab the rest of the file
rest = "\n".join(content.split("\n")[ds_lines:])
lineno = ds_lines + 1
except AttributeError:
# this block can be removed when python 3.6 support is dropped
if (
node.body
and isinstance(node.body[0], ast.Expr)
and isinstance(node.body[0].value, (ast.Str, ast.Constant))
):
docstring_node = node.body[0]
docstring = docstring_node.value.s
# python2.7: Code was read in bytes needs decoding to utf-8
# unless future unicode_literals is imported in source which
# make ast output unicode strings
if hasattr(docstring, "decode") and not isinstance(docstring, str):
docstring = docstring.decode("utf-8")
# python3.8: has end_lineno
lineno = (
getattr(docstring_node, "end_lineno", None) or docstring_node.lineno
) # The last line of the string.
# This get the content of the file after the docstring last line
# Note: 'maxsplit' argument is not a keyword argument in python2
rest = content.split("\n", lineno)[-1]
lineno += 1
else:
docstring, rest = "", ""
if not docstring:
raise ValueError(
(
'Could not find docstring in file "{0}". '
"A docstring is required for the example gallery."
).format(filename)
)
return docstring, category, rest, lineno
def prev_this_next(it, sentinel=None):
"""Utility to return (prev, this, next) tuples from an iterator"""
i1, i2, i3 = itertools.tee(it, 3)
next(i3, None)
return zip(itertools.chain([sentinel], i1), i2, itertools.chain(i3, [sentinel]))
def dict_hash(dct):
"""Return a hash of the contents of a dictionary"""
serialized = json.dumps(dct, sort_keys=True)
try:
m = hashlib.md5(serialized)
except TypeError:
m = hashlib.md5(serialized.encode())
return m.hexdigest()
|
bsd-3-clause
| 2,410,201,326,460,441,600
| 28.442211
| 84
| 0.604028
| false
| 4.20603
| false
| false
| false
|
jajberni/pcse_web
|
main/model/simulation.py
|
1
|
7917
|
# coding: utf-8
"""Provides implementation of Simulation model and Simulation"""
from __future__ import absolute_import
from google.appengine.ext import ndb
from google.appengine.ext.ndb.model import GeoPt
import model
import time
from pcse.db import NASAPowerWeatherDataProvider
from pcse.fileinput import CABOFileReader
from pcse.base_classes import ParameterProvider, WeatherDataProvider
from pcse.models import Wofost71_WLP_FD
import datetime as dt
import json
from dateutil.parser import parse
from flask import jsonify
from operator import itemgetter
from .model_defaults import default_amgt, default_crop, default_site, default_soil
soil_defaults = {'SMW': 0.3, 'SMFCF': 0.46, 'SM0': 0.57, 'CRAIRC': 0.05, 'RDMSOL': 0.45}
class SimulationValidator(model.BaseValidator):
"""Defines how to create validators for simulation properties. For detailed description see BaseValidator"""
name = [1, 100]
description = [3, 400]
latlon = [-180, 180]
tsum = [0, 2000]
@classmethod
def existing_name(cls, name):
"""Validates if given name is in datastore"""
simulation_db = Simulation.get_by('name', name)
if not simulation_db:
raise ValueError('This name is not in our database.')
return name
@classmethod
def unique_name(cls, name):
"""Validates if given name is not in datastore"""
simulation_db = Simulation.get_by('name', name)
if simulation_db:
raise ValueError('Sorry, this name is already taken.')
return name
def is_date(prop, value):
if isinstance(value, dt.date):
return value
elif isinstance(value, dt.datetime):
return value
else:
o = parse(value).date()
return o
def is_geoPt(prop, value):
if isinstance(value, GeoPt):
return value
else:
pt = GeoPt(value.lat, value.lon)
return pt
class StringDateProperty(ndb.DateProperty):
def _validate(self, value):
if isinstance(value, basestring):
o = parse(value).date()
return o
class DictGeoPt(ndb.GeoPtProperty):
def _validate(self, value):
if isinstance(value, dict):
pt = GeoPt(value['lat'], value['lon'])
return pt
class WeatherDataProviderProperty(ndb.PickleProperty):
def _validate(self, value):
# TODO: integrity check
self.store = value[0]
self.elevation = value[1]
self.longitude = value[2]
self.latitude = value[3]
self.description = value[4]
self.ETmodel = value[5]
print("WDP latitude: ", value[3])
return value
def getWDP(self):
wdp = WeatherDataProvider()
wdp.store = self.store
wdp.elevation = self.elevation
wdp.longitude = self.longitude
wdp.latitude = self.latitude
wdp.description = self.description
    wdp.ETmodel = self.ETmodel
    return wdp
class Simulation(model.Base):
"""A class describing datastore users."""
name = ndb.StringProperty(required=True, validator=SimulationValidator.create('name'))
description = ndb.StringProperty(default="Demo simulation", validator=SimulationValidator.create('description'))
location = DictGeoPt(default=GeoPt(37.4, -4.03))
soil_attributes = ndb.JsonProperty(default=default_soil)
start_date = StringDateProperty(default=dt.date(2014, 9, 1))
sowing_date = StringDateProperty(default=dt.date(2014, 10, 1))
end_date = StringDateProperty(default=dt.date(2015, 7, 1))
crop_name = ndb.StringProperty(default='wheat')
tsum1 = ndb.FloatProperty(default=900.0)
tsum2 = ndb.FloatProperty(default=900.0)
owner_id = ndb.StringProperty(default='')
simulation_output = ndb.JsonProperty(default={})
plot_data = ndb.JsonProperty(default={})
results_ok = ndb.BooleanProperty(default=False)
#weather_data = WeatherDataProviderProperty()
weather_data = ndb.PickleProperty(compressed=True)
wdp = None
simulation_dict = {}
PUBLIC_PROPERTIES = ['name', 'description', 'location', 'results_ok', 'plot_data',
'soil_attributes', 'start_date', 'sowing_date', 'end_date', 'crop_name', 'tsum1', 'tsum2']
PRIVATE_PROPERTIES = ['owner_id']
@ndb.transactional
def update_simulation_results(self):
print('Updating simulation')
json_data = json.dumps(self.run_simulation(), default=json_timestamp)
self.simulation_output = json_data
self.plot_data = self.plot_dict()
self.weather_data = {'store': self.wdp.store,
'elevation': self.wdp.elevation,
'longitude': self.wdp.longitude,
'latitude': self.wdp.latitude,
'description': self.wdp.description,
'ETmodel': self.wdp.ETmodel}
self.results_ok = True
def plot_dict(self):
ts = map(fuzzydate_to_timestamp, self.simulation_dict.keys())
lai = [v['LAI'] for v in self.simulation_dict.itervalues()]
sm = [v['SM'] for v in self.simulation_dict.itervalues()]
twso = [v['TWSO'] for v in self.simulation_dict.itervalues()]
tagp = [v['TAGP'] for v in self.simulation_dict.itervalues()]
json.dumps(sorted(zip(lai, sm), key=itemgetter(0)))
plot_data = json.dumps([
{'key': "LAI", "values": sorted(zip(ts, lai), key=itemgetter(0))},
{'key': "SM", "values": sorted(zip(ts, sm), key=itemgetter(0))},
{'key': "TAGP", "values": sorted(zip(ts, tagp), key=itemgetter(0))},
{'key': "TWSO", "values": sorted(zip(ts, twso), key=itemgetter(0))}])
#print("Plot DATA: ", plot_data)
return plot_data
def run_simulation(self):
if not isinstance(self.weather_data, dict):
print("Fetching NASA weather...")
self.wdp = NASAPowerWeatherDataProvider(self.location.lat, self.location.lon)
else:
print("Weather data is cached...")
if (self.location.lat != self.weather_data['latitude']) or (self.location.lon != self.weather_data['longitude']):
print("Location changed, fetching NASA weather again")
self.wdp = NASAPowerWeatherDataProvider(self.location.lat, self.location.lon)
else:
self.wdp = WeatherDataProvider()
self.wdp.store = self.weather_data['store']
self.wdp.elevation = self.weather_data['elevation']
self.wdp.longitude = self.weather_data['longitude']
self.wdp.latitude = self.weather_data['latitude']
self.wdp.description = self.weather_data['description']
self.wdp.ETmodel = self.weather_data['ETmodel']
print(self.wdp)
amgt = default_amgt
soil = default_soil
site = default_site
crop = default_crop
amgt[0][self.start_date] = amgt[0].pop(amgt[0].keys()[0])
amgt[0][self.start_date]['CropCalendar']['crop_start_date'] = self.sowing_date
amgt[0][self.start_date]['CropCalendar']['crop_end_date'] = self.end_date
parvalues = ParameterProvider(sitedata=site, soildata=soil, cropdata=crop)
crop['TSUM1'] = self.tsum1
crop['TSUM2'] = self.tsum2
soil.update(self.soil_attributes)
wofsim = Wofost71_WLP_FD(parvalues, self.wdp, agromanagement=amgt)
wofsim.run_till_terminate()
output = wofsim.get_output()
results_dict = {}
for a in output:
results_dict[a.pop('day').isoformat()] = a
self.simulation_dict = results_dict
return results_dict
@classmethod
def qry(cls, name=None, **kwargs):
"""Query for simulations"""
# qry = cls.query(**kwargs)
qry = model.Base.qry(model.Simulation, **kwargs)
if name:
qry = qry.filter(cls.name == name)
# else filter for private True and False
return qry
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (dt.datetime, dt.date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def json_timestamp(obj):
if isinstance(obj, (dt.datetime, dt.date)):
return int(time.mktime(obj.timetuple()))
raise TypeError("Type %s not serializable" % type(obj))
def fuzzydate_to_timestamp(obj):
return time.mktime(is_date(None, obj).timetuple())
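# ---------------------------------------------------------------------------
# Illustration (not part of the original module): the JSON written to
# Simulation.plot_data by plot_dict() is a list of four series, one per
# output variable, where each value pair is [unix_timestamp, value]. The
# numbers below are made up and only show the expected shape.
#
#   [
#     {"key": "LAI",  "values": [[1409529600.0, 0.10], [1409616000.0, 0.15]]},
#     {"key": "SM",   "values": [[1409529600.0, 0.30], [1409616000.0, 0.29]]},
#     {"key": "TAGP", "values": [[1409529600.0, 5.00], [1409616000.0, 7.50]]},
#     {"key": "TWSO", "values": [[1409529600.0, 0.00], [1409616000.0, 0.00]]}
#   ]
# ---------------------------------------------------------------------------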
|
apache-2.0
| 3,805,909,013,444,385,000
| 34.186667
| 119
| 0.674119
| false
| 3.41987
| false
| false
| false
|
gnmathur/aMAZEd
|
solution_grid.py
|
1
|
3092
|
"""
Solution-Grid definition
MIT License
Copyright (c) 2017 Gaurav Mathur
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from grid import Grid
from distance import Distances
class SolutionGrid(Grid):
""" A distance grid is a specialized grid that is capable of computing
distances between cells.
"""
def __init__(self, nRows, nColumns):
super(SolutionGrid, self).__init__(nRows, nColumns)
self.distances = None
self.crumbs = None
def compute_distances(self, start):
""" This method computes the distance of each cell in the
grid from <start>
"""
self.distances = Distances(start)
frontier = [start]
while len(frontier) > 0:
new_frontier = []
for cell in frontier:
for linked_cell in cell.getLinks():
if self.distances[linked_cell] != None:
continue
self.distances[linked_cell] = self.distances[cell] + 1
new_frontier.append(linked_cell)
frontier = new_frontier
return self.distances
def solve(self, start, goal):
self.compute_distances(start)
current = goal
self.crumbs = Distances(start)
self.crumbs[current] = self.distances[current]
while current is not start:
for neighbor in current.getLinks():
if self.distances[neighbor] < self.distances[current]:
self.crumbs[neighbor] = self.distances[neighbor]
current = neighbor
def contents_of(self, cell):
""" This routine prints the contents of this cell. This overloaded
function defines the contents of this cell as the distance of this cell
from some defined root cell
"""
if self.crumbs[cell] is not None:
return str(self.crumbs[cell])
else:
return super(SolutionGrid, self).contents_of(cell)
if __name__ == "__main__":
"""
Unit tests
"""
pass
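    # Usage sketch (hypothetical: how individual cells are fetched from the
    # Grid base class is not shown in this file, so cell_at(row, col) below
    # is only an assumed accessor used for illustration):
    #
    #   grid = SolutionGrid(10, 10)
    #   # ... carve the maze by linking neighbouring cells ...
    #   start = grid.cell_at(0, 0)
    #   goal = grid.cell_at(9, 9)
    #   grid.solve(start, goal)   # BFS distances, then walk back from goal
    #   print(grid)               # cells on the path display their distance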
|
mit
| 1,898,911,336,600,446,000
| 35.376471
| 82
| 0.645213
| false
| 4.60119
| false
| false
| false
|
leaprovenzano/kutils
|
kutils/metrics.py
|
1
|
1376
|
from keras import backend as K
def d_precision(y_true, y_pred):
'''this is basically precision metric from keras 1. but
I've attempted to make it differentiable
'''
true_positives = K.sum(K.clip(y_true * y_pred, 0, 1))
predicted_positives = K.sum(K.clip(y_pred, 0, 1))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def d_recall(y_true, y_pred):
    '''this is basically the recall metric from keras 1, but
    I've attempted to make it differentiable.
    '''
true_positives = K.sum(K.clip(y_true * y_pred, 0, 1))
possible_positives = K.sum(K.clip(y_true, 0, 1))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def d_fbeta_score(y_true, y_pred, beta=1):
"""this is basically fbeta from keras 1. but
I've attempted to make it differentiable.
"""
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
p = d_precision(y_true, y_pred)
r = d_recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
def dice_coef(y_true, y_pred, smooth=.000001):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f, axis=-1)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
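# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): each function follows the
# Keras metric signature (y_true, y_pred) -> tensor, so it can be passed
# straight to model.compile(). The tiny model below is only an assumed
# placeholder to illustrate the call.
#
#   from keras.models import Sequential
#   from keras.layers import Dense
#
#   model = Sequential([Dense(1, activation='sigmoid', input_dim=16)])
#   model.compile(optimizer='adam',
#                 loss='binary_crossentropy',
#                 metrics=[dice_coef, d_precision, d_recall])
# ---------------------------------------------------------------------------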
|
mit
| 163,005,164,945,517,700
| 30.272727
| 86
| 0.613372
| false
| 2.92766
| false
| false
| false
|
eri-trabiccolo/exaile
|
plugins/daapclient/__init__.py
|
1
|
25181
|
# Copyright (C) 2006-2007 Aren Olson
# 2011 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import traceback
import os
import gtk
import dbus
import dbus.exceptions
import logging
import time
import threading
import gobject
import xlgui
import pickle
from gettext import gettext as _
from xlgui.panel.collection import CollectionPanel
from xlgui import guiutil
from xlgui.widgets import dialogs, menu, menuitems
from daap import DAAPClient, DAAPError
# httplib's CannotSendRequest is raised when a track download is attempted
# while another request is still in flight (see DaapConnection.get_track).
from httplib import CannotSendRequest
from xl import (
collection,
event,
trax,
common,
providers,
settings,
xdg
)
logger = logging.getLogger(__name__)
gobject.threads_init()
_smi = menu.simple_menu_item
_sep = menu.simple_separator
#
# Check For python-avahi, we can work without
# avahi, but wont be able to discover shares.
#
try:
import avahi
AVAHI = True
except ImportError:
logger.warning('avahi not installed, can\'t auto-discover servers')
AVAHI = False
# detect authorization support in python-daap
try:
tmp = DAAPClient()
tmp.connect("spam","eggs","sausage") #dummy login
del tmp
except TypeError:
AUTH = False
except:
AUTH = True
# Globals Warming
MANAGER = None
class AttrDict(dict):
def __getattr__(self, name):
return self[name]
import functools
# helper function to parse avahi info into a list of tuples (for dict())
parse = functools.partial(zip,
['interface',
'protocol',
'name',
'type',
'domain',
'host',
'aprotocol',
'address',
'port',
'txt',
'flags'])
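# Example (illustrative values only): ResolveService() returns the eleven
# fields listed above as a tuple, so AttrDict(parse(result)) gives attribute
# access such as x.name, x.address and x.port:
#
#   x = AttrDict(parse((0, 0, 'My Music', '_daap._tcp', 'local',
#                       'host.local', 0, '192.168.1.10', 3689, [], 0)))
#   x.address   # -> '192.168.1.10'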
class DaapAvahiInterface(gobject.GObject): #derived from python-daap/examples
"""
Handles detection of DAAP shares via Avahi and manages the menu
showing the shares.
Fires a "connect" signal when a menu item is clicked.
"""
__gsignals__ = {
'connect' : ( gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
( gobject.TYPE_PYOBJECT, ) ) }
def new_service(self, interface, protocol, name, type, domain, flags):
"""
Called when a new share is found.
"""
x = self.server.ResolveService(interface, protocol, name, type, domain,
avahi.PROTO_UNSPEC, dbus.UInt32(0))
x = AttrDict(parse(x))
logger.info("DAAP share found: '{0}' at ({1},{2})."
.format(x.name, x.address, x.port))
# gstreamer can't handle link-local ipv6
if 'fe80' in x.address:
return
#Use all available info in key to avoid name conflicts.
nstr = '%s%s%s%s%s' % (interface, protocol, name, type, domain)
if nstr in self.services:
return
self.services[nstr] = x
self.rebuild_share_menu_items()
# self.new_share_menu_item(x)
def remove_service(self, interface, protocol, name, type, domain, flags):
"""
Called when the connection to a share is lost.
"""
logger.info("DAAP share lost: %s." % name)
nstr = '%s%s%s%s%s' % (interface, protocol, name, type, domain)
if nstr in self.services:
# self.remove_share_menu_item(name)
del self.services[nstr]
self.rebuild_share_menu_items()
def new_share_menu_item(self, name, key):
'''
This function is called to add a server to the connect menu.
'''
# check if the menu exist and check if it's ipv4 or we are allowing
# ipv6
print 'adding menu',name,key
if self.menu:
menu_item = _smi(name, ['sep'], name,
callback=lambda *x: self.clicked(key))
self.menu.add_item(menu_item)
def remove_share_menu_item(self, name):
'''
This function is called to remove a server from the connect menu.
'''
if self.menu:
for item in self.menu._items:
if item.name == name:
self.menu.remove_item(item)
break
def clear_share_menu_items(self):
'''
This function is used to clear all the menu items out of a menu.
'''
if self.menu:
for item in self.menu._items:
if item.name == 'manual' or item.name == 'sep':
continue
self.menu.remove_item(item)
def rebuild_share_menu_items(self):
'''
This function fills the menu with known servers.
'''
self.clear_share_menu_items()
show_ipv6 = settings.get_option('plugin/daapclient/ipv6', False)
items = {}
for key,x in self.services.items():
name = '{0} ({1})'.format(x.name,x.host)
if x.protocol == avahi.PROTO_INET6:
if not show_ipv6:
continue
name += ' - ipv6'
if name not in items:
items[name] = (key,x)
# this dedups based on name-host, replacing ipv4 with ipv6
# for key,x in self.services.items():
# name = '{0} ({1})'.format(x.name,x.host)
# if x.protocol == avahi.PROTO_INET6 and show_ipv6:
# if name in items:
# # prefer ipv6
# if items[name][1].protocol == avahi.PROTO_INET:
# items[name] = (key,x)
# elif x.protocol == avahi.PROTO_INET:
# if name not in items:
# items[name] = (key,x)
for name in items:
self.new_share_menu_item(name, key=items[name][0])
def clicked(self, key):
'''
This function is called in response to a menu_item click.
Fire away.
'''
x = self.services[key]
gobject.idle_add(self.emit, "connect", (x.name, x.address, x.port, x))
def __init__(self, exaile, _menu):
"""
Sets up the avahi listener.
"""
gobject.GObject.__init__(self)
self.services = {}
self.menu = _menu
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.stype = '_daap._tcp'
self.domain = 'local'
self.browser = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
self.server.ServiceBrowserNew(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC,
self.stype, self.domain, dbus.UInt32(0))),
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
self.browser.connect_to_signal('ItemNew', self.new_service)
self.browser.connect_to_signal('ItemRemove', self.remove_service)
class DaapHistory(common.LimitedCache):
def __init__(self, limit=5, location=None, menu=None, callback=None):
common.LimitedCache.__init__(self, limit)
if location is None:
location = os.path.join(xdg.get_cache_dir(), 'daaphistory.dat')
self.location = location
self.menu = menu
self.callback = callback
self.load()
def __setitem__(self, item, value):
common.LimitedCache.__setitem__(self, item, value)
# add new menu item
if self.menu is not None and self.callback is not None:
menu_item = _smi('hist'+item, ['sep'], item,
callback=lambda *x: self.callback(None, value+(None,)))
self.menu.add_item(menu_item)
    def load(self):
        try:
            with open(self.location, 'rb') as f:
                d = pickle.load(f)
                self.update(d)
        except (IOError, EOFError):
            # no history file yet (e.g. first run)
            pass
def save(self):
with open(self.location, 'wb') as f:
pickle.dump(self.cache, f, common.PICKLE_PROTOCOL)
class DaapManager:
'''
DaapManager is a class that manages DaapConnections, both manual
and avahi-generated.
'''
def __init__(self, exaile, _menu, avahi):
'''
Init! Create manual menu item, and connect to avahi signal.
'''
self.exaile = exaile
self.avahi = avahi
self.panels = {}
hmenu = menu.Menu(None)
def hmfactory(menu, parent, context):
item = gtk.MenuItem(_('History'))
item.set_submenu(hmenu)
sens = settings.get_option('plugin/daapclient/history', True)
item.set_sensitive(sens)
return item
_menu.add_item(_smi('manual', [], _('Manually...'),
callback=self.manual_connect))
_menu.add_item(menu.MenuItem('history', hmfactory, ['manual']))
_menu.add_item(_sep('sep', ['history']))
if avahi is not None:
avahi.connect("connect", self.connect_share)
self.history = DaapHistory(5, menu=hmenu, callback=self.connect_share)
def connect_share(self, obj, (name, address, port, svc)):
'''
This function is called when a user wants to connec to
a DAAP share. It creates a new panel for the share, and
requests a track list.
'''
conn = DaapConnection(name, address, port)
conn.connect()
library = DaapLibrary(conn)
panel = NetworkPanel(self.exaile.gui.main.window, library, self)
# cst = CollectionScanThread(None, panel.net_collection, panel)
# cst.start()
panel.refresh() # threaded
providers.register('main-panel', panel)
self.panels[name] = panel
# history
if settings.get_option('plugin/daapclient/history', True):
self.history[name] = (name, address, port)
self.history.save()
def disconnect_share(self, name):
'''
This function is called to disconnect a previously connected
share. It calls the DAAP disconnect, and removes the panel.
'''
panel = self.panels[name]
# panel.library.daap_share.disconnect()
panel.daap_share.disconnect()
# panel.net_collection.remove_library(panel.library)
providers.unregister('main-panel', panel)
del self.panels[name]
def manual_connect(self, *args):
'''
This function is called when the user selects the manual
connection option from the menu. It requests a host/ip to connect
to.
'''
dialog = dialogs.TextEntryDialog(
_("Enter IP address and port for share"),
_("Enter IP address and port."))
resp = dialog.run()
if resp == gtk.RESPONSE_OK:
loc = dialog.get_value().strip()
host = loc
# the port will be anything after the last :
p = host.rfind(":")
# ipv6 literals should have a closing brace before the port
b = host.rfind("]")
if p > b:
try:
port = int(host[p+1:])
host = host[:p]
except ValueError:
logger.error('non-numeric port specified')
return
else:
port = 3689 # if no port specified, use default DAAP port
# if it's an ipv6 host with brackets, strip them
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
nstr = 'custom%s%s' % (host, port)
conn = DaapConnection(loc, host, port)
self.connect_share(None, (loc, host, port, None))
def refresh_share(self, name):
panel = self.panels[name]
rev = panel.daap_share.session.revision
# check for changes
panel.daap_share.session.update()
        logger.debug('DAAP Server %s returned revision %d ( old: %d ) after'
                     ' update request'
                     % (name, panel.daap_share.session.revision, rev))
# if changes, refresh
if rev != panel.daap_share.session.revision:
logger.info('DAAP Server %s changed, refreshing... (revision %d)'
% (name, panel.daap_share.session.revision))
panel.refresh()
def close(self, remove=False):
'''
This function disconnects active DaapConnections, and optionally
removes the panels from the UI.
'''
# disconnect active shares
for panel in self.panels.values():
panel.daap_share.disconnect()
# there's no point in doing this if we're just shutting down, only on
# disable
if remove:
providers.unregister('main-panel', panel)
class DaapConnection(object):
"""
A connection to a DAAP share.
"""
def __init__(self, name, server, port):
# if it's an ipv6 address
if ':' in server and server[0] != '[':
server = '['+server+']'
self.all = []
self.session = None
self.connected = False
self.tracks = None
self.server = server
self.port = port
self.name = name
self.auth = False
self.password = None
def connect(self, password = None):
"""
Connect, login, and retrieve the track list.
"""
try:
client = DAAPClient()
if AUTH and password:
client.connect(self.server, self.port, password)
else:
client.connect(self.server, self.port)
self.session = client.login()
self.connected = True
# except DAAPError:
except Exception, inst:
logger.warning('failed to connect to ({0},{1})'.format(
self.server, self.port))
logger.debug(traceback.format_exc())
self.auth = True
self.connected = False
raise
def disconnect(self):
"""
Disconnect, clean up.
"""
try:
self.session.logout()
except:
pass
self.session = None
self.tracks = None
self.database = None
self.all = []
self.connected = False
def reload(self):
"""
Reload the tracks from the server
"""
self.tracks = None
self.database = None
self.all = []
self.get_database()
t = time.time()
self.convert_list()
logger.debug('{0} tracks loaded in {1}s'.format(len(self.all),
time.time()-t))
def get_database(self):
"""
Get a DAAP database and its track list.
"""
if self.session:
self.database = self.session.library()
self.get_tracks(1)
def get_tracks(self, reset = False):
"""
Get the track list from a DAAP database
"""
if reset or self.tracks == None:
if self.database is None:
self.database = self.session.library()
self.tracks = self.database.tracks()
return self.tracks
def convert_list(self):
"""
Converts the DAAP track database into Exaile Tracks.
"""
# Convert DAAPTrack's attributes to Tracks.
eqiv = {'title':'minm','artist':'asar','album':'asal','tracknumber':'astn',}
# 'genre':'asgn','enc':'asfm','bitrate':'asbr'}
for tr in self.tracks:
if tr is not None:
#http://<server>:<port>/databases/<dbid>/items/<id>.<type>?session-id=<sessionid>
uri = "http://%s:%s/databases/%s/items/%s.%s?session-id=%s" % \
(self.server, self.port, self.database.id, tr.id,
tr.type, self.session.sessionid)
# Don't scan tracks because gio is slow!
temp = trax.Track(uri, scan=False)
for field in eqiv.keys():
try:
tag = u'%s'%tr.atom.getAtom(eqiv[field])
if tag != 'None':
temp.set_tag_raw(field, [tag], notify_changed=False)
except:
                        if field == 'tracknumber':
temp.set_tag_raw('tracknumber', [0], notify_changed=False)
logger.debug(traceback.format_exc())
#TODO: convert year (asyr) here as well, what's the formula?
try:
temp.set_tag_raw("__length", tr.atom.getAtom('astm') / 1000,
notify_changed=False)
except:
temp.set_tag_raw("__length", 0, notify_changed=False)
self.all.append(temp)
@common.threaded
def get_track(self, track_id, filename):
"""
Save the track with track_id to filename
"""
for t in self.tracks:
if t.id == track_id:
try:
t.save(filename)
except CannotSendRequest:
dialog = gtk.MessageDialog(APP.window,
gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
_("""This server does not support multiple connections.
You must stop playback before downloading songs."""))
class DaapLibrary(collection.Library):
'''
Library subclass for better management of collection??
Or something to do with devices or somesuch. Ask Aren.
'''
def __init__(self, daap_share, col=None):
# location = "http://%s:%s/databasese/%s/items/" % (daap_share.server, daap_share.port, daap_share.database.id)
# Libraries need locations...
location = "http://%s:%s/" % (daap_share.server, daap_share.port)
collection.Library.__init__(self, location)
self.daap_share = daap_share
#self.collection = col
def rescan(self, notify_interval=None):
'''
        Called when a library needs to refresh its track list.
'''
if self.collection is None:
return True
if self.scanning:
return
t = time.time()
logger.info('Scanning library: %s' % self.daap_share.name)
self.scanning = True
db = self.collection
# DAAP gives us all the tracks in one dump
self.daap_share.reload()
if self.daap_share.all:
count = len(self.daap_share.all)
else:
count = 0
if count > 0:
logger.info('Adding %d tracks from %s. (%f s)' % (count,
self.daap_share.name, time.time()-t))
self.collection.add_tracks(self.daap_share.all)
if notify_interval is not None:
event.log_event('tracks_scanned', self, count)
# track removal?
self.scanning = False
#return True
    # Needed to be overridden for who knows why (exceptions)
def _count_files(self):
count = 0
if self.daap_share:
count = len(self.daap_share.all)
return count
class NetworkPanel(CollectionPanel):
"""
A panel that displays a collection of tracks from the DAAP share.
"""
def __init__(self, parent, library, mgr):
"""
Expects a parent gtk.Window, and a daap connection.
"""
self.name = library.daap_share.name
self.daap_share = library.daap_share
self.net_collection = collection.Collection(self.name)
self.net_collection.add_library(library)
CollectionPanel.__init__(self, parent, self.net_collection,
self.name, _show_collection_empty_message=False)
self.all = []
self.label = self.name
self.connect_id = None
self.menu = menu.Menu(self)
def get_tracks_func(*args):
return self.tree.get_selected_tracks()
self.menu.add_item(menuitems.AppendMenuItem('append', [],
get_tracks_func))
self.menu.add_item(menuitems.EnqueueMenuItem('enqueue', ['append'],
get_tracks_func))
self.menu.add_item(menuitems.PropertiesMenuItem('props', ['enqueue'],
get_tracks_func))
self.menu.add_item(_sep('sep',['props']))
self.menu.add_item(_smi('refresh', ['sep'], _('Refresh Server List'),
callback = lambda *x: mgr.refresh_share(self.name)))
self.menu.add_item(_smi('disconnect', ['refresh'],
_('Disconnect from Server'),
callback = lambda *x: mgr.disconnect_share(self.name)))
@common.threaded
def refresh(self):
'''
This is called to refresh the track list.
'''
# Since we don't use a ProgressManager/Thingy, we have to call these w/out
# a ScanThread
self.net_collection.rescan_libraries()
gobject.idle_add(self._refresh_tags_in_tree)
def save_selected(self, widget=None, event=None):
"""
Save the selected tracks to disk.
"""
items = self.get_selected_items()
dialog = gtk.FileChooserDialog(_("Select a Location for Saving"),
APP.window, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_OPEN, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
dialog.set_current_folder(APP.get_last_dir())
dialog.set_select_multiple(False)
result = dialog.run()
dialog.hide()
if result == gtk.RESPONSE_OK:
folder = dialog.get_current_folder()
self.save_items(items, folder)
@common.threaded
def save_items(self, items, folder):
for i in items:
tnum = i.get_track()
if tnum < 10: tnum = "0%s"%tnum
else: tnum = str(tnum)
filename = "%s%s%s - %s.%s"%(folder, os.sep, tnum,
i.get_title(), i.type)
i.connection.get_track(i.daapid, filename)
# print "DAAP: saving track %s to %s."%(i.daapid, filename)
def enable(exaile):
'''
Plugin Enabled.
'''
if exaile.loading:
event.add_callback(__enb, 'gui_loaded')
else:
__enb(None, exaile, None)
def __enb(eventname, exaile, wat):
gobject.idle_add(_enable, exaile)
def _enable(exaile):
global MANAGER
event.add_callback(on_settings_change, 'plugin_daapclient_option_set')
menu_ = menu.Menu(None)
providers.register('menubar-tools-menu', _sep('plugin-sep', ['track-properties']))
item = _smi('daap', ['plugin-sep'], _('Connect to DAAP...'),
submenu=menu_)
providers.register('menubar-tools-menu', item)
if AVAHI:
try:
avahi_interface = DaapAvahiInterface(exaile, menu_)
except RuntimeError: # no dbus?
avahi_interface = None
logger.warning('avahi interface could not be initialized (no dbus?)')
except dbus.exceptions.DBusException, s:
avahi_interface = None
logger.error('Got DBUS error: %s' % s)
logger.error('is avahi-daemon running?')
else:
avahi_interface = None
logger.warn('AVAHI could not be imported, you will not see broadcast shares.')
MANAGER = DaapManager(exaile, menu_, avahi_interface)
def teardown(exaile):
'''
Exaile Shutdown.
'''
if MANAGER is not None:
MANAGER.close()
def disable(exaile):
'''
Plugin Disabled.
'''
# disconnect from active shares
if MANAGER is not None:
# MANAGER.clear()
MANAGER.close(True)
for item in providers.get('menubar-tools-menu'):
if item.name == 'daap':
providers.unregister('menubar-tools-menu', item)
break
event.remove_callback(__enb, 'gui_loaded')
# settings stuff
import daapclientprefs
def get_preferences_pane():
return daapclientprefs
def on_settings_change(event, setting, option):
if option == 'plugin/daapclient/ipv6' and MANAGER is not None:
MANAGER.avahi.rebuild_share_menu_items()
# vi: et ts=4 sts=4 sw=4
|
gpl-2.0
| -3,843,772,557,405,208,000
| 31.533592
| 118
| 0.547079
| false
| 3.981186
| false
| false
| false
|
MRN-Code/pl2mind
|
models/nice_mlp.py
|
1
|
1883
|
"""
Module for classes to simplify MLPs for NICE training.
"""
import pylearn2
import pylearn2.models
import nice
import nice.pylearn2.models.mlp
from pylearn2.models.mlp import MLP
from pylearn2.models.mlp import Linear
from pylearn2.models.mlp import RectifiedLinear
from nice.pylearn2.models.mlp import CouplingLayer
from nice.pylearn2.models.mlp import Homothety
from nice.pylearn2.models.mlp import SigmaScaling
from nice.pylearn2.models.mlp import TriangularMLP
class Simple_MLP(MLP):
def __init__(self, layer_name, depth, half_vis, nhid, irange=0.01):
layers = []
        for i in xrange(depth):
layer = RectifiedLinear(dim=nhid,
layer_name="%s_h%d" % (layer_name, i),
irange=irange)
layers.append(layer)
layer = Linear(dim=half_vis,
layer_name="%s_out" % layer_name,
irange=irange)
layers.append(layer)
super(Simple_MLP, self).__init__(layers, layer_name=layer_name)
class Simple_TriangularMLP(TriangularMLP):
def __init__(self, layer_name, layer_depths, nvis, nhid, top_layer=None):
layers = []
for i, depth in enumerate(layer_depths):
layer = CouplingLayer(split=nvis // 2,
coupling=Simple_MLP("coupling_%d" % (i + 1),
depth,
nvis // 2,
nhid))
layers.append(layer)
if top_layer is None:
layer = Homothety(layer_name="z")
layers.append(layer)
else:
layer = top_layer
layers.append(layer)
super(Simple_TriangularMLP, self).__init__(layers, layer_name=layer_name)
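# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; the sizes are made-up
# examples): build a triangular NICE MLP with four coupling layers, each
# backed by a two-layer rectified coupling MLP, topped by a Homothety layer.
#
#   model = Simple_TriangularMLP(layer_name='nice',
#                                layer_depths=[2, 2, 2, 2],
#                                nvis=784,
#                                nhid=1000)
# ---------------------------------------------------------------------------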
|
gpl-2.0
| 9,018,267,773,866,419,000
| 35.211538
| 81
| 0.553372
| false
| 3.827236
| false
| false
| false
|
mouradmourafiq/django-subways
|
subways/views.py
|
1
|
2167
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext, Context, loader
from subways.models import Map, Line, Stop
from subways.utilis import ride_path, longest_ride_path
def map(request, map_name, template_name=None):
""" view a map """
map = Map.objects.get(name=map_name)
lines = Line.objects.filter(map=map)
stops = Stop.objects.all().values_list('name', flat=True)
c = RequestContext(request, {'map': map,
'lines': lines,
'stops': stops
})
return render_to_response(template_name, c)
def longest_ride(request, map_name, template_name=None):
""""Return the longest possible (in terms of stops)
ride between any two stops in the system."""
map = Map.objects.get(name=map_name)
lines = Line.objects.filter(map=map)
stops = Stop.objects.all()
path_stops = longest_ride_path(stops)
stops = stops.values_list('name', flat=True)
c = RequestContext(request, {'map': map,
'lines': lines,
'stops': stops,
'path_stops': path_stops
})
return render_to_response(template_name, c)
def ride(request, map_name, here='mit', there='government', template_name=None):
""""Return the longest possible
ride between any two stops in the system."""
map = Map.objects.get(name=map_name)
lines = Line.objects.filter(map=map)
here_stop = Stop.objects.get(name=here)
there_stop = Stop.objects.get(name=there)
path_stops = ride_path(here_stop, there_stop)
stops = Stop.objects.all().values_list('name', flat=True)
c = RequestContext(request, {'map': map,
'lines': lines,
'stops': stops,
'path_stops': path_stops,
})
return render_to_response(template_name, c)
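# ---------------------------------------------------------------------------
# Usage sketch (assumption, not part of the original app): the views expect
# map_name (plus here/there for ride) captured from the URL and a
# template_name extra keyword, e.g. in a Django 1.x style urls.py. The URL
# patterns and template paths below are hypothetical.
#
#   from django.conf.urls import url
#   from subways import views
#
#   urlpatterns = [
#       url(r'^(?P<map_name>[\w-]+)/$',
#           views.map, {'template_name': 'subways/map.html'}),
#       url(r'^(?P<map_name>[\w-]+)/longest/$',
#           views.longest_ride, {'template_name': 'subways/map.html'}),
#       url(r'^(?P<map_name>[\w-]+)/ride/(?P<here>[\w-]+)/(?P<there>[\w-]+)/$',
#           views.ride, {'template_name': 'subways/map.html'}),
#   ]
# ---------------------------------------------------------------------------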
|
bsd-2-clause
| 7,201,897,100,008,982,000
| 45.106383
| 91
| 0.53761
| false
| 4.073308
| false
| false
| false
|
joyhchen/zulip
|
zerver/views/messages.py
|
1
|
44785
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Any, AnyStr, Callable, Iterable, Optional, Tuple, Union
from zerver.lib.str_utils import force_bytes, force_text
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body, render_incoming_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.cache import (
generic_bulk_cached_fetch,
to_dict_cache_key_id,
)
from zerver.lib.message import (
access_message,
MessageDict,
extract_message_dict,
render_markdown,
stringify_message_dict,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Realm, RealmAlias, Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, \
parse_usermessage_flags, \
email_to_domain, get_realm, get_active_streams, \
bulk_get_streams, get_user_profile_by_id
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, Select, ColumnElement
import re
import ujson
import datetime
from six.moves import map
import six
class BadNarrowOperator(JsonableError):
def __init__(self, desc, status_code=400):
# type: (str, int) -> None
self.desc = desc
self.status_code = status_code
def to_json_error_msg(self):
# type: () -> str
return _('Invalid narrow operator: {}').format(self.desc)
Query = Any # TODO: Should be Select, but sqlalchemy stubs are busted
ConditionTransform = Any # TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
def __init__(self, user_profile, msg_id_column):
# type: (UserProfile, str) -> None
self.user_profile = user_profile
self.msg_id_column = msg_id_column
def add_term(self, query, term):
# type: (Query, Dict[str, Any]) -> Query
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
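    # For example, the term {'operator': 'pm-with', 'operand': 'user@example.com'}
    # dispatches to by_pm_with(), and a term with 'negated': True wraps the
    # resulting condition in sqlalchemy's not_().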
def by_has(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'private':
query = query.select_from(join(query.froms[0], "zerver_recipient",
column("recipient_id") ==
literal_column("zerver_recipient.id")))
cond = or_(column("type") == Recipient.PERSONAL,
column("type") == Recipient.HUDDLE)
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned' or operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
# type: (text_type) -> text_type
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if c == '\000':
                    s[i] = '\\000'
elif ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
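    # Example: self._pg_re_escape(u'foo.\u03bb') returns u'foo\\.\\u03bb',
    # i.e. the dot is backslash-escaped and the non-ASCII character becomes a
    # literal \uXXXX sequence that postgres' regex engine accepts.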
def by_stream(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
stream = get_stream(operand, self.user_profile.realm)
if stream is None:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
# (unsocial, ununsocial, social.d, etc)
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
if m:
base_stream_name = m.group(1)
else:
base_stream_name = stream.name
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
return query.where(maybe_negate(cond))
recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
def by_topic(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
# (foo, foo.d, foo.d.d, etc)
m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
if m:
base_topic = m.group(1)
else:
base_topic = operand
# Additionally, MIT users expect the empty instance and
# instance "personal" to be the same.
if base_topic in ('', 'personal', '(instance "")'):
regex = r'^(|personal|\(instance ""\))(\.d)*$'
else:
regex = r'^%s(\.d)*$' % (self._pg_re_escape(base_topic),)
cond = column("subject").op("~*")(regex)
return query.where(maybe_negate(cond))
cond = func.upper(column("subject")) == func.upper(literal(operand))
return query.where(maybe_negate(cond))
def by_sender(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
try:
sender = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
cond = column("sender_id") == literal(sender.id)
return query.where(maybe_negate(cond))
def by_near(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
return query
def by_id(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
cond = self.msg_id_column == literal(operand)
return query.where(maybe_negate(cond))
def by_pm_with(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if ',' in operand:
# Huddle
try:
emails = [e.strip() for e in operand.split(',')]
recipient = recipient_for_emails(emails, False,
self.user_profile, self.user_profile)
except ValidationError:
raise BadNarrowOperator('unknown recipient ' + operand)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
else:
# Personal message
self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
if operand == self.user_profile.email:
# Personals with self
cond = and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == self_recipient.id)
return query.where(maybe_negate(cond))
# Personals with other user; include both directions.
try:
narrow_profile = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
cond = or_(and_(column("sender_id") == narrow_profile.id,
column("recipient_id") == self_recipient.id),
and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == narrow_recipient.id))
return query.where(maybe_negate(cond))
def by_search(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if settings.USING_PGROONGA:
return self._by_search_pgroonga(query, operand, maybe_negate)
else:
return self._by_search_tsearch(query, operand, maybe_negate)
def _by_search_pgroonga(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
match_positions_byte = func.pgroonga.match_positions_byte
query_extract_keywords = func.pgroonga.query_extract_keywords
keywords = query_extract_keywords(operand)
query = query.column(match_positions_byte(column("rendered_content"),
keywords).label("content_matches"))
query = query.column(match_positions_byte(column("subject"),
keywords).label("subject_matches"))
condition = column("search_pgroonga").op("@@")(operand)
return query.where(maybe_negate(condition))
def _by_search_tsearch(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
ts_locs_array = func.ts_match_locs_array
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
column("rendered_content"),
tsquery).label("content_matches"))
# We HTML-escape the subject in Postgres to avoid doing a server round-trip
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
func.escape_html(column("subject")),
tsquery).label("subject_matches"))
# Do quoted string matching. We really want phrase
# search here so we can ignore punctuation and do
# stemming, but there isn't a standard phrase search
# mechanism in Postgres
for term in re.findall('"[^"]+"|\S+', operand):
if term[0] == '"' and term[-1] == '"':
term = term[1:-1]
term = '%' + connection.ops.prep_for_like_query(term) + '%'
cond = or_(column("content").ilike(term),
column("subject").ilike(term))
query = query.where(maybe_negate(cond))
cond = column("search_tsvector").op("@@")(tsquery)
return query.where(maybe_negate(cond))
# Apparently, the offsets we get from tsearch_extras are counted in
# unicode characters, not in bytes, so we do our processing with text,
# not bytes.
def highlight_string_text_offsets(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
string = force_text(text)
highlight_start = u'<span class="highlight">'
highlight_stop = u'</span>'
pos = 0
result = u''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return result
def highlight_string_bytes_offsets(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
string = force_bytes(text)
highlight_start = b'<span class="highlight">'
highlight_stop = b'</span>'
pos = 0
result = b''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return force_text(result)
def highlight_string(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
if settings.USING_PGROONGA:
return highlight_string_bytes_offsets(text, locs)
else:
return highlight_string_text_offsets(text, locs)
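# Example (illustrative): highlight_string(u'hello world', [(0, 5)]) returns
# u'<span class="highlight">hello</span> world'.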
def get_search_fields(rendered_content, subject, content_matches, subject_matches):
# type: (text_type, text_type, Iterable[Tuple[int, int]], Iterable[Tuple[int, int]]) -> Dict[str, text_type]
return dict(match_content=highlight_string(rendered_content, content_matches),
match_subject=highlight_string(escape_html(subject), subject_matches))
def narrow_parameter(json):
# type: (str) -> List[Dict[str, Any]]
# FIXME: A hack to support old mobile clients
if json == '{}':
return None
data = ujson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
def convert_term(elem):
# type: (Union[Dict, List]) -> Dict[str, Any]
# We have to support a legacy tuple format.
if isinstance(elem, list):
if (len(elem) != 2
or any(not isinstance(x, str) and not isinstance(x, six.text_type)
for x in elem)):
raise ValueError("element is not a string pair")
return dict(operator=elem[0], operand=elem[1])
if isinstance(elem, dict):
validator = check_dict([
('operator', check_string),
('operand', check_string),
])
error = validator('elem', elem)
if error:
raise JsonableError(error)
# whitelist the fields we care about for now
return dict(
operator=elem['operator'],
operand=elem['operand'],
negated=elem.get('negated', False),
)
raise ValueError("element is not a dictionary")
return list(map(convert_term, data))
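# Example (illustrative): both of the following are accepted and narrow to
# the stream 'Denmark'; the first uses the legacy tuple format, the second
# the current dict format:
#
#   narrow_parameter('[["stream", "Denmark"]]')
#   narrow_parameter('[{"operator": "stream", "operand": "Denmark"}]')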
def is_public_stream(stream_name, realm):
# type: (text_type, Realm) -> bool
"""
Determine whether a stream is public, so that
our caller can decide whether we can get
historical messages for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
"""
stream = get_stream(stream_name, realm)
if stream is None:
return False
return stream.is_public()
def ok_to_include_history(narrow, realm):
# type: (Iterable[Dict[str, Any]], Realm) -> bool
# There are occasions where we need to find Message rows that
# have no corresponding UserMessage row, because the user is
# reading a public stream that might include messages that
# were sent while the user was not subscribed, but which they are
# allowed to see. We have to be very careful about constructing
# queries in those situations, so this function should return True
# only if we are 100% sure that we're gonna add a clause to the
# query that narrows to a particular public stream on the user's realm.
# If we screw this up, then we can get into a nasty situation of
# polluting our narrow results with messages from other realms.
include_history = False
if narrow is not None:
for term in narrow:
if term['operator'] == "stream" and not term.get('negated', False):
if is_public_stream(term['operand'], realm):
include_history = True
# Disable historical messages if the user is narrowing on anything
# that's a property on the UserMessage table. There cannot be
# historical messages in these cases anyway.
for term in narrow:
if term['operator'] == "is":
include_history = False
return include_history
def get_stream_name_from_narrow(narrow):
# type: (Iterable[Dict[str, Any]]) -> Optional[text_type]
for term in narrow:
if term['operator'] == 'stream':
return term['operand'].lower()
return None
def exclude_muting_conditions(user_profile, narrow):
# type: (UserProfile, Iterable[Dict[str, Any]]) -> List[Selectable]
conditions = []
stream_name = get_stream_name_from_narrow(narrow)
if stream_name is None:
rows = Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=False,
recipient__type=Recipient.STREAM
).values('recipient_id')
muted_recipient_ids = [row['recipient_id'] for row in rows]
condition = not_(column("recipient_id").in_(muted_recipient_ids))
conditions.append(condition)
muted_topics = ujson.loads(user_profile.muted_topics)
if muted_topics:
if stream_name is not None:
muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
if not muted_topics:
return conditions
muted_streams = bulk_get_streams(user_profile.realm,
[muted[0] for muted in muted_topics])
muted_recipients = bulk_get_recipients(Recipient.STREAM,
[stream.id for stream in six.itervalues(muted_streams)])
recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
for s in six.itervalues(muted_streams))
muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]
if muted_topics:
def mute_cond(muted):
# type: (Tuple[str, str]) -> Selectable
stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
topic_cond = func.upper(column("subject")) == func.upper(muted[1])
return and_(stream_cond, topic_cond)
condition = not_(or_(*list(map(mute_cond, muted_topics))))
return conditions + [condition]
return conditions
@has_request_variables
def get_old_messages_backend(request, user_profile,
anchor = REQ(converter=int),
num_before = REQ(converter=to_non_negative_int),
num_after = REQ(converter=to_non_negative_int),
narrow = REQ('narrow', converter=narrow_parameter, default=None),
use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
apply_markdown=REQ(default=True,
converter=ujson.loads)):
# type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
include_history = ok_to_include_history(narrow, user_profile.realm)
if include_history and not use_first_unread_anchor:
query = select([column("id").label("message_id")], None, "zerver_message")
inner_msg_id_col = literal_column("zerver_message.id")
elif narrow is None:
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
"zerver_usermessage")
inner_msg_id_col = column("message_id")
else:
# TODO: Don't do this join if we're not doing a search
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
inner_msg_id_col = column("message_id")
num_extra_messages = 1
is_search = False
if narrow is not None:
# Add some metadata to our logging data for narrows
verbose_operators = []
for term in narrow:
if term['operator'] == "is":
verbose_operators.append("is:" + term['operand'])
else:
verbose_operators.append(term['operator'])
request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
# Build the query for the narrow
num_extra_messages = 0
builder = NarrowBuilder(user_profile, inner_msg_id_col)
for term in narrow:
if term['operator'] == 'search' and not is_search:
query = query.column("subject").column("rendered_content")
is_search = True
query = builder.add_term(query, term)
# We add 1 to the number of messages requested if no narrow was
# specified to ensure that the resulting list always contains the
# anchor message. If a narrow was specified, the anchor message
# might not match the narrow anyway.
if num_after != 0:
num_after += num_extra_messages
else:
num_before += num_extra_messages
sa_conn = get_sqlalchemy_connection()
if use_first_unread_anchor:
condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
# We exclude messages on muted topics when finding the first unread
# message in this narrow
muting_conditions = exclude_muting_conditions(user_profile, narrow)
if muting_conditions:
condition = and_(condition, *muting_conditions)
first_unread_query = query.where(condition)
first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
if len(first_unread_result) > 0:
anchor = first_unread_result[0][0]
else:
anchor = 10000000000000000
before_query = None
after_query = None
if num_before != 0:
before_anchor = anchor
if num_after != 0:
# Don't include the anchor in both the before query and the after query
before_anchor = anchor - 1
before_query = query.where(inner_msg_id_col <= before_anchor) \
.order_by(inner_msg_id_col.desc()).limit(num_before)
if num_after != 0:
after_query = query.where(inner_msg_id_col >= anchor) \
.order_by(inner_msg_id_col.asc()).limit(num_after)
if num_before == 0 and num_after == 0:
# This can happen when a narrow is specified.
after_query = query.where(inner_msg_id_col == anchor)
if before_query is not None:
if after_query is not None:
query = union_all(before_query.self_group(), after_query.self_group())
else:
query = before_query
else:
query = after_query
main_query = alias(query)
query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
# This is a hack to tag the query we use for testing
query = query.prefix_with("/* get_old_messages */")
query_result = list(sa_conn.execute(query).fetchall())
# The following is a little messy, but ensures that the code paths
# are similar regardless of the value of include_history. The
# 'user_messages' dictionary maps each message to the user's
# UserMessage object for that message, which we will attach to the
# rendered message dict before returning it. We attempt to
# bulk-fetch rendered message dicts from remote cache using the
# 'messages' list.
search_fields = dict() # type: Dict[int, Dict[str, text_type]]
message_ids = [] # type: List[int]
user_message_flags = {} # type: Dict[int, List[str]]
if include_history:
message_ids = [row[0] for row in query_result]
# TODO: This could be done with an outer join instead of two queries
user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids))
for row in query_result:
message_id = row[0]
if user_message_flags.get(message_id) is None:
user_message_flags[message_id] = ["read", "historical"]
if is_search:
(_, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
else:
for row in query_result:
message_id = row[0]
flags = row[1]
user_message_flags[message_id] = parse_usermessage_flags(flags)
message_ids.append(message_id)
if is_search:
(_, _, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
cache_transformer = lambda row: MessageDict.build_dict_from_raw_db_row(row, apply_markdown)
id_fetcher = lambda row: row['id']
message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
Message.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
msg_dict.update(search_fields.get(message_id, {}))
message_list.append(msg_dict)
statsd.incr('loaded_old_messages', len(message_list))
ret = {'messages': message_list,
"result": "success",
"msg": ""}
return json_success(ret)
@has_request_variables
def update_message_flags(request, user_profile,
messages=REQ(validator=check_list(check_int)),
operation=REQ('op'), flag=REQ(),
all=REQ(validator=check_bool, default=False),
stream_name=REQ(default=None),
topic_name=REQ(default=None)):
# type: (HttpRequest, UserProfile, List[int], text_type, text_type, bool, Optional[text_type], Optional[text_type]) -> HttpResponse
if all:
target_count_str = "all"
else:
target_count_str = str(len(messages))
log_data_str = "[%s %s/%s]" % (operation, flag, target_count_str)
request._log_data["extra"] = log_data_str
stream = None
if stream_name is not None:
stream = get_stream(stream_name, user_profile.realm)
if not stream:
raise JsonableError(_('No such stream \'%s\'') % (stream_name,))
if topic_name:
topic_exists = UserMessage.objects.filter(user_profile=user_profile,
message__recipient__type_id=stream.id,
message__recipient__type=Recipient.STREAM,
message__subject__iexact=topic_name).exists()
if not topic_exists:
raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
count = do_update_message_flags(user_profile, operation, flag, messages,
all, stream, topic_name)
# If we succeed, update log data str with the actual count for how
# many messages were updated.
if count != len(messages):
log_data_str = "[%s %s/%s] actually %s" % (operation, flag, target_count_str, count)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'messages': messages,
'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
# type: (HttpResponse, UserProfile, Iterable[text_type]) -> Tuple[bool, UserProfile]
if "sender" not in request.POST:
return (False, None)
sender_email = request.POST["sender"].strip().lower()
referenced_users = set([sender_email])
if request.POST['type'] == 'private':
for email in recipients:
referenced_users.add(email.lower())
if request.client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif request.client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif request.client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
# Unrecognized mirroring client
return (False, None)
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
return (False, None)
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_profile_by_email(sender_email)
return (True, sender)
def same_realm_zephyr_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
#
# Are the sender and recipient both addresses in the same Zephyr
# mirroring realm? We have to handle this specially, inferring
# the domain from the e-mail address, because the recipient may
    # not exist in Zulip and we may need to make a stub Zephyr
# mirroring user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email)
return user_profile.realm.is_zephyr_mirror_realm and \
RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_irc_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be username@irc.example.com
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email).replace("irc.", "")
return RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()
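# Illustrative sketch (not part of the original module): for a hypothetical
# mirrored address "someuser@irc.example.com", the check above reduces to a
# RealmAlias lookup on the bare domain:
#
#     email_to_domain("someuser@irc.example.com")   # -> "irc.example.com"
#     "irc.example.com".replace("irc.", "")         # -> "example.com"
#     RealmAlias.objects.filter(realm=user_profile.realm,
#                               domain="example.com").exists()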
def same_realm_jabber_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
try:
validators.validate_email(email)
except ValidationError:
return False
# If your Jabber users have a different email domain than the
# Zulip users, this is where you would do any translation.
domain = email_to_domain(email)
return RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
message_type_name = REQ('type'),
message_to = REQ('to', converter=extract_recipients, default=[]),
forged = REQ(default=False),
subject_name = REQ('subject', lambda x: x.strip(), None),
message_content = REQ('content'),
domain = REQ('domain', default=None),
local_id = REQ(default=None),
queue_id = REQ(default=None)):
# type: (HttpRequest, UserProfile, text_type, List[text_type], bool, Optional[text_type], text_type, Optional[text_type], Optional[text_type], Optional[text_type]) -> HttpResponse
client = request.client
is_super_user = request.user.is_api_super_user
if forged and not is_super_user:
return json_error(_("User not authorized for this query"))
realm = None
if domain and domain != user_profile.realm.domain:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error(_("User not authorized for this query"))
realm = get_realm(domain)
if not realm:
return json_error(_("Unknown domain %s") % (domain,))
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream (any stream for the Zephyr and Jabber
# mirrors, but only streams with names starting with a "#" for
# IRC mirrors)
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
if "sender" not in request.POST:
return json_error(_("Missing sender"))
if message_type_name != "private" and not is_super_user:
return json_error(_("User not authorized for this query"))
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user_profile, message_to)
if not valid_input:
return json_error(_("Invalid mirrored message"))
if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
return json_error(_("Invalid mirrored realm"))
if (client.name == "irc_mirror" and message_type_name != "private" and
not message_to[0].startswith("#")):
return json_error(_("IRC stream names must start with #"))
sender = mirror_sender
else:
sender = user_profile
ret = check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id)
return json_success({"id": ret})
@authenticated_json_post_view
def json_update_message(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return update_message_backend(request, user_profile)
@has_request_variables
def update_message_backend(request, user_profile,
message_id=REQ(converter=to_non_negative_int),
subject=REQ(default=None),
propagate_mode=REQ(default="change_one"),
content=REQ(default=None)):
# type: (HttpRequest, UserProfile, int, Optional[text_type], Optional[str], Optional[text_type]) -> HttpResponse
if not user_profile.realm.allow_message_editing:
return json_error(_("Your organization has turned off message editing."))
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Unknown message id"))
# You only have permission to edit a message if:
# 1. You sent it, OR:
# 2. This is a topic-only edit for a (no topic) message, OR:
# 3. This is a topic-only edit and you are an admin.
if message.sender == user_profile:
pass
elif (content is None) and ((message.topic_name() == "(no topic)") or
user_profile.is_realm_admin):
pass
else:
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has past"))
if subject is None and content is None:
return json_error(_("Nothing to change"))
if subject is not None:
subject = subject.strip()
if subject == "":
raise JsonableError(_("Topic can't be empty"))
rendered_content = None
links_for_embed = set() # type: Set[text_type]
if content is not None:
content = content.strip()
if content == "":
content = "(deleted)"
content = truncate_body(content)
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
ums = UserMessage.objects.filter(message=message.id,
flags=~UserMessage.flags.historical)
message_users = {get_user_profile_by_id(um.user_profile_id) for um in ums}
# If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(message,
content=content,
message_users=message_users)
links_for_embed |= message.links_for_preview
do_update_message(user_profile, message, subject, propagate_mode, content, rendered_content)
if links_for_embed and getattr(settings, 'INLINE_URL_EMBED_PREVIEW', None):
event_data = {
'message_id': message.id,
'message_content': message.content,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data, lambda x: None)
return json_success()
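# Illustrative sketch (not part of the original module): the content-edit
# deadline computed above, assuming a realm configured with a 600 second
# message_content_edit_limit_seconds:
#
#     deadline_seconds = 600 + edit_limit_buffer     # 600 + 20 = 620
#     # an edit 610 seconds after pub_date is still accepted;
#     # an edit 621 seconds after pub_date raises JsonableError.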
@has_request_variables
def json_fetch_raw_message(request, user_profile,
message_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
(message, user_message) = access_message(user_profile, message_id)
return json_success({"raw_content": message.content})
@has_request_variables
def render_message_backend(request, user_profile, content=REQ()):
# type: (HttpRequest, UserProfile, text_type) -> HttpResponse
message = Message()
message.sender = user_profile
message.content = content
message.sending_client = request.client
rendered_content = render_markdown(message, content, domain=user_profile.realm.domain)
return json_success({"rendered": rendered_content})
@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return messages_in_narrow_backend(request, user_profile)
@has_request_variables
def messages_in_narrow_backend(request, user_profile,
msg_ids = REQ(validator=check_list(check_int)),
narrow = REQ(converter=narrow_parameter)):
# type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse
# Note that this function will only work on messages the user
# actually received
# TODO: We assume that the narrow is a search. For now this works because
# the browser only ever calls this function for searches, since it can't
# apply that narrow operator itself.
query = select([column("message_id"), column("subject"), column("rendered_content")],
and_(column("user_profile_id") == literal(user_profile.id),
column("message_id").in_(msg_ids)),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
builder = NarrowBuilder(user_profile, column("message_id"))
for term in narrow:
query = builder.add_term(query, term)
sa_conn = get_sqlalchemy_connection()
query_result = list(sa_conn.execute(query).fetchall())
search_fields = dict()
for row in query_result:
(message_id, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
return json_success({"messages": search_fields})
|
apache-2.0
| 8,876,619,067,596,775,000
| 43.919759
| 183
| 0.604488
| false
| 4.130314
| false
| false
| false
|
ebrian/dispenserd
|
tests/test.py
|
1
|
7171
|
import unittest
import requests
import sys
import random
import json
class TestDispenserd(unittest.TestCase):
base_url = 'http://127.0.0.1:8282'
def test010_is_running(self):
res = requests.get(self.base_url + '/')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
def test020_queue_is_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['main']), 0)
def test030_queue_fills(self):
for i in range(0, 100):
res = requests.post(self.base_url + '/schedule', \
json={'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
def test031_queue_not_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['main']), 100)
def test032_queue_properly_ordered(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['main']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
def test033_queue_drains(self):
for i in range(0, 100):
res = requests.post(self.base_url + '/receive_noblock')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test034_queue_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['main']), 0)
def test040_queues_fill(self):
for i in range(0, 30):
res = requests.post(self.base_url + '/schedule', \
json={'lane': 'lane1', 'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
for i in range(0, 50):
res = requests.post(self.base_url + '/schedule', \
json={'lane': 'lane2', 'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
for i in range(0, 70):
res = requests.post(self.base_url + '/schedule', \
json={'lane': 'lane3', 'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
def test041_queues_not_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 30)
self.assertEqual(len(json['lane2']), 50)
self.assertEqual(len(json['lane3']), 70)
def test042_queues_properly_ordered(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['lane1']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['lane2']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['lane3']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
def test043_queue1_drains(self):
for i in range(0, 30):
res = requests.post(self.base_url + '/receive_noblock', \
json={'lane': 'lane1'})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test044_queue1_empty_queue23_full(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 0)
self.assertEqual(len(json['lane2']), 50)
self.assertEqual(len(json['lane3']), 70)
def test045_queue2_drains(self):
for i in range(0, 50):
res = requests.post(self.base_url + '/receive_noblock', \
json={'lane': 'lane2'})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test046_queue12_empty_queue3_full(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 0)
self.assertEqual(len(json['lane2']), 0)
self.assertEqual(len(json['lane3']), 70)
def test047_queue3_drains(self):
for i in range(0, 70):
res = requests.post(self.base_url + '/receive_noblock', \
json={'lane': 'lane3'})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test048_queue123_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 0)
self.assertEqual(len(json['lane2']), 0)
self.assertEqual(len(json['lane3']), 0)
suite = unittest.TestLoader().loadTestsFromTestCase(TestDispenserd)
ret = unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(not ret)
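# Illustrative note (not part of the original suite): these tests assume a
# dispenserd instance is already listening on http://127.0.0.1:8282; with one
# running, the suite is executed directly and the process exit status mirrors
# wasSuccessful() via the sys.exit() call above:
#
#     python test.py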
|
mit
| 5,116,270,982,148,051,000
| 39.514124
| 104
| 0.569516
| false
| 3.786167
| true
| false
| false
|
chop-dbhi/brptoolkit-demo-harvest
|
brp_demo/formatters.py
|
1
|
36221
|
from django.core.urlresolvers import reverse
from avocado.formatters import registry
from serrano.formatters import HTMLFormatter
from brp_demo.models import *
from django.template import Context
from django.template.loader import get_template
from django.conf import settings
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import logging
log = logging.getLogger(__name__)
class cBioLinkFormatter(HTMLFormatter):
def to_html(self, value, **context):
# http://reslnbrp_demobio01.research.chop.edu:8080/cbioportal/case.do?cancer_study_id=cranio_resnicklab_2013&case_id=7316_100
from .models import NautilusSubject, PortalSubject
sub = PortalSubject.objects.get(pk=value)
sdgs = sub.nautilussubject_set.all()
html = '<ul>'
for sdg in sdgs:
html += '<em>{0}</em>'.format(sdg.sample_subject_id)
if hasattr(sdg, 'cbiosample'):
html += '<li><a href="{0}case.do?cancer_study_id={1}&case_id={2}" target="_blank">View in cBio</a></li>'.format(settings.CBIO_HOST, sdg.cbiosample.cancer_study_identifier, sdg.cbiosample.stable_id)
else:
html += '<li>Not Available</li>'
return html
class SpecimenLocationFormatter(HTMLFormatter):
def to_html(self, values, **context):
from avocado.models import DataField
plate_locations = ['plate_order', 'plate_column', 'plate_row']
html_str = ""
for name in plate_locations:
if values[name] is not None:
data_field = DataField.objects.get_by_natural_key('brp_demo', 'specimen', name)
html_str += "<tr><td>{0}</td><td>{1}</td></tr>".format(data_field, values[name])
if html_str != "":
return "<table class='table table-striped table-condensed'>{0}</table>".format(html_str)
return ""
to_html.process_multiple = True
class PatientSummaryFormatter(HTMLFormatter):
def to_html(self, value, **context):
url = reverse('patient-detail', kwargs={'pk': value})
return '<a href="{0}">View Summary</a>'.format(url)
def to_csv(self, value, **context):
return ''
class PathologyReportFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import NautilusSubject, PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
sdgs = sub.nautilussubject_set.all()
except:
return '<em>Not Available</em>'
html = '<ul>'
for sdg in sdgs:
html += '<em>{0}</em>'.format(sdg.sample_subject_id)
if not sdg.pathreport_set.all():
html+= '<li><em>Not Available</em></li>'
for each in sdg.pathreport_set.all():
html += '<li><a href="{0}">Pathology Report</a></li>'.format(each.path_url)
html += '</ul>'
return html
def to_csv(self, value, **context):
from .models import NautilusSubject
try:
sub = NautilusSubject.objects.get(sample_subject_id=value)
except:
return ''
if not sub.pathreport_set.all():
return ''
csv_ = ''
for each in sub.pathreport_set.all():
csv_ += '{0},'.format(each.path_url)
        return csv_.rstrip(',')
class OperativeReportFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import NautilusSubject, PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
sdgs = sub.nautilussubject_set.all()
except:
return '<em>Not Available</em>'
html = '<ul>'
for sdg in sdgs:
html += '<em>{0}</em>'.format(sdg.sample_subject_id)
if not sdg.operativereport_set.all():
html+= '<li><em>Not Available</em></li>'
for each in sdg.operativereport_set.all():
html += '<li><a href="{0}">Operative Report</a></li>'.format(each.op_url)
html += '</ul>'
return html
def to_csv(self, value, **context):
from .models import NautilusSubject
try:
sub = NautilusSubject.objects.get(sample_subject_id=value)
except:
return ''
if not sub.operativereport_set.all():
return ''
csv_ = ''
for each in sub.operativereport_set.all():
csv_ += '{0},'.format(each.op_url)
        return csv_.rstrip(',')
class EnrollmentTypeFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return '<em>Not Available</em>'
count = 1
for diag in diags:
html = '<ul>'
if diag.diagnosis_type:
html += '<em>{0}</em>'.format(diag.diagnosis_type)
else:
html += '<em>Diagnosis {0}</em>'.format(count)
if not diag.enrollment_type:
html += '<li><em>Unknown</em></li>'
else:
html += '<li>{0}</li>'.format(diag.enrollment_type)
html += '</ul>'
count += 1
return html
def to_csv(self, value, **context):
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return ''
for diag in diags:
csv = ''
if diag.diagnosis_type:
csv += '{0} - '.format(diag.diagnosis_type)
if not diag.enrollment_type:
csv += 'Unknown,'
else:
csv += '{0},'.format(diag.enrollment_type)
return csv.rstrip(',')
class AltEnrollmentTypeFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return '<em>Not Available</em>'
for diag in diags:
html = '<ul>'
if diag.diagnosis_type:
html += '<em>{0}</em>'.format(diag.diagnosis_type)
if not diag.enrollment_type:
html += '<li><em>Unknown</em></li>'
else:
html += '<li>{0}</li>'.format(diag.enrollment_type)
html += '</ul>'
return html
def to_csv(self, value, **context):
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return ''
for diag in diags:
csv = ''
if diag.diagnosis_type:
csv += '{0} - '.format(diag.diagnosis_type)
if not diag.enrollment_type:
csv += 'Unknown,'
else:
csv += '{0},'.format(diag.enrollment_type)
return csv.rstrip(',')
class LinkAggFormatter(HTMLFormatter):
def to_html(self, values, **kwargs):
from .models import PathFolders, PortalSubject
sub = PortalSubject.objects.get(ehb_id=values['ehb_id'])
sdgs = sub.nautilussubject_set.all()
html = '<i class="icon-info-sign"></i>'
content = "Pathology slide images and scans are provided in .svs format which is viewable using Aperio ImageScope software. <br><br>Aperio ImageScope software can be downloaded <a target=\'_blank\' href=\'http://www.aperio.com/appcenter\'>here</a>"
popover = '<script>$(".icon-info-sign").popover({"html":true,"title":"File format info","content":"' + content + '"})</script>'
urls = ['<ul>']
for sdg in sdgs:
urls.append('<ul><em>{0}</em>'.format(sdg.sample_subject_id))
folders = PathFolders.objects.filter(sample_subject_id=sdg.sample_subject_id)
links = folders.values('description', 'folder_link')
for link in links:
urls.append('<li><a href="{folder_link}">{description}</a></li>'.format(**link))
urls.append('</ul>')
if sdgs and links:
return html + ''.join(urls) + '</ul>' + popover
else:
return ''
to_html.process_multiple = True
def to_csv(self, values, **kwargs):
folders = PathFolders.objects.filter(sample_subject_id=values['sample_subject_id'])
links = folders.values('description', 'folder_link')
_str = ''
for link in links:
_str += '{folder_link},'.format(**link)
return _str
to_csv.process_multiple = True
class AliquotAggFormatter(HTMLFormatter):
field = 'aliquots'
def _object_to_string(self, aliquot):
xstr = lambda s: '' if s is None else str(s)
fmt = '%s %s\n' % (
xstr(aliquot.aliquot_name),
xstr(aliquot.secondary_sample_type))
if aliquot.volume_remaining:
fmt += '<br>\tVolume Remaining: %s %s' % (
xstr(aliquot.volume_remaining),
xstr(aliquot.vol_units))
if aliquot.concentration:
fmt += '<br>\tConcentration: %s %s' % (
xstr(aliquot.concentration),
xstr(aliquot.conc_units))
if aliquot.concentration is None and aliquot.volume_remaining is None:
fmt += '<br>\tVolume and Concentration Unknown'
return fmt
def _object_detail(self, aliquot):
fmt = 'Name: %s' % aliquot.aliquot_name
fmt += '<br>Type: %s' % aliquot.tissue_type
fmt += '<br>Received On: %s' % aliquot.received_on
fmt += '<br>Event: %s' % aliquot.collection_event_name
fmt += '<br>Note: <br> %s' % aliquot.draw_note
try:
if aliquot.sample_type == 'Tissue':
if aliquot.diagnosis_id.diagnosis_type:
fmt += '<br>Associated Diagnosis: <br> %s' % aliquot.diagnosis_id.diagnosis_type
except:
pass
if aliquot.volume_remaining is None or aliquot.volume_received is None:
fmt += '<br>Availability: <i>Unknown</i> <br>'
try:
avail = float(aliquot.volume_received) / float(aliquot.volume_remaining) * 100
except:
avail = 0.00
fmt += '<br>Availability: %s <br>' % ('''<div class=\\\"progress progress-striped\\\"><div class=\\\"bar\\\" style=\\\"width: {}%;\\\"></div></div>'''.format(avail))
return fmt
def _build_html(self, pk):
sdgs = NautilusSubject.objects.filter(ehb_id=pk).all()
visit_aliquot_set = {}
for subject in sdgs:
visits = subject.nautilusvisit_set.all()
visit_aliquot_set[subject.sample_subject_id] = {}
for visit in visits:
visit_aliquot_set[subject.sample_subject_id][visit.visit_name] = {}
for sample_type in visit.nautilusaliquot_set.filter(parent_aliquot_id__isnull=True).distinct('sample_type').all():
visit_aliquot_set[subject.sample_subject_id][visit.visit_name][sample_type.sample_type] = []
for aliq in visit.nautilusaliquot_set.filter(sample_type=sample_type.sample_type).filter(parent_aliquot_id__isnull=True).all():
aliquot = {
'aliquot': self._object_to_string(aliq),
'id': aliq.aliquot_id,
'content': self._object_detail(aliq),
'children': []
}
for child in visit.nautilusaliquot_set.filter(parent_aliquot_id=aliq.aliquot_id).all():
aliquot['children'].append({
'id': child.aliquot_id,
'aliquot': self._object_to_string(child),
'content': self._object_detail(child)
})
visit_aliquot_set[subject.sample_subject_id][visit.visit_name][sample_type.sample_type].append(aliquot)
return visit_aliquot_set
def _build_csv(self, pk, **context):
sdgs = NautilusSubject.objects.filter(ehb_id=pk).all()
aliquots = ''
for sdg in sdgs:
visits = sdg.nautilusvisit_set.all()
for visit in visits:
for aliq in visit.nautilusaliquot_set.all():
if aliq.secondary_sample_code:
aliquots += "{0} - {1},".format(aliq.aliquot_name, aliq.secondary_sample_code)
else:
aliquots += "{0},".format(aliq.aliquot_name)
return aliquots.rstrip(',')
def to_csv(self, value, **context):
return self._build_csv(value)
def to_html(self, value, **context):
return '<button class="btn btn-primary aliquot_button" data-toggle="modal" data-target="#aliquotList" data-id="{0}">Aliquots</button>'.format(value)
def __call__(self, values, preferred_formats=None, **context):
# Create a copy of the preferred formats since each set values may
# be processed slightly differently (e.g. mixed data type in column)
# which could cause exceptions that would not be present during
# processing of other values
if preferred_formats is None:
preferred_formats = self.default_formats
preferred_formats = list(preferred_formats) + ['raw']
# Create a OrderedDict of the values relative to the
# concept fields objects the values represent. This
# enables key-based access to the values rather than
# relying on position.
if not isinstance(values, OrderedDict):
# Wrap single values
if not isinstance(values, (list, tuple)):
values = [values]
values = OrderedDict(zip(self.keys, values))
# Iterate over all preferred formats and attempt to process the values.
# For formatter methods that process all values must be tracked and
# attempted only once. They are removed from the list once attempted.
# If no preferred multi-value methods succeed, each value is processed
# independently with the remaining formats
for f in iter(preferred_formats):
method = getattr(self, u'to_{0}'.format(f), None)
# This formatter does not support this format, remove it
# from the available list
if not method:
preferred_formats.pop(0)
continue
# The implicit behavior when handling multiple values is to process
# them independently since, in most cases, they are not dependent
# on one another, but rather should be represented together since
# the data is related. A formatter method can be flagged to process
# all values together by setting the attribute
# `process_multiple=True`. we must # check to if that flag has been
# set and simply pass through the values and context to the method
# as is. if ``process_multiple`` is not set, each value is handled
# independently
if getattr(method, 'process_multiple', False):
try:
output = method(values, fields=self.fields,
concept=self.concept,
process_multiple=True, **context)
if not isinstance(output, dict):
return OrderedDict([(self.concept.name, output)])
return output
# Remove from the preferred formats list since it failed
except Exception:
if self.concept and self.concept not in self._errors:
self._errors[self.concept] = None
log.warning(u'Multi-value formatter error',
exc_info=True)
preferred_formats.pop(0)
# The output is independent of the input. Formatters may output more
# or less values than what was entered.
output = OrderedDict()
# Attempt to process each
for i, (key, value) in enumerate(values.iteritems()):
for f in preferred_formats:
method = getattr(self, u'to_{0}'.format(f))
field = self.fields[key] if self.fields else None
try:
fvalue = method(value, field=field, concept=self.concept,
process_multiple=False, **context)
if isinstance(fvalue, dict):
output.update(fvalue)
else:
output[self.field] = fvalue
break
except Exception:
if field and field not in self._errors:
self._errors[field] = None
log.warning(u'Single-value formatter error',
exc_info=True)
return output
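# Illustrative sketch (not part of the original module): the dispatch loop in
# __call__ above treats any to_<format> method carrying the attribute
# process_multiple = True as a handler for the whole OrderedDict of values;
# methods without the flag are invoked once per value. A hypothetical
# formatter would opt in like this:
#
#     class ExampleAggFormatter(HTMLFormatter):
#         def to_html(self, values, **context):
#             return ', '.join(str(v) for v in values.values())
#         to_html.process_multiple = True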
class AggregationFormatter(HTMLFormatter):
'''
Formatter that aggregates 1-N relationships where the base model
is related to a PortalSubject
'''
model = None
order_by = None
field = None
def _aggregate(self):
pass
def _aggregates_to_html(self):
aggregates = self._aggregate()
if aggregates:
return '<ul><li>{0}</li></ul>'.format(
'</li><li>'.join(str(v) for v in aggregates))
else:
return '<em> None Listed </em>'
def _aggregates_to_csv(self):
aggregates = self._aggregate()
if aggregates:
return'{0}'.format(','.join(str(v) for v in aggregates))
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
return self._aggregates_to_csv()
def to_html(self, pk, **context):
self.pk = pk
self.context = context
return self._aggregates_to_html()
def __call__(self, values, preferred_formats=None, **context):
# Create a copy of the preferred formats since each set values may
# be processed slightly differently (e.g. mixed data type in column)
# which could cause exceptions that would not be present during
# processing of other values
if preferred_formats is None:
preferred_formats = self.default_formats
preferred_formats = list(preferred_formats) + ['raw']
# Create a OrderedDict of the values relative to the
# concept fields objects the values represent. This
# enables key-based access to the values rather than
# relying on position.
if not isinstance(values, OrderedDict):
# Wrap single values
if not isinstance(values, (list, tuple)):
values = [values]
values = OrderedDict(zip(self.keys, values))
# Iterate over all preferred formats and attempt to process the values.
# For formatter methods that process all values must be tracked and
# attempted only once. They are removed from the list once attempted.
# If no preferred multi-value methods succeed, each value is processed
# independently with the remaining formats
for f in iter(preferred_formats):
method = getattr(self, u'to_{0}'.format(f), None)
# This formatter does not support this format, remove it
# from the available list
if not method:
preferred_formats.pop(0)
continue
# The implicit behavior when handling multiple values is to process
# them independently since, in most cases, they are not dependent
# on one another, but rather should be represented together since
# the data is related. A formatter method can be flagged to process
# all values together by setting the attribute
# `process_multiple=True`. we must # check to if that flag has been
# set and simply pass through the values and context to the method
# as is. if ``process_multiple`` is not set, each value is handled
# independently
if getattr(method, 'process_multiple', False):
try:
output = method(values, fields=self.fields, concept=self.concept, process_multiple=True, **context)
if not isinstance(output, dict):
return OrderedDict([(self.concept.name, output)])
return output
# Remove from the preferred formats list since it failed
except Exception:
if self.concept and self.concept not in self._errors:
self._errors[self.concept] = None
log.warning(u'Multi-value formatter error', exc_info=True)
preferred_formats.pop(0)
# The output is independent of the input. Formatters may output more
# or less values than what was entered.
output = OrderedDict()
# Attempt to process each
for i, (key, value) in enumerate(values.iteritems()):
for f in preferred_formats:
method = getattr(self, u'to_{0}'.format(f))
field = self.fields[key] if self.fields else None
try:
fvalue = method(value, field=field, concept=self.concept, process_multiple=False, **context)
if isinstance(fvalue, dict):
output.update(fvalue)
else:
# Override the key value so that CSV exports have the correct header name
output[self.field] = fvalue
break
except Exception:
raise
if field and field not in self._errors:
self._errors[field] = None
# log.warning(u'Single-value formatter error', exc_info=True)
return output
# Model Specific Base Aggregators
class SubjectAggregationFormatter(AggregationFormatter):
def _aggregate(self):
if self.distinct:
if self.order_by:
aggregates = self.model.objects.filter(ehb_id=self.pk).order_by(self.order_by).distinct().values_list(self.field, flat=True)
else:
aggregates = self.model.objects.filter(ehb_id=self.pk).distinct().values_list(self.field, flat=True)
else:
if self.order_by:
                aggregates = self.model.objects.filter(ehb_id=self.pk).order_by(self.order_by).values_list(self.field, flat=True)
else:
                aggregates = self.model.objects.filter(ehb_id=self.pk).values_list(self.field, flat=True)
if None in aggregates:
return None
else:
return aggregates
class AgeAtDiagAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
if diagnosis.age:
html += '<li>{0} Months</li>'.format(diagnosis.age)
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
if diagnosis.age:
_str += '{0}'.format(diagnosis.age)
else:
_str += ','
return _str
class AgeDescAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
if diagnosis.age:
html += '<li>{0}</li>'.format(diagnosis.age_description)
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
if diagnosis.age:
_str += '{0}'.format(diagnosis.age)
else:
_str += ','
return _str
class AgeYmdAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
if diagnosis.age:
html += '<li>{0}</li>'.format(diagnosis.age_ymd)
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
if diagnosis.age:
_str += '{0}'.format(diagnosis.age)
else:
_str += ','
return _str
class DiagnosisAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for each in aggregates:
html += '<li>{0}</li>'.format(getattr(each, self.field))
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for each in aggregates:
_str += '{0}'.format(getattr(each, self.field))
else:
_str += ','
return _str
class DiagnosisTypeAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
try:
last_dx = diagnosis.monthsbetweendx.months_last_diag
except:
last_dx = None
if last_dx:
label = label + " ({0} months since last Dx)".format(diagnosis.monthsbetweendx.months_last_diag)
html += '<li>{0}</li><ul>'.format(label)
html += '<li>{0}</li>'.format(diagnosis.pathhistology_aggr)
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
_str += '{0};{1},'.format(diagnosis.diagnosis_type, diagnosis.pathhistology_aggr)
else:
_str += 'Diagnosis {0};{1}'.format(diag_count, diagnosis.pathhistology_aggr)
diag_count += 1
return _str
class UpdateAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('date_of_diagnosis').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for update in aggregates:
html += '<li>{0}</li>'.format(update.update_type)
field = getattr(update, self.field)
if field:
html += '<ul><li>{0}</li></ul>'.format(field)
else:
html += '<ul><li>{0}</li></ul>'.format('<em>Unknown</em>')
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
for diagnosis in self._aggregate():
diag_count = 1
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for update in aggregates:
_str += '{0} Month Update,'.format(update.update_month)
field = getattr(update, self.field)
if field:
_str += '{0},'.format(field)
else:
_str += ','
else:
_str += ','
return _str
# Diagnosis Based Aggregations
class PathDiagFormatter(DiagnosisAggregationFormatter):
model = PathHistology
field = 'path_histology'
distinct = True
class MolecularTestsDoneFormatter(DiagnosisAggregationFormatter):
model = TumorOrMolecularTestsD
field = 'tumor_or_molecular_tests_d'
distinct = True
class MetasAtSubmitSiteFormatter(DiagnosisAggregationFormatter):
model = MetasAtSubmitSite
field = 'metas_at_submit_site'
distinct = True
class SubjectClinStatusFormatter(UpdateAggregationFormatter):
model = Update
field = 'clin_status'
distinct = True
# Portal Subject Based Aggregations
class FamilyHistoryFormatter(SubjectAggregationFormatter):
model = MedicalHistoryMain
field = 'family_history'
distinct = True
class TumorLocFormatter(SubjectAggregationFormatter):
model = TumorLocationIn
field = 'tumor_location_in'
distinct = True
class RaceFormatter(SubjectAggregationFormatter):
model = Race
field = 'race'
distinct = True
class RelapseNumberFormatter(SubjectAggregationFormatter):
model = Diagnosis
field = 'relapse_number2_7d6'
distinct = True
order_by = 'date_of_diagnosis'
class SiteOfProgressionFormatter(SubjectAggregationFormatter):
model = Diagnosis
field = 'site_prog'
distinct = True
order_by = 'date_of_diagnosis'
class DiagnosisTypeListFormatter(SubjectAggregationFormatter):
model = Diagnosis
field = 'diagnosis_type'
distinct = True
order_by = 'date_of_diagnosis'
class CancerPredispositionFormatter(SubjectAggregationFormatter):
model = CancPredispCondition
field = 'canc_predisp_condition'
distinct = True
class OtherMedConditionFormatter(SubjectAggregationFormatter):
model = OtherMedCondition
field = 'other_med_condition'
distinct = True
class LimsIDFormatter(SubjectAggregationFormatter):
model = NautilusSubject
field = 'sample_subject_id'
distinct = True
registry.register(PathologyReportFormatter, 'PathologyReportFormatter')
registry.register(OperativeReportFormatter, 'OperativeReportFormatter')
registry.register(AgeDescAggregationFormatter, 'AgeDescAggregationFormatter')
registry.register(AgeAtDiagAggregationFormatter, 'AgeAtDiagAggregationFormatter')
registry.register(AgeYmdAggregationFormatter, 'AgeYmdAggregationFormatter')
registry.register(PatientSummaryFormatter, 'PatientSummaryFormatter')
registry.register(LinkAggFormatter, 'LinkAggFormatter')
registry.register(AliquotAggFormatter, 'AliqAggFormatter')
registry.register(TumorLocFormatter, 'TumorLocFormatter')
registry.register(OtherMedConditionFormatter, 'OtherMedConditionFormatter')
registry.register(PathDiagFormatter, 'PathDiagFormatter')
registry.register(RaceFormatter, 'RaceFormatter')
registry.register(MolecularTestsDoneFormatter, 'MolecularTestsDoneFormatter')
registry.register(DiagnosisTypeListFormatter, 'DiagnosisTypeListFormatter')
registry.register(CancerPredispositionFormatter, 'CancerPredispositionFormatter')
registry.register(RelapseNumberFormatter, 'RelapseNumberFormatter')
registry.register(SiteOfProgressionFormatter, 'SiteOfProgressionFormatter')
registry.register(MetasAtSubmitSiteFormatter, 'MetasAtSubmitSiteFormatter')
registry.register(FamilyHistoryFormatter, 'FamilyHistoryFormatter')
registry.register(SubjectClinStatusFormatter, 'SubjectClinStatusFormatter')
registry.register(LimsIDFormatter, 'LimsIDFormatter')
registry.register(EnrollmentTypeFormatter, 'EnrollmentTypeFormatter')
registry.register(AltEnrollmentTypeFormatter, 'AltEnrollmentTypeFormatter')
registry.register(DiagnosisTypeAggregationFormatter, 'DiagnosisTypeAggregationFormatter')
registry.register(cBioLinkFormatter, 'cBioLinkFormatter')
|
bsd-2-clause
| 5,607,928,141,524,938,000
| 36.690947
| 256
| 0.574032
| false
| 4.045231
| false
| false
| false
|
budurli/python-paytrail
|
paytrail/base.py
|
1
|
2679
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import hmac
import base64
import hashlib
from requests import Request
from paytrail.settings import BASE_API_URL, PAYTRAIL_AUTH_KEY, PAYTRAIL_ID, PAYTRAIL_SECRET
class PaytrailConnectAPIRequest(Request):
def __init__(self, **kwargs):
self.merchant_id = kwargs.pop('merchant_id')
self.merchant_secret = kwargs.pop('merchant_secret')
super(PaytrailConnectAPIRequest, self).__init__(**kwargs)
self.headers['Timestamp'] = self.get_timestamp()
self.headers['Content-MD5'] = self.get_content_md5()
self.headers['Authorization'] = self.get_authorization_signature()
def get_content_md5(self):
        return base64.b64encode(hashlib.md5(self.prepare().body).digest()).decode().strip()
@staticmethod
def get_timestamp():
return str(datetime.now().isoformat())
def get_authorization_signature(self):
base_signature = '\n'.join([
self.method,
self.url,
            'PaytrailConnectAPI {merchant_id}'.format(merchant_id=self.merchant_id),
            self.headers['Timestamp'],
            # The Authorization header does not exist yet when this base string
            # is built (it is set from this method's return value), so the
            # Content-MD5 header computed above is used instead.
            self.headers['Content-MD5'],
        ])
digest = hmac.new(
key=self.merchant_secret,
msg=base_signature,
digestmod=hashlib.sha256
).digest()
signature = base64.b64encode(digest).decode()
        return 'PaytrailConnectAPI {merchant_id}:{signature}'.format(
            merchant_id=self.merchant_id, signature=signature)
class BasePaytrailClient(object):
URL_MAP = {
'authorization':
{
'url': '/connectapi/authorizations',
'method': 'POST'
},
'confirming_authorization':
{
'url': '/connectapi/authorizations/{id}/confirmation',
'method': 'POST'
},
        'invalidating_authorization':
            {
                'url': '/authorizations/{id}',
                'method': 'POST'
            },
'charging': '/connectapi/authorizations/{id}/charges',
'fetching_payment_status': '/connectapi/authorizations/{id}/charges/{id}',
        'fetching_delivery_address': '/connectapi/authorizations/{id}/deliveryAddresses',
}
def __init__(self, base_url=BASE_API_URL, merchant_id=PAYTRAIL_ID, merchant_secret=PAYTRAIL_SECRET):
self.base_url = base_url
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
def authorize(self, auth_key=PAYTRAIL_AUTH_KEY):
pass
def confirm_authorization(self):
pass
test_client = BasePaytrailClient()
test_client.authorize()
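# Illustrative sketch (not part of the original module): how the HMAC-SHA256
# Authorization signature above is derived, using purely hypothetical
# credentials and header values.
if __name__ == '__main__':
    demo_base = '\n'.join([
        'POST',
        'https://api.example.com/connectapi/authorizations',
        'PaytrailConnectAPI demo-merchant',
        '2024-01-01T00:00:00',
        'demo-content-md5',
    ])
    demo_digest = hmac.new(
        key=b'demo-secret',
        msg=demo_base.encode('utf-8'),
        digestmod=hashlib.sha256,
    ).digest()
    print('PaytrailConnectAPI demo-merchant:' +
          base64.b64encode(demo_digest).decode())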
|
mit
| -6,063,210,949,374,005,000
| 29.804598
| 104
| 0.605077
| false
| 3.933921
| false
| false
| false
|
jazzband/site
|
jazzband/projects/models.py
|
1
|
5929
|
import os
from datetime import datetime
from uuid import uuid4
from flask import current_app, safe_join
from flask_login import current_user
from sqlalchemy import func, orm
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy_utils import aggregated, generic_repr
from ..auth import current_user_is_roadie
from ..db import postgres as db
from ..members.models import User
from ..mixins import Syncable
@generic_repr("id", "name")
class Project(db.Model, Syncable):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False, index=True)
normalized_name = orm.column_property(func.normalize_pep426_name(name))
description = db.Column(db.Text)
html_url = db.Column(db.String(255))
subscribers_count = db.Column(db.SmallInteger, default=0, nullable=False)
stargazers_count = db.Column(db.SmallInteger, default=0, nullable=False)
forks_count = db.Column(db.SmallInteger, default=0, nullable=False)
open_issues_count = db.Column(db.SmallInteger, default=0, nullable=False)
is_active = db.Column(db.Boolean, default=True, nullable=False, index=True)
transfer_issue_url = db.Column(db.String(255))
membership = db.relationship("ProjectMembership", backref="project", lazy="dynamic")
credentials = db.relationship(
"ProjectCredential", backref="project", lazy="dynamic"
)
uploads = db.relationship(
"ProjectUpload",
backref="project",
lazy="dynamic",
order_by=lambda: ProjectUpload.ordering.desc().nullslast(),
)
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
pushed_at = db.Column(db.DateTime, nullable=True)
__tablename__ = "projects"
__table_args__ = (
db.Index("release_name_idx", "name"),
db.Index("release_name_is_active_idx", "name", "is_active"),
)
def __str__(self):
return self.name
@aggregated("uploads", db.Column(db.SmallInteger))
def uploads_count(self):
return db.func.count("1")
@property
def current_user_is_member(self):
if not current_user:
return False
elif not current_user.is_authenticated:
return False
elif current_user_is_roadie():
return True
else:
return current_user.id in self.member_ids
@property
def member_ids(self):
return [member.user.id for member in self.membership.all()]
@property
def leads(self):
leads = self.membership.filter(
ProjectMembership.is_lead.is_(True),
ProjectMembership.user_id.in_(
User.active_members().options(orm.load_only("id"))
),
)
return [member.user for member in leads]
@property
def pypi_json_url(self):
return f"https://pypi.org/pypi/{self.normalized_name}/json" # noqa
@generic_repr("id", "project_id", "is_active", "key")
class ProjectCredential(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.Integer, db.ForeignKey("projects.id"))
is_active = db.Column(db.Boolean, default=True, nullable=False, index=True)
key = db.Column(UUID(as_uuid=True), default=uuid4)
__tablename__ = "project_credentials"
__table_args__ = (db.Index("release_key_is_active_idx", "key", "is_active"),)
def __str__(self):
return self.key.hex
@generic_repr("id", "user_id", "project_id", "is_lead")
class ProjectMembership(db.Model):
id = db.Column("id", db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
project_id = db.Column(db.Integer, db.ForeignKey("projects.id"))
joined_at = db.Column(db.DateTime, default=datetime.utcnow)
is_lead = db.Column(db.Boolean, default=False, nullable=False, index=True)
__tablename__ = "project_memberships"
def __str__(self):
return f"User: {self.user}, Project: {self.project}"
@generic_repr("id", "project_id", "filename")
class ProjectUpload(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.Integer, db.ForeignKey("projects.id"))
version = db.Column(db.Text, index=True)
path = db.Column(db.Text, unique=True, index=True)
filename = db.Column(db.Text, unique=True, index=True)
signaturename = orm.column_property(filename + ".asc")
size = db.Column(db.Integer)
md5_digest = db.Column(db.Text, unique=True, nullable=False)
sha256_digest = db.Column(db.Text, unique=True, nullable=False)
blake2_256_digest = db.Column(db.Text, unique=True, nullable=False)
uploaded_at = db.Column(db.DateTime, default=datetime.utcnow)
released_at = db.Column(db.DateTime, nullable=True)
notified_at = db.Column(db.DateTime, nullable=True, index=True)
form_data = db.Column(JSONB)
user_agent = db.Column(db.Text)
remote_addr = db.Column(db.Text)
ordering = db.Column(db.Integer, default=0)
__tablename__ = "project_uploads"
__table_args__ = (
db.CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
db.CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"),
db.Index("project_uploads_project_version", "project_id", "version"),
)
@property
def full_path(self):
# build storage path, e.g.
# /app/uploads/acme/2coffee12345678123123123123123123
return safe_join(current_app.config["UPLOAD_ROOT"], self.path)
@property
def signature_path(self):
return self.full_path + ".asc"
def __str__(self):
return self.filename
@db.event.listens_for(ProjectUpload, "after_delete")
def delete_upload_file(mapper, connection, target):
    # When an upload row is deleted, also remove the stored file (and its
    # detached .asc signature, if present) from disk.
os.remove(target.full_path)
if os.path.exists(target.signature_path):
os.remove(target.signature_path)
|
mit
| 7,964,328,019,318,782,000
| 34.933333
| 88
| 0.661326
| false
| 3.42321
| false
| false
| false
|
olof/svtplay-dl
|
lib/svtplay_dl/service/raw.py
|
1
|
1082
|
from __future__ import absolute_import
import os
import re
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import Service
class Raw(Service):
def get(self):
filename = os.path.basename(self.url[: self.url.rfind("/")])
self.output["title"] = filename
streams = []
if re.search(".f4m", self.url):
self.output["ext"] = "flv"
streams.append(hdsparse(self.config, self.http.request("get", self.url, params={"hdcore": "3.7.0"}), self.url, output=self.output))
if re.search(".m3u8", self.url):
streams.append(hlsparse(self.config, self.http.request("get", self.url), self.url, output=self.output))
if re.search(".mpd", self.url):
streams.append(dashparse(self.config, self.http.request("get", self.url), self.url, output=self.output))
for stream in streams:
if stream:
for n in list(stream.keys()):
yield stream[n]
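# Illustrative sketch (not part of the original service): the extension checks
# in get() above route a manifest URL by suffix, e.g. a hypothetical
# "http://example.com/video/master.m3u8" goes to hlsparse(), a ".f4m" URL goes
# to hdsparse() (requested with hdcore=3.7.0), and a ".mpd" URL to dashparse().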
|
mit
| -1,032,772,615,640,684,500
| 33.903226
| 143
| 0.624769
| false
| 3.456869
| false
| false
| false
|
redhat-cip/dci-ansible
|
callback/dci.py
|
1
|
9044
|
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CM_default
from ansible.release import __version__ as ansible_version
from dciauth.version import __version__ as dciauth_version
from dciclient.v1.api import context as dci_context
from dciclient.v1.api import file as dci_file
from dciclient.v1.api import jobstate as dci_jobstate
from dciclient.version import __version__ as dciclient_version
COMPAT_OPTIONS = (('display_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS),
('display_ok_hosts', True),
('show_custom_stats', C.SHOW_CUSTOM_STATS),
('display_failed_stderr', False),
('check_mode_markers', False),
('show_per_host_start', False))
class CallbackModule(CM_default):
"""This callback module uploads the Ansible output to a DCI control
server."""
CALLBACK_VERSION = '2.0'
CALLBACK_TYPE = 'dci'
CALLBACK_NAME = 'dci'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self._real_display = self._display
self.verbosity = self._display.verbosity
self._display = self
self._jobstate_id = None
self._job_id = None
self._current_status = None
self._dci_context = self._build_dci_context()
self._explicit = False
self._backlog = []
self._name = None
self._content = ''
self._color = None
def get_option(self, name):
for key, val in COMPAT_OPTIONS:
if key == name:
return val
return False
@staticmethod
def _get_details():
"""Method that retrieves the appropriate credentials. """
login = os.getenv('DCI_LOGIN')
password = os.getenv('DCI_PASSWORD')
client_id = os.getenv('DCI_CLIENT_ID')
api_secret = os.getenv('DCI_API_SECRET')
url = os.getenv('DCI_CS_URL', 'https://api.distributed-ci.io')
return login, password, url, client_id, api_secret
def _build_dci_context(self):
login, password, url, client_id, api_secret = self._get_details()
user_agent = ('Ansible/%s (python-dciclient/%s, python-dciauth/%s)'
) % (ansible_version, dciclient_version, dciauth_version)
if login is not None and password is not None:
return dci_context.build_dci_context(url, login, password,
user_agent)
elif client_id is not None and api_secret is not None:
return dci_context.build_signature_context(url, client_id,
api_secret, user_agent)
def display(self, msg, color=None, screen_only=False, *args, **kwargs):
if screen_only:
return
if color is not None:
self._color = color
self._content += msg + '\n'
def banner(self, msg):
# upload the previous content when we have a new banner (start
# of task/play/playbook...)
if self._name:
if self._color == C.COLOR_SKIP:
prefix = 'skipped/'
elif self._color == C.COLOR_UNREACHABLE:
prefix = "unreachable/"
elif self._color == C.COLOR_ERROR:
prefix = 'failed/'
else:
prefix = ''
self.create_file(prefix + self._name,
self._content if self._content != '' else ' ')
self._content = ''
self._name = msg
def deprecated(self, *args, **kwargs):
pass
def create_file(self, name, content):
if self._job_id is None:
self._backlog.append({'name': name, 'content': content})
else:
kwargs = {
'name': name,
'content': content and content.encode('UTF-8'),
'mime': 'application/x-ansible-output',
'job_id': self._job_id,
'jobstate_id': self._jobstate_id
}
dci_file.create(self._dci_context, **kwargs)
def create_jobstate(self, comment, status):
if self._explicit:
return
if not status or self._current_status == status:
return
self._current_status = status
r = dci_jobstate.create(
self._dci_context,
status=self._current_status,
comment=comment,
job_id=self._job_id
)
ns = r.json()
if 'jobstate' in ns and 'id' in ns['jobstate']:
self._jobstate_id = ns['jobstate']['id']
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
# do a fake call to banner to output the last content
self.banner('')
def v2_runner_on_ok(self, result, **kwargs):
"""Event executed after each command when it succeed. Get the output
of the command and create a file associated to the current
jobstate.
"""
# Store the jobstate id when the there is an explicit call to
# set it. Example in a playbook:
#
# dci_job:
# id: "{{ job_id }}"
# status: running
#
# switch to explicit mode (not reacting to the dci_status
# variable anymore).
if ("jobstate" in result._result and
"id" in result._result["jobstate"]):
self._jobstate_id = result._result["jobstate"]["id"]
self._explicit = True
        # Check if the task that just ran was the scheduling of an upgrade
# job. If so, set self._job_id to the new job ID
if (result._task.action == 'dci_job' and (
result._result['invocation']['module_args']['upgrade'] or
result._result['invocation']['module_args']['update'])):
self._job_id = result._result['job']['id']
self.create_jobstate(
comment='starting the update/upgrade',
status='pre-run'
)
elif (result._task.action == 'set_fact' and
'ansible_facts' in result._result and
'job_id' in result._result['ansible_facts'] and
result._result['ansible_facts']['job_id'] is not None):
self._job_id = result._result['ansible_facts']['job_id']
self.create_jobstate(comment='start up', status='new')
for rec in self._backlog:
self.create_file(rec['name'],
rec['content'])
self._backlog = []
super(CallbackModule, self).v2_runner_on_ok(result, **kwargs)
def v2_playbook_on_play_start(self, play):
"""Event executed before each play. Create a new jobstate and save
the current jobstate id.
"""
def _get_comment(play):
""" Return the comment for the new jobstate
            The order of priority is as follows:
* play/vars/dci_comment
* play/name
* '' (Empty String)
"""
if play.get_vars() and 'dci_comment' in play.get_vars():
comment = play.get_vars()['dci_comment']
# If no name has been specified to the play, play.name is equal
# to the hosts value
elif play.name and play.name not in play.hosts:
comment = play.name
else:
comment = ''
return comment
super(CallbackModule, self).v2_playbook_on_play_start(play)
if not self._job_id:
return
comment = _get_comment(play)
self.create_jobstate(
comment=comment,
status=play.get_vars().get('dci_status')
)
def task_name(self, result):
"""Ensure we alway return a string"""
name = result._task.get_name()
# add the included file name in the task's name
if name == 'include_tasks':
if hasattr(result._task, 'get_ds'):
if 'include_tasks' in result._task.get_ds():
name = '%s: %s' % (name, result._task.get_ds()['include_tasks']) # noqa
return name
def v2_runner_on_unreachable(self, result):
self.create_jobstate(comment=self.task_name(result), status='failure')
super(CallbackModule, self).v2_runner_on_unreachable(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
"""Event executed after each command when it fails. Get the output
of the command and create a failure jobstate and a file associated.
"""
if not ignore_errors:
self.create_jobstate(comment=self.task_name(result),
status='failure')
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
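# Minimal usage sketch (illustrative, not part of the original module): since
# CALLBACK_NEEDS_WHITELIST is True, the plugin must be enabled explicitly and
# the credentials read by _get_details() exported beforehand. The file layout
# below is an assumption for the example only.
#
#   # ansible.cfg
#   [defaults]
#   callback_whitelist = dci
#   callback_plugins = ./callback
#
#   # environment: either login/password or client id/API secret
#   export DCI_LOGIN=... DCI_PASSWORD=...
#   export DCI_CS_URL=https://api.distributed-ci.io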
|
apache-2.0
| 2,457,845,636,947,778,000
| 34.466667
| 92
| 0.555617
| false
| 4.08676
| false
| false
| false
|
BetaNYC/tree-one-one
|
app.py
|
1
|
1276
|
from flask import Flask
from flask import jsonify, render_template
#from flask_cors import CORS
import math
import pandas as pd
import os
import datetime
import json
app = Flask(__name__)
@app.route("/")
def default():
return render_template('index.html')
@app.route('/test')
@app.route('/test/<metric>')
def test(metric=None):
global sample
for s in sample:
s['color'] = s[metric+'_col']
return jsonify({'polygons':sample})
def popup_text(s):
return """percent alive: %s<br>
average size: %s<br>
number of species: %s<br>"""%(s['aliveness'],s['average_size'],s['diversity'])
port = os.getenv('VCAP_APP_PORT', '5000')
if __name__ == "__main__":
# run the app
print 'loading the data...'
sample = json.load(open('data/square.json', 'r'))
for s in sample:
del s['']
try:
s['bounds'] = json.loads(s['bounds'])
s['size_col'] = s['dbh_col']
s['diversity_col'] = s['species_col']
s['size'] = s['average_size']
s['popup_text'] = popup_text(s)
except KeyError as e:
#print e, '||', s
continue
print '...done.'
app.run(debug = True)
#app.run(host='0.0.0.0', port=int(port))
|
gpl-3.0
| 3,023,585,623,126,684,000
| 26.148936
| 92
| 0.552508
| false
| 3.340314
| false
| false
| false
|
deepakgupta1313/models
|
video_prediction/prediction_train.py
|
1
|
8670
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input import build_tfrecord_input
from prediction_model import construct_model
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 40
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 2000
# tf record data location:
DATA_DIR = 'push/push_train'
# local output directory
OUT_DIR = '/tmp/data'
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', OUT_DIR, 'directory for model checkpoints.')
flags.DEFINE_string('event_log_dir', OUT_DIR, 'directory for writing summary.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_integer('sequence_length', 10,
'sequence length, including context frames.')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_integer('use_state', 1,
'Whether or not to give the state+action to the model')
flags.DEFINE_string('model', 'CDNA',
'model architecture to use - CDNA, DNA, or STP')
flags.DEFINE_integer('num_masks', 10,
                     'number of masks, usually 1 for DNA, 10 for CDNA, STP.')
flags.DEFINE_float('schedsamp_k', 900.0,
'The k hyperparameter for scheduled sampling,'
'-1 for no scheduled sampling.')
flags.DEFINE_float('train_val_split', 0.95,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
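# Quick numeric check of the two metrics above (illustrative, not part of the
# original file): with pixel values scaled to [0, 1], an MSE of 0.01 gives
# PSNR = 10 * log10(1 / 0.01) = 20 dB, and halving the MSE raises PSNR by
# roughly 3 dB.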
class Model(object):
def __init__(self,
images=None,
actions=None,
states=None,
sequence_length=None,
reuse_scope=None):
if sequence_length is None:
sequence_length = FLAGS.sequence_length
self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
# Split into timesteps.
actions = tf.split(1, actions.get_shape()[1], actions)
actions = [tf.squeeze(act) for act in actions]
states = tf.split(1, states.get_shape()[1], states)
states = [tf.squeeze(st) for st in states]
images = tf.split(1, images.get_shape()[1], images)
images = [tf.squeeze(img) for img in images]
if reuse_scope is None:
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
else: # If it's a validation or test model.
with tf.variable_scope(reuse_scope, reuse=True):
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
# L2 loss, PSNR for eval.
loss, psnr_all = 0.0, 0.0
for i, x, gx in zip(
range(len(gen_images)), images[FLAGS.context_frames:],
gen_images[FLAGS.context_frames - 1:]):
recon_cost = mean_squared_error(x, gx)
psnr_i = peak_signal_to_noise_ratio(x, gx)
psnr_all += psnr_i
summaries.append(
tf.scalar_summary(prefix + '_recon_cost' + str(i), recon_cost))
summaries.append(tf.scalar_summary(prefix + '_psnr' + str(i), psnr_i))
loss += recon_cost
for i, state, gen_state in zip(
range(len(gen_states)), states[FLAGS.context_frames:],
gen_states[FLAGS.context_frames - 1:]):
state_cost = mean_squared_error(state, gen_state) * 1e-4
summaries.append(
tf.scalar_summary(prefix + '_state_cost' + str(i), state_cost))
loss += state_cost
summaries.append(tf.scalar_summary(prefix + '_psnr_all', psnr_all))
self.psnr_all = psnr_all
self.loss = loss = loss / np.float32(len(images) - FLAGS.context_frames)
summaries.append(tf.scalar_summary(prefix + '_loss', loss))
self.lr = tf.placeholder_with_default(FLAGS.learning_rate, ())
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
self.summ_op = tf.merge_summary(summaries)
def main(unused_argv):
print 'Constructing models and inputs.'
with tf.variable_scope('model', reuse=None) as training_scope:
images, actions, states = build_tfrecord_input(training=True)
model = Model(images, actions, states, FLAGS.sequence_length)
with tf.variable_scope('val_model', reuse=None):
val_images, val_actions, val_states = build_tfrecord_input(training=False)
val_model = Model(val_images, val_actions, val_states,
FLAGS.sequence_length, training_scope)
print 'Constructing saver.'
# Make saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.VARIABLES), max_to_keep=0)
# Make training session.
sess = tf.InteractiveSession()
summary_writer = tf.train.SummaryWriter(
FLAGS.event_log_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
tf.train.start_queue_runners(sess)
sess.run(tf.initialize_all_variables())
tf.logging.info('iteration number, cost')
# Run training.
for itr in range(FLAGS.num_iterations):
# Generate new batch of data.
feed_dict = {model.prefix: 'train',
model.iter_num: np.float32(itr),
model.lr: FLAGS.learning_rate}
cost, _, summary_str = sess.run([model.loss, model.train_op, model.summ_op],
feed_dict)
# Print info: iteration #, cost.
tf.logging.info(str(itr) + ' ' + str(cost))
if (itr) % VAL_INTERVAL == 2:
# Run through validation set.
feed_dict = {val_model.lr: 0.0,
val_model.prefix: 'val',
val_model.iter_num: np.float32(itr)}
_, val_summary_str = sess.run([val_model.train_op, val_model.summ_op],
feed_dict)
summary_writer.add_summary(val_summary_str, itr)
if (itr) % SAVE_INTERVAL == 2:
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model' + str(itr))
if (itr) % SUMMARY_INTERVAL:
summary_writer.add_summary(summary_str, itr)
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model')
tf.logging.info('Training complete')
tf.logging.flush()
if __name__ == '__main__':
app.run()
|
apache-2.0
| 3,539,439,255,335,017,500
| 33.819277
| 80
| 0.633218
| false
| 3.59751
| false
| false
| false
|
JavierGarciaD/banking
|
banking/credit/forecast.py
|
1
|
8013
|
# -*- coding: utf-8 -*-
import pandas as pd
from rates.models import InterestRateModel
from credit.prepayment import PrepaymentModel
from common.db_manager import DB
from sqlalchemy import select
from sqlalchemy import and_
from sqlalchemy import asc
def vintage_sett_manual(name, forecast, nper, sdate, repricing, rate_type,
rate_level, notional, scores, pay, prepay, w_off,
rolling, credit_type):
"""
Manual constructor of settings dictionary for a Credit Vintage.
    All data must be provided, no connection to external databases.
:param name:
:param forecast:
:param nper:
:param sdate:
:param repricing:
:param rate_type:
:param rate_level:
:param notional:
:param scores:
:param pay:
:param prepay:
:param w_off:
:param rolling:
:param credit_type:
:return: dictionary
"""
ans_dict = dict()
ans_dict['name'] = name
ans_dict['forecast'] = forecast
ans_dict['nper'] = nper
ans_dict['sdate'] = sdate
ans_dict['repricing'] = repricing
ans_dict['rate_type'] = rate_type
ans_dict['notional'] = notional
ans_dict['index_rates_array'] = InterestRateModel.zero(nper = forecast,
sdate = sdate)
ans_dict['rate_spreads_array'] = InterestRateModel.fixed(nper = forecast,
sdate = sdate,
level =
rate_level)
ans_dict['prepay_array'] = PrepaymentModel.psa(nper = forecast,
ceil = 0.03,
stable_period = 12)
ans_dict['prepay_per_score'] = pd.Series(data = prepay, index = scores)
ans_dict['rolling_m'] = rolling
ans_dict['scores'] = scores
ans_dict['pay_per_score'] = pd.Series(data = pay, index = scores)
ans_dict['writeoff_per_score'] = pd.Series(data = w_off, index = scores)
ans_dict['credit_type'] = credit_type
return ans_dict
def get_contract_info(product_name):
"""
    Get contract info from the forecast database.
    :return: dict with nper, credit_type, rate_type, repricing and rate_spread
    for the given product.
"""
db = DB('forecast')
########################################
# Query contract info
########################################
table = db.table('contract_info')
sql = select([table.c.nper,
table.c.credit_type,
table.c.rate_type,
table.c.repricing,
table.c.rate_spread]).where(
table.c.product_name == product_name)
# execute and fetch result
ans = db.query(sql).fetchone()
ans_dict = dict(nper = int(ans[0]),
credit_type = str(ans[1]),
rate_type = str(ans[2]),
repricing = int(ans[3]),
rate_spread = float(ans[4]))
return ans_dict
def get_credit_info(product_name):
db = DB('forecast')
table = db.table('credit_info')
scores = get_scores()
ans_dict = dict()
sql = select([table.c.payment,
table.c.prepayment,
table.c.provision,
table.c.writeoff]).where(table.c.product_name ==
product_name).order_by(asc('score'))
# Execute and fetch result
ans = db.query(sql)
pay = []
pre = []
prov = []
wo = []
for row in ans:
pay.append(row[0])
pre.append(row[1])
prov.append(row[2])
wo.append(row[3])
ans_dict['pay_per_score'] = pd.Series(data = pay, index = scores)
ans_dict['prepay_per_score'] = pd.Series(data = pre, index = scores)
ans_dict['provision_per_score'] = pd.Series(data = prov, index = scores)
ans_dict['writeoff_per_score'] = pd.Series(data = wo, index = scores)
return ans_dict
def get_scores():
"""
:return: list with available scores
"""
db = DB('forecast')
table = db.table('scores')
sql = select([table.c.score]).order_by(asc('score'))
ans = db.query(sql)
ret = []
for row in ans:
ret.append(int(row[0]))
return ret
def get_rolling(product_name):
"""
    Get the rolling matrices for a specific product.
    :param product_name:
    :return: dict with one rolling matrix per month
"""
db = DB('forecast')
table = db.table('rolling')
scores = get_scores()
ans_dict = dict()
for each_month in range(12):
ret = []
for each_score in scores:
sql = select([table.c.roll0,
table.c.roll30,
table.c.roll60,
table.c.roll90,
table.c.roll120,
table.c.roll150,
table.c.roll180]).where(
and_(table.c.product_name == product_name,
table.c.month == each_month + 1,
table.c.score == each_score))
# Execute and fetch result
ans = list(db.query(sql).fetchone())
ret.append(ans)
ans_dict[each_month + 1] = ret
return ans_dict
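# Illustrative shape of the dict returned by get_rolling() (numbers are made
# up; one 7-column roll vector per score, one entry per month):
#
#   {1: [[0.91, 0.05, 0.02, 0.01, 0.005, 0.003, 0.002],   # first score
#        [0.85, 0.08, 0.03, 0.02, 0.010, 0.006, 0.004]],  # second score
#    2: [...],
#    ...
#    12: [...]}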
def get_budget(product_name, sdate):
"""
    Budget for a product, limited to the data available in the database
:param product_name:
:param sdate: starting date
:return: pandas series
"""
db = DB('forecast')
table = db.table('budget')
sql = select([table.c.budget]).where(table.c.product_name ==
product_name).order_by(asc('month'))
ans = db.query(sql).fetchall()
ret = []
for row in ans:
ret.append(float(row[0]))
date_index = pd.date_range(start = sdate, periods = len(ret), freq = 'M')
return pd.Series(data = ret, index = date_index)
def vintage_sett_db(product_name, sdate, disburment, fore_length,
prepay_array, index_array):
# Gets information from forecast database about the contract_info:
contract_info = get_contract_info(product_name)
# Gets information from forecast database about the credit_info:
credit_info = get_credit_info(product_name)
# spread over index is fixed
spreads_array = InterestRateModel.fixed(nper = fore_length,
sdate = sdate,
level = contract_info[
'rate_spread'])
settings = dict(name = product_name,
nper = contract_info['nper'],
credit_type = contract_info['credit_type'],
rate_type = contract_info['rate_type'],
repricing = contract_info['repricing'],
forecast = int(fore_length),
scores = get_scores(),
sdate = pd.to_datetime(sdate),
notional = float(disburment),
index_rates_array = index_array,
rate_spreads_array=spreads_array,
prepay_array=prepay_array,
prepay_per_score=credit_info['prepay_per_score'],
rolling_m=get_rolling(product_name),
pay_per_score=credit_info['pay_per_score'],
writeoff_per_score=credit_info['writeoff_per_score']
)
return settings
if __name__ == '__main__':
from pprint import pprint
# scr = get_contract_info('tarjeta de credito')
# pprint(scr)
# score = get_scores()
# print(score)
# x = get_rolling('tarjeta de credito')
# print(x)
bdg = get_budget('tarjeta de credito', '31-01-2017')
print(bdg)
# x = get_credit_info('tarjeta de credito')
# pprint(x)
|
mit
| 512,306,728,074,246,300
| 31.840164
| 79
| 0.527393
| false
| 3.893586
| false
| false
| false
|
eirannejad/pyRevit
|
pyrevitlib/pyrevit/revit/selection.py
|
1
|
9893
|
from pyrevit import HOST_APP, DOCS, PyRevitException
from pyrevit import framework, DB, UI
from pyrevit.coreutils.logger import get_logger
from pyrevit.revit import ensure
from pyrevit.revit import query
__all__ = ('pick_element', 'pick_element_by_category',
'pick_elements', 'pick_elements_by_category',
'get_picked_elements', 'get_picked_elements_by_category',
'pick_edge', 'pick_edges',
'pick_face', 'pick_faces',
'pick_linked', 'pick_linkeds',
'pick_elementpoint', 'pick_elementpoints',
'pick_point', 'pick_rectangle', 'get_selection_category_set',
'get_selection')
#pylint: disable=W0703,C0302,C0103
mlogger = get_logger(__name__)
class ElementSelection:
def __init__(self, element_list=None):
if element_list is None:
if HOST_APP.uidoc:
self._refs = \
[x for x in HOST_APP.uidoc.Selection.GetElementIds()]
else:
self._refs = []
else:
self._refs = ElementSelection.get_element_ids(element_list)
def __len__(self):
return len(self._refs)
def __iter__(self):
for elref in self._refs:
yield DOCS.doc.GetElement(elref)
def __getitem__(self, index):
return self.elements[index]
def __contains__(self, item):
if isinstance(item, DB.Element):
elref = item.Id
elif isinstance(item, DB.ElementId):
elref = item
else:
elref = DB.ElementId.InvalidElementId
return elref in self._refs
@classmethod
def get_element_ids(cls, mixed_list):
return ensure.ensure_element_ids(mixed_list)
@property
def is_empty(self):
return len(self._refs) == 0
@property
def elements(self):
return [DOCS.doc.GetElement(x) for x in self._refs]
@property
def element_ids(self):
return self._refs
@property
def first(self):
if self._refs:
return DOCS.doc.GetElement(self._refs[0])
@property
def last(self):
if self._refs:
return DOCS.doc.GetElement(self._refs[-1])
def set_to(self, element_list):
self._refs = ElementSelection.get_element_ids(element_list)
HOST_APP.uidoc.Selection.SetElementIds(
framework.List[DB.ElementId](self._refs)
)
HOST_APP.uidoc.RefreshActiveView()
def append(self, element_list):
self._refs.extend(ElementSelection.get_element_ids(element_list))
self.set_to(self._refs)
def include(self, element_type):
refs = [x for x in self._refs
if isinstance(DOCS.doc.GetElement(x),
element_type)]
return ElementSelection(refs)
def exclude(self, element_type):
refs = [x for x in self._refs
if not isinstance(DOCS.doc.GetElement(x),
element_type)]
return ElementSelection(refs)
def no_views(self):
return self.exclude(DB.View)
def only_views(self):
return self.include(DB.View)
def expand_groups(self):
expanded_refs = []
for element in self.elements:
if isinstance(element, DB.Group):
expanded_refs.extend(element.GetMemberIds())
else:
expanded_refs.append(element.Id)
self._refs = expanded_refs
class PickByCategorySelectionFilter(UI.Selection.ISelectionFilter):
def __init__(self, category_id):
self.category_id = category_id
# standard API override function
def AllowElement(self, element):
if element.Category and self.category_id == element.Category.Id:
return True
else:
return False
# standard API override function
def AllowReference(self, refer, point): # pylint: disable=W0613
return False
def _pick_obj(obj_type, message, multiple=False, world=False, selection_filter=None):
refs = []
try:
mlogger.debug('Picking elements: %s '
'message: %s '
'multiple: %s '
'world: %s', obj_type, message, multiple, world)
# decide which picker method to use
picker_func = HOST_APP.uidoc.Selection.PickObject
if multiple:
picker_func = HOST_APP.uidoc.Selection.PickObjects
# call the correct signature of the picker function
# if selection filter is provided
if selection_filter:
pick_result = \
picker_func(
obj_type,
selection_filter,
message
)
else:
pick_result = \
picker_func(
obj_type,
message
)
# process the results
if multiple:
refs = list(pick_result)
else:
refs = []
refs.append(pick_result)
if not refs:
mlogger.debug('Nothing picked by user...Returning None')
return None
mlogger.debug('Picked elements are: %s', refs)
if obj_type == UI.Selection.ObjectType.Element:
return_values = \
[DOCS.doc.GetElement(ref)
for ref in refs]
elif obj_type == UI.Selection.ObjectType.PointOnElement:
if world:
return_values = [ref.GlobalPoint for ref in refs]
else:
return_values = [ref.UVPoint for ref in refs]
else:
return_values = \
[DOCS.doc.GetElement(ref)
.GetGeometryObjectFromReference(ref)
for ref in refs]
mlogger.debug('Processed return elements are: %s', return_values)
if len(return_values) > 1 or multiple:
return return_values
elif len(return_values) == 1:
return return_values[0]
else:
mlogger.error('Error processing picked elements. '
'return_values should be a list.')
except Exception:
return None
def pick_element(message=''):
return _pick_obj(UI.Selection.ObjectType.Element,
message)
def pick_element_by_category(cat_name_or_builtin, message=''):
category = query.get_category(cat_name_or_builtin)
if category:
pick_filter = PickByCategorySelectionFilter(category.Id)
return _pick_obj(UI.Selection.ObjectType.Element,
message,
selection_filter=pick_filter)
else:
raise PyRevitException("Can not determine category id from: {}"
.format(cat_name_or_builtin))
def pick_elementpoint(message='', world=False):
return _pick_obj(UI.Selection.ObjectType.PointOnElement,
message,
world=world)
def pick_edge(message=''):
return _pick_obj(UI.Selection.ObjectType.Edge,
message)
def pick_face(message=''):
return _pick_obj(UI.Selection.ObjectType.Face,
message)
def pick_linked(message=''):
return _pick_obj(UI.Selection.ObjectType.LinkedElement,
message)
def pick_elements(message=''):
return _pick_obj(UI.Selection.ObjectType.Element,
message,
multiple=True)
def pick_elements_by_category(cat_name_or_builtin, message=''):
category = query.get_category(cat_name_or_builtin)
if category:
pick_filter = PickByCategorySelectionFilter(category.Id)
return _pick_obj(UI.Selection.ObjectType.Element,
message,
multiple=True,
selection_filter=pick_filter)
else:
raise PyRevitException("Can not determine category id from: {}"
.format(cat_name_or_builtin))
def get_picked_elements(message=''):
picked_element = True
while picked_element:
picked_element = pick_element(message=message)
if not picked_element:
break
yield picked_element
def get_picked_elements_by_category(cat_name_or_builtin, message=''):
picked_element = True
while picked_element:
picked_element = pick_element_by_category(cat_name_or_builtin,
message=message)
if not picked_element:
break
yield picked_element
def pick_elementpoints(message='', world=False):
return _pick_obj(UI.Selection.ObjectType.PointOnElement,
message,
multiple=True, world=world)
def pick_edges(message=''):
return _pick_obj(UI.Selection.ObjectType.Edge,
message,
multiple=True)
def pick_faces(message=''):
return _pick_obj(UI.Selection.ObjectType.Face,
message,
multiple=True)
def pick_linkeds(message=''):
return _pick_obj(UI.Selection.ObjectType.LinkedElement,
message,
multiple=True)
def pick_point(message=''):
try:
return HOST_APP.uidoc.Selection.PickPoint(message)
except Exception:
return None
def pick_rectangle(message='', pick_filter=None):
if pick_filter:
return HOST_APP.uidoc.Selection.PickElementsByRectangle(pick_filter,
message)
else:
return HOST_APP.uidoc.Selection.PickElementsByRectangle(message)
def get_selection_category_set():
selection = ElementSelection()
cset = DB.CategorySet()
for element in selection:
cset.Insert(element.Category)
return cset
def get_selection():
return ElementSelection()
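# Minimal usage sketch (assumed, for illustration only; 'Walls' and DB.Wall are
# example picks, not requirements of this module):
#
#   walls = pick_elements_by_category('Walls', message='Pick some walls')
#   for element in get_selection().include(DB.Wall):
#       print(element.Id)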
|
gpl-3.0
| -2,571,831,693,091,678,700
| 28.978788
| 85
| 0.570403
| false
| 4.206207
| false
| false
| false
|
pontikos/uclex_browser
|
lookups.py
|
1
|
17567
|
import re
from utils import *
import itertools
import pysam
import csv
#hpo lookup
import phizz
import random
import pickle
import hashlib
import pprint
import utils
import orm
SEARCH_LIMIT = 10000
# massive genes?
#UNSUPPORTED_QUERIES = ['TTN', 'ENSG00000155657', 'CMD1G', 'CMH9', 'CMPD4', 'FLJ32040', 'LGMD2J', 'MYLK5', 'TMD', u'ENST00000342175', u'ENST00000359218', u'ENST00000342992', u'ENST00000460472', u'ENST00000589042', u'ENST00000591111']
def xpos_to_pos(xpos): return int(xpos % 1e9)
def get_gene(db, gene_id):
print(gene_id)
for g in db.genes.find({'gene_id': gene_id}): print(g)
#return g
return db.genes.find_one({'gene_id': gene_id}, fields={'_id': False})
def get_gene_by_name(db, gene_name):
# try gene_name field first
gene = db.genes.find_one({'gene_name': gene_name}, fields={'_id': False})
if gene:
return gene
# if not, try gene['other_names']
return db.genes.find_one({'other_names': gene_name}, fields={'_id': False})
def get_transcript(db, transcript_id):
transcript = db.transcripts.find_one({'transcript_id': transcript_id}, fields={'_id': False})
if not transcript:
return None
transcript['exons'] = get_exons_in_transcript(db, transcript_id)
return transcript
def get_raw_variant(db, xpos, ref, alt, get_id=False):
return db.variants.find_one({'xpos': xpos, 'ref': ref, 'alt': alt}, fields={'_id': get_id})
def get_variant(db, variant_id):
    # NOTE: this definition is immediately shadowed by get_variant(db, xpos, ref, alt) below
    return db.variants.find_one({'variant_id':variant_id})
def get_variant(db, xpos, ref, alt):
variant = get_raw_variant(db, xpos, ref, alt, False)
print(variant)
if variant is None or 'rsid' not in variant: return variant
if variant['rsid'] == '.' or variant['rsid'] is None:
rsid = db.dbsnp.find_one({'xpos': xpos})
if rsid:
variant['rsid'] = 'rs%s' % rsid['rsid']
return variant
def get_variants_from_dbsnp(db, rsid):
if not rsid.startswith('rs'):
return None
try:
rsid = int(rsid.lstrip('rs'))
except Exception, e:
return None
position = db.dbsnp.find_one({'rsid': rsid})
if position:
variants = list(db.variants.find({'xpos': {'$lte': position['xpos'], '$gte': position['xpos']}}, fields={'_id': False}))
if variants:
#add_consequence_to_variants(variants)
return variants
return []
def get_coverage_for_bases(db, xstart, xstop=None):
"""
Get the coverage for the list of bases given by xstart->xstop, inclusive
Returns list of coverage dicts
xstop can be None if just one base, but you'll still get back a list
"""
if xstop is None:
xstop = xstart
coverages = {
doc['xpos']: doc for doc in db.base_coverage.find(
{'xpos': {'$gte': xstart, '$lte': xstop}},
fields={'_id': False}
)
}
ret = []
for i in range(xstart, xstop+1):
if i in coverages:
ret.append(coverages[i])
else:
ret.append({'xpos': i, 'pos': xpos_to_pos(i)})
for item in ret:
item['has_coverage'] = 'mean' in item
del item['xpos']
print '+++++++++++++++++++++++++++'
temp = db.base_coverage.find({'xpos': {'$gte': xstart, '$lte': xstop}})
from bson.json_util import dumps
dumps(temp)
print xstart
print xstop
print '+++++++++++++++++++++++++++++'
return ret
def get_coverage_for_transcript(db, xstart, xstop=None):
"""
:param db:
:param genomic_coord_to_exon:
:param xstart:
:param xstop:
:return:
"""
coverage_array = get_coverage_for_bases(db, xstart, xstop)
# only return coverages that have coverage (if that makes any sense?)
# return coverage_array
#print '+++++++++++++++++++++++++'
#print coverage_array
#print '+++++++++++++++++++++++++'
covered = [c for c in coverage_array if c['has_coverage']]
for c in covered: del c['has_coverage']
return covered
def get_constraint_for_transcript(db, transcript):
return db.constraint.find_one({'transcript': transcript}, fields={'_id': False})
def get_awesomebar_suggestions(g, query):
"""
    Generate autocomplete suggestions as the user types.
    `query` is the string the user has typed so far; if it is a prefix of a
    gene name, return the list of matching gene names.
"""
regex = re.compile('^' + re.escape(query), re.IGNORECASE)
results = (r for r in g.autocomplete_strings if regex.match(r))
results = itertools.islice(results, 0, 20)
return list(results)
# 1:1-1000
R1 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)-(\d+)$')
R2 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)$')
R3 = re.compile(r'^(\d+|X|Y|M|MT)$')
R4 = re.compile(r'^(\d+|X|Y|M|MT)\s*[-:]\s*(\d+)-([ATCG]+)-([ATCG]+)$')
def get_awesomebar_result(db, query):
"""
Similar to the above, but this is after a user types enter
We need to figure out what they meant - could be gene, variant, region
Return tuple of (datatype, identifier)
Where datatype is one of 'gene', 'variant', or 'region'
And identifier is one of:
- ensembl ID for gene
- variant ID string for variant (eg. 1-1000-A-T)
- region ID string for region (eg. 1-1000-2000)
Follow these steps:
- if query is an ensembl ID, return it
- if a gene symbol, return that gene's ensembl ID
- if an RSID, return that variant's string
    Finally, note that we don't return the whole object here - only its identifier.
This could be important for performance later
"""
query = query.strip()
print 'Query: %s' % query
if query.startswith('HP:'):
description=phizz.query_hpo([query])
#description=hpo_db.hpo.find_one({'hpo_id':query})
return 'hpo', query
if query.startswith('MIM'):
disease=phizz.query_disease([query])
return 'mim', query
# Variant
variant = orm.get_variants_by_rsid(db, query.lower())
if variant:
if len(variant) == 1:
return 'variant', variant[0]['variant_id']
else:
return 'dbsnp_variant_set', variant[0]['rsid']
variant = get_variants_from_dbsnp(db, query.lower())
if variant:
return 'variant', variant[0]['variant_id']
# variant = get_variant(db, )
# TODO - https://github.com/brettpthomas/exac_browser/issues/14
gene = get_gene_by_name(db, query)
if gene:
return 'gene', gene['gene_id']
# From here out, all should be uppercase (gene, tx, region, variant_id)
query = query.upper()
gene = get_gene_by_name(db, query)
if gene:
return 'gene', gene['gene_id']
# Ensembl formatted queries
if query.startswith('ENS'):
# Gene
gene = get_gene(db, query)
if gene:
return 'gene', gene['gene_id']
# Transcript
transcript = get_transcript(db, query)
if transcript:
return 'transcript', transcript['transcript_id']
# From here on out, only region queries
if query.startswith('CHR'):
query = query.lstrip('CHR')
# Region
m = R1.match(query)
if m:
if int(m.group(3)) < int(m.group(2)):
return 'region', 'invalid'
return 'region', '{}-{}-{}'.format(m.group(1), m.group(2), m.group(3))
m = R2.match(query)
if m:
return 'region', '{}-{}-{}'.format(m.group(1), m.group(2), m.group(2))
m = R3.match(query)
if m:
return 'region', '{}'.format(m.group(1))
m = R4.match(query)
if m:
return 'variant', '{}-{}-{}-{}'.format(m.group(1), m.group(2), m.group(3), m.group(4))
return 'not_found', query
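# Illustrative return values for get_awesomebar_result (identifiers are
# examples only):
#   'rs12345'          -> ('variant', '1-1000-A-T') or ('dbsnp_variant_set', rsid)
#   'ENSG00000157764'  -> ('gene', 'ENSG00000157764')
#   '1:1000-2000'      -> ('region', '1-1000-2000')
#   '1-1000-A-T'       -> ('variant', '1-1000-A-T')
#   anything unmatched -> ('not_found', query)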
def get_genes_in_region(db, chrom, start, stop):
"""
Genes that overlap a region
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
genes = db.genes.find({ 'xstart': {'$lte': xstop}, 'xstop': {'$gte': xstart}, }, fields={'_id': False})
return list(genes)
def get_variants_in_region(db, chrom, start, stop):
"""
Variants that overlap a region
Unclear if this will include CNVs
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
variants = list(db.variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart}
}, fields={'_id': False}, limit=SEARCH_LIMIT))
#add_consequence_to_variants(variants)
return list(variants)
def remove_extraneous_information(variant):
return
del variant['genotype_depths']
del variant['genotype_qualities']
del variant['transcripts']
del variant['genes']
del variant['orig_alt_alleles']
del variant['xpos']
del variant['xstart']
del variant['xstop']
del variant['site_quality']
del variant['vep_annotations']
def get_transcripts_in_gene(db, gene_id):
"""
"""
return list(db.transcripts.find({'gene_id': gene_id}, fields={'_id': False}))
def get_exons_in_transcript(db, transcript_id):
# return sorted(
# [x for x in
# db.exons.find({'transcript_id': transcript_id}, fields={'_id': False})
# if x['feature_type'] != 'exon'],
# key=lambda k: k['start'])
return sorted(list(db.exons.find({'transcript_id': transcript_id, 'feature_type': { "$in": ['CDS', 'UTR', 'exon'] }}, fields={'_id': False})), key=lambda k: k['start'])
def get_hpo_patients(hpo_db, patients_db, hpo_id):
"""
Get patients with HPO term.
"""
patients = [p for p in patients_db.patients.find({'features.id':hpo_id}) for f in p['features'] if f['id']== hpo_id and f['observed']=='yes']
print(hpo_id,len(patients))
for r in hpo_db.hpo.find({'is_a':hpo_id}):
for i in r['id']: patients+=list(itertools.chain(get_hpo_patients(hpo_db,patients_db,i)))
#remove duplicates
patients={v['external_id']:v for v in patients}.values()
return patients
# return hpo terms found in people in which variant is found
def get_hpo(variant_str):
samples=get_samples(variant_str)
#chrom,pos,ref,alt,=str(variant_str.strip()).split('-')
d=csv.DictReader(file('/data/uclex_data/UCLexInfo/uclex-samples.csv','r'),delimiter=',')
hpo=[]
for r in d:
if r['sample'] not in samples: continue
pheno=r['phenotype']
print((r['sample'],pheno,))
if pheno.startswith('HP'):
hpo+=[phizz.query_hpo([pheno])]
elif pheno.startswith('MIM'):
hpo+=[phizz.query_disease([pheno])]
return(hpo)
def get_hpo_children(hpo_db, hpo_id):
hpo=[hpo_db.hpo.find_one({'id':hpo_id})]
for r in hpo_db.hpo.find({'is_a':hpo_id}):
for i in r['id']:
hpo+=list(itertools.chain(get_hpo_children(hpo_db,i)))
#remove duplicates
hpo={h['id'][0]:h for h in hpo}.values()
return hpo
def replace_hpo(hpo_db, hpo):
# some hpo_ids are obsolete.
record = hpo_db.hpo.find_one({'id':hpo[0]})
if not record:
print 'no record in replace_hpo'
print hpo
if 'replaced_by' in record:
new = hpo_db.hpo.find_one({'id':record['replaced_by'][0]})
return [new['id'][0], new['name'][0]]
else:
return hpo
def get_hpo_ancestors(hpo_db, hpo_id):
"""
Get HPO terms higher up in the hierarchy.
"""
h=hpo_db.hpo.find_one({'id':hpo_id})
#print(hpo_id,h)
if 'replaced_by' in h:
# not primary id, replace with primary id and try again
h = hpo_db.hpo.find_one({'id':h['replaced_by'][0]})
hpo=[h]
if 'is_a' not in h: return hpo
for hpo_parent_id in h['is_a']:
#p=hpo_db.hpo.find({'id':hpo_parent_id}):
hpo+=list(itertools.chain(get_hpo_ancestors(hpo_db,hpo_parent_id)))
#remove duplicates
hpo={h['id'][0]:h for h in hpo}.values()
return hpo
def get_hpo_ancestors_array(hpo_db, hpo_id):
# return an array of ids, instead of array of dicts
anc = get_hpo_ancestors(hpo_db, hpo_id)
result = []
for a in anc:
result.extend(a['id'])
return result
def get_hpo_size_freq(freq_file):
# read freq file
# result = {'HP:0000345':{size: 456, freq: 0.1, raw: 456/4500}}
hpo_freq = {}
inf = open(freq_file, 'r')
for l in inf:
l = l.rstrip().split('\t')
nums = l[1].split('/')
size = int(nums[0])
tot = float(nums[1])
hpo_freq[l[0]] = {'size': size, 'freq': size/tot, 'raw': l[1]}
return hpo_freq
def get_hpo_common_ancestors(hpo_db, h1, h2):
# return a list of hpo ids for h1 and h2's common ancestors
a1 = get_hpo_ancestors(hpo_db, h1)
a2 = get_hpo_ancestors(hpo_db,h2)
an1 = []
an2 = []
for a in a1:
an1.extend(a['id'])
for a in a2:
an2.extend(a['id'])
return list(set(an1) & set(an2))
def get_hpo_nearest_common_ancestors(hpo_db, h1, h2, hpo_freq):
# given hpo_freq, find out a list of nearest common ancestors
common_ans = get_hpo_common_ancestors(hpo_db, h1, h2)
freqs = [hpo_freq[h] for h in common_ans]
min_freq = min(freqs)
inds = [i for i, v in enumerate(freqs) if v == min_freq]
return [common_ans[i] for i in inds]
def hpo_minimum_set(hpo_db, hpo_ids=[]):
'''
minimize the hpo sets
results = {'HP:0000505': [ancestors]}
'''
hpo_ids = list(set(hpo_ids))
results = dict([(hpo_id, [ h['id'][0] for h in get_hpo_ancestors(hpo_db, hpo_id)],) for hpo_id in hpo_ids])
# minimise
bad_ids = []
for i in range(len(hpo_ids)):
for j in range(i+1,len(hpo_ids)):
if hpo_ids[i] in results[hpo_ids[j]]:
# i is j's ancestor, remove
bad_ids.append(hpo_ids[i])
break
if hpo_ids[j] in results[hpo_ids[i]]:
# j is i's ancestor, remove
bad_ids.append(hpo_ids[j])
return list(set(hpo_ids) - set(bad_ids))
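# Illustrative behaviour of hpo_minimum_set (HPO ids are examples only): if one
# term is an ancestor of another, e.g. 'HP:0000478' (Abnormality of the eye)
# and its descendant 'HP:0000505' (Visual impairment), only the more specific
# 'HP:0000505' is kept.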
def get_patient_hpo(hpo_db,patients_db, patient_id,ancestors=True):
"""
Get complete hierarchy of HPO terms for patient.
"""
p=patients_db.patients.find_one({'external_id':patient_id})
if 'features' not in p: return []
if ancestors:
hpo_ancestors=[]
for hpo_ids in [f['id'] for f in p['features'] if f['observed']=='yes']:
hpo_ancestors+=get_hpo_ancestors(hpo_db,hpo_ids)
# remove duplicates
hpo_ancestors={h['id'][0]:h for h in hpo_ancestors}.values()
return hpo_ancestors
else:
return [ hpo_db.hpo.find_one({'id':f['id']}) for f in p['features'] if f['observed']=='yes']
def get_gene_hpo(hpo_db,gene_name,dot=True):
"""
Get all HPO terms linked to gene name, including ancestors.
and return as dot string for plotting if dot is True.
"""
hpo_ids=[hpo['HPO-Term-ID'] for hpo in hpo_db.OMIM_ALL_FREQUENCIES_genes_to_phenotype.find({'entrez-gene-symbol':gene_name})]
if not hpo_ids:
hpo_ids=hpo_db.genes_pheno.find_one({'gene':gene_name})
# no hpo linked to gene
if hpo_ids is None: hpo_ids=[]
else: hpo_ids=hpo_ids['hpo']
hpo_ancestors=[get_hpo_ancestors(hpo_db,hid) for hid in hpo_ids]
hpo_ancestors=list(itertools.chain(*hpo_ancestors))
# remove duplicates
hpo_ancestors={h['id'][0]:h for h in hpo_ancestors}.values()
hpo_string="digraph {"
for h in hpo_ancestors:
hpo_id=h['id'][0]
hpo_label=h['name'][0]
#hpo_count=0
hpo_string+= '"{}" [style="filled", fixedsize="true", fontsize="15", shape="circle", width="0.75", fillcolor="powderblue", label="{}\n{}", color="transparent"];\n'.format(hpo_id,hpo_label,hpo_id)
for h in hpo_ancestors:
hpo_id=h['id'][0]
if 'is_a' not in h: continue
for anc in h['is_a']:
hpo_string+='"{}" -> "{}" [color="#000000", lty="solid"];\n'.format(anc,hpo_id)
hpo_string+= '}'
if dot:
return hpo_string
else:
return hpo_ancestors
# get hpo terms shared between patients
def common_hpo(hpo_db,patients_db,patient_ids):
terms_by_patient=[get_patient_hpo(hpo_db,patients_db,pid) for pid in patient_ids]
# intersection of lists
common_hpo_term_ids=frozenset.intersection(*[frozenset([y['id'][0] for y in x]) for x in terms_by_patient])
# remove ancestors
#get_hpo_ancestors(hpo_db, hpo_id):
# lookup hpo terms
common_hpo_terms=[hpo_db.hpo.find_one({'id':hpo_id}) for hpo_id in common_hpo_term_ids]
return common_hpo_terms
# get union of hpo terms seen in patients
def union_hpo(hpo_db,patients_db,patient_ids):
terms_by_patient=[get_patient_hpo(hpo_db,patients_db,pid) for pid in patient_ids]
#flatten lists
terms_by_patient=list(itertools.chain(*terms_by_patient))
# intersection of lists
terms_by_patient={h['id'][0]:h for h in terms_by_patient}.values()
return terms_by_patient
# VCF gene query
def variants_in_gene_vcf(gene_symbol):
import mygene
mg = mygene.MyGeneInfo()
g=mg.query('symbol:%s' % gene_symbol, fields='exons', species='human')
print g
exons=g['hits'][0]['exons']
for transcript in exons:
yield (transcript, exons[transcript],)
def get_patient_observed_hpo(patient, patient_db):
# returns [('HP:0000001', 'hell yeah')]
this_patient = patient_db.patients.find_one({'external_id':patient})
result = [(None, None)]
if not this_patient:
#print 'ERROR: %s not in patients db' % patient
pass
else:
if 'features' not in this_patient:
print 'WARNING: features not in ' + patient
p_features = this_patient.get('features', [{'id':'HP:0000001', 'label':'All', 'observed': 'yes' }])
result = [(f['id'], f['label']) for f in p_features if f['observed']=='yes']
return result
|
mit
| -3,338,054,858,156,872,700
| 33.51277
| 233
| 0.599989
| false
| 3.060986
| false
| false
| false
|
partofthething/home-assistant
|
homeassistant/components/tado/binary_sensor.py
|
1
|
7734
|
"""Support for Tado sensors for each zone."""
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_WINDOW,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
TYPE_AIR_CONDITIONING,
TYPE_BATTERY,
TYPE_HEATING,
TYPE_HOT_WATER,
TYPE_POWER,
)
from .entity import TadoDeviceEntity, TadoZoneEntity
_LOGGER = logging.getLogger(__name__)
DEVICE_SENSORS = {
TYPE_BATTERY: [
"battery state",
"connection state",
],
TYPE_POWER: [
"connection state",
],
}
ZONE_SENSORS = {
TYPE_HEATING: [
"power",
"link",
"overlay",
"early start",
"open window",
],
TYPE_AIR_CONDITIONING: [
"power",
"link",
"overlay",
"open window",
],
TYPE_HOT_WATER: ["power", "link", "overlay"],
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up the Tado sensor platform."""
tado = hass.data[DOMAIN][entry.entry_id][DATA]
devices = tado.devices
zones = tado.zones
entities = []
# Create device sensors
for device in devices:
if "batteryState" in device:
device_type = TYPE_BATTERY
else:
device_type = TYPE_POWER
entities.extend(
[
TadoDeviceBinarySensor(tado, device, variable)
for variable in DEVICE_SENSORS[device_type]
]
)
# Create zone sensors
for zone in zones:
zone_type = zone["type"]
if zone_type not in ZONE_SENSORS:
_LOGGER.warning("Unknown zone type skipped: %s", zone_type)
continue
entities.extend(
[
TadoZoneBinarySensor(tado, zone["name"], zone["id"], variable)
for variable in ZONE_SENSORS[zone_type]
]
)
if entities:
async_add_entities(entities, True)
class TadoDeviceBinarySensor(TadoDeviceEntity, BinarySensorEntity):
"""Representation of a tado Sensor."""
def __init__(self, tado, device_info, device_variable):
"""Initialize of the Tado Sensor."""
self._tado = tado
super().__init__(device_info)
self.device_variable = device_variable
self._unique_id = f"{device_variable} {self.device_id} {tado.home_id}"
self._state = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "device", self.device_id
),
self._async_update_callback,
)
)
self._async_update_device_data()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.device_name} {self.device_variable}"
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
if self.device_variable == "battery state":
return DEVICE_CLASS_BATTERY
if self.device_variable == "connection state":
return DEVICE_CLASS_CONNECTIVITY
return None
@callback
def _async_update_callback(self):
"""Update and write state."""
self._async_update_device_data()
self.async_write_ha_state()
@callback
def _async_update_device_data(self):
"""Handle update callbacks."""
try:
self._device_info = self._tado.data["device"][self.device_id]
except KeyError:
return
if self.device_variable == "battery state":
self._state = self._device_info["batteryState"] == "LOW"
elif self.device_variable == "connection state":
self._state = self._device_info.get("connectionState", {}).get(
"value", False
)
class TadoZoneBinarySensor(TadoZoneEntity, BinarySensorEntity):
"""Representation of a tado Sensor."""
def __init__(self, tado, zone_name, zone_id, zone_variable):
"""Initialize of the Tado Sensor."""
self._tado = tado
super().__init__(zone_name, tado.home_id, zone_id)
self.zone_variable = zone_variable
self._unique_id = f"{zone_variable} {zone_id} {tado.home_id}"
self._state = None
self._state_attributes = None
self._tado_zone_data = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "zone", self.zone_id
),
self._async_update_callback,
)
)
self._async_update_zone_data()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.zone_name} {self.zone_variable}"
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
if self.zone_variable == "early start":
return DEVICE_CLASS_POWER
if self.zone_variable == "link":
return DEVICE_CLASS_CONNECTIVITY
if self.zone_variable == "open window":
return DEVICE_CLASS_WINDOW
if self.zone_variable == "overlay":
return DEVICE_CLASS_POWER
if self.zone_variable == "power":
return DEVICE_CLASS_POWER
return None
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
@callback
def _async_update_callback(self):
"""Update and write state."""
self._async_update_zone_data()
self.async_write_ha_state()
@callback
def _async_update_zone_data(self):
"""Handle update callbacks."""
try:
self._tado_zone_data = self._tado.data["zone"][self.zone_id]
except KeyError:
return
if self.zone_variable == "power":
self._state = self._tado_zone_data.power == "ON"
elif self.zone_variable == "link":
self._state = self._tado_zone_data.link == "ONLINE"
elif self.zone_variable == "overlay":
self._state = self._tado_zone_data.overlay_active
if self._tado_zone_data.overlay_active:
self._state_attributes = {
"termination": self._tado_zone_data.overlay_termination_type
}
elif self.zone_variable == "early start":
self._state = self._tado_zone_data.preparation
elif self.zone_variable == "open window":
self._state = bool(
self._tado_zone_data.open_window
or self._tado_zone_data.open_window_detected
)
self._state_attributes = self._tado_zone_data.open_window_attr
|
mit
| -1,611,426,186,836,054,300
| 27.32967
| 80
| 0.571115
| false
| 4.023933
| false
| false
| false
|
whiler/a-walk-in-python
|
spider/spider.py
|
1
|
10172
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# file: spider.py
# author: whiler
# license: BSD
# Import the http.cookiejar module for cookie support
import http.cookiejar
# Import the logging module to record runtime logs
import logging
# Import the socket module to catch HTTP timeout errors
import socket
# Import the urllib.request module to handle HTTP connections and build HTTP requests
import urllib.request
# Import urllib.error to catch HTTPError errors
import urllib.error
# Import the urllib.parse module to process URLs
import urllib.parse
# Import the third-party lxml.html package to parse HTML
import lxml.html
# Configure the basic logging format
logging.basicConfig(level=logging.NOTSET,
format='[%(levelname)s]\t%(asctime)s\t%(message)s',
datefmt='%Y-%m-%d %H:%M:%S %Z')
class Spider(object):
"""
定义一个 Spider 类,实现简单的网络爬虫
"""
def __init__(self, seeds, store):
"""
:seeds: 种子地址列
:store: 数据存储
"""
self.seeds = seeds
self.store = store
# 伪装成一个正常的浏览器,需要的请求头信息
self.headers = {
# 伪装成 Firefox 浏览器
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:46.0) Gecko/20100101 Firefox/46.0',
# 假装通过 http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/index.html 访问到这些被抓取的网页
'Referer': 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/index.html'
}
# 使用一个集合来记录已经访问过的链接地址
self.visited = set()
# 支持 Cookie
self.cookie = http.cookiejar.CookieJar()
self.processor = urllib.request.HTTPCookieProcessor(self.cookie)
self.opener = urllib.request.build_opener(self.processor)
def download(self, url):
"""
下载一个链接地址的内容
:url: 链接地址
"""
# 记录正在下载的地址
logging.debug('downloading ' + url)
# 构建一个 HTTP 请求
request = urllib.request.Request(url, headers=self.headers)
# 默认读取到的内容为空
raw = b''
# 开始捕获异常
try:
# 创建一个 HTTP 连接,并设置 10 秒超时
connection = self.opener.open(request, timeout=10.0)
# 从连接读取内容
raw = connection.read()
except urllib.error.HTTPError:
# 若发生 HTTPError 错误,记下连接地址
msg = 'download [' + url + '] raised urllib.request.HTTPError, skiped'
logging.exception(msg)
except socket.timeout:
# 若发生超时异常,记下连接地址
msg = 'download [' + url + '] raised socket.timeout, skiped'
logging.exception(msg)
except Exception:
# 若发生其他异常错误,记下连接地址
msg = 'download [' + url + '] failed, skiped'
logging.exception(msg)
else:
# 没有发生异常错误,关闭连接
connection.close()
# 将读取到的内容按照 GB18030 字符集编码方式进行解码
content = raw.decode('gb18030')
return content
def extract_urls(self, url, content):
"""
从内容中抽取链接地址
:url: 内容的来源地址
:content: 内容
"""
# 使用一个列表来保存链接地址
urls = list()
# 在 HTML 文件中,链接的 CSS 选择器是 a
selector = 'a'
# 使用 lxml.html.fromstring 解析 HTML 内容,构建节点树
root = lxml.html.fromstring(content)
# 遍历节点树中的每一个链接节点
for node in root.cssselect(selector):
# 获得链接节点的 href 属性值
relative = node.attrib.get('href', '')
if relative:
# 使用 urllib.parse 的 urljoin 函数,将相对地址转换为实际地址
real = urllib.parse.urljoin(url, relative)
# 将实际的地址添加到地址列表中
urls.append(real)
return urls
def extract_something(self, url, content):
"""
从内容中抽取感兴趣的信息
:url: 内容的来源地址
:content: 内容
"""
# 使用 lxml.html.fromstring 解析 HTML 内容,构建节点树
root = lxml.html.fromstring(content)
# 使用字典来保存每一个地区名称和区划代码
locations = dict()
# 使用 http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/ 的长度作为计算当前链接地址相对地址的偏移量
offset = len('http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/')
# 截取当前链接地址偏移量以后的字符串作为相对地址
relative = url[offset:]
# 相对地址中 / 的数目为行政层级
depth = relative.count('/')
if 'index.html' == relative:
# 省、自治区、直辖市和特别行政区
# 表格中链接的文字就是区域的名称,链接地址中的数字就是区划代码
# 表格的单元格中的链接的 CSS 选择器
selector = '.provincetr td a'
# 遍历每一个链接节点
for node in root.cssselect(selector):
# 获得链接节点的链接地址,这是一个相对地址
href = node.attrib.get('href', '')
# 在链接地址中找到 . 的偏移量
offset = href.find('.')
# 截取链接地址中 . 以前的字符串作为区划代码
code = href[:offset]
# 追加 0 ,补齐12位
code = code.ljust(12, '0')
# 链接的文字内容就是区域名称
name = node.text_content()
# 将区划代码和名称关联起来
locations[code] = name
        elif depth < 4:
            # Cities, districts/counties, townships, villages/communities
            # In every row the first cell holds the division code and the last cell holds the region name
            if 0 == depth:
                # The first level is cities
                selector = '.citytr'
            elif 1 == depth:
                # The second level is districts and counties
                selector = '.countytr'
            elif 2 == depth:
                # The third level is townships
                selector = '.towntr'
            else:
                # The fourth level is villages and communities
                selector = '.villagetr'
            # Iterate over the nodes, processing the table row by row
            for node in root.cssselect(selector):
                # Get all cells in this row
                cells = node.cssselect('td')
                # The first cell holds the division code
                code = cells[0].text_content()
                # The last cell holds the name
                name = cells[-1].text_content()
                if code.isdigit():
                    # Only an all-digit string is a valid division code
                    locations[code] = name
        else:
            # Villager groups and community resident groups have no division code
            logging.warn(url)
        return locations
def dump(self, locations):
"""
保存抓取得到的数据
:locations: 从每一个网页抓取得到的区域字典
"""
return self.store(locations)
def filter(self, urls):
"""
过滤链接地址
:urls: 链接地址列表
"""
# 使用集合保存允许抓取的链接地址
allowed = set()
# 遍历地址列表中的每一个地址
for url in urls:
if url in self.visited:
# 已经访问过的地址,不允许再抓取
continue
elif not url.startswith('http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/'):
# 不抓取不是以 http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/ 开始的链接地址
continue
else:
# 将以 http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/ 开始,又没有访问过的地址,添加到允许访问的列表中
allowed.add(url)
return allowed
def work(self):
"""
开始干活
"""
# 遍历种子地址中的每一个地址
for url in self.seeds:
# 抓取一个地址
self.crawl(url)
return 0
def crawl(self, url):
"""
抓取一个地址
:url: 链接地址
"""
# 下载链接地址对应的内容
content = self.download(url)
if not content:
# 没有内容,退出抓取
return
# 将链接地址添加到访问过的地址集合中
self.visited.add(url)
# 从内容中抽取感兴趣的内容
data = self.extract_something(url, content)
# 保存抓取得到的数据
self.dump(data)
# 从内容中抽取链接地址
urls = self.extract_urls(url, content)
# 过滤抽取得到的链接地址
allowed = self.filter(urls)
# 遍历这些地址
for url in allowed:
# 抓取一个地址
self.crawl(url)
def store(locations):
"""
保存解析得到的数据,仅仅打印出来。
"""
for code, name in locations.items():
msg = '[' + code + ']:' + name
logging.debug(msg)
return 0
if '__main__' == __name__:
logging.info('begin')
    # List of seed URLs
    seeds = ['http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/index.html']
    # Create a Spider object
    spider = Spider(seeds, store)
    # Call the spider's work method to start crawling
spider.work()
logging.info('finish')
|
cc0-1.0
| 5,744,109,774,003,198,000
| 24.326923
| 111
| 0.51468
| false
| 2.255781
| false
| false
| false
|
karulont/combopt
|
project5/draw.py
|
1
|
1977
|
import turtle
OFFSET=200
MULTIPLE=10
def draw(n,paths):
cords=[(0,0),(n,0),(n,n),(0,n),(0,0)]
turtle.penup()
for c in cords:
turtle.setpos(getCoord(c[0]),getCoord(c[1]))
turtle.pendown()
## turtle.left(90)
## turtle.penup()
## turtle.goto(-OFFSET,-OFFSET)
## turtle.pendown()
prevz=-1
for path in paths:
turtle.penup()
for stepi in range(1,len(path)-1):
step=path[stepi]
if len(step)==2:
continue
x,y,z=step
turtle.pencolor(getrgb(z))
turtle.setpos(getCoord(x),getCoord(y))
print(stepi)
## if stepi==1 or stepi==(len(path)-2):
if prevz!=z or stepi==1 or stepi==(len(path)-2):
turtle.write(str(x)+","+str(y))
prevz=z
turtle.setpos(getCoord(x),getCoord(y))
turtle.pendown()
turtle.ht()
input()
def getCoord(x):
return x*MULTIPLE-OFFSET
def getrgb(z):
x=[0,0,0]
x[z]=turtle.colormode()
return tuple(x)
if __name__=="__main__":
draw(48,[[[21, 0], (21, 0, 0), (22, 0, 0), (23, 0, 0), (24, 0, 0), (25, 0, 0), (26, 0, 0), (27, 0, 0), (28, 0, 0), (29, 0, 0), (30, 0, 0), (31, 0, 0), (32, 0, 0), (33, 0, 0), (34, 0, 0), (35, 0, 0), (36, 0, 0), (37, 0, 0), (38, 0, 0), (39, 0, 0), (40, 0, 0), (41, 0, 0), [41, 0]],
[[34, 0], (34, 0, 1), (34, 1, 1), (34, 2, 1), (34, 3, 1), (34, 4, 1), (34, 5, 1), (34, 6, 1), (34, 7, 1), (34, 8, 1), (34, 9, 1), (34, 10, 1), (34, 11, 1), (34, 12, 1), (34, 13, 1), (34, 14, 1), (34, 15, 1), (34, 16, 1), (34, 17, 1), (34, 18, 1), (34, 19, 1), (34, 20, 1), (34, 21, 1), (34, 22, 1), (34, 23, 1), (34, 24, 1), (34, 25, 1), (34, 26, 1), (34, 27, 1), (34, 28, 1), (34, 28, 0), (35, 28, 0), (36, 28, 0), (37, 28, 0), (38, 28, 0), (39, 28, 0), (40, 28, 0), (41, 28, 0), (42, 28, 0), (43, 28, 0), (44, 28, 0), (45, 28, 0), (46, 28, 0), (47, 28, 0), [47, 28]]])
|
mit
| 3,498,382,939,808,764,000
| 38.54
| 579
| 0.438543
| false
| 2.359189
| false
| false
| false
|
joaks1/PyMsBayes
|
pymsbayes/utils/functions.py
|
1
|
4664
|
#! /usr/bin/env python
import sys
import os
import errno
import random
import string
from pymsbayes.utils import GLOBAL_RNG
from pymsbayes.fileio import process_file_arg
def mkdr(path):
"""
Creates directory `path`, but suppresses error if `path` already exists.
"""
try:
os.makedirs(path)
except OSError, e:
if e.errno == errno.EEXIST:
pass
else:
raise e
def mk_new_dir(path):
attempt = -1
while True:
try:
if attempt < 0:
os.makedirs(path)
return path
else:
p = path.rstrip(os.path.sep) + '-' + str(attempt)
os.makedirs(p)
return p
except OSError, e:
if e.errno == errno.EEXIST:
attempt += 1
continue
else:
raise e
def get_new_path(path, max_attempts = 1000):
path = os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
if not os.path.exists(path):
f = open(path, 'w')
f.close()
return path
attempt = 0
while True:
p = '-'.join([path, str(attempt)])
if not os.path.exists(p):
f = open(p, 'w')
f.close()
return p
if attempt >= max_attempts:
raise Exception('failed to get unique path')
attempt += 1
def get_sublist_greater_than(values, threshold):
return [v for v in values if v > threshold]
def frange(start, stop, num_steps, include_end_point = False):
inc = (float(stop - start) / num_steps)
for i in range(num_steps):
yield start + (i * inc)
if include_end_point:
yield stop
def random_str(length=8,
char_pool=string.ascii_letters + string.digits):
return ''.join(random.choice(char_pool) for i in range(length))
def get_random_int(rng = GLOBAL_RNG):
return rng.randint(1, 999999999)
def get_indices_of_patterns(target_list, regex_list, sort=True):
indices = []
for regex in regex_list:
indices.extend([i for i, e in enumerate(target_list) if regex.match(e)])
if sort:
return sorted(indices)
return indices
def get_indices_of_strings(target_list, string_list, sort=True):
indices = []
for s in string_list:
indices.extend([i for i, e in enumerate(target_list) if s.strip() == e.strip()])
if sort:
return sorted(indices)
return indices
def list_splitter(l, n, by_size=False):
"""
Returns generator that yields list `l` as `n` sublists, or as `n`-sized
sublists if `by_size` is True.
"""
if n < 1:
raise StopIteration
elif by_size:
for i in range(0, len(l), n):
yield l[i:i+n]
else:
if n > len(l):
n = len(l)
step_size = len(l)/int(n)
if step_size < 1:
step_size = 1
# for i in range(0, len(l), step_size):
# yield l[i:i+step_size]
i = -step_size
for i in range(0, ((n-1)*step_size), step_size):
yield l[i:i+step_size]
yield l[i+step_size:]
def whereis(file_name):
"""
Returns the first absolute path to `file_name` encountered in $PATH.
Returns `None` if `file_name` is not found in $PATH.
"""
paths = os.environ.get('PATH', '').split(':')
for path in paths:
abs_path = os.path.join(path, file_name)
if os.path.exists(abs_path) and not os.path.isdir(abs_path):
return abs_path
break
return None
def is_file(path):
if not path:
return False
if not os.path.isfile(path):
return False
return True
def is_dir(path):
if not path:
return False
if not os.path.isdir(path):
return False
return True
def is_executable(path):
return is_file(path) and os.access(path, os.X_OK)
def which(exe):
if is_executable(exe):
return exe
name = os.path.basename(exe)
for p in os.environ['PATH'].split(os.pathsep):
p = p.strip('"')
exe_path = os.path.join(p, name)
if is_executable(exe_path):
return exe_path
return None
def long_division(dividend, diviser):
n, d = int(dividend), int(diviser)
quotient = n / d
remainder = n - (d * quotient)
return quotient, remainder
def get_tolerance(num_prior_samples, num_posterior_samples):
return num_posterior_samples / float(num_prior_samples)
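# Least common multiple of a list of integers: repeatedly add the original value to the smallest running multiple until all entries agree.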
def least_common_multiple(x):
y = [i for i in x]
while True:
if len(set(y)) == 1:
return y[0]
min_index = y.index(min(y))
y[min_index] += x[min_index]
|
gpl-3.0
| -502,087,976,988,867,900
| 26.116279
| 88
| 0.566681
| false
| 3.424376
| false
| false
| false
|
minlexx/skype_movie_bot
|
classes/yandex_translate.py
|
1
|
2217
|
# -*- coding: utf-8 -*-
import sys
import collections
# external libraries
import requests
import requests.exceptions
class YandexTranslate:
def __init__(self, yandex_api_key: str):
self._apikey = yandex_api_key
self._yt_url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
def translate(self, q: str, src_lang: str, dst_lang: str, fmt: str = 'plain') -> str:
"""
Translates string using Yandex translation service
        :param q: string to translate
        :param src_lang: source lang code (e.g. 'ja')
:param dst_lang: dest lang code ('en')
:param fmt: text format: 'plain' or 'html'
:return: translated string
"""
retval = ''
if fmt not in ['plain', 'html']:
raise ValueError('fmt must be plain or html!')
params = collections.OrderedDict()
params['key'] = self._apikey
params['text'] = q
params['lang'] = src_lang + '-' + dst_lang
params['format'] = fmt
try:
r = requests.get(self._yt_url, params=params)
r.raise_for_status()
response = r.json()
if type(response) == dict:
if 'text' in response:
retval = response['text']
except requests.exceptions.RequestException as re:
sys.stderr.write('Network error: {0}'.format(str(re)))
return retval
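# Simple manual smoke test: translates a few Japanese strings and prints the results (requires a valid API key).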
def test_yandextranslate(yandex_api_key: str):
yt = YandexTranslate(yandex_api_key)
res = yt.translate('はい', 'ja', 'en')
print(res)
res = yt.translate('少女', 'ja', 'en')
print(res)
res = yt.translate('カグラ使われが送るワイバーン生活 0日目(テスト動画)', 'ja', 'en')
print(res)
def yandex_translate_jp_en(text: str) -> str:
yt = YandexTranslate('trnsl.1.1.20160418T102823Z.888167e74b48bd0b.1c6431f34c3e545d654a8f77054d609de0a87ce3')
    return yt.translate(text, 'ja', 'en')  # Yandex expects the ISO 639-1 code 'ja' for Japanese
if __name__ == '__main__':
api = 'trnsl.1.1.20160418T102823Z.888167e74b48bd0b.1c6431f34c3e545d654a8f77054d609de0a87ce3'
test_yandextranslate(api)
|
gpl-3.0
| -2,401,171,451,816,979,500
| 29.779412
| 112
| 0.576122
| false
| 3.122832
| false
| false
| false
|
azizmb/django-activity-stream
|
actstream/templatetags/activity_tags.py
|
1
|
6757
|
from django.template import Variable, Library, Node, TemplateSyntaxError,\
VariableDoesNotExist
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from actstream.models import Follow
register = Library()
def _is_following_helper(context, actor):
return Follow.objects.is_following(context.get('user'), actor)
class DisplayActivityFollowLabel(Node):
def __init__(self, actor, follow, unfollow):
self.actor = Variable(actor)
self.follow = follow
self.unfollow = unfollow
def render(self, context):
actor_instance = self.actor.resolve(context)
if _is_following_helper(context, actor_instance):
return self.unfollow
return self.follow
def do_activity_follow_label(parser, tokens):
bits = tokens.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError, "Accepted format {% activity_follow_label [instance] [follow_string] [unfollow_string] %}"
else:
return DisplayActivityFollowLabel(*bits[1:])
class DisplayActivityFollowUrl(Node):
def __init__(self, actor):
self.actor = Variable(actor)
def render(self, context):
actor_instance = self.actor.resolve(context)
content_type = ContentType.objects.get_for_model(actor_instance).pk
if _is_following_helper(context, actor_instance):
return reverse('actstream_unfollow', kwargs={'content_type_id': content_type, 'object_id': actor_instance.pk})
return reverse('actstream_follow', kwargs={'content_type_id': content_type, 'object_id': actor_instance.pk})
def do_activity_follow_url(parser, tokens):
bits = tokens.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "Accepted format {% activity_follow_url [instance] %}"
else:
return DisplayActivityFollowUrl(bits[1])
@register.simple_tag
def activity_followers_url(instance):
content_type = ContentType.objects.get_for_model(instance).pk
return reverse('actstream_followers',
kwargs={'content_type_id': content_type, 'object_id': instance.pk})
@register.simple_tag
def activity_followers_count(instance):
return Follow.objects.for_object(instance).count()
class AsNode(Node):
"""
Base template Node class for template tags that takes a predefined number
of arguments, ending in an optional 'as var' section.
"""
args_count = 1
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse and return a Node.
"""
bits = token.contents.split()
args_count = len(bits) - 1
if args_count >= 2 and bits[-2] == 'as':
as_var = bits[-1]
args_count -= 2
else:
as_var = None
if args_count != cls.args_count:
arg_list = ' '.join(['[arg]' * cls.args_count])
raise TemplateSyntaxError("Accepted formats {%% %(tagname)s "
"%(args)s %%} or {%% %(tagname)s %(args)s as [var] %%}" %
{'tagname': bits[0], 'args': arg_list})
args = [parser.compile_filter(token) for token in
bits[1:args_count + 1]]
return cls(args, varname=as_var)
def __init__(self, args, varname=None):
self.args = args
self.varname = varname
def render(self, context):
result = self.render_result(context)
if self.varname is not None:
context[self.varname] = result
return ''
return result
def render_result(self, context):
raise NotImplementedError("Must be implemented by a subclass")
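# Renders a possessive label for the action's actor ("your" when the actor is the requesting user, otherwise "<name>'s"), followed by the actor's label.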
class DisplayActionLabel(AsNode):
def render_result(self, context):
actor_instance = self.args[0].resolve(context)
try:
user = Variable("request.user").resolve(context)
except VariableDoesNotExist:
user = None
try:
if user and user == actor_instance.user:
result = " your "
else:
result = " %s's " % (actor_instance.user.get_full_name() or
actor_instance.user.username)
except ValueError:
result = ""
result += actor_instance.get_label()
return result
class DisplayAction(AsNode):
def render_result(self, context):
action_instance = self.args[0].resolve(context)
templates = [
'activity/%s/action.html' % action_instance.verb.replace(' ', '_'),
'activity/action.html',
]
return render_to_string(templates, {'action': action_instance},
context)
class DisplayActionShort(Node):
def __init__(self, action, varname=None):
self.action = Variable(action)
self.varname = varname
def render(self, context):
        action_instance = self.action.resolve(context)
templates = [
'activity/%s/action.html' % action_instance.verb.replace(' ', '_'),
'activity/action.html',
]
return render_to_string(templates, {'action': action_instance,
'hide_actor': True}, context)
class DisplayGroupedActions(AsNode):
def render(self, context):
actions_instance = self.args[0].resolve(context)
templates = [
'activity/%s/action.html' %
actions_instance.verb.replace(' ', '_'),
'activity/action.html',
]
return render_to_string(templates, {'actions': actions_instance},
context)
class UserContentTypeNode(Node):
def __init__(self, *args):
self.args = args
def render(self, context):
context[self.args[-1]] = ContentType.objects.get_for_model(User)
return ''
def display_action(parser, token):
return DisplayAction.handle_token(parser, token)
def display_action_short(parser, token):
return DisplayActionShort.handle_token(parser, token)
def display_grouped_actions(parser, token):
return DisplayGroupedActions.handle_token(parser, token)
def action_label(parser, token):
return DisplayActionLabel.handle_token(parser, token)
# TODO: remove this, it's heinous
def get_user_contenttype(parser, token):
return UserContentTypeNode(*token.split_contents())
def is_following(user, actor):
return Follow.objects.is_following(user, actor)
register.filter(is_following)
register.tag(display_action)
register.tag(display_action_short)
register.tag(display_grouped_actions)
register.tag(action_label)
register.tag(get_user_contenttype)
register.tag('activity_follow_url', do_activity_follow_url)
register.tag('activity_follow_label', do_activity_follow_label)
|
bsd-3-clause
| -565,753,785,344,135,000
| 31.485577
| 125
| 0.641261
| false
| 3.965376
| false
| false
| false
|
vsfs/vsfs-bench
|
vsbench/slurm.py
|
1
|
10482
|
#!/usr/bin/env python
#SBATCH --time=12:00:00
#SBATCH --mem-per-cpu=1024
#SBATCH --partition=guest
#SBATCH --error=job.%J.err
#SBATCH --output=job.%J.out
#SBATCH --cpus-per-task=1
#
# Author: Lei Xu <eddyxu@gmail.com>
#
# TODO(eddyxu): generalize this to all drivers
from __future__ import print_function
from subprocess import check_output
from fabric.api import lcd, local, settings
from fabric.colors import yellow, red
import argparse
import importlib
import os
import sys
import time
sys.path.append('..')
SCRIPT_DIR = os.path.abspath(os.curdir)
VSBENCH = os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'bin/vsbench'))
FABFILE = os.path.join(SCRIPT_DIR, 'fabfile.py')
fabfile = None # module
def prepare_cluster(driver, num_shard):
"""
"""
print(yellow('Preparing cluster..'), file=sys.stderr)
with settings(warn_only=True), lcd(os.path.join(SCRIPT_DIR, driver)):
if driver == 'vsfs':
#local('fab start:%d,%d' % (num_shard, num_shard))
local('fab start:%d,%d' % (2, num_shard))
else:
local('fab start:%d' % num_shard)
def destory_cluster(driver):
print(red('Shutting down the cluster.'), file=sys.stderr)
with settings(warn_only=True), lcd(os.path.join(SCRIPT_DIR, driver)):
local('fab stop')
def populate_namesapce(driver, nfiles, nindices):
"""Populate the namespace with 'nfiles' files and 'nindices' index.
@param driver the name of file search driver. (vsfs/mysql/voltdb..)
@param nfiles number of files in the namespace.
@param nindices number of indexes in the namespace.
"""
print(yellow('Populating namespace...'), file=sys.stderr)
print(yellow('Importing files...'), file=sys.stderr)
check_output('srun -n 10 %s -driver %s -%s_host %s '
'-op import -records_per_index %d' %
(VSBENCH, driver, driver, fabfile.env['head'], nfiles / 10),
shell=True)
print(yellow('Building %s indices...' % nindices),
file=sys.stderr)
check_output('%s -driver %s -%s_host %s -op create_indices '
'-num_indices %d' %
(VSBENCH, driver, driver, fabfile.env['head'], nindices),
shell=True)
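# Run vsbench on all allocated client nodes, either through plain srun or through mpirun when MPI-based synchronisation is requested.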
def parallel_run(params, mpi=False, debug=False):
"""Parallelly running clients
"""
if mpi:
run_cmd = 'mpirun --mca orte_base_help_aggregate 0 '
else:
run_cmd = 'srun '
run_cmd += ' %s -driver %s -%s_host %s ' % \
(VSBENCH, args.driver, args.driver, fabfile.env['head'])
run_cmd += params
if mpi:
run_cmd += ' -mpi'
if debug:
print(run_cmd, file=sys.stderr)
print(run_cmd)
check_output(run_cmd, shell=True)
def create_indices(args, num_indices):
    print(yellow('Initializing DB and creating indices...'), file=sys.stderr)
driver = args.driver
cmd = '%s -driver %s -%s_host %s -op create_indices -num_indices %d' % \
(VSBENCH, driver, driver, fabfile.env['head'], num_indices)
if driver == 'mysql':
cmd += ' -mysql_schema single'
check_output(cmd, shell=True)
def test_index(args):
"""Test indexing performance
"""
num_indices = 63 # Max indices supported in mongodb
def run_test(args):
"""
"""
params = '-op insert -num_indices 2 -records_per_index %d' % \
(args.total / num_indices)
if args.driver == 'mysql':
params += ' -cal_prefix -mysql_schema single'
parallel_run(params, args.mpi)
driver = args.driver
args.output.write("# Shard\tTotal\tLatency\n")
destory_cluster(args.driver)
time.sleep(3)
shard_confs = map(int, args.shards.split(','))
for shard in shard_confs:
prepare_cluster(args.driver, shard)
time.sleep(3)
if driver != 'mongodb':
            # MongoDB's indices are created when starting the cluster. Calling
            # "vsbench -op create_indices" crashes the benchmark. Need to
# investigate later.
create_indices(args, num_indices)
print('Importing files.', file=sys.stderr)
params = '-op import -records_per_index %d' % \
(args.total / num_indices)
parallel_run(params)
print('Run insert for %d shard' % shard, file=sys.stderr)
start_time = time.time()
run_test(args)
end_time = time.time()
args.output.write('%d %d %0.2f\n' %
(shard, args.total, end_time - start_time))
args.output.flush()
destory_cluster(args.driver)
def test_open_index(args):
"""Use open loop to test the latency of VSFS.
"""
#ntasks = int(os.environ.get('SLURM_NTASKS'))
driver = 'vsfs'
prepare_cluster(driver, 16)
time.sleep(5)
populate_namesapce(args.driver, args.total, 240)
return
print(yellow('Run insert in open loop'), file=sys.stderr)
params = '%s -driver %s -%s_host %s -op insert ' \
'-num_indices 2 -records_per_index %d -batch_size 1 -latency' % \
(VSBENCH, driver, driver, fabfile.env['head'], args.total)
parallel_run(params)
def test_search(args):
args.output.write("# Shard Latency\n")
num_files = args.nfiles
shard_confs = map(int, args.shards.split(','))
destory_cluster(args.driver)
time.sleep(3)
for shard in shard_confs:
prepare_cluster(args.driver, shard)
time.sleep(10)
num_indices = 100
populate_namesapce(args.driver, num_files, num_indices)
check_output('srun -n 10 %s -driver %s -%s_host %s '
'-op insert -num_indices 10 -records_per_index %s' %
(VSBENCH, args.driver, args.driver, fabfile.env['head'],
num_files),
shell=True)
start_time = time.time()
search_cmd = '%s -driver %s -%s_host %s -op search -query "%s"' % \
(VSBENCH, args.driver, args.driver, fabfile.env['head'],
"/foo/bar?index0>10000&index0<20000")
print(search_cmd)
check_output(search_cmd, shell=True)
end_time = time.time()
args.output.write('%d %0.2f\n' % (shard, end_time - start_time))
args.output.flush()
destory_cluster(args.driver)
args.output.close()
def test_open_search(args):
"""Test search latency in open loop.
"""
def run_test(args):
"""
"""
parallel_run('-op open_search -num_indices 20', args.mpi)
args.output.write("# Shard Latency\n")
shard_confs = map(int, args.shards.split(','))
    destory_cluster(args.driver)
time.sleep(3)
for shard in shard_confs:
prepare_cluster(args.driver, shard)
time.sleep(3)
populate_namesapce(args.driver, 100000, 100)
check_output('srun -n 10 %s -driver %s -%s_host %s '
'-op insert -num_indices 2 -records_per_index 50000' %
(VSBENCH, args.driver, args.driver, fabfile.env['head']),
shell=True)
start_time = time.time()
run_test(args)
end_time = time.time()
args.output.write('%d %0.2f\n' % (shard, end_time - start_time))
args.output.flush()
    destory_cluster(args.driver)
args.output.close()
def avail_drivers():
drivers = []
for subdir in os.listdir(SCRIPT_DIR):
if os.path.exists(os.path.join(subdir, 'fabfile.py')):
drivers.append(subdir)
return drivers
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage='sbatch -n NUM_CLIENTS %(prog)s [options] TEST',
description='run VSFS benchmark on sandhills (SLURM).',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--driver', metavar='NAME', default='mongodb',
choices=avail_drivers(),
help='available drivers: %(choices)s')
parser.add_argument(
'-s', '--shards', metavar='N0,N1,N2..',
default=','.join(map(str, range(2, 21, 2))),
        help='Comma separated string of the numbers of shard servers to '
'test against')
parser.add_argument('--mpi', action="store_true", default=False,
help='use MPI to synchronize clients.')
parser.add_argument('-o', '--output', type=argparse.FileType('w'),
default='slurm_results.txt', metavar='FILE',
help='set output file')
subparsers = parser.add_subparsers(help='Available tests')
parser_index = subparsers.add_parser(
'index', help='test indexing performance',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_index.add_argument(
'-t', '--total', type=int, default=10**7, metavar='NUM',
help='Total number of index records.')
parser_index.add_argument(
'-i', '--index', type=int, default=63, metavar='NUM',
help='Number of indices')
parser_index.add_argument('--id')
parser_index.set_defaults(func=test_index)
parser_open_index = subparsers.add_parser(
'open_index', help='test indexing in open loop to measure latency',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_open_index.add_argument(
'-b', '--batch', type=int, default=1, metavar='NUM',
help='set the batch size')
parser_open_index.add_argument(
'-t', '--total', type=int, default=10**4, metavar='NUM',
help='Set the number of records to index.'
)
parser_open_index.set_defaults(func=test_open_index)
parser_search = subparsers.add_parser(
'search', help='test searching performance',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_search.add_argument(
'-n', '--nfiles', type=int, default=100000, metavar='NUM',
help='set number of files.')
parser_search.set_defaults(func=test_search)
parser_open_search = subparsers.add_parser(
'open_search', help='test searching in open loop to measure latency.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_open_search.add_argument(
'-n', '--nfiles', type=int, default=100000, metavar='NUM',
help='set number of files.')
parser_open_search.set_defaults(func=test_open_search)
args = parser.parse_args()
module_name = 'vsbench.%s.fabfile' % args.driver
fabfile = importlib.import_module(module_name)
args.func(args)
|
apache-2.0
| 4,452,980,237,569,532,400
| 35.778947
| 78
| 0.60103
| false
| 3.499833
| true
| false
| false
|
dgisser/mapio
|
calendar_sample.py
|
1
|
1577
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Calendar API.
Command-line application that retrieves the list of the user's calendars."""
import sys
from oauth2client import client
from googleapiclient import sample_tools  # provides the init() helper used in main()
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'calendar', 'v3', __doc__, __file__,
scope='https://www.googleapis.com/auth/calendar.readonly')
try:
page_token = None
while True:
calendar_list = service.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
print calendar_list_entry['summary']
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize.')
if __name__ == '__main__':
main(sys.argv)
|
mit
| -1,054,553,263,972,978,800
| 32.553191
| 81
| 0.710843
| false
| 3.9425
| false
| false
| false
|
abeconnelly/untap
|
scripts/collect_samples_from_profile_data.py
|
1
|
2707
|
#!/usr/bin/python
import subprocess as sp
import sys
import json
import re
import csv
import os
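# Scrapes the "Samples" table from a saved PGP-HMS profile page (parsed with the external `pup` HTML extractor) and prints one tab-separated row per collection-log entry.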
debug=False
header=False
URL="https://my.pgp-hms.org"
if len(sys.argv)<2:
print "provide HTML file to parse"
sys.exit(1)
fn = sys.argv[1]
if debug: print "# processing:", fn
if len(sys.argv)<3:
print "provide huID"
sys.exit(1)
huid = sys.argv[2]
with open(fn) as ifp:
#ifp = open(fn)
pup_json = json.loads(sp.check_output(['pup', 'h3:contains("Samples") + div table tbody json{}'], stdin=ifp))
ready = False
CollectionEvent = []
curEvent = {}
curEvent["human_id"] = huid
curEvent["log"] = []
#.[0].children[].children[0].children
if len(pup_json) == 0:
sys.exit(0)
data = pup_json[0]["children"]
for x in data:
z = x["children"][0]
tag = z["tag"]
if tag == "th":
h = z["children"][0]
txt = h["text"]
href = h["href"]
if ready:
CollectionEvent.append(curEvent)
curEvent = {}
curEvent["human_id"] = huid
curEvent["log"] = []
ready = True
curEvent["href"] = URL + href
curEvent["text"] = txt
if debug: print "+++", href, txt
else:
description = re.sub(r'\s+', ' ', z["text"]).strip()
curEvent["description"] = description
ens = z["children"][1]["children"][0]["children"][0]["children"]
if debug: print ">>>", description
for en in ens:
en_date = ""
en_value = ""
en_descr = ""
if "children" not in en:
continue
row = en["children"]
if (len(row)>0) and ("text" in row[0]):
en_date = row[0]["text"]
if (len(row)>1) and ("text" in row[1]):
en_value = row[1]["text"]
if (len(row)>2) and ("text" in row[2]):
en_descr = row[2]["text"]
#en_date = en["children"][0]["text"]
#en_value = en["children"][1]["text"]
#en_descr = en["children"][2]["text"]
curEvent["log"].append( { "date" : en_date, "value" : en_value, "description" : en_descr })
if debug: print ">>>", en_date, ":", en_value, ":", en_descr
continue
CollectionEvent.append(curEvent)
if debug: print json.dumps(CollectionEvent)
writer = csv.writer(sys.stdout, delimiter='\t', lineterminator="\n")
if header:
writer.writerow([ "human_id", "href", "text", "description", "log_date", "log_text", "log_description" ])
for ev in CollectionEvent:
for log in ev["log"]:
writer.writerow([ ev["human_id"], ev["href"], ev["text"], ev["description"], log["date"], log["value"], log["description"] ])
|
agpl-3.0
| 6,023,896,672,139,526,000
| 23.169643
| 133
| 0.531954
| false
| 3.30122
| false
| false
| false
|
edeposit/marcxml2mods
|
setup.py
|
1
|
1723
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from setuptools import setup, find_packages
from docs import getVersion
# Variables ===================================================================
CHANGELOG = open('CHANGES.rst').read()
LONG_DESCRIPTION = "\n\n".join([
open('README.rst').read(),
open('CONTRIBUTORS.rst').read(),
CHANGELOG
])
# Functions ===================================================================
setup(
name='marcxml2mods',
version=getVersion(CHANGELOG),
description="Conversion from MARCXML/OAI to MODS, which is used in NK CZ.",
long_description=LONG_DESCRIPTION,
url='https://github.com/edeposit/marcxml2mods',
author='Edeposit team',
author_email='edeposit@email.cz',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Topic :: Text Processing :: Markup :: XML",
"Topic :: Software Development :: Libraries :: Python Modules",
],
license='MIT',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
# scripts=[''],
zip_safe=False,
install_requires=[
"lxml",
"xmltodict",
"pydhtmlparser>=2.1.4",
"marcxml_parser",
"remove_hairs",
],
extras_require={
"test": [
"pytest"
],
"docs": [
"sphinx",
"sphinxcontrib-napoleon",
]
},
)
|
mit
| -1,199,779,438,846,476,800
| 23.971014
| 79
| 0.506674
| false
| 4.202439
| false
| true
| false
|
buffer/thug
|
thug/Classifier/URLClassifier.py
|
1
|
2222
|
#!/usr/bin/env python
#
# URLClassifier.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Original code written by Thorsten Sick <thorsten.sick@avira.com>
# from Avira (developed for the iTES Project http://ites-project.org)
#
# Modified by Angelo Dell'Aera:
# - Designed the more generic Classifier module and embedded this
# module into such module
# - Converted to YARA rules
import logging
from .BaseClassifier import BaseClassifier
log = logging.getLogger("Thug")
class URLClassifier(BaseClassifier):
default_rule_file = "rules/urlclassifier.yar"
default_filter_file = "rules/urlfilter.yar"
_classifier = "URL Classifier"
def __init__(self):
BaseClassifier.__init__(self)
def classify(self, url):
for match in self.rules.match(data = url):
self.matches.append((url, match))
if self.discard_url_match(url, match):
continue
self.handle_match_etags(match)
rule = match.rule
meta = match.meta
tags = ",".join([" ".join(t.split('_')) for t in match.tags])
log.ThugLogging.log_classifier("url", url, rule, tags, meta)
for c in self.custom_classifiers:
self.custom_classifiers[c](url)
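    # Return True when the URL matches any YARA filter rule; every match is logged through the Thug classifier log.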
def filter(self, url):
ret = False
for match in self.filters.match(data = url):
rule = match.rule
meta = match.meta
tags = ",".join([" ".join(t.split('_')) for t in match.tags])
log.ThugLogging.log_classifier("urlfilter", url, rule, tags, meta)
ret = True
return ret
|
gpl-2.0
| -949,605,761,486,316,300
| 31.676471
| 78
| 0.653015
| false
| 3.864348
| false
| false
| false
|
ntim/g4sipm
|
sample/run/luigi/dynamic_range_simulation_bretz.py
|
1
|
2691
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import luigi
import sqlite3
import glob
import numpy as np
import json
from simulation_meta_task import *
class DynamicRangeSimulation(SimulationDynamicMetaTask, luigi.Task):
name = luigi.Parameter("dynamic-range-simulation-bretz")
n_repititions = luigi.IntParameter(10000)
step = luigi.IntParameter(5000)
n_min = luigi.IntParameter(1) # minimum number of photons
n_max = luigi.IntParameter(1000000) # maximum number of photons
base_run_kwargs = luigi.Parameter("{}")
def run_kwargs(self):
kwargs = dict(exe="../fast/fast", persist_hits=False, noise_if_no_signal=True)
kwargs.update(json.loads(self.base_run_kwargs))
# print json.loads(self.base_run_kwargs)
# Dice number of particles
n = np.random.random_integers(self.n_min, self.n_max, self.step)
return [clone(kwargs, n_particles=ni) for ni in n]
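    # Gather the SQLite result of every completed run and write one "n_particles n_eff_cells" line to the task output.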
def run_after_yield(self):
# Open results.
inputs = self.sqlite_from_runs()
with self.output().open("w") as o:
for input in inputs:
con = sqlite3.connect(input.fn)
cur = con.cursor()
try:
n_particles, t_min, t_max = cur.execute("SELECT nParticles, tMin, tMax FROM particleSourceMessenger;").fetchone()
n_eff_cells = np.sum(cur.execute("SELECT weight FROM `g4sipmDigis-0` WHERE time >= %s AND time < %s;" % (t_min, t_max)).fetchall())
print >> o, n_particles, n_eff_cells
except Exception as e:
print "Failure in", input.fn
print e
class All(luigi.WrapperTask):
def requires(self):
model = "../sample/resources/hamamatsu-s13360-1325pe.properties"
kwargs = [dict(temperature=10, bias_voltage=(52.19 + 5.00), path_spec="10-deg"),
dict(temperature=25, bias_voltage=(53.00 + 5.00), path_spec="25-deg"),
dict(temperature=40, bias_voltage=(53.81 + 5.00), path_spec="40-deg"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 - 0.054), path_spec="25-deg-0.054-V"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 + 0.054), path_spec="25-deg+0.054-V"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 - 0.005), path_spec="25-deg-0.005-V"),
dict(temperature=25, bias_voltage=(53.00 + 5.00 + 0.005), path_spec="25-deg+0.005-V")
]
return [DynamicRangeSimulation(model=model, path_spec=kw["path_spec"], base_run_kwargs=json.dumps(kw)) for kw in kwargs]
if __name__ == "__main__":
luigi.run(main_task_cls=All)
|
gpl-3.0
| -3,709,466,599,450,708,000
| 46.210526
| 151
| 0.599034
| false
| 3.384906
| false
| false
| false
|
klahnakoski/esReplicate
|
pyLibrary/queries/expression_compiler.py
|
1
|
1182
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from pyLibrary import convert
from mo_logs import Log
from mo_dots import coalesce, Data
from mo_times.dates import Date
true = True
false = False
null = None
EMPTY_DICT = {}
def compile_expression(source):
"""
THIS FUNCTION IS ON ITS OWN FOR MINIMAL GLOBAL NAMESPACE
:param source: PYTHON SOURCE CODE
:return: PYTHON FUNCTION
"""
# FORCE MODULES TO BE IN NAMESPACE
_ = coalesce
_ = Date
_ = convert
_ = Log
_ = Data
_ = EMPTY_DICT
_ = re
output = None
exec """
def output(row, rownum=None, rows=None):
try:
return """ + source + """
except Exception as e:
Log.error("Problem with dynamic function {{func|quote}}", func= """ + convert.value2quote(source) + """, cause=e)
"""
return output
|
mpl-2.0
| -7,833,379,894,178,711,000
| 21.730769
| 122
| 0.652284
| false
| 3.466276
| false
| false
| false
|
ywangd/stash
|
bin/grep.py
|
1
|
2282
|
# -*- coding: utf-8 -*-
"""Search a regular expression pattern in one or more files"""
from __future__ import print_function
import argparse
import collections
import fileinput
import os
import re
import sys
def main(args):
global _stash
ap = argparse.ArgumentParser()
ap.add_argument('pattern', help='the pattern to match')
ap.add_argument('files', nargs='*', help='files to be searched')
ap.add_argument('-i', '--ignore-case', action='store_true', help='ignore case while searching')
ap.add_argument('-v', '--invert', action='store_true', help='invert the search result')
ap.add_argument('-c', '--count', action='store_true', help='count the search results instead of normal output')
ns = ap.parse_args(args)
flags = 0
if ns.ignore_case:
flags |= re.IGNORECASE
pattern = re.compile(ns.pattern, flags=flags)
# Do not try to grep directories
files = [f for f in ns.files if not os.path.isdir(f)]
fileinput.close() # in case it is not closed
try:
counts = collections.defaultdict(int)
for line in fileinput.input(files, openhook=fileinput.hook_encoded("utf-8")):
if bool(pattern.search(line)) != ns.invert:
if ns.count:
counts[fileinput.filename()] += 1
else:
if ns.invert: # optimize: if ns.invert, then no match, so no highlight color needed
newline = line
else:
newline = re.sub(pattern, lambda m: _stash.text_color(m.group(), 'red'), line)
if fileinput.isstdin():
fmt = u'{lineno}: {line}'
else:
fmt = u'{filename}: {lineno}: {line}'
print(fmt.format(filename=fileinput.filename(), lineno=fileinput.filelineno(), line=newline.rstrip()))
if ns.count:
for filename, count in counts.items():
fmt = u'{count:6} {filename}'
print(fmt.format(filename=filename, count=count))
except Exception as err:
print("grep: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
finally:
fileinput.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
mit
| -3,807,069,574,870,944,300
| 34.65625
| 122
| 0.574934
| false
| 4.024691
| false
| false
| false
|
wkoszek/puzzles
|
pattern_search/woogle_index.py
|
1
|
1936
|
#!/usr/bin/env python3
import getopt
import sys
import re
import random
import sqlite3
def main():
g_input_fn = False
g_do_search = False
g_dict_fn = False
g_words = []
try:
opts, args = getopt.getopt(sys.argv[1:],
"hi:d:s:v",
["help", "input=", "dict=", "search="])
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
output = None
verbose = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
g_input_fn = a
elif o in ("-s", "--search"):
g_do_search = a;
elif o in ("-d", "--dict"):
g_dict_fn = a
else:
assert False, "unhandled option"
if g_input_fn == False:
print("You must pass --input to indicate where DB is");
sys.exit(2);
if g_do_search == False and g_dict_fn == False:
print("You must either pass --input with --dict");
sys.exit(2)
random.seed(14)
conn = sqlite3.connect(g_input_fn);
c = conn.cursor()
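    # Without --search: (re)build the 'words' table from the dictionary file, giving every word a random score.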
if g_do_search == False:
assert(g_dict_fn != None);
print("# initializing database " + g_dict_fn);
with open(g_dict_fn, "r") as f:
g_words += [ [line, random.randint(0, 1000)]
for line in f.read().split("\n")
if not re.match("^$", line)]
f.close()
c.execute("DROP TABLE IF EXISTS words");
c.execute('''CREATE TABLE words(word text, score real)''')
for word in g_words:
if len(word) <= 0:
continue;
c.execute("""
INSERT INTO words VALUES('{0}','{1}');
""".format(word[0], word[1]));
conn.commit();
conn.close();
else:
# From http://stackoverflow.com/questions/5071601/how-do-i-use-regex-in-a-sqlite-query
def match(expr, item):
return re.match(expr, item) is not None
conn.create_function("MATCH", 2, match)
c.execute("""
SELECT * FROM words
WHERE MATCH('.*{0}.*', word)
ORDER BY score DESC LIMIT 10;
""".format(g_do_search));
for v, r in c.fetchall():
print(v, r)
if __name__ == "__main__":
main()
|
bsd-2-clause
| 6,192,522,204,303,101,000
| 21.776471
| 88
| 0.594008
| false
| 2.700139
| false
| false
| false
|
tengqm/senlin-container
|
senlin/engine/service.py
|
1
|
95655
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import functools
import uuid
from docker import Client
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from senlin.common import consts
from senlin.common import context as senlin_context
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common.i18n import _LE
from senlin.common.i18n import _LI
from senlin.common import messaging as rpc_messaging
from senlin.common import scaleutils as su
from senlin.common import schema
from senlin.common import utils
from senlin.db import api as db_api
from senlin.engine.actions import base as action_mod
from senlin.engine import cluster as cluster_mod
from senlin.engine import cluster_policy
from senlin.engine import dispatcher
from senlin.engine import environment
from senlin.engine import health_manager
from senlin.engine import node as node_mod
from senlin.engine import receiver as receiver_mod
from senlin.engine import scheduler
from senlin.policies import base as policy_base
from senlin.profiles import base as profile_base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
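# Decorator for RPC-facing engine methods: rebuilds a RequestContext from a plain dict and converts SenlinException into oslo.messaging's ExpectedException envelope.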
def request_context(func):
@functools.wraps(func)
def wrapped(self, ctx, *args, **kwargs):
if ctx is not None and not isinstance(ctx,
senlin_context.RequestContext):
ctx = senlin_context.RequestContext.from_dict(ctx.to_dict())
try:
return func(self, ctx, *args, **kwargs)
except exception.SenlinException:
raise oslo_messaging.rpc.dispatcher.ExpectedException()
return wrapped
class EngineService(service.Service):
'''Lifecycle manager for a running service engine.
- All the contained methods here are called from the RPC client.
- If a RPC call does not have a corresponding method here, an exception
will be thrown.
- Arguments to these calls are added dynamically and will be treated as
keyword arguments by the RPC client.
'''
def __init__(self, host, topic, manager=None):
super(EngineService, self).__init__()
self.host = host
self.topic = topic
self.dispatcher_topic = consts.ENGINE_DISPATCHER_TOPIC
self.health_mgr_topic = consts.ENGINE_HEALTH_MGR_TOPIC
# The following are initialized here and will be assigned in start()
# which happens after the fork when spawning multiple worker processes
self.engine_id = None
self.TG = None
self.target = None
self._rpc_server = None
        # Initialize the global environment
environment.initialize()
def init_tgm(self):
self.TG = scheduler.ThreadGroupManager()
def start(self):
self.engine_id = str(uuid.uuid4())
self.init_tgm()
# create a dispatcher RPC service for this engine.
self.dispatcher = dispatcher.Dispatcher(self,
self.dispatcher_topic,
consts.RPC_API_VERSION,
self.TG)
LOG.info(_LI("Starting dispatcher for engine %s"), self.engine_id)
self.dispatcher.start()
# create a health manager RPC service for this engine.
self.health_mgr = health_manager.HealthManager(
self, self.health_mgr_topic, consts.RPC_API_VERSION)
LOG.info(_LI("Starting health manager for engine %s"), self.engine_id)
self.health_mgr.start()
target = oslo_messaging.Target(version=consts.RPC_API_VERSION,
server=self.host,
topic=self.topic)
self.target = target
self._rpc_server = rpc_messaging.get_rpc_server(target, self)
self._rpc_server.start()
self.service_manage_cleanup()
self.TG.add_timer(cfg.CONF.periodic_interval,
self.service_manage_report)
super(EngineService, self).start()
def _stop_rpc_server(self):
# Stop RPC connection to prevent new requests
LOG.info(_LI("Stopping engine service..."))
try:
self._rpc_server.stop()
self._rpc_server.wait()
LOG.info(_LI('Engine service stopped successfully'))
except Exception as ex:
LOG.error(_LE('Failed to stop engine service: %s'),
six.text_type(ex))
def stop(self):
self._stop_rpc_server()
# Notify dispatcher to stop all action threads it started.
LOG.info(_LI("Stopping dispatcher for engine %s"), self.engine_id)
self.dispatcher.stop()
# Notify health_manager to stop
LOG.info(_LI("Stopping health manager for engine %s"), self.engine_id)
self.health_mgr.stop()
self.TG.stop()
super(EngineService, self).stop()
def service_manage_report(self):
ctx = senlin_context.get_admin_context()
try:
svc = db_api.service_update(ctx, self.engine_id)
# if svc is None, means it's not created.
if svc is None:
params = dict(host=self.host,
binary='senlin-engine',
service_id=self.engine_id,
topic=self.topic)
db_api.service_create(ctx, **params)
except Exception as ex:
LOG.error(_LE('Service %(service_id)s update failed: %(error)s'),
{'service_id': self.engine_id, 'error': ex})
def service_manage_cleanup(self):
ctx = senlin_context.get_admin_context()
last_updated_window = (2 * cfg.CONF.periodic_interval)
time_line = timeutils.utcnow() - datetime.timedelta(
seconds=last_updated_window)
svcs = db_api.service_get_all(ctx)
for svc in svcs:
if svc['id'] == self.engine_id:
continue
if svc['updated_at'] < time_line:
# hasn't been updated, assuming it's died.
LOG.info(_LI('Service %s was aborted'), svc['id'])
db_api.service_delete(ctx, svc['id'])
@request_context
def credential_create(self, context, cred, attrs=None):
"""Create the credential based on the context.
We may add more parameters in future to the query parameter, for
example as Senlin expands its support to non-OpenStack backends.
:param context: The requesting context which contains the user id
along with other identity information.
:param cred: A credential to be associated with the user identity
provided in the context.
:param dict attrs: Optional attributes associated with the credential.
:return: A dictionary containing the persistent credential.
"""
values = {
'user': context.user,
'project': context.project,
'cred': {
'openstack': {
'trust': cred
}
}
}
db_api.cred_create_update(context, values)
return {'cred': cred}
@request_context
def credential_get(self, context, query=None):
"""Get the credential based on the context.
We may add more parameters in future to the query parameter, for
example as Senlin expands its support to non-OpenStack backends.
:param context: The requesting context which contains the user id
along with other identity information.
:param dict query: Optional query parameters.
:return: A dictionary containing the persistent credential, or None
if no matching credential is found.
"""
res = db_api.cred_get(context, context.user, context.project)
if res is None:
return None
return res.cred.get('openstack', None)
@request_context
def credential_update(self, context, cred, **attrs):
"""Update a credential based on the context and provided value.
We may add more parameters in future to the query parameter, for
example as Senlin expands its support to non-OpenStack backends.
:param context: The requesting context which contains the user id
along with other identity information.
:param dict attrs: Optional attribute values to be associated with
the credential.
:return: A dictionary containing the updated credential.
"""
db_api.cred_update(context, context.user, context.project,
{'cred': {'openstack': {'trust': cred}}})
return {'cred': cred}
@request_context
def get_revision(self, context):
return cfg.CONF.revision['senlin_engine_revision']
@request_context
def profile_type_list(self, context):
"""List known profile type implementations.
:param context: An instance of the request context.
:return: A list of profile types.
"""
return environment.global_env().get_profile_types()
@request_context
def profile_type_get(self, context, type_name):
"""Get the details about a profile type.
:param context: An instance of the request context.
:param type_name: The name of a profile type.
:return: The details about a profile type.
"""
profile = environment.global_env().get_profile(type_name)
data = profile.get_schema()
return {
'name': type_name,
'schema': data,
}
def profile_find(self, context, identity, project_safe=True):
"""Find a profile with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:param project_safe: A boolean indicating whether profile from
projects other than the requesting one can be
returned.
:return: A DB object of profile or an exception `ProfileNotFound` if
no matching object is found.
"""
if uuidutils.is_uuid_like(identity):
profile = db_api.profile_get(context, identity,
project_safe=project_safe)
if not profile:
profile = db_api.profile_get_by_name(context, identity,
project_safe=project_safe)
else:
profile = db_api.profile_get_by_name(context, identity,
project_safe=project_safe)
if not profile:
profile = db_api.profile_get_by_short_id(
context, identity, project_safe=project_safe)
if not profile:
raise exception.ProfileNotFound(profile=identity)
return profile
@request_context
def profile_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List profiles matching the specified criteria.
:param context: An instance of request context.
:param limit: An integer specifying the maximum number of profiles to
return in a response.
:param marker: An UUID specifying the profile after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether profiles from all
projects will be returned.
:return: A list of `Profile` object representations.
"""
limit = utils.parse_int_param(consts.PARAM_LIMIT, limit)
utils.validate_sort_param(sort, consts.PROFILE_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
profiles = profile_base.Profile.load_all(context,
limit=limit, marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
return [p.to_dict() for p in profiles]
@request_context
def profile_create(self, context, name, spec, metadata=None):
"""Create a profile with the given properties.
:param context: An instance of the request context.
:param name: The name for the profile to be created.
:param spec: A dictionary containing the spec for the profile.
:param metadata: A dictionary containing optional key-value pairs to
be associated with the profile.
:return: A dictionary containing the details of the profile object
created.
"""
if cfg.CONF.name_unique:
if db_api.profile_get_by_name(context, name):
msg = _("A profile named '%(name)s' already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
try:
plugin = environment.global_env().get_profile(type_str)
except exception.ProfileTypeNotFound:
msg = _("The specified profile type (%(name)s) is not found."
) % {"name": type_str}
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating profile %(type)s '%(name)s'."),
{'type': type_str, 'name': name})
kwargs = {
'user': context.user,
'project': context.project,
'domain': context.domain,
'metadata': metadata,
}
profile = plugin(name, spec, **kwargs)
try:
profile.validate()
except exception.InvalidSpec as ex:
msg = six.text_type(ex)
LOG.error(_LE("Failed in creating profile: %s"), msg)
raise exception.BadRequest(msg=msg)
profile.store(context)
LOG.info(_LI("Profile %(name)s is created: %(id)s."),
{'name': name, 'id': profile.id})
return profile.to_dict()
@request_context
def profile_get(self, context, identity):
"""Retrieve the details about a profile.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:return: A dictionary containing the policy details, or an exception
of type `ProfileNotFound` if no matching object is found.
"""
db_profile = self.profile_find(context, identity)
profile = profile_base.Profile.load(context, profile=db_profile)
return profile.to_dict()
@request_context
def profile_update(self, context, profile_id, name=None, metadata=None):
"""Update the properties of a given profile.
:param context: An instance of the request context.
:param profile_id: The UUID, name or short-id of a profile.
:param name: The new name for the profile.
:param metadata: A dictionary of key-value pairs to be associated with
the profile.
:returns: A dictionary containing the details of the updated profile,
or an exception `ProfileNotFound` if no matching profile is
found.
"""
LOG.info(_LI("Updating profile '%(id)s.'"), {'id': profile_id})
db_profile = self.profile_find(context, profile_id)
profile = profile_base.Profile.load(context, profile=db_profile)
changed = False
if name is not None and name != profile.name:
profile.name = name
changed = True
if metadata is not None and metadata != profile.metadata:
profile.metadata = metadata
changed = True
if changed:
profile.store(context)
LOG.info(_LI("Profile '%(id)s' is updated."), {'id': profile_id})
return profile.to_dict()
@request_context
def profile_delete(self, context, identity):
"""Delete the specified profile.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:return: None if succeeded or an exception of `ResourceInUse` if
profile is referenced by certain clusters/nodes.
"""
db_profile = self.profile_find(context, identity)
LOG.info(_LI("Deleting profile '%s'."), identity)
try:
profile_base.Profile.delete(context, db_profile.id)
except exception.ResourceBusyError:
LOG.error(_LI("The profile '%s' cannot be deleted."), identity)
raise exception.ResourceInUse(resource_type='profile',
resource_id=db_profile.id)
LOG.info(_LI("Profile '%(id)s' is deleted."), {'id': identity})
@request_context
def policy_type_list(self, context):
"""List known policy type implementations.
:param context: An instance of the request context.
:return: A list of policy types.
"""
return environment.global_env().get_policy_types()
@request_context
def policy_type_get(self, context, type_name):
"""Get the details about a policy type.
:param context: An instance of the request context.
:param type_name: The name of a policy type.
:return: The details about a policy type.
"""
policy_type = environment.global_env().get_policy(type_name)
data = policy_type.get_schema()
return {
'name': type_name,
'schema': data
}
def policy_find(self, context, identity, project_safe=True):
"""Find a policy with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a profile.
:param project_safe: A boolean indicating whether policies from
projects other than the requesting one should be
evaluated.
:return: A DB object of policy or an exception of `PolicyNotFound` if
no matching object is found.
"""
if uuidutils.is_uuid_like(identity):
policy = db_api.policy_get(context, identity,
project_safe=project_safe)
if not policy:
policy = db_api.policy_get_by_name(context, identity,
project_safe=project_safe)
else:
policy = db_api.policy_get_by_name(context, identity,
project_safe=project_safe)
if not policy:
policy = db_api.policy_get_by_short_id(
context, identity, project_safe=project_safe)
if not policy:
raise exception.PolicyNotFound(policy=identity)
return policy
@request_context
def policy_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List policies matching the specified criteria.
:param context: An instance of request context.
:param limit: An integer specifying the maximum number of policies to
return in a response.
:param marker: An UUID specifying the policy after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether policies from all
projects will be returned.
:return: A list of `Policy` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.POLICY_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
policies = policy_base.Policy.load_all(context,
limit=limit, marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
return [p.to_dict() for p in policies]
@request_context
def policy_create(self, context, name, spec):
"""Create a policy with the given name and spec.
:param context: An instance of the request context.
:param name: The name for the policy to be created.
:param spec: A dictionary containing the spec for the policy.
:return: A dictionary containing the details of the policy object
created.
"""
if cfg.CONF.name_unique:
if db_api.policy_get_by_name(context, name):
msg = _("A policy named '%(name)s' already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
type_name, version = schema.get_spec_version(spec)
type_str = "-".join([type_name, version])
try:
plugin = environment.global_env().get_policy(type_str)
except exception.PolicyTypeNotFound:
msg = _("The specified policy type (%(name)s) is not found."
) % {"name": type_str}
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating policy %(type)s '%(name)s'"),
{'type': type_str, 'name': name})
kwargs = {
'user': context.user,
'project': context.project,
'domain': context.domain,
}
policy = plugin(name, spec, **kwargs)
try:
policy.validate()
except exception.InvalidSpec as ex:
msg = six.text_type(ex)
LOG.error(_LE("Failed in creating policy: %s"), msg)
raise exception.BadRequest(msg=msg)
policy.store(context)
LOG.info(_LI("Policy '%(name)s' is created: %(id)s."),
{'name': name, 'id': policy.id})
return policy.to_dict()
@request_context
def policy_get(self, context, identity):
"""Retrieve the details about a policy.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a policy.
:return: A dictionary containing the policy details, or an exception
of type `PolicyNotFound` if no matching object is found.
"""
db_policy = self.policy_find(context, identity)
policy = policy_base.Policy.load(context, db_policy=db_policy)
return policy.to_dict()
@request_context
def policy_update(self, context, identity, name):
"""Update the properties of a given policy.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a policy.
:param name: The new name for the policy.
:returns: A dictionary containing the details of the updated policy or
                  an exception `PolicyNotFound` if no matching policy is found,
or an exception `BadRequest` if name is not provided.
"""
if not name:
msg = _('Policy name not specified.')
raise exception.BadRequest(msg=msg)
db_policy = self.policy_find(context, identity)
policy = policy_base.Policy.load(context, db_policy=db_policy)
if name != policy.name:
LOG.info(_LI("Updating policy '%s'."), identity)
policy.name = name
policy.store(context)
LOG.info(_LI("Policy '%s' is updated."), identity)
return policy.to_dict()
@request_context
def policy_delete(self, context, identity):
"""Delete the specified policy.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a policy.
:return: None if succeeded or an exception of `ResourceInUse` if
policy is still attached to certain clusters.
"""
db_policy = self.policy_find(context, identity)
LOG.info(_LI("Delete policy '%s'."), identity)
try:
policy_base.Policy.delete(context, db_policy.id)
except exception.ResourceBusyError:
LOG.error(_LI("Policy '%s' cannot be deleted."), identity)
raise exception.ResourceInUse(resource_type='policy',
resource_id=db_policy.id)
LOG.info(_LI("Policy '%s' is deleted."), identity)
def cluster_find(self, context, identity, project_safe=True):
"""Find a cluster with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short ID of a cluster.
:param project_safe: A boolean parameter specifying whether only
clusters from the same project are qualified to
be returned.
:return: An instance of `Cluster` class.
:raises: `ClusterNotFound` if no matching object can be found.
"""
if uuidutils.is_uuid_like(identity):
cluster = db_api.cluster_get(context, identity,
project_safe=project_safe)
if not cluster:
cluster = db_api.cluster_get_by_name(context, identity,
project_safe=project_safe)
else:
cluster = db_api.cluster_get_by_name(context, identity,
project_safe=project_safe)
# maybe it is a short form of UUID
if not cluster:
cluster = db_api.cluster_get_by_short_id(
context, identity, project_safe=project_safe)
if not cluster:
raise exception.ClusterNotFound(cluster=identity)
return cluster
@request_context
def cluster_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List clusters matching the specified criteria.
:param context: An instance of request context.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the cluster after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether clusters from all
projects will be returned.
:return: A list of `Cluster` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.CLUSTER_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
clusters = cluster_mod.Cluster.load_all(context, limit=limit,
marker=marker, sort=sort,
filters=filters,
project_safe=project_safe)
return [cluster.to_dict() for cluster in clusters]
@request_context
def cluster_get(self, context, identity):
"""Retrieve the cluster specified.
:param context: An instance of the request context.
:param identity: The UUID, name or short-ID of a cluster.
:return: A dictionary containing the details about a cluster.
"""
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
return cluster.to_dict()
def check_cluster_quota(self, context):
"""Validate the number of clusters created in a project.
:param context: An instance of the request context.
:return: None if cluster creation is okay, or an exception of type
                 `Forbidden` if the number of clusters has reached the maximum.
"""
existing = db_api.cluster_count_all(context)
maximum = cfg.CONF.max_clusters_per_project
if existing >= maximum:
raise exception.Forbidden()
@request_context
def cluster_create(self, context, name, desired_capacity, profile_id,
min_size=None, max_size=None, metadata=None,
timeout=None, host_cluster=None):
"""Create a cluster.
:param context: An instance of the request context.
:param name: A string specifying the name of the cluster to be created.
:param desired_capacity: The desired capacity of the cluster.
        :param profile_id: The UUID, name or short-ID of the profile to use.
:param min_size: An integer specifying the minimum size of the cluster.
:param max_size: An integer specifying the maximum size of the cluster.
:param metadata: A dictionary containing key-value pairs to be
associated with the cluster.
:param timeout: An optional integer specifying the operation timeout
value in seconds.
:return: A dictionary containing the details about the cluster and the
ID of the action triggered by this operation.
"""
self.check_cluster_quota(context)
if cfg.CONF.name_unique:
if db_api.cluster_get_by_name(context, name):
msg = _("The cluster (%(name)s) already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
try:
db_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile '%s' is not found.") % profile_id
raise exception.BadRequest(msg=msg)
init_size = utils.parse_int_param(consts.CLUSTER_DESIRED_CAPACITY,
desired_capacity)
if min_size is not None:
min_size = utils.parse_int_param(consts.CLUSTER_MIN_SIZE, min_size)
if max_size is not None:
max_size = utils.parse_int_param(consts.CLUSTER_MAX_SIZE, max_size,
allow_negative=True)
if timeout is not None:
timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)
res = su.check_size_params(None, init_size, min_size, max_size, True)
if res:
raise exception.BadRequest(msg=res)
LOG.info(_LI("Creating cluster '%s'."), name)
if host_cluster:
host_cluster = self.cluster_get(context, host_cluster)
host_nodes = host_cluster['nodes']
            metadata = metadata or {}
            metadata.update(host_cluster=host_cluster['id'])
metadata.update(candidate_nodes=host_nodes)
kwargs = {
'min_size': min_size,
'max_size': max_size,
'timeout': timeout,
'metadata': metadata,
'user': context.user,
'project': context.project,
'domain': context.domain,
}
cluster = cluster_mod.Cluster(name, init_size, db_profile.id,
**kwargs)
cluster.store(context)
# Build an Action for cluster creation
kwargs = {
'name': 'cluster_create_%s' % cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, cluster.id,
consts.CLUSTER_CREATE, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster create action queued: %s."), action_id)
result = cluster.to_dict()
result['action'] = action_id
return result
@request_context
def cluster_update(self, context, identity, name=None, profile_id=None,
metadata=None, timeout=None):
"""Update a cluster.
:param context: An instance of the request context.
        :param identity: The UUID, name, or short-ID of the target cluster.
:param name: A string specifying the new name of the cluster.
:param profile_id: The UUID, name or short-ID of the new profile.
:param metadata: A dictionary containing key-value pairs to be
associated with the cluster.
:param timeout: An optional integer specifying the new operation
timeout value in seconds.
:return: A dictionary containing the details about the cluster and the
ID of the action triggered by this operation.
"""
# Get the database representation of the existing cluster
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
if cluster.status == cluster.ERROR:
msg = _('Updating a cluster in error state')
LOG.error(msg)
raise exception.FeatureNotSupported(feature=msg)
LOG.info(_LI("Updating cluster '%s'."), identity)
inputs = {}
if profile_id is not None:
old_profile = self.profile_find(context, cluster.profile_id)
try:
new_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile '%s' is not found."
) % profile_id
raise exception.BadRequest(msg=msg)
if new_profile.type != old_profile.type:
msg = _('Cannot update a cluster to a different profile type, '
'operation aborted.')
raise exception.ProfileTypeNotMatch(message=msg)
if old_profile.id != new_profile.id:
inputs['new_profile_id'] = new_profile.id
if metadata is not None and metadata != cluster.metadata:
inputs['metadata'] = metadata
if timeout is not None:
timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)
inputs['timeout'] = timeout
if name is not None:
inputs['name'] = name
kwargs = {
'name': 'cluster_update_%s' % cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
action_id = action_mod.Action.create(context, cluster.id,
consts.CLUSTER_UPDATE, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster update action queued: %s."), action_id)
resp = cluster.to_dict()
resp['action'] = action_id
return resp
@request_context
def cluster_delete(self, context, identity):
"""Delete the specified cluster.
:param identity: The UUID, name or short-ID of the target cluster.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI('Deleting cluster %s'), identity)
db_cluster = self.cluster_find(context, identity)
policies = db_api.cluster_policy_get_all(context, db_cluster.id)
if len(policies) > 0:
msg = _('Cluster %(id)s cannot be deleted without having all '
'policies detached.') % {'id': identity}
LOG.error(msg)
raise exception.BadRequest(msg=msg)
receivers = db_api.receiver_get_all(context, filters={'cluster_id':
db_cluster.id})
if len(receivers) > 0:
msg = _('Cluster %(id)s cannot be deleted without having all '
'receivers deleted.') % {'id': identity}
LOG.error(msg)
raise exception.BadRequest(msg=msg)
params = {
'name': 'cluster_delete_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_DELETE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster delete action queued: %s"), action_id)
return {'action': action_id}
@request_context
def cluster_add_nodes(self, context, identity, nodes):
"""Add specified nodes to the specified cluster.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the target cluster.
:param nodes: A list of node identities where each item is the UUID,
name or short-id of a node.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Adding nodes '%(nodes)s' to cluster '%(cluster)s'."),
{'cluster': identity, 'nodes': nodes})
db_cluster = self.cluster_find(context, identity)
db_cluster_profile = self.profile_find(context,
db_cluster.profile_id)
cluster_profile_type = db_cluster_profile.type
found = []
not_found = []
bad_nodes = []
owned_nodes = []
not_match_nodes = []
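        # Examine each candidate node and sort it into one of the buckets above.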
for node in nodes:
try:
db_node = self.node_find(context, node)
                # The node must be ACTIVE and not already owned by a cluster.
if db_node.status != node_mod.Node.ACTIVE:
bad_nodes.append(db_node.id)
elif len(db_node.cluster_id) != 0:
owned_nodes.append(db_node.id)
else:
# check profile type matching
db_node_profile = self.profile_find(context,
db_node.profile_id)
node_profile_type = db_node_profile.type
if node_profile_type != cluster_profile_type:
not_match_nodes.append(db_node.id)
else:
found.append(db_node.id)
except exception.NodeNotFound:
                not_found.append(node)
error = None
if len(not_match_nodes) > 0:
error = _("Profile type of nodes %s does not match that of the "
"cluster.") % not_match_nodes
LOG.error(error)
raise exception.ProfileTypeNotMatch(message=error)
elif len(owned_nodes) > 0:
error = _("Nodes %s already owned by some cluster.") % owned_nodes
LOG.error(error)
raise exception.NodeNotOrphan(message=error)
elif len(bad_nodes) > 0:
error = _("Nodes are not ACTIVE: %s.") % bad_nodes
elif len(not_found) > 0:
error = _("Nodes not found: %s.") % not_found
elif len(found) == 0:
error = _("No nodes to add: %s.") % nodes
if error is not None:
LOG.error(error)
raise exception.BadRequest(msg=error)
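        # Make sure the cluster size constraints still hold after the addition.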
target_size = db_cluster.desired_capacity + len(found)
error = su.check_size_params(db_cluster, target_size, strict=True)
if error:
LOG.error(error)
raise exception.BadRequest(msg=error)
params = {
'name': 'cluster_add_nodes_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {'nodes': found},
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_ADD_NODES,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster add nodes action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_del_nodes(self, context, identity, nodes):
"""Delete specified nodes from the named cluster.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the cluster.
:param nodes: A list containing the identities of the nodes to delete.
:return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Deleting nodes '%(nodes)s' from cluster '%(cluster)s'."),
{'cluster': identity, 'nodes': nodes})
db_cluster = self.cluster_find(context, identity)
found = []
not_found = []
bad_nodes = []
for node in nodes:
try:
db_node = self.node_find(context, node)
if db_node.cluster_id != db_cluster.id:
bad_nodes.append(db_node.id)
else:
found.append(db_node.id)
except exception.NodeNotFound:
                not_found.append(node)
error = None
if len(not_found):
error = _("Nodes not found: %s.") % not_found
elif len(bad_nodes):
error = _("Nodes not members of specified cluster: "
"%s.") % bad_nodes
elif len(found) == 0:
error = _("No nodes specified.")
if error is not None:
LOG.error(error)
raise exception.BadRequest(msg=error)
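        # Make sure the cluster size constraints still hold after the removal.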
target_size = db_cluster.desired_capacity - len(found)
error = su.check_size_params(db_cluster, target_size, strict=True)
if error:
LOG.error(error)
raise exception.BadRequest(msg=error)
params = {
'name': 'cluster_del_nodes_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {
'candidates': found,
'count': len(found),
},
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_DEL_NODES,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster delete nodes action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_resize(self, context, identity, adj_type=None, number=None,
min_size=None, max_size=None, min_step=None,
strict=True):
"""Adjust cluster size parameters.
        :param identity: The cluster identity, which can be a name, ID or
                         short ID;
:param adj_type: optional; if specified, must be one of the strings
defined in consts.ADJUSTMENT_TYPES;
:param number: number for adjustment. It is interpreted as the new
desired_capacity of the cluster if `adj_type` is set
to `EXACT_CAPACITY`; it is interpreted as the relative
number of nodes to add/remove when `adj_type` is set
to `CHANGE_IN_CAPACITY`; it is treated as a percentage
when `adj_type` is set to `CHANGE_IN_PERCENTAGE`.
This parameter is optional.
:param min_size: new lower bound of the cluster size, if specified.
This parameter is optional.
:param max_size: new upper bound of the cluster size, if specified;
                         A negative value means no upper limit is imposed.
This parameter is optional.
:param min_step: optional. It specifies the number of nodes to be
added or removed when `adj_type` is set to value
`CHANGE_IN_PERCENTAGE` and the number calculated is
                         less than 1.
:param strict: optional boolean value. It specifies whether Senlin
should try a best-effort style resizing or just
reject the request when scaling beyond its current
size constraint.
:return: A dict containing the ID of an action fired.
"""
# check adj_type
if adj_type is not None:
if adj_type not in consts.ADJUSTMENT_TYPES:
raise exception.InvalidParameter(
name=consts.ADJUSTMENT_TYPE, value=adj_type)
if number is None:
msg = _('Missing number value for size adjustment.')
raise exception.BadRequest(msg=msg)
else:
if number is not None:
msg = _('Missing adjustment_type value for size adjustment.')
raise exception.BadRequest(msg=msg)
if adj_type == consts.EXACT_CAPACITY:
number = utils.parse_int_param(consts.ADJUSTMENT_NUMBER, number)
elif adj_type == consts.CHANGE_IN_CAPACITY:
number = utils.parse_int_param(consts.ADJUSTMENT_NUMBER, number,
allow_negative=True)
elif adj_type == consts.CHANGE_IN_PERCENTAGE:
try:
number = float(number)
except ValueError:
raise exception.InvalidParameter(name=consts.ADJUSTMENT_NUMBER,
value=number)
# min_step is only used (so checked) for this case
if min_step is not None:
min_step = utils.parse_int_param(consts.ADJUSTMENT_MIN_STEP,
min_step)
if min_size is not None:
min_size = utils.parse_int_param(consts.ADJUSTMENT_MIN_SIZE,
min_size)
if max_size is not None:
max_size = utils.parse_int_param(consts.ADJUSTMENT_MAX_SIZE,
max_size, allow_negative=True)
if strict is not None:
strict = utils.parse_bool_param(consts.ADJUSTMENT_STRICT, strict)
db_cluster = self.cluster_find(context, identity)
current = db_cluster.desired_capacity
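        # Translate the requested adjustment into an absolute desired capacity.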
if adj_type is not None:
desired = su.calculate_desired(current, adj_type, number, min_step)
else:
desired = None
res = su.check_size_params(db_cluster, desired, min_size, max_size,
strict)
if res:
raise exception.BadRequest(msg=res)
fmt = _LI("Resizing cluster '%(cluster)s': type=%(adj_type)s, "
"number=%(number)s, min_size=%(min_size)s, "
"max_size=%(max_size)s, min_step=%(min_step)s, "
"strict=%(strict)s.")
LOG.info(fmt, {'cluster': identity, 'adj_type': adj_type,
'number': number, 'min_size': min_size,
'max_size': max_size, 'min_step': min_step,
'strict': strict})
params = {
'name': 'cluster_resize_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {
consts.ADJUSTMENT_TYPE: adj_type,
consts.ADJUSTMENT_NUMBER: number,
consts.ADJUSTMENT_MIN_SIZE: min_size,
consts.ADJUSTMENT_MAX_SIZE: max_size,
consts.ADJUSTMENT_MIN_STEP: min_step,
consts.ADJUSTMENT_STRICT: strict
}
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_RESIZE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster resize action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_scale_out(self, context, identity, count=None):
"""Inflate the size of a cluster by then given number (optional).
:param context: Request context for the call.
:param identity: The name, ID or short ID of a cluster.
:param count: The number of nodes to add to the cluster. When omitted,
a policy gets a chance to decide the count number. When specified,
a policy would have to respect this input.
:return: A dict with the ID of the action fired.
"""
# Validation
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
metadata = cluster.to_dict()['metadata']
host_cluster = metadata.get('host_cluster', None)
candidate_hosts = []
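        # If this cluster is hosted on another cluster, pick the host nodes not
        # yet used and record them as candidate hosts for the new nodes.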
if host_cluster:
host_cluster = self.cluster_get(context, host_cluster)
candidate_nodes = host_cluster['nodes']
host_nodes = metadata.get('host_nodes', None)
if host_nodes and candidate_nodes:
for node in candidate_nodes:
if node not in host_nodes:
candidate_hosts.append(node)
if candidate_hosts:
metadata.update(candidate_hosts=candidate_hosts)
            cluster.metadata = metadata
cluster.store(context)
if count is not None:
count = utils.parse_int_param('count', count, allow_zero=False)
err = su.check_size_params(db_cluster,
db_cluster.desired_capacity + count)
if err:
raise exception.BadRequest(msg=err)
LOG.info(_LI('Scaling out cluster %(name)s by %(delta)s nodes'),
{'name': identity, 'delta': count})
inputs = {'count': count}
else:
LOG.info(_LI('Scaling out cluster %s'), db_cluster.name)
inputs = {}
params = {
'name': 'cluster_scale_out_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
if candidate_hosts:
params.update(candidate_hosts=candidate_hosts)
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_SCALE_OUT,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster Scale out action queued: %s"), action_id)
return {'action': action_id}
@request_context
def cluster_scale_in(self, context, identity, count=None):
"""Deflate the size of a cluster by given number (optional).
:param context: Request context for the call.
:param identity: The name, ID or short ID of a cluster.
:param count: The number of nodes to remove from the cluster. When
omitted, a policy gets a chance to decide the count number. When
specified, a policy would have to respect this input.
:return: A dict with the ID of the action fired.
"""
db_cluster = self.cluster_find(context, identity)
if count is not None:
count = utils.parse_int_param('count', count, allow_zero=False)
err = su.check_size_params(db_cluster,
db_cluster.desired_capacity - count)
if err:
raise exception.BadRequest(msg=err)
LOG.info(_LI('Scaling in cluster %(name)s by %(delta)s nodes'),
{'name': identity, 'delta': count})
inputs = {'count': count}
else:
LOG.info(_LI('Scaling in cluster %s'), db_cluster.name)
inputs = {}
params = {
'name': 'cluster_scale_in_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_SCALE_IN,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster Scale in action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_check(self, context, identity, params=None):
"""Check the status of a cluster.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a cluster.
:param params: A dictionary containing additional parameters for
the check operation.
        :return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Checking Cluster '%(cluster)s'."),
{'cluster': identity})
db_cluster = self.cluster_find(context, identity)
params = {
'name': 'cluster_check_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_CHECK, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster check action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_recover(self, context, identity, params=None):
"""Recover a cluster to a healthy status.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a cluster.
:param params: A dictionary containing additional parameters for
                       the recover operation.
        :return: A dictionary containing the ID of the action triggered.
"""
LOG.info(_LI("Recovering cluster '%s'."), identity)
db_cluster = self.cluster_find(context, identity)
params = {
'name': 'cluster_recover_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params,
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_RECOVER, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Cluster recover action queued: %s."), action_id)
return {'action': action_id}
def node_find(self, context, identity, project_safe=True):
"""Find a node with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a node.
:param project_safe: A boolean indicating whether only nodes from the
same project as the requesting one are qualified
to be returned.
:return: A DB object of Node or an exception of `NodeNotFound` if no
matching object is found.
"""
if uuidutils.is_uuid_like(identity):
node = db_api.node_get(context, identity,
project_safe=project_safe)
if not node:
node = db_api.node_get_by_name(context, identity,
project_safe=project_safe)
else:
node = db_api.node_get_by_name(context, identity,
project_safe=project_safe)
if not node:
node = db_api.node_get_by_short_id(
context, identity, project_safe=project_safe)
if node is None:
raise exception.NodeNotFound(node=identity)
return node
@request_context
def node_list(self, context, cluster_id=None, filters=None, sort=None,
limit=None, marker=None, project_safe=True):
"""List node records matching the specified criteria.
:param context: An instance of the request context.
:param cluster_id: An optional parameter specifying the ID of the
cluster from which nodes are chosen.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the node after which the result
list starts.
:param project_safe: A boolean indicating whether nodes from all
projects will be returned.
:return: A list of `Node` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.NODE_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
# Maybe the cluster_id is a name or a short ID
if cluster_id:
db_cluster = self.cluster_find(context, cluster_id)
cluster_id = db_cluster.id
nodes = node_mod.Node.load_all(context, cluster_id=cluster_id,
limit=limit, marker=marker, sort=sort,
filters=filters,
project_safe=project_safe)
return [node.to_dict() for node in nodes]
@request_context
def node_create(self, context, name, profile_id, cluster_id=None,
role=None, metadata=None, host=None, container_name=None):
"""Create a node with provided properties.
:param context: An instance of the request context.
:param name: Name for the node to be created.
:param profile_id: The ID, name or short-id of the profile to be used.
:param cluster_id: The ID, name or short-id of the cluster in which
the new node will be a member. This could be None
                           if the node is to be an orphan node.
:param role: The role for the node to play in the cluster.
:param metadata: A dictionary containing the key-value pairs to be
associated with the node.
:return: A dictionary containing the details about the node to be
created along with the ID of the action triggered by this
request.
"""
if cfg.CONF.name_unique:
if db_api.node_get_by_name(context, name):
msg = _("The node named (%(name)s) already exists."
) % {"name": name}
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating node '%s'."), name)
if cluster_id is None:
cluster_id = ''
try:
node_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile (%s) is not found.") % profile_id
raise exception.BadRequest(msg=msg)
index = -1
if cluster_id:
try:
db_cluster = self.cluster_find(context, cluster_id)
except exception.ClusterNotFound:
msg = _("The specified cluster (%s) is not found."
) % cluster_id
raise exception.BadRequest(msg=msg)
cluster_id = db_cluster.id
if node_profile.id != db_cluster.profile_id:
cluster_profile = self.profile_find(context,
db_cluster.profile_id)
if node_profile.type != cluster_profile.type:
msg = _('Node and cluster have different profile type, '
'operation aborted.')
LOG.error(msg)
raise exception.ProfileTypeNotMatch(message=msg)
index = db_api.cluster_next_index(context, cluster_id)
# Create a node instance
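        # When a host node is specified, record its IP address and ID (and the
        # optional container name) in the node metadata.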
        metadata = metadata or {}
        if host:
host_node = self.node_find(context, host)
host_ip = self.get_host_ip(context, host)
metadata.update(host_ip=host_ip)
metadata.update(host_node=host_node.id)
if container_name:
metadata.update(container_name=container_name)
kwargs = {
'index': index,
'role': role,
'metadata': metadata or {},
'user': context.user,
'project': context.project,
'domain': context.domain,
}
node = node_mod.Node(name, node_profile.id, cluster_id, context,
**kwargs)
node.store(context)
params = {
'name': 'node_create_%s' % node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, node.id,
consts.NODE_CREATE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node create action queued: %s."), action_id)
result = node.to_dict()
result['action'] = action_id
return result
@request_context
def node_get(self, context, identity, show_details=False):
"""Get the details about a node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a node.
:param show_details: Optional parameter indicating whether the details
about the physical object should be returned.
:return: A dictionary containing the detailed information about a node
or an exception of `NodeNotFound` if no matching node could
be found.
"""
db_node = self.node_find(context, identity)
node = node_mod.Node.load(context, node=db_node)
res = node.to_dict()
if show_details and node.physical_id:
res['details'] = node.get_details(context)
return res
@request_context
def node_update(self, context, identity, name=None, profile_id=None,
role=None, metadata=None):
"""Update a node with new propertye values.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the node.
:param name: Optional string specifying the new name for the node.
:param profile_id: The UUID, name or short-id of the new profile to
be used.
:param role: The new role for the node, if specified.
:param metadata: A dictionary of key-value pairs to be associated with
the node.
:return: A dictionary containing the updated representation of the
node along with the ID of the action triggered by this
request.
"""
LOG.info(_LI("Updating node '%s'."), identity)
db_node = self.node_find(context, identity)
if profile_id:
try:
db_profile = self.profile_find(context, profile_id)
except exception.ProfileNotFound:
msg = _("The specified profile (%s) is not found."
) % profile_id
raise exception.BadRequest(msg=msg)
profile_id = db_profile.id
# check if profile_type matches
old_profile = self.profile_find(context, db_node.profile_id)
if old_profile.type != db_profile.type:
msg = _('Cannot update a node to a different profile type, '
'operation aborted.')
LOG.error(msg)
raise exception.ProfileTypeNotMatch(message=msg)
inputs = {'new_profile_id': profile_id}
else:
inputs = {}
if name is not None and name != db_node.name:
inputs['name'] = name
if role is not None and role != db_node.role:
inputs['role'] = role
if metadata is not None and metadata != db_node.metadata:
inputs['metadata'] = metadata
if inputs == {}:
msg = _("No property needs an update.")
raise exception.BadRequest(msg=msg)
params = {
'name': 'node_update_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs,
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_UPDATE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node update action is queued: %s."), action_id)
node = node_mod.Node.load(context, node=db_node)
resp = node.to_dict()
resp['action'] = action_id
return resp
@request_context
def node_delete(self, context, identity, container_name=None):
"""Delete the specified node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the node.
:return: A dictionary containing the ID of the action triggered by
this request.
"""
LOG.info(_LI('Deleting node %s'), identity)
db_node = self.node_find(context, identity)
params = {
'name': 'node_delete_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_DELETE, **params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node delete action is queued: %s."), action_id)
return {'action': action_id}
@request_context
def node_check(self, context, identity, params=None):
"""Check the health status of specified node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the node.
        :param params: A dictionary providing additional input parameters
for the checking operation.
:return: A dictionary containing the ID of the action triggered by
this request.
"""
LOG.info(_LI("Checking node '%s'."), identity)
db_node = self.node_find(context, identity)
kwargs = {
'name': 'node_check_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params,
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_CHECK, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node check action is queued: %s."), action_id)
return {'action': action_id}
@request_context
def node_recover(self, context, identity, params=None):
"""Recover the specified node.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a node.
:param params: A dictionary containing the optional parameters for
the requested recover operation.
:return: A dictionary containing the ID of the action triggered by the
recover request.
"""
LOG.info(_LI("Recovering node '%s'."), identity)
db_node = self.node_find(context, identity)
kwargs = {
'name': 'node_recover_%s' % db_node.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': params
}
action_id = action_mod.Action.create(context, db_node.id,
consts.NODE_RECOVER, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Node recover action is queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_policy_list(self, context, identity, filters=None, sort=None):
"""List cluster-policy bindings given the cluster identity.
:param context: An instance of the request context.
:param identity: The ID, name or short ID of the target cluster.
:param filters: A list of key-value pairs for filtering out the result
list.
:param sort: A list of sorting keys (optionally appended with sorting
directions) separated by commas.
:return: A list containing dictionaries each representing a binding.
"""
utils.validate_sort_param(sort, consts.CLUSTER_POLICY_SORT_KEYS)
db_cluster = self.cluster_find(context, identity)
bindings = cluster_policy.ClusterPolicy.load_all(
context, db_cluster.id, filters=filters, sort=sort)
return [binding.to_dict() for binding in bindings]
@request_context
def cluster_policy_get(self, context, identity, policy_id):
"""Get the binding record giving the cluster and policy identity.
:param context: An instance of the request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy_id: The ID, name or short ID of the target policy.
:return: A dictionary containing the binding record, or raises an
exception of ``PolicyNotAttached``.
"""
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy_id)
try:
binding = cluster_policy.ClusterPolicy.load(
context, db_cluster.id, db_policy.id)
except exception.PolicyNotAttached:
raise exception.PolicyBindingNotFound(policy=policy_id,
identity=identity)
return binding.to_dict()
@request_context
def cluster_policy_attach(self, context, identity, policy, enabled=True):
"""Attach a policy to the specified cluster.
This is done via an action because a cluster lock is needed.
:param context: An instance of request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy: The ID, name or short ID of the target policy.
:param enabled: Optional parameter specifying whether the policy is
enabled when attached.
        :return: A dictionary containing the ID of the action fired.
"""
LOG.info(_LI("Attaching policy (%(policy)s) to cluster "
"(%(cluster)s)."),
{'policy': policy, 'cluster': identity})
db_cluster = self.cluster_find(context, identity)
try:
db_policy = self.policy_find(context, policy)
except exception.PolicyNotFound:
msg = _("The specified policy (%s) is not found.") % policy
raise exception.BadRequest(msg=msg)
params = {
'name': 'attach_policy_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {
'policy_id': db_policy.id,
                'enabled': utils.parse_bool_param('enabled', enabled),
}
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_ATTACH_POLICY,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Policy attach action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_policy_detach(self, context, identity, policy):
"""Detach a policy from the specified cluster.
This is done via an action because cluster lock is needed.
:param context: An instance of request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy: The ID, name or short ID of the target policy.
        :return: A dictionary containing the ID of the action fired.
"""
LOG.info(_LI("Detaching policy '%(policy)s' from cluster "
"'%(cluster)s'."),
{'policy': policy, 'cluster': identity})
db_cluster = self.cluster_find(context, identity)
try:
db_policy = self.policy_find(context, policy)
except exception.PolicyNotFound:
msg = _("The specified policy (%s) is not found.") % policy
raise exception.BadRequest(msg=msg)
binding = db_api.cluster_policy_get(context, db_cluster.id,
db_policy.id)
if binding is None:
msg = _("The policy (%(p)s) is not attached to the specified "
"cluster (%(c)s).") % {'p': policy, 'c': identity}
raise exception.BadRequest(msg=msg)
params = {
'name': 'detach_policy_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': {'policy_id': db_policy.id},
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_DETACH_POLICY,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Policy dettach action queued: %s."), action_id)
return {'action': action_id}
@request_context
def cluster_policy_update(self, context, identity, policy, enabled=None):
"""Update an existing policy binding on a cluster.
This is done via an action because cluster lock is needed.
:param context: An instance of request context.
:param identity: The ID, name or short ID of the target cluster.
:param policy: The ID, name or short ID of the target policy.
:param enabled: Optional parameter specifying whether the policy is
enabled after the update.
        :return: A dictionary containing the ID of the action fired.
"""
LOG.info(_LI("Updating policy '%(policy)s' on cluster '%(cluster)s.'"),
{'policy': policy, 'cluster': identity})
db_cluster = self.cluster_find(context, identity)
try:
db_policy = self.policy_find(context, policy)
except exception.PolicyNotFound:
msg = _("The specified policy (%s) is not found.") % policy
raise exception.BadRequest(msg=msg)
binding = db_api.cluster_policy_get(context, db_cluster.id,
db_policy.id)
if binding is None:
msg = _("The policy (%(p)s) is not attached to the specified "
"cluster (%(c)s).") % {'p': policy, 'c': identity}
raise exception.BadRequest(msg=msg)
inputs = {'policy_id': db_policy.id}
if enabled is not None:
inputs['enabled'] = utils.parse_bool_param('enabled', enabled)
params = {
'name': 'update_policy_%s' % db_cluster.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs
}
action_id = action_mod.Action.create(context, db_cluster.id,
consts.CLUSTER_UPDATE_POLICY,
**params)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Policy update action queued: %s."), action_id)
return {'action': action_id}
def action_find(self, context, identity, project_safe=True):
"""Find an action with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an action.
:return: A DB object of action or an exception `ActionNotFound` if no
matching action is found.
"""
if uuidutils.is_uuid_like(identity):
action = db_api.action_get(context, identity,
project_safe=project_safe)
if not action:
action = db_api.action_get_by_name(context, identity,
project_safe=project_safe)
else:
action = db_api.action_get_by_name(context, identity,
project_safe=project_safe)
if not action:
action = db_api.action_get_by_short_id(
context, identity, project_safe=project_safe)
if not action:
raise exception.ActionNotFound(action=identity)
return action
@request_context
def action_list(self, context, filters=None, limit=None, marker=None,
sort=None, project_safe=True):
"""List action records matching the specified criteria.
:param context: An instance of the request context.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the action after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param project_safe: A boolean indicating whether actions from all
projects will be returned.
:return: A list of `Action` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.ACTION_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
results = action_mod.Action.load_all(context, filters=filters,
limit=limit, marker=marker,
sort=sort,
project_safe=project_safe)
return [a.to_dict() for a in results]
@request_context
def action_create(self, context, name, cluster, action, inputs=None):
"""Create an action with given details.
:param context: Request context instance.
:param name: Name of the action.
:param cluster: Name, ID or short ID of the targeted cluster.
:param action: String representation of the action.
:param inputs: Optional inputs for the action.
:return: A dict containing the action created.
"""
LOG.info(_LI("Creating action '%s'."), name)
target = self.cluster_find(context, cluster)
# Create an action instance
params = {
'name': name,
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': inputs or {},
}
action_id = action_mod.Action.create(context, target.id, action,
**params)
# TODO(Anyone): Uncomment this to notify the dispatcher
# dispatcher.start_action(action_id=action.id)
LOG.info(_LI("Action '%(name)s' is created: %(id)s."),
{'name': name, 'id': action_id})
return {'action': action_id}
@request_context
def action_get(self, context, identity):
"""Get the details about specified action.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an action.
:return: A dictionary containing the details about an action, or an
exception `ActionNotFound` if no matching action is found.
"""
db_action = self.action_find(context, identity)
action = action_mod.Action.load(context, db_action=db_action)
return action.to_dict()
@request_context
def action_delete(self, context, identity):
"""Delete the specified action object.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an action object.
:return: None if deletion was successful, or an exception of type
`ResourceInUse`.
"""
db_action = self.action_find(context, identity)
LOG.info(_LI("Deleting action '%s'."), identity)
try:
action_mod.Action.delete(context, db_action.id)
except exception.ResourceBusyError:
raise exception.ResourceInUse(resource_type='action',
resource_id=db_action.id)
LOG.info(_LI("Action '%s' is deleted."), identity)
def receiver_find(self, context, identity, project_safe=True):
"""Find a receiver with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a receiver.
        :param project_safe: A boolean indicating whether receivers from
                             projects other than the requesting one can be
                             returned.
:return: A DB object of receiver or an exception `ReceiverNotFound`
                 if no matching receiver is found.
"""
if uuidutils.is_uuid_like(identity):
receiver = db_api.receiver_get(context, identity,
project_safe=project_safe)
if not receiver:
receiver = db_api.receiver_get_by_name(
context, identity, project_safe=project_safe)
else:
receiver = db_api.receiver_get_by_name(
context, identity, project_safe=project_safe)
if not receiver:
receiver = db_api.receiver_get_by_short_id(
context, identity, project_safe=project_safe)
if not receiver:
raise exception.ReceiverNotFound(receiver=identity)
return receiver
@request_context
def receiver_list(self, context, limit=None, marker=None, sort=None,
filters=None, project_safe=True):
"""List receivers matching the specified criteria.
:param context: An instance of the request context.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the receiver after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param project_safe: A boolean indicating whether receivers from all
projects will be returned.
:return: A list of `Receiver` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.RECEIVER_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
receivers = receiver_mod.Receiver.load_all(context, limit=limit,
marker=marker,
sort=sort, filters=filters,
project_safe=project_safe)
return [r.to_dict() for r in receivers]
@request_context
def receiver_create(self, context, name, type_name, cluster_id, action,
actor=None, params=None):
"""Create a receiver.
:param context: An instance of the request context.
:param name: Name of the receiver.
:param type_name: Name of the receiver type, subject to validation.
:param cluster_id: UUID, name or short-id of a cluster.
:param action: Name or ID of an action, currently only builtin action
names are supported.
:param actor: Future extension.
:param params: A dictionary containing key-value pairs as inputs to
the action.
:return: A dictionary containing the details about the receiver
created.
"""
if cfg.CONF.name_unique:
if db_api.receiver_get_by_name(context, name):
msg = _("A receiver named '%s' already exists.") % name
raise exception.BadRequest(msg=msg)
LOG.info(_LI("Creating receiver %(n)s: \n"
" type: %(t)s\n cluster: %(c)s\n action: %(a)s."),
{'n': name, 't': type_name, 'c': cluster_id, 'a': action})
rtype = type_name.lower()
if rtype not in consts.RECEIVER_TYPES:
msg = _("Receiver type '%s' is not supported.") % rtype
raise exception.BadRequest(msg=msg)
# Check whether cluster identified by cluster_id does exist
cluster = None
try:
cluster = self.cluster_find(context, cluster_id)
except exception.ClusterNotFound:
msg = _("The referenced cluster '%s' is not found.") % cluster_id
raise exception.BadRequest(msg=msg)
# permission checking
if not context.is_admin and context.user != cluster.user:
raise exception.Forbidden()
# Check action name
if action not in consts.ACTION_NAMES:
msg = _("Illegal action '%s' specified.") % action
raise exception.BadRequest(msg=msg)
if action.lower().split('_')[0] != 'cluster':
msg = _("Action '%s' is not applicable to clusters.") % action
raise exception.BadRequest(msg=msg)
if not params:
params = {}
kwargs = {
'name': name,
'user': context.user,
'project': context.project,
'domain': context.domain,
'params': params
}
receiver = receiver_mod.Receiver.create(context, rtype, cluster,
action, **kwargs)
LOG.info(_LI("Receiver (%(n)s) is created: %(i)s."),
{'n': name, 'i': receiver.id})
return receiver.to_dict()
@request_context
def receiver_get(self, context, identity, project_safe=True):
"""Get the details about a receiver.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a receiver.
        :param project_safe: Whether a matching object from another project
                             can be returned.
:return: A dictionary containing the details about a receiver or
an exception `ReceiverNotFound` if no matching object found.
"""
db_receiver = self.receiver_find(context, identity,
project_safe=project_safe)
receiver = receiver_mod.Receiver.load(context,
receiver_obj=db_receiver,
project_safe=project_safe)
return receiver.to_dict()
@request_context
def receiver_delete(self, context, identity):
"""Delete the specified receiver.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of a receiver.
:return: None if successfully deleted the receiver or an exception of
`ReceiverNotFound` if the object could not be found.
"""
db_receiver = self.receiver_find(context, identity)
LOG.info(_LI("Deleting receiver %s."), identity)
db_api.receiver_delete(context, db_receiver.id)
LOG.info(_LI("Receiver %s is deleted."), identity)
@request_context
def webhook_trigger(self, context, identity, params=None):
LOG.info(_LI("Triggering webhook (%s)."), identity)
receiver = self.receiver_find(context, identity)
try:
cluster = self.cluster_find(context, receiver.cluster_id)
except exception.ClusterNotFound:
msg = _("The referenced cluster (%s) is not found."
) % receiver.cluster_id
raise exception.BadRequest(msg=msg)
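        # Merge any per-request parameters over the receiver's stored parameters.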
data = copy.deepcopy(receiver.params)
if params:
data.update(params)
kwargs = {
'name': 'webhook_%s' % receiver.id[:8],
'cause': action_mod.CAUSE_RPC,
'status': action_mod.Action.READY,
'inputs': data,
}
action_id = action_mod.Action.create(context, cluster.id,
receiver.action, **kwargs)
dispatcher.start_action(action_id=action_id)
LOG.info(_LI("Webhook %(w)s' triggered with action queued: %(a)s."),
{'w': identity, 'a': action_id})
return {'action': action_id}
def event_find(self, context, identity, project_safe=True):
"""Find an event with the given identity.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of the event.
:param project_safe: A boolean specifying that only events from the
same project as the requesting one are qualified
to be returned.
        :return: A DB object of the event, or an exception of `EventNotFound`
                 if no matching event can be found.
"""
event = None
if uuidutils.is_uuid_like(identity):
event = db_api.event_get(context, identity,
project_safe=project_safe)
if not event:
event = db_api.event_get_by_short_id(context, identity,
project_safe=project_safe)
if not event:
raise exception.EventNotFound(event=identity)
return event
@request_context
def event_list(self, context, filters=None, limit=None, marker=None,
sort=None, project_safe=True):
"""List event records matching the specified criteria.
:param context: An instance of the request context.
:param filters: A dictionary of key-value pairs for filtering out the
result list.
:param limit: An integer specifying the maximum number of objects to
return in a response.
:param marker: An UUID specifying the event after which the result
list starts.
:param sort: A list of sorting keys (each optionally attached with a
sorting direction) separated by commas.
:param project_safe: A boolean indicating whether events from all
projects will be returned.
:return: A list of `Event` object representations.
"""
limit = utils.parse_int_param('limit', limit)
utils.validate_sort_param(sort, consts.EVENT_SORT_KEYS)
project_safe = utils.parse_bool_param('project_safe', project_safe)
if not project_safe and not context.is_admin:
raise exception.Forbidden()
if filters and consts.EVENT_LEVEL in filters:
value = filters.pop(consts.EVENT_LEVEL)
value = utils.parse_level_values(value)
if value is not None:
filters[consts.EVENT_LEVEL] = value
all_events = db_api.event_get_all(context, filters=filters,
limit=limit, marker=marker,
sort=sort, project_safe=project_safe)
results = [event.as_dict() for event in all_events]
return results
@request_context
def event_get(self, context, identity):
"""Get the details about a specified event.
:param context: An instance of the request context.
:param identity: The UUID, name or short-id of an event.
:return: A dictionary containing the details about the event or an
exception of `EventNotFound` if no matching record could be
found.
"""
db_event = self.event_find(context, identity)
return db_event.as_dict()
def get_host_ip(self, context, host):
if host:
db_node = self.node_find(context, host)
physical_id = db_node.physical_id
if not physical_id:
return
node = node_mod.Node.load(context, node_id=db_node.id)
details = node.get_details(context)
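            # Look for the node's floating IP among the physical resource outputs.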
for output in details.outputs:
if output['output_key'] == 'floating_ip':
server_ip = output['output_value']
return server_ip
@request_context
def container_list(self, context, limit, host):
server_ip = self.get_host_ip(context, host)
if server_ip:
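            # Query the Docker daemon on the host directly over its TCP API
            # (port 2375) and tag each container with the host's IP.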
url = 'tcp://' + server_ip + ':2375'
docker_cli = Client(base_url=url)
containers = docker_cli.containers(all=True)
for j in range(len(containers)):
containers[j]['Server'] = server_ip
return containers
|
apache-2.0
| -2,164,811,942,441,370,400
| 41.933124
| 79
| 0.569212
| false
| 4.507563
| false
| false
| false
|
Joacchim/Comix
|
src/archive.py
|
1
|
20550
|
# coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile
import gtk
try:
from py7zlib import Archive7z
except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
    The Extractor can be loaded with paths to archives (currently ZIP, 7z,
    tar, RAR or MobiPocket archives) and a path to a destination directory.
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
def __init__(self):
self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
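        # Open the archive with the appropriate backend and build the initial
        # list of member files.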
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
" <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
START = "----------"
names = []
started = False
item = {}
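        # Parse the '-slt' (technical listing) output: entries are separated
        # by a dashed line and consist of 'Key = Value' pairs; directory
        # entries are skipped.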
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
        The second parameter, <extracted>, is a trick used for sub-archive
        handling: files can be marked as already extracted, so that no
        blocking wait happens for files not present in the original archive.
        Note: Random access on gzip or bzip2 compressed tar archives is
        not a good idea. These formats are supported *only* for backwards
        compatibility. They are fine formats for some purposes, but should
        not be used for scanned comic books. So, we cheat and ignore the
        ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
return self._extracted.get(name, False)
def get_mime_type(self):
"""Return the mime type name of the extractor's current archive."""
return self._type
def stop(self):
"""Signal the extractor to stop extracting and kill the extracting
thread. Blocks until the extracting thread has terminated.
"""
self._stop = True
if self._setupped:
self._extract_thread.join()
            self._setupped = False
def extract(self):
"""Start extracting the files in the file list one by one using a
new thread. Every time a new file is extracted a notify() will be
signalled on the Condition that was returned by setup().
"""
self._extract_thread = threading.Thread(target=self._thread_extract)
self._extract_thread.setDaemon(False)
self._extract_thread.start()
def close(self):
"""Close any open file objects, need only be called manually if the
extract() method isn't called.
"""
if self._type == ZIP:
self._zfile.close()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile.close()
elif self._type == MOBI and self._mobifile is not None:
self._mobifile.close()
def _thread_extract(self):
"""Extract the files in the file list one by one."""
        # Extract 7z and RAR archives as a whole: if the archive is solid,
        # extracting files one at a time is very slow.
if self._type in (SEVENZIP,) and _7z_exec is not None:
cmd = [_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
        elif self._type in (RAR,) and _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
cmd = [_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
os.chdir(cwd)
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
else:
for name in self._files:
self._extract_file(name)
self.close()
def _extract_file(self, name):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if self._stop:
self.close()
sys.exit(0)
try:
if self._type in (ZIP, SEVENZIP):
dst_path = os.path.join(self._dst, name)
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
new = open(dst_path, 'wb')
if self._type == ZIP:
new.write(self._zfile.read(name, '-'))
elif self._type == SEVENZIP:
if Archive7z is not None:
new.write(self._szfile.getmember(name).read())
else:
if _7z_exec is not None:
proc = process.Process([_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src, name])
proc.spawn()
proc.wait()
else:
print('! Could not find 7Z file extractor.')
new.close()
elif self._type in (TAR, GZIP, BZIP2):
if os.path.normpath(os.path.join(self._dst, name)).startswith(
self._dst):
self._tfile.extract(name, self._dst)
else:
print('! Non-local tar member: {}\n'.format(name))
elif self._type == RAR:
if _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
proc = process.Process([_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src, name])
proc.spawn()
proc.wait()
os.chdir(cwd)
else:
print('! Could not find RAR file extractor.')
elif self._type == MOBI:
dst_path = os.path.join(self._dst, name)
self._mobifile.extract(name, dst_path)
except Exception:
# Better to ignore any failed extractions (e.g. from a corrupt
# archive) than to crash here and leave the main thread in a
# possible infinite block. Damaged or missing files *should* be
# handled gracefully by the main program anyway.
pass
self._condition.acquire()
self._extracted[name] = True
self._condition.notify()
self._condition.release()
    def extract_file_io(self, chosen):
        """Return a StringIO buffer with the contents of the file named
        <chosen>, read from the destination directory if the file has
        already been extracted, or directly from the archive otherwise.
        """
        if os.path.exists(os.path.join(self._dst, chosen)):
            return cStringIO.StringIO(
                open(os.path.join(self._dst, chosen), 'rb').read())
if self._type == ZIP:
return cStringIO.StringIO(self._zfile.read(chosen))
elif self._type in [TAR, GZIP, BZIP2]:
return cStringIO.StringIO(self._tfile.extractfile(chosen).read())
elif self._type == RAR:
proc = process.Process([_rar_exec, 'p', '-inul', '-p-', '--',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
elif self._type == SEVENZIP:
if Archive7z is not None:
return cStringIO.StringIO(self._szfile.getmember(chosen).read())
elif _7z_exec is not None:
proc = process.Process([_7z_exec, 'e', '-bd', '-p-', '-so',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
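# Illustrative sketch (added for clarity, not part of the original module):
# a minimal example of driving the Extractor class above. The helper name is
# hypothetical, and it assumes Extractor() takes no constructor arguments and
# that setup(src, dst) returns a threading.Condition that is notified once
# per extracted file, as the docstrings above describe.
def _example_extract_archive(archive_path, destination_dir):
    extractor = Extractor()
    condition = extractor.setup(archive_path, destination_dir)
    files = extractor.get_files()
    files.sort()                 # choose our own extraction order
    extractor.set_files(files)
    extractor.extract()          # extraction happens in a background thread
    condition.acquire()
    try:
        for name in files:
            # Block until the background thread reports <name> as ready.
            while not extractor.is_ready(name):
                condition.wait()
    finally:
        condition.release()
    return files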
class Packer(object):
"""Packer is a threaded class for packing files into ZIP archives.
It would be straight-forward to add support for more archive types,
but basically all other types are less well fitted for this particular
task than ZIP archives are (yes, really).
"""
def __init__(self, image_files, other_files, archive_path, base_name):
"""Setup a Packer object to create a ZIP archive at <archive_path>.
All files pointed to by paths in the sequences <image_files> and
<other_files> will be included in the archive when packed.
        The files in <image_files> will be renamed to the form
        "NN - <base_name>.ext", so that the lexical ordering of their
        filenames matches their order in the list.
        The files in <other_files> will be included as they are,
        assuming their filenames do not clash with other filenames in
        the archive. All files are placed in the archive root.
"""
self._image_files = image_files
self._other_files = other_files
self._archive_path = archive_path
self._base_name = base_name
self._pack_thread = None
self._packing_successful = False
def pack(self):
"""Pack all the files in the file lists into the archive."""
self._pack_thread = threading.Thread(target=self._thread_pack)
self._pack_thread.setDaemon(False)
self._pack_thread.start()
def wait(self):
"""Block until the packer thread has finished. Return True if the
packer finished its work successfully.
"""
if self._pack_thread is not None:
self._pack_thread.join()
return self._packing_successful
def _thread_pack(self):
try:
zfile = zipfile.ZipFile(self._archive_path, 'w')
except Exception:
print('! Could not create archive {}'.format(self._archive_path))
return
used_names = []
pattern = '{{:0{}d}} - {}{{}}'.format(len(str(len(self._image_files))), self._base_name)
for i, path in enumerate(self._image_files):
filename = pattern.format(i + 1, os.path.splitext(path)[1])
try:
zfile.write(path, filename, zipfile.ZIP_STORED)
except Exception:
                print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
for path in self._other_files:
filename = os.path.basename(path)
while filename in used_names:
filename = '_{}'.format(filename)
try:
zfile.write(path, filename, zipfile.ZIP_DEFLATED)
except Exception:
                print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
zfile.close()
self._packing_successful = True
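# Illustrative sketch (added for clarity, not part of the original module):
# typical use of the Packer class above. The argument values are up to the
# caller; pack() runs in a background thread and wait() blocks until the
# ZIP archive has been written, returning True on success.
def _example_pack_archive(image_paths, other_paths, archive_path, base_name):
    packer = Packer(image_paths, other_paths, archive_path, base_name)
    packer.pack()
    return packer.wait()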
def archive_mime_type(path):
"""Return the archive type of <path> or None for non-archives."""
try:
if os.path.isfile(path):
if not os.access(path, os.R_OK):
return None
if zipfile.is_zipfile(path):
return ZIP
fd = open(path, 'rb')
magic = fd.read(4)
fd.seek(60)
magic2 = fd.read(8)
fd.close()
if tarfile.is_tarfile(path) and os.path.getsize(path) > 0:
if magic.startswith('BZh'):
return BZIP2
if magic.startswith('\037\213'):
return GZIP
return TAR
if magic == 'Rar!':
return RAR
if magic == '7z\xbc\xaf':
return SEVENZIP
if magic2 == 'BOOKMOBI':
return MOBI
except Exception:
print('! Error while reading {}'.format(path))
return None
def get_name(archive_type):
"""Return a text representation of an archive type."""
return {ZIP: _('ZIP archive'),
TAR: _('Tar archive'),
GZIP: _('Gzip compressed tar archive'),
BZIP2: _('Bzip2 compressed tar archive'),
RAR: _('RAR archive'),
SEVENZIP: _('7-Zip archive'),
MOBI: _('MobiPocket file'),
}[archive_type]
def get_archive_info(path):
"""Return a tuple (mime, num_pages, size) with info about the archive
at <path>, or None if <path> doesn't point to a supported archive.
"""
image_re = re.compile('\.(' + '|'.join(get_supported_format_extensions_preg()) + ')\s*$', re.I)
extractor = Extractor()
extractor.setup(path, None)
mime = extractor.get_mime_type()
if mime is None:
return None
files = extractor.get_files()
extractor.close()
num_pages = len(filter(image_re.search, files))
size = os.stat(path).st_size
return mime, num_pages, size
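# Illustrative sketch (added for clarity, not part of the original module):
# combining get_archive_info() and get_name() above to describe an archive.
# The helper name is hypothetical.
def _example_describe_archive(path):
    info = get_archive_info(path)
    if info is None:
        return None
    mime, num_pages, size = info
    return '%s, %d image(s), %d bytes' % (get_name(mime), num_pages, size)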
def _get_rar_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('unrar', 'rar'):
if process.Process([command]).spawn() is not None:
return command
return None
def _get_7z_exec():
    """Return the name of the 7z file extractor executable, or None if
no such executable is found.
"""
for command in ('7z', '7za', '7zr'):
if process.Process([command]).spawn() is not None:
return command
return None
|
gpl-2.0
| 2,014,588,338,973,242,000
| 37.555347
| 107
| 0.522238
| false
| 4.377929
| false
| false
| false
|
debjyoti385/dartnews
|
crawling/newsParser.py
|
1
|
2286
|
from HTMLParser import HTMLParser
import sys, re,os
from os import listdir
import Levenshtein
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
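# For illustration (added note): strip_tags('<h1>Delhi <b>news</b></h1>')
# returns 'Delhi news'.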
f=open('latlongmapping.csv',"r")
L=[]
for line in f:
(lat,lng,address)=line.strip().split("|")
L.append((lat,lng,address))
f.close()
newsCount=0
if __name__=='__main__':
if len(sys.argv)<4:
print "Please provide <news webpages directory> <urlmapping file> <outputnewssummary file>"
sys.exit(1)
mypath=sys.argv[1]
urlmappingfile=open(sys.argv[2])
print 'calculating url mapping ...'
urlmapping={}
for line in urlmappingfile:
sp=line.strip().split(",")
urlmapping[sp[0]]=sp[1]
print 'url mapping calculated, starting parser...'
out=open(sys.argv[3],"w")
onlyfiles = [ os.path.join(mypath,f) for f in listdir(mypath) ]
fcount=0
for filepath in onlyfiles:
f=open(filepath)
content=f.read()
f.close()
headlineSearch=re.search('<h1[^<]*>(.*)</h1>',content)
headline=""
if headlineSearch:
headline=strip_tags(headlineSearch.group(1))
time=re.search('((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[^<]*IST)',content)
if time:
time=strip_tags(time.group(1))
else:
time=""
news=re.search('<div[\s]+class="Normal">(.*)</div>[\s]*<',content)
if news:
news=strip_tags(news.group(1))
else:
news=re.findall('<div [^<]*mod-articletext[^<]*>(.*)</div>[\w\s]*<',content)
newsstr=""
if news:
for n in news:
newsstr+=(" "+strip_tags(n))
news=newsstr
if news=='':
#print "Got empty news in",filepath
pass
if 'delhi' in headline.lower() or 'delhi' in news[:50].lower():
url=urlmapping[filepath.split("/")[-1]]
D={}
for (lat,lng,address) in L:
s=0
for keyword in address.split(",")[0:2]:
if keyword in news.lower():
s+=1
D[(lat,lng,address)]=s
entries=sorted(D,key=lambda x: D[x],reverse=True)
if entries!=[]:
                print entries[0], news, D[entries[0]]
#out.write(time+"\x01"+headline+'\x01'+news+"\x01"+url+"\n");
fcount+=1
if fcount%10000==0:
print 'Processed',fcount,'files'
out.close()
|
bsd-2-clause
| -7,346,165,303,894,431,000
| 25.581395
| 93
| 0.628171
| false
| 2.695755
| false
| false
| false
|
ExaScience/smurff
|
data/jaak/make.py
|
1
|
3536
|
#!/usr/bin/env python
import smurff.matrix_io as mio
import urllib.request
import scipy.io as sio
import os
from hashlib import sha256
import smurff
urls = [
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-346targets.mm",
"10c3e1f989a7a415a585a175ed59eeaa33eff66272d47580374f26342cddaa88",
"chembl-IC50-346targets.mm",
),
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-compound-feat.mm",
"f9fe0d296272ef26872409be6991200dbf4884b0cf6c96af8892abfd2b55e3bc",
"chembl-IC50-compound-feat.mm",
),
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-compounds.csv",
"e8f045a67ee149c6100684e07920036de72583366596eb5748a79be6e3b96f7c",
"chembl-IC50-compounds.csv",
),
(
"http://homes.esat.kuleuven.be/~jsimm/chembl-IC50-proteins-uniprot.csv",
"224b1b44abcab8448b023874f4676af30d64fe651754144f9cbdc67853b76ea8",
"chembl-IC50-proteins-uniprot.csv",
),
]
for url, expected_sha, output in urls:
if os.path.isfile(output):
actual_sha = sha256(open(output, "rb").read()).hexdigest()
if (expected_sha == actual_sha):
continue
print("download %s" % output)
urllib.request.urlretrieve(url, output)
ic50 = sio.mmread("chembl-IC50-346targets.mm")
feat = sio.mmread("chembl-IC50-compound-feat.mm")
ic50_100c = ic50.tocsr()[0:100,:]
ic50_100c_train, ic50_100c_test = smurff.make_train_test(ic50_100c, 0.2, 1234)
# 0,1 binary for probit
ic50_01 = ic50.copy()
ic50_01.data = (ic50_01.data >= 6) * 1.
# -1,+1
ic50_11 = ic50.copy()
ic50_11.data = ((ic50.data >= 6) * 2.) - 1.
feat_100 = feat.tocsr()[0:100,:]
feat_100 = feat_100[:,feat_100.getnnz(0)>0]
feat_100_dense = feat_100.todense()
generated_files = [
( "f0d2ad6cf8173a64e12b48821e683b642b593555c552f4abf1f10ba255af78fc", "chembl-IC50-100compounds-feat-dense.ddm", feat_100_dense,),
( "0dd148a0da1a11ce6c6c3847d0cc2820dc9c819868f964a653a0d42063ce5c42", "chembl-IC50-100compounds-feat.sdm", feat_100,),
( "973074474497b236bf75fecfe9cc17471783fd40dbdda158b81e0ebbb408d30b", "chembl-IC50-346targets-01.sdm", ic50_01,),
( "5d7c821cdce02b4315a98a94cba5747e82d423feb1a2158bf03a7640aa82625d", "chembl-IC50-346targets-100compounds.sdm", ic50_100c,),
( "c70dbc990a5190d1c5d83594259abf10da409d2ba853038ad8f0e36f76ab56a8", "chembl-IC50-346targets-100compounds-train.sdm", ic50_100c_train,),
( "b2d7f742f434e9b933c22dfd45fa28d9189860edd1e42a6f0a5477f6f6f7d122", "chembl-IC50-346targets-100compounds-test.sdm", ic50_100c_test,),
( "bcf5cee9702e318591b76f064859c1d0769158d0b0f5c44057392c2f9385a591", "chembl-IC50-346targets-11.sdm", ic50_11,),
( "1defd1c82ac3243ad60a23a753287df494d3b50f2fd5ff7f4a074182b07e3318", "chembl-IC50-346targets.sdm", ic50, ),
( "badfa23abb83e0b731e969e1117fd4269f2df16e1faf14eb54c53c60465e87f1", "chembl-IC50-compound-feat.sdm", feat, ),
]
for expected_sha, output, data in generated_files:
if os.path.isfile(output):
actual_sha = sha256(open(output, "rb").read()).hexdigest()
if (expected_sha == actual_sha):
continue
print("make %s" % output)
mio.write_matrix(output, data)
actual_sha = sha256(open(output, "rb").read()).hexdigest()
if (expected_sha != actual_sha):
print("Checksum mismatch for %s: expected %s, got %s" % (output, expected_sha, actual_sha))
|
mit
| 2,601,276,451,595,209,000
| 41.095238
| 145
| 0.680147
| false
| 2.413652
| false
| false
| false
|
liorsion/django-backlinks
|
src/backlinks/pingback/client.py
|
1
|
4452
|
import re
import xmlrpclib
import urllib
from backlinks.exceptions import fault_code_to_client_error, \
BacklinkClientError, BacklinkClientRemoteError, \
BacklinkClientConnectionError, BacklinkClientServerDoesNotExist,\
BacklinkClientAccessDenied, BacklinkClientInvalidResponse
from backlinks.conf import settings
from backlinks.utils import url_reader
# See http://hixie.ch/specs/pingback/pingback#TOC2.3
PINGBACK_RE = re.compile(r'<link rel="pingback" href="(?P<pingback_url>[^"]+)" ?/?>')
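# For illustration (added note): the pattern above matches markup such as
# <link rel="pingback" href="http://example.com/pingback-server" />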
# Override the user agent for xmlrpclib's ServerProxy
class BacklinksTransport(xmlrpclib.Transport):
user_agent = settings.USER_AGENT_STRING
class BacklinksSafeTransport(xmlrpclib.SafeTransport):
user_agent = settings.USER_AGENT_STRING
# Build a nice ServerProxy replacement that will use our transport classes
class BacklinksServerProxy(xmlrpclib.ServerProxy):
transport_class = BacklinksTransport
safe_transport_class = BacklinksSafeTransport
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0, use_datetime=0):
type, uri = urllib.splittype(uri)
if type not in ("http", "https"):
raise IOError("unsupported XML-RPC protocol")
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
if transport is None:
if type == "https":
transport = self.safe_transport_class(use_datetime=use_datetime)
else:
transport = self.transport_class(use_datetime=use_datetime)
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
self.__allow_none = allow_none
def __request(self, methodname, params):
# call a method on the remote server
request = xmlrpclib.dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<ServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return xmlrpclib._Method(self.__request, name)
class PingbackClient(object):
"""
A client for the Pingback protocol.
"""
# Default proxy class
proxy_class = BacklinksServerProxy
def __init__(self, proxy_class=None):
self.proxy_class = proxy_class or self.proxy_class
def autodiscover(self, link, response):
"""
Determine the Pingback server URL for a given response for a resource.
"""
pingback_url = response.headers.getheader('x-pingback', None)
if not pingback_url:
match = PINGBACK_RE.search(response.body)
if match:
pingback_url = match.group('pingback_url')
return pingback_url
def ping(self, ping_url, target_url, source_url, verbose=False, *args, **kwargs):
"""
Attempt to ping a resource using the given Pingback server URL.
"""
try:
server = self.proxy_class(ping_url, verbose=verbose)
result = server.pingback.ping(source_url, target_url)
return True
except xmlrpclib.Fault, e:
exception_class = fault_code_to_client_error.get(int(e.faultCode),
BacklinkClientError)
raise exception_class(reason=e.faultString)
except xmlrpclib.ProtocolError, e:
if e.errcode == 404:
raise BacklinkClientServerDoesNotExist
elif e.errcode == 500:
raise BacklinkClientRemoteError
elif e.errcode in (401, 403):
raise BacklinkClientAccessDenied
raise BacklinkClientConnectionError(reason=e.errmsg)
except xmlrpclib.ResponseError, e:
raise BacklinkClientInvalidResponse(reason=e.message)
except Exception, e:
raise BacklinkClientError(reason=str(e))
# A default instance of the Pingback client for convenience.
default_client = PingbackClient()
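# Illustrative sketch (added for clarity, not part of the original module):
# given an HTTP response object for <target_url> (anything exposing .headers
# and .body, as autodiscover() above expects), discover the Pingback endpoint
# and send a ping. The helper name is hypothetical.
def _example_send_pingback(source_url, target_url, response):
    ping_url = default_client.autodiscover(target_url, response)
    if ping_url is None:
        return False
    return default_client.ping(ping_url, target_url, source_url)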
|
bsd-3-clause
| -388,211,051,411,919,940
| 33.511628
| 85
| 0.621968
| false
| 4.313953
| false
| false
| false
|
haystack/eyebrowse-server
|
notifications/views.py
|
1
|
6176
|
import datetime
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.utils import timezone
from annoying.decorators import render_to
from accounts.models import UserProfile
from api.models import EyeHistory, PopularHistoryInfo
from api.utils import humanize_time
from common.constants import EMPTY_SEARCH_MSG
from common.view_helpers import _template_values
from eyebrowse.log import logger
from live_stream.query_managers import profile_stat_gen
from live_stream.query_managers import online_user
from notifications.models import Notification
from notifications.models import NoticeType, NOTICE_MEDIA
from notifications.utils import notice_setting_for_user
from stats.models import FavData
@login_required
@render_to('notifications/notifications.html')
def notifications(request):
user = get_object_or_404(User, username=request.user.username)
userprof = UserProfile.objects.get(user=user)
confirmed = userprof.confirmed
if not confirmed:
return redirect('/consent')
empty_search_msg = EMPTY_SEARCH_MSG['notifications']
# stats
tot_time, item_count = profile_stat_gen(user)
fav_data = FavData.objects.get(user=user)
num_history = EyeHistory.objects.filter(user=user).count()
is_online = online_user(user=user)
following_users = user.profile.follows.all()
following_count = following_users.count()
follower_count = UserProfile.objects.filter(follows=user.profile).count()
notifications = notification_renderer(user, empty_search_msg)
nots = Notification.objects.filter(recipient=user, seen=False)
for n in nots:
n.seen = True
n.save()
template_dict = {
"username": user.username,
"following_count": following_count,
"follower_count": follower_count,
"is_online": is_online,
"num_history": num_history,
"notifications": notifications,
"tot_time": tot_time,
"item_count": item_count,
"fav_data": fav_data,
}
return _template_values(request,
page_title="notifications",
navbar='notify',
sub_navbar="subnav_data",
not_count=0,
**template_dict)
def notification_renderer(user, empty_search_msg):
notifications = Notification.objects.filter(
recipient=user).select_related().order_by('-date_created')
for notif in notifications:
if notif.notice_type.label != "new_follower":
pop = PopularHistoryInfo.objects.filter(url=notif.url)
if pop.exists():
notif.description = pop[0].description
notif.img_url = pop[0].img_url
notif.favIconUrl = pop[0].favIconUrl
notif.title = pop[0].title
notif.hum_date = humanize_time(
timezone.now() - notif.date_created)
else:
notif.description = None
template_dict = {'notifications': notifications,
'empty_search_msg': empty_search_msg, }
return render_to_string('notifications/notification_list.html', template_dict)
class NoticeSettingsView(TemplateView):
template_name = "notifications/notice_settings.html"
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(NoticeSettingsView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
context['nav_account'] = 'active'
context['email_notifications'] = 'active'
context['user'] = request.user
context['page_title'] = 'Set Email Notifications'
return self.render_to_response(context)
@property
def scoping(self):
return None
def setting_for_user(self, notice_type, medium_id):
return notice_setting_for_user(
self.request.user,
notice_type,
medium_id,
scoping=self.scoping
)
def form_label(self, notice_type, medium_id):
return "setting-{0}-{1}".format(
notice_type.pk,
medium_id
)
def process_cell(self, label):
val = self.request.POST.get(label)
_, pk, medium_id = label.split("-")
notice_type = NoticeType.objects.get(pk=pk)
setting = self.setting_for_user(notice_type, medium_id)
if val == "on":
setting.send = True
else:
setting.send = False
setting.save()
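    # For illustration (added note): form_label() above produces labels such
    # as "setting-3-1" (notice type pk 3, medium id "1"); process_cell()
    # splits such a label back apart and updates that user's setting.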
def settings_table(self):
notice_types = NoticeType.objects.all()
table = []
for notice_type in notice_types:
row = []
for medium_id, medium_display in NOTICE_MEDIA:
setting = self.setting_for_user(notice_type, medium_id)
row.append((
self.form_label(notice_type, medium_id),
setting.send)
)
table.append({"notice_type": notice_type, "cells": row})
return table
def post(self, request, *args, **kwargs):
table = self.settings_table()
for row in table:
for cell in row["cells"]:
self.process_cell(cell[0])
return HttpResponseRedirect(request.POST.get("next_page", "."))
def get_context_data(self, **kwargs):
settings = {
"column_headers": [
medium_display
for _, medium_display in NOTICE_MEDIA
],
"rows": self.settings_table(),
}
context = super(NoticeSettingsView, self).get_context_data(**kwargs)
context.update({
"notice_types": NoticeType.objects.all(),
"notice_settings": settings
})
return context
|
mit
| 3,723,686,512,894,883,300
| 31.505263
| 82
| 0.622733
| false
| 4.106383
| false
| false
| false
|
vanossj/pyAtlasBoneSegmentation
|
src/ICP.py
|
1
|
22312
|
'''
Created on Feb 15, 2012
@author: Jeff
'''
import numpy
import numpyTransform
from scipy.spatial import cKDTree as KDTree
# from scipy.spatial import Delaunay
from scipy.spatial.distance import cdist
import scipy.optimize
import time
from math import pi
from MatlabFunctions import MatlabFmincon
import nlopt
import sys
class ICP(object):
'''
classdocs
'''
def __init__(self, modelPointCloud, dataPointCloud, **kwargs):
'''
Supported Signatures
modelPointCloud
The model point cloud is the base to which the data point cloud will be matched
dataPointCloud
The data point cloud is transformed so that it matches the model point cloud
Key Word Arguments:
maxIterations
maximum number of iterations to perform, default is 10
TODO: in the future provide an option to also account for minimum acceptable error
matchingMethod
'kdtree' Use a KD-Tree for nearest neighbor search {default}
'bruteforce' Use brute force for nearest neighbor search
minimizeMethod
'point' Use point to point minimization {default}
'plane' Use point to plane minimization
weightMethod
function that takes indices into the modelPointCloud and returns the weight of those indices
By default all points are weighted equally
modelDownsampleFactor
integer that represents uniform sampling of model point cloud
1 is no resampling, 2 is every other point, 3 is every third point...
dataDownsampleFactor
                integer that represents uniform sampling of the data point cloud
1 is no resampling, 2 is every other point, 3 is every third point...
ICP Process is five steps
1: Input Filter
2: Match
3: Outlier Filter
4: Error Minimization
5: Check if error is less than limits
                yes: we are done
no: go back to step 2 with new transformation function
'''
self.startTime = time.time()
if 'modelDownsampleFactor' in kwargs and int(kwargs['modelDownsampleFactor']) > 1:
factor = int(kwargs['modelDownsampleFactor'])
temp = numpy.zeros(factor, dtype=numpy.bool)
temp[-1] = True
modelDownSampleIndices = numpy.tile(temp, (modelPointCloud.shape[0] / factor) + 1)[:modelPointCloud.shape[0]]
else:
modelDownSampleIndices = numpy.ones(modelPointCloud.shape[0], dtype=numpy.bool)
if 'dataDownsampleFactor' in kwargs and int(kwargs['dataDownsampleFactor']) > 1:
factor = int(kwargs['dataDownsampleFactor'])
temp = numpy.zeros(factor, dtype=numpy.bool)
temp[-1] = True
dataDownSampleIndices = numpy.tile(temp, (dataPointCloud.shape[0] / factor) + 1)[:dataPointCloud.shape[0]]
else:
dataDownSampleIndices = numpy.ones(dataPointCloud.shape[0], dtype=numpy.bool)
# TODO: uniform sampling of point clouds
self.q = modelPointCloud[modelDownSampleIndices]
self.p = dataPointCloud[dataDownSampleIndices]
self.matlab = None
# get kwargs
if 'maxIterations' in kwargs:
self.K = int(kwargs['maxIterations'])
else:
self.K = 10
if 'matchingMethod' in kwargs:
if kwargs['matchingMethod'] == 'bruteforce':
self.matching = self.matchingBruteForce
else:
self.matching = self.matchingKDTree
self.qKDTree = KDTree(self.q)
else:
self.matching = self.matchingKDTree
self.qKDTree = KDTree(self.q)
if 'minimizeMethod' in kwargs:
if kwargs['minimizeMethod'] == 'plane': # point to plane
self.minimize = self.minimizePlane
elif kwargs['minimizeMethod'] == 'fmincon':
self.minimize = self.minimizeMatlab
self.matlab = MatlabFmincon()
elif kwargs['minimizeMethod'] == 'custom':
self.minimize = self.minimizeCustom
else: # point to point
self.minimize = self.minimizePoint
else:
self.minimize = self.minimizePoint
if 'weightMethod' in kwargs:
self.weightMethod = kwargs['weightMethod']
else:
self.weightMethod = self.weightEqual
# initialize translation and rotation matrix
self.transformMatrix = numpy.matrix(numpy.identity(4))
# initialize list of translations and rotation matrix for each iteration of ICP
self.totalTransformMatrix = [numpy.matrix(numpy.identity(4))]
self.pt = self.p.copy() # transformed point cloud
self.t = [] # array of times for each iteration of ICP
self.err = [] # error for each iteration of ICP
self.Np = self.p.shape[0] # number of points in data cloud
# preprocessing finish, log time
self.t.append(time.time() - self.startTime)
print 'Time for preprocessing:', self.t[-1]
def __del__(self):
if self.matlab is not None:
del self.matlab
def runICP(self, **kwargs):
tStart = time.time()
# get 'global' tolerances
if 'x0' in kwargs:
kwargs['initX0'] = kwargs['x0'].copy()
if 'lb' in kwargs:
kwargs['initLB'] = kwargs['lb'].copy()
if 'ub' in kwargs:
kwargs['initUB'] = kwargs['ub'].copy()
# main ICP loop
for k in xrange(self.K):
t1 = time.time()
minDistances, nearestNeighbor = self.matching(self.pt)
# get indices of the points we are interested in
p_idx = numpy.ones(self.p.shape[0], dtype=numpy.bool) # since there are no edges we are interested in all the points
q_idx = nearestNeighbor
print '\tTime to calc min distance:', time.time() - t1
# TODO: Input filtering
# reject some % of worst matches
# Multiresolution sampling
# add error for first iteration
if k == 0:
t1 = time.time()
self.err.append(numpy.sqrt(numpy.sum(minDistances ** 2) / minDistances.shape[0]))
print '\tInitial RMS error: %f, Time to calc: %f' % (self.err[-1], time.time() - t1)
# generate rotation matrix and translation
t1 = time.time()
weights = self.weightMethod(nearestNeighbor)
# get current cumulative rotation/translation in independent variable values, this way we can change the iteration bounds so that the global bounds are not violated
cummulativeX0 = numpy.zeros(9)
rotMat, tx, ty, tz, sx, sy, sz = numpyTransform.decomposeMatrix(self.totalTransformMatrix[-1])
rx, ry, rz = numpyTransform.rotationMat2Euler(rotMat)
cummulativeX0[0] = rx
cummulativeX0[1] = ry
cummulativeX0[2] = rz
cummulativeX0[3] = tx
cummulativeX0[4] = ty
cummulativeX0[5] = tz
cummulativeX0[6] = sx
cummulativeX0[7] = sy
cummulativeX0[8] = sz
R, T, S = self.minimize(self.q[q_idx], self.pt[p_idx], weights=weights, cummulativeX0=cummulativeX0, **kwargs)
print '\tTime to calc new transformation:', time.time() - t1
# create combined transformation matrix, apply this relative transformation to current transformation
transformMatrix = numpy.matrix(numpy.identity(4))
transformMatrix *= T
transformMatrix *= R
transformMatrix *= S
self.totalTransformMatrix.append(self.totalTransformMatrix[-1] * transformMatrix)
# apply last transformation
t1 = time.time()
self.pt = numpyTransform.transformPoints(self.totalTransformMatrix[-1], self.p)
print '\tTime to applying transform to all points:', time.time() - t1
# root mean of objective function
t1 = time.time()
self.err.append(self.rms_error(self.q[q_idx], self.pt[p_idx]))
print '\tIteration %d RMS error: %f, Time to calc: %f' % (k + 1, self.err[-1], time.time() - t1)
# TODO: add extrapolation
# store time to get to this iteration
self.t.append(time.time() - self.startTime)
print 'Iteration %d took %7.3f seconds' % (k + 1, self.t[-1] - self.t[-2])
print 'Total ICP run time:', time.time() - tStart
return self.totalTransformMatrix, self.err, self.t
def matchingKDTree(self, points):
minDistances, nearestNeighborIndex = self.qKDTree.query(points)
return minDistances, nearestNeighborIndex
def matchingBruteForce(self, points):
nearestNeighborIndex = numpy.zeros(points.shape[0])
distances = cdist(points, self.q) # calculate all combination of point distances
minDistances = distances.min(axis=1)
for i in xrange(points.shape[0]):
nearestNeighborIndex[i] = numpy.where(distances[i] == minDistances[i])[0][0]
return minDistances, nearestNeighborIndex
def minimizePoint(self, q, p, **kwargs):
R = numpy.matrix(numpy.identity(4))
T = numpy.matrix(numpy.identity(4))
S = numpy.matrix(numpy.identity(4))
if 'weights' in kwargs:
weights = kwargs['weights']
else:
raise Warning('weights argument not supplied')
# function [R,T] = eq_point(q,p,weights)
m = p.shape[0]
n = q.shape[0]
# normalize weights
weights = weights / weights.sum()
# find data centroid and deviations from centroid
q_bar = (numpy.mat(q.T) * numpy.mat(weights[:, numpy.newaxis])).getA().squeeze()
q_mark = q - numpy.tile(q_bar, n).reshape((n, 3))
# Apply weights
q_mark = q_mark * numpy.repeat(weights, 3).reshape((weights.shape[0], 3))
# find data centroid and deviations from centroid
p_bar = (numpy.mat(p.T) * numpy.mat(weights[:, numpy.newaxis])).getA().squeeze()
p_mark = p - numpy.tile(p_bar, m).reshape((m, 3))
# Apply weights
# p_mark = p_mark * numpy.repeat(weights, 3).reshape((weights.shape[0],3))
N = (numpy.mat(p_mark).T * numpy.mat(q_mark)).getA() # taking points of q in matched order
[U, Ss, V] = numpy.linalg.svd(N); # singular value decomposition
V = (numpy.mat(V).H).getA()
RMattemp = numpy.mat(V) * numpy.mat(U).T
Ttemp = (numpy.mat(q_bar).T - RMattemp * numpy.mat(p_bar).T).getA().squeeze()
R[:3, :3] = RMattemp.getA()
T = numpyTransform.translation(Ttemp)
return R, T, S
def minimizeMatlab(self, modelPoints, dataPoints, **kwargs):
if 'x0' in kwargs:
x0 = kwargs['x0']
else:
raise Exception('There are no variables to solve for')
# check for initial settings and bounds so that we can calculate current settings and bounds
if 'initX0' in kwargs:
initX0 = kwargs['initX0']
if 'initLB' in kwargs:
initLB = kwargs['initLB']
if 'initUB' in kwargs:
initUB = kwargs['initUB']
if 'cummulativeX0' in kwargs:
cummulativeX0 = kwargs['cummulativeX0']
# NOTE: I think this only works if x0/initX) is all zeros
ub = initUB - (cummulativeX0 - initX0)
lb = initLB - (cummulativeX0 - initX0)
# rounding errors can cause Bounds to be incorrect
i = ub < x0
if numpy.any(i):
print 'upper bounds less than x0'
ub[i] = x0[i] + 10 * numpy.spacing(x0[i])
i = lb > x0
if numpy.any(i):
print 'lower bounds less than x0'
lb[i] = x0[i] - 10 * numpy.spacing(x0[i])
# if x0.shape[0] > 6 or ('scaleOnly' in kwargs and kwargs['scaleOnly']):
# raise Exception('Scaling is not currently supported it will screw things up. Need some way to control scaling bounds so that it stays in global scaling bounds')
try:
if 'scaleOnly' in kwargs:
R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[-3:], lb[-3:], ub[-3:], scaleOnly=kwargs['scaleOnly'])
elif 'scaleOnlyIso' in kwargs:
R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[-1:], lb[-1:], ub[-1:], scaleOnlyIso=kwargs['scaleOnlyIso'])
else:
R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[:6], lb[:6], ub[:6]) # only rotation and translation
except:
sys.stderr.write('ERROR: Problem with matlab, closing matlab\n')
del self.matlab
self.matlab = None
return R, T, S
def minimizeCustom(self, p, q, **kwargs):
S = numpy.matrix(numpy.identity(4))
# TODO: try using functions from the nlopt module
def objectiveFunc(*args, **kwargs):
d = p
m = q
params = args[0]
if args[1].size > 0: # gradient
args[1][:] = numpy.array([pi / 100, pi / 100, pi / 100, 0.01, 0.01, 0.01]) # arbitrary gradient
# transform = numpy.matrix(numpy.identity(4))
translate = numpyTransform.translation(params[3:6])
rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
transform = translate * rotx * roty * rotz
Dicp = numpyTransform.transformPoints(transform, d)
# err = self.rms_error(m, Dicp)
err = numpy.mean(numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1)))
# err = numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1))
return err
x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
if 'optAlg' in kwargs:
opt = nlopt.opt(kwargs['optAlg'], 6)
else:
opt = nlopt.opt(nlopt.GN_CRS2_LM, 6)
opt.set_min_objective(objectiveFunc)
opt.set_lower_bounds([-pi, -pi, -pi, -3.0, -3.0, -3.0])
opt.set_upper_bounds([pi, pi, pi, 3.0, 3.0, 3.0])
opt.set_maxeval(1500)
params = opt.optimize(x0)
# output = scipy.optimize.leastsq(objectiveFunc, x0, args=funcArgs)
# params = output[0]
# params = scipy.optimize.fmin(objectiveFunc, x0, args=funcArgs)
# constraints = []
# varBounds = [(-pi, pi), (-pi, pi), (-pi, pi), (-3.0, 3.0), (-3.0, 3.0), (-3.0, 3.0)]
# params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
# output = scipy.optimize.fmin_l_bfgs_b(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
# params = output[0]
# print 'Min error:', output[1]
# params = scipy.optimize.fmin_tnc(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
# params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
# params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
translate = numpyTransform.translation(params[3:6])
rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
transform = translate * rotx * roty * rotz
        return rotx * roty * rotz, translate, S
def minimizePlane(self, p, q, **kwargs):
# TODO: Actually fill out
R = numpy.matrix(numpy.identity(4))
T = numpy.matrix(numpy.identity(4))
S = numpy.matrix(numpy.identity(4))
# function [R,T] = eq_plane(q,p,n,weights)
# n = n .* repmat(weights,3,1);
#
# c = cross(p,n);
#
# cn = vertcat(c,n);
#
# C = cn*transpose(cn);
#
# b = - [sum(sum((p-q).*repmat(cn(1,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(2,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(3,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(4,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(5,:),3,1).*n));
# sum(sum((p-q).*repmat(cn(6,:),3,1).*n))];
#
# X = C\b;
#
# cx = cos(X(1)); cy = cos(X(2)); cz = cos(X(3));
# sx = sin(X(1)); sy = sin(X(2)); sz = sin(X(3));
#
# R = [cy*cz cz*sx*sy-cx*sz cx*cz*sy+sx*sz;
# cy*sz cx*cz+sx*sy*sz cx*sy*sz-cz*sx;
# -sy cy*sx cx*cy];
#
# T = X(4:6);
return R, T, S
def weightEqual(self, qIndices):
return numpy.ones(qIndices.shape[0])
def rms_error(self, a, b):
'''
        Determine the RMS error between two equally sized point clouds with point correspondence.
NOTE: a and b need to have equal number of points
'''
if a.shape[0] != b.shape[0]:
raise Exception('Input Point clouds a and b do not have the same number of points')
distSq = numpy.sum((a - b) ** 2, axis=1)
err = numpy.sqrt(numpy.mean(distSq))
return err
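# Worked example (added note): for numpy arrays a = [[0, 0, 0]] and
# b = [[3, 4, 0]] the single squared point distance is 25, so
# ICP.rms_error returns sqrt(25) = 5.0.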
def demo(*args, **kwargs):
import math
m = 80 # width of grid
n = m ** 2 # number of points
minVal = -2.0
maxVal = 2.0
delta = (maxVal - minVal) / (m - 1)
X, Y = numpy.mgrid[minVal:maxVal + delta:delta, minVal:maxVal + delta:delta]
X = X.flatten()
Y = Y.flatten()
Z = numpy.sin(X) * numpy.cos(Y)
# Create the data point-matrix
M = numpy.array([X, Y, Z]).T
# Translation values (a.u.):
Tx = 0.5
Ty = -0.3
Tz = 0.2
# Translation vector
T = numpyTransform.translation(Tx, Ty, Tz)
S = numpyTransform.scaling(1.0, N=4)
# Rotation values (rad.):
rx = 0.3
ry = -0.2
rz = 0.05
Rx = numpy.matrix([[1, 0, 0, 0],
[0, math.cos(rx), -math.sin(rx), 0],
[0, math.sin(rx), math.cos(rx), 0],
[0, 0, 0, 1]])
Ry = numpy.matrix([[math.cos(ry), 0, math.sin(ry), 0],
[0, 1, 0, 0],
[-math.sin(ry), 0, math.cos(ry), 0],
[0, 0, 0, 1]])
Rz = numpy.matrix([[math.cos(rz), -math.sin(rz), 0, 0],
[math.sin(rz), math.cos(rz), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# Rotation matrix
R = Rx * Ry * Rz
transformMat = numpy.matrix(numpy.identity(4))
transformMat *= T
transformMat *= R
transformMat *= S
# Transform data-matrix plus noise into model-matrix
D = numpyTransform.transformPoints(transformMat, M)
# Add noise to model and data
M = M + 0.01 * numpy.random.randn(n, 3)
D = D + 0.01 * numpy.random.randn(n, 3)
# Run ICP (standard settings)
initialGuess = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
lowerBounds = numpy.array([-pi, -pi, -pi, -100.0, -100.0, -100.0])
upperBounds = numpy.array([pi, pi, pi, 100.0, 100.0, 100.0])
icp = ICP(M, D, maxIterations=15, dataDownsampleFactor=1, minimizeMethod='fmincon', **kwargs)
# icp = ICP(M, D, maxIterations=15, dataDownsampleFactor=1, minimizeMethod='point', **kwargs)
transform, err, t = icp.runICP(x0=initialGuess, lb=lowerBounds, ub=upperBounds)
# Transform data-matrix using ICP result
Dicp = numpyTransform.transformPoints(transform[-1], D)
# Plot model points blue and transformed points red
if False:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.scatter(M[:, 0], M[:, 1], M[:, 2], c='r', marker='o')
ax.scatter(D[:, 0], D[:, 1], D[:, 2], c='b', marker='^')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.scatter(M[:, 0], M[:, 1], M[:, 2], c='r', marker='o')
ax.scatter(Dicp[:, 0], Dicp[:, 1], Dicp[:, 2], c='b', marker='^')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
ax = fig.add_subplot(2, 2, 3)
ax.plot(t, err, 'x--')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
plt.show()
else:
import visvis as vv
app = vv.use()
vv.figure()
vv.subplot(2, 2, 1)
vv.plot(M[:, 0], M[:, 1], M[:, 2], lc='b', ls='', ms='o')
vv.plot(D[:, 0], D[:, 1], D[:, 2], lc='r', ls='', ms='x')
vv.xlabel('[0,0,1] axis')
vv.ylabel('[0,1,0] axis')
vv.zlabel('[1,0,0] axis')
vv.title('Red: z=sin(x)*cos(y), blue: transformed point cloud')
# Plot the results
vv.subplot(2, 2, 2)
vv.plot(M[:, 0], M[:, 1], M[:, 2], lc='b', ls='', ms='o')
vv.plot(Dicp[:, 0], Dicp[:, 1], Dicp[:, 2], lc='r', ls='', ms='x')
vv.xlabel('[0,0,1] axis')
vv.ylabel('[0,1,0] axis')
vv.zlabel('[1,0,0] axis')
vv.title('ICP result')
# Plot RMS curve
vv.subplot(2, 2, 3)
vv.plot(t, err, ls='--', ms='x')
vv.xlabel('time [s]')
vv.ylabel('d_{RMS}')
vv.title('KD-Tree matching')
if 'optAlg' in kwargs:
opt2 = nlopt.opt(kwargs['optAlg'], 2)
vv.title(opt2.get_algorithm_name())
del opt2
else:
vv.title('KD-Tree matching')
app.Run()
if __name__ == '__main__':
demo()
# demo2()
|
mit
| -712,446,531,366,426,000
| 37.842857
| 176
| 0.551273
| false
| 3.491159
| false
| false
| false
|