| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
# groupwise-ics: synchronize GroupWise calendar to ICS file and back
# Copyright (C) 2013 Cedric Bosdonnat <cedric@bosdonnat.fr>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import time
from dateutil import rrule
import email
import os.path
import sys
import re
class LineUnwrapper(object):
def __init__(self, s):
try:
self.lines = s.split('\n')
except:
print >> sys.stderr, "Can't parse %s\n" % (s)
self.lines = []
self.lines_read = None
self.saved = None
def each_line(self):
for line in self.lines:
line = line.rstrip('\r')
if line.startswith(' ') or line.startswith('\t'):
if self.saved is None:
self.saved = ''
self.lines_read = []
self.lines_read.append(line)
self.saved += line.strip()
else:
if self.saved is not None:
retval = (self.lines_read, self.saved)
self.lines_read = [line]
self.saved = line.strip()
yield retval
self.lines_read = [line]
self.saved = line.strip()
class Calendar(object):
def __init__(self, mailstr, attach_write_func=None):
self.events = []
mail = email.message_from_string(mailstr)
ical = None
attachments = []
self.timezones = {}
for part in mail.walk():
if part.get_content_type().startswith('text/calendar'):
ical = part.get_payload(decode=True)
else:
disposition = part.get('Content-Disposition')
if disposition and disposition.startswith('attachment'):
attachment = {}
filename = part.get_filename()
if filename:
attachment['filename'] = filename
attachment['content-type'] = part.get_content_type()
attachment['payload'] = part.get_payload(decode=True)
attachments.append(attachment)
if ical is None:
print >> sys.stderr, "Didn't find any ical data in following email %s\n" % (mailstr)
else:
self.parse(ical, attachments, attach_write_func)
def parse(self, ical, attachments, attach_write_func=None):
content = LineUnwrapper(ical)
vtimezone = None
vevent = None
tzmap = {}
timezone = []
for (real_lines, line) in content.each_line():
if vtimezone is not None:
timezone.append(line)
if line == 'END:VTIMEZONE':
tzmap[vtimezone.tzid] = vtimezone
self.timezones[vtimezone.tzid] = timezone
vtimezone = None
else:
vtimezone.parseline(line)
elif vevent is None and line == 'BEGIN:VTIMEZONE':
vtimezone = Timezone()
timezone.append(line)
elif vevent is not None:
if line == 'END:VEVENT':
self.events.append(vevent)
vevent = None
else:
vevent.parseline(real_lines, line, attachments,
attach_write_func)
elif vtimezone is None and line == 'BEGIN:VEVENT':
vevent = Event(tzmap)
def diff(self, calendar):
'''
Searches for differences between this calendar (origin)
and the one passed as parameter.
@result: (changed, removed, added, unchanged) where all items
in the tupple is a dictionary with a unique ID as key
and the event as value. Items in the changed dictionary
are dictionaries with 'old' and 'new' keys to store both
the old and new version of the event
'''
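# Illustrative usage (names are hypothetical, not from the source):
#   changed, removed, added, unchanged = local_cal.diff(remote_cal)
#   for uid in changed:
#       print uid, changed[uid]['old'], changed[uid]['new']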
# First search for changed or removed events
changed = {}
removed = {}
unchanged = {}
orig_events = self.get_events_by_uid()
dest_events = calendar.get_events_by_uid()
for uid in orig_events:
if uid in dest_events:
if orig_events[uid] == dest_events[uid]:
unchanged[uid] = orig_events[uid]
else:
changed[uid] = {'old': orig_events[uid], 'new': dest_events[uid]}
else:
removed[uid] = orig_events[uid]
# Then search for new events
added = {}
for uid in dest_events:
if uid not in orig_events:
added[uid] = dest_events[uid]
return (changed, removed, added, unchanged)
def get_events_by_uid(self):
by_uid = {}
for event in self.events:
uid = event.uid
if event.gwrecordid is not None:
uid = event.gwrecordid
by_uid[uid] = event
return by_uid
def to_ical(self):
out = ''
out = 'BEGIN:VCALENDAR\r\n'
out += 'PRODID:-//SUSE Hackweek//NONSGML groupwise-to-ics//EN\r\n'
out += 'VERSION:2.0\r\n'
for timezone in self.timezones:
out += '\r\n'.join(self.timezones[timezone])
for event in self.events:
out += event.to_ical()
out += 'END:VCALENDAR\r\n'
return out
class Timezone(datetime.tzinfo):
def __init__(self):
self.tzid = None
self.component = None
self.changes = []
def parseline(self, line):
if line.startswith('TZID:'):
self.tzid = line[len('TZID:'):].lower().translate(None, '"\'')
elif self.component is None and line.startswith('BEGIN:'):
value = line[len('BEGIN:'):]
self.component = TZDetails(value)
elif self.component is not None:
if line.startswith('END:'):
self.changes.append(self.component)
self.changes.sort(key = lambda change: change.start)
self.component = None
else:
self.component.parseline(line)
def utcoffset(self, dt):
offset = None
# Build a list of sorted changes
all_changes = {}
now = datetime.datetime.now()
for change in self.changes:
if change.start:
all_changes[change.start] = change
else:
it = iter(change.rrule)
try:
date = it.next()
# Who plans meetings in 10 years from now?
while date.year < now.year + 10:
date = it.next()
all_changes[date] = change
except StopIteration:
pass
# Find the matching change
for date in sorted(all_changes.keys()):
if dt >= date:
offset = all_changes[date].offsetto
else:
if offset is None:
return all_changes[date].offsetfrom
else:
return offset
return offset
class TZDetails(object):
def __init__(self, kind):
self.kind = kind
self.name = None
self.offsetfrom = 0
self.offsetto = 0
self.start = None
self.rrule = None
def parseline(self, line):
if line.startswith('TZNAME:'):
self.name = line[len('TZNAME:'):]
if line.startswith('DTSTART:'):
value = line[len('DTSTART:'):]
self.start = datetime.datetime.strptime(value, '%Y%m%dT%H%M%S')
if self.rrule:
self.rrule = rrule.rrulestr(self.rrule, dtstart=self.start)
self.start = None
if line.startswith('TZOFFSETFROM:'):
value = line[len('TZOFFSETFROM:'):]
self.offsetfrom = self.parseoffset(value)
if line.startswith('TZOFFSETTO:'):
value = line[len('TZOFFSETTO:'):]
self.offsetto = self.parseoffset(value)
if line.startswith('RRULE:'):
self.rrule = line[len('RRULE:'):]
if self.start:
self.rrule = rrule.rrulestr(self.rrule, dtstart=self.start)
self.start = None
def parseoffset(self, value):
try:
minutes = int(value[-2:])
hours = int(value[-4:-2])
sign = 1
if len(value) == 5 and value[0] == '-':
sign = -1
minutes = sign * ( minutes + hours * 60 )
return datetime.timedelta(minutes = minutes)
except ValueError:
return None
def __eq__(self, other):
return self.kind == other.kind and \
self.name == other.name and \
self.offsetfrom == other.offsetfrom and \
self.offsetto == other.offsetto and \
self.start == other.start
class ParametrizedValue(object):
def __init__(self, ical):
pos = ical.find(':')
self.value = None
# Split the value from the parameters
if pos >= 0:
self.value = ical[pos + 1:]
params = ical[:pos].split(';')
else:
params = ical.split(';')
# Process the parameters
new_params = {}
for param in params:
pos = param.find('=')
if pos >= 0:
key = param[:pos]
new_params[key] = param[pos + 1:]
self.params = new_params
def set_params(self, value):
self._params = {}
# Upper case all keys to avoid potential problems
for param in value:
self._params[param.upper()] = value[param]
def get_params(self):
return self._params
params = property(get_params, set_params)
def __eq__(self, other):
params_equals = set(self.params.items()) ^ set(other.params.items())
return self.value == other.value and len(params_equals) == 0
def __repr__(self):
return self.to_ical()
def __hash__(self):
return hash(repr(self))
def to_ical(self):
result = ''
for param in self.params:
result += ';%s=%s' % (param, self.params[param])
result += ':%s' % self.value
return result
class Event(object):
def __init__(self, tzmap):
self.lines = []
self.properties = {}
self.tzmap = tzmap
self.attendees = []
self.attachments = []
def get_property(self, key):
value = None
if key in self.properties:
value = self.properties[key][0]
return value
def set_property(self,value, key, pattern):
if key in self.properties:
lineno = self.properties[key][1]
del self.lines[lineno]
del self.properties[key]
lineno = len(self.lines)
self.lines.append(pattern % value)
self.properties[key] = (value, lineno)
def get_uid(self):
return self.get_property('uid')
def set_uid(self, uid):
self.set_property(uid, 'uid', 'UID:%s')
uid = property(get_uid, set_uid)
def get_gwrecordid(self):
return self.get_property('gwrecordid')
def set_gwrecordid(self, value):
self.set_property(value, 'gwrecordid', 'X-GWRECORDID:%s')
gwrecordid = property(get_gwrecordid, set_gwrecordid)
def get_dtstamp(self):
return self.get_property('dtstamp')
def set_dtstamp(self, value):
self.set_property(value, 'dtstamp', 'DTSTAMP:%s')
dtstamp = property(get_dtstamp, set_dtstamp)
def get_dtstart(self):
"""
starts with a ':' in most cases as this can have parameters (for all-day events)
"""
return self.get_property('dtstart')
def set_dtstart(self, value):
self.set_property(value, 'dtstart', 'DTSTART%s')
dtstart = property(get_dtstart, set_dtstart)
def get_dtend(self):
"""
starts with a ':' in most cases as this can have parameters (for all-day events)
"""
return self.get_property('dtend')
def set_dtend(self, value):
self.set_property(value, 'dtend', 'DTEND%s')
dtend = property(get_dtend, set_dtend)
def get_summary(self):
return self.get_property('summary')
def set_summary(self, value):
self.set_property(value, 'summary', 'SUMMARY:%s')
summary = property(get_summary, set_summary)
def get_location(self):
return self.get_property('location')
def set_location(self, value):
self.set_property(value, 'location', 'LOCATION:%s')
location = property(get_location, set_location)
def get_description(self):
return self.get_property('description')
def set_description(self, value):
self.set_property(value, 'description', 'DESCRIPTION:%s')
description = property(get_description, set_description)
def get_status(self):
return self.get_property('status')
def set_status(self, value):
self.set_property(value, 'status', 'STATUS:%s')
status = property(get_status, set_status)
def get_organizer(self):
return self.get_property('organizer')
def set_organizer(self, value):
self.set_property(value, 'organizer', 'ORGANIZER%s')
organizer = property(get_organizer, set_organizer)
def parseline(self, real_lines, line, attachments, attach_write_func=None):
if line.startswith('DTSTART'):
value = line[len('DTSTART'):]
self.dtstart = self.datetime_to_utc(value)
elif line.startswith('DTEND'):
value = line[len('DTEND'):]
self.dtend = self.datetime_to_utc(value)
elif line.startswith('UID:'):
self.uid = line[len('UID:'):]
elif line.startswith('X-GWRECORDID:'):
self.gwrecordid = line[len('X-GWRECORDID:'):]
elif line.startswith('DTSTAMP:'):
utc = self.datetime_to_utc(line[len('DTSTAMP'):])
if utc.startswith(':'):
utc = utc[1:]
self.dtstamp = utc
elif line.startswith('SUMMARY:'):
self.summary = line[len('SUMMARY:'):]
elif line.startswith('LOCATION:'):
self.location = line[len('LOCATION:'):]
elif line.startswith('DESCRIPTION:'):
self.description = line[len('DESCRIPTION:'):]
elif line.startswith('STATUS:'):
self.status = line[len('STATUS:'):]
elif line.startswith('ORGANIZER'):
self.organizer = ParametrizedValue(line[len('ORGANIZER'):])
elif line.startswith('ATTENDEE'):
self.attendees.append(ParametrizedValue(line[len('ATTENDEE'):]))
elif line.startswith('ATTACH'):
attach = ParametrizedValue(line[len('ATTACH'):])
if attach.value.lower() == 'cid:...':
for attachment in attachments:
attach = ParametrizedValue('')
filename = 'unnamed'
if attachment['filename']:
filename = attachment['filename']
if self.gwrecordid:
filename = os.path.join(self.gwrecordid, filename)
payload = attachment['payload']
attach.value = attach_write_func(filename, payload)
self.attachments.append(attach)
else:
# Don't add lines if we got a property: the line is
# auto-added in the property setter
self.lines.extend(real_lines)
def datetime_to_utc(self, local):
value = ParametrizedValue(local)
return value.to_ical() # FIXME: the code below is disabled because it doesn't handle all cases
if 'TZID' in value.params:
# We got a localized time, search for the timezone definition
# we extracted from the calendar and convert to UTC
tzid = value.params['TZID']
# Strip the possible quotes from the tzid
tzid = tzid.translate(None, '"\'')
tz = self.tzmap[tzid.lower()]
dt = datetime.datetime.strptime(value.value, '%Y%m%dT%H%M%S')
utc_dt = dt - tz.utcoffset(dt);
value.value = utc_dt.strftime('%Y%m%dT%H%M%SZ')
del value.params['TZID']
elif not value.value.endswith('Z') and value.value.find('T') >= 0:
# No time zone indication: assume it's local time
dt = time.strptime(value.value, '%Y%m%dT%H%M%S')
utc_dt = time.gmtime(time.mktime(dt))
value.value = time.strftime('%Y%m%dT%H%M%SZ', utc_dt)
return value.to_ical()
def fix_groupwise_inconsistencies (self):
# full day dtstart
if self.get_dtstart().find('T')>=0 :
exdate = False
for i in range(len(self.lines)):
# ensure the excluded occurrences (EXDATE) are full-day too
if self.lines[i].startswith('EXDATE;TZID=""'):
exdate = True
elif exdate and self.lines[i].startswith(' '):
self.lines[i] = re.sub("T[0-9]*","", self.lines[i])
else:
exdate = False
def to_ical(self):
attendees_lines = []
attachments_lines = []
for attendee in self.attendees:
attendees_lines.append('ATTENDEE%s' % attendee)
for attachment in self.attachments:
attachments_lines.append('ATTACH%s' % attachment)
self.fix_groupwise_inconsistencies()
return 'BEGIN:VEVENT\r\n%s\r\n%s\r\n%s\r\nEND:VEVENT\r\n' % (
'\r\n'.join(self.lines), '\r\n'.join(attendees_lines),
'\r\n'.join(attachments_lines))
def __eq__(self, other):
# Get the properties as a dictionary without line numbers to compare them
self_props = {}
for prop in self.properties:
self_props[prop] = self.properties[prop][0]
other_props = {}
for prop in other.properties:
other_props[prop] = other.properties[prop][0]
# We don't mind the order of the items in the dictionary in the comparison
props_equal = set(self_props.items()) ^ set(other_props.items())
attendees_equal = set(self.attendees) ^ set(other.attendees)
return len(props_equal) == 0 and len(attendees_equal) == 0
| cbosdo/groupwise-ics | cal.py | Python | gpl-3.0 | 18,999 |
def find2Sum(arr, target):
if arr is None:
return None
sumList = []
d = dict()
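# d maps the complement still needed (target - value) to the index where that
# value was seen, so a later element equal to the complement closes a pair.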
for x in range(len(arr)):
if arr[x] in d:
index = d[arr[x]]
smallList = []
smallList.append(index)
smallList.append(x)
sumList.append(smallList)
else:
d[target - arr[x]] = x
print sumList
def main():
arr = [2, 7, 11, 15]
target = 9
find2Sum(arr, target)
if __name__ == '__main__':
main()
| ruchikd/Algorithms | Python/2Sum/2Sum.py | Python | gpl-3.0 | 405 |
import random
import os
import safe
import PIL
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
import io
import core.settings
import urllib
import aiohttp
import asyncio
import traceback
import re
import imageutil
import core.help
import advertising
import discord
import json
from queuedict import QueueDict
from open_relative import *
from discord.ext.commands import command, Cog
from utils import is_private, MessageEditGuard
from contextlib import suppress
core.help.load_from_file('./help/latex.md')
LATEX_SERVER_URL = 'http://rtex.probablyaweb.site/api/v2'
DELETE_EMOJI = '🗑'
# Load data from external files
def load_template():
with open_relative('template.tex', encoding = 'utf-8') as f:
raw = f.read()
# Remove any comments from the template
cleaned = re.sub(r'%.*\n', '', raw)
return cleaned
TEMPLATE = load_template()
with open_relative('replacements.json', encoding = 'utf-8') as _f:
TEX_REPLACEMENTS = json.load(_f)
# Error messages
LATEX_TIMEOUT_MESSAGE = 'The renderer took too long to respond.'
PERMS_FAILURE = '''\
I don't have permission to upload images here :frowning:
The owner of this server should be able to fix this issue.
'''
DELETE_PERMS_FAILURE = '''\
The bot has been set up to delete `=tex` command inputs.
It requires the **manage messages** permission in order to do this.
'''
class RenderingError(Exception):
def __init__(self, log):
self.log = log
def __str__(self):
return f'RenderingError@{id(self)}'
def __repr__(self):
return f'RenderingError@{id(self)}'
class LatexModule(Cog):
def __init__(self, bot):
self.bot = bot
@command(aliases=['latex', 'rtex'])
@core.settings.command_allowed('c-tex')
async def tex(self, context, *, latex=''):
await self.handle(context.message, latex, is_inline=False)
@command(aliases=['wtex'])
@core.settings.command_allowed('c-tex')
async def texw(self, context, *, latex=''):
await self.handle(context.message, latex, wide=True, is_inline=False)
@command(aliases=['ptex'])
@core.settings.command_allowed('c-tex')
async def texp(self, context, *, latex=''):
await self.handle(context.message, latex, noblock=True, is_inline=False)
@Cog.listener()
async def on_message_discarded(self, message):
if not message.author.bot and message.content.count('$$') >= 2 and not message.content.startswith('=='):
if is_private(message.channel) or (await self.bot.settings.resolve_message('c-tex', message) and await self.bot.settings.resolve_message('f-inline-tex', message)):
latex = extract_inline_tex(message.clean_content)
if latex != '':
await self.handle(message, latex, centre=False, is_inline=True)
async def handle(self, message, source, *, is_inline, centre=True, wide=False, noblock=False):
if source == '':
await message.channel.send('Type `=help tex` for information on how to use this command.')
else:
print(f'LaTeX - {message.author} {message.author.id} - {source}')
colour_back, colour_text = await self.get_colours(message.author)
# Content replacement has to happen last in case it introduces a marker
latex = TEMPLATE.replace('\\begin{#BLOCK}', '').replace('\\end{#BLOCK}', '') if noblock else TEMPLATE
latex = latex.replace('#COLOUR', colour_text) \
.replace('#PAPERTYPE', 'a2paper' if wide else 'a5paper') \
.replace('#BLOCK', 'gather*' if centre else 'flushleft') \
.replace('#CONTENT', process_latex(source, is_inline))
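# (Note, inferred from the template markers: #COLOUR, #PAPERTYPE and #BLOCK are
# filled from settings; #CONTENT is substituted last so user LaTeX cannot inject
# another marker.)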
await self.render_and_reply(
message,
latex,
colour_back,
oversampling=(1 if wide else 2)
)
async def render_and_reply(self, message, latex, colour_back, *, oversampling):
with MessageEditGuard(message, message.channel, self.bot) as guard:
async with message.channel.typing():
sent_message = None
try:
render_result = await generate_image_online(
latex,
colour_back,
oversampling=oversampling
)
except asyncio.TimeoutError:
sent_message = await guard.send(LATEX_TIMEOUT_MESSAGE)
except RenderingError as e:
err = e.log is not None and re.search(r'^!.*?^!', e.log + '\n!', re.MULTILINE + re.DOTALL)
if err and len(err[0]) < 1000:
m = err[0].strip("!\n")
sent_message = await guard.send(f'Rendering failed. Check your code. You may edit your existing message.\n\n**Error Log:**\n```\n{m}\n```')
else:
sent_message = await guard.send('Rendering failed. Check your code. You can edit your existing message if needed.')
else:
sent_message = await guard.send(file=discord.File(render_result, 'latex.png'))
await self.bot.advertise_to(message.author, message.channel, guard)
if await self.bot.settings.resolve_message('f-tex-delete', message):
try:
await message.delete()
except discord.errors.NotFound:
pass
except discord.errors.Forbidden:
await guard.send('Failed to delete source message automatically - either grant the bot "Manage Messages" permissions or disable `f-tex-delete`')
if sent_message and await self.bot.settings.resolve_message('f-tex-trashcan', message):
with suppress(discord.errors.NotFound):
await sent_message.add_reaction(DELETE_EMOJI)
@Cog.listener()
async def on_reaction_add(self, reaction, user):
if not user.bot and reaction.emoji == DELETE_EMOJI:
blame = await self.bot.keystore.get_json('blame', str(reaction.message.id))
if blame is not None and blame['id'] == user.id:
await reaction.message.delete()
async def get_colours(self, user):
colour_setting = await self.bot.keystore.get('p-tex-colour', str(user.id)) or 'dark'
if colour_setting == 'light':
return 'ffffff', '202020'
elif colour_setting == 'dark':
return '36393F', 'f0f0f0'
# Fallback in case of other weird things
return '36393F', 'f0f0f0'
async def generate_image_online(latex, colour_back, *, oversampling):
payload = {
'format': 'png',
'code': latex.strip(),
'density': 220 * oversampling,
'quality': 100
}
async with aiohttp.ClientSession() as session:
try:
async with session.post(LATEX_SERVER_URL, json=payload, timeout=8) as loc_req:
loc_req.raise_for_status()
jdata = await loc_req.json()
if jdata['status'] == 'error':
print('Rendering error')
raise RenderingError(jdata.get('log'))
filename = jdata['filename']
# Now actually get the image
async with session.get(LATEX_SERVER_URL + '/' + filename, timeout=3) as img_req:
img_req.raise_for_status()
fo = io.BytesIO(await img_req.read())
image = PIL.Image.open(fo).convert('RGBA')
except aiohttp.client_exceptions.ClientResponseError:
print('Client response error')
raise RenderingError(None)
if image.width <= 2 or image.height <= 2:
print('Image is empty')
raise RenderingError(None)
border_size = 5 * oversampling
colour_back = imageutil.hex_to_tuple(colour_back)
width, height = image.size
backing = imageutil.new_monocolour((width + border_size * 2, height + border_size * 2), colour_back)
backing.paste(image, (border_size, border_size), image)
if oversampling != 1:
backing = backing.resize((backing.width // oversampling, backing.height // oversampling), resample = PIL.Image.BICUBIC)
fobj = io.BytesIO()
backing.save(fobj, format='PNG')
fobj = io.BytesIO(fobj.getvalue())
return fobj
def extract_inline_tex(content):
parts = iter(content.split('$$'))
latex = ''
try:
while True:
word = next(parts)
if word != '':
latex += word.replace('#', '\\#') \
.replace('$', '\\$') \
.replace('%', '\\%')
latex += ' '
word = next(parts)
if word != '':
latex += '$\\displaystyle {}$ '.format(word.strip('`'))
except StopIteration:
pass
return latex.rstrip()
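# Rough sketch of the transformation (spacing approximate):
#   extract_inline_tex('so $$x^2$$ holds')  ->  'so  $\displaystyle x^2$  holds'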
BLOCKFORMAT_REGEX = re.compile('^```(?:tex\n)?((?:.|\n)*)```$')
def process_latex(latex, is_inline):
latex = latex.strip(' \n')
blockformat = re.match(BLOCKFORMAT_REGEX, latex)
if blockformat:
latex = blockformat[1].strip(' \n')
for key, value in TEX_REPLACEMENTS.items():
if key in latex:
latex = latex.replace(key, value)
return latex
def setup(bot):
bot.add_cog(LatexModule(bot))
| DXsmiley/mathbot | mathbot/modules/latex/__init__.py | Python | gpl-3.0 | 8,127 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test case for module General
"""
from __future__ import absolute_import, print_function
from bleachbit.General import *
from bleachbit import logger
from tests import common
import shutil
import unittest
class GeneralTestCase(common.BleachbitTestCase):
"""Test case for module General"""
def test_boolstr_to_bool(self):
"""Test case for method boolstr_to_bool"""
tests = (('True', True),
('true', True),
('False', False),
('false', False))
for test in tests:
self.assertEqual(boolstr_to_bool(test[0]), test[1])
def test_getrealuid(self):
"""Test for getrealuid()"""
if 'posix' != os.name:
self.assertRaises(RuntimeError, getrealuid)
return
uid = getrealuid()
self.assertIsInstance(uid, int)
self.assertTrue(0 <= uid <= 65535)
if sudo_mode():
self.assertGreater(uid, 0)
logger.debug("os.getenv('LOGNAME') = %s", os.getenv('LOGNAME'))
logger.debug("os.getenv('SUDO_UID') = %s", os.getenv('SUDO_UID'))
logger.debug('os.geteuid() = %d', os.geteuid())
logger.debug('os.getuid() = %d', os.getuid())
try:
logger.debug('os.login() = %s', os.getlogin())
except:
logger.exception('os.login() raised exception')
def test_makedirs(self):
"""Unit test for makedirs"""
dir = os.path.join(self.tempdir, 'just', 'a', 'directory', 'adventure')
# directory does not exist
makedirs(dir)
self.assertLExists(dir)
# directory already exists
makedirs(dir)
self.assertLExists(dir)
# clean up
shutil.rmtree(os.path.join(self.tempdir, 'just'))
def test_run_external(self):
"""Unit test for run_external"""
args = {'nt': ['cmd.exe', '/c', 'dir', '%windir%\system32', '/s', '/b'],
'posix': ['find', '/usr/bin']}
(rc, stdout, stderr) = run_external(args[os.name])
self.assertEqual(0, rc)
self.assertEqual(0, len(stderr))
self.assertRaises(OSError, run_external, ['cmddoesnotexist'])
args = {'nt': ['cmd.exe', '/c', 'dir', 'c:\doesnotexist'],
'posix': ['ls', '/doesnotexist']}
(rc, stdout, stderr) = run_external(args[os.name])
self.assertNotEqual(0, rc)
@unittest.skipUnless('posix' == os.name, 'skipping on platforms without sudo')
def test_run_external_clean_env(self):
"""Unit test for clean_env parameter to run_external()"""
def run(args, clean_env):
(rc, stdout, stderr) = run_external(args, clean_env=clean_env)
self.assertEqual(rc, 0)
return stdout.rstrip('\n')
# clean_env should set language to C
run(['sh', '-c', '[ "x$LANG" = "xC" ]'], clean_env=True)
run(['sh', '-c', '[ "x$LC_ALL" = "xC" ]'], clean_env=True)
# clean_env parameter should not alter the PATH, and the PATH
# should not be empty
path_clean = run(['bash', '-c', 'echo $PATH'], clean_env=True)
self.assertEqual(os.getenv('PATH'), path_clean)
self.assertGreater(len(path_clean), 10)
path_unclean = run(['bash', '-c', 'echo $PATH'], clean_env=False)
self.assertEqual(path_clean, path_unclean)
# With parent environment set to English and parameter clean_env=False,
# expect English.
os.putenv('LC_ALL', 'C')
(rc, stdout, stderr) = run_external(
['ls', '/doesnotexist'], clean_env=False)
self.assertEqual(rc, 2)
self.assertTrue('No such file' in stderr)
# Set parent environment to Spanish.
os.putenv('LC_ALL', 'es_MX.UTF-8')
(rc, stdout, stderr) = run_external(
['ls', '/doesnotexist'], clean_env=False)
self.assertEqual(rc, 2)
if os.path.exists('/usr/share/locale-langpack/es/LC_MESSAGES/coreutils.mo'):
# Spanish language pack is installed.
self.assertTrue('No existe el archivo' in stderr)
# Here the parent environment has Spanish, but the child process
# should use English.
(rc, stdout, stderr) = run_external(
['ls', '/doesnotexist'], clean_env=True)
self.assertEqual(rc, 2)
self.assertTrue('No such file' in stderr)
# Reset environment
os.unsetenv('LC_ALL')
@unittest.skipUnless('posix' == os.name, 'skipping on platforms without sudo')
def test_sudo_mode(self):
"""Unit test for sudo_mode"""
self.assertIsInstance(sudo_mode(), bool)
| brahmastra2016/bleachbit | tests/TestGeneral.py | Python | gpl-3.0 | 5,373 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from obci.interfaces.bci.abstract_classifier import AbstractClassifier
class DummyClassifier(AbstractClassifier):
def classify(self, chunk):
print("classify called with chunk shaped "+str(chunk.shape))
return {'thetarget': 1.0}
def learn(self, chunk, target):
print("learn ("+target+") called with chunk shaped "+str(chunk.shape))
| BrainTech/openbci | obci/interfaces/bci/budzik/experimental/dummy_classifier.py | Python | gpl-3.0 | 452 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
sysrss – Let your system generate a maintenance notification RSS
Copyright © 2012, 2013 Mattias Andrée (maandree@member.fsf.org)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
import time
import datetime
from subprocess import Popen, PIPE
'''
Hack to enforce UTF-8 in output (in the future, if you see anypony not using utf-8 in
programs by default, report them to Princess Celestia so she can banish them to the moon)
@param text:str The text to print (empty string is default)
@param end:str The appendix to the text to print (line breaking is default)
'''
def print(text = '', end = '\n'):
sys.stdout.buffer.write((str(text) + end).encode('utf-8'))
sys.stdout.buffer.flush()
'''
stderr equivalent to print()
@param text:str The text to print (empty string is default)
@param end:str The appendix to the text to print (line breaking is default)
'''
def printerr(text = '', end = '\n'):
sys.stderr.buffer.write((str(text) + end).encode('utf-8'))
sys.stderr.buffer.flush()
'''
Like {@link #print}, only better because this does not take a text ending
but takes a format and parameters
@param master:str Formated string
@param slave:str* Parameters for the formated string
'''
def printf(master, *slave):
sys.stdout.buffer.write((master % slave).encode('utf-8'))
'''
Flush stdout
'''
def flush():
sys.stdout.buffer.flush()
'''
Mane class
@dependency util-linux::uuidgen
'''
class SysRSS:
'''
Mane method and constructor
'''
def __init__(self):
self.root = os.getenv('HOME') + '/.sysrss/'
self.sysinit()
if not self.initSites():
exit(255)
if len(self.sites) == 0:
print('There are no sites, update %s.' % (self.root + 'sites'))
exit(254)
proper = []
for site in self.sites:
site.interval = int(site.interval)
if site.interval <= 0:
print('Site %s does not have a positive interval and will therefore only be checked right now.' % site.name)
else:
proper.append(site)
message = site()
if (message is not None) and (len(message) > 0):
self.publish(site.name, message)
self.sites = proper
while True:
next = min(self.sites, key = lambda site : site.next).next
for site in self.sites:
if next > 0:
time.sleep(next * 60)
if site.next == next:
message = site()
if (message is not None) and (len(message) > 0):
self.publish(site.name, message)
site.next = site.interval
else:
site.next -= next
'''
Initialise the system
'''
def sysinit(self):
if not os.path.isdir(self.root):
os.mkdir(self.root)
printf('Created root directory, %s.\n', self.root)
if not os.path.isfile(self.root + 'log'):
with open(self.root + 'log', 'wb') as file:
file.flush()
printf('Created log file, %s, it contains everything that has ever happened, ever.\n', self.root + 'log')
flush()
if not os.path.isfile(self.root + 'maintenance.rss'):
date = self.getTime()
with open(self.root + 'maintenance.rss', 'wb') as file:
file.write('<?xml version="1.0" encoding="utf-8"?>\n'.encode('utf-8'))
file.write('<rss version="2.0">\n'.encode('utf-8'))
file.write(' <channel>\n'.encode('utf-8'))
file.write(' <title>SysRSS</title>\n'.encode('utf-8'))
file.write(' <description>System maintenance notification RSS</description>\n'.encode('utf-8'))
file.write(' <link>http://localhost/</link>\n'.encode('utf-8'))
file.write((' <lastBuildDate>%s</lastBuildDate>\n' % date).encode('utf-8'))
file.write((' <pubDate>%s</pubDate>\n' % date).encode('utf-8'))
file.write(' <ttl>1800</ttl>\n'.encode('utf-8'))
file.write('\n'.encode('utf-8'))
file.write(' </channel>\n'.encode('utf-8'))
file.write('</rss>\n'.encode('utf-8'))
file.write('\n'.encode('utf-8'))
file.flush()
printf('Created rss file, %s, you should set your news feed aggregator to syndicate this file.\n', self.root + 'maintenance.rss')
flush()
self.pubdate = date
self.publish('Welcome to SysRSS', 'This is going to be so awesome! 😄 \n\nEx animo\nSysRSS\n\n')
else:
data = None
with open(self.root + 'maintenance.rss', 'rb') as file:
data = file.read()
data = data.decode('utf8', 'replace')
data = data[data.find('<pubDate>') + len('<pubDate>'):]
data = data[:data.find('</')]
self.pubdate = data
'''
Initialise site list
@return :boolean Whether the program can continue
'''
def initSites(self):
self.sites = []
sites = self.sites
sitefile = self.root + 'sites'
if os.path.exists(sitefile):
with open(sitefile, 'rb') as file:
code = file.read().decode('utf8', 'replace') + '\n'
code = compile(code, sitefile, 'exec')
exec(code)
else:
with open(sitefile, 'wb') as file:
file.write('# -*- mode: python, coding: utf-8 -*-\n'.encode('utf-8'))
file.write('\n'.encode('utf-8'))
file.write('# self.sites (alternatively sites) is a list that you\n'.encode('utf-8'))
file.write('# should fill with Site:s; a Site describes a subsystem\n'.encode('utf-8'))
file.write('# that generates updates. Site\'s constructor takes 3\n'.encode('utf-8'))
file.write('# arguments: name, interval, implementation. The first\n'.encode('utf-8'))
file.write('# `name` is the name of the subsystem, it is displayed\n'.encode('utf-8'))
file.write('# as the title on all updates. `interval` is the number\n'.encode('utf-8'))
file.write('# of minutes between update checks. `implementation` is\n'.encode('utf-8'))
file.write('# a function or functor that returns an update message,\n'.encode('utf-8'))
file.write('# or an empty string if there are no updates.\n'.encode('utf-8'))
file.write('\n'.encode('utf-8'))
file.flush()
printf('Created site file, %s, you should fill it in and then restart this program.\n', sitefile)
flush()
return False
return True
'''
Publish a news item to the RSS
@param system:str The subsystem that generated the message
@param message:str Message to display
'''
def publish(self, system, message):
date = self.getTime()
addendum = self.makeNews(system, message).encode('utf-8')
with open(self.root + 'log', 'ab') as file:
file.write(addendum)
file.flush()
printf('The feed log has been updated with %s.\n', system)
with open(self.root + 'tmp', 'wb') as file:
file.write('<?xml version="1.0" encoding="utf-8"?>\n'.encode('utf-8'))
file.write('<rss version="2.0">\n'.encode('utf-8'))
file.write(' <channel>\n'.encode('utf-8'))
file.write(' <title>SysRSS</title>\n'.encode('utf-8'))
file.write(' <description>System maintenance notification RSS</description>\n'.encode('utf-8'))
file.write(' <link>http://localhost/</link>\n'.encode('utf-8'))
file.write((' <lastBuildDate>%s</lastBuildDate>\n' % date).encode('utf-8'))
file.write((' <pubDate>%s</pubDate>\n\n' % self.pubdate).encode('utf-8'))
with open(self.root + 'log', 'rb') as logfile:
file.write(logfile.read())
file.write(' </channel>\n'.encode('utf-8'))
file.write('</rss>\n'.encode('utf-8'))
file.write('\n'.encode('utf-8'))
file.flush()
Popen(['mv', self.root + 'tmp', self.root + 'maintenance.rss']).wait()
printf('The feed has been updated with %s.\n', system)
'''
Generate RSS item
@param system:str The subsystem that generated the message
@param message:str Message to display
@return :str RSS item
'''
def makeNews(self, system, message):
def makeUglyButReadable(data):
data = data.replace(']]>', ']]]]><![CDATA[>')
data = data.replace('\n', '<br>') # [sic!]
return '<![CDATA[' + data + ']]>'
return('<item>\n <title>%s</title>\n <guid>%s</guid>\n <pubDate>%s</pubDate>\n <description>%s</description>\n</item>\n\n' %
(makeUglyButReadable(system), self.generateUUID(), self.getTime(), makeUglyButReadable(message)))
'''
Generate an UUID
@return An UUID
'''
def generateUUID(self):
uuid = Popen(['uuidgen'], stdout=PIPE).communicate()[0].decode('utf-8', 'replace')
if uuid[-1] == '\n':
uuid = uuid[:-1]
return uuid
'''
Get a locale independent time stamp in RSS's [poor] format
@return :str The current time
'''
def getTime(self):
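# Intended result after the replacements below, e.g. 'Mon, 07 Jan 2013 12:00:00 +0000'
# (the RFC 822 style dates RSS expects).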
time = datetime.datetime.utcnow().strftime('(%w), %d [%m] %Y %H:%M:%S +0000')
time = time.replace('(1)', 'Mon')
time = time.replace('(2)', 'Tue')
time = time.replace('(3)', 'Wed')
time = time.replace('(4)', 'Thu')
time = time.replace('(5)', 'Fri')
time = time.replace('(6)', 'Sat')
time = time.replace('(0)', 'Sun') # [sic!]
time = time.replace('[01]', 'Jan')
time = time.replace('[02]', 'Feb')
time = time.replace('[03]', 'Mar')
time = time.replace('[04]', 'Apr')
time = time.replace('[05]', 'May')
time = time.replace('[06]', 'Jun')
time = time.replace('[07]', 'Jul')
time = time.replace('[08]', 'Aug')
time = time.replace('[09]', 'Sep')
time = time.replace('[10]', 'Oct')
time = time.replace('[11]', 'Nov')
time = time.replace('[12]', 'Dec')
return time
'''
Subsystem definition class
'''
class Site:
'''
Constructor
@param name System name
@param interval:int Generation interval in minutes
@param implementation:()→str Publish message generator, empty string is ignored
'''
def __init__(self, name, interval, implementation):
self.name = name
self.interval = interval
self.implementation = implementation
self.next = interval
'''
Invocation method
@return :str Message to publish
'''
def __call__(self):
return self.implementation()
'''
Execute mane method if started using this file
'''
if __name__ == '__main__':
SysRSS()
| maandree/sysrss | sysrss.py | Python | gpl-3.0 | 11,951 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Morris Jobke 2010 <morris.jobke@googlemail.com>
#
# MailNotify is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MailNotify is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# python imports
import indicate
import subprocess
# own imports
import logging
log = logging.getLogger('Log.UnreadMails')
class UnreadMails:
def __init__(self, link, prefix='', style=1):
log.debug('new UnreadMails ...')
self.link = link
self.style = style
self.prefix = prefix
self.mails = []
self.mailboxes = {}
self.indicators = {}
def add(self, mails):
mailboxes = {}
for mail in mails:
if mail.mailbox['name'] not in mailboxes:
mailboxes[mail.mailbox['name']] = {'count': 0, 'link': mail.mailbox['link']}
mailboxes[mail.mailbox['name']]['count'] += 1
if mail.subject not in [m.subject for m in self.mails]:
log.info('new mail ...')
self.mails.append(mail)
self.mailboxes = mailboxes
# cleanup
for mail in self.mails:
if mail.subject not in [m.subject for m in mails]:
self.mails.remove(mail)
if self.style == 2:
self.indicators[self.prefix + mail.subject].hide()
del self.indicators[self.prefix + mail.subject]
def indicate(self):
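# Inferred from the branches below: style 1 shows one indicator per mailbox with an
# unread count, style 2 shows one indicator per mail with its timestamp.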
if self.style == 1:
#### STYLE 1 ####
for m in self.mailboxes:
n = self.prefix + m
if n in self.indicators:
self.indicators[n].update(str(self.mailboxes[m]['count']))
else:
i = IndicatorItem(
self.mailboxes[m]['link'],
n,
str(self.mailboxes[m]['count'])
)
self.indicators[n] = i
elif self.style == 2:
#### STYLE 2 ####
for m in self.mails:
n = self.prefix + m.subject
if not n in self.indicators:
i = IndicatorItem(
m.link,
n,
m.time
)
self.indicators[n] = i
def clear(self):
for i in self.indicators:
self.indicators[i].hide()
self.mails = []
self.mailboxes = {}
self.indicators = {}
class IndicatorItem(indicate.Indicator):
def __init__(self, link, subject, timeOrCount):
'''
indicator for IndicatorItems
'''
log.debug('new IndicatorItem ...')
indicate.Indicator.__init__(self)
self.link = link
self.set_property('name', subject)
if type(timeOrCount) == type(float()):
self.set_property_time('time', timeOrCount)
elif type(timeOrCount) == type(str()):
self.set_property('count', timeOrCount)
self.connect('user-display', self.click)
self.set_property('draw-attention', 'true')
self.show()
def click(self, server, something):
server.set_property('draw-attention', 'false')
subprocess.call(['gnome-open', self.link])
def stress(self):
self.set_property('draw-attention', 'true')
def unstress(self):
self.set_property('draw-attention', 'false')
def update(self, timeOrCount):
if type(timeOrCount) == type(float()):
self.set_property_time('time', timeOrCount)
elif type(timeOrCount) == type(str()):
self.set_property('count', timeOrCount)
self.set_property('draw-attention', 'true')
| MorrisJobke/MailNotify | includes/unreadmails.py | Python | gpl-3.0 | 3,557 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-30 22:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('atlas_doc', '0014_auto_20170830_1803'),
]
operations = [
migrations.AddField(
model_name='page',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='atlas_doc.Page'),
),
migrations.AddField(
model_name='version',
name='level',
field=models.PositiveIntegerField(db_index=True, default=1, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='version',
name='lft',
field=models.PositiveIntegerField(db_index=True, default=1, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='version',
name='rght',
field=models.PositiveIntegerField(db_index=True, default=1, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='version',
name='tree_id',
field=models.PositiveIntegerField(db_index=True, default=1, editable=False),
preserve_default=False,
),
migrations.AlterField(
model_name='collection',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='atlas_doc.Collection'),
),
migrations.AlterField(
model_name='version',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='atlas_doc.Version'),
),
]
| robertdown/atlas_docs | atlas_doc/migrations/0015_auto_20170830_1836.py | Python | gpl-3.0 | 2,047 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2015 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import sys
import shutil
from patoolib import util
from . import basedir, datadir, needs_program, patool_cmd
class ArchiveExtractTest (unittest.TestCase):
@needs_program('7z')
def test_extract(self):
tmpdir = util.tmpdir(dir=basedir)
try:
archive = os.path.join(datadir, "t .7z")
util.run_checked([sys.executable, patool_cmd, "-vv", "--non-interactive", "extract", "--outdir", tmpdir, archive])
finally:
shutil.rmtree(tmpdir)
| wummel/patool | tests/test_extract.py | Python | gpl-3.0 | 1,227 |
"""
"""
from collections.abc import Mapping
__all__ = [
"AtlasView",
"AdjacencyView",
"MultiAdjacencyView",
"UnionAtlas",
"UnionAdjacency",
"UnionMultiInner",
"UnionMultiAdjacency",
"FilterAtlas",
"FilterAdjacency",
"FilterMultiInner",
"FilterMultiAdjacency",
]
class AtlasView(Mapping):
"""An AtlasView is a Read-only Mapping of Mappings.
It is a View into a dict-of-dict data structure.
The inner level of dict is read-write. But the
outer level is read-only.
See Also
========
AdjacencyView - View into dict-of-dict-of-dict
MultiAdjacencyView - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ("_atlas",)
def __getstate__(self):
return {"_atlas": self._atlas}
def __setstate__(self, state):
self._atlas = state["_atlas"]
def __init__(self, d):
self._atlas = d
def __len__(self):
return len(self._atlas)
def __iter__(self):
return iter(self._atlas)
def __getitem__(self, key):
return self._atlas[key]
def copy(self):
return {n: self[n].copy() for n in self._atlas}
def __str__(self):
return str(self._atlas) # {nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._atlas!r})"
class AdjacencyView(AtlasView):
"""An AdjacencyView is a Read-only Map of Maps of Maps.
It is a View into a dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView - View into dict-of-dict
MultiAdjacencyView - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = () # Still uses AtlasView slots names _atlas
def __getitem__(self, name):
return AtlasView(self._atlas[name])
def copy(self):
return {n: self[n].copy() for n in self._atlas}
class MultiAdjacencyView(AdjacencyView):
"""An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps.
It is a View into a dict-of-dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView - View into dict-of-dict
AdjacencyView - View into dict-of-dict-of-dict
"""
__slots__ = () # Still uses AtlasView slots names _atlas
def __getitem__(self, name):
return AdjacencyView(self._atlas[name])
def copy(self):
return {n: self[n].copy() for n in self._atlas}
class UnionAtlas(Mapping):
"""A read-only union of two atlases (dict-of-dict).
The two dict-of-dicts represent the inner dict of
an Adjacency: `G.succ[node]` and `G.pred[node]`.
The inner level of dict of both hold attribute key:value
pairs and is read-write. But the outer level is read-only.
See Also
========
UnionAdjacency - View into dict-of-dict-of-dict
UnionMultiAdjacency - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ("_succ", "_pred")
def __getstate__(self):
return {"_succ": self._succ, "_pred": self._pred}
def __setstate__(self, state):
self._succ = state["_succ"]
self._pred = state["_pred"]
def __init__(self, succ, pred):
self._succ = succ
self._pred = pred
def __len__(self):
return len(self._succ) + len(self._pred)
def __iter__(self):
return iter(set(self._succ.keys()) | set(self._pred.keys()))
def __getitem__(self, key):
try:
return self._succ[key]
except KeyError:
return self._pred[key]
def copy(self):
result = {nbr: dd.copy() for nbr, dd in self._succ.items()}
for nbr, dd in self._pred.items():
if nbr in result:
result[nbr].update(dd)
else:
result[nbr] = dd.copy()
return result
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})"
class UnionAdjacency(Mapping):
"""A read-only union of dict Adjacencies as a Map of Maps of Maps.
The two input dict-of-dict-of-dicts represent the union of
`G.succ` and `G.pred`. Return values are UnionAtlas
The inner level of dict is read-write. But the
middle and outer levels are read-only.
succ : a dict-of-dict-of-dict {node: nbrdict}
pred : a dict-of-dict-of-dict {node: nbrdict}
The keys for the two dicts should be the same
See Also
========
UnionAtlas - View into dict-of-dict
UnionMultiAdjacency - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ("_succ", "_pred")
def __getstate__(self):
return {"_succ": self._succ, "_pred": self._pred}
def __setstate__(self, state):
self._succ = state["_succ"]
self._pred = state["_pred"]
def __init__(self, succ, pred):
# keys must be the same for two input dicts
assert len(set(succ.keys()) ^ set(pred.keys())) == 0
self._succ = succ
self._pred = pred
def __len__(self):
return len(self._succ) # length of each dict should be the same
def __iter__(self):
return iter(self._succ)
def __getitem__(self, nbr):
return UnionAtlas(self._succ[nbr], self._pred[nbr])
def copy(self):
return {n: self[n].copy() for n in self._succ}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})"
class UnionMultiInner(UnionAtlas):
"""A read-only union of two inner dicts of MultiAdjacencies.
The two input dict-of-dict-of-dicts represent the union of
`G.succ[node]` and `G.pred[node]` for MultiDiGraphs.
Return values are UnionAtlas.
The inner level of dict is read-write. But the outer levels are read-only.
See Also
========
UnionAtlas - View into dict-of-dict
UnionAdjacency - View into dict-of-dict-of-dict
UnionMultiAdjacency - View into dict-of-dict-of-dict-of-dict
"""
__slots__ = () # Still uses UnionAtlas slots names _succ, _pred
def __getitem__(self, node):
in_succ = node in self._succ
in_pred = node in self._pred
if in_succ:
if in_pred:
return UnionAtlas(self._succ[node], self._pred[node])
return UnionAtlas(self._succ[node], {})
return UnionAtlas({}, self._pred[node])
def copy(self):
nodes = set(self._succ.keys()) | set(self._pred.keys())
return {n: self[n].copy() for n in nodes}
class UnionMultiAdjacency(UnionAdjacency):
"""A read-only union of two dict MultiAdjacencies.
The two input dict-of-dict-of-dict-of-dicts represent the union of
`G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency.
The inner level of dict is read-write. But the outer levels are read-only.
See Also
========
UnionAtlas - View into dict-of-dict
UnionMultiInner - View into dict-of-dict-of-dict
"""
__slots__ = () # Still uses UnionAdjacency slots names _succ, _pred
def __getitem__(self, node):
return UnionMultiInner(self._succ[node], self._pred[node])
class FilterAtlas(Mapping): # nodedict, nbrdict, keydict
def __init__(self, d, NODE_OK):
self._atlas = d
self.NODE_OK = NODE_OK
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return (n for n in self.NODE_OK.nodes if n in self._atlas)
return (n for n in self._atlas if self.NODE_OK(n))
def __getitem__(self, key):
if key in self._atlas and self.NODE_OK(key):
return self._atlas[key]
raise KeyError(f"Key {key} not found")
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return {u: self._atlas[u] for u in self.NODE_OK.nodes if u in self._atlas}
return {u: d for u, d in self._atlas.items() if self.NODE_OK(u)}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})"
class FilterAdjacency(Mapping): # edgedict
def __init__(self, d, NODE_OK, EDGE_OK):
self._atlas = d
self.NODE_OK = NODE_OK
self.EDGE_OK = EDGE_OK
def __len__(self):
return sum(1 for n in self)
def __iter__(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return (n for n in self.NODE_OK.nodes if n in self._atlas)
return (n for n in self._atlas if self.NODE_OK(n))
def __getitem__(self, node):
if node in self._atlas and self.NODE_OK(node):
def new_node_ok(nbr):
return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr)
return FilterAtlas(self._atlas[node], new_node_ok)
raise KeyError(f"Key {node} not found")
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return {
u: {
v: d
for v, d in self._atlas[u].items()
if self.NODE_OK(v)
if self.EDGE_OK(u, v)
}
for u in self.NODE_OK.nodes
if u in self._atlas
}
return {
u: {v: d for v, d in nbrs.items() if self.NODE_OK(v) if self.EDGE_OK(u, v)}
for u, nbrs in self._atlas.items()
if self.NODE_OK(u)
}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
name = self.__class__.__name__
return f"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})"
class FilterMultiInner(FilterAdjacency): # multiedge_seconddict
def __iter__(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas)
else:
my_nodes = (n for n in self._atlas if self.NODE_OK(n))
for n in my_nodes:
some_keys_ok = False
for key in self._atlas[n]:
if self.EDGE_OK(n, key):
some_keys_ok = True
break
if some_keys_ok is True:
yield n
def __getitem__(self, nbr):
if nbr in self._atlas and self.NODE_OK(nbr):
def new_node_ok(key):
return self.EDGE_OK(nbr, key)
return FilterAtlas(self._atlas[nbr], new_node_ok)
raise KeyError(f"Key {nbr} not found")
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
return {
v: {k: d for k, d in self._atlas[v].items() if self.EDGE_OK(v, k)}
for v in self.NODE_OK.nodes
if v in self._atlas
}
return {
v: {k: d for k, d in nbrs.items() if self.EDGE_OK(v, k)}
for v, nbrs in self._atlas.items()
if self.NODE_OK(v)
}
class FilterMultiAdjacency(FilterAdjacency): # multiedgedict
def __getitem__(self, node):
if node in self._atlas and self.NODE_OK(node):
def edge_ok(nbr, key):
return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key)
return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok)
raise KeyError(f"Key {node} not found")
def copy(self):
try: # check that NODE_OK has attr 'nodes'
node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas)
except AttributeError:
node_ok_shorter = False
if node_ok_shorter:
my_nodes = self.NODE_OK.nodes
return {
u: {
v: {k: d for k, d in kd.items() if self.EDGE_OK(u, v, k)}
for v, kd in self._atlas[u].items()
if v in my_nodes
}
for u in my_nodes
if u in self._atlas
}
return {
u: {
v: {k: d for k, d in kd.items() if self.EDGE_OK(u, v, k)}
for v, kd in nbrs.items()
if self.NODE_OK(v)
}
for u, nbrs in self._atlas.items()
if self.NODE_OK(u)
}
| SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/classes/coreviews.py | Python | gpl-3.0 | 13,480 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 13:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blogadmin', '0003_auto_20170826_1836'),
]
operations = [
migrations.RenameField(
model_name='article',
old_name='pub_date',
new_name='pub_time',
),
]
| baike21/blog | blogadmin/migrations/0004_auto_20170826_2131.py | Python | gpl-3.0 | 435 |
# -*- coding: utf-8 -*-
# -*- mode: python -*-
from .base import BMBase
class BijectiveMap(BMBase):
'''Basic one to one bijective map'''
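# Hedged usage sketch (assumes BMBase supplies the m_kv/m_vk dicts and a no-argument constructor):
#   bm = BijectiveMap(); bm['a'] = 1   # stores the pair in both directions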
def __setitem__(self, key, value):
self.set_value(key, value)
def __getitem__(self, key):
return self.get_value(key)
def __delitem__(self, key):
self.del_value(key)
def set_value(self, key, value):
self.m_kv[key] = value
self.m_vk[value] = key
def get_value(self, key, default=False):
try:
return self.m_kv[key]
except KeyError:
if default != False:
return default
raise
def del_value(self, key):
v = self.m_kv[key]
del self.m_kv[key]
del self.m_vk[v]
def del_key(self, value):
key = self.get_key(value)
del self.m_vk[value]
del self.m_kv[key]
| waipu/pybimaps | pybimaps/o2o.py | Python | gpl-3.0 | 888 |
#!/usr/bin/python
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: ivana.mihalek@gmail.com
#
import os.path
import re, commands, time
from tcga_utils.processes import *
from tcga_utils.utils import *
import time
dorky = re.compile('(\-*\d+)([ACGT]+)>([ACGT]+)')
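# (Descriptive note: the pattern captures a cDNA change such as '-12AC>GT' as position,
# reference bases and alternate bases; clean_cdna_change_annotation() below reorders
# 'c.-12AC>GT' into 'AC-12GT'.)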
#########################################
def clean_cdna_change_annotation(old_annot):
new_annot = old_annot.replace ("c.", "")
if '>' in new_annot:
match_return = re.match(dorky,new_annot)
if not match_return:
            # unrecognized annotation format - nothing more we can do here,
            # but at least the 'c.' prefix has been stripped
pass
else:
new_annot = "%s%s%s" %( match_return.group(2), match_return.group(1), match_return.group(3))
return new_annot
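# Worked example (illustration only): the regex above captures position and alleles, so
# clean_cdna_change_annotation("c.123A>G") returns "A123G"; an annotation the regex cannot
# parse, e.g. "c.123_124del", only loses the "c." prefix and comes back as "123_124del".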
#########################################
def update_db (cursor, table, row_id, update_fields):
qry = "update %s set " % table
first = True
for field, value in update_fields.iteritems():
if (not first):
qry += ", "
qry += " %s = " % field
if value is None:
qry += " null "
elif type(value) is int:
qry += " %d" % value
else:
qry += " \'%s\'" % value
first = False
qry += " where id = %d" % int(row_id)
rows = search_db (cursor, qry)
# if there is a return, something went wrong
if (rows):
search_db (cursor, qry, verbose=True)
exit(1) # exit, bcs we should not be here
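#########################################
# A hedged alternative sketch (not what this pipeline uses): the same update expressed with
# DB-API parameter binding, assuming `cursor` is a MySQLdb-style cursor; note that table and
# column names cannot be bound as parameters and would still need to be trusted or whitelisted.
def update_db_parameterized(cursor, table, row_id, update_fields):
    assignments = ", ".join("%s = %%s" % field for field in update_fields.keys())
    qry = "update %s set %s where id = %%s" % (table, assignments)
    cursor.execute(qry, list(update_fields.values()) + [int(row_id)])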
#########################################
def one_allele_normal(field) :
tumor = {field['tumor_seq_allele1'], field['tumor_seq_allele2']} # this is a set
normal = {field['match_norm_seq_allele1'], field['match_norm_seq_allele2']}
return len(tumor&normal)>0
#########################################
def new_conflict_annotation (base_annot, list_of_ids):
annot = ""
for db_id in list_of_ids:
if len(annot)>0: annot += "; "
annot += "%s with %d" % (base_annot, db_id)
return annot
#########################################
def conflict_annotation_updated (existing_fields, conflict_comment, new_id):
conflict_annotation = existing_fields['conflict']
if not conflict_annotation:
conflict_annotation = ""
elif len(conflict_annotation) > 0:
conflict_annotation += "; "
conflict_annotation += "%s with %d" % (conflict_comment, new_id)
return conflict_annotation
#########################################
def update_conflict_field (cursor, table, existing_row_id, existing_fields, new_id, conflict_comment):
conflict_annotation = conflict_annotation_updated (existing_fields, conflict_comment, new_id)
update_fields = {'conflict': conflict_annotation}
existing_fields['conflict'] = conflict_annotation
update_db (cursor, table, existing_row_id, update_fields)
#########################################
def selected_info_overlap (existing_fields, new_fields, field_selection):
# indicator (True/False) arrays
    existing_has_info = map (lambda f: existing_fields[f] is not None and len(existing_fields[f]) > 0, field_selection)
new_has_info = map (lambda f: new_fields[f] is not None and len(new_fields[f])>0, field_selection)
# list of indices where both have info
both_have_info = filter (lambda x: existing_has_info[x] and new_has_info[x], range(len(field_selection)) )
    # list of indices where both have info and the info is the same
info_the_same = filter (lambda x: existing_fields[field_selection[x]] == new_fields[field_selection[x]], both_have_info )
# is information the same in all fields that exist in both entries?
if len(both_have_info) != len (info_the_same):
# nope - we have a conflict
return "conflict"
elif len (both_have_info) < sum (1 for x in new_has_info if x):
# the new entry has more info
return "new covers existing"
return "existing covers new"
#########################################
def is_exact_duplicate (new_fields, existing_fields_by_database_id):
exists_exact_duplicate = False
for db_id, fields in existing_fields_by_database_id.iteritems():
this_is_exact_duplicate = True
for field_name, value in fields.iteritems():
if field_name in ['id', 'conflict']: continue
if is_useful (new_fields, field_name) and is_useful (fields, field_name) and\
str(value) != str(new_fields[field_name]):
this_is_exact_duplicate = False
break
if this_is_exact_duplicate:
exists_exact_duplicate = True
break
return exists_exact_duplicate
#########################################
def is_tumor_allele_copy_paste (cursor, fields):
is_cp = False
if not fields.has_key('meta_info_idx') or not fields['meta_info_idx']:
return is_cp
qry = "select diagnostics, file_name from mutations_meta where id=%d" % int(fields['meta_info_idx'])
rows = search_db(cursor, qry)
is_cp = rows and ("tumor alleles identical" in rows[0][0])
if is_cp: print rows[0][1], " copy pastes"
return is_cp
#########################################
#this is godawful, but so is TCGA
def diagnose_duplication_reasons (cursor, existing_fields_by_database_id, new_fields):
diagnostics = {}
# is this an exact duplicate by any chance?
if is_exact_duplicate (new_fields, existing_fields_by_database_id):
return diagnostics
allele_fields = ['reference_allele', 'tumor_seq_allele1', 'tumor_seq_allele2', 'match_norm_seq_allele1', 'match_norm_seq_allele2']
interpretation_fields = ['cdna_change', 'aa_change', 'variant_classification']
new_fields_maf_duplicates_tumor_allele = is_tumor_allele_copy_paste(cursor, new_fields)
for db_id, existing_fields in existing_fields_by_database_id.iteritems():
diagnostics[db_id] = ""
if existing_fields['end_position'] == new_fields['end_position']:
# if differing required_fields is subset of tumor alleles
differing_req_fields = set(filter(lambda x: existing_fields[x] != new_fields[x], get_required_fields()))
existing_fields_maf_duplicates_tumor_allele = is_tumor_allele_copy_paste(cursor, existing_fields)
if differing_req_fields <= set(['tumor_seq_allele1', 'tumor_seq_allele2']):
if is_useful(existing_fields, 'tumor_seq_allele1') and is_useful(existing_fields, 'tumor_seq_allele2') \
and not is_useful(new_fields, 'tumor_seq_allele1') and not is_useful(new_fields, 'tumor_seq_allele2'):
diagnostics[db_id] += "move on: old allele has tumor allele info"
elif is_useful(new_fields, 'tumor_seq_allele1') and is_useful(new_fields, 'tumor_seq_allele2') \
and not is_useful(existing_fields, 'tumor_seq_allele1') and not is_useful(existing_fields, 'tumor_seq_allele2'):
diagnostics[db_id] += "use new: new allele has tumor allele info"
elif is_useful(existing_fields, 'aa_change') and not is_useful(new_fields, 'aa_change'):
diagnostics[db_id] += "move on: old allele has aa"
elif is_useful(new_fields, 'aa_change') and not is_useful(existing_fields, 'aa_change'):
diagnostics[db_id] += "use new: new allele has aa"
elif new_fields_maf_duplicates_tumor_allele and not existing_fields_maf_duplicates_tumor_allele:
diagnostics[db_id] += "move on: new maf duplicates tumor allele"
elif existing_fields_maf_duplicates_tumor_allele and not new_fields_maf_duplicates_tumor_allele :
diagnostics[db_id] += "use new: existing maf duplicates tumor allele"
elif existing_fields['validation_status'] == 'valid' and new_fields['validation_status'] != 'valid':
diagnostics[db_id] += "move on: old allele validated"
elif new_fields['validation_status'] == 'valid' and existing_fields['validation_status'] != 'valid':
diagnostics[db_id] += "use new: new allele validated"
elif existing_fields['variant_type'] == 'ins' and new_fields['variant_type']=='ins':
ex_t = existing_fields['tumor_seq_allele1']+existing_fields['tumor_seq_allele2']
new_t = new_fields['tumor_seq_allele1']+new_fields['tumor_seq_allele2']
                    # see the comment for deletions below
if len(ex_t) > len(new_t):
diagnostics[db_id] += "move on: old insert longer"
elif len(ex_t) < len(new_t):
diagnostics[db_id] += "use new: new insert longer"
else:
same = True
#cDNA change is in principle superfluous - we have that info from other fields
for field_name in ['aa_change', 'validation_status', 'variant_classification', 'variant_type']:
if is_useful(existing_fields, field_name) and is_useful(new_fields, field_name) and \
existing_fields[field_name]==new_fields[field_name] or \
not is_useful(existing_fields, field_name) and not is_useful(new_fields, field_name):
pass
else:
same = False
break
                        # this is something like "the same for all intents and purposes" - somebody equally lazy in two places deposited this
                        # this might be homozygosity, but it seems more likely that somebody decided it was easier for them to just copy the two fields
if same:
if existing_fields['tumor_seq_allele1']!=existing_fields['tumor_seq_allele2']:
diagnostics[db_id] += "move on: old had differing tumor alleles"
else:
diagnostics[db_id] += "use new: by tiebreaker" # i.e. I am sick of this
elif False:
for field_name, val in existing_fields.iteritems():
blah = None
if new_fields.has_key(field_name):
blah = new_fields[field_name]
print field_name, " ", val, " ", blah
exit(1)
if len(diagnostics[db_id]) == 0: # we are still undiagnosed
# giving the actual allele is the more fundamental info - go for that primarily
allele_diagnostics = selected_info_overlap (existing_fields, new_fields, allele_fields)
if allele_diagnostics=="conflict":
                    # do we have a compound heterozygote by any chance?
if not one_allele_normal(existing_fields) and not one_allele_normal(new_fields):
diagnostics[db_id] = "compound heterozygous"
else:
diagnostics[db_id] = "conflicting allele info"
if existing_fields['validation_status'] == 'valid' and new_fields['validation_status'] != 'valid':
diagnostics[db_id] += "move on: old allele validated"
elif new_fields['validation_status'] == 'valid' and existing_fields['validation_status'] != 'valid':
diagnostics[db_id] += "use new: new allele validated"
elif allele_diagnostics=="existing covers new":
diagnostics[db_id] += "move on: old allele info covers"
elif allele_diagnostics=="new covers existing":
diagnostics[db_id] += "use new: new allele info covers"
elif allele_diagnostics=="duplicate":
diagnostics[db_id] += "allele info duplicate; "
# do we have a tie breaker among the interpretation fields?
interpretation_diagnostics = selected_info_overlap (existing_fields, new_fields, interpretation_fields)
if interpretation_diagnostics=="conflict":
diagnostics[db_id] += "conflicting interpretation"
elif interpretation_diagnostics=="existing covers new":
diagnostics[db_id] += "move on: old interpretation info covers"
                    elif interpretation_diagnostics=="new covers existing":
diagnostics[db_id] += "use new: new interpretation info covers"
elif interpretation_diagnostics=="duplicate":
diagnostics[db_id] += "move on: interpretation info duplicate"
                    else: # we should not really be here - this is just the default behavior so we do not crash on this
diagnostics[db_id] += "move on: keep old"
                else: # we should not really be here - this is just the default behavior so we do not crash on this
diagnostics[db_id] += "move on: keep old"
elif existing_fields['end_position'] == new_fields['end_position']+1 \
and [existing_fields['variant_type'], new_fields['variant_type']]==['dnp','snp']:
diagnostics[db_id] += "move on: old is dnp"
elif new_fields['end_position'] == existing_fields['end_position']+1 \
and [new_fields['variant_type'], existing_fields['variant_type']]==['dnp','snp']:
diagnostics[db_id] += "use new: new is dnp"
        # this is rather arbitrary; my guess: longer rearrangements are harder to detect,
        # and whoever claims they found something like that is putting in more effort:
elif existing_fields['end_position'] > new_fields['end_position']:
diagnostics[db_id] += "move on: old is longer"
elif new_fields['end_position'] > existing_fields['end_position']:
diagnostics[db_id] += "use new: new is longer"
else: # the end position is not the same
diagnostics[db_id] = "end positions different *%s* *%s* " % (
existing_fields['end_position'], new_fields['end_position'])
# how could that happen?
# one of the possibilities (gosh, will I have to go through all of them?)
# is that a frameshift mutation is interpreted differently
# hard to find a robust solution
if existing_fields['variant_classification'] == 'frame_shift_del' and one_allele_normal(
existing_fields) and one_allele_normal(new_fields):
# do nothing, this is a different interpretation of the same mutation
diagnostics[db_id] = "move on: different classification"
elif not one_allele_normal(existing_fields) and not one_allele_normal(new_fields):
# store, this is possibly compound heterozygous
diagnostics[db_id] = "compound heterozygous"
else:
# I don't know what this is
diagnostics[db_id] = "conflict: different length"
return diagnostics
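# Illustration (hedged): the returned dict maps existing database ids to a decision string,
# e.g. {1107: "use new: new allele validated"}; an empty dict means the new entry is an
# exact duplicate of something already stored.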
#########################################
def store_conflicts_and_duplicates (cursor, mutations_table, expected_fields, new_fields):
conflict_table = '%s_conflict_mutations' % (mutations_table.split("_")[0])
# is this an exact copy of something we have already stored as duplicate?
sample_barcode_short = new_fields['sample_barcode_short']
chromosome = new_fields['chromosome']
start_position = new_fields['start_position']
qry = "select * from %s " % conflict_table
qry += "where sample_barcode_short = '%s' " % sample_barcode_short
qry += "and chromosome = '%s' " % chromosome
qry += "and start_position = %s " % start_position
existing_rows = search_db(cursor, qry)
new_entry = False
if not existing_rows: #this is the first time we see this entry
new_entry = True
else:
# take a more careful look
existing_fields_by_database_id = dict( zip (map (lambda x: x[0], existing_rows), map (lambda x: make_named_fields(expected_fields, x[1:]), existing_rows) ))
# if this is not an exact duplicate, store
new_entry = not is_exact_duplicate(new_fields, existing_fields_by_database_id)
if new_entry:
insert_into_db(cursor, conflict_table, new_fields)
return "stored in conflicts and duplicates table"
else:
return "found stored in conflicts and duplicates table"
#########################################
def resolve_duplicate (cursor, table, expected_fields, existing_rows, new_fields):
existing_fields_by_database_id = dict( zip (map (lambda x: x[0], existing_rows), map (lambda x: make_named_fields(expected_fields, x[1:]), existing_rows) ))
# try to diagnose how come we have multiple reports for a mutation starting at a given position
diagnostics = diagnose_duplication_reasons (cursor, existing_fields_by_database_id, new_fields)
# if this is an exact duplicate do nothing (this is mainly to protect us from re-runs filling the database with the same things by mistake)
if not diagnostics: return "exact duplicate"
if False:
print "+"*18
for dbid, diag in diagnostics.iteritems():
print dbid, diag
# if False and new_fields['start_position'] == 35043650:
print
print "="*20
for header in new_fields.keys():
print "%30s [ %s ] " % (header, new_fields[header]),
for existing_fields in existing_fields_by_database_id.values():
print " [ %s ] " % (existing_fields[header]),
print
print
print
# in the order of difficulty ...
# 1) if it is a duplicate or subset of one of the existing entries, we do not have to worry about it any more
this_is_a_duplicate = len(filter(lambda x: "move on" in x, diagnostics.values() ))>0
if this_is_a_duplicate:
#store the new entry in the conflict_mutations table
db_ids_to_be_left_in_place = filter(lambda x: "move on" in diagnostics[x], diagnostics.keys())
# special case - we are favoring the dnp annotation over snp
dnps = filter(lambda x: "dnp" in diagnostics[x],db_ids_to_be_left_in_place)
other = filter(lambda x: not "dnp" in diagnostics[x],db_ids_to_be_left_in_place)
descr_string = ""
if len(dnps)>0:
id_list_string = ""
for db_id in dnps:
if len(id_list_string)>0: id_list_string+= ","
id_list_string += str(db_id)
descr_string += "snp vs dnp for (" + id_list_string + ") in table %s" % table
if len(other)>0:
if descr_string: descr_string += "; "
id_list_string = ""
for db_id in other:
if len(id_list_string)>0: id_list_string+= ","
id_list_string += str(db_id)
descr_string += "duplicate of (" + id_list_string + ") in table %s" % table
new_fields['conflict'] = descr_string
return store_conflicts_and_duplicates (cursor, table, expected_fields, new_fields)
there_is_no_conflict = len(filter(lambda x: "conflict" in x, diagnostics.values() ))==0
dbids_to_be_replaced = filter(lambda dbid: "use new" in diagnostics[dbid], existing_fields_by_database_id.keys() )
# 2) if there is no conflict, just replace the rows that the new one covers (has a superset info)
if there_is_no_conflict:
if len(dbids_to_be_replaced)>0:
update_db (cursor, table, dbids_to_be_replaced[0], new_fields)
for db_id in dbids_to_be_replaced:
# store in the duplicates and conflicts
existing_fields = existing_fields_by_database_id[db_id]
if "dnp" in diagnostics[db_id]:
conflict_annotation_updated(existing_fields, "snp/dnp", dbids_to_be_replaced[0])
store_conflicts_and_duplicates(cursor, table, expected_fields, existing_fields)
for db_id in dbids_to_be_replaced[1:]:
# delete from the main table
qry = "delete from %s where id=%d" % (table, db_id)
search_db (cursor, qry)
return "superset"
else: # there is one alternative possibility to covered entries: candidate compound mutations
compound_db_ids = filter (lambda dbid: "compound" in diagnostics[dbid], existing_fields_by_database_id.keys())
if len(compound_db_ids)>0:
new_fields['conflict'] = new_conflict_annotation ("compound", compound_db_ids)
insert_into_db (cursor, table, new_fields)
return "compound"
else:
print " *** ", filter(lambda dbid: "use new" in diagnostics[dbid], existing_fields_by_database_id.keys() )
print "we should not have ended here ..."
exit(1)
# 3) if there is a conflict, but no rows to replace, add the new row and mark conflict
# (potentially compound get the similar treatment)
conflicted_db_ids = filter(lambda dbid: "conflict" in diagnostics[dbid], existing_fields_by_database_id.keys() )
if len(dbids_to_be_replaced)==0:
new_id = insert_into_db (cursor, table, new_fields)
for db_id in conflicted_db_ids:
update_conflict_field (cursor, table, db_id, existing_fields_by_database_id[db_id], new_id, "unresolved")
update_conflict_field (cursor, table, new_id, new_fields, db_id, "unresolved")
    # 4) if there are rows to replace and there is a conflict with some other rows,
# replace the rows and reconsider conflicts
else:
# new entry
id_to_reuse = dbids_to_be_replaced[0]
new_fields['conflict'] = new_conflict_annotation ("unresolved", conflicted_db_ids)
update_db (cursor, table, id_to_reuse, new_fields)
for db_id in dbids_to_be_replaced:
# store in the duplicates and conflicts
store_conflicts_and_duplicates (cursor, table, expected_fields, existing_fields_by_database_id[db_id])
for db_id in dbids_to_be_replaced[1:]:
qry = "delete from %s where id=%d" % (table, db_id)
search_db (cursor, qry)
# corrected old entries
for db_id in conflicted_db_ids:
new_annot = new_conflict_annotation ("unresolved", map(lambda x: id_to_reuse if x==db_id else x, conflicted_db_ids))
update_db (cursor, table, db_id, {'conflict': new_annot})
return "conflict"
#########################################
def insert_into_db (cursor, table, fields):
qry = "insert into %s " % table
qry += "("
first = True
for field in fields.keys(): # again will have to check for the type here
if (not first):
qry += ", "
qry += field
first = False
qry += ")"
qry += " values "
qry += "("
first = True
for value in fields.values(): # again will have to check for the type here
if (not first):
qry += ", "
if value is None:
qry += " null "
elif type(value) is int:
qry += " %d" % value
elif type(value) is float:
qry += " %f" % value
else:
qry += " \'%s\'" % value
first = False
qry += ")"
rows = search_db (cursor, qry)
    # if there is a return, something went wrong
if (rows):
search_db (cursor, qry, verbose=True)
exit(1) # exit, bcs we should not be here
qry = "select last_insert_id()"
rows = search_db (cursor, qry)
if not rows:
print "last insert id failure (?)"
exit(1) # last insert id failure
return int(rows[0][0])
#########################################
def store (cursor, table, expected_fields, maf_header_fields, new_row):
sample_barcode_short = new_row[ maf_header_fields.index('sample_barcode_short')]
chromosome = new_row[ maf_header_fields.index('chromosome')]
start_position = new_row[ maf_header_fields.index('start_position')]
qry = "select * from %s " % table
qry += "where sample_barcode_short = '%s' " % sample_barcode_short
qry += "and chromosome = '%s' " % chromosome
qry += "and start_position = %s " % start_position
existing_rows = search_db(cursor, qry)
new_fields = make_named_fields(maf_header_fields, new_row, expected_fields)
if not existing_rows: #this is the first time we see a mutation in this place in this patient
insert_into_db(cursor, table, new_fields)
return "new"
else:
# do something about possible duplicates
return resolve_duplicate (cursor, table, expected_fields, existing_rows, new_fields)
return ""
#########################################
def field_cleanup(maf_header_fields, sample_barcode_short, maf_diagnostics, meta_id, maf_fields):
number_of_header_fields = len(maf_header_fields)
normal_allele2_missing = maf_diagnostics and "match_norm_seq_allele2" in maf_diagnostics
normal_allele1_missing = maf_diagnostics and "match_norm_seq_allele1" in maf_diagnostics
reference_allele_missing = maf_diagnostics and "reference_allele" in maf_diagnostics
# TCGA is exhausting all possibilities here (row longer than the header):
clean_fields = maf_fields[:number_of_header_fields] # I do not know what you guys are anyway, so off you go
clean_fields = [x.replace("'", '') for x in clean_fields] # get rid of the quote marks
clean_fields = [x.replace(" ", '') for x in clean_fields] # get rid of the quote marks
# is the number of the fields smaller than the number we are expecting from the header?
    # (yes, that can happen - anybody can submit anything in whatever format they like here)
for i in range(len(clean_fields), number_of_header_fields):
clean_fields.append("missing")
clean_fields.append(sample_barcode_short)
# also in addition to the fields in the original maf file, we are adding a pointer to the maffile itself
clean_fields.append(meta_id)
# there is an optional 'conflict field' which might get filled if there is one:
clean_fields.append(None)
# entrez id should be a number
if 'entrez_gene_id' in maf_header_fields:
entrez_gene_id_field_idx = maf_header_fields.index('entrez_gene_id')
entrez_gene_id_field_entry = clean_fields[entrez_gene_id_field_idx]
if type(entrez_gene_id_field_entry)==str and len(entrez_gene_id_field_entry.replace(" ",""))==0: clean_fields[entrez_gene_id_field_idx]=None
# special: I want to be able to index on aa_change
    # so I limited the length of the aa_change field to 100 characters (if it does not know
    # the length of the field, mysql refuses to index)
# but sometimes people put large swath of sequence here; instead of chopping, replace with the mutation type
if 'aa_change' in maf_header_fields:
aa_change_field_idx = maf_header_fields.index('aa_change')
aa_change_field_entry = clean_fields[aa_change_field_idx]
if len(aa_change_field_entry) > 100:
clean_fields[aa_change_field_idx] = clean_fields[maf_header_fields.index('variant_classification')]
else:
clean_fields[aa_change_field_idx] = aa_change_field_entry.replace ("p.", "")
# the same for cdna
if 'cdna_change' in maf_header_fields:
cdna_change_field_idx = maf_header_fields.index('cdna_change')
cdna_change_field_entry = clean_fields[cdna_change_field_idx]
if len(cdna_change_field_entry) > 100:
clean_fields[cdna_change_field_idx] = clean_fields[maf_header_fields.index('variant_classification')]
else:
clean_fields[cdna_change_field_idx] = clean_cdna_change_annotation(cdna_change_field_entry)
    # change to lowercase wherever possible
for header in ['variant_classification', 'variant_type', 'verification_status', 'validation_status' , 'mutation_status']:
index = maf_header_fields.index(header)
clean_fields[index] = clean_fields[index].lower()
for header in ['start_position', 'end_position']:
index = maf_header_fields.index(header)
clean_fields[index] = int(clean_fields[index])
# one more thing, I hope it is the last
chromosome_field = maf_header_fields.index('chromosome')
if clean_fields[chromosome_field].upper() == "MT":
clean_fields[chromosome_field] = "M"
#tumor1_idx = maf_header_fields.index('tumor_seq_allele1')
#tumor2_idx = maf_header_fields.index('tumor_seq_allele2')
norm1_idx = maf_header_fields.index('match_norm_seq_allele1')
norm2_idx = maf_header_fields.index('match_norm_seq_allele2')
ref_idx = maf_header_fields.index('reference_allele')
    # if normal allele2 info is missing in the whole maf file, fill it in from allele1
    # (and vice versa; if both are missing, fall back to the reference allele)
if normal_allele2_missing and not normal_allele1_missing:
clean_fields[norm2_idx] = clean_fields[norm1_idx]
elif normal_allele1_missing and not normal_allele2_missing:
clean_fields[norm1_idx] = clean_fields[norm2_idx]
elif normal_allele1_missing and normal_allele2_missing and not reference_allele_missing:
clean_fields[norm1_idx] = clean_fields[ref_idx]
clean_fields[norm2_idx] = clean_fields[ref_idx]
var_type_idx = maf_header_fields.index('variant_type')
var_class_idx = maf_header_fields.index('variant_classification')
if "del" in clean_fields[var_class_idx]:
clean_fields[var_type_idx] = "del"
if "ins" in clean_fields[var_class_idx]:
clean_fields[var_type_idx] = "ins"
    # this all needs to be redone if they ever start to put in a decent estimate for the alleles
if clean_fields[var_type_idx] != "ins":
if clean_fields[norm2_idx] == "-":
clean_fields[norm2_idx] = ""
if clean_fields[norm1_idx] == "-":
clean_fields[norm1_idx] = ""
return clean_fields
#########################################
def construct_short_tumor_barcode(maf_header_fields, maf_fields):
# here is where we construct the short version of the barcode that identifies the sample
# I am adding this one so I do not have to search the database by doing substring comparison
tbarcode = maf_fields[maf_header_fields.index('tumor_sample_barcode')]
# the elements of the barcode are
# project - tissue source site (TSS) - participant -
# source.vial - portion.analyte - plate - (sequencing or characterization center)
elements = tbarcode.split('-')
tumor_type_code = elements[3][:-1]
sample_barcode_short = '-'.join(elements[1:3] + [tumor_type_code]) # get rid of the 'vial' character
return sample_barcode_short
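# Worked example (illustration): for tumor_sample_barcode "TCGA-AB-2803-03A-01D-0739-09"
# the split gives ['TCGA', 'AB', '2803', '03A', ...]; dropping the vial letter from '03A'
# yields the short barcode "AB-2803-03".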
#########################################
def check_tumor_type(tumor_type_ids, maf_header_fields, maf_fields):
# here is where we construct the short version of the barcode that identifies the sample
# I am adding this one so I do not have to search the database by doing substring comparison
tbarcode = maf_fields[maf_header_fields.index('tumor_sample_barcode')]
# the elements of the barcode are
# project - tissue source site (TSS) - participant -
# source.vial - portion.analyte - plate - (sequencing or characterization center)
elements = tbarcode.split('-')
tumor_type_code = elements[3][:-1]
# tumor_type_code can signal additional or metastatic tumors from the same patient
# to keep our life simple we'll just stick to primary tumors
# indicated by source code 01, 03, 08, or 09
if not tumor_type_code in tumor_type_ids: return None
sample_barcode_short = '-'.join(elements[1:3] + [tumor_type_code]) # get rid of the 'vial' character
return sample_barcode_short
#########################################
def load_maf (cursor, db_name, table, maffile, maf_diagnostics, meta_id, stats):
if not os.path.isfile(maffile):
print "not found: "
print maffile
exit(1) # maffile not found
cmd = "wc -l " + maffile
nol = int(commands.getstatusoutput(cmd)[1].split()[0]) -1
print "processing %d lines from %s " % (nol, maffile)
print "diagnostics:", maf_diagnostics
expected_fields = get_expected_fields(cursor, db_name, table)
maf_header_fields = process_header_line(maffile)
inff = open(maffile, "r")
line_ct = 0
start = time.time()
first = True
tot_entries = 0
for line in inff:
line_ct += 1
#if not line_ct%1000:
# print "\t\t\t processed %5d (%4.1f%%) %8.2fs" % (line_ct, float(line_ct)/nol*100, time.time()-start)
if line.isspace(): continue
if line[0]=='#': continue
if first: # this is the header line
first = False
continue
tot_entries += 1
line = line.rstrip()
maf_fields = line.split('\t')
sample_barcode_short = construct_short_tumor_barcode(maf_header_fields, maf_fields)
clean_fields = field_cleanup(maf_header_fields, sample_barcode_short, maf_diagnostics, meta_id, maf_fields)
retval = store(cursor, table, expected_fields,
maf_header_fields + ['sample_barcode_short', 'meta_info_id', 'conflict'], clean_fields)
if not retval in stats.keys(): stats[retval] = 0
stats[retval] += 1
inff.close()
#########################################
def load_tables(tables, other_args):
[db_dir, db_name] = other_args
db = connect_to_mysql()
cursor = db.cursor()
switch_to_db(cursor,db_name)
for table in tables:
print "========================="
print table, os.getpid()
cancer_type = table.split("_")[0]
qry = "select * from %s_mutations_meta" % cancer_type
rows = search_db(cursor, qry)
if not rows:
print "no meta info found"
continue
maf_file = {}
maf_diagnostics = {}
for row in rows:
[meta_id, file_name, quality_check, assembly, diagnostics] = row
if quality_check=="fail": continue
maf_file[meta_id] = "/".join([db_dir, cancer_type, "Somatic_Mutations", file_name])
maf_diagnostics[meta_id] = diagnostics
stats = {}
for meta_id, maffile in maf_file.iteritems():
time0 = time.time()
load_maf (cursor, db_name, table, maffile, maf_diagnostics[meta_id], meta_id, stats)
time1 = time.time()
print "\t %s done in %.3f mins %d" % (maffile, float(time1-time0)/60, os.getpid())
#for stattype, ct in stats.iteritems():
# print "\t\t", stattype, ct
print
cursor.close()
db.close()
##################################################################################
def main():
db_dir = '/storage/databases/tcga'
if not os.path.isdir(db_dir):
print "directory " + db_dir + " not found"
exit(1) # TCGA db dir not found
db = connect_to_mysql()
cursor = db.cursor()
qry = "select table_name from information_schema.tables "
qry += "where table_schema='tcga' and table_name like '%_somatic_mutations'"
tables = [field[0] for field in search_db(cursor,qry)]
cursor.close()
db.close()
db_name = "tcga"
number_of_chunks = 8 # myISAM does not deadlock
parallelize(number_of_chunks, load_tables, tables, [db_dir, db_name])
#########################################
if __name__ == '__main__':
main()
|
ivanamihalek/tcga
|
tcga/01_somatic_mutations/005a_load_maf.py
|
Python
|
gpl-3.0
| 32,427
|
# $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Double Layer Flat Space Truss Roof
"""
from __future__ import print_function
_status = 'checked'
_level = 'advanced'
_topics = ['FEA']
_techniques = ['dialog', 'animation', 'persistence', 'color']
from gui.draw import *
from plugins.properties import *
import time
def run():
try:
############################
# Load the needed calpy modules
from plugins import calpy_itf
import calpy
calpy.options.optimize = False
from calpy import fe_util
from calpy import truss3d
############################
except:
return
if not checkWorkdir():
return
####
#Data
###################
dx = 1800 # Modular size [mm]
ht = 1500 # Deck height [mm]
nx = 8 # number of bottom deck modules in x direction
ny = 6 # number of bottom deck modules in y direction
q = -0.005 #distributed load [N/mm^2]
#############
#Creating the model
###################
top = (Formex("1").replic2(nx-1,ny,1,1) + Formex("2").replic2(nx,ny-1,1,1)).scale(dx)
top.setProp(3)
bottom = (Formex("1").replic2(nx,ny+1,1,1) + Formex("2").replic2(nx+1,ny,1,1)).scale(dx).translate([-dx/2,-dx/2,-ht])
bottom.setProp(0)
T0 = Formex(4*[[[0,0,0]]]) # 4 times the corner of the top deck
T4 = bottom.select([0,1,nx,nx+1]) # 4 nodes of corner module of bottom deck
dia = connect([T0,T4]).replic2(nx,ny,dx,dx)
dia.setProp(1)
F = (top+bottom+dia)
# Show upright
createView('myview1',(0.,-90.,0.))
clear();linewidth(1);draw(F,view='myview1')
############
#Creating FE-model
###################
mesh = F.toMesh()
###############
#Creating elemsets
###################
# Remember: elements in mesh are in the same order as elements in F
topbar = where(F.prop==3)[0]
bottombar = where(F.prop==0)[0]
diabar = where(F.prop==1)[0]
###############
#Creating nodesets
###################
nnod = mesh.nnodes()
nlist = arange(nnod)
count = zeros(nnod)
for n in mesh.elems.flat:
count[n] += 1
field = nlist[count==8]
topedge = nlist[count==7]
topcorner = nlist[count==6]
bottomedge = nlist[count==5]
bottomcorner = nlist[count==3]
edge = concatenate([topedge,topcorner])
support = concatenate([bottomedge,bottomcorner])
########################
#Defining and assigning the properties
#############################
Q = 0.5*q*dx*dx
P = PropertyDB()
P.nodeProp(field,cload = [0,0,Q,0,0,0])
P.nodeProp(edge,cload = [0,0,Q/2,0,0,0])
P.nodeProp(support,bound = [1,1,1,0,0,0])
circ20 = ElemSection(section={'name':'circ20','sectiontype':'Circ','radius':10, 'cross_section':314.159}, material={'name':'S500', 'young_modulus':210000, 'shear_modulus':81000, 'poisson_ratio':0.3, 'yield_stress' : 500,'density':0.000007850})
props = [ \
P.elemProp(topbar,section=circ20,eltype='T3D2'), \
P.elemProp(bottombar,section=circ20,eltype='T3D2'), \
P.elemProp(diabar,section=circ20,eltype='T3D2'), \
]
    # Since all elems have the same characteristics, we could just have used:
    # P.elemProp(section=circ20,eltype='T3D2')
# But putting the elems in three sets allows for separate postprocessing
#########
#calpy analysis
###################
# boundary conditions
bcon = zeros([nnod,3],dtype=int)
bcon[support] = [ 1,1,1 ]
fe_util.NumberEquations(bcon)
#materials
mats = array([ [p.young_modulus,p.density,p.cross_section] for p in props])
matnr = zeros_like(F.prop)
for i,p in enumerate(props):
matnr[p.set] = i+1
matnod = concatenate([matnr.reshape((-1,1)),mesh.elems+1],axis=-1)
ndof = bcon.max()
# loads
nlc=1
loads=zeros((ndof,nlc),Float)
for n in field:
loads[:,0] = fe_util.AssembleVector(loads[:,0],[ 0.0, 0.0, Q ],bcon[n,:])
for n in edge:
loads[:,0] = fe_util.AssembleVector(loads[:,0],[ 0.0, 0.0, Q/2 ],bcon[n,:])
message("Performing analysis: this may take some time")
# Find a candidate for the output file
fullname = os.path.splitext(__file__)[0] + '.out'
basename = os.path.basename(fullname)
dirname = os.path.dirname(fullname)
outfilename = None
for candidate in [dirname,pf.cfg['workdir'],'/var/tmp']:
if isWritable(candidate):
fullname = os.path.join(candidate,basename)
if not os.path.exists(fullname) or isWritable(fullname):
outfilename = fullname
break
if outfilename is None:
error("No writable path: I can not execute the simulation.\nCopy the script to a writable path and try running from there.")
return
outfile = open(outfilename,'w')
message("Output is written to file '%s'" % os.path.realpath(outfilename))
stdout_saved = sys.stdout
sys.stdout = outfile
print("# File created by pyFormex on %s" % time.ctime())
print("# Script name: %s" % pf.scriptName)
displ,frc = truss3d.static(mesh.coords,bcon,mats,matnod,loads,Echo=True)
print("# Analysis finished on %s" % time.ctime())
sys.stdout = stdout_saved
outfile.close()
################################
#Using pyFormex as postprocessor
################################
if pf.options.gui:
from plugins.postproc import niceNumber,frameScale
import gui.colorscale as cs
import gui.decors
def showOutput():
#showText(file(outfilename).read())
showFile(outfilename)
def showForces():
# Give the mesh some meaningful colors.
# The frc array returns element forces and has shape
# (nelems,nforcevalues,nloadcases)
# In this case there is only one resultant force per element (the
            # normal force), and only one load case; we still need to select the
            # scalar element result values from the array into a one-dimensional
            # vector val.
val = frc[:,0,0]
# create a colorscale
CS = cs.ColorScale([blue,yellow,red],val.min(),val.max(),0.,2.,2.)
cval = array(map(CS.color,val))
#aprint(cval,header=['Red','Green','Blue'])
clear()
linewidth(3)
draw(mesh,color=cval)
drawText('Normal force in the truss members',300,50,size=24)
CL = cs.ColorLegend(CS,100)
CLA = decors.ColorLegend(CL,10,10,30,200,size=24)
decorate(CLA)
# Show a deformed plot
def deformed_plot(dscale=100.):
"""Shows a deformed plot with deformations scaled with a factor scale."""
# deformed structure
dnodes = mesh.coords + dscale * displ[:,:,0]
deformed = Mesh(dnodes,mesh.elems,mesh.prop)
FA = draw(deformed,bbox='last',view=None,wait=False)
TA = drawText('Deformed geometry (scale %.2f)' % dscale,300,50,size=24)
return FA,TA
def animate_deformed_plot(amplitude,sleeptime=1,count=1):
"""Shows an animation of the deformation plot using nframes."""
FA = TA = None
clear()
while count > 0:
count -= 1
for s in amplitude:
F,T = deformed_plot(s)
if FA:
pf.canvas.removeActor(FA)
if TA:
pf.canvas.removeDecoration(TA)
TA,FA = T,F
sleep(sleeptime)
def getOptimscale():
"""Determine an optimal scale for displaying the deformation"""
siz0 = F.sizes()
dF = Formex(displ[:,:,0][mesh.elems])
#clear(); draw(dF,color=black)
siz1 = dF.sizes()
return niceNumber(1./(siz1/siz0).max())
def showDeformation():
clear()
linewidth(1)
draw(F,color=black)
linewidth(3)
deformed_plot(optimscale)
view('last',True)
def showAnimatedDeformation():
"""Show animated deformation"""
nframes = 10
res = askItems([
_I('scale',optimscale),
_I('nframes',nframes),
_I('form','revert',choices=['up','updown','revert']),
_I('duration',5./nframes),
_I('ncycles',2),
],caption='Animation Parameters')
if res:
scale = res['scale']
nframes = res['nframes']
form = res['form']
duration = res['duration']
ncycles = res['ncycles']
amp = scale * frameScale(nframes,form)
animate_deformed_plot(amp,duration,ncycles)
optimscale = getOptimscale()
options = ['None','Output File','Member forces','Deformation','Animated deformation']
functions = [None,showOutput,showForces,showDeformation,showAnimatedDeformation]
while True:
ans = ask("Which results do you want to see?",options)
ind = options.index(ans)
if ind <= 0:
break
functions[ind]()
if widgets.input_timeout > 0: #timeout
break
if __name__ == 'draw':
run()
# End
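# Hedged usage note: the "__name__ == 'draw'" guard above is the pyFormex scripting
# convention, so this example is intended to be run from inside pyFormex (for instance
# via its Examples menu), not as a standalone Python script.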
|
dladd/pyFormex
|
pyformex/examples/SpaceTrussRoof_calpy.py
|
Python
|
gpl-3.0
| 10,543
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import test
from taskcoachlib import patterns
class EventTest(test.TestCase):
def setUp(self):
self.event = patterns.Event('eventtype', self, 'some value')
def testEqualWhenAllValuesAreEqual(self):
self.assertEqual(self.event,
patterns.Event('eventtype', self, 'some value'))
def testUnequalWhenValuesAreDifferent(self):
self.assertNotEqual(self.event,
patterns.Event('eventtype', self, 'other value'))
def testUnequalWhenTypesAreDifferent(self):
self.assertNotEqual(self.event,
patterns.Event('other type', self, 'some value'))
def testUnequalWhenSourcesAreDifferent(self):
self.assertNotEqual(self.event,
patterns.Event('eventtype', None, 'some value'))
def testEventWithoutType(self):
event = patterns.Event()
self.assertEqual(set(), event.types())
def testEventWithoutSources(self):
event = patterns.Event('eventtype')
self.assertEqual(set(), event.sources())
def testEventSources(self):
self.assertEqual(set([self]), self.event.sources())
def testEventValue(self):
self.assertEqual('some value', self.event.value())
def testEventValues(self):
self.assertEqual(('some value',), self.event.values())
def testEventValueForSpecificSource(self):
self.assertEqual('some value', self.event.value(self))
def testEventValuesForSpecificSource(self):
self.assertEqual(('some value',), self.event.values(self))
def testAddSource(self):
self.event.addSource('source')
self.assertEqual(set([self, 'source']), self.event.sources())
def testAddExistingSource(self):
self.event.addSource(self)
self.assertEqual(set([self]), self.event.sources())
def testAddSourceAndValue(self):
self.event.addSource('source', 'value')
self.assertEqual('value', self.event.value('source'))
def testAddSourceAndValues(self):
self.event.addSource('source', 'value1', 'value2')
self.assertEqual(set(['value1', 'value2']), set(self.event.values('source')))
def testExistingSourceAndValue(self):
self.event.addSource(self, 'new value')
self.assertEqual(set(['some value', 'new value']), set(self.event.values()))
def testEventTypes(self):
self.assertEqual(set(['eventtype']), self.event.types())
def testAddSourceForSpecificType(self):
self.event.addSource(self, type='another eventtype')
self.assertEqual(set(['eventtype', 'another eventtype']),
self.event.types())
def testGetSourcesForSpecificType(self):
self.assertEqual(set([self]), self.event.sources('eventtype'))
def testGetSourcesForSpecificTypes(self):
self.event.addSource('source', type='another eventtype')
self.assertEqual(set([self, 'source']), self.event.sources(*self.event.types()))
def testGetSourcesForNonExistingEventType(self):
self.assertEqual(set(), self.event.sources('unused eventType'))
def testGetAllSourcesAfterAddingSourceForSpecificType(self):
self.event.addSource('source', type='another eventtype')
self.assertEqual(set([self, 'source']), self.event.sources())
def testAddSourceAndValueForSpecificType(self):
self.event.addSource('source', 'value', type='another eventtype')
self.assertEqual('value', self.event.value('source'))
def testAddSourceAndValuesForSpecificType(self):
self.event.addSource('source', 'value1', 'value2',
type='another eventtype')
self.assertEqual(set(['value1', 'value2']), set(self.event.values('source')))
def testAddExistingSourceToAnotherType(self):
self.event.addSource(self, type='another eventtype')
self.assertEqual(set([self]), self.event.sources())
def testAddExistingSourceWithValueToTypeDoesNotRemoveValueForEarlierType(self):
self.event.addSource(self, 'value for another eventtype',
type='another eventtype')
self.assertEqual('some value', self.event.value(self, type='eventtype'))
def testAddExistingSourceWithValueToType(self):
self.event.addSource(self, 'value for another eventtype',
type='another eventtype')
self.assertEqual('value for another eventtype',
self.event.value(self, type='another eventtype'))
def testSubEventForOneTypeWhenEventHasOneType(self):
self.assertEqual(self.event, self.event.subEvent((self.event.type(), self)))
def testSubEventForOneTypeWhenEventHasTwoTypes(self):
self.event.addSource('source', type='another eventtype')
expectedEvent = patterns.Event('eventtype', self, 'some value')
self.assertEqual(expectedEvent, self.event.subEvent(('eventtype', self)))
def testSubEventForTwoTypesWhenEventHasTwoTypes(self):
self.event.addSource('source', type='another eventtype')
args = [('eventtype', self), ('another eventtype', 'source')]
self.assertEqual(self.event, self.event.subEvent(*args)) # pylint: disable=W0142
def testSubEventForTypeThatIsNotPresent(self):
self.assertEqual(patterns.Event(),
self.event.subEvent(('missing eventtype', self)))
def testSubEventForOneSourceWhenEventHasOneSource(self):
self.assertEqual(self.event, self.event.subEvent(('eventtype', self)))
def testSubEventForUnspecifiedSource(self):
self.assertEqual(self.event, self.event.subEvent(('eventtype', None)))
def testSubEventForUnspecifiedSourceAndSpecifiedSources(self):
self.assertEqual(self.event, self.event.subEvent(('eventtype', self),
['eventtype', None]))
def testSubEventForSourceThatIsNotPresent(self):
self.assertEqual(patterns.Event(),
self.event.subEvent(('eventtype', 'missing source')))
def testSubEventForSourceThatIsNotPresentForSpecifiedType(self):
self.event.addSource('source', type='another eventtype')
self.assertEqual(patterns.Event(),
self.event.subEvent(('eventtype', 'source')))
class ObservableCollectionFixture(test.TestCase):
def setUp(self):
self.collection = self.createObservableCollection()
patterns.Publisher().registerObserver(self.onAdd,
eventType=self.collection.addItemEventType(),
eventSource=self.collection)
patterns.Publisher().registerObserver(self.onRemove,
eventType=self.collection.removeItemEventType(),
eventSource=self.collection)
self.receivedAddEvents = []
self.receivedRemoveEvents = []
def createObservableCollection(self):
raise NotImplementedError # pragma: no cover
def onAdd(self, event):
self.receivedAddEvents.append(event)
def onRemove(self, event):
self.receivedRemoveEvents.append(event)
class ObservableCollectionTestsMixin(object):
def testCollectionEqualsItself(self):
self.failUnless(self.collection == self.collection)
def testCollectionDoesNotEqualOtherCollections(self):
self.failIf(self.collection == self.createObservableCollection())
def testAppend(self):
self.collection.append(1)
self.failUnless(1 in self.collection)
def testAppend_Notification(self):
self.collection.append(1)
self.assertEqual(1, self.receivedAddEvents[0].value())
def testExtend(self):
self.collection.extend([1, 2])
self.failUnless(1 in self.collection and 2 in self.collection)
def testExtend_Notification(self):
self.collection.extend([1, 2, 3])
self.assertEqual((1, 2, 3), self.receivedAddEvents[0].values())
def testExtend_NoNotificationWhenNoItems(self):
self.collection.extend([])
self.failIf(self.receivedAddEvents)
def testRemove(self):
self.collection.append(1)
self.collection.remove(1)
self.failIf(self.collection)
def testRemove_Notification(self):
self.collection.append(1)
self.collection.remove(1)
self.assertEqual(1, self.receivedRemoveEvents[0].value())
def testRemovingAnItemNotInCollection_CausesException(self):
try:
self.collection.remove(1)
self.fail('Expected ValueError or KeyError') # pragma: no cover
except (ValueError, KeyError):
pass
def testRemovingAnItemNotInCollection_CausesNoNotification(self):
try:
self.collection.remove(1)
except (ValueError, KeyError):
pass
self.failIf(self.receivedRemoveEvents)
def testRemoveItems(self):
self.collection.extend([1, 2, 3])
self.collection.removeItems([1, 2])
self.failIf(1 in self.collection or 2 in self.collection)
def testRemoveItems_Notification(self):
self.collection.extend([1, 2, 3])
self.collection.removeItems([1, 2])
self.assertEqual((1, 2), self.receivedRemoveEvents[0].values())
def testRemoveItems_NoNotificationWhenNoItems(self):
self.collection.extend([1, 2, 3])
self.collection.removeItems([])
self.failIf(self.receivedRemoveEvents)
def testClear(self):
self.collection.extend([1, 2, 3])
self.collection.clear()
self.failIf(self.collection)
def testClear_Notification(self):
self.collection.extend([1, 2, 3])
self.collection.clear()
self.assertEqual((1, 2, 3), self.receivedRemoveEvents[0].values())
def testClear_NoNotificationWhenNoItems(self):
self.collection.clear()
self.failIf(self.receivedRemoveEvents)
def testModificationEventTypes(self):
self.assertEqual([self.collection.addItemEventType(),
self.collection.removeItemEventType()],
self.collection.modificationEventTypes())
class ObservableListTest(ObservableCollectionFixture, ObservableCollectionTestsMixin):
def createObservableCollection(self):
return patterns.ObservableList()
def testAppendSameItemTwice(self):
self.collection.append(1)
self.collection.append(1)
self.assertEqual(2, len(self.collection))
class ObservableSetTest(ObservableCollectionFixture, ObservableCollectionTestsMixin):
def createObservableCollection(self):
return patterns.ObservableSet()
def testAppendSameItemTwice(self):
self.collection.append(1)
self.collection.append(1)
self.assertEqual(1, len(self.collection))
class ListDecoratorTest_Constructor(test.TestCase):
def testOriginalNotEmpty(self):
observable = patterns.ObservableList([1, 2, 3])
observer = patterns.ListDecorator(observable)
self.assertEqual([1, 2, 3], observer)
class SetDecoratorTest_Constructor(test.TestCase):
def testOriginalNotEmpty(self):
observable = patterns.ObservableSet([1, 2, 3])
observer = patterns.SetDecorator(observable)
self.assertEqual([1, 2, 3], observer)
class ListDecoratorTest_AddItems(test.TestCase):
def setUp(self):
self.observable = patterns.ObservableList()
self.observer = patterns.ListDecorator(self.observable)
def testAppendToObservable(self):
self.observable.append(1)
self.assertEqual([1], self.observer)
def testAppendToObserver(self):
self.observer.append(1)
self.assertEqual([1], self.observable)
def testExtendObservable(self):
self.observable.extend([1, 2, 3])
self.assertEqual([1, 2, 3], self.observer)
def testExtendObserver(self):
self.observer.extend([1, 2, 3])
self.assertEqual([1, 2, 3], self.observable)
class SetDecoratorTest_AddItems(test.TestCase):
def setUp(self):
self.observable = patterns.ObservableList()
self.observer = patterns.SetDecorator(self.observable)
def testAppendToObservable(self):
self.observable.append(1)
self.assertEqual([1], self.observer)
def testAppendToObserver(self):
self.observer.append(1)
self.assertEqual([1], self.observable)
def testExtendObservable(self):
self.observable.extend([1, 2, 3])
self.assertEqual([1, 2, 3], self.observer)
def testExtendObserver(self):
self.observer.extend([1, 2, 3])
self.assertEqual([1, 2, 3], self.observable)
class ListDecoratorTest_RemoveItems(test.TestCase):
def setUp(self):
self.observable = patterns.ObservableList()
self.observer = patterns.ListDecorator(self.observable)
self.observable.extend([1, 2, 3])
def testRemoveFromOriginal(self):
self.observable.remove(1)
self.assertEqual([2, 3], self.observer)
def testRemoveFromObserver(self):
self.observer.remove(1)
self.assertEqual([2, 3], self.observable)
def testRemoveItemsFromOriginal(self):
self.observable.removeItems([1, 2])
self.assertEqual([3], self.observer)
def testRemoveItemsFromObserver(self):
self.observer.removeItems([1, 2])
self.assertEqual([3], self.observable)
class SetDecoratorTest_RemoveItems(test.TestCase):
def setUp(self):
self.observable = patterns.ObservableList()
self.observer = patterns.SetDecorator(self.observable)
self.observable.extend([1, 2, 3])
def testRemoveFromOriginal(self):
self.observable.remove(1)
self.assertEqual([2, 3], self.observer)
def testRemoveFromObserver(self):
self.observer.remove(1)
self.assertEqual([2, 3], self.observable)
def testRemoveItemsFromOriginal(self):
self.observable.removeItems([1, 2])
self.assertEqual([3], self.observer)
def testRemoveItemsFromObserver(self):
self.observer.removeItems([1, 2])
self.assertEqual([3], self.observable)
class ListDecoratorTest_ObserveTheObserver(test.TestCase):
def setUp(self):
self.list = patterns.ObservableList()
self.observer = patterns.ListDecorator(self.list)
patterns.Publisher().registerObserver(self.onAdd,
eventType=self.observer.addItemEventType(),
eventSource=self.observer)
patterns.Publisher().registerObserver(self.onRemove,
eventType=self.observer.removeItemEventType(),
eventSource=self.observer)
self.receivedAddEvents = []
self.receivedRemoveEvents = []
def onAdd(self, event):
self.receivedAddEvents.append(event)
def onRemove(self, event):
self.receivedRemoveEvents.append(event)
def testExtendOriginal(self):
self.list.extend([1, 2, 3])
self.assertEqual((1, 2, 3), self.receivedAddEvents[0].values())
def testExtendObserver(self):
self.observer.extend([1, 2, 3])
self.assertEqual((1, 2, 3), self.receivedAddEvents[0].values())
def testRemoveItemsFromOriginal(self):
self.list.extend([1, 2, 3])
self.list.removeItems([1, 3])
self.assertEqual((1, 3), self.receivedRemoveEvents[0].values())
class PublisherTest(test.TestCase):
def setUp(self):
self.publisher = patterns.Publisher()
self.events = []
self.events2 = []
def onEvent(self, event):
self.events.append(event)
def onEvent2(self, event):
self.events2.append(event)
def testPublisherIsSingleton(self):
anotherPublisher = patterns.Publisher()
self.failUnless(self.publisher is anotherPublisher)
def testRegisterObserver(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.assertEqual([self.onEvent], self.publisher.observers())
def testRegisterObserver_Twice(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.assertEqual([self.onEvent], self.publisher.observers())
def testRegisterObserver_ForTwoDifferentTypes(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1')
self.publisher.registerObserver(self.onEvent, eventType='eventType2')
self.assertEqual([self.onEvent], self.publisher.observers())
def testRegisterObserver_ListMethod(self):
''' A previous implementation of Publisher used sets. This caused a
"TypeError: list objects are unhashable" whenever one tried to use
an instance method of a list (sub)class as callback. '''
class List(list):
def onEvent(self, *args):
pass # pragma: no cover
self.publisher.registerObserver(List().onEvent, eventType='eventType')
def testGetObservers_WithoutObservers(self):
self.assertEqual([], self.publisher.observers())
def testGetObserversForSpecificEventType_WithoutObservers(self):
self.assertEqual([], self.publisher.observers(eventType='eventType'))
def testGetObserversForSpecificEventType_WithObserver(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.assertEqual([self.onEvent],
self.publisher.observers(eventType='eventType'))
def testGetObserversForSpecificEventType_WhenDifferentTypesRegistered(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1')
self.publisher.registerObserver(self.onEvent, eventType='eventType2')
self.assertEqual([self.onEvent],
self.publisher.observers(eventType='eventType1'))
def testNotifyObservers_WithoutObservers(self):
patterns.Event('eventType', self).send()
self.failIf(self.events)
def testNotifyObservers_WithObserverForDifferentEventType(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1')
patterns.Event('eventType2', self).send()
self.failIf(self.events)
def testNotifyObservers_WithObserverForRightEventType(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
patterns.Event('eventType', self).send()
self.assertEqual([patterns.Event('eventType', self)], self.events)
def testNotifyObservers_WithObserversForSameAndDifferentEventTypes(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1')
self.publisher.registerObserver(self.onEvent, eventType='eventType2')
patterns.Event('eventType1', self).send()
self.assertEqual([patterns.Event('eventType1', self)], self.events)
def testNotifyObservers_ForDifferentEventTypesWithOneEvent(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1')
self.publisher.registerObserver(self.onEvent2, eventType='eventType2')
event = patterns.Event('eventType1', self)
event.addSource(self, type='eventType2')
event.send()
self.assertEqual([patterns.Event('eventType1', self)], self.events)
self.assertEqual([patterns.Event('eventType2', self)], self.events2)
def testNotifyObserversWithEventWithoutTypes(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
patterns.Event().send()
self.failIf(self.events)
def testNotifyObserversWithEventWithoutSources(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
patterns.Event('eventType').send()
self.failIf(self.events)
def testRemoveObserverForAnyEventType_NotRegisteredBefore(self):
self.publisher.removeObserver(self.onEvent)
self.assertEqual([], self.publisher.observers())
def testRemoveObserverForAnyEventType_RegisteredBefore(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.publisher.removeObserver(self.onEvent)
self.assertEqual([], self.publisher.observers())
def testRemoveObserverForSpecificType_RegisteredForSameType(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.publisher.removeObserver(self.onEvent, eventType='eventType')
self.assertEqual([], self.publisher.observers())
def testRemoveObserverForSpecificType_RegisteredForDifferentType(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.publisher.removeObserver(self.onEvent, eventType='otherType')
self.assertEqual([self.onEvent], self.publisher.observers())
def testRemoveObserverForSpecificType_RegisteredForDifferentTypeThatHasObservers(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.publisher.registerObserver(self.onEvent2, eventType='otherType')
self.publisher.removeObserver(self.onEvent, eventType='otherType')
self.assertEqual([self.onEvent], self.publisher.observers('eventType'))
def testClear(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType')
self.publisher.clear()
self.assertEqual([], self.publisher.observers())
def testRegisterObserver_ForSpecificSource(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType',
eventSource='observable1')
patterns.Event('eventType', 'observable2').send()
self.failIf(self.events)
def testNotifyObserver_ForSpecificSource(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType',
eventSource='observable1')
event = patterns.Event('eventType', 'observable1')
event.send()
self.assertEqual([event], self.events)
def testRemoveObserver_RegisteredForSpecificSource(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType',
eventSource='observable1')
self.publisher.removeObserver(self.onEvent)
event = patterns.Event('eventType', 'observable1')
event.send()
self.failIf(self.events)
def testRemoveObserverForSpecificEventType_RegisteredForSpecificSource(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType',
eventSource='observable1')
self.publisher.removeObserver(self.onEvent, eventType='eventType')
patterns.Event('eventType', 'observable1').send()
self.failIf(self.events)
def testRemoveObserverForSpecificEventSource(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType',
eventSource='observable1')
self.publisher.registerObserver(self.onEvent, eventType='eventType',
eventSource='observable2')
self.publisher.removeObserver(self.onEvent, eventSource='observable1')
patterns.Event('eventType', 'observable2').send()
self.failUnless(self.events)
def testRemoveObserverForSpecificEventTypeAndSource(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1',
eventSource='observable1')
self.publisher.registerObserver(self.onEvent, eventType='eventType1',
eventSource='observable2')
self.publisher.registerObserver(self.onEvent, eventType='eventType2',
eventSource='observable1')
self.publisher.removeObserver(self.onEvent, eventType='eventType1',
eventSource='observable1')
patterns.Event('eventType1', 'observable1').send()
self.failIf(self.events)
patterns.Event('eventType2', 'observable1').send()
self.failUnless(self.events)
def testRemoveObserverForSpecificEventTypeAndSourceDoesNotRemoveOtherSources(self):
self.publisher.registerObserver(self.onEvent, eventType='eventType1',
eventSource='observable1')
self.publisher.registerObserver(self.onEvent, eventType='eventType1',
eventSource='observable2')
self.publisher.registerObserver(self.onEvent, eventType='eventType2',
eventSource='observable1')
self.publisher.removeObserver(self.onEvent, eventType='eventType1',
eventSource='observable1')
patterns.Event('eventType1', 'observable2').send()
self.failUnless(self.events)
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/tests/unittests/patternsTests/ObservableTest.py
|
Python
|
gpl-3.0
| 25,981
|
#!/usr/bin/env python
import numpy as np # for numerics
import random # for random number generation
from math import sqrt,pi,e # some common function
import math
#import qutip as qp # qutip library for Bloch sphere visualisations
import cmath # library for complex numbers
from collections import Counter
#from tabulate import tabulate # for nice printing
#################### Defining some elementary gates
i_ = np.complex(0,1)
H = 1./sqrt(2)*np.array([[1, 1],[1, -1]])
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -i_],[i_, 0]])
Z = np.array([[1,0],[0,-1]])
eye = np.eye(2,2)
S=np.array([[1,0],[0,i_]])
Sdagger=np.array([[1,0],[0,-i_]])
T=np.array([[1,0],[0, e**(i_*pi/4.)]])
Tdagger=np.array([[1,0],[0, e**(-i_*pi/4.)]])
def Rx(angle):
angle = float(angle)
return np.array([[cmath.cos(angle/2),-i_*cmath.sin(angle/2)],[-i_*cmath.sin(angle/2), cmath.cos(angle/2)]])
def Ry(angle):
angle = float(angle)
return np.array([[cmath.cos(angle/2),-cmath.sin(angle/2)],[cmath.sin(angle/2),cmath.cos(angle/2)]])
def Rz(angle):
angle = float(angle)
return np.array([[cmath.exp(-i_*angle/2),0],[0,cmath.exp(i_*angle/2)]])
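# Sanity check (sketch): up to a global phase and floating-point error, these
# rotations reproduce the fixed gates above, e.g. Rx(pi) equals -1j*X and
# Rz(pi/2) equals cmath.exp(-1j*pi/4)*S.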
#################### State functions
def create_state(num_qubits, list_type, lis):
state = np.zeros(num_qubits)
# if the list has less entries than amplitudes (2**n),
# interpret the entries as positions
#if len(lis) < 2**num_qubits:
if list_type == "indices":
# check if all entries are valid positions
        if any(isinstance(item, complex) or item < 0 or item >= 2**num_qubits \
            or not isinstance(item, int) for item in lis) :
raise StandardError('Cannot interpret input of State() creator.'\
' Please enter a list of valid positions.')
# initialise state
state = np.array([1./sqrt(len(lis)) if i in lis else 0 \
for i in range(2**num_qubits)])
# else if the list has as many entries as amplitudes (2**n),
# interpret the entries as amplitudes
#elif len(lis) == 2**num_qubits:
elif list_type == "amplitudes":
state = np.array(lis)
if not is_normalised(state):
state = renormalise(state)
            print( 'Note that the state you generated was normalised ' \
                'automatically ')
else:
raise StandardError('Cannot interpret input of State() creator.'\
' Please enter a list of valid amplitudes or positions.')
return state
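# Example (sketch): create_state(2, 'indices', [0, 3]) returns the equal
# superposition (|00> + |11>)/sqrt(2), while create_state(1, 'amplitudes', [3, 4])
# is renormalised automatically to [0.6, 0.8].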
def renormalise(state): # Renormalise the amplitude vector to unit length
normalis_factor = float(np.real(np.sqrt(np.vdot(state,state))))
state = state/normalis_factor
return state
def is_normalised(state): #Check if a state is normalised
if np.isclose(float(np.real(np.vdot(state,state))), float(1.0),rtol = 1e-03):
return True
else:
return False
def measure(state, runs = 1, output = 'outcomes'): #perform measurements on the state
# simulates a repeated generation and projective measurement of the state
# Options: 'outcomes' prints every result while 'stats' prints an overview
results = np.random.choice(len(state), runs, p=[abs(el)**2 for el in state])
if output == 'outcomes':
print ("Measurement Results")
print ("Index Basis state ")
print ("----- ----------- ")
for el_res in results:
print "{0:04}".format(el_res),' |', "".join(( "{0:0", \
str(int(np.log2(len(state)))),"b}")).format(el_res),'>'
if output == 'stats':
hist_dict = Counter(results)
indices = list(hist_dict.keys())
occurences = [value/float(runs) for value in list(hist_dict.values())]
print ("\n Measurement Statistics:")
print ("rel. occ. Index Basis state")
print ("--------- ------ -----------")
for i in range(len(indices)):
            print occurences[i], " ",indices[i],' |', "".join(( "{0:0", \
                str(int(np.log2(len(state)))),"b}")).format(indices[i]),'>'
#print tabulate(printdata, headers = ['rel. occ.', 'Index', 'Basis state'])
return None
def print_me(state, style = None): # print out current state.
# Options:
# None/empty - simple output of state information
# 'slim' - As in 'None' but without zero amplitude entries
# 'amplitudes' - only array of amplitudes
np.set_printoptions(precision=3, suppress = True)
print
if style == None: # print all nonzero amplitudes
print "Index Probability Amplitude Basis state "
print "----- ----------- --------- ----------- "
for i in range(len(state)):
if not np.isclose(state[i],0.0):
basis_string = "".join(( "{0:0", \
str(int(np.log2(len(state)))),"b}"))
print '', "{0:04}".format(i), ' ', \
"{0:.3f}".format(abs(state[i])**2), ' ', \
"{0:.3f}".format(state[i]), \
"".join((' |',basis_string.format(i) , '>' ))
if style == 'full': # print all amplitudes
print "Index Probability Amplitude Basis state "
print "----- ----------- --------- ----------- "
for i in range(len(state)):
basis_string = "".join(( "{0:0", str(int(np.log2(len(state)))),"b}"))
print '', "{0:04}".format(i), ' ', \
"{0:.3f}".format(abs(state[i])**2), ' ', \
"{0:.3f}".format(state[i]), \
"".join((' |',basis_string.format(i) , '>' ))
if style == 'amplitudes':
print "Amplitudes: ", state
if style == 'probabilities':
print "Probabilities:\n ", ["{0:.3f}".format(np.abs(item)**2) \
for item in state]
print
return None
def grover_iteration(state, marked_pos):
# performs a Grover iteration on a quantum state
# check if list is of desired format
    if any(item < 0 or item >= len(state) for item in marked_pos)\
        or any( not isinstance(item, int) for item in marked_pos):
raise StandardError('Cannot interpret the list of marked positions'\
' in grover_iteration()')
marked_state = [- el if i in marked_pos else el \
for i,el in enumerate(state)]
rotated_state = [-el + 2*np.mean(marked_state) for el in marked_state]
return rotated_state
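# Worked example (sketch): one iteration on the uniform two-qubit state with
# basis state 2 marked amplifies it completely:
#   grover_iteration([0.5, 0.5, 0.5, 0.5], [2]) -> [0.0, 0.0, 1.0, 0.0]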
'''
def project_on_blochsphere(state):
if len(state) == 2:
alpha = state[0]
beta = state[1]
bloch = qp.Bloch() # initialise Bloch sphere
# Define the x,y and z axes for the Bloch sphere
#x_basis = (qp.basis(2,0)+(1+0j)*qp.basis(2,1)).unit()
#y_basis = (qp.basis(2,0)+(0+1j)*qp.basis(2,1)).unit()
#z_basis = (qp.basis(2,0)+(0+0j)*qp.basis(2,1)).unit()
#bloch.add_states([x_basis,y_basis,z_basis]) # add the axes to the Bloch sphere
#onestate = [[0.,0.,-1.]] # define |1> state vector
#bloch.add_vectors(onestate) # add |1> state vector
bloch.vector_color = ['g'] # Bloch vector colour
bloch.vector_width = 3 # define Bloch vector width
# Find and eliminate global phase
angle_alpha = cmath.phase(alpha)
angle_beta = cmath.phase(beta)
if angle_beta < 0:
if angle_alpha < angle_beta:
alpha_new = alpha/cmath.exp(1j*angle_beta)
beta_new = beta/cmath.exp(1j*angle_beta)
else:
alpha_new = alpha/cmath.exp(1j*angle_alpha)
beta_new = beta/cmath.exp(1j*angle_alpha)
else:
if angle_alpha > angle_beta:
alpha_new = alpha/cmath.exp(1j*angle_beta)
beta_new = beta/cmath.exp(1j*angle_beta)
else:
alpha_new = alpha/cmath.exp(1j*angle_alpha)
beta_new = beta/cmath.exp(1j*angle_alpha)
if abs(alpha) == 0 or abs(beta) == 0:
if alpha == 0:
bloch.clear()
down = [0,0,-1]
bloch.add_vectors(down)
else:
bloch.clear()
up = [0,0,1]
bloch.add_vectors(up)
else:
# Compute theta and phi from alpha and beta
theta = 2*cmath.acos(alpha_new)
phi = -1j*cmath.log(beta_new/cmath.sin(theta/2))
# Compute the cartesian coordinates
x = cmath.sin(theta)*cmath.cos(phi)
y = cmath.sin(theta)*cmath.sin(phi)
z = cmath.cos(theta)
# Create the new state vector and plot it onto the Bloch sphere
new_vec = [x.real,y.real,z.real]
bloch.add_vectors(new_vec)
bloch.show()
else:
raise StandardError('Bloch projection is only supported'\
' for single qubit states.')
'''
#################### Gate functions
def apply_unitary(gate_matrix, qubit_pos, quantum_state):
num_qubits = int(math.log(len(quantum_state),2))
# check if input matrix is a 2x2 matrix
if cmp(gate_matrix.shape, (2,2)) != 0:
raise StandardError('Cannot create total unitary. '\
'Input matrix must be 2x2.')
# check if input matrix is unitary
if np.allclose(np.linalg.inv(gate_matrix),gate_matrix.conjugate().transpose()) == False:
raise StandardError('Cannot create total unitary.'\
' Input matrix must be unitary.')
if any(item > num_qubits-1 or item < 0 for item in qubit_pos):
raise StandardError('Cannot apply quantum gate.'\
' Qubit position is not valid.')
if (len(qubit_pos) == 1):
# check if qubit positions are valid
if (qubit_pos[0] < 0) or (qubit_pos[0] > num_qubits) :
raise StandardError('Your selected qubit position is out of range.'\
' Please choose a valid qubit position.')
else:
# create a list of gates representing the required tensor product
unitary_list = [gate_matrix if qubit_pos[0] == i else np.eye(2,2) for i in range(num_qubits)]
# calculate the Kronecker tensor product with (n-1) identity matrices
# to obtain a 2**n x 2**n matrix that can be acted on n qubits
u_new = unitary_list[0]
for k in range(num_qubits-1):
u_new = np.kron(u_new, unitary_list[k+1])
gate = u_new
# apply the 2**n x 2**n matrix to the current quantum_state
quantum_state = np.dot(gate,quantum_state)
return quantum_state
elif (len(qubit_pos) == 2):
# isolate control and target qubits
control = qubit_pos[0]
target = qubit_pos[1]
if control==target:
raise StandardError('Target and control are the same. '\
'Please choose different target and '\
'control qubits.')
# CASE 1: ADJACENT QUBITS ARE CHOSEN AS CONTROL AND TARGET
if (control == target-1) or (target == control-1):
checker = False
if (target == control-1):
checker = True
save_control = control
control = target
target = save_control
# initialize empty 4 x 4 matrix for controlled gate
cgate = np.zeros((4,4),dtype=np.complex128)
# if control position is reached:
# perform the outer product |1><1| and, thereafter, the tensor product with the unitary that shall be controlled
cgate += np.kron((np.matrix(create_state(1,'amplitudes',[0,1])).transpose())*np.matrix(create_state(1,'amplitudes',[0,1])), np.matrix(gate_matrix))
# perform the outer product |0><0| and, thereafter, the tensor product with the identity matrix
cgate += np.kron(np.matrix(create_state(1,'amplitudes',[1,0])).transpose()*np.matrix(create_state(1,'amplitudes',[1,0])), np.matrix(eye).astype(np.complex128))
# convert to array
cgate = np.array(cgate)
if num_qubits > 2:
# perform the tensor products with identity matrices
for k in range(num_qubits):
# pre-multiply identities
if all([(k<control),(k<target)]):
cgate = np.kron(eye,cgate)
# post-multiply identities
elif all([(k>control),(k>target)]):
cgate = np.kron(cgate, eye)
if checker:
save_control = control
control = target
target = save_control
# use the Hadamard trick to reverse the direction of the CNOT gate
if control > target:
quantum_state = apply_unitary(H,[control],quantum_state)
quantum_state = apply_unitary(H,[target],quantum_state)
quantum_state = np.dot(cgate,quantum_state)
quantum_state = apply_unitary(H,[control],quantum_state)
quantum_state = apply_unitary(H,[target],quantum_state)
return quantum_state
if checker:
save_control = control
control = target
target = save_control
# use the Hadamard trick to reverse the direction of the CNOT gate
if control > target:
quantum_state = apply_unitary(H,[control],quantum_state)
quantum_state = apply_unitary(H,[target],quantum_state)
quantum_state = np.dot(cgate,quantum_state)
quantum_state = apply_unitary(H,[control],quantum_state)
quantum_state = apply_unitary(H,[target],quantum_state)
else:
# apply the 2**n x 2**n matrix to the quantum state
quantum_state = np.dot(cgate,quantum_state)
return quantum_state
else:
# obtain the respective gate matrix with the
# create_controlledGate function
cgate = create_controlledGate(gate_matrix, qubit_pos, len(quantum_state), num_qubits)
# apply the 2**n x 2**n matrix to the quantum state
quantum_state = np.dot(cgate,quantum_state)
return quantum_state
# Controlled controlled case: currently only allows for Toffoli
elif (len(qubit_pos) == 3):
control1 = qubit_pos[0]
control2 = qubit_pos[1]
target = qubit_pos[2]
# check if input gate is X > only Toffoli allowed for now
if (gate_matrix==X).all() == False:
raise StandardError('Cannot create the controlled controlled U gate. '\
'Only Toffoli supported so far. '\
'Input matrix must be the X gate.')
quantum_state = apply_unitary(H,[target],quantum_state)
quantum_state = apply_unitary(X,[control1,target],quantum_state)
quantum_state = apply_unitary(Tdagger,[target],quantum_state)
quantum_state = apply_unitary(X,[control2,target],quantum_state)
quantum_state = apply_unitary(T,[target],quantum_state)
quantum_state = apply_unitary(X,[control1,target],quantum_state)
quantum_state = apply_unitary(Tdagger,[target],quantum_state)
quantum_state = apply_unitary(X,[control2,target],quantum_state)
quantum_state = apply_unitary(T,[target],quantum_state)
quantum_state = apply_unitary(T,[control1],quantum_state)
quantum_state = apply_unitary(X,[control2,control1],quantum_state)
quantum_state = apply_unitary(H,[target],quantum_state)
quantum_state = apply_unitary(T,[control2],quantum_state)
quantum_state = apply_unitary(Tdagger,[control1],quantum_state)
quantum_state = apply_unitary(X,[control2,control1],quantum_state)
return quantum_state
else:
        raise StandardError('Too many qubits specified.'\
            ' Please enter a maximum of 3 valid positions.')
return None
def create_controlledGate(gate_matrix, qubit_pos, num_amplitudes, num_qubits):
control = qubit_pos[0]
target = qubit_pos[1]
if ((control-target) == -(num_qubits-1)):
cgate = np.eye(num_amplitudes,num_amplitudes)
iteration_list = np.array(num_amplitudes/2)
value_save = num_amplitudes/2
for k in range(num_amplitudes/4-1):
iteration_list = np.append(iteration_list,value_save+2)
value_save = value_save+2
for m in iteration_list:
cgate[np.array([m,m+1])]=cgate[np.array([m+1,m])]
return cgate
elif ((control-target) == num_qubits-1):
cgate = np.eye(num_amplitudes,num_amplitudes)
iteration_list = np.array(1)
value_save = 1
for k in range(num_amplitudes/4-1):
iteration_list = np.append(iteration_list,value_save+2)
value_save = value_save+2
for m in iteration_list:
cgate[np.array([m,m+num_amplitudes/2])]=cgate[np.array([m+num_amplitudes/2,m])]
return cgate
elif (control <= num_qubits-2) and (target <= num_qubits-2):
pre_cgate = create_controlledGate(gate_matrix, qubit_pos, num_amplitudes/2, num_qubits-1)
cgate = np.kron(pre_cgate,eye)
return cgate
elif (control == num_qubits-1) or (target == num_qubits-2):
pre_cgate = create_controlledGate(gate_matrix, [qubit_pos[0]-1, qubit_pos[1]-1], num_amplitudes/2, num_qubits-1)
cgate = np.kron(eye,pre_cgate)
return cgate
else:
qubit_pos = [ x-1 for x in qubit_pos]
pre_cgate = create_controlledGate(gate_matrix, qubit_pos, num_amplitudes/2, num_qubits-1)
cgate = np.kron(eye,pre_cgate)
return cgate
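# Example usage (a minimal sketch, assuming the helpers above are used
# unchanged): prepare |00>, apply a Hadamard on qubit 0 and a CNOT with
# control 0 and target 1, leaving the register in the Bell state
# (|00> + |11>)/sqrt(2).
if __name__ == '__main__':
    psi = create_state(2, 'indices', [0])
    psi = apply_unitary(H, [0], psi)
    psi = apply_unitary(X, [0, 1], psi)
    print_me(psi)
    measure(psi, runs=5, output='stats')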
|
mariaschuld/qc_simulator
|
py2/quantum_simulator_py2_no_tabulate.py
|
Python
|
gpl-3.0
| 17,997
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pi/Documents/desenvolvimentoRos/install/include".split(';') if "/home/pi/Documents/desenvolvimentoRos/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "angles"
PROJECT_SPACE_DIR = "/home/pi/Documents/desenvolvimentoRos/install"
PROJECT_VERSION = "1.9.10"
|
UnbDroid/robomagellan
|
Codigos/Raspberry/desenvolvimentoRos/build/angles/angles/catkin_generated/pkg.installspace.context.pc.py
|
Python
|
gpl-3.0
| 492
|
# rFactor Remote LCD
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import rfactorlcd
import math
class SteeringDashlet(rfactorlcd.Dashlet):
def __init__(self, *args):
super(SteeringDashlet, self).__init__(*args)
self.steering = 0
self.rotation = math.pi / 2
def update_state(self, state):
if self.steering != state.steering:
self.steering = state.steering
def draw(self, cr):
r = min(self.w, self.h)/2.0
cr.set_source_rgb(*self.lcd_style.shadow_color)
cr.save()
cr.translate(self.w/2, self.h/2)
cr.rotate(self.rotation * self.steering)
cr.move_to(-r, 0)
cr.line_to(r, 0)
cr.move_to(0, 0)
cr.line_to(0, r)
cr.stroke()
cr.restore()
cr.set_source_rgb(*self.lcd_style.foreground_color)
cr.new_path()
cr.arc(self.w/2, self.h/2,
r, 0, 2.0 * math.pi)
cr.stroke()
cr.new_path()
cr.arc(self.w/2, self.h/2,
r/5.0, 0, 2.0 * math.pi)
cr.fill()
# EOF #
|
Grumbel/rfactorlcd
|
rfactorlcd/dashlets/steering_dashlet.py
|
Python
|
gpl-3.0
| 1,727
|
import requests
from resources.lib.modules import workers
from resources.lib.modules import util
from resources.lib.modules import cache
import datetime
import player
PLAYER_HANDLER = player.__name__
def get_live_channels():
url = 'https://apim.oi.net.br/app/oiplay/ummex/v1/lists/651acd5c-236d-47d1-9e57-584a233ab76a?limit=200&orderby=titleAsc&page=1&useragent=androidtv'
response = requests.get(url).json()
channels = []
threads = [workers.Thread(__merge_channel_data, channel['prgSvcId'], channels) for channel in response['items']]
[i.start() for i in threads]
[i.join() for i in threads]
return channels
def __merge_channel_data(channel, result):
data = get_channel_epg_now(channel)
result.append(data)
def get_channel_epg_now(channel):
url = 'https://apim.oi.net.br/app/oiplay/ummex/v1/epg/{channel}/beforenowandnext?beforeCount=0&nextCount=0&includeCurrentProgram=true'.format(channel=channel)
response = requests.get(url).json()
now = response['schedules'][0]
program = now['program']
title = program['seriesTitle']
series = u" (S" + str(program['seasonNumber']) + u':E' + str(program['episodeNumber']) + u")" if program['programType'] == 'Series' else u''
episode_title = program['title'] + series if 'title' in program and program['title'] != title else ''
studio = response['title']
thumb = None
fanart = None
if 'programImages' in program and len(program['programImages']) > 0:
thumb = next((image['url'] for image in program['programImages'] if image['type'] == 'Thumbnail'), None)
fanart = next((image['url'] for image in program['programImages'] if image['type'] == 'Backdrop'), thumb) or thumb
thumb = thumb or fanart
logo = response['positiveLogoUrl']
cast = [c['name'] for c in program['castMembers']]
date = util.strptime(now['startTimeUtc'], '%Y-%m-%dT%H:%M:%SZ') + util.get_utc_delta()
program_name = title + (u': ' + episode_title if episode_title else u'')
stop_time = date + datetime.timedelta(seconds=int(now.get('durationSeconds', 0)))
program_time_desc = datetime.datetime.strftime(date, '%H:%M') + ' - ' + datetime.datetime.strftime(stop_time, '%H:%M')
tags = [program_time_desc]
description = '%s | %s' % (program_time_desc, program['synopsis'])
return {
'handler': PLAYER_HANDLER,
'method': 'playlive',
'id': response['prgSvcId'],
'IsPlayable': True,
'livefeed': True,
'label': u"[B]" + studio + u"[/B][I] - " + program_name + u"[/I]",
'title': u"[B]" + studio + u"[/B][I] - " + program_name + u"[/I]",
'studio': 'Oi Play',
# 'title': episode_title,
'tvshowtitle': title,
'sorttitle': program_name,
'channel_id': response['prgSvcId'],
'dateadded': datetime.datetime.strftime(date, '%Y-%m-%d %H:%M:%S'),
'plot': description,
'tag': tags,
'duration': now['durationSeconds'],
'adult': program['isAdult'],
'cast': cast,
'director': program['directors'],
'genre': program['genres'],
'rating': program['rating'],
'year': program['releaseYear'],
'episode': program['episodeNumber'] if program['episodeNumber'] else None,
'season': program['seasonNumber'] if program['seasonNumber'] else None,
'art': {
'icon': logo,
'thumb': thumb,
'tvshow.poster': thumb,
'clearlogo': logo,
'fanart': fanart
}
}
def get_epg(start, end, channel_map):
start_time = datetime.datetime.strftime(start, '%Y-%m-%dT%H:%M:%SZ')
end_time = datetime.datetime.strftime(end, '%Y-%m-%dT%H:%M:%SZ')
url = 'https://apim.oi.net.br/app/oiplay/ummex/v1/epg?starttime={starttime}&endtime={endtime}&liveSubscriberGroup={channelmap}'.format(starttime=start_time, endtime=end_time, channelmap=channel_map)
epg = cache.get(requests.get, 20, url, table='oiplay').json()
return epg
|
olavopeixoto/plugin.video.brplay
|
resources/lib/modules/oiplay/scraper_live.py
|
Python
|
gpl-3.0
| 4,017
|
from .lintegrate import *
__version__ = "0.1.11"
|
mattpitkin/lintegrate
|
lintegrate/__init__.py
|
Python
|
gpl-3.0
| 50
|
import level1
def Drag(density, velocity, dragCoeff, Area):
# F = force due to drag
p = density
v = velocity
c = dragCoeff
A = Area
if v>0:
F = 0.5*p*(v**2)*c*A
elif v<0:
F = -0.5*p*(v**2)*c*A
else:
F=0
return F
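# Minimal check (sketch, illustrative values): air density 1.225 kg/m^3,
# velocity 10 m/s, drag coefficient 0.47 and frontal area 1 m^2 give roughly
# 28.8 N of drag opposing the motion.
if __name__ == '__main__':
    print(Drag(1.225, 10.0, 0.47, 1.0))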
|
pcraddock/lander
|
main/Drag.py
|
Python
|
gpl-3.0
| 243
|
# -*- coding: utf-8 -*-
"""
coco.config
~~~~~~~~~~~~
    The configuration-related objects, copied from Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import types
import errno
import json
from werkzeug.utils import import_string
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: bool. ``True`` if able to load config, ``False`` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = types.ModuleType('config')
d.__file__ = filename
try:
with open(filename, mode='rb') as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes. :meth:`from_object`
loads only the uppercase attributes of the module/class. A ``dict``
object will not work with :meth:`from_object` because the keys of a
``dict`` are not attributes of the ``dict`` class.
Example of module-based configuration::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
See :ref:`config-dev-prod` for an example of class-based configuration
using :meth:`from_object`.
:param obj: an import name or object
"""
if isinstance(obj, str):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_json(self, filename, silent=False):
"""Updates the values in the config from a JSON file. This function
behaves as if the JSON object was a dictionary and passed to the
:meth:`from_mapping` function.
:param filename: the filename of the JSON file. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
.. versionadded:: 0.11
"""
filename = os.path.join(self.root_path, filename)
try:
with open(filename) as json_file:
obj = json.loads(json_file.read())
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
return self.from_mapping(obj)
def from_mapping(self, *mapping, **kwargs):
"""Updates the config like :meth:`update` ignoring items with non-upper
keys.
.. versionadded:: 0.11
"""
mappings = []
if len(mapping) == 1:
if hasattr(mapping[0], 'items'):
mappings.append(mapping[0].items())
else:
mappings.append(mapping[0])
elif len(mapping) > 1:
raise TypeError(
'expected at most 1 positional argument, got %d' % len(mapping)
)
mappings.append(kwargs.items())
for mapping in mappings:
for (key, value) in mapping:
if key.isupper():
self[key] = value
return True
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
"""
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace):]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
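# Usage sketch (the root path and keys below are illustrative only):
if __name__ == '__main__':
    config = Config(root_path='.', defaults={'DEBUG': False})
    config.from_mapping(SECRET_KEY='dev', ignored_lowercase='skipped')
    # only upper-case keys are kept, so this prints {'key': 'dev'}
    print(config.get_namespace('SECRET_'))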
|
ibuler/coco
|
coco/config.py
|
Python
|
gpl-3.0
| 9,849
|
from tkinter import *
dt = 50
ball_radius = 10
class MainWindow:
def __init__(self):
self.root = Tk()
self.canvas = Canvas(self.root, width=600, height=400, bg='lightgreen')
self.canvas.pack()
self.a = [300, 200]
self.line = self.canvas.create_line(self.a, [400, 200], width=5, fill='red')
self.balls = []
self.balls_velocity = []
self.ball_sprite = PhotoImage(file="ball_sprite.png")
self.canvas.bind('<Motion>', self.mouse_motion)
self.canvas.bind('<Button-1>', self.mouse_click)
self.canvas.after(dt, self.game_cycle)
self.root.mainloop()
def mouse_motion(self, event):
b = [event.x, event.y]
self.canvas.coords(self.line, *self.a, *b)
def mouse_click(self, event):
#ball = self.canvas.create_oval(event.x - ball_radius, event.y - ball_radius,
# event.x + ball_radius, event.y + ball_radius)
ball = self.canvas.create_image(event.x - ball_radius, event.y - ball_radius,
image=self.ball_sprite, tag='ball')
vx = round((event.x - self.a[0])/10)
vy = round((event.y - self.a[1])/10)
self.balls_velocity.append([vx, vy])
self.balls.append(ball)
def game_cycle(self, *ignore):
        self.canvas.after(dt, self.game_cycle) # restart the cycle
for ball, velocity in zip(self.balls, self.balls_velocity):
self.canvas.move(ball, velocity[0], velocity[1])
window = MainWindow()
|
tkhirianov/fox_python_2016
|
lesson_19/canvas_usage.py
|
Python
|
gpl-3.0
| 1,570
|
""" EC2Endpoint class is the implementation of the EC2 interface to
a cloud endpoint
"""
import os
import json
import boto3
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.Resources.Cloud.Endpoint import Endpoint
class EC2Endpoint(Endpoint):
def __init__(self, parameters=None):
super(EC2Endpoint, self).__init__(parameters=parameters)
# logger
self.log = gLogger.getSubLogger("EC2Endpoint")
self.valid = False
result = self.initialize()
if result["OK"]:
self.log.debug("EC2Endpoint created and validated")
self.valid = True
else:
self.log.error(result["Message"])
def initialize(self):
availableParams = {
"RegionName": "region_name",
"AccessKey": "aws_access_key_id",
"SecretKey": "aws_secret_access_key",
"EndpointUrl": "endpoint_url", # EndpointUrl is optional
}
connDict = {}
for var in availableParams:
if var in self.parameters:
connDict[availableParams[var]] = self.parameters[var]
try:
self.__ec2 = boto3.resource("ec2", **connDict)
except Exception as e:
self.log.exception("Failed to connect to EC2")
errorStatus = "Can't connect to EC2: " + str(e)
return S_ERROR(errorStatus)
result = self.__loadInstanceType()
if not result["OK"]:
return result
result = self.__checkConnection()
return result
def __loadInstanceType(self):
currentDir = os.path.dirname(os.path.abspath(__file__))
instanceTypeFile = os.path.join(currentDir, "ec2_instance_type.json")
try:
with open(instanceTypeFile, "r") as f:
self.__instanceTypeInfo = json.load(f)
except Exception as e:
self.log.exception("Failed to fetch EC2 instance details")
errmsg = "Exception loading EC2 instance type info: %s" % e
self.log.error(errmsg)
return S_ERROR(errmsg)
return S_OK()
def __checkConnection(self):
"""
Checks connection status by trying to list the images.
:return: S_OK | S_ERROR
"""
try:
self.__ec2.images.filter(Owners=["self"])
except Exception as e:
self.log.exception("Failed to list EC2 images")
return S_ERROR(e)
return S_OK()
def createInstances(self, vmsToSubmit):
outputDict = {}
for nvm in range(vmsToSubmit):
instanceID = makeGuid()[:8]
result = self.createInstance(instanceID)
if result["OK"]:
ec2Id, nodeDict = result["Value"]
self.log.debug("Created VM instance %s/%s" % (ec2Id, instanceID))
outputDict[ec2Id] = nodeDict
else:
self.log.error("Create EC2 instance error:", result["Message"])
break
return S_OK(outputDict)
def createInstance(self, instanceID=""):
if not instanceID:
instanceID = makeGuid()[:8]
self.parameters["VMUUID"] = instanceID
self.parameters["VMType"] = self.parameters.get("CEType", "EC2")
createNodeDict = {}
# Image
if "ImageID" in self.parameters and "ImageName" not in self.parameters:
try:
images = self.__ec2.images.filter(Filters=[{"Name": "name", "Values": [self.parameters["ImageName"]]}])
imageId = None
for image in images:
imageId = image.id
break
except Exception as e:
self.log.exception("Exception when get ID from image name %s:" % self.parameters["ImageName"])
return S_ERROR("Failed to get image for Name %s" % self.parameters["ImageName"])
if imageId is None:
return S_ERROR("Image name %s not found" % self.parameters["ImageName"])
elif "ImageID" in self.parameters:
try:
self.__ec2.images.filter(ImageIds=[self.parameters["ImageID"]])
except Exception as e:
self.log.exception("Failed to get EC2 image list")
return S_ERROR("Failed to get image for ID %s" % self.parameters["ImageID"])
imageId = self.parameters["ImageID"]
else:
return S_ERROR("No image specified")
createNodeDict["ImageId"] = imageId
# Instance type
if "FlavorName" not in self.parameters:
return S_ERROR("No flavor specified")
instanceType = self.parameters["FlavorName"]
createNodeDict["InstanceType"] = instanceType
# User data
result = self._createUserDataScript()
if not result["OK"]:
return result
createNodeDict["UserData"] = str(result["Value"])
# Other params
for param in ["KeyName", "SubnetId", "EbsOptimized"]:
if param in self.parameters:
createNodeDict[param] = self.parameters[param]
self.log.info("Creating node:")
for key, value in createNodeDict.items():
self.log.verbose("%s: %s" % (key, value))
# Create the VM instance now
try:
instances = self.__ec2.create_instances(MinCount=1, MaxCount=1, **createNodeDict)
except Exception as e:
self.log.exception("Failed to create EC2 instance")
return S_ERROR("Exception in ec2 create_instances: %s" % e)
if len(instances) < 1:
errmsg = "ec2 create_instances failed to create any VM"
self.log.error(errmsg)
return S_ERROR(errmsg)
# Create the name in tags
ec2Id = instances[0].id
tags = [{"Key": "Name", "Value": "DIRAC_%s" % instanceID}]
try:
self.__ec2.create_tags(Resources=[ec2Id], Tags=tags)
except Exception as e:
self.log.exception("Failed to tag EC2 instance")
return S_ERROR("Exception setup name for %s: %s" % (ec2Id, e))
# Properties of the instance
nodeDict = {}
# nodeDict['PublicIP'] = publicIP
nodeDict["InstanceID"] = instanceID
if instanceType in self.__instanceTypeInfo:
nodeDict["NumberOfProcessors"] = self.__instanceTypeInfo[instanceType]["vCPU"]
nodeDict["RAM"] = self.__instanceTypeInfo[instanceType]["Memory"]
else:
nodeDict["NumberOfProcessors"] = 1
return S_OK((ec2Id, nodeDict))
def stopVM(self, nodeID, publicIP=""):
"""
        Given the node ID, terminate the corresponding EC2 instance.
        :Parameters:
          **nodeID** - `string`
            EC2 instance id
          **publicIP** - `string`
            public IP assigned to the node, if any
:return: S_OK | S_ERROR
"""
try:
self.__ec2.Instance(nodeID).terminate()
except Exception as e:
self.log.exception("Failed to terminate EC2 instance")
return S_ERROR("Exception terminate instance %s: %s" % (nodeID, e))
return S_OK()
|
DIRACGrid/DIRAC
|
src/DIRAC/Resources/Cloud/EC2Endpoint.py
|
Python
|
gpl-3.0
| 7,409
|
from .util import hook, colours, repl, other
from . import logging, limit
import socks, re, json, traceback, time, os, glob, importlib, requests, pkg_resources
events = []
def help_bot(conn=None, info=None):
"""Shows help for commands"""
for command in [func for func in conn.bot.events if func._event == "command"]:
if command._commandname == info.args.lstrip():
conn.notice(info.nick, command._help)
setattr(help_bot, "_commandname", "help")
setattr(help_bot, "_prefix", "!")
setattr(help_bot, "_help", help_bot.__doc__)
setattr(help_bot, "_perms", "all")
setattr(help_bot, "_event", "command")
setattr(help_bot, "_thread", False)
events.append(help_bot)
def list_bot(conn=None, info=None):
    """Lists all registered commands"""
    return " ".join([func._commandname for func in conn.bot.events if func._event == "command"])
setattr(list_bot, "_commandname", "list")
setattr(list_bot, "_prefix", "!")
setattr(list_bot, "_help", list_bot.__doc__)
setattr(list_bot, "_perms", "all")
setattr(list_bot, "_event", "command")
setattr(list_bot, "_thread", False)
events.append(list_bot)
def bot_quit(conn, info):
conn.quit()
setattr(bot_quit, "_commandname", "quit")
setattr(bot_quit, "_prefix", "!")
setattr(bot_quit, "_help", bot_quit.__doc__)
setattr(bot_quit, "_perms", ["admin"])
setattr(bot_quit, "_event", "command")
setattr(bot_quit, "_thread", False)
events.append(bot_quit)
def flush(conn, info):
return "Sucessfully flushed {0} lines.".format(conn.flush())
setattr(flush, "_commandname", "flush")
setattr(flush, "_prefix", "!")
setattr(flush, "_help", flush.__doc__)
setattr(flush, "_perms", ["admin"])
setattr(flush, "_event", "command")
setattr(flush, "_thread", False)
events.append(flush)
|
IndigoTiger/ezzybot
|
ezzybot/builtin.py
|
Python
|
gpl-3.0
| 1,710
|
import sys
import os
import cv
def detectObject(image):
size = cv.GetSize(image)
grayscale = cv.CreateImage(size, 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
storage = cv.CreateMemStorage(0)
cv.EqualizeHist(grayscale, grayscale)
cascade = cv.Load("haarcascade_frontalface_alt.xml")
    faces = cv.HaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
        cv.CV_HAAR_DO_CANNY_PRUNING, (100, 100))
for i in faces:
((x, y, w, h), d) = i
cv.Rectangle(image, (int(x), int(y)),
(int(x+w), int(y+h)),
cv.CV_RGB(0, 255, 0), 3, 8, 0)
def displayObject(image):
cv.ShowImage("face", image)
cv.WaitKey(0)
if __name__ == "__main__":
image = cv.LoadImage("face.png")
detectObject(image)
displayObject(image)
|
lalakiwe/FaceReg
|
test/face_detect.py
|
Python
|
gpl-3.0
| 734
|
from django.utils import timezone
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError
from exercise.exercise_summary import UserExerciseSummary
from .exercise_models import BaseExercise, LearningObject
from .submission_models import Submission
from userprofile.models import UserProfile
from course.models import CourseInstance, CourseModule, LearningObjectCategory
class ExerciseCollection(BaseExercise):
# Submissions must persist even if the target course or category
# gets destroyed.
target_category = models.ForeignKey(
LearningObjectCategory,
verbose_name=_('LABEL_TARGET_CATEGORY'),
on_delete=models.SET_NULL,
null=True,
blank=True,
)
class Meta:
verbose_name = _('MODEL_NAME_EXERCISE_COLLECTION')
verbose_name_plural = _('MODEL_NAME_EXERCISE_COLLECTION_PLURAL')
    # Validation guarding against configurations that would cause erroneous behaviour
def clean(self):
super().clean()
errors = {}
if self.target_category.id == self.category.id:
errors['target_category'] = 'Cannot set own category as target category.'
if self.max_submissions != 1:
errors['max_submissions'] = 'Exercise Collection can have only 1 submission.'
if self.max_group_size != 1:
errors['max_group_size'] = 'Exercise Collection can have only 1 submitter'
if self.min_group_size != 1:
errors['min_group_size'] = 'Exercise Collection can have only 1 submitter'
if errors:
raise ValidationError(errors)
# Allows viewing of submissions
# Not actually user submittable
@property
def is_submittable(self):
return True
# Calculates the sum of best submissions in target category
#
# returns None:
# * Timing doesn't allow submissions
# * Target category doesn't have exercises with points
#
def get_points(self, user, no_scaling=False):
total_points = 0
tc_max_points = self.target_category_maxpoints
max_points = self.max_points
if tc_max_points == 0:
return None
timing, d1 = self.get_timing([user.userprofile],timezone.now())
if (timing == self.TIMING.CLOSED_AFTER or
timing == self.TIMING.ARCHIVED or
timing == self.TIMING.CLOSED_BEFORE or
timing == self.TIMING.UNOFFICIAL):
return None
for exercise in self.exercises:
summary = UserExerciseSummary(exercise, user)
if summary.best_submission is not None:
total_points += summary.best_submission.grade
if timing == self.TIMING.LATE:
total_points = round(total_points * (1 - self.course_module.late_submission_penalty))
if tc_max_points == max_points or no_scaling:
return total_points
total_points = total_points / (tc_max_points / max_points)
return total_points
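    # Worked example of the scaling above (sketch): 30 points earned in a
    # target category worth 60 points, for a collection whose max_points is 10,
    # yield 30 / (60 / 10) = 5 points.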
# Used when staff forces regrading
def grade(self, request, submission):
self.check_submission(request.user, forced=True)
# Updates submission for ExerciseCollection
# Updated submission is saved only if grade has changed
# Parameters:
# * no_update: Doesn't update grade if submission exists
# * forced: Updates submission even if grade hasn't changed
#
def check_submission(self, user, no_update=False, forced=False):
if no_update and self.is_submitted(user) and not forced:
return
# Create new submission or use previous
if not self.is_submitted(user):
current_submission = Submission.objects.create(
exercise=self,
feedback="",
grade=-1,
)
current_submission.clean()
current_submission.save()
else:
submissions = self.get_submissions_for_student(user.userprofile)
current_submission = submissions[0]
new_grade = self.get_points(user)
if new_grade == current_submission.grade and not forced:
return
grading_data, feedback = self._generate_grading_data(user.userprofile)
current_submission.grade = new_grade
current_submission.submission_time = timezone.now()
current_submission.status = Submission.STATUS.READY
current_submission.submitters.set([user.userprofile])
current_submission.grading_data = grading_data
current_submission.feedback = feedback
current_submission.clean()
current_submission.save()
# Check if user has a submission for this exercise
def is_submitted(self, user):
return self.get_submissions_for_student(user.userprofile).count() > 0
# Property to access max_points in target category
@property
def target_category_maxpoints(self):
max_points = 0
for exercise in self.exercises:
max_points += exercise.max_points
return max_points
# Property to access exercises in target category
@property
def exercises(self):
return BaseExercise.objects.filter(
category=self.target_category
).order_by('id')
# There are always submissions left from system's point of view
def one_has_submissions(self, students):
return True
# Generates feedback and grading_data
# Feedback is in HTML format
# grading_data is currently blank
def _generate_grading_data(self, profile):
feedback = ""
grading_data = ""
exercise_counter = 1
for exercise in self.exercises:
submission = UserExerciseSummary(exercise, profile.user).best_submission
if submission is None:
grade = 0
else:
grade = submission.grade
feedback += "Exercise {}: {}/{}\n Course: {} - {}\n Exercise: {}\n".format(
exercise_counter,
grade,
exercise.max_points,
exercise.category.course_instance.course.name,
exercise.category.course_instance.instance_name,
exercise.name,
)
exercise_counter += 1
feedback = "<pre>\n" + feedback + "\n</pre>\n"
return {"grading_data": grading_data}, feedback
# Updates submissions if new submission is in any ExerciseCollection's target category.
# ! Probably needs Cache-optimization
@receiver(post_save, sender=Submission)
def update_exercise_collection_submission(sender, instance, **kwargs):
collections = ExerciseCollection.objects.filter(target_category=instance.exercise.category)
if not collections:
return
profile = instance.submitters.first()
if not profile:
return
for collection in collections:
collection.check_submission(profile.user)
|
teemulehtinen/a-plus
|
exercise/exercisecollection_models.py
|
Python
|
gpl-3.0
| 7,057
|
# doHacking
#
# Used by:
# Modules from group: Data Miners (9 of 9)
type = "active"
def handler(fit, module, context):
pass
|
bsmr-eve/Pyfa
|
eos/effects/dohacking.py
|
Python
|
gpl-3.0
| 130
|
# xiaolongdolly 2017.8.20
squares = [value**2 for value in range(1, 11)]
print(squares)
|
xiaolongdolly/Python_Course
|
chapter_4/squares/squares_3.py
|
Python
|
gpl-3.0
| 88
|
from django.contrib.auth.forms import AuthenticationForm
from django import forms
from django.forms.models import BaseModelFormSet
from players.models import *
class LoginForm(AuthenticationForm):
def confirm_login_allowed(self, user):
super(LoginForm, self).confirm_login_allowed(user)
if not user.is_staff and not hasattr(user, 'character'):
raise forms.ValidationError(
"This account has no character. Please wait for a GM to add one.",
code='no_character', )
# Session submit forms
class SessionFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
initial = kwargs.pop('initial')
self.user = initial['user']
self.character = initial['character']
self.session = initial['session']
super(SessionFormSet, self).__init__(*args, **kwargs)
def fill_save(self):
self.save_existing_objects(commit=True) # deletes objects
instances = self.save(commit=False)
for instance in instances:
instance.session = self.session
instance.character = self.character
instance.save()
self.save_m2m()
class DisciplineActivationFormSet(SessionFormSet):
def __init__(self, *args, **kwargs):
super(DisciplineActivationFormSet, self).__init__(*args, **kwargs)
self.queryset = ActiveDisciplines.objects.filter(
character=self.character,
session=self.session)
self.max_num = 1
self.can_delete = False
for form in self.forms:
form.fields[
'disciplines'].queryset = self.user.character.disciplines.all()
class ActionFormSet(SessionFormSet):
def __init__(self, *args, **kwargs):
super(ActionFormSet, self).__init__(*args, **kwargs)
self.queryset = Action.objects.filter(character=self.character,
session=self.session)
action_count = self.character.action_count(self.session)
self.extra = action_count
self.max_num = self.extra
        # otherwise Django might populate the forms with actions that
        # don't match their action_type queryset
self.can_delete = False
i = 0
for action in self.character.actions(self.session):
for j in range(action.count):
form = self.forms[i]
# we could use form.initial to look at previous values. However
# matching the action to the option is hard.
form.fields['action_type'].queryset = action.action_types.all()
i = i + 1
class FeedingFormSet(SessionFormSet):
def __init__(self, *args, **kwargs):
super(FeedingFormSet, self).__init__(*args, **kwargs)
self.queryset = Feeding.objects.filter(character=self.character,
session=self.session)
self.max_num = 3
self.extra = 3
self.can_delete = False
for form in self.forms:
form.fields[
'discipline'].queryset = self.user.character.disciplines.all()
|
TreacheryLarp/downtime
|
players/forms.py
|
Python
|
gpl-3.0
| 3,138
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
from freeseer import main
main()
|
Freeseer/freeseer
|
src/freeseer/__main__.py
|
Python
|
gpl-3.0
| 981
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2017-10-12 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
# grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/combined_1960-2020/0.5x0.5/combined_sources_Monoterp_1960-2020_greg.nc'
#
# STASH code emissions are associated with
# 301-320: surface
# m01s00i314: Monoterpene surface emissions
#
# 321-340: full atmosphere
#
stash='m01s00i314'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='Monoterp'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='Monoterpene surf emissions'
ocube.standard_name='tendency_of_atmosphere_mass_content_of_monoterpenes_due_to_emission'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['um_stash_source']=stash
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_Monoterp_1960-2020_greg.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of monoterpenes from 1960 to 2020'
ocube.attributes['File_version']='v2'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010'
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1960-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian')
ocube.coord(axis='t').points=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5])
# make z-direction.
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
frt=numpy.array([15.5, 45.5, 75.5, 106, 136.5, 167, 197.5,
228.5, 259, 289.5, 320, 350.5, 381.5, 411, 440.5, 471, 501.5, 532, 562.5, 593.5,
624, 654.5, 685, 715.5, 746.5, 776, 805.5, 836, 866.5, 897, 927.5, 958.5, 989,
1019.5, 1050, 1080.5, 1111.5, 1141, 1170.5, 1201, 1231.5, 1262, 1292.5, 1323.5,
1354, 1384.5, 1415, 1445.5, 1476.5, 1506.5, 1536.5, 1567, 1597.5, 1628, 1658.5,
1689.5, 1720, 1750.5, 1781, 1811.5, 1842.5, 1872, 1901.5, 1932, 1962.5, 1993,
2023.5, 2054.5, 2085, 2115.5, 2146, 2176.5, 2207.5, 2237, 2266.5, 2297, 2327.5,
2358, 2388.5, 2419.5, 2450, 2480.5, 2511, 2541.5, 2572.5, 2602, 2631.5, 2662,
2692.5, 2723, 2753.5, 2784.5, 2815, 2845.5, 2876, 2906.5, 2937.5, 2967.5, 2997.5,
3028, 3058.5, 3089, 3119.5, 3150.5, 3181, 3211.5, 3242, 3272.5, 3303.5, 3333,
3362.5, 3393, 3423.5, 3454, 3484.5, 3515.5, 3546, 3576.5, 3607, 3637.5, 3668.5,
3698, 3727.5, 3758, 3788.5, 3819, 3849.5, 3880.5, 3911, 3941.5, 3972, 4002.5,
4033.5, 4063, 4092.5, 4123, 4153.5, 4184, 4214.5, 4245.5, 4276, 4306.5, 4337,
4367.5, 4398.5, 4428.5, 4458.5, 4489, 4519.5, 4550, 4580.5, 4611.5, 4642, 4672.5,
4703, 4733.5, 4764.5, 4794, 4823.5, 4854, 4884.5, 4915, 4945.5, 4976.5, 5007,
5037.5, 5068, 5098.5, 5129.5, 5159, 5188.5, 5219, 5249.5, 5280, 5310.5, 5341.5,
5372, 5402.5, 5433, 5463.5, 5494.5, 5524, 5553.5, 5584, 5614.5, 5645, 5675.5,
5706.5, 5737, 5767.5, 5798, 5828.5, 5859.5, 5889.5, 5919.5, 5950, 5980.5, 6011,
6041.5, 6072.5, 6103, 6133.5, 6164, 6194.5, 6225.5, 6255, 6284.5, 6315, 6345.5,
6376, 6406.5, 6437.5, 6468, 6498.5, 6529, 6559.5, 6590.5, 6620, 6649.5, 6680,
6710.5, 6741, 6771.5, 6802.5, 6833, 6863.5, 6894, 6924.5, 6955.5, 6985, 7014.5,
7045, 7075.5, 7106, 7136.5, 7167.5, 7198, 7228.5, 7259, 7289.5, 7320.5, 7350.5,
7380.5, 7411, 7441.5, 7472, 7502.5, 7533.5, 7564, 7594.5, 7625, 7655.5, 7686.5,
7716, 7745.5, 7776, 7806.5, 7837, 7867.5, 7898.5, 7929, 7959.5, 7990, 8020.5,
8051.5, 8081, 8110.5, 8141, 8171.5, 8202, 8232.5, 8263.5, 8294, 8324.5, 8355,
8385.5, 8416.5, 8446, 8475.5, 8506, 8536.5, 8567, 8597.5, 8628.5, 8659, 8689.5,
8720, 8750.5, 8781.5, 8811.5, 8841.5, 8872, 8902.5, 8933, 8963.5, 8994.5, 9025,
9055.5, 9086, 9116.5, 9147.5, 9177, 9206.5, 9237, 9267.5, 9298, 9328.5, 9359.5,
9390, 9420.5, 9451, 9481.5, 9512.5, 9542, 9571.5, 9602, 9632.5, 9663, 9693.5,
9724.5, 9755, 9785.5, 9816, 9846.5, 9877.5, 9907, 9936.5, 9967, 9997.5, 10028,
10058.5, 10089.5, 10120, 10150.5, 10181, 10211.5, 10242.5, 10272.5, 10302.5,
10333, 10363.5, 10394, 10424.5, 10455.5, 10486, 10516.5, 10547, 10577.5, 10608.5,
10638, 10667.5, 10698, 10728.5, 10759, 10789.5, 10820.5, 10851, 10881.5, 10912,
10942.5, 10973.5, 11003, 11032.5, 11063, 11093.5, 11124, 11154.5, 11185.5, 11216,
11246.5, 11277, 11307.5, 11338.5, 11368, 11397.5, 11428, 11458.5, 11489, 11519.5,
11550.5, 11581, 11611.5, 11642, 11672.5, 11703.5, 11733.5, 11763.5, 11794,
11824.5, 11855, 11885.5, 11916.5, 11947, 11977.5, 12008, 12038.5, 12069.5, 12099,
12128.5, 12159, 12189.5, 12220, 12250.5, 12281.5, 12312, 12342.5, 12373, 12403.5,
12434.5, 12464, 12493.5, 12524, 12554.5, 12585, 12615.5, 12646.5, 12677, 12707.5,
12738, 12768.5, 12799.5, 12829, 12858.5, 12889, 12919.5, 12950, 12980.5, 13011.5,
13042, 13072.5, 13103, 13133.5, 13164.5, 13194.5, 13224.5, 13255, 13285.5, 13316,
13346.5, 13377.5, 13408, 13438.5, 13469, 13499.5, 13530.5, 13560, 13589.5, 13620,
13650.5, 13681, 13711.5, 13742.5, 13773, 13803.5, 13834, 13864.5, 13895.5, 13925,
13954.5, 13985, 14015.5, 14046, 14076.5, 14107.5, 14138, 14168.5, 14199, 14229.5,
14260.5, 14290, 14319.5, 14350, 14380.5, 14411, 14441.5, 14472.5, 14503, 14533.5,
14564, 14594.5, 14625.5, 14655.5, 14685.5, 14716, 14746.5, 14777, 14807.5,
14838.5, 14869, 14899.5, 14930, 14960.5, 14991.5, 15021, 15050.5, 15081,
15111.5, 15142, 15172.5, 15203.5, 15234, 15264.5, 15295, 15325.5,
15356.5, 15386, 15415.5, 15446, 15476.5, 15507, 15537.5, 15568.5, 15599,
15629.5, 15660, 15690.5, 15721.5, 15751, 15780.5, 15811, 15841.5, 15872,
15902.5, 15933.5, 15964, 15994.5, 16025, 16055.5, 16086.5, 16116.5,
16146.5, 16177, 16207.5, 16238, 16268.5, 16299.5, 16330, 16360.5, 16391,
16421.5, 16452.5, 16482, 16511.5, 16542, 16572.5, 16603, 16633.5,
16664.5, 16695, 16725.5, 16756, 16786.5, 16817.5, 16847, 16876.5, 16907,
16937.5, 16968, 16998.5, 17029.5, 17060, 17090.5, 17121, 17151.5,
17182.5, 17212, 17241.5, 17272, 17302.5, 17333, 17363.5, 17394.5, 17425,
17455.5, 17486, 17516.5, 17547.5, 17577.5, 17607.5, 17638, 17668.5,
17699, 17729.5, 17760.5, 17791, 17821.5, 17852, 17882.5, 17913.5, 17943,
17972.5, 18003, 18033.5, 18064, 18094.5, 18125.5, 18156, 18186.5, 18217,
18247.5, 18278.5, 18308, 18337.5, 18368, 18398.5, 18429, 18459.5,
18490.5, 18521, 18551.5, 18582, 18612.5, 18643.5, 18673, 18702.5, 18733,
18763.5, 18794, 18824.5, 18855.5, 18886, 18916.5, 18947, 18977.5,
19008.5, 19038.5, 19068.5, 19099, 19129.5, 19160, 19190.5, 19221.5,
19252, 19282.5, 19313, 19343.5, 19374.5, 19404, 19433.5, 19464, 19494.5,
19525, 19555.5, 19586.5, 19617, 19647.5, 19678, 19708.5, 19739.5, 19769,
19798.5, 19829, 19859.5, 19890, 19920.5, 19951.5, 19982, 20012.5, 20043,
20073.5, 20104.5, 20134, 20163.5, 20194, 20224.5, 20255, 20285.5,
20316.5, 20347, 20377.5, 20408, 20438.5, 20469.5, 20499.5, 20529.5,
20560, 20590.5, 20621, 20651.5, 20682.5, 20713, 20743.5, 20774, 20804.5,
20835.5, 20865, 20894.5, 20925, 20955.5, 20986, 21016.5, 21047.5, 21078,
21108.5, 21139, 21169.5, 21200.5, 21230, 21259.5, 21290, 21320.5, 21351,
21381.5, 21412.5, 21443, 21473.5, 21504, 21534.5, 21565.5, 21595,
21624.5, 21655, 21685.5, 21716, 21746.5, 21777.5, 21808, 21838.5, 21869,
21899.5, 21930.5, 21960.5, 21990.5, 22021, 22051.5, 22082, 22112.5,
22143.5, 22174, 22204.5, 22235, 22265.5],dtype='float64')
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
units=cf_units.Unit('days since 1960-01-01 00:00:00', calendar='gregorian'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_'+species_name+'.nc'
# don't want the time dimension to be unlimited (cattable), as this is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
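# quick sanity check (illustrative, not part of the original workflow): the
# written file can be re-loaded to confirm that the attributes and the time
# coordinate survived the save, e.g.
#   chk = iris.load_cube(outpath)
#   print(chk.coord('time').units, chk.attributes['um_stash_source'])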
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1960-2020/regrid_Monoterp_emissions_n96e_greg.py
|
Python
|
gpl-3.0
| 19,020
|
from django.urls import reverse_lazy, reverse
from django.template.defaultfilters import force_escape
from django.views.generic import ListView, View
from django.views.generic.edit import FormView
from django.shortcuts import get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from notifications.models import Notification
from profile.models import ActiveUser
from .forms import NewThreadForm, ReplyForm
from .models import Message, Thread, MessageBox
import copy
class ReplyView(FormView):
"""
Handle form submission for a reply in an existing thread.
"""
template_name = None
form_class = ReplyForm
success_url = None
dispatch = method_decorator(login_required)(FormView.dispatch)
def form_valid(self, form):
box = get_object_or_404(MessageBox.objects, thread=self.kwargs['thread'], user=self.request.user)
box.thread.post_message(self.request.user, form.cleaned_data['text'])
thread = box.thread
recipients = thread.recipients
recipients.remove(self.request.user)
for recipient in recipients:
Notification.objects.get_or_create(
recipient=recipient,
title='Nouveau message',
description='%s a posté un nouveau message dans la conversation <em>%s</em>.'
% (self.request.user.get_username(), force_escape(thread.title)),
action=reverse('messaging_show', kwargs={'thread': thread.pk})+'#unread',
app='messaging',
key='thread-%d' % thread.pk)
messages.success(self.request, "Message enregistré.")
return redirect(reverse_lazy('messaging_show', kwargs={'thread': self.kwargs['thread']}) + '#last')
def form_invalid(self, form):
messages.error(self.request, '\n'.join(form.text.errors))
return redirect(reverse_lazy('messaging_show', kwargs={'thread': self.kwargs['thread']}))
class MarkThreadView(View):
"""
Mark thread as archived, unarchived, starred, unstarred, read, unread, or deleted and redirect
according to a `next` parameter if it exists.
"""
dispatch = method_decorator(login_required)(View.dispatch)
def get(self, request, *args, **kwargs):
thread = kwargs['thread']
if 'next' in self.request.GET:
next = self.request.GET['next']
else:
next = None
mark = kwargs['mark']
if mark not in ['archived', 'unarchived', 'starred', 'unstarred', 'read', 'unread', 'deleted']:
return redirect(reverse_lazy('messaging_show', kwargs={'thread': thread}))
box = get_object_or_404(MessageBox.objects, thread=kwargs['thread'], user=self.request.user)
if mark == 'archived':
messages.success(request, 'La discussion a été déplacée vers les archives.')
box.mark_archived()
elif mark == 'unarchived':
messages.success(request, 'La discussion a été déplacée vers la boîte de réception.')
box.mark_normal()
elif mark == 'starred':
messages.success(request, 'Une étoile a été ajoutée à la discussion.')
box.mark_starred()
elif mark == 'unstarred':
messages.success(request, 'Une étoile a été retirée de la discussion.')
box.mark_unstarred()
elif mark == 'read':
messages.success(request, 'La discussion a été marquée comme lue.')
box.mark_read()
elif mark == 'unread':
messages.success(request, 'La discussion a été marquée comme non-lue.')
box.mark_unread()
elif mark == 'deleted':
messages.success(request, 'La discussion a été supprimée.')
box.mark_deleted()
if next == 'thread':
next = None
if next == 'archived':
return redirect(reverse_lazy('messaging_archived'))
elif next == 'thread':
return redirect(reverse_lazy('messaging_show', kwargs={'thread': kwargs['thread']}))
else:
return redirect(reverse_lazy('messaging_inbox'))
class NewThreadView(FormView):
"""
Display and handle the form needed to create a new thread.
If a `username` is specified, then this username is used as a default recipient.
"""
template_name = 'messaging/thread_new.html'
form_class = NewThreadForm
success_url = None # Is set according to the newly created thread
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
if 'username' in kwargs:
self.initial_target = kwargs['username']
else:
self.initial_target = None
return FormView.dispatch(self, *args, **kwargs)
def get_initial(self):
initial = FormView.get_initial(self)
if self.initial_target:
user = get_object_or_404(ActiveUser, username=self.initial_target)
initial['recipients'] = user
return initial
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
data = form.cleaned_data
# Remove user from recipients, if needed
for recipient in data['recipients']:
if recipient.get_username() == self.request.user.get_username():
data['recipients'].remove(recipient)
break
new_box = Thread.objects.create_thread(self.request.user, data['title'], data['text'], data['recipients'])
thread = new_box.thread
recipients = thread.recipients
recipients.remove(self.request.user)
for recipient in recipients:
Notification.objects.get_or_create(
recipient=recipient,
title='Nouvelle conversation',
description='%s a entamé une nouvelle conversation avec vous : <em>%s</em>.'
% (self.request.user.get_username(), force_escape(thread.title)),
action=reverse('messaging_show', kwargs={'thread': thread.pk}),
app='messaging',
key='thread-%d' % thread.pk)
messages.success(self.request, 'La nouvelle conversation a été enregistrée.')
return redirect(reverse_lazy('messaging_show', kwargs={'thread': new_box.thread.pk}))
class MessageListView(ListView):
"""
Display the content of a given thread.
"""
template_name = 'messaging/thread_show.html'
context_object_name = 'message_list'
# See dispatch method below
# dispatch = method_decorator(login_required)(ListView.dispatch)
def get_queryset(self):
return Message.objects.filter(thread=self.box.thread)
def get_context_data(self, **kwargs):
context = ListView.get_context_data(self, **kwargs)
context['user_list'] = self.box.thread.recipients
context['date_read'] = copy.copy(self.box.date_read)
context['box'] = self.box
context['form'] = ReplyForm()
self.box.mark_read()
return context
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.box = get_object_or_404(MessageBox.objects, thread=self.kwargs['thread'], user=self.request.user)
return ListView.dispatch(self, request, *args, **kwargs)
class ThreadListView(ListView):
"""
Display the content of the given message box (chosen from `filter` url parameter).
"""
template_name = 'messaging/thread_list.html'
context_object_name = 'box_list'
dispatch = method_decorator(login_required)(ListView.dispatch)
def get_queryset(self):
if 'filter' in self.kwargs and self.kwargs['filter'] == 'archived':
manager = MessageBox.archived
else:
manager = MessageBox.unarchived
return manager.all().filter(user=self.request.user).order_by('-thread__last_message__date')
def get_context_data(self, **kwargs):
context = ListView.get_context_data(self, **kwargs)
# Set '?next=' if needed
if 'filter' in self.kwargs and self.kwargs['filter'] == 'archived':
context['archived'] = True
context['next'] = '?next=archived'
return context
|
AlexandreDecan/Lexpage
|
app/messaging/views.py
|
Python
|
gpl-3.0
| 8,456
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
VetEpiGIS-Group
A QGIS plugin
Spatial functions for vet epidemiology
-------------------
begin : 2016-05-06
git sha : $Format:%H$
copyright : (C) 2016 by Norbert Solymosi
email : solymosi.norbert@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os, shutil
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSignal, Qt, QSettings, QCoreApplication, QFile, QFileInfo, QDate, QVariant, \
pyqtSignal, QRegExp, QDateTime, QTranslator, QFile, QDir, QIODevice, QTextStream
from PyQt5.QtSql import *
from PyQt5.QtWidgets import QDialog, QFileDialog
import psycopg2
import psycopg2.extensions
# use unicode!
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
from .merge_dialog import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
def __init__(self, wdbpath, dbtype):
"""Constructor for the dialog.
"""
QDialog.__init__(self)
self.setupUi(self)
self.wdbpath = wdbpath
self.dbtype = dbtype
self.toolButton.clicked.connect(self.dbSource)
self.setWDBPath()
# def setLabelName(self):
# if self.db_type == 'postgis':
# self.label_db.setText('PostGIS db:')
# if self.db_type == 'spatialite':
# self.label_db.setText('SpatiaLite db:')
def dbSource(self):
dbpath = QFileDialog.getOpenFileName(self,
'Select input VetEpiGIS database file', QDir.currentPath(), 'SpatiaLite file (*.sqlite *.*)')
dbpath = dbpath[0]
if os.path.isfile(dbpath):
self.lineEdit.setText(dbpath)
def setWDBPath(self):
if self.dbtype == 'postgis':
self.label_WD.setText('PostGIS WDB:')
elif self.dbtype == 'spatialite':
self.label_WD.setText('SpatiaLite WDB:')
self.label_path_WD.setText(self.wdbpath)
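# Illustrative usage from the host plugin (names hypothetical): the dialog is
# constructed with the current working-database path and backend type, e.g.
#   dlg = Dialog('/path/to/working.sqlite', 'spatialite')
#   dlg.exec_()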
|
IZSVenezie/VetEpiGIS-Group
|
plugin/merge.py
|
Python
|
gpl-3.0
| 2,772
|
# Find the cube root of a perfect cube
x = int(input('Enter an integer: '))
for ans in range(0, abs(x)+1):
if ans**3 >= abs(x):
break
if ans**3 != abs(x):
print(x, 'is not a perfect cube')
else:
if x < 0:
ans = -ans
print('Cube root of', x, 'is', ans)
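# Illustrative behaviour: entering 27 stops the loop at ans = 3 and prints
# "Cube root of 27 is 3"; entering 26 reports that 26 is not a perfect cube.
# The guess-and-check loop tries at most abs(x)+1 candidates, stopping at the
# first ans with ans**3 >= abs(x).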
|
bmoretz/Algorithms
|
Computation/ch_02/cube1.py
|
Python
|
gpl-3.0
| 286
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 <Kyle Francis> <guitarman_usa@yahoo.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import optparse, os
import gettext
from gettext import gettext as _
gettext.textdomain('metau')
from gi.repository import Gtk, Gdk, GObject # pylint: disable=E0611
from metau import MetauWindow
from metau_lib import set_up_logging, get_version
def parse_options():
"""Support for command line options"""
parser = optparse.OptionParser(version="%%prog %s" % get_version())
parser.add_option(
"-v", "--verbose", action="count", dest="verbose",
help=_("Show debug messages (-vv debugs metau_lib also)"))
(options, args) = parser.parse_args()
set_up_logging(options)
def main():
'constructor for your class instances'
parse_options()
#initialize threads
GObject.threads_init()
Gdk.threads_init()
# Run the application.
window = MetauWindow.MetauWindow()
window.show()
Gdk.threads_enter()
Gtk.main()
Gdk.threads_leave()
# Delete our temporary cover art files extracted from atoms
for filename in window.temp_coverart:
print "Removing temporary coverart file: ",window.temp_coverart[filename][0]
os.remove(window.temp_coverart[filename][0])
|
guitarmanusa/metau
|
metau/__init__.py
|
Python
|
gpl-3.0
| 1,928
|
#Python program to merge two sorted array a[] and b[] and storing it in c[]
a = []
b = []
c = []
num1 = int(input("Enter number of elements in array 1: "))
print("Enter your sorted array 1: ")
for i in range(0, num1):
    a.insert(i, int(input()))
num2 = int(input("Enter number of elements in array 2: "))
print("Enter your sorted array 2: ")
for i in range(0, num2):
    b.insert(i, int(input()))
k = 0
j = 0
i = 0
# pre-size the result list so it can be filled by index
c = [0] * (num1 + num2)
# Traverse both arrays
while i < num1 and j < num2:
    if a[i] > b[j]:
        c[k] = b[j]
        k = k + 1
        j = j + 1
    else:
        c[k] = a[i]
        k = k + 1
        i = i + 1
# copying remaining elements of array a
while i < num1:
    c[k] = a[i]
    k = k + 1
    i = i + 1
# copying remaining elements of array b
while j < num2:
    c[k] = b[j]
    k = k + 1
    j = j + 1
print("New merged array: ")
for i in c:
    print(i)
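# For reference (not part of the original exercise): the same merged result
# could also be produced with Python's built-in sort, e.g. c = sorted(a + b),
# although the explicit two-pointer merge above runs in O(num1 + num2) time.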
'''
Enter number of elements in array 1: 5
Enter your sorted array 1:
1
2
3
4
45
Enter number of elements in array 2: 4
Enter your sorted array 2:
5
7
10
32
New merged array:
1
2
3
4
5
7
10
32
45
'''
|
jainaman224/Algo_Ds_Notes
|
1-D_Array/Python/Merge_Two_Sorted_Arrays.py
|
Python
|
gpl-3.0
| 1,097
|
# $Id: ip.py 65 2010-03-26 02:53:51Z dugsong $
"""Internet Protocol."""
import dpkt
class IP(dpkt.Packet):
__hdr__ = (
('v_hl', 'B', (4 << 4) | (20 >> 2)),
('tos', 'B', 0),
('len', 'H', 20),
('id', 'H', 0),
('off', 'H', 0),
('ttl', 'B', 64),
('p', 'B', 0),
('sum', 'H', 0),
('src', '4s', '\x00' * 4),
('dst', '4s', '\x00' * 4)
)
_protosw = {}
opts = ''
def _get_v(self): return self.v_hl >> 4
def _set_v(self, v): self.v_hl = (v << 4) | (self.v_hl & 0xf)
v = property(_get_v, _set_v)
def _get_hl(self): return self.v_hl & 0xf
def _set_hl(self, hl): self.v_hl = (self.v_hl & 0xf0) | hl
hl = property(_get_hl, _set_hl)
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __str__(self):
if self.sum == 0:
self.sum = dpkt.in_cksum(self.pack_hdr() + self.opts)
if (self.p == 6 or self.p == 17) and \
(self.off & (IP_MF|IP_OFFMASK)) == 0 and \
isinstance(self.data, dpkt.Packet) and self.data.sum == 0:
# Set zeroed TCP and UDP checksums for non-fragments.
p = str(self.data)
s = dpkt.struct.pack('>4s4sxBH', self.src, self.dst,
self.p, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
self.data.sum = dpkt.in_cksum_done(s)
if self.p == 17 and self.data.sum == 0:
self.data.sum = 0xffff # RFC 768
# XXX - skip transports which don't need the pseudoheader
return self.pack_hdr() + self.opts + str(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self.v_hl & 0xf) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError, 'invalid header length'
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
buf = buf[self.__hdr_len__ + ol:self.len]
try:
self.data = self._protosw[self.p](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
set_proto = classmethod(set_proto)
def get_proto(cls, p):
return cls._protosw[p]
get_proto = classmethod(get_proto)
# Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
IP_TOS_DEFAULT = 0x00 # default
IP_TOS_LOWDELAY = 0x10 # low delay
IP_TOS_THROUGHPUT = 0x08 # high throughput
IP_TOS_RELIABILITY = 0x04 # high reliability
IP_TOS_LOWCOST = 0x02 # low monetary cost - XXX
IP_TOS_ECT = 0x02 # ECN-capable transport
IP_TOS_CE = 0x01 # congestion experienced
# IP precedence (high 3 bits of ip_tos), hopefully unused
IP_TOS_PREC_ROUTINE = 0x00
IP_TOS_PREC_PRIORITY = 0x20
IP_TOS_PREC_IMMEDIATE = 0x40
IP_TOS_PREC_FLASH = 0x60
IP_TOS_PREC_FLASHOVERRIDE = 0x80
IP_TOS_PREC_CRITIC_ECP = 0xa0
IP_TOS_PREC_INTERNETCONTROL = 0xc0
IP_TOS_PREC_NETCONTROL = 0xe0
# Fragmentation flags (ip_off)
IP_RF = 0x8000 # reserved
IP_DF = 0x4000 # don't fragment
IP_MF = 0x2000 # more fragments (not last frag)
IP_OFFMASK = 0x1fff # mask for fragment offset
# Time-to-live (ip_ttl), seconds
IP_TTL_DEFAULT = 64 # default ttl, RFC 1122, RFC 1340
IP_TTL_MAX = 255 # maximum ttl
# Protocol (ip_p) - http://www.iana.org/assignments/protocol-numbers
IP_PROTO_IP = 0 # dummy for IP
IP_PROTO_HOPOPTS = IP_PROTO_IP # IPv6 hop-by-hop options
IP_PROTO_ICMP = 1 # ICMP
IP_PROTO_IGMP = 2 # IGMP
IP_PROTO_GGP = 3 # gateway-gateway protocol
IP_PROTO_IPIP = 4 # IP in IP
IP_PROTO_ST = 5 # ST datagram mode
IP_PROTO_TCP = 6 # TCP
IP_PROTO_CBT = 7 # CBT
IP_PROTO_EGP = 8 # exterior gateway protocol
IP_PROTO_IGP = 9 # interior gateway protocol
IP_PROTO_BBNRCC = 10 # BBN RCC monitoring
IP_PROTO_NVP = 11 # Network Voice Protocol
IP_PROTO_PUP = 12 # PARC universal packet
IP_PROTO_ARGUS = 13 # ARGUS
IP_PROTO_EMCON = 14 # EMCON
IP_PROTO_XNET = 15 # Cross Net Debugger
IP_PROTO_CHAOS = 16 # Chaos
IP_PROTO_UDP = 17 # UDP
IP_PROTO_MUX = 18 # multiplexing
IP_PROTO_DCNMEAS = 19 # DCN measurement
IP_PROTO_HMP = 20 # Host Monitoring Protocol
IP_PROTO_PRM = 21 # Packet Radio Measurement
IP_PROTO_IDP = 22 # Xerox NS IDP
IP_PROTO_TRUNK1 = 23 # Trunk-1
IP_PROTO_TRUNK2 = 24 # Trunk-2
IP_PROTO_LEAF1 = 25 # Leaf-1
IP_PROTO_LEAF2 = 26 # Leaf-2
IP_PROTO_RDP = 27 # "Reliable Datagram" proto
IP_PROTO_IRTP = 28 # Inet Reliable Transaction
IP_PROTO_TP = 29 # ISO TP class 4
IP_PROTO_NETBLT = 30 # Bulk Data Transfer
IP_PROTO_MFPNSP = 31 # MFE Network Services
IP_PROTO_MERITINP = 32 # Merit Internodal Protocol
IP_PROTO_SEP = 33 # Sequential Exchange proto
IP_PROTO_3PC = 34 # Third Party Connect proto
IP_PROTO_IDPR = 35 # Interdomain Policy Route
IP_PROTO_XTP = 36 # Xpress Transfer Protocol
IP_PROTO_DDP = 37 # Datagram Delivery Proto
IP_PROTO_CMTP = 38 # IDPR Ctrl Message Trans
IP_PROTO_TPPP = 39 # TP++ Transport Protocol
IP_PROTO_IL = 40 # IL Transport Protocol
IP_PROTO_IP6 = 41 # IPv6
IP_PROTO_SDRP = 42 # Source Demand Routing
IP_PROTO_ROUTING = 43 # IPv6 routing header
IP_PROTO_FRAGMENT = 44 # IPv6 fragmentation header
IP_PROTO_RSVP = 46 # Reservation protocol
IP_PROTO_GRE = 47 # General Routing Encap
IP_PROTO_MHRP = 48 # Mobile Host Routing
IP_PROTO_ENA = 49 # ENA
IP_PROTO_ESP = 50 # Encap Security Payload
IP_PROTO_AH = 51 # Authentication Header
IP_PROTO_INLSP = 52 # Integated Net Layer Sec
IP_PROTO_SWIPE = 53 # SWIPE
IP_PROTO_NARP = 54 # NBMA Address Resolution
IP_PROTO_MOBILE = 55 # Mobile IP, RFC 2004
IP_PROTO_TLSP = 56 # Transport Layer Security
IP_PROTO_SKIP = 57 # SKIP
IP_PROTO_ICMP6 = 58 # ICMP for IPv6
IP_PROTO_NONE = 59 # IPv6 no next header
IP_PROTO_DSTOPTS = 60 # IPv6 destination options
IP_PROTO_ANYHOST = 61 # any host internal proto
IP_PROTO_CFTP = 62 # CFTP
IP_PROTO_ANYNET = 63 # any local network
IP_PROTO_EXPAK = 64 # SATNET and Backroom EXPAK
IP_PROTO_KRYPTOLAN = 65 # Kryptolan
IP_PROTO_RVD = 66 # MIT Remote Virtual Disk
IP_PROTO_IPPC = 67 # Inet Pluribus Packet Core
IP_PROTO_DISTFS = 68 # any distributed fs
IP_PROTO_SATMON = 69 # SATNET Monitoring
IP_PROTO_VISA = 70 # VISA Protocol
IP_PROTO_IPCV = 71 # Inet Packet Core Utility
IP_PROTO_CPNX = 72 # Comp Proto Net Executive
IP_PROTO_CPHB = 73 # Comp Protocol Heart Beat
IP_PROTO_WSN = 74 # Wang Span Network
IP_PROTO_PVP = 75 # Packet Video Protocol
IP_PROTO_BRSATMON = 76 # Backroom SATNET Monitor
IP_PROTO_SUNND = 77 # SUN ND Protocol
IP_PROTO_WBMON = 78 # WIDEBAND Monitoring
IP_PROTO_WBEXPAK = 79 # WIDEBAND EXPAK
IP_PROTO_EON = 80 # ISO CNLP
IP_PROTO_VMTP = 81 # Versatile Msg Transport
IP_PROTO_SVMTP = 82 # Secure VMTP
IP_PROTO_VINES = 83 # VINES
IP_PROTO_TTP = 84 # TTP
IP_PROTO_NSFIGP = 85 # NSFNET-IGP
IP_PROTO_DGP = 86 # Dissimilar Gateway Proto
IP_PROTO_TCF = 87 # TCF
IP_PROTO_EIGRP = 88 # EIGRP
IP_PROTO_OSPF = 89 # Open Shortest Path First
IP_PROTO_SPRITERPC = 90 # Sprite RPC Protocol
IP_PROTO_LARP = 91 # Locus Address Resolution
IP_PROTO_MTP = 92 # Multicast Transport Proto
IP_PROTO_AX25 = 93 # AX.25 Frames
IP_PROTO_IPIPENCAP = 94 # yet-another IP encap
IP_PROTO_MICP = 95 # Mobile Internet Ctrl
IP_PROTO_SCCSP = 96 # Semaphore Comm Sec Proto
IP_PROTO_ETHERIP = 97 # Ethernet in IPv4
IP_PROTO_ENCAP = 98 # encapsulation header
IP_PROTO_ANYENC = 99 # private encryption scheme
IP_PROTO_GMTP = 100 # GMTP
IP_PROTO_IFMP = 101 # Ipsilon Flow Mgmt Proto
IP_PROTO_PNNI = 102 # PNNI over IP
IP_PROTO_PIM = 103 # Protocol Indep Multicast
IP_PROTO_ARIS = 104 # ARIS
IP_PROTO_SCPS = 105 # SCPS
IP_PROTO_QNX = 106 # QNX
IP_PROTO_AN = 107 # Active Networks
IP_PROTO_IPCOMP = 108 # IP Payload Compression
IP_PROTO_SNP = 109 # Sitara Networks Protocol
IP_PROTO_COMPAQPEER = 110 # Compaq Peer Protocol
IP_PROTO_IPXIP = 111 # IPX in IP
IP_PROTO_VRRP = 112 # Virtual Router Redundancy
IP_PROTO_PGM = 113 # PGM Reliable Transport
IP_PROTO_ANY0HOP = 114 # 0-hop protocol
IP_PROTO_L2TP = 115 # Layer 2 Tunneling Proto
IP_PROTO_DDX = 116 # D-II Data Exchange (DDX)
IP_PROTO_IATP = 117 # Interactive Agent Xfer
IP_PROTO_STP = 118 # Schedule Transfer Proto
IP_PROTO_SRP = 119 # SpectraLink Radio Proto
IP_PROTO_UTI = 120 # UTI
IP_PROTO_SMP = 121 # Simple Message Protocol
IP_PROTO_SM = 122 # SM
IP_PROTO_PTP = 123 # Performance Transparency
IP_PROTO_ISIS = 124 # ISIS over IPv4
IP_PROTO_FIRE = 125 # FIRE
IP_PROTO_CRTP = 126 # Combat Radio Transport
IP_PROTO_CRUDP = 127 # Combat Radio UDP
IP_PROTO_SSCOPMCE = 128 # SSCOPMCE
IP_PROTO_IPLT = 129 # IPLT
IP_PROTO_SPS = 130 # Secure Packet Shield
IP_PROTO_PIPE = 131 # Private IP Encap in IP
IP_PROTO_SCTP = 132 # Stream Ctrl Transmission
IP_PROTO_FC = 133 # Fibre Channel
IP_PROTO_RSVPIGN = 134 # RSVP-E2E-IGNORE
IP_PROTO_RAW = 255 # Raw IP packets
IP_PROTO_RESERVED = IP_PROTO_RAW # Reserved
IP_PROTO_MAX = 255
# XXX - auto-load IP dispatch table from IP_PROTO_* definitions
def __load_protos():
g = globals()
for k, v in g.items():
if k.startswith('IP_PROTO_'):
name = k[9:].lower()
try:
mod = __import__(name, g)
except ImportError:
continue
IP.set_proto(v, getattr(mod, name.upper()))
if not IP._protosw:
__load_protos()
if __name__ == '__main__':
import unittest
class IPTestCase(unittest.TestCase):
def test_IP(self):
import udp
s = 'E\x00\x00"\x00\x00\x00\x00@\x11r\xc0\x01\x02\x03\x04\x01\x02\x03\x04\x00o\x00\xde\x00\x0e\xbf5foobar'
ip = IP(id=0, src='\x01\x02\x03\x04', dst='\x01\x02\x03\x04', p=17)
u = udp.UDP(sport=111, dport=222)
u.data = 'foobar'
u.ulen += len(u.data)
ip.data = u
ip.len += len(u)
self.failUnless(str(ip) == s)
ip = IP(s)
self.failUnless(str(ip) == s)
self.failUnless(ip.udp.sport == 111)
self.failUnless(ip.udp.data == 'foobar')
def test_hl(self):
s = 'BB\x03\x00\x00\x00\x00\x00\x00\x00\xd0\x00\xec\xbc\xa5\x00\x00\x00\x03\x80\x00\x00\xd0\x01\xf2\xac\xa5"0\x01\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00'
try:
ip = IP(s)
except dpkt.UnpackError:
pass
def test_opt(self):
s = '\x4f\x00\x00\x50\xae\x08\x00\x00\x40\x06\x17\xfc\xc0\xa8\x0a\x26\xc0\xa8\x0a\x01\x07\x27\x08\x01\x02\x03\x04\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
ip = IP(s)
ip.sum = 0
self.failUnless(str(ip) == s)
unittest.main()
|
packenx/PythonInstaller
|
app/src/main/assets/arm/static/python/lib/python2.7/site-packages/dpkt/ip.py
|
Python
|
gpl-3.0
| 11,035
|
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from gi.repository import GObject
import logging
from softwarecenter.utils import get_icon_from_theme, size_to_str, utf8
from softwarecenter.backend import get_install_backend
from softwarecenter.backend.transactionswatcher import get_transactions_watcher
from gettext import gettext as _
class PendingStore(Gtk.ListStore):
# column names
(COL_TID,
COL_ICON,
COL_NAME,
COL_STATUS,
COL_PROGRESS,
COL_PULSE,
COL_CANCEL) = range(7)
# column types
column_types = (str, # COL_TID
GdkPixbuf.Pixbuf, # COL_ICON
str, # COL_NAME
str, # COL_STATUS
float, # COL_PROGRESS
int, # COL_PULSE
str) # COL_CANCEL
# icons
PENDING_STORE_ICON_CANCEL = Gtk.STOCK_CANCEL
PENDING_STORE_ICON_NO_CANCEL = "" # Gtk.STOCK_YES
ICON_SIZE = 24
def __init__(self, icons):
# icon, status, progress
Gtk.ListStore.__init__(self)
self.set_column_types(self.column_types)
self._transactions_watcher = get_transactions_watcher()
self._transactions_watcher.connect("lowlevel-transactions-changed",
self._on_lowlevel_transactions_changed)
# data
self.icons = icons
# the apt-daemon stuff
self.backend = get_install_backend()
self._signals = []
# let the pulse helper run
GObject.timeout_add(500, self._pulse_purchase_helper)
def clear(self):
super(PendingStore, self).clear()
for sig in self._signals:
GObject.source_remove(sig)
del sig
self._signals = []
def _on_lowlevel_transactions_changed(self, watcher, current_tid, pending_tids):
logging.debug("on_transaction_changed %s (%s)" % (current_tid, len(pending_tids)))
self.clear()
for tid in [current_tid] + pending_tids:
if not tid:
continue
# we do this synchronously (it used to be a reply_handler),
# otherwise we run into a race where, when we get two
# on_transaction_changed signals close after each other, clear() is
# run before the "_append_transaction" handler is run and we end up
# with two (or more) _append_transactions
trans = self._transactions_watcher.get_transaction(tid)
if trans:
self._append_transaction(trans)
# add pending purchases as pseudo transactions
for pkgname in self.backend.pending_purchases:
iconname = self.backend.pending_purchases[pkgname].iconname
icon = get_icon_from_theme(self.icons, iconname=iconname, iconsize=self.ICON_SIZE)
appname = self.backend.pending_purchases[pkgname].appname
status_text = self._render_status_text(
appname or pkgname, _(u'Installing purchase\u2026'))
self.append([pkgname, icon, pkgname, status_text, float(0), 1, None])
def _pulse_purchase_helper(self):
for item in self:
if item[self.COL_PULSE] > 0:
self[-1][self.COL_PULSE] += 1
return True
def _append_transaction(self, trans):
"""Extract information about the transaction and append it to the
store.
"""
logging.debug("_append_transaction %s (%s)" % (trans.tid, trans))
self._signals.append(
trans.connect(
"progress-details-changed", self._on_progress_details_changed))
self._signals.append(
trans.connect("progress-changed", self._on_progress_changed))
self._signals.append(
trans.connect("status-changed", self._on_status_changed))
self._signals.append(
trans.connect(
"cancellable-changed",self._on_cancellable_changed))
if "sc_appname" in trans.meta_data:
appname = trans.meta_data["sc_appname"]
elif "sc_pkgname" in trans.meta_data:
appname = trans.meta_data["sc_pkgname"]
else:
#FIXME: Extract information from packages property
appname = trans.get_role_description()
self._signals.append(
trans.connect("role-changed", self._on_role_changed))
try:
iconname = trans.meta_data["sc_iconname"]
except KeyError:
icon = get_icon_from_theme(self.icons, iconsize=self.ICON_SIZE)
else:
icon = get_icon_from_theme(self.icons, iconname=iconname, iconsize=self.ICON_SIZE)
if trans.is_waiting():
status = trans.status_details
else:
status = trans.get_status_description()
status_text = self._render_status_text(appname, status)
cancel_icon = self._get_cancel_icon(trans.cancellable)
self.append([trans.tid, icon, appname, status_text, float(trans.progress),
-1, cancel_icon])
def _on_cancellable_changed(self, trans, cancellable):
#print "_on_allow_cancel: ", trans, allow_cancel
for row in self:
if row[self.COL_TID] == trans.tid:
row[self.COL_CANCEL] = self._get_cancel_icon(cancellable)
def _get_cancel_icon(self, cancellable):
if cancellable:
return self.PENDING_STORE_ICON_CANCEL
else:
return self.PENDING_STORE_ICON_NO_CANCEL
def _on_role_changed(self, trans, role):
#print "_on_progress_changed: ", trans, role
for row in self:
if row[self.COL_TID] == trans.tid:
row[self.COL_NAME] = trans.get_role_description(role) or ""
def _on_progress_details_changed(self, trans, current_items, total_items,
current_bytes, total_bytes, current_cps,
eta):
#print "_on_progress_details_changed: ", trans, progress
for row in self:
if row[self.COL_TID] == trans.tid:
if trans.is_downloading():
name = row[self.COL_NAME]
current_bytes_str = size_to_str(current_bytes)
total_bytes_str = size_to_str(total_bytes)
status = _("Downloaded %sB of %sB") % \
(current_bytes_str, total_bytes_str)
row[self.COL_STATUS] = self._render_status_text(name, status)
def _on_progress_changed(self, trans, progress):
# print "_on_progress_changed: ", trans, progress
for row in self:
if row[self.COL_TID] == trans.tid:
if progress:
row[self.COL_PROGRESS] = float(progress)
def _on_status_changed(self, trans, status):
#print "_on_progress_changed: ", trans, status
for row in self:
if row[self.COL_TID] == trans.tid:
# FIXME: the spaces around %s are poor man's padding because
# setting xpad on the cell-renderer seems to not work
name = row[self.COL_NAME]
if trans.is_waiting():
st = trans.status_details
else:
st = trans.get_status_description(status)
row[self.COL_STATUS] = self._render_status_text(name, st)
def _render_status_text(self, name, status):
if not name:
name = ""
return "%s\n<small>%s</small>" % (utf8(name), utf8(status))
|
armikhael/software-center
|
softwarecenter/ui/gtk3/models/pendingstore.py
|
Python
|
gpl-3.0
| 7,630
|
import ipcalc
import ipam.exception
class ReserveIP (object):
"""
Provides the ability to reserve ip addresses before actually
having host names and MAC addresses.
"""
def __init__(self, data_store=None):
super(ReserveIP, self).__init__()
self._data_store = data_store
|
vpejovski/simpleipam
|
ipam/reserveip.py
|
Python
|
gpl-3.0
| 308
|
# Copyright (C) 2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Configuration system interface."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'ConfigurationUpdatedEvent',
'IConfiguration',
]
from zope.interface import Interface
class IConfiguration(Interface):
"""Marker interface; used for adaptation in the REST API."""
class ConfigurationUpdatedEvent:
"""The system-wide global configuration was updated."""
def __init__(self, config):
self.config = config
|
hcs/mailman
|
src/mailman/interfaces/configuration.py
|
Python
|
gpl-3.0
| 1,237
|
"""
BlackDog
Copyright (C) 2014 Snaipe, Therozin
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import hashlib
import re
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
import atexit
import requests
from blackdog import PluginVersion, BlackDogException
class HTTPServer(TCPServer):
def __init__(self, port):
super().__init__(("", port), RequestHandler, bind_and_activate=False)
from blackdog import BlackDog
self.blackdog = BlackDog.instance
self.port = port
def __enter__(self):
self.server_bind()
self.server_activate()
atexit.register(self.close)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
atexit.unregister(self.close)
def close(self):
self.server_close()
def pattern(pattern):
def decorator(func):
func.pattern = pattern
return func
return decorator
class RequestHandler(SimpleHTTPRequestHandler):
def get_groupid(self):
return '.'.join(self.path.split('/')[1:-3])
def handle_pattern(self, path, *args, **kwargs):
handles = []
for m in [m for m in [getattr(self, m) for m in dir(self)] if hasattr(m, 'pattern')]:
handles.append((m.pattern, m))
for p, m in handles:
if re.match(p, path):
m(*args, **kwargs)
return True
return False
def handle_text(self, content, mime='text/plain'):
if not content:
self.send_response(404)
self.end_headers()
return
self.send_response(200)
self.send_header('Content-type', mime)
self.send_header('Content-length', len(content))
self.end_headers()
self.wfile.write(content.encode('ascii'))
@pattern(r'.*\.jar$')
def handle_jar(self, version: PluginVersion):
if not version.url():
self.send_response(404)
self.end_headers()
return
r = requests.get(version.url(), stream=True)
if r.status_code != 200:
self.send_response(404)
self.end_headers()
r.close()
return
self.send_response(200)
self.send_header('Content-type', 'application/java-archive')
self.send_header('Content-length', r.headers['content-length'])
self.end_headers()
if not version.sha1():
sha1 = hashlib.sha1()
for chunk in r.iter_content(16 * 1024):
self.wfile.write(chunk)
sha1.update(chunk)
version.sha1(sha1.hexdigest())
self.server.blackdog.bukkitdev.save_plugin(version.get_plugin())
else:
for chunk in r.iter_content(16 * 1024):
self.wfile.write(chunk)
@pattern(r'.*\.pom$')
def handle_pom(self, version: PluginVersion):
self.handle_text(version.get_pom(self.get_groupid()), mime='text/xml')
@pattern(r'.*\.jar.sha1$')
def handle_jar_sha1(self, version: PluginVersion):
self.handle_text(version.sha1())
@pattern(r'.*\.jar.md5$')
def handle_jar_md5(self, version: PluginVersion):
self.handle_text(version.md5())
@pattern(r'.*\.pom.sha1$')
def handle_pom_sha1(self, version: PluginVersion):
self.handle_text(version.get_pom_sha1(self.get_groupid()))
@pattern(r'.*\.pom.md5$')
def handle_pom_md5(self, version: PluginVersion):
self.handle_text(version.get_pom_md5(self.get_groupid()))
def do_GET(self):
from blackdog import BlackDog
blackdog = BlackDog.instance
try:
dir_path = self.path.split('/')[:-1]
plugin_version = dir_path[-1]
plugin_name = dir_path[-2]
except:
self.send_response(404)
self.end_headers()
return
try:
plugin = blackdog.bukkitdev.get_plugin(plugin_name, no_query=True)
if not plugin.has_version(plugin_version) or not plugin.get_version(plugin_version).can_download():
plugin = blackdog.bukkitdev.get_plugin(plugin_name, version=plugin_version)
version = plugin.get_version(plugin_version)
if not self.handle_pattern(self.path, version):
self.send_response(404)
self.end_headers()
except BlackDogException as e:
blackdog.logger.error(e.message)
self.send_response(404)
self.end_headers()
except Exception as e:
blackdog.logger.exception(e)
self.send_response(404)
self.end_headers()
|
Snaipe/BlackDog
|
blackdog/server.py
|
Python
|
gpl-3.0
| 5,246
|
import json
import sys
from nsx_rest import *
from common.jinja import *
def get_tz_all():
r = nsxGet("/api/2.0/vdn/scopes", "json")
r_dict = json.loads(r)
tzones = {"transportzones" : []}
scopes = r_dict['allScopes']
for scope in scopes:
tzones["transportzones"].append({'name' : scope['name'], 'id' : scope['id']})
return tzones
def get_tz_id_by_name(name):
r = nsxGet("/api/2.0/vdn/scopes", "json")
r_dict = json.loads(r)
allScopes = r_dict['allScopes']
for elem in allScopes:
if name == elem['name']:
return elem['name'], elem['id']
return None, None
# Example: getTZbyId("")
def get_tz_by_id(tzId):
r = nsxGet("/api/2.0/vdn/scopes/" + tzId, "json")
return json.loads(r)
# EXAMPLE:
# clusters is a list
# clusters = [{'objectId' : 'domain-c123'}, {'objectId' : 'domain-c321'}]
def createTz(name, clusters, description="", controlPlaneMode="HYBRID_MODE"):
jinja_vars = {'name' : name,
'description' : description,
'clusters' : clusters,
'controlPlaneMode' : controlPlaneMode}
dir = os.path.dirname(__file__)
nsx_tz_xml = os.path.join(dir, '../../templates/nsx_transportzone_create.j2')
data = render(nsx_tz_xml, jinja_vars)
return nsxPost("/api/2.0/vdn/scopes", data)
def updateTzByName(currName, clusters, newName=None, description=None, controlPlaneMode=None):
jinja_vars = {'objectId' : "",
'name' : newName,
'description' : description,
'clusters' : clusters,
'controlPlaneMode' : controlPlaneMode}
tzName, tzId = get_tz_id_by_name(currName)
jinja_vars['objectId'] = tzId
jinja_vars = removeEmptyParams(jinja_vars)
dir = os.path.dirname(__file__)
nsx_tz_xml = os.path.join(dir, '../../templates/nsx_transportzone_update.j2')
data = render(nsx_tz_xml, jinja_vars)
#print(data)
nsxPut("/api/2.0/vdn/scopes/" + tzId + "/attributes", data)
def delete_tz_by_name(name):
tzName, tzId = get_tz_id_by_name(name)
return nsxDelete("/api/2.0/vdn/scopes/" + tzId, "json")
def delete_tz_by_id(tzId):
return nsxDelete("/api/2.0/vdn/scopes/" + tzId, "json")
|
FibercorpLabs/FibercorpDevops
|
vmware/nsx/utils/nsx/TransportZone.py
|
Python
|
gpl-3.0
| 2,162
|
# the same method as version 2 in python
import numpy as np
import scipy.io.wavfile as wav
from scipy import signal
import matplotlib.pyplot as plt
from pylab import specgram
import os
import os.path
import cv2
#audiopath='Recordings/3D + small mic 6.wav'
audiopath='sound/A1.wav'
filename=audiopath[11:-4]
samplerate,sample = wav.read(audiopath)
if(sample.ndim>1):#scipy.io.wavfile
sample=sample[:,0]
seconds=len(sample)/samplerate
if seconds<7:
exit("audio too short")
result=specgram(sample, NFFT=256, Fs=samplerate, noverlap=64) #from pylab
spec,frqs,times=result[0],result[1],result[2]
print(seconds)
print(spec.shape)
spec=np.array(spec)
#cv2.plot(spec)
plt.show()
for i in range(0,114):
plt.figure(i)
plt.plot(abs(spec[8:30,i]))
plt.show()
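# Note (descriptive): pylab's specgram returns (spectrum, freqs, times, image);
# with NFFT=256 the one-sided spectrum has NFFT/2 + 1 = 129 frequency rows and
# one column per time segment, so spec[8:30, i] plots a low-frequency band of
# bins for segment i. The hard-coded range(0, 114) assumes the recording yields
# at least 114 segments.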
|
Sisqui/lungcheck_django
|
processing codes/tagging and model learning code/show.py
|
Python
|
gpl-3.0
| 810
|
## Code outside the data string, and the setup and action blocks is ignored
## If manually editing, you must reload the code. Delete the resource timestamp so kaithem knows it's new
__data__="""
continual: false
enable: true
once: true
priority: interactive
rate-limit: 0.0
resource-timestamp: 1641276823517748
resource-type: event
versions: {}
"""
__trigger__='False'
if __name__=='__setup__':
#This code runs once when the event loads. It also runs when you save the event during the test compile
#and may run multiple times when kaithem boots due to dependency resolution
__doc__=''
import numpy, time, threading, weakref, socket,logging,traceback,gc,copy,struct
logger = logging.getLogger("system.chandler")
universesLock = threading.RLock()
#Iterable, made from a copy of universes
module.universes={}
#MUTABLE
_universes={}
class Universe():
"Represents a lighting universe, similar to a DMX universe, but is not limited to DMX. "
def __init__(self, name,count=512,number=0):
for i in ":/[]()*\\`~!@#$%^&*=+|{}'\";<>,":
if i in name:
raise ValueError("Name cannot contain special characters except _")
self.name = name
self.hidden=True
#If local fading is disabled, the rendering tries to compress everything down to a set of fade commands.
#This is the time at which the current fade is supposed to end.
self.fadeEndTime = 0
#If False, lighting values don't fade in, they just jump straight to the target,
#For things like smart bulbs where we want to use the remote fade instead.
self.localFading = True
#Used by blend modes to request that the
#remote device do onboard interpolation
#The longest time requested by any layer is used
#The final interpolation time is the greater of
#This and the time determined by fadeEndTime
self.interpolationTime=0
#Let subclasses set these
if not hasattr(self,"status"):
self.status = "normal"
if not hasattr(self,"ok"):
self.ok = True
# name:weakref(fixture) for every fixture that is mapped to this universe
self.fixtures = {}
#Represents the telemetry data back from the physical device of this universe.
self.telemetry = {}
#Dict of all board ids that have already pushed a status update
self.statusChanged = {}
self.channels = {}
#Maps names to numbers, mostly for tagpoint universes.
if not hasattr(self,"channelNames"):
self.channelNames={}
self.groups ={}
self.values = numpy.array([0.0]*count,dtype="f4")
self.alphas = numpy.array([0.0]*count,dtype="f4")
#These channels should blend like Hue, which is normal blending but
#There's no "background" of zeros. If there's nothing "behind" it, we consider it
#100% opaque
#Type is bool
self.hueBlendMask = numpy.array([0.0]*count,dtype="?")
self.count = count
#Maps fine channel numbers to coarse channel numbers
self.fine_channels = {}
#Used for the caching. It's the layer we want to save as the background state before we apply.
#Calculated as either the last scene rendered in the stack or the first scene that requests a rerender that affects the universe
self.save_before_layer = (0,0)
#Reset in pre_render, indicates if we've not rendered a layer that we think is going to change soon
#so far in this frame
self.all_static = True
with module.lock:
with universesLock:
if name in _universes and _universes[name]():
gc.collect()
time.sleep(0.1)
gc.collect()
#We retry, because the universes are often temporarily cached as strong refs
if name in _universes and _universes[name]():
try:
_universes[name]().close()
except:
raise ValueError("Name "+name+ " is taken")
_universes[name] = weakref.ref(self)
module.universes = {i:_universes[i] for i in _universes if _universes[i]()}
#flag to apply all scenes, even ones not marked as needing rerender
self.full_rerender = False
#The priority, started of the top layer layer that's been applied to this scene.
self.top_layer= (0,0)
#This is the priority, started of the "saved" layer that's been cached so we don't
#Have to rerender it or anything below it.
self.prerendered_layer= (0,0)
#A copy of the state of the universe just after prerendered_layer was rendered, so we can go back
#and start from there without rerendering lower layers.
#The format is values,alphas
self.prerendered_data= ([0.0]*count,[0.0]*count)
#Maybe there might be an iteration error. But it's just a GUI convenience that
#A simple refresh solves, so ignore it.
try:
for i in module.boards:
i().pushUniverses()
except Exception as e:
print(e)
kaithem.message.post("/chandler/command/refreshFixtures", self.name)
self.refresh_scenes()
def __del__(self):
self.close()
def close(self):
with universesLock:
#Don't delete the object that replaced this
if self.name in _universes and (_universes[self.name]() is self):
del _universes[self.name]
module.universes = {i:_universes[i] for i in _universes if _universes[i]()}
def alreadyClosed(*a,**k):
raise RuntimeError("This universe has been stopped, possibly because it was replaced with a newer one")
self.onFrame = alreadyClosed
self.setStatus= alreadyClosed
self.refresh_scenes=alreadyClosed
self.reset_to_cache = alreadyClosed
self.reset = alreadyClosed
self.preFrame= alreadyClosed
self.save_prerendered=alreadyClosed
def setStatus(self,s,ok):
"Set the status shown in the gui. ok is a bool value that indicates if the object is able to transmit data to the fixtures"
#avoid pushing unneded statuses
if (self.status == s) and (self.ok == ok):
return
self.status = s
self.ok = ok
self.statusChanged = {}
def refresh_scenes(self):
"""Stop and restart all active scenes, because some caches might need to be updated
when a new universes is added
"""
kaithem.message.post("/chandler/command/refreshScenes",None)
def __del__(self):
#Do as little as possible in the undefined __del__ thread
kaithem.message.post("/chandler/command/refreshScenes",None)
def channelsChanged(self):
"Call this when fixtures are added, moved, or modified."
with module.lock:
self.fine_channels = {}
for i in self.channels:
fixture = self.channels[i]()
if not fixture:
continue
if not fixture.startAddress:
continue
data = fixture.channels[i-fixture.startAddress]
if (data[1]== "fine") and (i>1):
if len(data==2):
self.fine_channels[i]= i-1
else:
self.fine_channels[i]= fixture.startAddress+data[2]
def reset_to_cache(self):
"Remove all changes since the prerendered layer."
values,alphas = self.prerendered_data
self.values = copy.deepcopy(values)
self.alphas = copy.deepcopy(alphas)
self.top_layer = self.prerendered_layer
def save_prerendered(self, p, s):
"Save this layer as the cached layer. Called in the render functions"
self.prerendered_layer = (p,s)
self.prerendered_data = (copy.deepcopy(self.values),copy.deepcopy(self.alphas))
def reset(self):
"Reset all values to 0 including the prerendered data"
self.prerendered_layer = (0,0)
self.values = numpy.array([0.0]*self.count,dtype="f4")
self.alphas = numpy.array([0.0]*self.count,dtype="f4")
self.top_layer = (0,0)
def preFrame(self):
"Frame preprocessor, uses fixture-specific info, generally only called under lock"
#Assign fine channels their value based on the coarse channel
for i in self.fine_channels:
self.values[i] = (self.values[self.fine_channels[i]]%1)*255
def onFrame(self):
pass
def message(data):
"An enttec DMX message from a set of values"
data = numpy.maximum(numpy.minimum(data,255),0)
data = data.astype(numpy.uint8)
data = data.tobytes()[1:513]
return (b'\x7e\x06'+struct.pack('<H',len(data))+data+b'\xe7')
def rawmessage(data):
"An enttec open DMX message from a set of values"
data = numpy.maximum(numpy.minimum(data,255),0)
data = data.astype(numpy.uint8)
data = data.tobytes()[1:513]
#Remove the 0 position as DMX starts at 1
return (b'\0'+data)
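#Illustrative sketch, not part of the original module: what the two framings above
#should produce for a 512-slot frame of zeros (element 0 of the array is skipped
#because DMX slot numbering starts at 1). Byte counts are an assumption based on
#the [1:513] slice used above.
#   frame = numpy.zeros(513)
#   message(frame)    -> b'\x7e\x06' + struct.pack('<H',512) + b'\x00'*512 + b'\xe7'
#   rawmessage(frame) -> b'\x00'*513  (a leading byte, presumably the DMX start code, then 512 slots)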
class EnttecUniverse(Universe):
#Thanks to https://github.com/c0z3n/pySimpleDMX
    #I didn't actually use the code, but it was a very useful resource
#For protocol documentation.
def __init__(self,name,channels=128,portname="",framerate=44,number=0):
self.ok = False
self.number=number
self.status = "Disconnect"
self.statusChanged = {}
#Sender needs the values to be there for setup
self.values = numpy.array([0.0]*channels,dtype="f4")
self.sender = makeSender(DMXSender,weakref.ref(self),portname,framerate)
self.sender.connect()
Universe.__init__(self,name,channels)
self.hidden=False
def onFrame(self):
data = message(self.values)
self.sender.onFrame(data)
def __del__(self):
#Stop the thread when this gets deleted
self.sender.onFrame(None)
class DMXSender():
"""This object is used by the universe object to send data to the enttec adapter.
    It runs in its own thread because the frame rate might have nothing to do with
the rate at which the data actually gets rendered.
"""
def __init__(self,universe,port,framerate):
self.frame = threading.Event()
self.universe= universe
self.data = message(universe().values)
self.thread = threading.Thread(target =self.run)
self.thread.daemon = True
self.thread.name = "DMXSenderThread_"+self.thread.name
self.portname = port
self.framerate = float(framerate)
self.lock = threading.Lock()
self.port = None
self.connect()
self.thread.start()
def setStatus(self,s,ok):
try:
self.universe().setStatus(s,ok)
except:
pass
def connect(self):
#Different status message first time
try:
self.reconnect()
except Exception as e:
self.setStatus('Could not connect, '+str(e)[:100]+'...',False)
def reconnect(self,portlist=None):
"Try to reconnect to the adapter"
try:
import serial
if not self.portname:
import serial.tools.list_ports
p = portlist or serial.tools.list_ports.comports()
if p:
if len(p)>1:
self.setStatus('More than one device found, refusing to guess. Please specify a device.',False)
return
else:
p =p[0].device
else:
self.setStatus('No device found',False)
return
else:
p = self.portname
time.sleep(0.1)
try:
self.port.close()
except:
pass
self.port = serial.Serial(p,57600, timeout=1.0, write_timeout=1.0)
            #This is a flush to try to re-sync receivers that don't have any kind of timeout detection
#We do this by sending a frame where each value is the packet end code,
#Hoping that it lines up with the end of whatever unfinished data we don't know about.
self.setStatus('Found port, writing sync data',True)
for i in range(0,8):
self.port.write(message(numpy.array([231]*120)))
time.sleep(0.05)
self.port.write(message(numpy.zeros(max(128,len(self.universe().values)))))
time.sleep(0.1)
self.port.read(self.port.inWaiting())
time.sleep(0.05)
self.port.write(self.data)
self.setStatus('connected to '+p,True)
except Exception as e:
try:
self.setStatus('disconnected, '+str(e)[:100]+'...',False)
except:
pass
def run(self):
while 1:
try:
s = module.timefunc()
self.port.read(self.port.inWaiting())
x =self.frame.wait(1)
if not x:
continue
with self.lock:
if self.data is None:
try:
self.port.close()
except:
pass
return
self.port.write(self.data)
self.frame.clear()
time.sleep(max(((1.0/self.framerate)-(module.timefunc()-s)), 0))
except Exception as e:
try:
self.port.close()
except:
pass
try:
if self.data is None:
return
if self.port:
self.setStatus('disconnected, '+str(e)[:100]+'...',False)
self.port=None
print("Attempting reconnect")
#I don't remember why we retry twice here. But reusing the port list should reduce CPU a lot.
time.sleep(3)
import serial
portlist= serial.tools.list_ports.comports()
                    #reconnect is designed not to raise Exceptions, so if there's
#an error here it's probably because the whole scope is being cleaned
self.reconnect(portlist)
time.sleep(3)
self.reconnect(portlist)
time.sleep(1)
except:
print("Sender thread exiting")
print(traceback.format_exc())
return
def onFrame(self,data):
with self.lock:
self.data = data
self.frame.set()
class ArtNetUniverse(Universe):
def __init__(self,name,channels=128,address="255.255.255.255:6454",framerate=44,number=0):
self.ok = True
self.status = "OK"
self.number=number
self.statusChanged = {}
x = address.split("://")
if len(x)>1:
scheme = x[0]
else:
scheme=''
addr,port = x[-1].split(":")
port = int(port)
#Sender needs the values to be there for setup
#Channel 0 is a dummy to make math easier.
self.values = numpy.array([0.0]*(channels+1),dtype="f4")
self.sender = makeSender(ArtNetSender,weakref.ref(self),addr,port,framerate,scheme)
Universe.__init__(self,name,channels)
self.hidden=False
def onFrame(self):
data = (self.values)
self.sender.onFrame(data,None,self.number)
def __del__(self):
#Stop the thread when this gets deleted
self.sender.onFrame(None)
class TagpointUniverse(Universe):
"Used for outputting lighting to Kaithem's internal Tagpoint system"
def __init__(self,name,channels=128,tagpoints={},framerate=44,number=0):
self.ok = True
self.status = "OK"
self.number=number
self.statusChanged = {}
self.tagpoints=tagpoints
self.channelCount=channels
self.tagObjsByNum={}
self.claims = {}
self.hidden=False
self.channelNames={}
#Put a claim on all the tags
for i in self.tagpoints:
#One higher than default
try:
if not i.strip():
continue
x = i.split(':')
chname=''
try:
num = int(x[0].strip())
except:
num = len(self.claims)+1
chname = x[0].strip()
if len(x)==2:
chname = x[1].strip()
else:
if not chname:
chname = 'tp'+str(num)
tpn = self.tagpoints[i]
if tpn:
self.tagObjsByNum[num]=kaithem.tags[tpn]
self.claims[num]= kaithem.tags[tpn].claim(0,"Chandler_"+name, 50 if number < 2 else number)
self.channelNames[chname]=num
except Exception as e:
self.status="error, "+i+" "+ str(e)
logger.exception("Error related to tag point "+i)
print(traceback.format_exc())
event("board.error",traceback.format_exc())
#Sender needs the values to be there for setup
self.values = numpy.array([0.0]*channels,dtype="f4")
Universe.__init__(self,name,channels)
def onFrame(self):
for i in self.claims:
try:
x = float(self.values[i])
if x>-1:
if self.tagObjsByNum[i].min is not None and self.tagObjsByNum[i].min>=-10**14:
# Should the tag point have a range set, and should that range be smaller than some very large possible default
# it could be, map the value from our 0-255 scale to whatever the tag point's scale is.
if self.tagObjsByNum[i].max is not None and self.tagObjsByNum[i].max<=10**14:
x = x/255
x *= self.tagObjsByNum[i].max- self.tagObjsByNum[i].min
x+= self.tagObjsByNum[i].min
self.claims[i].set(x)
except:
rl_log_exc("Error in tagpoint universe")
print(traceback.format_exc())
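#Illustrative arithmetic for the scaling in TagpointUniverse.onFrame() above,
#assuming a hypothetical tag with min=0 and max=100: a channel value of 255 maps to
#255/255*(100-0)+0 = 100, and 127 maps to roughly 49.8. Tags without a sane
#min/max range just receive the raw 0-255 value.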
def makeSender(c,uref,*a):
return c(uref,*a)
class ArtNetSender():
"""This object is used by the universe object to send data to the enttec adapter.
It runs in it's own thread because the frame rate might have nothing to do with
the rate at which the data actually gets rendered.
"""
def __init__(self,universe,addr,port,framerate,scheme):
self.frame = threading.Event()
self.scheme=scheme
self.universe=universe
self.data = False
self.running = 1
#The last telemetry we didn't ignore
self.lastTelemetry = 0
if self.scheme == "pavillion":
def onBatteryStatus(v):
self.universe().telemetry['battery']=v
if self.lastTelemetry<(time.time()-10):
self.universe().statusChanged={}
def onConnectionStatus(v):
self.universe().telemetry['rssi']=v
if self.lastTelemetry<(time.time()-10):
self.universe().statusChanged={}
self.connectionTag = kaithem.tags["/devices/"+addr+".rssi"]
self._oncs = onConnectionStatus
self.connectionTag.subscribe(onConnectionStatus)
self.batteryTag = kaithem.tags["/devices/"+addr+".battery"]
self._onb = onBatteryStatus
self.batteryTag.subscribe(onBatteryStatus)
def run():
import time, traceback
interval = 1.1/self.framerate
while self.running:
try:
s = time.time()
x =self.frame.wait(interval)
if not x:
interval= min(60, interval*1.3)
else:
interval = 1.5/self.framerate
if self.data is False:
continue
with self.lock:
if self.data is None:
print("Stopping ArtNet Sender for "+self.addr)
return
#Here we have the option to use a Pavillion device
if self.scheme=="pavillion":
try:
addr=kaithem.devices[self.addr].data['address']
except:
time.sleep(3)
continue
else:
addr=self.addr
self.frame.clear()
try:
self.sock.sendto(self.data, (addr, self.port))
except:
time.sleep(5)
raise
time.sleep(max(((1.0/self.framerate)-(time.time()-s)), 0))
except Exception as e:
rl_log_exc("Error in artnet universe")
print(traceback.format_exc())
self.thread = threading.Thread(target =run)
self.thread.name = "ArtnetSenderThread_"+self.thread.name
self.thread.daemon = True
self.framerate = float(framerate)
self.lock = threading.Lock()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Bind to the server address
self.sock.bind(('',0))
self.sock.settimeout(1)
self.addr = addr
self.port = port
self.thread.start()
def __del__(self):
self.running=0
def setStatus(self,s,ok):
try:
self.universe().setStatus(s,ok)
except:
pass
def onFrame(self,data,physical = None, universe=0):
with self.lock:
if not (data is None):
#DMX starts at 1, don't send element 0 even though it exists.
p = b'Art-Net\x00\x00\x50\x00\x0E\0' + struct.pack("<BH", physical if not physical is None else universe, universe) +struct.pack(">H",len(data)) + (data.astype(numpy.uint8).tobytes()[1:])
self.data = p
else:
self.data =data
self.frame.set()
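#Illustrative breakdown of the ArtDMX packet assembled in onFrame() above; this is
#a reading of the struct.pack calls, not authoritative protocol documentation:
#   b'Art-Net\x00'          8 byte ID string
#   b'\x00\x50'             OpCode 0x5000 (ArtDMX), little-endian
#   b'\x00\x0E'             protocol version 14
#   b'\0'                   sequence (always 0 here)
#   pack("<B", physical)    physical port, falling back to the universe number
#   pack("<H", universe)    port address, little-endian
#   pack(">H", len(data))   length field, big-endian (length of the values array)
#   data[1:]                channel values; element 0 is dropped since DMX starts at 1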
class EnttecOpenUniverse(Universe):
#Thanks to https://github.com/c0z3n/pySimpleDMX
    #I didn't actually use the code, but it was a very useful resource
#For protocol documentation.
def __init__(self,name,channels=128,portname="",framerate=44,number=0):
self.ok = False
self.number=number
self.status = "Disconnect"
self.statusChanged = {}
#Sender needs the values to be there for setup
self.values = numpy.array([0.0]*channels,dtype="f4")
self.sender = makeDMXSender(weakref.ref(self),portname,framerate)
self.sender.connect()
Universe.__init__(self,name,channels)
self.hidden=False
def onFrame(self):
data = rawmessage(self.values)
self.sender.onFrame(data)
def __del__(self):
#Stop the thread when this gets deleted
self.sender.onFrame(None)
def makeDMXSender(uref, port, fr):
return RawDMXSender(uref,port, fr)
class RawDMXSender():
"""This object is used by the universe object to send data to the enttec adapter.
It runs in it's own thread because the frame rate might have nothing to do with
the rate at which the data actually gets rendered.
"""
def __init__(self,universe,port,framerate):
self.frame = threading.Event()
self.data = rawmessage(universe().values)
self.universe= universe
self.thread = threading.Thread(target =self.run)
self.thread.daemon = True
self.thread.name = "DMXSenderThread_"+self.thread.name
self.portname = port
self.framerate = float(framerate)
self.lock = threading.Lock()
self.port = None
self.connect()
self.thread.start()
def setStatus(self,s,ok):
try:
self.universe().setStatus(s,ok)
except:
pass
def connect(self):
#Different status message first time
try:
self.reconnect()
except Exception as e:
self.setStatus('Could not connect, '+str(e)[:100]+'...',False)
def reconnect(self):
"Try to reconnect to the adapter"
try:
import serial
if not self.portname:
import serial.tools.list_ports
p = serial.tools.list_ports.comports()
if p:
if len(p)>1:
self.setStatus('More than one device found, refusing to guess. Please specify a device.',False)
return
else:
p =p[0].device
else:
self.setStatus('No device found',False)
return
else:
p = self.portname
time.sleep(0.1)
try:
self.port.close()
except:
pass
self.port = serial.Serial(p,baudrate=250000, timeout=1.0, write_timeout=1.0,stopbits=2)
self.port.read(self.port.inWaiting())
time.sleep(0.05)
self.port.break_condition=True
time.sleep(0.0001)
self.port.break_condition=False
time.sleep(0.0003)
self.port.write(self.data)
self.port.flush()
self.setStatus('connected to '+p,True)
except Exception as e:
try:
self.setStatus('disconnected, '+str(e)[:100]+'...',False)
except:
pass
def run(self):
while 1:
try:
s = module.timefunc()
self.port.read(self.port.inWaiting())
x =self.frame.wait(0.1)
with self.lock:
if self.data is None:
try:
self.port.close()
except:
pass
return
self.port.break_condition=True
time.sleep(0.0001)
self.port.break_condition=False
time.sleep(0.0003)
self.port.write(self.data)
if x:
self.frame.clear()
time.sleep(max(((1.0/self.framerate)-(module.timefunc()-s)), 0))
except Exception as e:
try:
self.port.close()
except:
pass
try:
if self.data is None:
return
if self.port:
self.setStatus('disconnected, '+str(e)[:100]+'...',False)
self.port=None
                    #reconnect is designed not to raise Exceptions, so if there's
#an error here it's probably because the whole scope is being cleaned
time.sleep(1)
self.reconnect()
time.sleep(1)
self.reconnect()
time.sleep(1)
except:
return
def onFrame(self,data):
with self.lock:
self.data = data
self.frame.set()
import colorzero
colorTagDeviceUniverses = {}
addedTags = {}
discoverlock = threading.RLock()
def onDelTag(t,m):
if m in addedTags:
with discoverlock:
if m in addedTags:
del addedTags[m]
discoverColorTagDevices()
kaithem.message.subscribe("/system/tags/deleted", onDelTag)
def onAddTag(t,m):
if 'color' not in m and 'fade' not in m and 'light' not in m and 'bulb' not in m and 'colour' not in m:
return
discoverColorTagDevices()
kaithem.message.subscribe("/system/tags/created", onAddTag)
kaithem.message.subscribe("/system/tags/configured", onAddTag)
def discoverColorTagDevices():
global colorTagDeviceUniverses
u = {}
    # Devices may have "subdevices" represented by tag hierarchy, like:
# /devices/devname/subdevice.color
def handleSubdevice(dev,sd,c,ft):
if dev == sd:
name = dev
else:
name = dev+"."+sd
addedTags[c.name]=True
if ft:
addedTags[ft.name]=True
with universesLock:
if not name in _universes:
u[name] = ColorTagUniverse(name, c, ft)
else:
u[name]= _universes[name]()
for i in kaithem.devices:
d = kaithem.devices[i]
c = None
ft = None
last_sd= None
for j in sorted(d.tagPoints.keys()):
jn = d.tagPoints[j].name
#everything between the last slash and the dot, because the dot marks "property of"
subdevice = jn.split('/')[-1].split('.')[0]
if last_sd and c and not subdevice==last_sd:
handleSubdevice(i,subdevice,c,ft)
c = None
ft = None
last_sd=subdevice
t = d.tagPoints[j]
if t.subtype=='color':
c = t
elif t.subtype == "light_fade_duration":
ft = t
# Found something with a color!
if c:
handleSubdevice(i,subdevice,c,ft)
colorTagDeviceUniverses = u
class ColorTagUniverse(Universe):
"""
Detects devices with a "color" property having the subtype color.
"""
def __init__(self,name,tag, fadeTag=None):
self.ok = True
self.status = "Disconnect"
self.statusChanged = {}
Universe.__init__(self,name,4)
self.hidden=False
self.tag = tag
self.f = module.Fixture(self.name+".rgb",[['R','red'],['G','green'],['B','blue']])
self.f.assign(self.name, 1)
self.lock=threading.RLock()
self.lastColor = None
if fadeTag:
self.fadeTag = fadeTag
self.localFading= False
else:
self.fadeTag=None
self.localFading=True
def onFrame(self):
def f():
if self.lock.acquire(timeout=1):
try:
self._onFrame()
finally:
self.lock.release()
kaithem.misc.do(f)
def _onFrame(self):
c = colorzero.Color.from_rgb(self.values[1]/255, self.values[2]/255, self.values[3]/255).html
tm = time.monotonic()
# Only set the fade tag right before we are about to do something with the bulb, otherwise we would be adding a ton
# of useless writes
if not c == self.lastColor or not c == self.tag.value:
self.lastColor=c
if self.fadeTag:
t = max(self.fadeEndTime-module.timefunc(), self.interpolationTime, 0)
# Round to the nearest 20th of a second so we don't accidentally set the values more often than needed if it doesn't change
t= int(t*20)/20
self.fadeTag(t,tm, annotation="Chandler")
self.tag(c, tm, annotation="Chandler")
module.discoverColorTagDevices=discoverColorTagDevices
module.Universe = Universe
module.EnttecUniverse = EnttecUniverse
module.EnttecOpenUniverse = EnttecOpenUniverse
module.TagpointUniverse = TagpointUniverse
module.ArtNetUniverse = ArtNetUniverse
def eventAction():
pass
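#Illustrative sketch, not part of the original module: judging from the subclasses
#above, a minimal output backend sets the status fields, allocates self.values
#before calling Universe.__init__, and overrides onFrame(). The class name below
#is made up.
#   class NullUniverse(Universe):
#       def __init__(self, name, channels=128):
#           self.ok = True
#           self.status = "OK"
#           self.statusChanged = {}
#           self.values = numpy.array([0.0]*channels, dtype="f4")
#           Universe.__init__(self, name, channels)
#           self.hidden = True
#       def onFrame(self):
#           pass #a real backend would push self.values to the hardware here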
|
EternityForest/KaithemAutomation
|
kaithem/data/modules/Chandler/2_Universes.py
|
Python
|
gpl-3.0
| 36,442
|
import os
def createTile(posx, posy, id_num, world_id_num):
looplist = '1'
values=[]#Values are all of the lines of a prefab that have the vertex coords
f = open('tf2/prefab_template/spike_prefab.txt', 'r+')
    lines = f.readlines() #gathers each line of the prefab and numbers them
x1 = posx*512
y1 = posy*-512
z1 = 64
x2 = posx*512 + (512)
y2 = posy*-512
z2 = 64
x3 = posx*512 + (512)
y3 = posy*-512 + (-512)
z3 = 64
x4 = posx*512
y4 = posy*-512 + (-512)
z4 = 0
x5 = posx*512 + (512)
y5 = posy*-512 + (-512)
z5 = 0
x6 = posx*512 + (512)
y6 = posy*-512
z6 = 0
x7 = posx*512
y7 = posy*-512
z7 = 64
x8 = posx*512
y8 = posy*-512 + (-512)
z8 = 64
x9 = posx*512
y9 = posy*-512 + (-512)
z9 = 0
x10 = posx*512 + (512)
y10 = posy*-512
z10 = 0
x11 = posx*512 + (512)
y11 = posy*-512 + (-512)
z11 = 0
x12 = posx*512 + (512)
y12 = posy*-512 + (-512)
z12 = 64
x13 = posx*512 + (512)
y13 = posy*-512
z13 = 64
x14 = posx*512
y14 = posy*-512
z14 = 64
x15 = posx*512
y15 = posy*-512
z15 = 0
x16 = posx*512 + (512)
y16 = posy*-512 + (-512)
z16 = 0
x17 = posx*512
y17 = posy*-512 + (-512)
z17 = 0
x18 = posx*512
y18 = posy*-512 + (-512)
z18 = 64
x19 = posx*512 + (64)
y19 = posy*-512 + (-64)
z19 = 128
x20 = posx*512 + (448)
y20 = posy*-512 + (-64)
z20 = 128
x21 = posx*512 + (448)
y21 = posy*-512 + (-448)
z21 = 128
x22 = posx*512 + (64)
y22 = posy*-512 + (-448)
z22 = 64
x23 = posx*512 + (448)
y23 = posy*-512 + (-448)
z23 = 64
x24 = posx*512 + (448)
y24 = posy*-512 + (-64)
z24 = 64
x25 = posx*512 + (64)
y25 = posy*-512 + (-64)
z25 = 128
x26 = posx*512 + (64)
y26 = posy*-512 + (-448)
z26 = 128
x27 = posx*512 + (64)
y27 = posy*-512 + (-448)
z27 = 64
x28 = posx*512 + (448)
y28 = posy*-512 + (-64)
z28 = 64
x29 = posx*512 + (448)
y29 = posy*-512 + (-448)
z29 = 64
x30 = posx*512 + (448)
y30 = posy*-512 + (-448)
z30 = 128
x31 = posx*512 + (448)
y31 = posy*-512 + (-64)
z31 = 128
x32 = posx*512 + (64)
y32 = posy*-512 + (-64)
z32 = 128
x33 = posx*512 + (64)
y33 = posy*-512 + (-64)
z33 = 64
x34 = posx*512 + (448)
y34 = posy*-512 + (-448)
z34 = 64
x35 = posx*512 + (64)
y35 = posy*-512 + (-448)
z35 = 64
x36 = posx*512 + (64)
y36 = posy*-512 + (-448)
z36 = 128
x37 = posx*512 + (128)
y37 = posy*-512 + (-128)
z37 = 192
x38 = posx*512 + (384)
y38 = posy*-512 + (-128)
z38 = 192
x39 = posx*512 + (384)
y39 = posy*-512 + (-384)
z39 = 192
x40 = posx*512 + (128)
y40 = posy*-512 + (-384)
z40 = 128
x41 = posx*512 + (384)
y41 = posy*-512 + (-384)
z41 = 128
x42 = posx*512 + (384)
y42 = posy*-512 + (-128)
z42 = 128
x43 = posx*512 + (128)
y43 = posy*-512 + (-128)
z43 = 192
x44 = posx*512 + (128)
y44 = posy*-512 + (-384)
z44 = 192
x45 = posx*512 + (128)
y45 = posy*-512 + (-384)
z45 = 128
x46 = posx*512 + (384)
y46 = posy*-512 + (-128)
z46 = 128
x47 = posx*512 + (384)
y47 = posy*-512 + (-384)
z47 = 128
x48 = posx*512 + (384)
y48 = posy*-512 + (-384)
z48 = 192
x49 = posx*512 + (384)
y49 = posy*-512 + (-128)
z49 = 192
x50 = posx*512 + (128)
y50 = posy*-512 + (-128)
z50 = 192
x51 = posx*512 + (128)
y51 = posy*-512 + (-128)
z51 = 128
x52 = posx*512 + (384)
y52 = posy*-512 + (-384)
z52 = 128
x53 = posx*512 + (128)
y53 = posy*-512 + (-384)
z53 = 128
x54 = posx*512 + (128)
y54 = posy*-512 + (-384)
z54 = 192
x55 = posx*512 + (192)
y55 = posy*-512 + (-320)
z55 = 192
x56 = posx*512 + (256)
y56 = posy*-512 + (-256)
z56 = 448
x57 = posx*512 + (320)
y57 = posy*-512 + (-320)
z57 = 192
x58 = posx*512 + (320)
y58 = posy*-512 + (-192)
z58 = 192
x59 = posx*512 + (192)
y59 = posy*-512 + (-192)
z59 = 192
x60 = posx*512 + (192)
y60 = posy*-512 + (-320)
z60 = 192
x61 = posx*512 + (192)
y61 = posy*-512 + (-192)
z61 = 192
x62 = posx*512 + (256)
y62 = posy*-512 + (-256)
z62 = 448
x63 = posx*512 + (192)
y63 = posy*-512 + (-320)
z63 = 192
x64 = posx*512 + (320)
y64 = posy*-512 + (-192)
z64 = 192
x65 = posx*512 + (320)
y65 = posy*-512 + (-320)
z65 = 192
x66 = posx*512 + (256)
y66 = posy*-512 + (-256)
z66 = 448
x67 = posx*512 + (320)
y67 = posy*-512 + (-192)
z67 = 192
x68 = posx*512 + (256)
y68 = posy*-512 + (-256)
z68 = 448
x69 = posx*512 + (192)
y69 = posy*-512 + (-192)
z69 = 192
var_count = 69
values = "".join(lines)#converting list to string
ogvalues = "".join(lines)
normal_list,axislist,negaxislist,vaxis,uaxis=[],['1 0 0 1','0 1 0 1','0 0 1 1'],['-1 0 0 1','0 -1 0 1','0 0 -1 1'],0,0
def evaluate(coords):
dist_x,dist_y,dist_z = abs(coords[0]),abs(coords[1]),abs(coords[2]),
if dist_x >= dist_y and dist_x >= dist_z:
return axislist[0]
if dist_y >= dist_z:
return axislist[1]
return axislist[2]
def get_normal(coord_list):
vector_a = (coord_list[1][0]-coord_list[0][0],coord_list[1][1]-coord_list[0][1],coord_list[1][2]-coord_list[0][2])
vector_b = (coord_list[2][0]-coord_list[0][0],coord_list[2][1]-coord_list[0][1],coord_list[2][2]-coord_list[0][2])
normal = (vector_a[1]*vector_b[2]-vector_a[2]*vector_b[1],vector_a[2]*vector_b[0]-vector_a[0]*vector_b[2],vector_a[0]*vector_b[1]-vector_a[1]*vector_b[0])
return normal
for normal_num in range(1,var_count+1,3):
normal_list=[]
for i in range(3):
normal_list.append([])
for var in ["x", "y", "z"]:
normal_list[i].append(eval(var+str(normal_num+i)))
coords = get_normal(normal_list)
response = evaluate(coords)
if response == axislist[0]:
uaxis = axislist[1]
else:
uaxis = axislist[0]
if response == axislist[2]:
vaxis = negaxislist[1]
else:
vaxis = negaxislist[2]
values = values.replace('AXIS_REPLACE_U',uaxis,1)
values = values.replace('AXIS_REPLACE_V',vaxis,1)
for i in range(ogvalues.count("world_idnum")):
values = values.replace('world_idnum', str(world_id_num), 1)
world_id_num += 1
for var in ["x", "y", "z"]:
for count in range(1,var_count+1):
string = var + str(count)
string_var = str(eval(var + str(count)))
if var == "z":
values = values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
else:
values = values.replace(string + " ",string_var + " ")
for i in range(ogvalues.count('id_num')):
values = values.replace('id_num', str(id_num), 1)
id_num = id_num+1
return values, id_num, world_id_num
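#Illustrative usage sketch, not part of the original file: createTile() returns the
#filled-in prefab text plus the advanced id counters, so a caller building a map
#would thread the counters through successive calls. Variable names here are made up.
#   vmf_text, next_id, next_world_id = createTile(0, 0, 1, 1)
#   vmf_text2, next_id, next_world_id = createTile(1, 0, next_id, next_world_id)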
|
baldengineers/mapper
|
tf2/prefabs/spike_prefab.py
|
Python
|
gpl-3.0
| 7,295
|
import argparse, json, pickle, time, random
import os.path
import networkx as nx
import numpy as np
from mapserver.graph.builder import GraphBuilder
from mapserver.graph.contractor2 import GraphContractor
from mapserver.routing.router2 import Router
from networkx.readwrite import json_graph
import matplotlib.pyplot as plt
config = {}
with open('config.json', 'r') as file:
config = json.load(file)
files = [
'natchez',
'battleground',
'north_gso',
'mid_gso',
'greensboro',
'guilford',
# 'charlotte',
# 'nc',
]
def successors(graph, x):
x_pri = graph.node[x]['priority']
return [y for y in iter(graph.succ[x].keys()) if graph.node[y]['priority'] > x_pri]
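# Illustrative sketch, not part of the original script: successors() counts only the
# "upward" neighbours whose 'priority' attribute is higher than the node's own, which
# is what the mean-outdegree figures below measure. The toy graph is made up.
#   g = nx.DiGraph()
#   g.add_node(1, priority=1)
#   g.add_node(2, priority=5)
#   g.add_edge(1, 2)
#   successors(g, 1) -> [2]
#   successors(g, 2) -> []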
degree = []
count = 0
trips = 5000
number_of_nodes = [[], [], [], []]
mean_degree = [[], [], [], []]
edges_added = [[], [], [], []]
contract_times = [[], [], [], []]
route_times = [[], [], [], []]
repair_times = [[], [], [], []]
for f in files:
data_file_path = 'data/%s.osm' % f
# ------
# EDS5 regular
# ------
config['use_fast_contract'] = False
config['use_decision_graph'] = False
# build the graph
factory = GraphBuilder(config)
graph = factory.from_file(data_file_path, config['use_decision_graph'])
numed = graph.number_of_edges()
# contract the graph
C = GraphContractor(config, graph)
start = time.perf_counter()
C.order_nodes()
C.contract_graph()
C.set_flags()
end = time.perf_counter() - start
edges = C.G.number_of_edges()
edge_delta = edges-numed
number_of_nodes[0].append(graph.number_of_nodes())
mean_degree[0].append(np.mean([len(successors(graph, n)) for n in graph.nodes()]))
edges_added[0].append(edge_delta)
contract_times[0].append(end)
router = Router(graph)
times = []
coords = []
for x in range(0, trips):
(start_node, end_node) = random.sample(list(graph.nodes()), 2)
start = time.perf_counter()
router.route(start_node, end_node)
end = time.perf_counter() - start
coords.append((start_node, end_node))
times.append(end)
route_times[0].append(np.median(times))
start = time.perf_counter()
C.repair({})
end = time.perf_counter() - start
repair_times[0].append(end)
count += 1
# ------
# D5 regular
# ------
config['use_fast_contract'] = True
config['use_decision_graph'] = False
# build the graph
factory = GraphBuilder(config)
graph2 = factory.from_file(data_file_path, config['use_decision_graph'])
sim_data = factory.get_sim_data()
numed = graph2.number_of_edges()
# contract the graph
C = GraphContractor(config, graph2)
start = time.perf_counter()
C.order_nodes()
C.contract_graph()
C.set_flags()
end = time.perf_counter() - start
edges = C.G.number_of_edges()
edge_delta = edges-numed
number_of_nodes[1].append(graph2.number_of_nodes())
mean_degree[1].append(np.mean([len(successors(graph2, n)) for n in graph2.nodes()]))
edges_added[1].append(edge_delta)
contract_times[1].append(end)
router = Router(graph2)
times = []
for x in range(0, trips):
(start_node, end_node) = coords[x]
start = time.perf_counter()
router.route(start_node, end_node)
end = time.perf_counter() - start
times.append(end)
route_times[1].append(np.median(times))
start = time.perf_counter()
C.repair({})
end = time.perf_counter() - start
repair_times[1].append(end)
count += 1
# ------
# EDS5 decision
# ------
config['use_fast_contract'] = False
config['use_decision_graph'] = True
# build the graph
factory = GraphBuilder(config)
ref_graph3, graph3 = factory.from_file(data_file_path, config['use_decision_graph'])
sim_data = factory.get_sim_data()
numed = graph3.number_of_edges()
# contract the graph
C = GraphContractor(config, graph3, decision_map=sim_data['decision_route_map'], reference_graph=ref_graph3)
start = time.perf_counter()
C.order_nodes()
C.contract_graph()
C.set_flags()
end = time.perf_counter() - start
edges = C.G.number_of_edges()
edge_delta = edges-numed
number_of_nodes[2].append(graph3.number_of_nodes())
mean_degree[2].append(np.mean([len(successors(graph3, n)) for n in graph3.nodes()]))
edges_added[2].append(edge_delta)
contract_times[2].append(end)
router = Router(graph3, decision_map=sim_data['decision_route_map'])
times = []
coords = []
for x in range(0, trips):
(start_node, end_node) = random.sample(list(graph3.nodes()), 2)
start = time.perf_counter()
router.route(start_node, end_node)
end = time.perf_counter() - start
coords.append((start_node, end_node))
times.append(end)
route_times[2].append(np.median(times))
start = time.perf_counter()
C.repair({})
end = time.perf_counter() - start
repair_times[2].append(end)
count += 1
# ------
# D5 fast
# ------
config['use_fast_contract'] = True
config['use_decision_graph'] = True
# build the graph
factory = GraphBuilder(config)
ref_graph4, graph4 = factory.from_file(data_file_path, config['use_decision_graph'])
sim_data = factory.get_sim_data()
numed = graph4.number_of_edges()
# contract the graph
C = GraphContractor(config, graph4, decision_map=sim_data['decision_route_map'], reference_graph=ref_graph4)
start = time.perf_counter()
C.order_nodes()
C.contract_graph()
C.set_flags()
end = time.perf_counter() - start
edges = C.G.number_of_edges()
edge_delta = edges-numed
number_of_nodes[3].append(graph4.number_of_nodes())
mean_degree[3].append(np.mean([len(successors(graph4, n)) for n in graph4.nodes()]))
edges_added[3].append(edge_delta)
contract_times[3].append(end)
router = Router(graph4, decision_map=sim_data['decision_route_map'])
times = []
for x in range(0, trips):
(start_node, end_node) = coords[x]
start = time.perf_counter()
router.route(start_node, end_node)
end = time.perf_counter() - start
times.append(end)
route_times[3].append(np.median(times))
start = time.perf_counter()
C.repair({})
end = time.perf_counter() - start
repair_times[3].append(end)
count += 1
print('Nodes: %s' % number_of_nodes)
print('Mean Outdegree: %s' % mean_degree)
print('Shortcuts Added: %s' % edges_added)
print('Contraction Times: %s' % contract_times)
print('Route Times: %s' % route_times)
print('Repair Times: %s' % repair_times)
print('---')
print('Nodes: %s' % number_of_nodes)
print('Mean Outdegree: %s' % mean_degree)
print('Shortcuts Added: %s' % edges_added)
print('Contraction Times: %s' % contract_times)
print('Repair Times: %s' % repair_times)
print('Route Times: %s' % route_times)
|
bradleyhd/netsim
|
graph_degree.py
|
Python
|
gpl-3.0
| 6,610
|
# Copyright 2012 David Malcolm <dmalcolm@redhat.com>
# Copyright 2012 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import commands
import datetime
import glob
import os
import re
import shutil
from subprocess import check_output, Popen, PIPE
import sys
import webbrowser
from bugreporting import NewBug, BugReportDb
from makeindex import Index
def nvr_from_srpm_path(path):
filename = os.path.basename(path)
m = re.match('(.+)-(.+)-(.+).src.rpm', filename)
name, version, release = m.groups()
return name, version, release
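# Illustrative example, not from the original file: the greedy regexp above ends up
# splitting the filename on its last two dashes, so e.g.
#   nvr_from_srpm_path('SRPMS/gst-python-0.10.19-1.fc16.src.rpm')
# should give ('gst-python', '0.10.19', '1.fc16').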
def get_local_python_srpms():
"""
Extract a list of srpm names that require python 2 to build
"""
cmd = ['rpm',
'-q',
'--qf=%{sourcerpm}\\n',
'--whatrequires',
'libpython2.7.so.1.0()(64bit)']
out = check_output(cmd)
result = set()
for line in out.splitlines():
m = re.match('(.+)-(.+)-(.+)', line)
name, version, release = m.groups()
result.add(name)
return sorted(result)
#print(get_local_python_srpms())
#sys.exit(0)
"""
for srpmname in get_local_python_srpms():
cmd = ['mock',
'-r', 'fedora-16-x86_64',
'--scm-enable',
'--scm-option', 'method=git',
'--scm-option', 'git_get="git clone SCM_BRN git://pkgs.fedoraproject.org/SCM_PKG.git SCM_PKG"',
'--scm-option', 'package=%s' % srpmname,
'-v']
p = Popen(cmd)
p.communicate()
"""
def get_result_dir(srpmpath):
n, v, r = nvr_from_srpm_path(srpmpath)
resultdir = 'LOGS/%s-%s-%s' % (n, v, r)
return resultdir
def local_rebuild_of_srpm_in_mock(srpmpath, mockcfg):
"""
Rebuild the given SRPM locally within mock, injecting the cpychecker
code in RPM form; gathering the results to a subdir within "LOGS"
"""
def run_mock(commands, captureOut=False, captureErr=False):
cmds = ['mock', '-r', mockcfg, '--disable-plugin=ccache'] + commands
print('--------------------------------------------------------------')
print(' '.join(cmds))
print('--------------------------------------------------------------')
args = {}
if captureOut:
args['stdout'] = PIPE
if captureErr:
args['stderr'] = PIPE
p = Popen(cmds, **args)
out, err = p.communicate()
return out, err
resultdir = get_result_dir(srpmpath)
if os.path.exists(resultdir):
shutil.rmtree(resultdir)
os.mkdir(resultdir)
# Experimenting with the script is much faster if we remove the --init here:
if 1:
run_mock(['--init'])
run_mock(['--installdeps', srpmpath])
# Install the pre-built plugin:
run_mock(['install', PLUGIN_PATH]) # this doesn't work when cleaned: can't open state.log
# Copy up latest version of the libcpychecker code from this working copy
# overriding the copy from the pre-built plugin:
if 1:
for module in glob.glob('../../libcpychecker/*.py'):
HACKED_PATH='/usr/lib/gcc/x86_64-redhat-linux/4.6.2/plugin/python2/libcpychecker'
# FIXME: ^^ this will need changing
run_mock(['--copyin', module, HACKED_PATH])
# Override the real gcc/g++ with our fake ones, which add the necessary flags
# and then invokes the real one:
run_mock(['--chroot', 'mv /usr/bin/gcc /usr/bin/the-real-gcc'])
run_mock(['--chroot', 'mv /usr/bin/g++ /usr/bin/the-real-g++'])
run_mock(['--copyin', 'fake-gcc.py', '/usr/bin/gcc'])
run_mock(['--copyin', 'fake-g++.py', '/usr/bin/g++'])
# Rebuild src.rpm, using the script:
run_mock(['--rebuild', srpmpath,
'--no-clean',
])
# Extract build logs:
shutil.copy('/var/lib/mock/%s/result/build.log' % mockcfg,
resultdir)
# Scrape out *refcount-errors.html:
BUILD_PREFIX='/builddir/build/BUILD'
out, err = run_mock(['chroot',
'find %s -name *-refcount-errors.html' % BUILD_PREFIX],
captureOut=True)
for line in out.splitlines():
if line.endswith('-refcount-errors.html'):
# Convert from e.g.
# '/builddir/build/BUILD/gst-python-0.10.19/gst/.libs/gstmodule.c.init_gst-refcount-errors.html'
# to:
# 'gst-python-0.10.19/gst/.libs/gstmodule.c.init_gst-refcount-errors.html"
dstPath = line[len(BUILD_PREFIX)+1:]
# Place it within resultdir:
dstPath = os.path.join(resultdir, dstPath)
# Lazily construct directory hierarchy:
dirPath = os.path.dirname(dstPath)
if not os.path.exists(dirPath):
os.makedirs(dirPath)
# Copy the file from the chroot to our result location:
run_mock(['--copyout', line, dstPath])
PLUGIN_PATH='gcc-python2-plugin-0.9-1.fc16.x86_64.rpm'
MOCK_CONFIG='fedora-16-x86_64'
def prepare_bug_report(srpmpath, index):
srpmname, version, release = nvr_from_srpm_path(srpmpath)
resultdir = get_result_dir(srpmpath)
# Open local copy of results for manual inspection:
webbrowser.open(os.path.join(resultdir, 'index.html'))
today = datetime.date.today()
datestr = today.isoformat() # e.g. "2012-02-15"
# Emit shell commands to be run.
# These aren't yet done automatically, since we really ought to have the
# manual review from above.
mkdircmd = 'ssh dmalcolm@fedorapeople.org mkdir public_html/gcc-python-plugin/%(datestr)s' % locals()
print(mkdircmd)
scpcmd = 'scp -r %(resultdir)s dmalcolm@fedorapeople.org:public_html/gcc-python-plugin/%(datestr)s' % locals()
print(scpcmd)
reporturl = 'http://fedorapeople.org/~dmalcolm/gcc-python-plugin/%(datestr)s/%(srpmname)s-%(version)s-%(release)s/' % locals()
gitversion = commands.getoutput('git rev-parse HEAD')
# FIXME:
categorized_notes = ''
for sev, issues in index.iter_severities():
categorized_notes += ('Within the category "%s" the %i issues reported\n'
% (sev.title, len(issues)))
for er in issues:
categorized_notes += ('%s:%s:%s\n' % (er.filename, er.function, er.errmsg))
categorized_notes += '\n'
comment = """
Description of problem:
I've been writing an experimental static analysis tool to detect bugs commonly occurring within C Python extension modules:
https://fedorahosted.org/gcc-python-plugin/
http://gcc-python-plugin.readthedocs.org/en/latest/cpychecker.html
http://fedoraproject.org/wiki/Features/StaticAnalysisOfPythonRefcounts
I ran the latest version of the tool (in git master; post 0.9) on
%(srpmname)s-%(version)s-%(release)s.src.rpm, and it reports various errors.
You can see a list of errors here, triaged into categories (from most significant to least significant):
%(reporturl)s
I've manually reviewed the issues reported by the tool.
FIXME:
%(categorized_notes)s
There may of course be other bugs in my checker tool.
Hope this is helpful; let me know if you need help reading the logs that the tool generates - I know that it could use some improvement.
Version-Release number of selected component (if applicable):
%(srpmname)s-%(version)s-%(release)s
gcc-python-plugin post-0.9 git %(gitversion)s running the checker in an *f16* chroot
""" % locals()
bug = NewBug(product='Fedora',
version='rawhide',
component=srpmname,
summary=('Bugs found in %s-%s-%s using gcc-with-cpychecker'
' static analyzer' % (srpmname, version, release)),
comment=comment,
blocked=['cpychecker'],
bug_file_loc=reporturl)
bugurl = bug.make_url()
webbrowser.open(bugurl)
# Rebuild all src.rpm files found in "SRPMS" as necessary:
if 1:
for srpmpath in sorted(glob.glob('SRPMS/*.src.rpm')):
srpmname, version, release = nvr_from_srpm_path(srpmpath)
bugdb = BugReportDb()
# print(bugdb.bugs)
bugdb.print_summary()
print('Processing %s' % srpmname)
statuses = bugdb.find(srpmname)
if statuses:
for status in statuses:
print(status.get_status())
continue
resultdir = get_result_dir(srpmpath)
if not os.path.exists(resultdir):
local_rebuild_of_srpm_in_mock(srpmpath, MOCK_CONFIG)
index = Index(resultdir, 'Errors seen in %s' % resultdir)
prepare_bug_report(srpmpath, index)
break
# TODO:
# - automate grabbing the src.rpms; see e.g.:
# http://download.fedora.devel.redhat.com/pub/fedora/linux/releases/16/Everything/source/SRPMS/
# http://download.fedora.devel.redhat.com/pub/fedora/linux/development/17/source/SRPMS/
|
jasonxmueller/gcc-python-plugin
|
misc/fedora/mass-rebuild.py
|
Python
|
gpl-3.0
| 9,408
|
#! /usr/bin/env python3
import sys
import os
import json
import operator
import base64
from subprocess import call
from .ete2 import Tree, SeqGroup
from .taxonomy_util import TaxCode
class EpaJsonParser:
"""This class parses the RAxML-EPA json output file"""
def __init__(self, jsonfin):
f=open(jsonfin)
try:
self.jdata = json.load(f)
finally:
f.close()
def get_placement(self):
return self.jdata["placements"]
def get_tree(self):
return self.jdata["tree"]
def get_std_newick_tree(self):
tree = self.jdata["tree"]
tree = tree.replace("{", "[&&NHX:B=")
tree = tree.replace("}", "]")
return tree
def get_raxml_version(self):
return self.jdata["metadata"]["raxml_version"]
def get_raxml_invocation(self):
return self.jdata["metadata"]["invocation"]
class RefJsonChecker:
def __init__(self, jsonfin= None, jdata = None):
if jsonfin!=None:
f=open(jsonfin)
try:
self.jdata = json.load(f)
finally:
f.close()
else:
self.jdata = jdata
def check_field(self, fname, ftype, fvals=None, fopt=False):
if fname in self.jdata:
field = self.jdata[fname]
if isinstance(field, ftype):
if not fvals or field in fvals:
return True
else:
self.error = "Invalid value of field '%s': %s" % fname, repr(field)
return False
else:
self.error = "Field '%s' has a wrong type: %s (expected: %s)" % fname, type(field).__name__, ftype.__name__
return False
else:
if fopt:
return True
else:
self.error = "Field not found: %s" % fname
return False
def validate(self, ver = "1.6"):
nver = float(ver)
self.error = None
valid = self.check_field("tree", str) \
and self.check_field("raxmltree", str) \
and self.check_field("rate", float) \
and self.check_field("node_height", dict) \
and self.check_field("origin_taxonomy", dict) \
and self.check_field("sequences", list) \
and self.check_field("binary_model", str) \
and self.check_field("hmm_profile", list, fopt=True)
# check v1.1 fields, if needed
if nver >= 1.1:
valid = valid and \
self.check_field("ratehet_model", str) # ["GTRGAMMA", "GTRCAT"]
# check v1.2 fields, if needed
if nver >= 1.2:
valid = valid and \
self.check_field("tax_tree", str)
# check v1.3 fields, if needed
if nver >= 1.3:
valid = valid and \
self.check_field("taxcode", str, TaxCode.TAX_CODE_MAP)
# check v1.4 fields, if needed
if nver >= 1.4:
valid = valid \
and self.check_field("corr_seqid_map", dict) \
and self.check_field("corr_ranks_map", dict)
# check v1.5 fields, if needed
if nver >= 1.5:
valid = valid \
and self.check_field("merged_ranks_map", dict)
# "taxonomy" field has been renamed and its format was changed in v1.6
if nver >= 1.6:
valid = valid \
and self.check_field("branch_tax_map", dict)
else:
valid = valid \
and self.check_field("taxonomy", dict)
return (valid, self.error)
class RefJsonParser:
"""This class parses the EPA Classifier reference json file"""
def __init__(self, jsonfin):
f=open(jsonfin)
self.jdata = json.load(f)
f.close()
self.version = self.jdata["version"]
self.nversion = float(self.version)
self.corr_seqid = None
self.corr_ranks = None
self.corr_seqid_reverse = None
def validate(self):
jc = RefJsonChecker(jdata = self.jdata)
return jc.validate(self.version)
def get_version(self):
return self.version
def get_rate(self):
return self.jdata["rate"]
def get_node_height(self):
return self.jdata["node_height"]
def get_raxml_readable_tree(self, fout_name = None):
tree_str = self.jdata["raxmltree"]
#t.unroot()
if fout_name != None:
with open(fout_name, "w") as fout:
fout.write(tree_str)
else:
return tree_str
def get_reftree(self, fout_name = None):
tree_str = self.jdata["tree"]
if fout_name != None:
with open(fout_name, "w") as fout:
fout.write(tree_str)
else:
return Tree(tree_str, format=1)
def get_tax_tree(self):
t = Tree(self.jdata["tax_tree"], format=8)
return t
def get_outgroup(self):
t = Tree(self.jdata["outgroup"], format=9)
return t
def get_branch_tax_map(self):
if self.nversion >= 1.6:
return self.jdata["branch_tax_map"]
else:
return None
def get_taxonomy(self):
if self.nversion < 1.6:
return self.jdata["taxonomy"]
else:
return None
def get_origin_taxonomy(self):
return self.jdata["origin_taxonomy"]
def get_alignment(self, fout):
entries = self.jdata["sequences"]
with open(fout, "w") as fo:
for entr in entries:
fo.write(">%s\n%s\n" % (entr[0], entr[1]))
return fout
def get_ref_alignment(self):
entries = self.jdata["sequences"]
alignment = SeqGroup()
for entr in entries:
alignment.set_seq(entr[0], entr[1])
return alignment
def get_alignment_list(self):
return self.jdata["sequences"]
def get_sequences_names(self):
nameset = set()
entries = self.jdata["sequences"]
for entr in entries:
nameset.add(entr[0])
return nameset
def get_alignment_length(self):
entries = self.jdata["sequences"]
return len(entries[0][1])
def get_hmm_profile(self, fout):
if "hmm_profile" in self.jdata:
lines = self.jdata["hmm_profile"]
with open(fout, "w") as fo:
for line in lines:
fo.write(line)
return fout
else:
return None
def get_binary_model(self, fout):
model_str = self.jdata["binary_model"]
with open(fout, "wb") as fo:
fo.write(base64.b64decode(model_str))
def get_ratehet_model(self):
return self.jdata["ratehet_model"]
def get_pattern_compression(self):
if "pattern_compression" in self.jdata:
return self.jdata["pattern_compression"]
else:
return False
def get_taxcode(self):
return self.jdata["taxcode"]
def get_corr_seqid_map(self):
if "corr_seqid_map" in self.jdata:
self.corr_seqid = self.jdata["corr_seqid_map"]
else:
self.corr_seqid = {}
return self.corr_seqid
def get_corr_ranks_map(self):
if "corr_ranks_map" in self.jdata:
self.corr_ranks = self.jdata["corr_ranks_map"]
else:
self.corr_ranks = {}
return self.corr_ranks
def get_merged_ranks_map(self):
if "merged_ranks_map" in self.jdata:
self.merged_ranks = self.jdata["merged_ranks_map"]
else:
self.merged_ranks = {}
return self.merged_ranks
def get_metadata(self):
return self.jdata["metadata"]
def get_field_string(self, field_name):
if field_name in self.jdata:
return json.dumps(self.jdata[field_name], indent=4, separators=(',', ': ')).strip("\"")
else:
return None
def get_uncorr_seqid(self, new_seqid):
if not self.corr_seqid:
self.get_corr_seqid_map()
return self.corr_seqid.get(new_seqid, new_seqid)
def get_corr_seqid(self, old_seqid):
if not self.corr_seqid_reverse:
if not self.corr_seqid:
self.get_corr_seqid_map()
self.corr_seqid_reverse = dict((reversed(item) for item in list(self.corr_seqid.items())))
return self.corr_seqid_reverse.get(old_seqid, old_seqid)
def get_uncorr_ranks(self, ranks):
if not self.corr_ranks:
self.get_corr_ranks_map()
uncorr_ranks = list(ranks)
for i in range(len(ranks)):
uncorr_ranks[i] = self.corr_ranks.get(ranks[i], ranks[i])
return uncorr_ranks
class RefJsonBuilder:
"""This class builds the EPA Classifier reference json file"""
def __init__(self, old_json=None):
if old_json:
self.jdata = old_json.jdata
else:
self.jdata = {}
self.jdata["version"] = "1.6"
# self.jdata["author"] = "Jiajie Zhang"
def set_branch_tax_map(self, bid_ranks_map):
self.jdata["branch_tax_map"] = bid_ranks_map
def set_origin_taxonomy(self, orig_tax_map):
self.jdata["origin_taxonomy"] = orig_tax_map
def set_tax_tree(self, tr):
self.jdata["tax_tree"] = tr.write(format=8)
def set_tree(self, tr):
self.jdata["tree"] = tr
self.jdata["raxmltree"] = Tree(tr, format=1).write(format=5)
def set_outgroup(self, outgr):
self.jdata["outgroup"] = outgr.write(format=9)
def set_sequences(self, seqs):
self.jdata["sequences"] = seqs
def set_hmm_profile(self, fprofile):
with open(fprofile) as fp:
lines = fp.readlines()
self.jdata["hmm_profile"] = lines
def set_rate(self, rate):
self.jdata["rate"] = rate
def set_nodes_height(self, height):
self.jdata["node_height"] = height
def set_binary_model(self, model_fname):
with open(model_fname, "rb") as fin:
model_str = base64.b64encode(fin.read())
self.jdata["binary_model"] = model_str.decode()
def set_ratehet_model(self, model):
self.jdata["ratehet_model"] = model
def set_pattern_compression(self, value):
self.jdata["pattern_compression"] = value
def set_taxcode(self, value):
self.jdata["taxcode"] = value
def set_corr_seqid_map(self, seqid_map):
self.jdata["corr_seqid_map"] = seqid_map
def set_corr_ranks_map(self, ranks_map):
self.jdata["corr_ranks_map"] = ranks_map
def set_merged_ranks_map(self, merged_ranks_map):
self.jdata["merged_ranks_map"] = merged_ranks_map
def set_metadata(self, metadata):
self.jdata["metadata"] = metadata
def dump(self, out_fname):
self.jdata.pop("fields", 0)
self.jdata["fields"] = list(self.jdata.keys())
with open(out_fname, "w") as fo:
json.dump(self.jdata, fo, indent=4, sort_keys=True)
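# Illustrative usage sketch, not part of the original module; the file name below is
# made up and assumed to be a reference json produced by RefJsonBuilder.dump():
#   parser = RefJsonParser("ref.json")
#   valid, err = parser.validate()
#   if valid:
#       tree = parser.get_reftree()
#       alignment = parser.get_ref_alignment()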
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: ./json_util.py jsonfile")
sys.exit()
    jc = RefJsonChecker(jsonfin = sys.argv[1])
    valid, error = jc.validate()
    if valid:
        print("The json file is OK for EPA-classifier")
    else:
        print("!!!Invalid json file!!!")
        print(error)
|
amkozlov/sativa
|
epac/json_util.py
|
Python
|
gpl-3.0
| 11,651
|
import types, string, pprint, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList, startAt=0):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = startAt
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
setattr(self, attr, self.lookup[attr])
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
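# Illustrative usage sketch, not part of the original file; names are made up:
#   Colors = Enumeration("Colors", ["RED", "GREEN", ("BLUE", 10)])
#   Colors.RED -> 0, Colors.GREEN -> 1, Colors.BLUE -> 10
#   Colors.whatis(10) -> "BLUE"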
|
fsav/diffrev
|
generation/wikidpad_formatter/Enum.py
|
Python
|
gpl-3.0
| 1,381
|
import pytest
from feeluown.media import Media
@pytest.fixture
def media():
return Media('zzz://xxx.yyy',
http_headers={'referer': 'http://xxx.yyy'})
def test_media_copy(media):
media2 = Media(media)
assert media2.http_headers == media.http_headers
def test_media_http_headers(media):
assert 'referer' in media.http_headers
|
cosven/FeelUOwn
|
tests/test_media.py
|
Python
|
gpl-3.0
| 364
|
#!/usr/bin/env python3
"""
Math library for calculator.
"""
__author__ = "Mark Birger, Daniil Khudiakov, Martin Knotek"
__date__ = "26 Apr 2015"
__credits__ = ["Mark Birger", "Daniil Khudiakov", "Martin Knotek"]
__license__ = "GNU General Public License v3.0"
__version__ = "1.0"
__maintainer__ = "Mark Birger"
__status__ = "Development"
import math
import re
MATH_NAMESPACE = vars(math).copy()
MATH_NAMESPACE['__builtins__'] = None
DIGIT = r"([\-\+]?\d+(\.\d+)?)"
LEFT = r"[\(\~]"
RIGHT = r"[\)\!]"
OPERANDS = r"[\+\-\*\/\^\%]"
DIG = r"\d+(\.\d+)?"
VALUE = LEFT+r"*"+DIGIT+RIGHT+r"*"
# TODO: expression for substitution feature
# TAG = r"(\#[1-9][0-9]?)"
# DIG = r"\d+(\.\d+)?"
# VALUE = LEFT+r"*("+DIGIT+r"|"+TAG+r")"+RIGHT+r"*"
UNIT = r"("+VALUE+r"|("+OPERANDS+VALUE+r"))*"
REGEX_ALL = r"^"+VALUE+r"("+OPERANDS+VALUE+r")*$"
VALID_PATTERN = re.compile(REGEX_ALL)
def evaluate(expression, story):
"""
Function evaluates literal. Full syntax description in user manual.
@param expression string with an expression
    @param story array with previous calculations (TODO)
@result string with result or None if input is invalid
"""
if expression is None:
return None
expression = expression
namespace = MATH_NAMESPACE
namespace["evaluate"] = evaluate
namespace["story"] = story
try:
if VALID_PATTERN.match(expression):
expression = "("+expression+")"
while True:
expression = re.sub(
r'(\~('+DIGIT+'))', "("+r'\1'+")", expression)
expression = re.sub(r'('+DIG+'!)', "("+r'\1'+")", expression)
expression = re.sub(
r'('+DIG+r'\^('+DIGIT+'))', "("+r'\1'+")", expression)
oldexpr = expression
subexpr = re.sub(r'(.*)(\([^\(\)]+\))(.*)', r'\2', expression)
subexpr = re.sub(r'\~('+DIGIT+')', "floor("+r'\1'+")", subexpr)
subexpr = re.sub(
r'('+DIG+r')\^('+DIGIT+')',
"pow("+r'\1'+","+r'\3'+")", subexpr)
subexpr = re.sub(r'('+DIG+')!', "factorial("+r'\1'+")", subexpr)
# TODO: expression for substitution feature
# substitution for story
# if re.findall(r'\#[1-9][0-9]?', subexpr):
# namespace["story"] = [None] + \
# [float(i[1]) if i[1] is not None else i[1] for i in story]
# print(namespace["story"])
# print(namespace["story"][1])
# subexpr=re.sub(r'\#([1-9][0-9]?)',
# r'evaluate(story[\1], story)',subexpr)
# print("INPUT EVAL:", subexpr,end=" ")
subresult = eval(subexpr, namespace)
subresult = int(subresult) \
if int(subresult) == float(subresult) \
else round(float(subresult), 8)
expression = re.sub(
r'(.*)(\([^\(\)]+\))(.*)',
r'\1'+"\""+str(subresult)+"\""+r'\3', expression)
expression = re.sub(
"\""+str(subresult)+"\"",
str(subresult), expression)
if oldexpr == expression:
break
# TODO: expression for substitution feature
# if re.findall(r'\#[1-9][0-9]?', expression):
# print(namespace["story"])
# namespace["story"] = [None] + [float(i[1]) \
# if i[1] is not None else i[1] for i in story]
# print(namespace["story"])
# expression=re.sub(r'\#([1-9][0-9]?)', \
# r'evaluate(story[\1])',expression)
result = eval(expression, namespace)
result = int(result) if int(result) == float(result)\
else round(float(result), 8)
return str(result)
return None
except Exception:
return None
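# Illustrative usage sketch, not part of the original module; results are what the
# rules above should produce (whole numbers come back without a decimal part):
#   evaluate("2+3*4", []) -> "14"
#   evaluate("3!", []) -> "6"
#   evaluate("2+abc", []) -> None  (rejected by VALID_PATTERN)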
|
kusha/calculator
|
mathlib.py
|
Python
|
gpl-3.0
| 4,002
|
#/* bigbob11.cc
# * Shows gripper, and how it works.
# * K. Nickels 7/24/13
# */
import sys, os
sys.path.append('/usr/local/lib/python2.7/site-packages/')
sys.path.append('/usr/local/lib64/python2.7/site-packages/')
from playerc import *
import math,time
def gripper_state (state):
return {
PLAYER_GRIPPER_STATE_OPEN: 'open',
PLAYER_GRIPPER_STATE_CLOSED: 'closed',
PLAYER_GRIPPER_STATE_MOVING: 'moving',
PLAYER_GRIPPER_STATE_ERROR: 'error',
}.get(state,'unknown')
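# Illustrative example, not part of the original script: gripper_state() just maps
# the playerc gripper-state constants to readable strings, e.g.
#   gripper_state(PLAYER_GRIPPER_STATE_OPEN) -> 'open'
#   gripper_state(-1) -> 'unknown'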
# Make proxies for Client, Gripper, and Position2d
robot = playerc_client(None, 'localhost', 6665)
if robot.connect():
raise Exception(playerc_error_str())
gp = playerc_gripper(robot,0)
if gp.subscribe(PLAYERC_OPEN_MODE):
raise Exception(playerc_error_str())
p = playerc_position2d(robot,0)
if p.subscribe(PLAYERC_OPEN_MODE):
raise Exception(playerc_error_str())
# /* read from the proxies */
gp.get_geom()
robot.read()
gp.printout("state of gripper");
print "Number of breakbeams: %d" % gp.num_beams
# start moving
p.set_cmd_vel(0.1, 0.0, 0.0, 1)
# Keep going till you see something
while(not gp.beams):
robot.read()
print "Gripper is", gripper_state(gp.state)
while (not gp.beams):
robot.read()
# print "%d " % gp.beams
print "."
print "Arrived at object."
print "Gripper is", gripper_state(gp.state)
# /* stop and close gripper */
print "Stop and close gripper..."
print "Gripper is ", gripper_state(gp.state)
p.set_cmd_vel(0.0, 0.0, 0, 1)
gp.close_cmd()
while (gp.state != PLAYER_GRIPPER_STATE_CLOSED):
robot.read()
# print "Gripper is ", gripper_state(gp.state)
print "Gripper is ", gripper_state(gp.state)
# /* Note - in stage there is a strange bug drawing the paddles on closing
# * the first time.
# */
# /* drive around with your box for a while */
p.set_cmd_vel(-0.1, 0.0, math.radians(30), 1)
time.sleep(2)
# /* Now drop the box and speed up */
p.set_cmd_vel(-0.5, 0.0, 0, 1)
gp.open_cmd()
time.sleep(2)
# Now stop
p.set_cmd_vel(0.0, 0.0, 0.0, 1)
# /* Shutdown */
gp.unsubscribe()
p.unsubscribe()
gp.destroy()
p.destroy()
robot.disconnect()
robot.destroy()
|
lsa-pucrs/Player-Stage-Manual
|
code/Ch9.3/bigbob11_c.py
|
Python
|
gpl-3.0
| 2,123
|
from django.apps import AppConfig
class FragTrackerConfig(AppConfig):
name = 'frag_tracker'
|
MariusLauge/dnd_tracker
|
frag_tracker/apps.py
|
Python
|
gpl-3.0
| 98
|
#!/usr/bin/env python
import sys
import re
from setuptools import setup
from npm2spec import __version__
description = "Small library to help you generate spec file for npm project."
long_description = """
npm2spec makes your life easier when packaging npm projects for Fedora.
"""
download_url = "http://pypi.python.org/packages/source/p/npm2spec/npm2spec-%s.tar.gz" % __version__
requirements = [
'requests',
'jinja2',
'sh',
]
try:
import argparse
except ImportError:
requirements.append('argparse')
setup(
name='npm2spec',
version=__version__,
description=description,
author="Ralph Bean",
author_email="rbean@redhat.com",
maintainer="Ralph Bean",
maintainer_email="rbean@redhat.com",
url="http://github.com/ralphbean/npm2spec",
license="GPLv3+",
long_description=long_description,
download_url=download_url,
packages=['npm2spec'],
include_package_data=True,
install_requires=requirements,
entry_points="""
[console_scripts]
npm2spec = npm2spec:main
""",
classifiers = [
"Programming Language :: Python",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Environment :: Console",
],
)
|
ralphbean/npm2spec
|
setup.py
|
Python
|
gpl-3.0
| 1,393
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# timezone.py
#
# Copyright © 2013-2017 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Timezone screen """
import os
import multiprocessing
import queue
import urllib.request
import urllib.error
import time
import logging
import hashlib
import misc.tz as tz
import misc.extra as misc
import misc.timezonemap as timezonemap
from pages.gtkbasebox import GtkBaseBox
if __debug__:
def _(x): return x
NM = 'org.freedesktop.NetworkManager'
NM_STATE_CONNECTED_GLOBAL = 70
class Timezone(GtkBaseBox):
""" Timezone screen """
def __init__(self, params, prev_page="location", next_page="keymap"):
super().__init__(self, params, "timezone", prev_page, next_page)
self.map_window = self.ui.get_object('timezone_map_window')
self.combobox_zone = self.ui.get_object('comboboxtext_zone')
self.combobox_region = self.ui.get_object('comboboxtext_region')
# Show regions in three columns
self.combobox_region.set_wrap_width(3)
self.tzdb = tz.Database()
self.timezone = None
# This is for populate_cities
self.old_zone = None
# Autotimezone process will store detected coords in this queue
self.auto_timezone_coords = multiprocessing.Queue()
# Process to try to determine timezone.
self.autodetected_coords = None
self.start_auto_timezone_process()
# Setup window
self.tzmap = timezonemap.TimezoneMap()
self.tzmap.connect('location-changed', self.on_location_changed)
# Strip .UTF-8 from locale, icu doesn't parse it
self.locale = os.environ['LANG'].rsplit('.', 1)[0]
self.map_window.add(self.tzmap)
self.tzmap.show()
def translate_ui(self):
""" Translates all ui elements """
label = self.ui.get_object('label_zone')
txt = _("Zone:")
label.set_markup(txt)
label = self.ui.get_object('label_region')
txt = _("Region:")
label.set_markup(txt)
label = self.ui.get_object('label_ntp')
txt = _("Use Network Time Protocol (NTP) for clock synchronization")
label.set_markup(txt)
self.header.set_subtitle(_("Select Your Timezone"))
def on_location_changed(self, tzmap, tz_location):
""" User changed its location """
# loc = self.tzdb.get_loc(self.timezone)
if not tz_location:
self.timezone = None
self.forward_button.set_sensitive(False)
else:
self.timezone = tz_location.get_property('zone')
logging.info("Location changed to : %s", self.timezone)
self.update_comboboxes(self.timezone)
self.forward_button.set_sensitive(True)
def update_comboboxes(self, timezone):
""" Location has changed, update comboboxes """
zone, region = timezone.split('/', 1)
self.select_combobox_item(self.combobox_zone, zone)
self.populate_cities(zone)
self.select_combobox_item(self.combobox_region, region)
@staticmethod
def select_combobox_item(combobox, item):
""" Make combobox select an item """
tree_model = combobox.get_model()
tree_iter = tree_model.get_iter_first()
while tree_iter is not None:
value = tree_model.get_value(tree_iter, 0)
if value == item:
combobox.set_active_iter(tree_iter)
tree_iter = None
else:
tree_iter = tree_model.iter_next(tree_iter)
def set_timezone(self, timezone):
""" Set timezone in tzmap """
if timezone:
self.timezone = timezone
res = self.tzmap.set_timezone(timezone)
# res will be False if the timezone is unrecognised
self.forward_button.set_sensitive(res)
def on_zone_combobox_changed(self, widget):
""" Zone changed """
new_zone = self.combobox_zone.get_active_text()
if new_zone is not None:
self.populate_cities(new_zone)
def on_region_combobox_changed(self, widget):
""" Region changed """
new_zone = self.combobox_zone.get_active_text()
new_region = self.combobox_region.get_active_text()
if new_zone is not None and new_region is not None:
new_timezone = "{0}/{1}".format(new_zone, new_region)
# Only set timezone if it has changed :p
if self.timezone != new_timezone:
self.set_timezone(new_timezone)
def populate_zones(self):
""" Get all zones and fill our model """
zones = []
for loc in self.tzdb.locations:
zone = loc.zone.split('/', 1)[0]
if zone not in zones:
zones.append(zone)
zones.sort()
tree_model = self.combobox_zone.get_model()
tree_model.clear()
for zone in zones:
tree_model.append([zone, zone])
def populate_cities(self, selected_zone):
""" Get all cities and populate our model """
if self.old_zone != selected_zone:
regions = []
for loc in self.tzdb.locations:
zone, region = loc.zone.split('/', 1)
if zone == selected_zone:
regions.append(region)
regions.sort()
tree_model = self.combobox_region.get_model()
tree_model.clear()
for region in regions:
tree_model.append([region, region])
self.old_zone = selected_zone
def prepare(self, direction):
""" Prepare screen before showing it """
self.translate_ui()
self.populate_zones()
self.timezone = None
self.forward_button.set_sensitive(False)
if self.autodetected_coords is None:
try:
self.autodetected_coords = self.auto_timezone_coords.get(
False, timeout=20)
except queue.Empty:
logging.warning("Can't autodetect timezone coordinates")
if self.autodetected_coords:
coords = self.autodetected_coords
try:
latitude = float(coords[0])
longitude = float(coords[1])
timezone = self.tzmap.get_timezone_at_coords(
latitude, longitude)
self.set_timezone(timezone)
self.forward_button.set_sensitive(True)
except ValueError as value_error:
self.autodetected_coords = None
logging.warning(
"Can't autodetect timezone coordinates: %s", value_error)
self.show_all()
def start_auto_timezone_process(self):
""" Starts timezone thread """
proc = AutoTimezoneProcess(self.auto_timezone_coords, self.settings)
proc.daemon = True
proc.name = "timezone"
self.process_list.append(proc)
# self.global_process_queue.put(proc)
proc.start()
@staticmethod
def log_location(loc):
""" Log selected location """
logging.debug("timezone human zone: %s", loc.human_zone)
logging.debug("timezone country: %s", loc.country)
logging.debug("timezone zone: %s", loc.zone)
logging.debug("timezone human country: %s", loc.human_country)
if loc.comment:
logging.debug("timezone comment: %s", loc.comment)
if loc.latitude:
logging.debug("timezone latitude: %s", loc.latitude)
if loc.longitude:
logging.debug("timezone longitude: %s", loc.longitude)
def store_values(self):
""" The user clicks 'next' """
loc = self.tzdb.get_loc(self.timezone)
if loc:
self.settings.set("timezone_zone", loc.zone)
self.settings.set("timezone_human_zone", loc.human_zone)
self.settings.set("timezone_country", loc.country)
self.settings.set("timezone_human_country", loc.human_country)
if loc.comment:
self.settings.set("timezone_comment", loc.comment)
else:
self.settings.set("timezone_comment", "")
if loc.latitude:
self.settings.set("timezone_latitude", loc.latitude)
else:
self.settings.set("timezone_latitude", "")
if loc.longitude:
self.settings.set("timezone_longitude", loc.longitude)
else:
self.settings.set("timezone_longitude", "")
# Logs timezone info
self.log_location(loc)
# This way process.py will know that all info has been entered
self.settings.set("timezone_done", True)
if self.settings.get('use_timesyncd'):
logging.debug(
"Cnchi will setup network time using systemd-timesyncd")
else:
logging.debug("Cnchi won't setup network time")
return True
def on_switch_ntp_activate(self, ntp_switch):
""" activated/deactivated ntp switch """
self.settings.set('use_timesyncd', ntp_switch.get_active())
class AutoTimezoneProcess(multiprocessing.Process):
""" Thread that asks our server for user's location """
def __init__(self, coords_queue, settings):
super(AutoTimezoneProcess, self).__init__()
self.coords_queue = coords_queue
self.settings = settings
def run(self):
""" main thread method """
# Calculate logo hash
logo = "data/images/antergos/antergos-logo-mini2.png"
logo_path = os.path.join(self.settings.get("cnchi"), logo)
with open(logo_path, "rb") as logo_file:
logo_bytes = logo_file.read()
logo_hasher = hashlib.sha1()
logo_hasher.update(logo_bytes)
logo_digest = logo_hasher.digest()
# Wait until there is an Internet connection available
if not misc.has_connection():
logging.warning(
"Can't get network status. Cnchi will try again in a moment")
while not misc.has_connection():
time.sleep(4) # Wait 4 seconds and try again
logging.debug("A working network connection has been detected.")
# Do not start looking for our timezone until we've reached the
# language screen (welcome.py sets timezone_start to true when
# next is clicked)
while not self.settings.get('timezone_start'):
time.sleep(2)
# OK, now get our timezone
logging.debug("We have connection. Let's get our timezone")
try:
url = urllib.request.Request(
url="http://geo.antergos.com",
data=logo_digest,
headers={"User-Agent": "Reborn Installer", "Connection": "close"})
with urllib.request.urlopen(url) as conn:
coords = conn.read().decode('utf-8').strip()
if coords == "0 0":
# Sometimes server returns 0 0, we treat it as an error
coords = None
except Exception as ex:
template = "Error getting timezone coordinates. An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
logging.error(message)
coords = None
if coords:
coords = coords.split()
logging.debug(
_("Timezone (latitude %s, longitude %s) detected."),
coords[0],
coords[1])
self.coords_queue.put(coords)
|
keeganmilsten/Reborn-OS
|
Cnchi/timezone.py
|
Python
|
gpl-3.0
| 12,445
|
(S'8e52acbb4634097ec5461a6478acf23c'
p1
(ihappydoclib.parseinfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
(dp6
tp7
sS'_import_info'
p8
(ihappydoclib.parseinfo.imports
ImportInfo
p9
(dp10
S'_named_imports'
p11
(dp12
S'frowns.Depict.MoleculeDrawer'
p13
(lp14
S'*'
asS'Tkinter'
p15
(lp16
S'*'
asS'frowns'
p17
(lp18
S'MDL'
p19
assS'_straight_imports'
p20
(lp21
sbsS'_filename'
p22
S'../python/frowns/test/test_drawables.py'
p23
sS'_docstring'
p24
S''
sS'_name'
p25
S'test_drawables'
p26
sS'_parent'
p27
NsS'_comment_info'
p28
(dp29
sS'_configuration_values'
p30
(dp31
S'include_comments'
p32
I1
sS'cacheFilePrefix'
p33
S'.happydoc.'
p34
sS'useCache'
p35
I1
sS'docStringFormat'
p36
S'StructuredText'
p37
ssS'_class_info'
p38
g5
sS'_function_info'
p39
g6
sS'_comments'
p40
S''
sbt.
|
tuffery/Frog2
|
frowns/test/.happydoc.test_drawables.py
|
Python
|
gpl-3.0
| 787
|
# Auto-generated 2021-07-02T10:20:07.035522
from collections import OrderedDict
driver_info = OrderedDict([
('cameras.picam', {
'params': ['model', 'serial'],
'classes': ['PicamCamera'],
'imports': ['nicelib'],
}),
('cameras.pixelfly', {
'params': ['number'],
'classes': ['Pixelfly'],
'imports': ['nicelib', 'win32event'],
}),
('cameras.tsi', {
'params': ['number', 'serial'],
'classes': ['TSI_Camera'],
'imports': ['cffi'],
}),
('cameras.uc480', {
'params': ['id', 'model', 'serial'],
'classes': ['UC480_Camera'],
'imports': ['nicelib >= 0.5', 'pywin32'],
}),
('daq.ni', {
'params': ['model', 'name', 'serial'],
'classes': ['NIDAQ'],
'imports': ['nicelib >= 0.5'],
}),
('funcgenerators.agilent', {
'params': ['visa_address'],
'classes': ['Agilent33250A', 'Agilent81110A', 'AgilentE4400B', 'AgilentMXG', 'Keysight81160A'],
'imports': [],
'visa_info': {
'Agilent33250A': ('Agilent Technologies', ['33250A']),
'Agilent81110A': ('HEWLETT-PACKARD', ['HP81110A']),
'AgilentE4400B': ('Hewlett-Packard', ['ESG-1000B']),
'AgilentMXG': ('Agilent Technologies', ['N5181A']),
'Keysight81160A': ('Agilent Technologies', ['81160A']),
},
}),
('funcgenerators.rigol', {
'params': ['visa_address'],
'classes': [],
'imports': ['visa'],
'visa_info': {
'DG800': ('Rigol Technologies', ['DG811', 'DG812']),
},
}),
('funcgenerators.tektronix', {
'params': ['visa_address'],
'classes': [],
'imports': [],
'visa_info': {
'AFG_3000': ('TEKTRONIX', ['AFG3011', 'AFG3021B', 'AFG3022B', 'AFG3101', 'AFG3102', 'AFG3251', 'AFG3252']),
},
}),
('laserdiodecontrollers.ilx_lightwave', {
'params': ['visa_address'],
'classes': ['LDC3724B'],
'imports': [],
'visa_info': {
'LDC3724B': ('ILX Lightwave', ['3724B']),
},
}),
('lockins.sr844', {
'params': ['visa_address'],
'classes': [],
'imports': ['visa'],
'visa_info': {
'SR844': ('Stanford_Research_Systems', ['SR844']),
},
}),
('lockins.sr850', {
'params': ['visa_address'],
'classes': [],
'imports': ['visa'],
'visa_info': {
'SR850': ('Stanford_Research_Systems', ['SR850']),
},
}),
('motion._kinesis.ff', {
'params': ['serial'],
'classes': ['FilterFlipper'],
'imports': ['nicelib'],
}),
('motion._kinesis.isc', {
'params': ['serial'],
'classes': ['K10CR1'],
'imports': ['nicelib'],
}),
('motion.apt', {
'params': ['serial'],
'classes': ['TDC001_APT'],
'imports': ['serial'],
}),
('motion.ecc100', {
'params': ['id'],
'classes': ['ECC100'],
'imports': [],
}),
('motion.filter_flipper', {
'params': ['serial'],
'classes': ['Filter_Flipper'],
'imports': ['cffi', 'nicelib'],
}),
('motion.newmark', {
'params': ['serial'],
'classes': ['NSCA1'],
'imports': ['visa'],
}),
('motion.tdc_001', {
'params': ['serial'],
'classes': ['TDC001'],
'imports': ['cffi', 'nicelib'],
}),
('multimeters.hp', {
'params': ['visa_address'],
'classes': [],
'imports': [],
'visa_info': {
'HPMultimeter': ('HEWLETT-PACKARD', ['34401A']),
},
}),
('powermeters.thorlabs', {
'params': ['visa_address'],
'classes': ['PM100D'],
'imports': [],
'visa_info': {
'PM100D': ('Thorlabs', ['PM100D']),
},
}),
('powersupplies.gw_instek', {
'params': ['visa_address'],
'classes': ['GPD_3303S'],
'imports': [],
'visa_info': {
'GPD_3303S': ('GW INSTEK', ['GPD-3303S']),
},
}),
('scopes.agilent', {
'params': ['visa_address'],
'classes': ['DSO_1000'],
'imports': ['pyvisa', 'visa'],
'visa_info': {
'DSO_1000': ('Agilent Technologies', ['DSO1024A']),
},
}),
('scopes.tektronix', {
'params': ['visa_address'],
'classes': ['MSO_DPO_2000', 'MSO_DPO_3000', 'MSO_DPO_4000', 'TDS_1000', 'TDS_200', 'TDS_2000', 'TDS_3000', 'TDS_7000'],
'imports': ['pyvisa', 'visa'],
'visa_info': {
'MSO_DPO_2000': ('TEKTRONIX', ['MSO2012', 'MSO2014', 'MSO2024', 'DPO2012', 'DPO2014', 'DPO2024']),
'MSO_DPO_3000': ('TEKTRONIX', ['MSO3012', 'DPO3012', 'MSO3014', 'DPO3014', 'MSO3032', 'DPO3032', 'MSO3034', 'DPO3034', 'DPO3052', 'MSO3054', 'DPO3054']),
'MSO_DPO_4000': ('TEKTRONIX', ['MSO4032', 'DPO4032', 'MSO4034', 'DPO4034', 'MSO4054', 'DPO4054', 'MSO4104', 'DPO4104']),
'TDS_1000': ('TEKTRONIX', ['TDS 1001B', 'TDS 1002B', 'TDS 1012B']),
'TDS_200': ('TEKTRONIX', ['TDS 210', 'TDS 220', 'TDS 224']),
'TDS_2000': ('TEKTRONIX', ['TDS 2002B', 'TDS 2004B', 'TDS 2012B', 'TDS 2014B', 'TDS 2022B', 'TDS 2024B']),
'TDS_3000': ('TEKTRONIX', ['TDS 3012', 'TDS 3012B', 'TDS 3012C', 'TDS 3014', 'TDS 3014B', 'TDS 3014C', 'TDS 3032', 'TDS 3032B', 'TDS 3032C', 'TDS 3034', 'TDS 3034B', 'TDS 3034C', 'TDS 3052', 'TDS 3052B', 'TDS 3052C', 'TDS 3054', 'TDS 3054B', 'TDS 3054C']),
'TDS_7000': ('TEKTRONIX', ['TDS7154', 'TDS7254', 'TDS7404']),
},
}),
('spectrometers.bristol', {
'params': ['port'],
'classes': ['Bristol_721'],
'imports': [],
}),
('spectrometers.thorlabs_ccs', {
'params': ['model', 'serial', 'usb'],
'classes': ['CCS'],
'imports': ['cffi', 'nicelib', 'visa'],
}),
('spectrumanalyzers.rohde_schwarz', {
'params': ['visa_address'],
'classes': [],
'imports': [],
'visa_info': {},
}),
('tempcontrollers.covesion', {
'params': ['visa_address'],
'classes': ['CovesionOC'],
'imports': ['pyvisa'],
'visa_info': {},
}),
('tempcontrollers.hcphotonics', {
'params': ['visa_address'],
'classes': ['TC038'],
'imports': ['pyvisa'],
'visa_info': {},
}),
('lasers.femto_ferb', {
'params': ['visa_address'],
'classes': [],
'imports': [],
'visa_info': {},
}),
('powermeters.newport', {
'params': ['visa_address'],
'classes': ['Newport_1830_C'],
'imports': [],
'visa_info': {},
}),
('cameras.pco', {
'params': ['interface', 'number'],
'classes': ['PCO_Camera'],
'imports': ['cffi', 'nicelib', 'pycparser'],
}),
('vacuum.sentorr_mod', {
'params': ['port'],
'classes': ['SenTorrMod'],
'imports': ['serial'],
}),
('wavemeters.burleigh', {
'params': ['visa_address'],
'classes': ['WA_1000'],
'imports': [],
'visa_info': {},
}),
])
|
mabuchilab/Instrumental
|
instrumental/driver_info.py
|
Python
|
gpl-3.0
| 7,203
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# tinytag - an audio meta info reader
# Copyright (c) 2014-2018 Tom Wallroth
#
# Sources on github:
# http://github.com/devsnd/tinytag/
# MIT License
# Copyright (c) 2014-2018 Tom Wallroth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
try:
    from collections.abc import MutableMapping # moved here in Python 3.3; removed from collections in 3.10
except ImportError:
    from collections import MutableMapping
import codecs
from functools import reduce
import struct
import os
import io
import sys
from io import BytesIO
DEBUG = False # some of the parsers will print some debug info when set to True
class TinyTagException(LookupError): # inherit LookupError for backwards compat
pass
def _read(fh, nbytes): # helper function to check if we haven't reached EOF
b = fh.read(nbytes)
if len(b) < nbytes:
raise TinyTagException('Unexpected end of file')
return b
def stderr(*args):
sys.stderr.write('%s\n' % ' '.join(args))
sys.stderr.flush()
def _bytes_to_int_le(b):
fmt = {1: '<B', 2: '<H', 4: '<I', 8: '<Q'}.get(len(b))
return struct.unpack(fmt, b)[0] if fmt is not None else 0
def _bytes_to_int(b):
return reduce(lambda accu, elem: (accu << 8) + elem, b, 0)
class TinyTag(object):
def __init__(self, filehandler, filesize):
self._filehandler = filehandler
self.filesize = filesize
self.album = None
self.albumartist = None
self.artist = None
self.audio_offset = None
self.bitrate = None
self.channels = None
self.comment = None
self.disc = None
self.disc_total = None
self.duration = None
self.genre = None
self.samplerate = None
self.title = None
self.track = None
self.track_total = None
self.year = None
self._load_image = False
self._image_data = None
def as_dict(self):
return {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
@classmethod
def is_supported(cls, filename):
return cls._get_parser_for_filename(filename) is not None
def get_image(self):
return self._image_data
@classmethod
def _get_parser_for_filename(cls, filename, exception=False):
mapping = {
('.mp3',): ID3,
('.oga', '.ogg', '.opus'): Ogg,
('.wav',): Wave,
('.flac',): Flac,
('.wma',): Wma,
('.m4b', '.m4a', '.mp4'): MP4,
}
for fileextension, tagclass in mapping.items():
if filename.lower().endswith(fileextension):
return tagclass
if exception:
raise TinyTagException('No tag reader found to support filetype! ')
@classmethod
def get(cls, filename, tags=True, duration=True, image=False):
parser_class = None
size = os.path.getsize(filename)
if not size > 0:
return TinyTag(None, 0)
if cls == TinyTag: # if `get` is invoked on TinyTag, find parser by ext
parser_class = cls._get_parser_for_filename(filename, exception=True)
else: # otherwise use the class on which `get` was invoked
parser_class = cls
with io.open(filename, 'rb') as af:
tag = parser_class(af, size)
tag.load(tags=tags, duration=duration, image=image)
return tag
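    # Typical usage (illustrative file name): tag = TinyTag.get('song.mp3') returns a
    # parsed tag whose attributes such as tag.artist, tag.duration and tag.bitrate
    # are then populated; TinyTag.get dispatches to the right parser by extension.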
def __str__(self):
return str(dict(
(k, v) for k, v in self.__dict__.items() if not k.startswith('_')
))
def __repr__(self):
return str(self)
def load(self, tags, duration, image=False):
self._load_image = image
if tags:
self._parse_tag(self._filehandler)
if duration:
if tags: # rewind file if the tags were already parsed
self._filehandler.seek(0)
self._determine_duration(self._filehandler)
def _set_field(self, fieldname, bytestring, transfunc=None):
"""convienience function to set fields of the tinytag by name.
the payload (bytestring) can be changed using the transfunc"""
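        # Illustrative examples based on the logic below: _set_field('track', '3/12')
        # stores track='3' and track_total='12', while passing a transfunc (e.g. a
        # decoder function) runs it on the payload first. Existing truthy values are
        # never overwritten.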
if getattr(self, fieldname): # do not overwrite existing data
return
value = bytestring if transfunc is None else transfunc(bytestring)
if DEBUG:
stderr('Setting field "%s" to "%s"' % (fieldname, value))
if fieldname == 'genre' and value.isdigit() and int(value) < len(ID3.ID3V1_GENRES):
# funky: id3v1 genre hidden in a id3v2 field
value = ID3.ID3V1_GENRES[int(value)]
if fieldname in ("track", "disc"):
if type(value).__name__ in ('str', 'unicode') and '/' in value:
current, total = value.split('/')[:2]
setattr(self, "%s_total" % fieldname, total)
else:
current = value
setattr(self, fieldname, current)
else:
setattr(self, fieldname, value)
def _determine_duration(self, fh):
raise NotImplementedError()
def _parse_tag(self, fh):
raise NotImplementedError()
def update(self, other):
# update the values of this tag with the values from another tag
for key in ['track', 'track_total', 'title', 'artist',
'album', 'albumartist', 'year', 'duration',
'genre', 'disc', 'disc_total', 'comment']:
if not getattr(self, key) and getattr(other, key):
setattr(self, key, getattr(other, key))
@staticmethod
def _unpad(s):
# strings in mp3 and asf *may* be terminated with a zero byte at the end
return s[:s.index('\x00')] if '\x00' in s else s
class MP4(TinyTag):
# see: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/Metadata/Metadata.html
# and: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html
class Parser:
# https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/Metadata/Metadata.html#//apple_ref/doc/uid/TP40000939-CH1-SW34
ATOM_DECODER_BY_TYPE = {
0: lambda x: x, # 'reserved',
1: lambda x: codecs.decode(x, 'utf-8', 'replace'), # UTF-8
2: lambda x: codecs.decode(x, 'utf-16', 'replace'), # UTF-16
3: lambda x: codecs.decode(x, 's/jis', 'replace'), # S/JIS
# 16: duration in millis
13: lambda x: x, # JPEG
14: lambda x: x, # PNG
21: lambda x: struct.unpack('>b', x)[0], # BE Signed int
22: lambda x: struct.unpack('>B', x)[0], # BE Unsigned int
23: lambda x: struct.unpack('>f', x)[0], # BE Float32
24: lambda x: struct.unpack('>d', x)[0], # BE Float64
# 27: lambda x: x, # BMP
# 28: lambda x: x, # QuickTime Metadata atom
65: lambda x: struct.unpack('b', x)[0], # 8-bit Signed int
66: lambda x: struct.unpack('>h', x)[0], # BE 16-bit Signed int
67: lambda x: struct.unpack('>i', x)[0], # BE 32-bit Signed int
74: lambda x: struct.unpack('>q', x)[0], # BE 64-bit Signed int
75: lambda x: struct.unpack('B', x)[0], # 8-bit Unsigned int
76: lambda x: struct.unpack('>H', x)[0], # BE 16-bit Unsigned int
77: lambda x: struct.unpack('>I', x)[0], # BE 32-bit Unsigned int
78: lambda x: struct.unpack('>Q', x)[0], # BE 64-bit Unsigned int
}
@classmethod
def make_data_atom_parser(cls, fieldname):
def parse_data_atom(data_atom):
data_type = struct.unpack('>I', data_atom[:4])[0]
conversion = cls.ATOM_DECODER_BY_TYPE.get(data_type)
if conversion is None:
stderr('Cannot convert data type: %s' % data_type)
return {} # don't know how to convert data atom
# skip header & null-bytes, convert rest
return {fieldname: conversion(data_atom[8:])}
return parse_data_atom
@classmethod
def make_number_parser(cls, fieldname1, fieldname2):
def _(data_atom):
number_data = data_atom[8:14]
numbers = struct.unpack('>HHH', number_data)
# for some reason the first number is always irrelevant.
return {fieldname1: numbers[1], fieldname2: numbers[2]}
return _
@classmethod
def parse_id3v1_genre(cls, data_atom):
# dunno why the genre is offset by -1 but that's how mutagen does it
idx = struct.unpack('>H', data_atom[8:])[0] - 1
if idx < len(ID3.ID3V1_GENRES):
return {'genre': ID3.ID3V1_GENRES[idx]}
return {'genre': None}
@classmethod
def parse_audio_sample_entry(cls, data):
# this atom also contains the esds atom:
# https://ffmpeg.org/doxygen/0.6/mov_8c-source.html
# http://xhelmboyx.tripod.com/formats/mp4-layout.txt
datafh = BytesIO(data)
datafh.seek(16, os.SEEK_CUR) # jump over version and flags
channels = struct.unpack('>H', datafh.read(2))[0]
datafh.seek(2, os.SEEK_CUR) # jump over bit_depth
datafh.seek(2, os.SEEK_CUR) # jump over QT compr id & pkt size
sr = struct.unpack('>I', datafh.read(4))[0]
esds_atom_size = struct.unpack('>I', data[28:32])[0]
esds_atom = BytesIO(data[36:36 + esds_atom_size])
# http://sasperger.tistory.com/103
esds_atom.seek(22, os.SEEK_CUR) # jump over most data...
esds_atom.seek(4, os.SEEK_CUR) # jump over max bitrate
avg_br = struct.unpack('>I', esds_atom.read(4))[0] / 1000.0 # kbit/s
return {'channels': channels, 'samplerate': sr, 'bitrate': avg_br}
@classmethod
def parse_mvhd(cls, data):
# http://stackoverflow.com/a/3639993/1191373
walker = BytesIO(data)
version = struct.unpack('b', walker.read(1))[0]
walker.seek(3, os.SEEK_CUR) # jump over flags
if version == 0: # uses 32 bit integers for timestamps
walker.seek(8, os.SEEK_CUR) # jump over create & mod times
time_scale = struct.unpack('>I', walker.read(4))[0]
duration = struct.unpack('>I', walker.read(4))[0]
else: # version == 1: # uses 64 bit integers for timestamps
walker.seek(16, os.SEEK_CUR) # jump over create & mod times
time_scale = struct.unpack('>I', walker.read(4))[0]
duration = struct.unpack('>q', walker.read(8))[0]
return {'duration': float(duration) / time_scale}
@classmethod
def debug_atom(cls, data):
stderr(data) # use this function to inspect atoms in an atom tree
return {}
# The parser tree: Each key is an atom name which is traversed if existing.
# Leaves of the parser tree are callables which receive the atom data.
    # callables return {fieldname: value} which updates the TinyTag.
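    # For example, a b'\xa9alb' atom found along moov -> udta -> meta -> ilst is
    # handled (via its nested b'data' atom) by make_data_atom_parser('album'),
    # which returns {'album': <decoded text>} and thereby fills the album field.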
META_DATA_TREE = {b'moov': {b'udta': {b'meta': {b'ilst': {
# see: http://atomicparsley.sourceforge.net/mpeg-4files.html
b'\xa9alb': {b'data': Parser.make_data_atom_parser('album')},
b'\xa9ART': {b'data': Parser.make_data_atom_parser('artist')},
b'aART': {b'data': Parser.make_data_atom_parser('albumartist')},
# b'cpil': {b'data': Parser.make_data_atom_parser('compilation')},
b'\xa9cmt': {b'data': Parser.make_data_atom_parser('comment')},
b'disk': {b'data': Parser.make_number_parser('disc', 'disc_total')},
# b'\xa9wrt': {b'data': Parser.make_data_atom_parser('composer')},
b'\xa9day': {b'data': Parser.make_data_atom_parser('year')},
b'\xa9gen': {b'data': Parser.make_data_atom_parser('genre')},
b'gnre': {b'data': Parser.parse_id3v1_genre},
b'\xa9nam': {b'data': Parser.make_data_atom_parser('title')},
b'trkn': {b'data': Parser.make_number_parser('track', 'track_total')},
}}}}}
# see: https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
AUDIO_DATA_TREE = {
b'moov': {
b'mvhd': Parser.parse_mvhd,
b'trak': {b'mdia': {b"minf": {b"stbl": {b"stsd": {b'mp4a':
Parser.parse_audio_sample_entry
}}}}}
}
}
IMAGE_DATA_TREE = {b'moov': {b'udta': {b'meta': {b'ilst': {
b'covr': {b'data': Parser.make_data_atom_parser('_image_data')},
}}}}}
VERSIONED_ATOMS = set((b'meta', b'stsd')) # those have an extra 4 byte header
FLAGGED_ATOMS = set((b'stsd',)) # these also have an extra 4 byte header
def _determine_duration(self, fh):
self._traverse_atoms(fh, path=self.AUDIO_DATA_TREE)
def _parse_tag(self, fh):
self._traverse_atoms(fh, path=self.META_DATA_TREE)
if self._load_image: # A bit inefficient, we rewind the file
self._filehandler.seek(0) # to parse it again for the image
self._traverse_atoms(fh, path=self.IMAGE_DATA_TREE)
def _traverse_atoms(self, fh, path, stop_pos=None, curr_path=None):
header_size = 8
atom_header = fh.read(header_size)
while len(atom_header) == header_size:
atom_size = struct.unpack('>I', atom_header[:4])[0] - header_size
atom_type = atom_header[4:]
if curr_path is None: # keep track how we traversed in the tree
curr_path = [atom_type]
if atom_size <= 0: # empty atom, jump to next one
atom_header = fh.read(header_size)
continue
if DEBUG:
stderr('%s pos: %d atom: %s len: %d' % (' ' * 4 * len(curr_path), fh.tell() - header_size, atom_type, atom_size + header_size))
if atom_type in self.VERSIONED_ATOMS: # jump atom version for now
fh.seek(4, os.SEEK_CUR)
if atom_type in self.FLAGGED_ATOMS: # jump atom flags for now
fh.seek(4, os.SEEK_CUR)
sub_path = path.get(atom_type, None)
# if the path leaf is a dict, traverse deeper into the tree:
if issubclass(type(sub_path), MutableMapping):
atom_end_pos = fh.tell() + atom_size
self._traverse_atoms(fh, path=sub_path, stop_pos=atom_end_pos,
curr_path=curr_path + [atom_type])
# if the path-leaf is a callable, call it on the atom data
elif callable(sub_path):
for fieldname, value in sub_path(fh.read(atom_size)).items():
if DEBUG:
stderr(' ' * 4 * len(curr_path), 'FIELD: ', fieldname)
if fieldname:
self._set_field(fieldname, value)
# if no action was specified using dict or callable, jump over atom
else:
fh.seek(atom_size, os.SEEK_CUR)
# check if we have reached the end of this branch:
if stop_pos and fh.tell() >= stop_pos:
return # return to parent (next parent node in tree)
atom_header = fh.read(header_size) # read next atom
class ID3(TinyTag):
FRAME_ID_TO_FIELD = { # Mapping from Frame ID to a field of the TinyTag
'COMM': 'comment', 'COM': 'comment',
'TRCK': 'track', 'TRK': 'track',
'TYER': 'year', 'TYE': 'year',
'TALB': 'album', 'TAL': 'album',
'TPE1': 'artist', 'TP1': 'artist',
'TIT2': 'title', 'TT2': 'title',
'TCON': 'genre', 'TPOS': 'disc',
'TPE2': 'albumartist',
}
IMAGE_FRAME_IDS = set(['APIC', 'PIC'])
PARSABLE_FRAME_IDS = set(FRAME_ID_TO_FIELD.keys()).union(IMAGE_FRAME_IDS)
_MAX_ESTIMATION_SEC = 30
_CBR_DETECTION_FRAME_COUNT = 5
_USE_XING_HEADER = True # much faster, but can be deactivated for testing
ID3V1_GENRES = [
'Blues', 'Classic Rock', 'Country', 'Dance', 'Disco',
'Funk', 'Grunge', 'Hip-Hop', 'Jazz', 'Metal', 'New Age', 'Oldies',
'Other', 'Pop', 'R&B', 'Rap', 'Reggae', 'Rock', 'Techno', 'Industrial',
'Alternative', 'Ska', 'Death Metal', 'Pranks', 'Soundtrack',
'Euro-Techno', 'Ambient', 'Trip-Hop', 'Vocal', 'Jazz+Funk', 'Fusion',
'Trance', 'Classical', 'Instrumental', 'Acid', 'House', 'Game',
'Sound Clip', 'Gospel', 'Noise', 'AlternRock', 'Bass', 'Soul', 'Punk',
'Space', 'Meditative', 'Instrumental Pop', 'Instrumental Rock',
'Ethnic', 'Gothic', 'Darkwave', 'Techno-Industrial', 'Electronic',
'Pop-Folk', 'Eurodance', 'Dream', 'Southern Rock', 'Comedy', 'Cult',
'Gangsta', 'Top 40', 'Christian Rap', 'Pop/Funk', 'Jungle',
'Native American', 'Cabaret', 'New Wave', 'Psychadelic', 'Rave',
'Showtunes', 'Trailer', 'Lo-Fi', 'Tribal', 'Acid Punk', 'Acid Jazz',
'Polka', 'Retro', 'Musical', 'Rock & Roll', 'Hard Rock',
        # Winamp Extended Genres
'Folk', 'Folk-Rock', 'National Folk', 'Swing', 'Fast Fusion', 'Bebob',
'Latin', 'Revival', 'Celtic', 'Bluegrass', 'Avantgarde', 'Gothic Rock',
'Progressive Rock', 'Psychedelic Rock', 'Symphonic Rock', 'Slow Rock',
'Big Band', 'Chorus', 'Easy Listening', 'Acoustic', 'Humour', 'Speech',
'Chanson', 'Opera', 'Chamber Music', 'Sonata', 'Symphony', 'Booty Bass',
'Primus', 'Porn Groove', 'Satire', 'Slow Jam', 'Club', 'Tango', 'Samba',
'Folklore', 'Ballad', 'Power Ballad', 'Rhythmic Soul', 'Freestyle',
'Duet', 'Punk Rock', 'Drum Solo', 'A capella', 'Euro-House',
'Dance Hall', 'Goa', 'Drum & Bass',
# according to https://de.wikipedia.org/wiki/Liste_der_ID3v1-Genres:
'Club-House', 'Hardcore Techno', 'Terror', 'Indie', 'BritPop',
'', # don't use ethnic slur ("Negerpunk", WTF!)
'Polsk Punk', 'Beat', 'Christian Gangsta Rap', 'Heavy Metal',
'Black Metal', 'Contemporary Christian', 'Christian Rock',
# WinAmp 1.91
'Merengue', 'Salsa', 'Thrash Metal', 'Anime', 'Jpop', 'Synthpop',
# WinAmp 5.6
'Abstract', 'Art Rock', 'Baroque', 'Bhangra', 'Big Beat', 'Breakbeat',
'Chillout', 'Downtempo', 'Dub', 'EBM', 'Eclectic', 'Electro',
'Electroclash', 'Emo', 'Experimental', 'Garage', 'Illbient',
'Industro-Goth', 'Jam Band', 'Krautrock', 'Leftfield', 'Lounge',
'Math Rock', 'New Romantic', 'Nu-Breakz', 'Post-Punk', 'Post-Rock',
'Psytrance', 'Shoegaze', 'Space Rock', 'Trop Rock', 'World Music',
'Neoclassical', 'Audiobook', 'Audio Theatre', 'Neue Deutsche Welle',
'Podcast', 'Indie Rock', 'G-Funk', 'Dubstep', 'Garage Rock', 'Psybient',
]
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
        # save position after the ID3 tag for duration measurement speedup
self._bytepos_after_id3v2 = 0
@classmethod
def set_estimation_precision(cls, estimation_in_seconds):
cls._MAX_ESTIMATION_SEC = estimation_in_seconds
# see this page for the magic values used in mp3:
# http://www.mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm
samplerates = [
[11025, 12000, 8000], # MPEG 2.5
[], # reserved
[22050, 24000, 16000], # MPEG 2
[44100, 48000, 32000], # MPEG 1
]
v1l1 = [0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0]
v1l2 = [0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0]
v1l3 = [0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0]
v2l1 = [0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0]
v2l2 = [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0]
v2l3 = v2l2
bitrate_by_version_by_layer = [
[None, v2l3, v2l2, v2l1], # MPEG Version 2.5 # note that the layers go
None, # reserved # from 3 to 1 by design.
[None, v2l3, v2l2, v2l1], # MPEG Version 2 # the first layer id is
[None, v1l3, v1l2, v1l1], # MPEG Version 1 # reserved
]
samples_per_frame = 1152 # the default frame size for mp3
channels_per_channel_mode = [
2, # 00 Stereo
2, # 01 Joint stereo (Stereo)
2, # 10 Dual channel (2 mono channels)
1, # 11 Single channel (Mono)
]
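    # Worked example using the tables above: an MPEG 1 Layer III frame with
    # bitrate index 9 (128 kbit/s) at 44100 Hz and no padding spans
    # 144000 * 128 // 44100 = 417 bytes, which is the frame length computed
    # in _determine_duration below.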
def _parse_xing_header(self, fh):
# see: http://www.mp3-tech.org/programmer/sources/vbrheadersdk.zip
fh.seek(4, os.SEEK_CUR) # read over Xing header
header_flags = struct.unpack('>i', fh.read(4))[0]
frames = byte_count = toc = vbr_scale = None
if header_flags & 1: # FRAMES FLAG
frames = struct.unpack('>i', fh.read(4))[0]
if header_flags & 2: # BYTES FLAG
byte_count = struct.unpack('>i', fh.read(4))[0]
if header_flags & 4: # TOC FLAG
toc = [struct.unpack('>i', fh.read(4))[0] for _ in range(100)]
if header_flags & 8: # VBR SCALE FLAG
vbr_scale = struct.unpack('>i', fh.read(4))[0]
return frames, byte_count, toc, vbr_scale
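    # Example: header_flags == 0x03 means both the FRAMES and BYTES fields are
    # present, which is all _determine_duration needs for its Xing shortcut
    # (duration from the frame count, bitrate from the byte count).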
def _determine_duration(self, fh):
max_estimation_frames = (ID3._MAX_ESTIMATION_SEC * 44100) // ID3.samples_per_frame
frame_size_accu = 0
header_bytes = 4
frames = 0 # count frames for determining mp3 duration
bitrate_accu = 0 # add up bitrates to find average bitrate to detect
last_bitrates = [] # CBR mp3s (multiple frames with same bitrates)
# seek to first position after id3 tag (speedup for large header)
fh.seek(self._bytepos_after_id3v2)
while True:
# reading through garbage until 11 '1' sync-bits are found
b = fh.peek(4)
if len(b) < 4:
break # EOF
sync, conf, bitrate_freq, rest = struct.unpack('BBBB', b[0:4])
            br_id = (bitrate_freq >> 4) & 0x0F # bitrate id
sr_id = (bitrate_freq >> 2) & 0x03 # sample rate id
padding = 1 if bitrate_freq & 0x02 > 0 else 0
mpeg_id = (conf >> 3) & 0x03
layer_id = (conf >> 1) & 0x03
channel_mode = (rest >> 6) & 0x03
self.channels = self.channels_per_channel_mode[channel_mode]
# check for eleven 1s, validate bitrate and sample rate
if not b[:2] > b'\xFF\xE0' or br_id > 14 or br_id == 0 or sr_id == 3:
idx = b.find(b'\xFF', 1) # invalid frame, find next sync header
if idx == -1:
idx = len(b) # not found: jump over the current peek buffer
fh.seek(max(idx, 1), os.SEEK_CUR)
continue
try:
self.samplerate = ID3.samplerates[mpeg_id][sr_id]
frame_bitrate = ID3.bitrate_by_version_by_layer[mpeg_id][layer_id][br_id]
except (IndexError, TypeError):
raise TinyTagException('mp3 parsing failed')
# There might be a xing header in the first frame that contains
# all the info we need, otherwise parse multiple frames to find the
# accurate average bitrate
if frames == 0 and ID3._USE_XING_HEADER:
xing_header_offset = b.find(b'Xing')
if xing_header_offset != -1:
fh.seek(xing_header_offset, os.SEEK_CUR)
xframes, byte_count, toc, vbr_scale = self._parse_xing_header(fh)
if xframes and xframes != 0 and byte_count:
self.duration = xframes * ID3.samples_per_frame / float(self.samplerate)
self.bitrate = byte_count * 8 / self.duration / 1000
self.audio_offset = fh.tell()
return
continue
frames += 1 # it's most probably an mp3 frame
bitrate_accu += frame_bitrate
if frames == 1:
self.audio_offset = fh.tell()
if frames <= ID3._CBR_DETECTION_FRAME_COUNT:
last_bitrates.append(frame_bitrate)
fh.seek(4, os.SEEK_CUR) # jump over peeked bytes
frame_length = (144000 * frame_bitrate) // self.samplerate + padding
frame_size_accu += frame_length
            # if the bitrate does not change over time it's probably CBR
is_cbr = (frames == ID3._CBR_DETECTION_FRAME_COUNT and
len(set(last_bitrates)) == 1)
if frames == max_estimation_frames or is_cbr:
# try to estimate duration
fh.seek(-128, 2) # jump to last byte (leaving out id3v1 tag)
audio_stream_size = fh.tell() - self.audio_offset
est_frame_count = audio_stream_size / (frame_size_accu / float(frames))
samples = est_frame_count * ID3.samples_per_frame
self.duration = samples / float(self.samplerate)
self.bitrate = bitrate_accu / frames
return
if frame_length > 1: # jump over current frame body
fh.seek(frame_length - header_bytes, os.SEEK_CUR)
if self.samplerate:
self.duration = frames * ID3.samples_per_frame / float(self.samplerate)
def _parse_tag(self, fh):
self._parse_id3v2(fh)
has_all_tags = all((self.track, self.track_total, self.title,
self.artist, self.album, self.albumartist, self.year, self.genre))
if not has_all_tags and self.filesize > 128:
fh.seek(-128, os.SEEK_END) # try parsing id3v1 in last 128 bytes
self._parse_id3v1(fh)
def _parse_id3v2(self, fh):
# for info on the specs, see: http://id3.org/Developer%20Information
header = struct.unpack('3sBBB4B', _read(fh, 10))
tag = codecs.decode(header[0], 'ISO-8859-1')
# check if there is an ID3v2 tag at the beginning of the file
if tag == 'ID3':
major, rev = header[1:3]
if DEBUG:
stderr('Found id3 v2.%s' % major)
# unsync = (header[3] & 0x80) > 0
extended = (header[3] & 0x40) > 0
# experimental = (header[3] & 0x20) > 0
# footer = (header[3] & 0x10) > 0
size = self._calc_size(header[4:8], 7)
self._bytepos_after_id3v2 = size
end_pos = fh.tell() + size
parsed_size = 0
if extended: # just read over the extended header.
size_bytes = struct.unpack('4B', _read(fh, 6)[0:4])
extd_size = self._calc_size(size_bytes, 7)
fh.seek(extd_size - 6, os.SEEK_CUR) # jump over extended_header
while parsed_size < size:
frame_size = self._parse_frame(fh, id3version=major)
if frame_size == 0:
break
parsed_size += frame_size
fh.seek(end_pos, os.SEEK_SET)
def _parse_id3v1(self, fh):
if fh.read(3) == b'TAG': # check if this is an ID3 v1 tag
def asciidecode(x):
return self._unpad(codecs.decode(x, 'latin1'))
fields = fh.read(30 + 30 + 30 + 4 + 30 + 1)
self._set_field('title', fields[:30], transfunc=asciidecode)
self._set_field('artist', fields[30:60], transfunc=asciidecode)
self._set_field('album', fields[60:90], transfunc=asciidecode)
self._set_field('year', fields[90:94], transfunc=asciidecode)
comment = fields[94:124]
if b'\x00\x00' < comment[-2:] < b'\x01\x00':
self._set_field('track', str(ord(comment[-1:])))
comment = comment[:-2]
self._set_field('comment', comment, transfunc=asciidecode)
genre_id = ord(fields[124:125])
if genre_id < len(ID3.ID3V1_GENRES):
self.genre = ID3.ID3V1_GENRES[genre_id]
def _parse_frame(self, fh, id3version=False):
# ID3v2.2 especially ugly. see: http://id3.org/id3v2-00
frame_header_size = 6 if id3version == 2 else 10
frame_size_bytes = 3 if id3version == 2 else 4
binformat = '3s3B' if id3version == 2 else '4s4B2B'
bits_per_byte = 7 if id3version == 4 else 8 # only id3v2.4 is synchsafe
frame_header_data = fh.read(frame_header_size)
if len(frame_header_data) == 0:
return 0
frame = struct.unpack(binformat, frame_header_data)
frame_id = self._decode_string(frame[0])
frame_size = self._calc_size(frame[1:1+frame_size_bytes], bits_per_byte)
if DEBUG:
stderr('Found Frame %s at %d-%d' % (frame_id, fh.tell(), fh.tell() + frame_size))
if frame_size > 0:
# flags = frame[1+frame_size_bytes:] # dont care about flags.
            if frame_id not in ID3.PARSABLE_FRAME_IDS: # jump over unparsable frames
fh.seek(frame_size, os.SEEK_CUR)
return frame_size
content = fh.read(frame_size)
fieldname = ID3.FRAME_ID_TO_FIELD.get(frame_id)
if fieldname:
transfunc = self._decode_comment if fieldname == 'comment' else self._decode_string
self._set_field(fieldname, content, transfunc)
elif frame_id in self.IMAGE_FRAME_IDS and self._load_image:
# See section 4.14: http://id3.org/id3v2.4.0-frames
if frame_id == 'PIC': # ID3 v2.2:
desc_end_pos = content.index(b'\x00', 1) + 1
else: # ID3 v2.3+
mimetype_end_pos = content.index(b'\x00', 1) + 1
desc_start_pos = mimetype_end_pos + 1 # jump over picture type
desc_end_pos = content.index(b'\x00', desc_start_pos) + 1
if content[desc_end_pos:desc_end_pos+1] == b'\x00':
desc_end_pos += 1 # the description ends with 1 or 2 null bytes
self._image_data = content[desc_end_pos:]
return frame_size
return 0
def _decode_comment(self, b):
comment = self._decode_string(b)
return comment[4:] if comment[:3] == 'eng' else comment # remove language
def _decode_string(self, b):
try: # it's not my fault, this is the spec.
first_byte = b[:1]
if first_byte == b'\x00': # ISO-8859-1
return self._unpad(codecs.decode(b[1:], 'ISO-8859-1'))
elif first_byte == b'\x01': # UTF-16 with BOM
# read byte order mark to determine endianess
encoding = 'UTF-16be' if b[1:3] == b'\xfe\xff' else 'UTF-16le'
# strip the bom and optional null bytes
bytestr = b[3:-1] if len(b) % 2 == 0 else b[3:]
return self._unpad(codecs.decode(bytestr, encoding))
elif first_byte == b'\x02': # UTF-16LE
# strip optional null byte, if byte count uneven
bytestr = b[1:-1] if len(b) % 2 == 0 else b[1:]
return self._unpad(codecs.decode(bytestr, 'UTF-16le'))
elif first_byte == b'\x03': # UTF-8
return codecs.decode(b[1:], 'UTF-8')
return self._unpad(codecs.decode(b, 'ISO-8859-1')) # wild guess
except UnicodeDecodeError:
raise TinyTagException('Error decoding ID3 Tag!')
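    # Example: the payload b'\x00Foo\x00' (encoding byte 0x00) decodes as
    # ISO-8859-1 to 'Foo', while b'\x03Foo' (encoding byte 0x03) decodes as UTF-8.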
def _calc_size(self, bytestr, bits_per_byte):
# length of some mp3 header fields is described by 7 or 8-bit-bytes
return reduce(lambda accu, elem: (accu << bits_per_byte) + elem, bytestr, 0)
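    # Example: the synchsafe size bytes (0x00, 0x00, 0x02, 0x01) decode to
    # (2 << 7) + 1 = 257 with bits_per_byte=7; with plain 8-bit bytes the same
    # sequence would decode to (2 << 8) + 1 = 513.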
class Ogg(TinyTag):
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
self._tags_parsed = False
self._max_samplenum = 0 # maximum sample position ever read
def _determine_duration(self, fh):
MAX_PAGE_SIZE = 65536 # https://xiph.org/ogg/doc/libogg/ogg_page.html
if not self._tags_parsed:
self._parse_tag(fh) # determine sample rate
fh.seek(0) # and rewind to start
if self.filesize > MAX_PAGE_SIZE:
fh.seek(-MAX_PAGE_SIZE, 2) # go to last possible page position
while True:
b = fh.peek(4)
if len(b) == 0:
return # EOF
if b[:4] == b'OggS': # look for an ogg header
for packet in self._parse_pages(fh):
pass # parse all remaining pages
self.duration = self._max_samplenum / float(self.samplerate)
else:
idx = b.find(b'OggS') # try to find header in peeked data
seekpos = idx if idx != -1 else len(b) - 3
fh.seek(max(seekpos, 1), os.SEEK_CUR)
def _parse_tag(self, fh):
        page_start_pos = fh.tell() # set audio_offset later if it's audio data
for packet in self._parse_pages(fh):
walker = BytesIO(packet)
if packet[0:7] == b"\x01vorbis":
(channels, self.samplerate, max_bitrate, bitrate,
min_bitrate) = struct.unpack("<B4i", packet[11:28])
if not self.audio_offset:
self.bitrate = bitrate / 1024.0
self.audio_offset = page_start_pos
elif packet[0:7] == b"\x03vorbis":
walker.seek(7, os.SEEK_CUR) # jump over header name
self._parse_vorbis_comment(walker)
elif packet[0:8] == b'OpusHead': # parse opus header
# https://www.videolan.org/developers/vlc/modules/codec/opus_header.c
# https://mf4.xiph.org/jenkins/view/opus/job/opusfile-unix/ws/doc/html/structOpusHead.html
walker.seek(8, os.SEEK_CUR) # jump over header name
(version, ch, _, sr, _, _) = struct.unpack("<BBHIHB", walker.read(11))
if (version & 0xF0) == 0: # only major version 0 supported
self.channels = ch
self.samplerate = sr
elif packet[0:8] == b'OpusTags': # parse opus metadata:
walker.seek(8, os.SEEK_CUR) # jump over header name
self._parse_vorbis_comment(walker)
else:
break
page_start_pos = fh.tell()
def _parse_vorbis_comment(self, fh):
# for the spec, see: http://xiph.org/vorbis/doc/v-comment.html
# discnumber tag based on: https://en.wikipedia.org/wiki/Vorbis_comment
comment_type_to_attr_mapping = {
'album': 'album',
'albumartist': 'albumartist',
'title': 'title',
'artist': 'artist',
'date': 'year',
'tracknumber': 'track',
'discnumber': 'disc',
'genre': 'genre',
'description': 'comment',
}
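        # Example: a comment entry such as b'TRACKNUMBER=7' is split on '=' below,
        # its key is lower-cased to 'tracknumber' and mapped to the 'track' field.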
vendor_length = struct.unpack('I', fh.read(4))[0]
fh.seek(vendor_length, os.SEEK_CUR) # jump over vendor
elements = struct.unpack('I', fh.read(4))[0]
for i in range(elements):
length = struct.unpack('I', fh.read(4))[0]
try:
keyvalpair = codecs.decode(fh.read(length), 'UTF-8')
except UnicodeDecodeError:
continue
if '=' in keyvalpair:
key, value = keyvalpair.split('=', 1)
fieldname = comment_type_to_attr_mapping.get(key.lower())
if fieldname:
self._set_field(fieldname, value)
def _parse_pages(self, fh):
# for the spec, see: https://wiki.xiph.org/Ogg
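        # Example: lacing values [255, 255, 70] on one page add up to a single
        # 580-byte packet; if the total is a multiple of 255 (i.e. the last lacing
        # value is 255) the packet continues on the next page and is accumulated
        # in `previous_page` below.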
previous_page = b'' # contains data from previous (continuing) pages
header_data = fh.read(27) # read ogg page header
while len(header_data) != 0:
header = struct.unpack('<4sBBqIIiB', header_data)
oggs, version, flags, pos, serial, pageseq, crc, segments = header
self._max_samplenum = max(self._max_samplenum, pos)
if oggs != b'OggS' or version != 0:
raise TinyTagException('Not a valid ogg file!')
segsizes = struct.unpack('B'*segments, fh.read(segments))
total = 0
for segsize in segsizes: # read all segments
total += segsize
if total < 255: # less than 255 bytes means end of page
yield previous_page + fh.read(total)
previous_page = b''
total = 0
if total != 0:
if total % 255 == 0:
previous_page += fh.read(total)
else:
yield previous_page + fh.read(total)
previous_page = b''
header_data = fh.read(27)
class Wave(TinyTag):
riff_mapping = {
b'INAM': 'title',
b'TITL': 'title',
b'IART': 'artist',
b'ICMT': 'comment',
b'ICRD': 'year',
b'IGNR': 'genre',
b'TRCK': 'track',
b'PRT1': 'track',
b'PRT2': 'track_number',
b'YEAR': 'year',
}
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
self._duration_parsed = False
def _determine_duration(self, fh):
# see: https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
# and: https://en.wikipedia.org/wiki/WAV
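        # Worked example: a 'data' chunk of 1764000 bytes with 2 channels,
        # 44100 Hz and 16-bit samples gives 1764000 / 2 / 44100 / 2 = 10.0
        # seconds, matching the duration formula used below.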
riff, size, fformat = struct.unpack('4sI4s', fh.read(12))
if riff != b'RIFF' or fformat != b'WAVE':
raise TinyTagException('not a wave file!')
channels, bitdepth = 2, 16 # assume CD quality
chunk_header = fh.read(8)
while len(chunk_header) == 8:
subchunkid, subchunksize = struct.unpack('4sI', chunk_header)
if subchunkid == b'fmt ':
_, channels, self.samplerate = struct.unpack('HHI', fh.read(8))
_, _, bitdepth = struct.unpack('<IHH', fh.read(8))
self.bitrate = self.samplerate * channels * bitdepth / 1024.0
elif subchunkid == b'data':
self.duration = float(subchunksize)/channels/self.samplerate/(bitdepth/8)
                self.audio_offset = fh.tell() - 8 # rewind to data header
fh.seek(subchunksize, 1)
elif subchunkid == b'LIST':
is_info = fh.read(4) # check INFO header
if is_info != b'INFO': # jump over non-INFO sections
fh.seek(subchunksize - 4, os.SEEK_CUR)
continue
sub_fh = BytesIO(fh.read(subchunksize - 4))
field = sub_fh.read(4)
while len(field):
data_length = struct.unpack('I', sub_fh.read(4))[0]
data = sub_fh.read(data_length).split(b'\x00', 1)[0] # strip zero-byte
data = codecs.decode(data, 'utf-8')
fieldname = self.riff_mapping.get(field)
if fieldname:
self._set_field(fieldname, data)
field = sub_fh.read(4)
elif subchunkid == b'id3 ' or subchunkid == b'ID3 ':
id3 = ID3(fh, 0)
id3._parse_id3v2(fh)
self.update(id3)
else: # some other chunk, just skip the data
fh.seek(subchunksize, 1)
chunk_header = fh.read(8)
self._duration_parsed = True
def _parse_tag(self, fh):
if not self._duration_parsed:
self._determine_duration(fh) # parse whole file to determine tags:(
class Flac(TinyTag):
METADATA_STREAMINFO = 0
METADATA_VORBIS_COMMENT = 4
def load(self, tags, duration, image=False):
header = self._filehandler.peek(4)
if header[:3] == b'ID3': # parse ID3 header if it exists
id3 = ID3(self._filehandler, 0)
id3._parse_id3v2(self._filehandler)
self.update(id3)
header = self._filehandler.peek(4) # after ID3 should be fLaC
if header[:4] != b'fLaC':
raise TinyTagException('Invalid flac header')
self._filehandler.seek(4, os.SEEK_CUR)
self._determine_duration(self._filehandler, skip_tags=not tags)
def _determine_duration(self, fh, skip_tags=False):
# for spec, see https://xiph.org/flac/ogg_mapping.html
header_data = fh.read(4)
while len(header_data):
meta_header = struct.unpack('B3B', header_data)
block_type = meta_header[0] & 0x7f
is_last_block = meta_header[0] & 0x80
size = _bytes_to_int(meta_header[1:4])
# http://xiph.org/flac/format.html#metadata_block_streaminfo
if block_type == Flac.METADATA_STREAMINFO:
stream_info_header = fh.read(size)
if len(stream_info_header) < 34: # invalid streaminfo
return
header = struct.unpack('HH3s3s8B16s', stream_info_header)
                # From the xiph documentation:
# py | <bits>
# ----------------------------------------------
# H | <16> The minimum block size (in samples)
# H | <16> The maximum block size (in samples)
# 3s | <24> The minimum frame size (in bytes)
# 3s | <24> The maximum frame size (in bytes)
# 8B | <20> Sample rate in Hz.
# | <3> (number of channels)-1.
# | <5> (bits per sample)-1.
# | <36> Total samples in stream.
# 16s| <128> MD5 signature
min_blk, max_blk, min_frm, max_frm = header[0:4]
min_frm = _bytes_to_int(struct.unpack('3B', min_frm))
max_frm = _bytes_to_int(struct.unpack('3B', max_frm))
# channels--. bits total samples
# |----- samplerate -----| |-||----| |---------~ ~----|
# 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000
# #---4---# #---5---# #---6---# #---7---# #--8-~ ~-12-#
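                # Example: header bytes 0x0A 0xC4 0x42 hold a sample rate of
                # 0x0AC442 >> 4 = 44100 Hz and ((0x42 >> 1) & 0x07) + 1 = 2 channels.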
self.samplerate = _bytes_to_int(header[4:7]) >> 4
self.channels = ((header[6] >> 1) & 0x07) + 1
# bit_depth = ((header[6] & 1) << 4) + ((header[7] & 0xF0) >> 4)
# bit_depth = (bit_depth + 1)
total_sample_bytes = [(header[7] & 0x0F)] + list(header[8:12])
total_samples = _bytes_to_int(total_sample_bytes)
self.duration = float(total_samples) / self.samplerate
if self.duration > 0:
self.bitrate = self.filesize / self.duration * 8 / 1024
elif block_type == Flac.METADATA_VORBIS_COMMENT and not skip_tags:
oggtag = Ogg(fh, 0)
oggtag._parse_vorbis_comment(fh)
self.update(oggtag)
elif block_type >= 127:
return # invalid block type
else:
fh.seek(size, 1) # seek over this block
if is_last_block:
return
header_data = fh.read(4)
class Wma(TinyTag):
ASF_CONTENT_DESCRIPTION_OBJECT = b'3&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel'
ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT = b'@\xa4\xd0\xd2\x07\xe3\xd2\x11\x97\xf0\x00\xa0\xc9^\xa8P'
STREAM_BITRATE_PROPERTIES_OBJECT = b'\xceu\xf8{\x8dF\xd1\x11\x8d\x82\x00`\x97\xc9\xa2\xb2'
ASF_FILE_PROPERTY_OBJECT = b'\xa1\xdc\xab\x8cG\xa9\xcf\x11\x8e\xe4\x00\xc0\x0c Se'
ASF_STREAM_PROPERTIES_OBJECT = b'\x91\x07\xdc\xb7\xb7\xa9\xcf\x11\x8e\xe6\x00\xc0\x0c Se'
STREAM_TYPE_ASF_AUDIO_MEDIA = b'@\x9ei\xf8M[\xcf\x11\xa8\xfd\x00\x80_\\D+'
# see:
# http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx
    # and (Japanese, but nonetheless helpful)
# http://uguisu.skr.jp/Windows/format_asf.html
def __init__(self, filehandler, filesize):
TinyTag.__init__(self, filehandler, filesize)
self.__tag_parsed = False
def _determine_duration(self, fh):
if not self.__tag_parsed:
self._parse_tag(fh)
def read_blocks(self, fh, blocks):
# blocks are a list(tuple('fieldname', byte_count, cast_int), ...)
decoded = {}
for block in blocks:
val = fh.read(block[1])
if block[2]:
val = _bytes_to_int_le(val)
decoded[block[0]] = val
return decoded
def __bytes_to_guid(self, obj_id_bytes):
return '-'.join([
hex(_bytes_to_int_le(obj_id_bytes[:-12]))[2:].zfill(6),
hex(_bytes_to_int_le(obj_id_bytes[-12:-10]))[2:].zfill(4),
hex(_bytes_to_int_le(obj_id_bytes[-10:-8]))[2:].zfill(4),
hex(_bytes_to_int(obj_id_bytes[-8:-6]))[2:].zfill(4),
hex(_bytes_to_int(obj_id_bytes[-6:]))[2:].zfill(12),
])
def __decode_string(self, bytestring):
return self._unpad(codecs.decode(bytestring, 'utf-16'))
def __decode_ext_desc(self, value_type, value):
""" decode ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT values"""
if value_type == 0: # Unicode string
return self.__decode_string(value)
elif value_type == 1: # BYTE array
return value
elif 1 < value_type < 6: # DWORD / QWORD / WORD
return _bytes_to_int_le(value)
def _parse_tag(self, fh):
self.__tag_parsed = True
guid = fh.read(16) # 128 bit GUID
if guid != b'0&\xb2u\x8ef\xcf\x11\xa6\xd9\x00\xaa\x00b\xcel':
return # not a valid ASF container! see: http://www.garykessler.net/library/file_sigs.html
size = struct.unpack('Q', fh.read(8))[0]
obj_count = struct.unpack('I', fh.read(4))[0]
if fh.read(2) != b'\x01\x02':
# http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx#_Toc521913958
return # not a valid asf header!
while True:
object_id = fh.read(16)
object_size = _bytes_to_int_le(fh.read(8))
if object_size == 0 or object_size > self.filesize:
break # invalid object, stop parsing.
if object_id == Wma.ASF_CONTENT_DESCRIPTION_OBJECT:
len_blocks = self.read_blocks(fh, [
('title_length', 2, True),
('author_length', 2, True),
('copyright_length', 2, True),
('description_length', 2, True),
('rating_length', 2, True),
])
data_blocks = self.read_blocks(fh, [
('title', len_blocks['title_length'], False),
('artist', len_blocks['author_length'], False),
('', len_blocks['copyright_length'], True),
('comment', len_blocks['description_length'], False),
('', len_blocks['rating_length'], True),
])
for field_name, bytestring in data_blocks.items():
if field_name:
self._set_field(field_name, bytestring, self.__decode_string)
elif object_id == Wma.ASF_EXTENDED_CONTENT_DESCRIPTION_OBJECT:
mapping = {
'WM/TrackNumber': 'track',
'WM/PartOfSet': 'disc',
'WM/Year': 'year',
'WM/AlbumArtist': 'albumartist',
'WM/Genre': 'genre',
'WM/AlbumTitle': 'album',
}
# see: http://web.archive.org/web/20131203084402/http://msdn.microsoft.com/en-us/library/bb643323.aspx#_Toc509555195
descriptor_count = _bytes_to_int_le(fh.read(2))
for _ in range(descriptor_count):
name_len = _bytes_to_int_le(fh.read(2))
name = self.__decode_string(fh.read(name_len))
value_type = _bytes_to_int_le(fh.read(2))
value_len = _bytes_to_int_le(fh.read(2))
value = fh.read(value_len)
field_name = mapping.get(name)
if field_name:
field_value = self.__decode_ext_desc(value_type, value)
self._set_field(field_name, field_value)
elif object_id == Wma.ASF_FILE_PROPERTY_OBJECT:
blocks = self.read_blocks(fh, [
('file_id', 16, False),
('file_size', 8, False),
('creation_date', 8, True),
('data_packets_count', 8, True),
('play_duration', 8, True),
('send_duration', 8, True),
('preroll', 8, True),
('flags', 4, False),
('minimum_data_packet_size', 4, True),
('maximum_data_packet_size', 4, True),
('maximum_bitrate', 4, False),
])
self.duration = blocks.get('play_duration') / float(10000000)
elif object_id == Wma.ASF_STREAM_PROPERTIES_OBJECT:
blocks = self.read_blocks(fh, [
('stream_type', 16, False),
('error_correction_type', 16, False),
('time_offset', 8, True),
('type_specific_data_length', 4, True),
('error_correction_data_length', 4, True),
('flags', 2, True),
('reserved', 4, False)
])
already_read = 0
if blocks['stream_type'] == Wma.STREAM_TYPE_ASF_AUDIO_MEDIA:
stream_info = self.read_blocks(fh, [
('codec_id_format_tag', 2, True),
('number_of_channels', 2, True),
('samples_per_second', 4, True),
('avg_bytes_per_second', 4, True),
('block_alignment', 2, True),
('bits_per_sample', 2, True),
])
self.samplerate = stream_info['samples_per_second']
self.bitrate = stream_info['avg_bytes_per_second'] * 8 / float(1000)
already_read = 16
fh.seek(blocks['type_specific_data_length'] - already_read, os.SEEK_CUR)
fh.seek(blocks['error_correction_data_length'], os.SEEK_CUR)
else:
                fh.seek(object_size - 24, os.SEEK_CUR) # skip over unknown object ids
|
EternityForest/KaithemAutomation
|
kaithem/src/thirdparty/tinytag/tinytag.py
|
Python
|
gpl-3.0
| 51,655
|
## HISTOGRAM PLOTTING FOR REYNOLDS AND ROSSBY NUMBERS
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
import time as tictoc
from netCDF4 import Dataset
import glob
import matplotlib
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
# OPTIONS
# runfolder = [0,6,10,14,15]
# print('Creating ReRo histogram plot from run '+str(runfolder))
#
# # read data
# runpath = path+'data/run%04i' % runfolder[0]
# D1 = np.load(runpath+'/analysis/Re_hist.npy').all()
#
# runpath = path+'data/run%04i' % runfolder[1]
# D2 = np.load(runpath+'/analysis/Re_hist.npy').all()
#
# runpath = path+'data/run%04i' % runfolder[2]
# D3 = np.load(runpath+'/analysis/Re_hist.npy').all()
#
# runpath = path+'data/run%04i' % runfolder[3]
# D4 = np.load(runpath+'/analysis/Re_hist.npy').all()
#
# runpath = path+'data/run%04i' % runfolder[4]
# D5 = np.load(runpath+'/analysis/Re_hist.npy').all()
# OPTIONS
runfolder = [3,10,13,12,14]
print('Creating ReRo histogram plot from run '+str(runfolder))
## read data
runpath = path+'data/newold/run%04i' % runfolder[0]
D1 = np.load(runpath+'/analysis/Re_hist.npy').all()
runpath = path+'data/newold/run%04i' % runfolder[1]
D2 = np.load(runpath+'/analysis/Re_hist.npy').all()
runpath = path+'stoch/data/run%04i' % runfolder[2]
D3 = np.load(runpath+'/analysis/Re_hist.npy').all()
runpath = path+'stoch/data/run%04i' % runfolder[3]
D4 = np.load(runpath+'/analysis/Re_hist.npy').all()
runpath = path+'stoch/data/run%04i' % runfolder[4]
D5 = np.load(runpath+'/analysis/Re_hist.npy').all()
## PLOT
fig,ax1 = plt.subplots(1,1,figsize=(8,6))
ax1.plot(D1['Re_mid'],D1['ReH'],'C0',label=r'Low resolution, $\Delta x = $30km',lw=3)
ax1.plot(D2['Re_mid'],D2['ReH']/16,'C2',label=r'High resolution, $\Delta x = $7.5km',lw=3)
ax1.plot(D3['Re_mid'],D3['ReH'],'C3',label=r'LR + weak backscatter',ls='--')
ax1.plot(D4['Re_mid'],D4['ReH'],'C1',label=r'LR + moderate backscatter',ls='--')
ax1.plot(D5['Re_mid'],D5['ReH'],'C5',label=r'LR + strong backscatter',ls='--')
ax1.axvline(np.log10(D1['Re_mean']),c='C0',ls='-',lw=2)
ax1.axvline(np.log10(D2['Re_mean']),c='C2',ls='-',lw=2)
ax1.axvline(np.log10(D3['Re_mean']),c='C3',ls='--')
ax1.axvline(np.log10(D4['Re_mean']),c='C1',ls='--')
ax1.axvline(np.log10(D5['Re_mean']),c='C5',ls='--')
ax1.text(np.log10(D1['Re_mean']),5e5,'mean($R_e$)',rotation=90,ha='right',color='k')
#ax1.set_yscale('log')
ax1.legend(loc=2)
ax1.set_xlim(-3,5)
ax1.set_ylim(1,3e6)
ax1.set_title('Reynolds number histogram',loc='left')
ax1.set_xlabel('log$_{10}(R_e)$')
ax1.set_ylabel(r'$N$')
plt.tight_layout()
plt.savefig(path+'compare/Re_hist_nobf.png')
plt.close(fig)
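## A minimal sketch (the dict layout is assumed from the plotting code above,
## not taken from the analysis scripts) of how a Re_hist.npy dictionary with
## keys 'Re_mid', 'ReH' and 'Re_mean' could be built from a field of Reynolds
## numbers Re:
def build_re_hist(Re, edges=np.linspace(-3, 5, 201)):
    ReH, _ = np.histogram(np.log10(Re), edges)       # counts per log10(Re) bin
    Re_mid = 0.5*(edges[:-1] + edges[1:])             # bin mid-points
    return {'Re_mid': Re_mid, 'ReH': ReH, 'Re_mean': Re.mean()}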
|
milankl/swm
|
calc/misc/Re_hist_plot.py
|
Python
|
gpl-3.0
| 2,796
|
##Add Advanced Labeling Fields To Selected Layer=name
from PyQt4.QtCore import QVariant
from qgis.utils import iface
from qgis.core import QgsField, QgsPalLayerSettings
# Function source: https://gis.stackexchange.com/a/216110/55741
def setColumnVisibility( layer, columnName, visible ):
config = layer.attributeTableConfig()
form = layer.editFormConfig()
form.setWidgetType(layer.fieldNameIndex(columnName), "Hidden")
columns = config.columns()
for column in columns:
if column.name == columnName:
column.hidden = not visible
break
config.setColumns( columns )
layer.setAttributeTableConfig( config )
lager = iface.activeLayer()
if not lager.isEditable():
iface.actionToggleEditing().trigger()
lager.dataProvider().addAttributes([QgsField("Text_X", QVariant.Double, 'double', 10, 5),
QgsField("Text_Y", QVariant.Double, 'double', 10, 5), QgsField("Text_Ri", QVariant.Double, 'double', 3, 2), QgsField("Text_Vis", QVariant.Int, "", 3)])
lager.updateFields()
lager.commitChanges()
lager.setDefaultValueExpression(lager.fieldNameIndex("Text_Vis"), '1')
palager = QgsPalLayerSettings()
palager.readFromLayer(lager)
palager.enabled = True
palager.setDataDefinedProperty(QgsPalLayerSettings.PositionX, True, False, '', "Text_X")
palager.setDataDefinedProperty(QgsPalLayerSettings.PositionY, True, False, '', "Text_Y")
palager.setDataDefinedProperty(QgsPalLayerSettings.Rotation, True, False, '', "Text_Ri")
palager.setDataDefinedProperty(QgsPalLayerSettings.Show, True, False, '', "Text_Vis")
setColumnVisibility( lager, 'Text_X', False)
setColumnVisibility( lager, 'Text_Y', False)
setColumnVisibility( lager, 'Text_Ri', False)
setColumnVisibility( lager, 'Text_Vis', False)
palager.writeToLayer(lager)
rader = lager.getFeatures()
for rad in rader:
rad['Text_Vis'] = 1
lager.updateFeature(rad)
lager.commitChanges()
lager.startEditing()
iface.mapCanvas().refresh()
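# A small sanity check (not part of the original script) that the labeling
# fields referenced by the data-defined properties were actually added:
expected_fields = ['Text_X', 'Text_Y', 'Text_Ri', 'Text_Vis']
missing = [name for name in expected_fields if lager.fieldNameIndex(name) < 0]
if missing:
    print 'Missing labeling fields: ' + ', '.join(missing)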
|
klakar/QGIS_resources
|
collections/Geosupportsystem/processing/enabelAdvancedLabels.py
|
Python
|
gpl-3.0
| 1,939
|
__author__ = 'mnowotka'
from django.db import models
from chembl_core_model.models import *
from chembl_core_db.db.models.abstractModel import ChemblCoreAbstractModel
from chembl_core_db.db.models.abstractModel import ChemblModelMetaClass
from django.utils import six
#-----------------------------------------------------------------------------------------------------------------------
class AssayType(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
assay_type = ChemblCharField(primary_key=True, max_length=1, help_text=u'Single character representing assay type')
assay_desc = ChemblCharField(max_length=250, blank=True, null=True, help_text=u'Description of assay type')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class RelationshipType(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
relationship_type = ChemblCharField(primary_key=True, max_length=1, help_text=u'Relationship_type flag used in the assay2target table')
relationship_desc = ChemblCharField(max_length=250, blank=True, null=True, help_text=u'Description of relationship_type flags')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class ConfidenceScoreLookup(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
confidence_score = ChemblPositiveIntegerField(primary_key=True, length=1, help_text=u'0-9 score showing level of confidence in assignment of the precise molecular target of the assay')
description = ChemblCharField(max_length=100, help_text=u'Description of the target types assigned with each score')
target_mapping = ChemblCharField(max_length=30, help_text=u'Short description of the target types assigned with each score')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class CurationLookup(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
curated_by = ChemblCharField(primary_key=True, max_length=32, help_text=u'Short description of the level of curation')
description = ChemblCharField(max_length=100, help_text=u'Definition of terms in the curated_by field.')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class ActivityStdsLookup(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
std_act_id = ChemblAutoField(primary_key=True, length=9, help_text=u'Primary key.')
standard_type = ChemblCharField(max_length=250, help_text=u'The standard_type that other published_types in the activities table have been converted to.')
definition = ChemblCharField(max_length=500, blank=True, null=True, help_text=u'A description/definition of the standard_type.')
standard_units = ChemblCharField(max_length=100, help_text=u'The units that are applied to this standard_type and to which other published_units are converted. Note a standard_type may have more than one allowable standard_unit and therefore multiple rows in this table.')
normal_range_min = models.DecimalField(blank=True, null=True, max_digits=24, decimal_places=12, help_text=u"The lowest value for this activity type that is likely to be genuine. This is only an approximation, so lower genuine values may exist, but it may be desirable to validate these before using them. For a given standard_type/units, values in the activities table below this threshold are flagged with a data_validity_comment of 'Outside typical range'.")
normal_range_max = models.DecimalField(blank=True, null=True, max_digits=24, decimal_places=12, help_text=u"The highest value for this activity type that is likely to be genuine. This is only an approximation, so higher genuine values may exist, but it may be desirable to validate these before using them. For a given standard_type/units, values in the activities table above this threshold are flagged with a data_validity_comment of 'Outside typical range'.")
class Meta(ChemblCoreAbstractModel.Meta):
unique_together = ( ("standard_type", "standard_units"), )
#-----------------------------------------------------------------------------------------------------------------------
class DataValidityLookup(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
data_validity_comment = ChemblCharField(primary_key=True, max_length=30, help_text=u'Primary key. Short description of various types of errors/warnings applied to values in the activities table.')
description = ChemblCharField(max_length=200, blank=True, null=True, help_text=u'Definition of the terms in the data_validity_comment field.')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class ParameterType(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
parameter_type = ChemblCharField(primary_key=True, max_length=20, help_text=u'Short name for the type of parameter associated with an assay')
description = ChemblCharField(max_length=2000, blank=True, null=True, help_text=u'Description of the parameter type')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class Assays(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
ASSAY_CATEGORY_CHOICES = (
('screening', 'screening'),
('panel', 'panel'),
('confirmatory', 'confirmatory'),
('summary', 'summary'),
('other', 'other'),
)
ASSAY_TEST_TYPE_CHOICES = (
('In vivo', 'In vivo'),
('In vitro', 'In vitro'),
('Ex vivo', 'Ex vivo'),
)
assay_id = ChemblAutoField(primary_key=True, length=9, help_text=u'Unique ID for the assay')
doc = models.ForeignKey(Docs, help_text=u'Foreign key to documents table')
description = ChemblCharField(max_length=4000, db_index=True, blank=True, null=True, help_text=u'Description of the reported assay')
assay_type = models.ForeignKey(AssayType, blank=True, null=True, db_column='assay_type', help_text=u'Assay classification, e.g. B=Binding assay, A=ADME assay, F=Functional assay')
assay_test_type = ChemblCharField(max_length=20, blank=True, null=True, choices=ASSAY_TEST_TYPE_CHOICES, help_text=u'Type of assay system (i.e., in vivo or in vitro)')
assay_category = ChemblCharField(max_length=20, blank=True, null=True, choices=ASSAY_CATEGORY_CHOICES, help_text=u'screening, confirmatory (ie: dose-response), summary, panel or other.')
assay_organism = ChemblCharField(max_length=250, blank=True, null=True, help_text=u'Name of the organism for the assay system (e.g., the organism, tissue or cell line in which an assay was performed). May differ from the target organism (e.g., for a human protein expressed in non-human cells, or pathogen-infected human cells).')
assay_tax_id = ChemblPositiveIntegerField(length=11, blank=True, null=True, help_text=u'NCBI tax ID for the assay organism.') # TODO: should be FK to OrganismClass.tax_id
assay_strain = ChemblCharField(max_length=200, blank=True, null=True, help_text=u'Name of specific strain of the assay organism used (where known)')
assay_tissue = ChemblCharField(max_length=100, blank=True, null=True, help_text=u'Name of tissue used in the assay system (e.g., for tissue-based assays) or from which the assay system was derived (e.g., for cell/subcellular fraction-based assays).')
assay_cell_type = ChemblCharField(max_length=100, blank=True, null=True, help_text=u'Name of cell type or cell line used in the assay system (e.g., for cell-based assays).')
assay_subcellular_fraction = ChemblCharField(max_length=100, blank=True, null=True, help_text=u'Name of subcellular fraction used in the assay system (e.g., microsomes, mitochondria).')
target = models.ForeignKey(TargetDictionary, blank=True, null=True, db_column='tid', help_text=u'Target identifier to which this assay has been mapped. Foreign key to target_dictionary. From ChEMBL_15 onwards, an assay will have only a single target assigned.')
relationship_type = models.ForeignKey(RelationshipType, blank=True, null=True, db_column='relationship_type', help_text=u'Flag indicating of the relationship between the reported target in the source document and the assigned target from TARGET_DICTIONARY. Foreign key to RELATIONSHIP_TYPE table.')
confidence_score = models.ForeignKey(ConfidenceScoreLookup, blank=True, null=True, db_column='confidence_score', help_text=u'Confidence score, indicating how accurately the assigned target(s) represents the actually assay target. Foreign key to CONFIDENCE_SCORE table. 0 means uncurated/unassigned, 1 = low confidence to 9 = high confidence.')
curated_by = models.ForeignKey(CurationLookup, blank=True, null=True, db_column='curated_by', help_text=u'Indicates the level of curation of the target assignment. Foreign key to curation_lookup table.')
activity_count = ChemblPositiveIntegerField(length=9, blank=True, null=True, help_text=u'Number of activities recorded for this assay')
assay_source = ChemblCharField(max_length=50, db_index=True, blank=True, null=True)
src = models.ForeignKey(Source, help_text=u'Foreign key to source table')
src_assay_id = ChemblCharField(max_length=50, blank=True, null=True, help_text=u'Identifier for the assay in the source database/deposition (e.g., pubchem AID)')
chembl = models.ForeignKey(ChemblIdLookup, unique=True, help_text=u'ChEMBL identifier for this assay (for use on web interface etc)')
updated_on = ChemblDateField(blank=True, null=True)
updated_by = ChemblCharField(max_length=250, blank=True, null=True)
orig_description = ChemblCharField(max_length=4000, blank=True, null=True)
a2t_complex = ChemblNullableBooleanField()
a2t_multi = ChemblNullableBooleanField()
mc_tax_id = ChemblPositiveIntegerField(length=11, blank=True, null=True)
mc_organism = ChemblCharField(max_length=100, blank=True, null=True)
mc_target_type = ChemblCharField(max_length=25, blank=True, null=True)
mc_target_name = ChemblCharField(max_length=4000, blank=True, null=True)
mc_target_accession = ChemblCharField(max_length=255, blank=True, null=True)
a2t_assay_tax_id = ChemblPositiveIntegerField(length=11, blank=True, null=True)
a2t_assay_organism = ChemblCharField(max_length=250, blank=True, null=True)
a2t_updated_on = ChemblDateField(blank=True, null=True)
a2t_updated_by = ChemblCharField(max_length=100, blank=True, null=True)
cell = models.ForeignKey(CellDictionary, blank=True, null=True, help_text=u'Foreign key to cell dictionary. The cell type or cell line used in the assay')
bao_format = ChemblCharField(max_length=11, db_index=True, blank=True, null=True, help_text=u'ID for the corresponding format type in BioAssay Ontology (e.g., cell-based, biochemical, organism-based etc)')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class Activities(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
MANUAL_CURATION_FLAG_CHOICES = (
(0, '0'),
(1, '1'),
(2, '2'),
)
STANDARD_RELATION_CHOICES = (
('>', '>'),
('<', '<'),
('=', '='),
('~', '~'),
('<=', '<='),
('>=', '>='),
('<<', '<<'),
('>>', '>>'),
)
activity_id = ChemblAutoField(primary_key=True, length=11, help_text=u'Unique ID for the activity row')
assay = models.ForeignKey(Assays, help_text=u'Foreign key to the assays table (containing the assay description)')
doc = models.ForeignKey(Docs, blank=True, null=True, help_text=u'Foreign key to documents table (for quick lookup of publication details - can also link to documents through compound_records or assays table)')
record = models.ForeignKey(CompoundRecords, help_text=u'Foreign key to the compound_records table (containing information on the compound tested)')
molecule = models.ForeignKey(MoleculeDictionary, blank=True, null=True, db_column='molregno', help_text=u'Foreign key to compounds table (for quick lookup of compound structure - can also link to compounds through compound_records table)')
standard_relation = ChemblCharField(max_length=50, db_index=True, blank=True, null=True, novalidate=True, choices=STANDARD_RELATION_CHOICES, help_text=u'Symbol constraining the activity value (e.g. >, <, =)')
published_value = ChemblNoLimitDecimalField(db_index=True, blank=True, null=True, help_text=u'Datapoint value as it appears in the original publication.') # TODO: NUMBER in Oracle
published_units = ChemblCharField(max_length=100, db_index=True, blank=True, null=True, help_text=u'Units of measurement as they appear in the original publication')
standard_value = ChemblNoLimitDecimalField(db_index=True, blank=True, null=True, help_text=u'Same as PUBLISHED_VALUE but transformed to common units: e.g. mM concentrations converted to nM.') # TODO: NUMBER in Oracle
standard_units = ChemblCharField(max_length=100, db_index=True, blank=True, null=True, help_text=u"Selected 'Standard' units for data type: e.g. concentrations are in nM.")
standard_flag = ChemblNullableBooleanField(help_text=u'Shows whether the standardised columns have been curated/set (1) or just default to the published data (0).')
standard_type = ChemblCharField(max_length=250, db_index=True, blank=True, null=True, help_text=u'Standardised version of the published_activity_type (e.g. IC50 rather than Ic-50/Ic50/ic50/ic-50)')
updated_by = ChemblCharField(max_length=100, blank=True, null=True)
updated_on = ChemblDateField(blank=True, null=True)
activity_comment = ChemblCharField(max_length=4000, blank=True, null=True, help_text=u"Describes non-numeric activities i.e. 'Slighty active', 'Not determined'")
published_type = ChemblCharField(max_length=250, db_index=True, blank=True, null=True, help_text=u'Type of end-point measurement: e.g. IC50, LD50, %inhibition etc, as it appears in the original publication')
manual_curation_flag = ChemblPositiveIntegerField(length=1, blank=False, null=True, default=0, choices=MANUAL_CURATION_FLAG_CHOICES) # blank is false because it has default value
data_validity_comment = models.ForeignKey(DataValidityLookup, blank=True, null=True, db_column='data_validity_comment', help_text=u"Comment reflecting whether the values for this activity measurement are likely to be correct - one of 'Manually validated' (checked original paper and value is correct), 'Potential author error' (value looks incorrect but is as reported in the original paper), 'Outside typical range' (value seems too high/low to be correct e.g., negative IC50 value), 'Non standard unit type' (units look incorrect for this activity type).")
potential_duplicate = ChemblNullableBooleanField(help_text=u'Indicates whether the value is likely to be a repeat citation of a value reported in a previous ChEMBL paper, rather than a new, independent measurement.') # TODO: this has only two states: (null, 1), change it to (0,1)
published_relation = ChemblCharField(max_length=50, db_index=True, blank=True, null=True, help_text=u'Symbol constraining the activity value (e.g. >, <, =), as it appears in the original publication')
original_activity_id = ChemblPositiveIntegerField(length=11, blank=True, null=True) # TODO: should that be FK referencing Activities in future?
pchembl_value = models.DecimalField(db_index=True, blank=True, null=True, max_digits=4, decimal_places=2, help_text=u'Negative log of selected concentration-response activity values (IC50/EC50/XC50/AC50/Ki/Kd/Potency)')
bao_endpoint = ChemblCharField(max_length=11, blank=True, null=True, help_text=u'ID for the corresponding result type in BioAssay Ontology (based on standard_type)')
uo_units = ChemblCharField(max_length=10, blank=True, null=True, help_text=u'ID for the corresponding unit in Unit Ontology (based on standard_units)')
qudt_units = ChemblCharField(max_length=70, blank=True, null=True, help_text=u'ID for the corresponding unit in QUDT Ontology (based on standard_units)')
class Meta(ChemblCoreAbstractModel.Meta):
pass
#-----------------------------------------------------------------------------------------------------------------------
class AssayParameters(six.with_metaclass(ChemblModelMetaClass, ChemblCoreAbstractModel)):
assay_param_id = ChemblPositiveIntegerField(primary_key=True, length=9, help_text=u'Numeric primary key')
assay = models.ForeignKey(Assays, help_text=u'Foreign key to assays table. The assay to which this parameter belongs')
parameter_type = models.ForeignKey(ParameterType, db_column='parameter_type', help_text=u'Foreign key to parameter_type table, defining the meaning of the parameter')
parameter_value = ChemblCharField(max_length=2000, help_text=u'The value of the particular parameter')
class Meta(ChemblCoreAbstractModel.Meta):
unique_together = ( ("assay", "parameter_type"), )
#-----------------------------------------------------------------------------------------------------------------------
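# A minimal usage sketch (assumes a populated database behind the models above;
# not part of the ChEMBL schema itself). Field and relation names are taken from
# the declarations in this module:
def example_nM_ic50_activities(limit=10):
    """Return up to `limit` activities reported as IC50 values in nM."""
    qs = (Activities.objects
          .filter(standard_type='IC50', standard_units='nM')
          .exclude(standard_value__isnull=True))
    return list(qs[:limit])
#-----------------------------------------------------------------------------------------------------------------------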
|
thesgc/chembiohub_ws
|
chembl_core_model/models/experimentalData.py
|
Python
|
gpl-3.0
| 17,784
|
"""
This class maps abundances from a MESA profile to a reduced set for FLASH.
Copyright 2015 Donald E. Willcox
This file is part of mesa2flash.
mesa2flash is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
mesa2flash is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with mesa2flash. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from collections import OrderedDict
# Map abundances to set of C12, O16, Ne20, Ne22 for flash!
class MapMesaComposition:
def __init__(self):
self.fmap = OrderedDict([('c12',np.array([])),
('o16',np.array([])),
('ne20',np.array([])),
('ne22',np.array([]))])
def getmap(self,ms):
## Maintain constant C12 abundance
self.fmap['c12'] = ms['c12']
## Determine Ne22 abundance from model Ye
self.fmap['ne22'] = 22.0*(0.5-ms['ye'])
## Normalization requires Ne20 + O16 abundances = xother
xother = 1.0 - self.fmap['c12'] - self.fmap['ne22']
## Ne20/O16 ratio remains constant
rneo = ms['ne20']/ms['o16']
## Use rneo and xother constraints to find Ne20 and O16 abundances
self.fmap['o16'] = xother/(rneo+1.0)
self.fmap['ne20'] = rneo*xother/(rneo+1.0)
for k in self.fmap.keys():
ms[k] = self.fmap[k]
return ms
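# A minimal usage sketch (synthetic numbers, not a real MESA profile): getmap()
# keeps c12, sets ne22 from ye, and splits the remainder between o16 and ne20
# at the original Ne20/O16 ratio, so the four mass fractions sum to one.
if __name__ == '__main__':
    ms = {'c12': np.array([0.4]), 'o16': np.array([0.45]),
          'ne20': np.array([0.1]), 'ye': np.array([0.499])}
    mapped = MapMesaComposition().getmap(ms)
    print(mapped['c12'] + mapped['o16'] + mapped['ne20'] + mapped['ne22'])  # ~[ 1.]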
|
jselsing/mesa2flash
|
MapMesaComposition.py
|
Python
|
gpl-3.0
| 1,668
|
# models.py -- nothing to see here
|
Fizzmint/django-hideshowpassword
|
hideshowpassword/models.py
|
Python
|
gpl-3.0
| 35
|
# auto-generated test file
import unittest
import umlgen.Specific.STK.StkDataTypes.StkS8
# Start of user code imports
# End of user code
class StkS8Test(unittest.TestCase):
def setUp(self):
# self._testInstance = umlgen.Specific.STK.StkDataTypes.StkS8.StkS8()
# Start of user code setUp
self._testInstance = umlgen.Specific.STK.StkDataTypes.StkS8.StkS8()
# End of user code
pass
def tearDown(self):
# Start of user code tearDown
# End of user code
pass
def test___init__(self):
# Start of user code __init__
self.assertEqual(self._testInstance.Name, 'S8')
self.assertEqual(self._testInstance.lowerLimit, -126)
self.assertEqual(self._testInstance.upperLimit, 127)
# End of user code
pass
if __name__ == '__main__':
unittest.main()
|
dmanev/ArchExtractor
|
ArchExtractor/tests/testgen/Specific/STK/StkDataTypes/StkS8_test.py
|
Python
|
gpl-3.0
| 863
|
from bs4 import BeautifulSoup
import requests
import re
import urllib2
import os
import cookielib
import json
def get_soup(url,header):
return BeautifulSoup(urllib2.urlopen(urllib2.Request(url,headers=header)),'html.parser')
query = "porn"#raw_input("porn")# you can change the query for the image here
image_type="ActiOn"
query= query.split()
query='+'.join(query)
url="https://www.google.co.in/search?q="+query+"&source=lnms&tbm=isch"
print url
#add the directory for your image here
DIR="Pictures"
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
}
soup = get_soup(url,header)
file_h = open(os.path.join(".", "pagina.html"), 'wb')
file_h.write(urllib2.urlopen(urllib2.Request(url,headers=header)).read())
file_h.close()
print("------------------------")
print(soup)
print("------------------------")
ActualImages=[]# contains the link for Large original images, type of image
for a in soup.find_all("div",{"class":"rg_meta"}):
print(a)
    link, Type = json.loads(a.text)["ou"], json.loads(a.text)["ity"]
ActualImages.append((link,Type))
print "there are total" , len(ActualImages),"images"
if not os.path.exists(DIR):
os.mkdir(DIR)
DIR = os.path.join(DIR, query.split()[0])
if not os.path.exists(DIR):
os.mkdir(DIR)
###print images
for i , (img , Type) in enumerate( ActualImages):
try:
        req = urllib2.Request(img, headers=header)
raw_img = urllib2.urlopen(req).read()
cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1
print cntr
if len(Type)==0:
f = open(os.path.join(DIR , image_type + "_"+ str(cntr)+".jpg"), 'wb')
else :
f = open(os.path.join(DIR , image_type + "_"+ str(cntr)+"."+Type), 'wb')
f.write(raw_img)
f.close()
except Exception as e:
print "could not load : "+img
print e
|
Piltra/TwitterBot
|
tests/pydownloader.py
|
Python
|
gpl-3.0
| 1,951
|
#!/usr/bin/python
# Copyright (C) 2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo Cortazar <dizquierdo@bitergia.com>
#
from grimoirelib_metadata.data_source import DataSource
from grimoirelib_settings.settings import Settings
class SCM(DataSource):
""" Class that adds and filters data in a CVSAnalY-type database
"""
# Meta table name
METATABLE_NAME = Settings.SCM_METATABLE_NAME
# Constants used to specify new columns to add to the metatable
DATA_ORGANIZATIONS = "organizations"
DATA_COUNTRIES = "countries"
DATA_DOMAINS = "domains"
DATA_BRANCHES = "branches"
DATA_LINES = "lines"
DATA_BOTS = "bots"
DATA_MERGES = "merges"
# Constants used to filter information from the metatable
FILTER_MERGES = "merges"
FILTER_START_DATE = "startdate"
FILTER_END_DATE = "enddate"
def __init__(self, options):
""" Init the SCM class
:param options: config file options
"""
# Initializing the dictionary with addition of new columns
        # and their corresponding private methods
self.new_columns = {}
self.new_columns[SCM.DATA_ORGANIZATIONS] = self._add_column_organizations
self.new_columns[SCM.DATA_COUNTRIES] = self._add_column_countries
self.new_columns[SCM.DATA_DOMAINS] = self._add_column_domains
self.new_columns[SCM.DATA_BRANCHES] = self._add_column_branches
self.new_columns[SCM.DATA_LINES] = self._add_column_lines
self.new_columns[SCM.DATA_BOTS] = self._add_column_bots
self.new_columns[SCM.DATA_MERGES] = self._add_column_merges
# Initializing the dictionary with filters to be applied
# and their methods
self.filters = {}
self.filters[SCM.FILTER_MERGES] = self._add_filter_merges
self.filters[SCM.FILTER_START_DATE] = self._add_filter_startdate
self.filters[SCM.FILTER_END_DATE] = self._add_filter_enddate
# Initializing database options
self.scm_db = options["databases"]["scm"]
self.identities_db = options["databases"]["identities"]
self.user_db = options["databases_access"]["user"]
self.password_db = options["databases_access"]["password"]
# By default, if the CVSAnalY tool schema is used, the following fields
# are used to build the basic metatable:
# scmlog.id, scmlog.author_id, scmlog.author_date, scmlog.repository_id
self.db, self.cursor = self._init_metatable()
print "Init database correct"
def _add_column_organizations(self):
""" This private method adds a new column with organizations info.
This takes into account the initial and final date of enrollment of a
developer in a company.
Information is found in the 'enrollments' table, created by SortingHat.
"""
query = """ ALTER TABLE %s
ADD organizations INTEGER(11)
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
query = """UPDATE %s sm,
people_uidentities pui,
%s.enrollments enr
SET sm.organizations = enr.organization_id
WHERE sm.author = pui.people_id AND
pui.uuid = enr.uuid AND
sm.date >= enr.start and sm.date < enr.end
""" % (SCM.METATABLE_NAME, self.identities_db)
self.cursor.execute(query)
def _add_column_countries(self):
""" This private method adds a new column with countries info.
Information is found in the 'profiles' table, created by SortingHat.
"""
query = """ ALTER TABLE %s
ADD countries VARCHAR(2)
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
query = """UPDATE %s sm,
people_uidentities pui,
%s.profiles pro
SET sm.countries = pro.country_code
WHERE sm.author = pui.people_id AND
pui.uuid = pro.uuid
""" % (SCM.METATABLE_NAME, self.identities_db)
self.cursor.execute(query)
def _add_column_domains(self):
pass
def _add_column_branches(self):
pass
def _add_column_lines(self):
""" This private method adds two new columns: added_lines and removed_lines
Information is found in the 'commits_lines' table, created by CVSAnalY
"""
query = """ ALTER TABLE %s
ADD added_lines INTEGER(11) DEFAULT 0,
ADD removed_lines INTEGER(11) DEFAULT 0
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
query = """ UPDATE %s sm,
commits_lines cl
SET sm.added_lines = cl.added,
sm.removed_lines = cl.removed
WHERE cl.commit_id = sm.commit
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
def _add_column_merges(self):
""" This private method adds a new column with info checking if
a commit is a merge or not.
A commit is defined as a merge when this does not appear in the
'actions' table defined by CVSAnalY.
A value of 1 means that this commit is detected as merge.
"""
query = """ ALTER TABLE %s
ADD is_merge TINYINT(1)
DEFAULT 1
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
query = """ UPDATE %s sm
SET sm.is_merge = 0
WHERE sm.commit IN
(SELECT distinct(commit_id) from actions)
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
def _add_column_bots(self):
""" This private method adds a new column with info checking if
a commit was done by a bot.
A value of 1 means that this commit was done by a bot. Otherwise, a
0 would be found.
Information is found in the 'profiles' table, created by SortingHat.
"""
query = """ ALTER TABLE %s
ADD is_bot TINYINT(1)
DEFAULT 0
""" % (SCM.METATABLE_NAME)
self.cursor.execute(query)
query = """ UPDATE %s sm,
people_uidentities pui,
%s.profiles pro
SET sm.is_bot = pro.is_bot
WHERE sm.author = pui.people_id AND
pui.uuid = pro.uuid
""" % (SCM.METATABLE_NAME, self.identities_db)
self.cursor.execute(query)
def add_annotation(self, metric):
""" An new annotation adds a new column with the specified 'metric'
:param metric: contains the name of the new column to be added
"""
self.new_columns[metric]()
def _add_filter_merges(self, values):
pass
def _add_filter_startdate(self, startdate):
pass
def _add_filter_enddate(self, enddate):
pass
def add_filter(self, filter_, values = None):
""" Add a new filter to the already created metadata.
        This type of filter should be used as a 'global' one that will
        affect all of the metrics to be analyzed. An example of this type
        of filter is removing 'merges' from an analysis.
:param filter_: contains the type of filter to be applied
:param values: contains the values to be applied to such filter_
"""
self.filters[filter_](values)
def _init_metatable(self):
db, cursor = self._db_connection(self.user_db, self.password_db, self.scm_db)
query = """ CREATE TABLE %s as
SELECT id as commit,
author_id as author,
author_date as date,
repository_id as repository
FROM scmlog
""" % (SCM.METATABLE_NAME)
cursor.execute(query)
query = "ALTER TABLE %s ENGINE = MYISAM" % (SCM.METATABLE_NAME)
cursor.execute(query)
return db, cursor
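# A minimal usage sketch (hypothetical connection values; the expected options
# layout is inferred from __init__ above, and _db_connection() comes from the
# DataSource base class):
#
#   options = {"databases": {"scm": "cvsanaly_db",
#                            "identities": "sortinghat_db"},
#              "databases_access": {"user": "root", "password": ""}}
#   scm = SCM(options)
#   scm.add_annotation(SCM.DATA_ORGANIZATIONS)  # adds the 'organizations' column
#   scm.add_annotation(SCM.DATA_LINES)          # adds added_lines/removed_lines
#   scm.add_filter(SCM.FILTER_MERGES)           # currently a no-op placeholder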
|
dicortazar/grimoirelib-metadata
|
grimoirelib_metadata/scm.py
|
Python
|
gpl-3.0
| 9,002
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Tag.verbose_title'
db.add_column(u'tags_tag', 'verbose_title',
self.gf('django.db.models.fields.CharField')(default='', max_length=50, blank=True),
keep_default=False)
# Adding field 'Tag.description'
db.add_column(u'tags_tag', 'description',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Tag.verbose_title'
db.delete_column(u'tags_tag', 'verbose_title')
# Deleting field 'Tag.description'
db.delete_column(u'tags_tag', 'description')
models = {
u'tags.tag': {
'Meta': {'object_name': 'Tag'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'title': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'verbose_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['tags']
|
urlist/devcharm
|
tags/migrations/0002_auto__add_field_tag_verbose_title__add_field_tag_description.py
|
Python
|
gpl-3.0
| 1,651
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Stefan Brüns <stefan.bruens@rwth-aachen.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
import struct
class SamplerateError(Exception):
pass
class pcap_usb_pkt():
# Linux usbmon format, see Documentation/usb/usbmon.txt
h = b'\x00\x00\x00\x00' # ID part 1
h += b'\x00\x00\x00\x00' # ID part 2
h += b'C' # 'S'ubmit / 'C'omplete / 'E'rror
h += b'\x03' # ISO (0), Intr, Control, Bulk (3)
h += b'\x00' # Endpoint
h += b'\x00' # Device address
h += b'\x00\x00' # Bus number
h += b'-' # Setup tag - 0: Setup present, '-' otherwise
h += b'<' # Data tag - '<' no data, 0 otherwise
# Timestamp
h += b'\x00\x00\x00\x00' # TS seconds part 1
h += b'\x00\x00\x00\x00' # TS seconds part 2
h += b'\x00\x00\x00\x00' # TS useconds
#
h += b'\x00\x00\x00\x00' # Status 0: OK
h += b'\x00\x00\x00\x00' # URB length
h += b'\x00\x00\x00\x00' # Data length
# Setup packet data, valid if setup tag == 0
h += b'\x00' # bmRequestType
h += b'\x00' # bRequest
h += b'\x00\x00' # wValue
h += b'\x00\x00' # wIndex
h += b'\x00\x00' # wLength
#
h += b'\x00\x00\x00\x00' # ISO/interrupt interval
h += b'\x00\x00\x00\x00' # ISO start frame
h += b'\x00\x00\x00\x00' # URB flags
h += b'\x00\x00\x00\x00' # Number of ISO descriptors
def __init__(self, req, ts, is_submit):
self.header = bytearray(pcap_usb_pkt.h)
self.data = b''
self.set_urbid(req['id'])
self.set_urbtype('S' if is_submit else 'C')
self.set_timestamp(ts)
self.set_addr_ep(req['addr'], req['ep'])
if req['type'] in ('SETUP IN', 'SETUP OUT'):
self.set_transfertype(2) # Control
self.set_setup(req['setup_data'])
        if req['type'] == 'BULK IN':
self.set_addr_ep(req['addr'], 0x80 | req['ep'])
self.set_data(req['data'])
def set_urbid(self, urbid):
self.header[4:8] = struct.pack('>I', urbid)
def set_urbtype(self, urbtype):
self.header[8] = ord(urbtype)
def set_transfertype(self, transfertype):
self.header[9] = transfertype
def set_addr_ep(self, addr, ep):
self.header[11] = addr
self.header[10] = ep
def set_timestamp(self, ts):
self.timestamp = ts
self.header[20:24] = struct.pack('>I', ts[0]) # seconds
self.header[24:28] = struct.pack('>I', ts[1]) # microseconds
def set_data(self, data):
self.data = data
self.header[15] = 0
self.header[36:40] = struct.pack('>I', len(data))
def set_setup(self, data):
self.header[14] = 0
self.header[40:48] = data
def packet(self):
return bytes(self.header) + bytes(self.data)
def record_header(self):
# See https://wiki.wireshark.org/Development/LibpcapFileFormat.
(secs, usecs) = self.timestamp
h = struct.pack('>I', secs) # TS seconds
h += struct.pack('>I', usecs) # TS microseconds
# No truncation, so both lengths are the same.
h += struct.pack('>I', len(self)) # Captured len (usb hdr + data)
h += struct.pack('>I', len(self)) # Original len
return h
def __len__(self):
return 64 + len(self.data)
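# A minimal sketch (assumed usage, not part of the decoder itself) of dumping
# pcap_usb_pkt records into a standalone .pcap file readable by Wireshark. The
# global header mirrors pcap_global_header() in the Decoder class below:
# big-endian magic, pcap version 2.4, snaplen 0xffffffff and linktype 220
# (LINKTYPE_USB_LINUX_MMAPPED).
def write_pcap(filename, packets):
    global_header = b'\xa1\xb2\xc3\xd4' + struct.pack('>HH', 2, 4)
    global_header += struct.pack('>iIII', 0, 0, 0xffffffff, 220)
    with open(filename, 'wb') as f:
        f.write(global_header)
        for pkt in packets:  # iterable of pcap_usb_pkt instances
            f.write(pkt.record_header())
            f.write(pkt.packet())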
class Decoder(srd.Decoder):
api_version = 2
id = 'usb_request'
name = 'USB request'
longname = 'Universal Serial Bus (LS/FS) transaction/request'
desc = 'USB (low-speed and full-speed) transaction/request protocol.'
license = 'gplv2+'
inputs = ['usb_packet']
outputs = ['usb_request']
annotations = (
('request-setup-read', 'Setup: Device-to-host'),
('request-setup-write', 'Setup: Host-to-device'),
('request-bulk-read', 'Bulk: Device-to-host'),
('request-bulk-write', 'Bulk: Host-to-device'),
('errors', 'Unexpected packets'),
)
annotation_rows = (
('request', 'USB requests', tuple(range(4))),
('errors', 'Errors', (4,)),
)
binary = (
('pcap', 'PCAP format'),
)
def __init__(self):
self.samplerate = None
self.request = {}
self.request_id = 0
self.transaction_state = 'IDLE'
self.ss_transaction = None
self.es_transaction = None
self.transaction_ep = None
self.transaction_addr = None
self.wrote_pcap_header = False
def putr(self, ss, es, data):
self.put(ss, es, self.out_ann, data)
def putb(self, ts, data):
self.put(ts, ts, self.out_binary, data)
def pcap_global_header(self):
# See https://wiki.wireshark.org/Development/LibpcapFileFormat.
h = b'\xa1\xb2\xc3\xd4' # Magic, indicate microsecond ts resolution
h += b'\x00\x02' # Major version 2
h += b'\x00\x04' # Minor version 4
h += b'\x00\x00\x00\x00' # Correction vs. UTC, seconds
h += b'\x00\x00\x00\x00' # Timestamp accuracy
h += b'\xff\xff\xff\xff' # Max packet len
# LINKTYPE_USB_LINUX_MMAPPED 220
# Linux usbmon format, see Documentation/usb/usbmon.txt.
h += b'\x00\x00\x00\xdc' # Link layer
return h
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
self.secs_per_sample = float(1) / float(self.samplerate)
def start(self):
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_ann = self.register(srd.OUTPUT_ANN)
def handle_transfer(self):
request_started = 0
request_end = self.handshake in ('ACK', 'STALL', 'timeout')
ep = self.transaction_ep
addr = self.transaction_addr
if not (addr, ep) in self.request:
self.request[(addr, ep)] = {'setup_data': [], 'data': [],
'type': None, 'ss': self.ss_transaction, 'es': None,
'id': self.request_id, 'addr': addr, 'ep': ep}
self.request_id += 1
request_started = 1
request = self.request[(addr,ep)]
# BULK or INTERRUPT transfer
if request['type'] in (None, 'BULK IN') and self.transaction_type == 'IN':
request['type'] = 'BULK IN'
request['data'] += self.transaction_data
request['es'] = self.es_transaction
self.handle_request(request_started, request_end)
elif request['type'] in (None, 'BULK OUT') and self.transaction_type == 'OUT':
request['type'] = 'BULK OUT'
request['data'] += self.transaction_data
request['es'] = self.es_transaction
self.handle_request(request_started, request_end)
# CONTROL, SETUP stage
elif request['type'] is None and self.transaction_type == 'SETUP':
request['setup_data'] = self.transaction_data
request['wLength'] = struct.unpack('<H',
bytes(self.transaction_data[6:8]))[0]
if self.transaction_data[0] & 0x80:
request['type'] = 'SETUP IN'
self.handle_request(1, 0)
else:
request['type'] = 'SETUP OUT'
self.handle_request(request['wLength'] == 0, 0)
# CONTROL, DATA stage
elif request['type'] == 'SETUP IN' and self.transaction_type == 'IN':
request['data'] += self.transaction_data
elif request['type'] == 'SETUP OUT' and self.transaction_type == 'OUT':
request['data'] += self.transaction_data
if request['wLength'] == len(request['data']):
self.handle_request(1, 0)
# CONTROL, STATUS stage
elif request['type'] == 'SETUP IN' and self.transaction_type == 'OUT':
request['es'] = self.es_transaction
self.handle_request(0, request_end)
elif request['type'] == 'SETUP OUT' and self.transaction_type == 'IN':
request['es'] = self.es_transaction
self.handle_request(0, request_end)
else:
return
return
def ts_from_samplenum(self, sample):
ts = float(sample) * self.secs_per_sample
return (int(ts), int((ts % 1.0) * 1e6))
def write_pcap_header(self):
if not self.wrote_pcap_header:
self.put(0, 0, self.out_binary, [0, self.pcap_global_header()])
self.wrote_pcap_header = True
def request_summary(self, request):
s = '['
if request['type'] in ('SETUP IN', 'SETUP OUT'):
for b in request['setup_data']:
s += ' %02X' % b
s += ' ]['
for b in request['data']:
s += ' %02X' % b
s += ' ] : %s' % self.handshake
return s
def handle_request(self, request_start, request_end):
if request_start != 1 and request_end != 1:
return
self.write_pcap_header()
ep = self.transaction_ep
addr = self.transaction_addr
request = self.request[(addr, ep)]
ss, es = request['ss'], request['es']
if request_start == 1:
# Issue PCAP 'SUBMIT' packet.
ts = self.ts_from_samplenum(ss)
pkt = pcap_usb_pkt(request, ts, True)
self.putb(ss, [0, pkt.record_header()])
self.putb(ss, [0, pkt.packet()])
if request_end == 1:
# Write annotation.
summary = self.request_summary(request)
if request['type'] == 'SETUP IN':
self.putr(ss, es, [0, ['SETUP in: %s' % summary]])
elif request['type'] == 'SETUP OUT':
self.putr(ss, es, [1, ['SETUP out: %s' % summary]])
elif request['type'] == 'BULK IN':
self.putr(ss, es, [2, ['BULK in: %s' % summary]])
elif request['type'] == 'BULK OUT':
self.putr(ss, es, [3, ['BULK out: %s' % summary]])
# Issue PCAP 'COMPLETE' packet.
ts = self.ts_from_samplenum(es)
pkt = pcap_usb_pkt(request, ts, False)
self.putb(ss, [0, pkt.record_header()])
self.putb(ss, [0, pkt.packet()])
del self.request[(addr, ep)]
def decode(self, ss, es, data):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
ptype, pdata = data
# We only care about certain packet types for now.
        if ptype != 'PACKET':
return
pcategory, pname, pinfo = pdata
if pcategory == 'TOKEN':
if pname == 'SOF':
return
if self.transaction_state == 'TOKEN RECEIVED':
transaction_timeout = self.es_transaction
# Token length is 35 bits, timeout is 16..18 bit times
# (USB 2.0 7.1.19.1).
transaction_timeout += int((self.es_transaction - self.ss_transaction) / 2)
if ss > transaction_timeout:
self.es_transaction = transaction_timeout
self.handshake = 'timeout'
self.handle_transfer()
self.transaction_state = 'IDLE'
if self.transaction_state != 'IDLE':
self.putr(ss, es, [4, ['ERR: received %s token in state %s' %
(pname, self.transaction_state)]])
return
sync, pid, addr, ep, crc5 = pinfo
self.transaction_data = []
self.ss_transaction = ss
self.es_transaction = es
self.transaction_state = 'TOKEN RECEIVED'
self.transaction_ep = ep
self.transaction_addr = addr
self.transaction_type = pname # IN OUT SETUP
elif pcategory == 'DATA':
if self.transaction_state != 'TOKEN RECEIVED':
self.putr(ss, es, [4, ['ERR: received %s token in state %s' %
(pname, self.transaction_state)]])
return
self.transaction_data = pinfo[2]
self.transaction_state = 'DATA RECEIVED'
elif pcategory == 'HANDSHAKE':
if self.transaction_state not in ('TOKEN RECEIVED', 'DATA RECEIVED'):
self.putr(ss, es, [4, ['ERR: received %s token in state %s' %
(pname, self.transaction_state)]])
return
self.handshake = pname
self.transaction_state = 'IDLE'
self.es_transaction = es
self.handle_transfer()
elif pname == 'PRE':
return
else:
self.putr(ss, es, [4, ['ERR: received unhandled %s token in state %s' %
(pname, self.transaction_state)]])
return
|
zeldin/libsigrokdecode
|
decoders/usb_request/pd.py
|
Python
|
gpl-3.0
| 13,475
|
# minqlbot - A Quake Live server administrator bot.
# Copyright (C) Mino <mino@minomino.org>
# This file is part of minqlbot.
# minqlbot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# minqlbot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with minqlbot. If not, see <http://www.gnu.org/licenses/>.
import minqlbot
import time
class motd(minqlbot.Plugin):
def __init__(self):
super().__init__()
self.add_hook("player_connect", self.handle_player_connect, minqlbot.PRI_LOWEST)
self.add_command("motd", self.cmd_motd, 4, usage="(set <motd> | add <motd> | clear | get)")
def handle_player_connect(self, player):
"""Send the message of the day to the player in a tell.
This should be set to lowest priority so that we don't execute anything if "ban" or
a similar plugin determines the player should be kicked.
"""
c = self.db_query("SELECT message FROM Motd ORDER BY time DESC LIMIT 1")
row = c.fetchone()
if row and row["message"]:
self.delay(15, self.tell_motd, args=(player, row["message"]))
def cmd_motd(self, player, msg, channel):
if len(msg) < 2:
return minqlbot.RET_USAGE
# NEW
elif msg[1].lower() == "set" or msg[1].lower() == "new":
new_motd = " ".join(msg[2:])
self.db_query("INSERT INTO Motd VALUES(?, ?, ?)", int(time.time()), player.clean_name.lower(), new_motd)
self.db_commit()
channel.reply("^7You have successfully set a new MOTD.")
# ADD
elif msg[1].lower() == "add":
c = self.db_query("SELECT message FROM Motd ORDER BY time DESC LIMIT 1")
row = c.fetchone()
if row and row["message"]:
add_motd = "{} {}".format(row["message"], " ".join(msg[2:]))
self.db_query("INSERT INTO Motd VALUES(?, ?, ?)", int(time.time()), player.clean_name.lower(), add_motd)
self.db_commit()
channel.reply("^7The current MOTD has been successfully updated.")
else:
channel.reply("^7There is no active MOTD.")
# CLEAR
elif msg[1].lower() == "clear":
self.db_query("INSERT INTO Motd VALUES(?, ?, ?)", int(time.time()), player.clean_name.lower(), "")
self.db_commit()
channel.reply("^7You have successfully cleared the MOTD.")
# GET
elif msg[1].lower() == "get":
c = self.db_query("SELECT message FROM Motd ORDER BY time DESC LIMIT 1")
row = c.fetchone()
if row and row["message"]:
channel.reply("^7The current MOTD: ^2{}".format(row["message"]))
else:
channel.reply("^7There is no active MOTD.")
def tell_motd(self, player, msg):
self.tell("^6*** ^7Message of the Day ^6***", player)
self.tell(msg, player)
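# The db_query() calls above assume a three-column Motd table; this layout is
# inferred from the INSERT/SELECT statements, not taken from minqlbot's own
# schema. A standalone sqlite3 sketch that creates a compatible table:
#
#   import sqlite3
#   con = sqlite3.connect('minqlbot.db')  # hypothetical database file name
#   con.execute("CREATE TABLE IF NOT EXISTS Motd "
#               "(time INTEGER, name TEXT, message TEXT)")
#   con.commit()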
|
razers3dge/Modified-QL-Minobot
|
plugins/motd.py
|
Python
|
gpl-3.0
| 3,357
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011, 2012 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Id$
#
"""Unit test module weewx.wxstats"""
from __future__ import with_statement
import math
import os.path
import shutil
import sys
import syslog
import time
import unittest
import configobj
os.environ['TZ'] = 'America/Los_Angeles'
import weeutil.weeutil
import weewx.tags
import gen_fake_data
from weewx.units import ValueHelper
day_keys = [x[0] for x in gen_fake_data.schema if x[0] not in ['dateTime', 'interval', 'usUnits']] + ['wind']
# Find the configuration file. It's assumed to be in the same directory as me:
config_path = os.path.join(os.path.dirname(__file__), "testgen.conf")
cwd = None
skin_dict = {'Units' : {'Trend': {'time_delta': 3600, 'time_grace': 300},
'DegreeDay' : {'heating_base' : "65, degree_F",
'cooling_base' : "65, degree_C"} } }
class Common(unittest.TestCase):
def setUp(self):
global config_path
global cwd
weewx.debug = 1
syslog.openlog('test_stats', syslog.LOG_CONS)
syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
# Save and set the current working directory in case some service changes it.
if not cwd:
cwd = os.getcwd()
else:
os.chdir(cwd)
try :
self.config_dict = configobj.ConfigObj(config_path, file_error=True)
except IOError:
sys.stderr.write("Unable to open configuration file %s" % self.config_path)
# Reraise the exception (this will eventually cause the program to exit)
raise
except configobj.ConfigObjError:
sys.stderr.write("Error while parsing configuration file %s" % config_path)
raise
# Remove the old directory:
try:
test_html_dir = os.path.join(self.config_dict['WEEWX_ROOT'], self.config_dict['StdReport']['HTML_ROOT'])
shutil.rmtree(test_html_dir)
except OSError, e:
if os.path.exists(test_html_dir):
print >>sys.stderr, "\nUnable to remove old test directory %s", test_html_dir
print >>sys.stderr, "Reason:", e
print >>sys.stderr, "Aborting"
exit(1)
# This will generate the test databases if necessary:
gen_fake_data.configDatabases(self.config_dict, database_type=self.database_type)
def tearDown(self):
pass
def test_create_stats(self):
global day_keys
with weewx.manager.open_manager_with_config(self.config_dict, 'wx_binding') as manager:
self.assertItemsEqual(sorted(manager.daykeys), sorted(day_keys))
self.assertEqual(manager.connection.columnsOf('archive_day_barometer'),
['dateTime', 'min', 'mintime', 'max', 'maxtime', 'sum', 'count', 'wsum', 'sumtime'])
self.assertEqual(manager.connection.columnsOf('archive_day_wind'),
['dateTime', 'min', 'mintime', 'max', 'maxtime', 'sum', 'count', 'wsum', 'sumtime',
'max_dir', 'xsum', 'ysum', 'dirsumtime', 'squaresum', 'wsquaresum'])
def testScalarTally(self):
with weewx.manager.open_manager_with_config(self.config_dict, 'wx_binding') as manager:
# Pick a random day, say 15 March:
start_ts = int(time.mktime((2010,3,15,0,0,0,0,0,-1)))
stop_ts = int(time.mktime((2010,3,16,0,0,0,0,0,-1)))
# Sanity check that this is truly the start of day:
self.assertEqual(start_ts, weeutil.weeutil.startOfDay(start_ts))
# Get a day's stats from the daily summaries:
allStats = manager._get_day_summary(start_ts)
# Now calculate the same summaries from the raw data in the archive.
# Here are some random observation types:
for stats_type in ['barometer', 'outTemp', 'rain']:
# Now test all the aggregates:
for aggregate in ['min', 'max', 'sum', 'count', 'avg']:
# Compare to the main archive:
res = manager.getSql("SELECT %s(%s) FROM archive WHERE dateTime>? AND dateTime <=?;" % (aggregate, stats_type), (start_ts, stop_ts))
# The results from the daily summaries for this aggregation
allStats_res = getattr(allStats[stats_type], aggregate)
self.assertAlmostEqual(allStats_res, res[0], msg="Value check. Failing type %s, aggregate: %s" % (stats_type, aggregate))
# Check the times of min and max as well:
if aggregate in ['min','max']:
res2 = manager.getSql("SELECT dateTime FROM archive WHERE %s = ? AND dateTime>? AND dateTime <=?" % (stats_type,), (res[0], start_ts, stop_ts))
stats_time = getattr(allStats[stats_type], aggregate+'time')
self.assertEqual(stats_time, res2[0], "Time check. Failing type %s, aggregate: %s" % (stats_type, aggregate))
def testWindTally(self):
with weewx.manager.open_manager_with_config(self.config_dict, 'wx_binding') as manager:
# Pick a random day, say 15 March:
start_ts = int(time.mktime((2010,3,15,0,0,0,0,0,-1)))
stop_ts = int(time.mktime((2010,3,16,0,0,0,0,0,-1)))
# Sanity check that this is truly the start of day:
self.assertEqual(start_ts, weeutil.weeutil.startOfDay(start_ts))
allStats = manager._get_day_summary(start_ts)
# Test all the aggregates:
for aggregate in ['min', 'max', 'sum', 'count', 'avg']:
if aggregate == 'max':
res = manager.getSql("SELECT MAX(windGust) FROM archive WHERE dateTime>? AND dateTime <=?;", (start_ts, stop_ts))
else:
res = manager.getSql("SELECT %s(windSpeed) FROM archive WHERE dateTime>? AND dateTime <=?;" % (aggregate, ), (start_ts, stop_ts))
# From StatsDb:
allStats_res = getattr(allStats['wind'], aggregate)
self.assertAlmostEqual(allStats_res, res[0])
# Check the times of min and max as well:
if aggregate == 'min':
resmin = manager.getSql("SELECT dateTime FROM archive WHERE windSpeed = ? AND dateTime>? AND dateTime <=?", (res[0], start_ts, stop_ts))
self.assertEqual(allStats['wind'].mintime, resmin[0])
elif aggregate == 'max':
resmax = manager.getSql("SELECT dateTime FROM archive WHERE windGust = ? AND dateTime>? AND dateTime <=?", (res[0], start_ts, stop_ts))
self.assertEqual(allStats['wind'].maxtime, resmax[0])
# Check RMS:
(squaresum, count) = manager.getSql("SELECT SUM(windSpeed*windSpeed), COUNT(windSpeed) from archive where dateTime>? AND dateTime<=?;", (start_ts, stop_ts))
rms = math.sqrt(squaresum/count) if count else None
self.assertAlmostEqual(allStats['wind'].rms, rms)
def testTags(self):
"""Test common tags."""
global skin_dict
db_binder = weewx.manager.DBBinder(self.config_dict['DataBindings'],
self.config_dict['Databases'])
db_lookup = db_binder.bind_default()
with weewx.manager.open_manager_with_config(self.config_dict, 'wx_binding') as manager:
spans = {'day' : weeutil.weeutil.TimeSpan(time.mktime((2010,3,15,0,0,0,0,0,-1)),
time.mktime((2010,3,16,0,0,0,0,0,-1))),
'week' : weeutil.weeutil.TimeSpan(time.mktime((2010,3,14,0,0,0,0,0,-1)),
time.mktime((2010,3,21,0,0,0,0,0,-1))),
'month': weeutil.weeutil.TimeSpan(time.mktime((2010,3,01,0,0,0,0,0,-1)),
time.mktime((2010,4,01,0,0,0,0,0,-1))),
'year' : weeutil.weeutil.TimeSpan(time.mktime((2010,1,01,0,0,0,0,0,-1)),
time.mktime((2011,1,01,0,0,0,0,0,-1)))}
# This may not necessarily execute in the order specified above:
for span in spans:
start_ts = spans[span].start
stop_ts = spans[span].stop
tagStats = weewx.tags.TimeBinder(db_lookup, stop_ts,
rain_year_start=1,
skin_dict=skin_dict)
# Cycle over the statistical types:
for stats_type in ('barometer', 'outTemp', 'rain'):
# Now test all the aggregates:
for aggregate in ('min', 'max', 'sum', 'count', 'avg'):
# Compare to the main archive:
res = manager.getSql("SELECT %s(%s) FROM archive WHERE dateTime>? AND dateTime <=?;" % (aggregate, stats_type), (start_ts, stop_ts))
archive_result = res[0]
value_helper = getattr(getattr(getattr(tagStats, span)(), stats_type), aggregate)
self.assertAlmostEqual(float(str(value_helper.formatted)), archive_result, places=1)
# Check the times of min and max as well:
if aggregate in ('min','max'):
res2 = manager.getSql("SELECT dateTime FROM archive WHERE %s = ? AND dateTime>? AND dateTime <=?" % (stats_type,), (archive_result, start_ts, stop_ts))
stats_value_helper = getattr(getattr(getattr(tagStats, span)(), stats_type), aggregate +'time')
self.assertEqual(stats_value_helper.raw, res2[0])
self.assertEqual(str(tagStats.day().barometer.avg), "30.675 inHg")
self.assertEqual(str(tagStats.day().barometer.min), "30.065 inHg")
self.assertEqual(str(tagStats.day().barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.day().barometer.mintime), "00:00")
self.assertEqual(str(tagStats.day().barometer.maxtime), "01:00")
self.assertEqual(str(tagStats.week().barometer.avg), "29.904 inHg")
self.assertEqual(str(tagStats.week().barometer.min), "29.000 inHg")
self.assertEqual(str(tagStats.week().barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.week().barometer.mintime), "01:00 on Monday")
self.assertEqual(str(tagStats.week().barometer.maxtime), "01:00 on Wednesday")
self.assertEqual(str(tagStats.month().barometer.avg), "30.021 inHg")
self.assertEqual(str(tagStats.month().barometer.min), "29.000 inHg")
self.assertEqual(str(tagStats.month().barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.month().barometer.mintime), "05-Mar-2010 00:00")
self.assertEqual(str(tagStats.month().barometer.maxtime), "03-Mar-2010 00:00")
self.assertEqual(str(tagStats.year().barometer.avg), "30.004 inHg")
self.assertEqual(str(tagStats.year().barometer.min), "29.000 inHg")
self.assertEqual(str(tagStats.year().barometer.max), "31.000 inHg")
self.assertEqual(str(tagStats.year().barometer.mintime), "04-Jan-2010 00:00")
self.assertEqual(str(tagStats.year().barometer.maxtime), "02-Jan-2010 00:00")
self.assertEqual(str(tagStats.day().outTemp.avg), "38.8°F")
self.assertEqual(str(tagStats.day().outTemp.min), "18.6°F")
self.assertEqual(str(tagStats.day().outTemp.max), "59.0°F")
self.assertEqual(str(tagStats.day().outTemp.mintime), "07:00")
self.assertEqual(str(tagStats.day().outTemp.maxtime), "19:00")
self.assertEqual(str(tagStats.week().outTemp.avg), "38.8°F")
self.assertEqual(str(tagStats.week().outTemp.min), "16.6°F")
self.assertEqual(str(tagStats.week().outTemp.max), "61.0°F")
self.assertEqual(str(tagStats.week().outTemp.mintime), "07:00 on Sunday")
self.assertEqual(str(tagStats.week().outTemp.maxtime), "19:00 on Saturday")
self.assertEqual(str(tagStats.month().outTemp.avg), "28.7°F")
self.assertEqual(str(tagStats.month().outTemp.min), "-0.9°F")
self.assertEqual(str(tagStats.month().outTemp.max), "59.0°F")
self.assertEqual(str(tagStats.month().outTemp.mintime), "01-Mar-2010 06:00")
self.assertEqual(str(tagStats.month().outTemp.maxtime), "31-Mar-2010 19:00")
self.assertEqual(str(tagStats.year().outTemp.avg), "48.3°F")
self.assertEqual(str(tagStats.year().outTemp.min), "-20.0°F")
self.assertEqual(str(tagStats.year().outTemp.max), "100.0°F")
self.assertEqual(str(tagStats.year().outTemp.mintime), "01-Jan-2010 06:00")
self.assertEqual(str(tagStats.year().outTemp.maxtime), "02-Jul-2010 19:00")
# Check the special aggregate types "exists" and "has_data":
self.assertTrue(tagStats.year().barometer.exists)
self.assertTrue(tagStats.year().barometer.has_data)
self.assertFalse(tagStats.year().bar.exists)
self.assertFalse(tagStats.year().bar.has_data)
self.assertTrue(tagStats.year().inHumidity.exists)
self.assertFalse(tagStats.year().inHumidity.has_data)
def test_agg_intervals(self):
"""Test aggregation spans that do not span a day"""
db_binder = weewx.manager.DBBinder(self.config_dict['DataBindings'],
self.config_dict['Databases'])
db_lookup = db_binder.bind_default()
# note that this spans the spring DST boundary:
six_hour_span = weeutil.weeutil.TimeSpan(time.mktime((2010,3,14,1,0,0,0,0,-1)),
time.mktime((2010,3,14,8,0,0,0,0,-1)))
tsb = weewx.tags.TimespanBinder(six_hour_span, db_lookup)
self.assertEqual(str(tsb.outTemp.max), "21.0°F")
self.assertEqual(str(tsb.outTemp.maxtime), "14-Mar-2010 01:10")
self.assertEqual(str(tsb.outTemp.min), "7.1°F")
self.assertEqual(str(tsb.outTemp.mintime), "14-Mar-2010 07:00")
self.assertEqual(str(tsb.outTemp.avg), "11.4°F")
rain_span = weeutil.weeutil.TimeSpan(time.mktime((2010,3,14,20,10,0,0,0,-1)),
time.mktime((2010,3,14,23,10,0,0,0,-1)))
tsb = weewx.tags.TimespanBinder(rain_span, db_lookup)
self.assertEqual(str(tsb.rain.sum), "0.26 in")
def test_agg(self):
"""Test aggregation in the archive table against aggregation in the daily summary"""
week_start_ts = time.mktime((2010,3,14,0,0,0,0,0,-1))
week_stop_ts = time.mktime((2010,3,21,0,0,0,0,0,-1))
with weewx.manager.open_manager_with_config(self.config_dict, 'wx_binding') as manager:
for day_span in weeutil.weeutil.genDaySpans(week_start_ts, week_stop_ts):
for aggregation in ['min', 'max', 'mintime', 'maxtime', 'avg']:
# Get the answer using the raw archive table:
table_answer = ValueHelper(weewx.manager.Manager.getAggregate(manager, day_span, 'outTemp', aggregation))
daily_answer = ValueHelper(weewx.manager.DaySummaryManager.getAggregate(manager, day_span, 'outTemp', aggregation))
self.assertEqual(str(table_answer), str(daily_answer),
msg="aggregation=%s; %s vs %s" % (aggregation, table_answer, daily_answer))
def test_rainYear(self):
db_binder = weewx.manager.DBBinder(self.config_dict['DataBindings'],
self.config_dict['Databases'])
db_lookup = db_binder.bind_default()
stop_ts = time.mktime((2011,1,1,0,0,0,0,0,-1))
# Check for a rain year starting 1-Jan
tagStats = weewx.tags.TimeBinder(db_lookup, stop_ts,
rain_year_start=1)
self.assertEqual(str(tagStats.rainyear().rain.sum), "58.68 in")
# Do it again, this time with the rain year starting in month 6 (1-Jun):
tagStats = weewx.tags.TimeBinder(db_lookup, stop_ts,
rain_year_start=6)
self.assertEqual(str(tagStats.rainyear().rain.sum), "22.72 in")
def test_heatcool(self):
db_binder = weewx.manager.DBBinder(self.config_dict['DataBindings'],
self.config_dict['Databases'])
db_lookup = db_binder.bind_default()
#Test heating and cooling degree days:
stop_ts = time.mktime((2011,1,1,0,0,0,0,0,-1))
tagStats = weewx.tags.TimeBinder(db_lookup, stop_ts,
skin_dict=skin_dict)
self.assertEqual(str(tagStats.year().heatdeg.sum), "5126.3°F-day")
self.assertEqual(str(tagStats.year().cooldeg.sum), "1026.2°F-day")
class TestSqlite(Common):
def __init__(self, *args, **kwargs):
self.database_type = "sqlite"
super(TestSqlite, self).__init__(*args, **kwargs)
class TestMySQL(Common):
def __init__(self, *args, **kwargs):
self.database_type = "mysql"
super(TestMySQL, self).__init__(*args, **kwargs)
def suite():
tests = ['test_create_stats', 'testScalarTally', 'testWindTally',
'testTags', 'test_rainYear', 'test_agg_intervals', 'test_agg', 'test_heatcool']
# Test both sqlite and MySQL:
return unittest.TestSuite(map(TestSqlite, tests) + map(TestMySQL, tests))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
sai9/weewx-gitsvn
|
bin/weewx/test/test_daily.py
|
Python
|
gpl-3.0
| 18,244
|
import collections
import functools
import json
import logging.config
import os
from dancebooks import const
class SmtpConfig(object):
def __init__(self, params):
self.host = params["host"]
self.port = params["port"]
self.user = params["user"]
self.password = params["password"]
self.email = params["email"]
class BugReportConfig(object):
def __init__(self, params):
self.to_addr = params["to_addr"]
self.to_name = params["to_name"]
class ParserConfig(object):
def __init__(self, params):
#some directories
repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.bibdata_dir = os.path.join(repo_root, params["bibdata_dir"])
self.markdown_dir = os.path.join(repo_root, params["markdown_dir"])
#field type specification
self.list_params = set(params["list_params"])
self.file_list_params = set(params["file_list_params"])
self.keyword_list_params = set(params["keyword_list_params"])
self.int_params = set(params["int_params"])
self.year_params = set(params["year_params"])
self.date_params = set(params["date_params"])
self.bool_params = set(params["bool_params"])
#other values
self.list_sep = params["list_sep"]
self.date_format = params["date_format"]
self.blocked_domains = set(params["blocked_domains"])
self.blocked_domains_http = set(params["blocked_domains_http"])
self.domains_allowed_301 = set(params["domains_allowed_301"])
#keywords param is loaded from a single config value,
#but is split into a number of config fields with predictable meaning
keywords = params["keywords"]
self.keywords = set()
self.category_keywords = collections.OrderedDict()
for category, cat_keywords in keywords.items():
self.category_keywords[category] = list()
for keyword in cat_keywords:
self.keywords.add(keyword)
self.category_keywords[category].append(keyword)
self.bookkeepers = params["bookkeepers"]
#suffixes parsing
self.start_suffix = params["start_suffix"]
self.end_suffix = params["end_suffix"]
self.circa_suffix = params["circa_suffix"]
#generating additional params
suffix_adder = lambda string, suffix: string + suffix
self.year_start_params = set(map(
functools.partial(suffix_adder, suffix=self.start_suffix),
self.year_params
))
self.year_end_params = set(map(
functools.partial(suffix_adder, suffix=self.end_suffix),
self.year_params
))
self.date_start_params = set(map(
functools.partial(suffix_adder, suffix=self.start_suffix),
self.date_params
))
self.date_end_params = set(map(
functools.partial(suffix_adder, suffix=self.end_suffix),
self.date_params
))
class WwwConfig(object):
def __init__(self, params):
self.app_domain = params["app_domain"]
self.search_params = set(params["search_params"])
self.search_synonyms = params["search_synonyms"]
self.index_params = set(params["index_params"])
self.inverted_index_params = set(params["inverted_index_params"])
self.index_unique_params = set(params["index_unique_params"])
self.indexed_search_params = self.search_params & self.index_params
self.nonindexed_search_params = self.search_params - self.index_params
self.languages = params["languages"]
self.date_formats = params["date_formats"]
self.order_by_keys = set(params["order_by_keys"])
self.elibrary_dir = params["elibrary_dir"]
self.backup_dir = params["backup_dir"]
#security params
self.secret_cookie_key = params["secret_cookie_key"]
self.secret_cookie_value = params["secret_cookie_value"]
self.secret_questions = params["secret_questions"]
self.id_redirections = params["id_redirections"]
class DatabaseConfig(object):
def __init__(self, params):
self.host = params["host"]
self.port = params["port"]
self.user = params["user"]
self.password = params["password"]
self.database_name = params["database_name"]
self.options = params["options"]
@property
def connection_string(self):
#TODO: handle self.options
return (
f"host={self.host} port={self.port} "
f"user={self.user} password={self.password} "
f"dbname={self.database_name}"
)
@property
def connection_url(self):
return f"postgresql://{self.user}:{self.password}@{self.host}:{self.port}/{self.database_name}"
class Config(object):
def __init__(self, path):
with open(path, "rt") as config_file:
json_config = json.load(config_file)
#handling secrets
if "secrets" in json_config:
secrets_path = os.path.join(os.path.dirname(path), json_config["secrets"])
with open(secrets_path, "rt") as secrets_json_file:
secrets_config = json.load(secrets_json_file)
for key, value in secrets_config.items():
if key in json_config:
json_config[key].update(value)
else:
json_config[key] = value
self.smtp = SmtpConfig(json_config["smtp"])
self.bug_report = BugReportConfig(json_config["bug_report"])
self.parser = ParserConfig(json_config["parser"])
self.www = WwwConfig(json_config["www"])
self.db = DatabaseConfig(json_config["db"])
self.unittest_mode = "DANCEBOOKS_UNITTEST" in os.environ
def setup_logging(config_path):
logging.config.fileConfig(config_path)
config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "configs/dancebooks.json")
config = Config(config_path)
DEFAULT_LOGGING_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "configs/logger.development.conf")
logging_config_path = os.environ.get(const.ENV_LOGGING_CONFIG, DEFAULT_LOGGING_PATH)
setup_logging(logging_config_path)
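# Illustrative sketch (not part of the original module): the "secrets" handling
# above merges a second JSON file into the main config section by section,
# using dict.update() when a section exists in both files. The helper name and
# the values below are made up; only the merge behaviour mirrors Config.__init__.
def _merge_secrets(json_config, secrets_config):
	# Same loop as in Config.__init__: update existing sections, add new ones.
	for key, value in secrets_config.items():
		if key in json_config:
			json_config[key].update(value)
		else:
			json_config[key] = value
	return json_config

if __name__ == "__main__":
	main_cfg = {"smtp": {"host": "localhost", "port": 25}}
	secrets = {"smtp": {"user": "bot", "password": "hunter2"},
		"db": {"password": "hunter2"}}
	print(_merge_secrets(main_cfg, secrets))
	# -> smtp keeps host/port and gains user/password; db is added whole.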
|
georgthegreat/dancebooks-bibtex
|
dancebooks/config.py
|
Python
|
gpl-3.0
| 5,501
|
from __future__ import division # floating-point division
class DocumentationCredibility:
"""
DocumentationCredibility converts a key (0-5, low to high) into a credibility label,
based on the ratio of unknown to total commands.
:param credibility: credibility computed from the number of unknown vs. total commands passed to the constructor
:param credibilityTable: dictionary mapping keys to credibility labels
"""
credibility = ""
credibilityTable = {0: 'None',
1: 'Very low',
2: 'Low',
3: 'Medium',
4: 'High',
5: 'Very high'}
def __init__(self, unknown_commands, total_commands):
percent_correct = self.compute_percent_correct(unknown_commands, total_commands)
self.credibility = self.compute_credibility(percent_correct)
def lookup_credibility(self, key):
return self.credibilityTable.get(key, "unknown")
def compute_credibility_key(self, percent_correct):
"""
Computes credibility key by 20% increments (known/total commands ratio), starting from zero.
:return: key for credibility computation (0-5)
"""
if self.is_zero(percent_correct):
return 0
elif self.all_correct(percent_correct):
return 5
else:
return (percent_correct // 20) + 1
def compute_credibility(self, percent_correct):
key = self.compute_credibility_key(percent_correct)
return self.lookup_credibility(key)
def get_credibility(self):
return self.credibility
def compute_percent_correct(self, unknown_commands, total_commands):
if self.is_zero(total_commands):
return 100
else:
return self.round_to_two_places((1 - (unknown_commands / total_commands)) * 100)
@staticmethod
def is_zero(number):
return round(number) == 0
@staticmethod
def all_correct(number):
return round(number) == 100
@staticmethod
def round_to_two_places(number):
return round(number, 2)
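# Short usage sketch (illustrative only, not from the original module): the class
# maps the share of recognised commands onto the six labels in credibilityTable,
# in 20% steps, with 0% and 100% handled as special cases.
if __name__ == "__main__":
	# 3 unknown commands out of 10 -> 70% correct -> key 4 -> 'High'
	print(DocumentationCredibility(3, 10).get_credibility())
	# no unknown commands -> 100% correct -> 'Very high'
	print(DocumentationCredibility(0, 10).get_credibility())
	# zero total commands is treated as fully correct by compute_percent_correct
	print(DocumentationCredibility(0, 0).get_credibility())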
|
rh-lab-q/bkrdoc
|
bkrdoc/analysis/credibility.py
|
Python
|
gpl-3.0
| 2,147
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from mantid.api import PythonAlgorithm, AlgorithmFactory, WorkspaceGroup, MatrixWorkspace
from mantid.kernel import Direction, StringArrayLengthValidator, StringArrayProperty, FloatBoundedValidator
import mantid.simpleapi as api
import numpy as np
class CompareSampleLogs(PythonAlgorithm):
"""
Compares specified sample logs for a given list of workspaces
"""
def __init__(self):
"""
Init
"""
PythonAlgorithm.__init__(self)
self.tolerance = 0.0
self.wslist = []
def category(self):
"""
Returns category
"""
return "Utility\\Workspaces"
def seeAlso(self):
return [ "CompareWorkspaces","CheckForSampleLogs","CopySample" ]
def name(self):
"""
Returns name
"""
return "CompareSampleLogs"
def summary(self):
return "Compares specified sample logs for a given list of workspaces."
def PyInit(self):
validator = StringArrayLengthValidator()
validator.setLengthMin(1) # one group may be given
self.declareProperty(StringArrayProperty(name="InputWorkspaces", direction=Direction.Input, validator=validator),
doc="Comma separated list of workspaces or groups of workspaces.")
self.declareProperty(StringArrayProperty(name="SampleLogs", direction=Direction.Input, validator=validator),
doc="Comma separated list of sample logs to compare.")
self.declareProperty("Tolerance", 1e-3, validator=FloatBoundedValidator(lower=1e-7),
doc="Tolerance for comparison of numeric values.")
self.declareProperty("Result", "A string that will be empty if all the logs match, "
"otherwise will contain a comma separated list of not matching logs", Direction.Output)
return
def validateInputs(self):
# given workspaces must exist
# and must be MatrixWorkspace instances (or groups containing only MatrixWorkspaces)
issues = dict()
workspaces = self.getProperty("InputWorkspaces").value
for wsname in workspaces:
if not api.AnalysisDataService.doesExist(wsname):
issues["InputWorkspaces"] = "Workspace " + wsname + " does not exist."
else:
wks = api.AnalysisDataService.retrieve(wsname)
if isinstance(wks, WorkspaceGroup):
for idx in range(wks.getNumberOfEntries()):
if not isinstance(wks.getItem(idx), MatrixWorkspace):
issues["InputWorkspaces"] = "Group " + wsname + " contains workspaces of unsupported type."
elif not isinstance(wks, MatrixWorkspace):
issues["InputWorkspaces"] = "Type of workspace " + wsname + " is not supported by this algorithm."
return issues
def _expand_groups(self):
workspaces = self.getProperty("InputWorkspaces").value
input_workspaces = []
for wsname in workspaces:
wks = api.AnalysisDataService.retrieve(wsname)
if isinstance(wks, WorkspaceGroup):
input_workspaces.extend(wks.getNames())
else:
input_workspaces.append(wsname)
return input_workspaces
def do_match(self, pname, properties, isnum):
wsnum = len(self.wslist)
nprop = len(properties)
# if some workspaces do not have this property, return False
if nprop != wsnum:
message = "Number of properties " + str(nprop) + " for property " + pname +\
" is not equal to number of workspaces " + str(wsnum)
self.log().information(message)
return False
match = True
if isnum:
# check for nan
idx_nan = np.where(np.isnan(properties))[0]
if len(idx_nan) > 0:
message = "Sample log " + pname + " contains nan values. \n" +\
"Workspaces: " + ", ".join(self.wslist) + "\n Values: " + str(properties)
self.log().information(message)
return False
if max(properties) - min(properties) > self.tolerance:
match = False
else:
pvalue = properties[0]
if properties.count(pvalue) != nprop:
match = False
if not match:
message = "Sample log " + pname + " is not identical in the given list of workspaces. \n" +\
"Workspaces: " + ", ".join(self.wslist) + "\n Values: " + str(properties)
self.log().information(message)
return match
def compare_properties(self, plist):
"""
Compares properties which are required to be the same.
Logs a warning if one of the sample logs is not found and an information
message when a difference is observed. All properties in plist are checked;
the names of the non-matching ones are collected and returned.
@param plist List of properties to compare
"""
# retrieve the workspaces, form dictionary {wsname: run}
runs = {}
does_not_match = []
for wsname in self.wslist:
wks = api.AnalysisDataService.retrieve(wsname)
runs[wsname] = wks.getRun()
for prop in plist:
properties = []
isnum = False
for wsname in self.wslist:
run = runs[wsname]
if not run.hasProperty(prop):
message = "Workspace " + wsname + " does not have sample log " + prop
self.log().warning(message)
else:
curprop = run.getProperty(prop)
if curprop.type == 'string' or curprop.type == 'number':
properties.append(curprop.value)
else:
message = "Comparison of " + str(curprop.type) + " properties is not yet supported. Property " +\
prop + " in the workspace " + wsname
self.log().warning(message)
# sometimes numbers are presented as strings
try:
properties = [float(val) for val in properties]
except ValueError:
pass
else:
isnum = True
# check whether number of properties and workspaces match
match = self.do_match(prop, properties, isnum)
if not match:
does_not_match.append(prop)
return does_not_match
def PyExec(self):
self.wslist = self._expand_groups()
# no sense in comparing sample logs for a single workspace
if len(self.wslist) < 2:
message = "At least 2 workspaces must be given as an input."
self.log().error(message)
raise RuntimeError(message)
lognames = self.getProperty("SampleLogs").value
self.tolerance = self.getProperty("Tolerance").value
result = ''
do_not_match = self.compare_properties(lognames)
# return list of not matching properties
if len(do_not_match) > 0:
result = ",".join(do_not_match)
self.setProperty("Result", result)
return
# Register algorithm with Mantid
AlgorithmFactory.subscribe(CompareSampleLogs)
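# Hedged usage sketch (not part of the original algorithm file). Once registered,
# the algorithm can be called through mantid.simpleapi like any other algorithm;
# the workspace and log names below are assumptions, and the snippet only makes
# sense inside a working Mantid installation.
if __name__ == "__main__":
	from mantid.simpleapi import CreateSampleWorkspace, AddSampleLog, CompareSampleLogs
	CreateSampleWorkspace(OutputWorkspace="ws1")
	CreateSampleWorkspace(OutputWorkspace="ws2")
	AddSampleLog(Workspace="ws1", LogName="temperature", LogText="291.2", LogType="Number")
	AddSampleLog(Workspace="ws2", LogName="temperature", LogText="293.5", LogType="Number")
	# 'Result' is empty when all requested logs match, otherwise a comma
	# separated list of the logs that differ (here: temperature).
	result = CompareSampleLogs(InputWorkspaces="ws1,ws2", SampleLogs="temperature", Tolerance=0.5)
	print(result)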
|
mganeva/mantid
|
Framework/PythonInterface/plugins/algorithms/CompareSampleLogs.py
|
Python
|
gpl-3.0
| 7,729
|
# low level db access - does no error checking or validation
# provides abstraction between the actual database (sqlite, mongo, etc) and the db_frontend.py
import sqlite3
import os.path
import datetime
import re
from db import DB
from flask import g
RE_int = re.compile(r'\d+') # re to check if string is positive integer (no + prefix allowed)
class Database(DB):
def __init__(self, args):
super(Database, self).__init__(args)
self.filename = args['FILE']
self.init_file = args['INIT_SQL_FILE']
# database setup by flask - use:
# g.con == connection object
# g.cur == cursor
def first_run(self):
con = sqlite3.connect(self.filename)
cur = con.cursor()
init_sql_file = open(self.init_file)
cur.executescript(init_sql_file.read())
init_sql_file.close()
con.commit()
con.close()
def get_user(self, email):
g.cur.execute("select * from users where email = ?", (email,))
user = g.cur.fetchone()
if user:
return user
return None
def create_user(self, email, hashed):
if self.get_user(email):
return False
g.cur.execute("insert into users(email, hashed) values(?, ?)", (email, hashed))
g.con.commit()
return True
def update_token(self, email, token, tokendate):
g.cur.execute("update users set token = ?, tokendate = ? where email = ?", (token, tokendate, email))
g.con.commit()
def get_note(self, email, key, version=None):
user = self.get_user(email)
# 'and userid =' is to ensure note is owned by user
g.cur.execute("select key, deleted, modifydate, createdate, syncnum, version, minversion, sharekey, publishkey, content, pinned, markdown, unread, list from notes where key = ? and userid = ?", (key, user['id']))
note = g.cur.fetchone()
if not note:
return None
# TODO: +future +enhancement check for share key to allow sharing notes around users
# below also means getting latest version will return full note
if note and version and version != note['version']:
g.cur.execute("select * from versions where key = ? and version = ?", (key, version))
note = g.cur.fetchone()
return note
tagsOBJ = g.cur.execute("select name from tagged join tags on id=tagid where notekey=?", (key,)).fetchall()
if tagsOBJ:
note['tags'] = [x['name'] for x in tagsOBJ]
else:
note['tags'] = []
systemtags = [tag for tag in ['pinned', 'markdown', 'unread', 'list'] if note.get(tag, None)]
note['systemtags'] = systemtags
# remove unnecessary keys
del note['pinned']
del note['markdown']
del note['unread']
del note['list']
return note
def create_note(self, email, note_data):
user = self.get_user(email)
note_data['userid'] = user['id']
sys_tags = note_data['systemtags']
for t in ['pinned', 'markdown', 'list']:
note_data[t] = 1 if t in sys_tags else 0
g.cur.execute('insert into notes(userid, key, deleted, modifydate, createdate, syncnum, version, minversion, content, pinned, markdown, list) values (:userid, :key, :deleted, :modifydate, :createdate, :syncnum, :version, :minversion, :content, :pinned, :markdown, :list)', note_data)
key = note_data['key']
for t in note_data['tags']:
i = self.get_and_create_tag(t)
self.tagit(key, i)
g.con.commit()
return True
def tagit(self, notekey, tag):
g.cur.execute('insert into tagged select ?, ? where not exists (select * from tagged where notekey = ? and tagid = ?)', (notekey, tag, notekey, tag))
def get_and_create_tag(self, t):
if not g.cur.execute('select * from tags where lower_name = ?', (t.lower(),)).fetchone():
g.cur.execute('insert into tags(_index, name, lower_name, version) values (?, ?, ?, ?)', (1, t, t.lower(), 1))
g.con.commit()
return g.cur.execute('select id from tags where lower_name = ?', (t.lower(),)).fetchone()['id']
# TODO: don't forget index for tag is stored in sql as _index
def update_note(self, email, note_data):
# note - note_data contains key
user = self.get_user(email)
note_data['userid'] = user['id']
sys_tags = note_data['systemtags']
for t in ['pinned', 'markdown', 'list']:
note_data[t] = 1 if t in sys_tags else 0
g.cur.execute("update notes set deleted=:deleted, modifydate=:modifydate, createdate=:createdate, syncnum=:syncnum, minversion=:minversion, publishkey=:publishkey, content=:content, version=:version, pinned=:pinned, markdown=:markdown, list=:list where key = :key and userid = :userid", note_data)
key = note_data['key']
for t in note_data['tags']:
i = self.get_and_create_tag(t)
self.tagit(key, i)
g.con.commit()
return True
def delete_note(self, email, key):
# check user owns note
# delete all tagged entries associated
# delete all versions with same key
# delete note by key
user = self.get_user(email)
# 'and userid =' is to ensure note is owned by user
g.cur.execute("select * from notes where key = ? and userid = ?", (key, user['id']))
note = g.cur.fetchone()
if not note:
return ("note not found", 404)
elif note['deleted'] == 0:
return ("must send note to trash before permanently deleting", 400)
g.cur.execute("delete from tagged where notekey = ?", (key,))
# TODO: delete all tags that no longer have a tagged entry
g.cur.execute("delete from versions where key = ?", (key,))
g.cur.execute("delete from notes where key = ?", (key,))
g.con.commit()
return ("", 200)
def save_version(self, email, notekey):
user = self.get_user(email)
g.cur.execute('insert into versions select key, modifydate, content, version from notes where key = ? and userid = ?', (notekey, user['id']))
g.con.commit()
def drop_old_versions(self, email, notekey, minversion):
print(g.cur.execute('select * from versions').fetchall())
g.cur.execute('delete from versions where version < ? and key = ?', (minversion, notekey))
g.con.commit()
def notes_index(self, username, length, since, mark):
user = self.get_user(username)
# set defaults for mark (currently length and since must be valid)
if not mark:
mark = "0"
if RE_int.match(mark):
mark = int(mark)
else:
return ("invalid mark parameter", 400)
if length < 1:
return ("length must be greater than 0", 400)
# return { "count": 0, "data": []} # ha caught you there
# should throw error if length too large? (nah, let's be nice)
length = min(length, 100)
g.cur.execute("select rowid, key, deleted, modifydate, createdate, syncnum, version, minversion, sharekey, publishkey, pinned, markdown, unread, list from notes where userid = ? and rowid > ? and modifydate > ? order by rowid", (user['id'], mark, since))
notes = g.cur.fetchall()
newmark = None
if len(notes) > length:
newmark = notes[length-1]['rowid']
notes = notes[:length]
# ok there's probably a more efficient way to process notes here....
# contributions of code or ideas welcome ;)
for note in notes:
key = note['key']
tagsOBJ = g.cur.execute("select name from tagged join tags on id=tagid where notekey=?", (key,)).fetchall()
if tagsOBJ:
note['tags'] = [x['name'] for x in tagsOBJ]
else:
note['tags'] = []
systemtags = [tag for tag in ['pinned', 'markdown', 'unread', 'list'] if note.get(tag, None)]
note['systemtags'] = systemtags
del note['rowid']
del note['pinned']
del note['markdown']
del note['unread']
del note['list']
data = {}
data['count'] = len(notes)
data['data'] = notes
if newmark:
data['mark'] = newmark
return (data, 200)
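# Standalone sketch (not used by the class above) of the insert-if-absent pattern
# that tagit()/get_and_create_tag() rely on: the INSERT ... SELECT ... WHERE NOT
# EXISTS form keeps (notekey, tagid) pairs unique without a UNIQUE constraint on
# the tagged table. The table layout here is simplified for illustration.
if __name__ == "__main__":
	import sqlite3
	con = sqlite3.connect(":memory:")
	cur = con.cursor()
	cur.execute("create table tagged (notekey text, tagid integer)")
	def tag_once(notekey, tagid):
		cur.execute(
			"insert into tagged select ?, ? "
			"where not exists (select * from tagged where notekey = ? and tagid = ?)",
			(notekey, tagid, notekey, tagid))
	tag_once("abc123", 1)
	tag_once("abc123", 1)  # second call is a no-op
	print(cur.execute("select count(*) from tagged").fetchone()[0])  # -> 1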
|
swalladge/snsrv
|
old/sqlite_db.py
|
Python
|
gpl-3.0
| 8,481
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Import hook for GModule https://developer.gnome.org/glib/stable/glib-Dynamic-Loading-of-Modules.html from the GLib
library https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject
via the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection
Tested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and
GLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7
"""
from PyInstaller.utils.hooks import get_gi_typelibs
binaries, datas, hiddenimports = get_gi_typelibs('GModule', '2.0')
|
ijat/Hotspot-PUTRA-Auto-login
|
PyInstaller-3.2/PyInstaller/hooks/hook-gi.repository.GModule.py
|
Python
|
gpl-3.0
| 1,035
|
#!/usr/bin/env python2
"""
Copyright (c) 2015 Mia Nordentoft
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pygame, subprocess, time, os, sys, json, curses, math, signal
from pymouse import PyMouse
from pykeyboard import PyKeyboard
DRAW_FPS = 20
WATCH_FPS = 60
TRIGGER_SYMBOLS = "!~@*"
dirname = os.path.dirname(__file__)
sys.path.append(os.path.join(dirname, os.path.pardir))
def signal_handler(signal, frame):
sys.exit(130)
signal.signal(signal.SIGINT, signal_handler)
def draw_controller(joystick, text):
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
clock = pygame.time.Clock()
while True:
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
for i, t in enumerate(text):
stdscr.addstr(i, 0, t);
j = len(text) + 1
try:
for i in xrange(joystick.get_numaxes()):
val = joystick.get_axis(i)
left = ("#" * int(round(max(-val, 0) * 8))).rjust(8)
right = ("#" * int(round(max(val, 0) * 8))).ljust(8)
stdscr.addstr(i + j, 0, "Axis %s" % i)
stdscr.addstr(i + j, 8, "[%s|%s]" % (left, right))
for i in xrange(joystick.get_numbuttons()):
if joystick.get_button(i):
x = "X"
else:
x = " "
stdscr.addstr(i + j, 30, "Button %s" % i)
stdscr.addstr(i + j, 40, "[%s]" % x)
stdscr.refresh()
clock.tick(DRAW_FPS)
finally:
curses.echo()
curses.nocbreak()
curses.endwin()
def watch_controller(joystick, bindings, controller):
global mousePos, timestamp, lastTimestamp, deltaTime
# Load output
mouse = PyMouse()
keyboard = PyKeyboard()
clock = pygame.time.Clock()
mousePos = (0, 0)
lastTimestamp = 0
timestamp = 0
deltaTime = timestamp - lastTimestamp
controller_watch = {}
controller_watch_previous = {}
for name, input in controller['inputs'].iteritems():
if input['type'] == 'button':
controller_watch[name] = joystick.get_button(input['id'])
elif input['type'] == 'axis':
controller_watch[name] = joystick.get_axis(input['id'])
key_watch = {}
def get_key(key):
if key in key_watch:
return key_watch[key]
else:
return False
def handle_action(action, input):
global mousePos, timestamp, deltaTime
type = action['type']
if type == 'keyboard_tap':
keyboard.tap_key(action['key'])
key_watch[ action['key'] ] = False
elif type == 'keyboard_down':
keyboard.press_key(action['key'])
key_watch[ action['key'] ] = True
elif type == 'keyboard_up':
keyboard.release_key(action['key'])
key_watch[ action['key'] ] = False
elif type == 'if':
if eval(action['if'], globals(), {
'x': controller_watch[input],
'b': controller_watch
}):
handle_actions(action['do'], input)
elif 'else' in action:
handle_actions(action['else'], input)
elif type == 'if_key':
if get_key(action['key']) == action['is']:
handle_actions(action['do'], input)
elif 'else' in action:
handle_actions(action['else'], input)
elif type == 'move_mouse_h_proportional':
speed = controller_watch[input]
mouseX, mouseY = mousePos
mouseX += speed * action['factor'] * deltaTime
mousePos = (mouseX, mouseY)
mouse.move(*mousePos)
elif type == 'move_mouse_v_proportional':
speed = controller_watch[input]
mouseX, mouseY = mousePos
mouseY += speed * action['factor'] * deltaTime
mousePos = (mouseX, mouseY)
mouse.move(*mousePos)
elif type == 'scroll':
x = 0
y = 0
if 'x' in action:
x = action['x']
if 'y' in action:
y = action['y']
mouse.scroll(y, x)
elif type == 'do_every':
if not 'last' in action:
action['last'] = 0
if timestamp - action['last'] >= action['every']:
handle_actions(action['do'], input)
action['last'] = timestamp
elif type == 'mouse_down':
mouse.press(mousePos[0], mousePos[1], action['button'])
elif type == 'mouse_up':
mouse.release(mousePos[0], mousePos[1], action['button'])
elif type == 'mouse_click':
n = 1
if 'n' in action:
n = action['n']
mouse.click(mousePos[0], mousePos[1], action['button'], n)
def handle_actions(actions, input):
if isinstance(actions, list):
for action in actions:
handle_action(action, input)
elif isinstance(actions, dict):
handle_action(actions, input)
while True:
timestamp = time.time()
deltaTime = timestamp - lastTimestamp
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
for name, value in controller_watch.iteritems():
controller_watch_previous[name] = value
input = controller['inputs'][name]
if input['type'] == 'button':
controller_watch[name] = joystick.get_button(input['id'])
elif input['type'] == 'axis':
val = joystick.get_axis(input['id'])
if ('force_positive' in input and input['force_positive']):
val = (val + 1) / 2
controller_watch[name] = val
mousePos = mouse.position()
for binding, actions in bindings.iteritems():
trigger = ""
if binding[0] in TRIGGER_SYMBOLS:
trigger = binding[0]
binding = binding[1:]
value = controller_watch[binding]
previous_value = controller_watch_previous[binding]
changed = value != previous_value
do_trigger = False
if trigger == "": # button down
do_trigger = value and changed
elif trigger == "!": # button up
do_trigger = not value and changed
elif trigger == "~": # button up/down
do_trigger = changed
elif trigger == "@": # always
do_trigger = True
elif trigger == "*": # always when down
do_trigger = value
if do_trigger:
handle_actions(actions, binding)
lastTimestamp = timestamp
clock.tick(WATCH_FPS)
euid = os.geteuid()
if euid != 0:
print "error: c2k must be run as root"
sys.exit(126)
pygame.init()
pygame.joystick.init();
joysticks = [ pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count()) ]
print "Please select a controller"
print "Available controllers:"
for joystick in joysticks:
print "(%s) %s" % (joystick.get_id(), joystick.get_name())
joystick = None
while joystick is None:
joystick_id = raw_input("Controller id: ")
try:
joystick_id = int(joystick_id)
except ValueError:
print "Invalid controller id"
continue
if joystick_id < len(joysticks):
joystick = joysticks[joystick_id]
else:
print "There's no controller with that id"
with open(os.path.join(dirname, 'controllers.json')) as controller_config_file:
controller_config = json.load(controller_config_file)
bindings_files = os.listdir(os.path.join(dirname, 'bindings'))
joystick.init()
if not joystick.get_name() in controller_config:
draw_controller(joystick, [
"%s has no config. Please create one in controllers.json" % joystick.get_name(),
"You can watch your controller live using the below meters:"])
else:
print "Please select a binding"
print "Available bindings:"
for filename in bindings_files:
with open(os.path.join(dirname, 'bindings', filename)) as file:
file_data = json.load(file)
if not joystick.get_name() in file_data['bindings']:
continue
print "* %s (%s)" % (file_data['name'], filename[:-5])
binding = None
while binding is None:
binding_id = raw_input("Binding name: ")
if binding_id + ".json" in bindings_files:
with open(os.path.join(dirname, 'bindings', binding_id + ".json")) as file:
binding_json = json.load(file)['bindings']
if joystick.get_name() in binding_json:
binding = binding_json[ joystick.get_name() ]
else:
print "That binding isn't available for your controller"
else:
print "There's no binding with that name"
watch_controller(joystick, binding, controller_config[ joystick.get_name() ])
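# Hypothetical config sketch (not shipped with the script): the structures the
# code above expects from controllers.json and from a bindings/<name>.json file.
# The controller name, input ids and key names are made up; the trigger prefixes
# (! ~ @ *) and the action types match the handlers in watch_controller().
#
# controllers.json:
# { "Example Pad": { "inputs": {
#       "a":        {"type": "button", "id": 0},
#       "lstick_x": {"type": "axis", "id": 0},
#       "trigger":  {"type": "axis", "id": 2, "force_positive": true} } } }
#
# bindings/desktop.json:
# { "name": "Desktop", "bindings": { "Example Pad": {
#       "a":         {"type": "mouse_click", "button": 1},
#       "!a":        {"type": "keyboard_up", "key": "Return"},
#       "@lstick_x": {"type": "move_mouse_h_proportional", "factor": 600},
#       "*trigger":  {"type": "do_every", "every": 0.5,
#                     "do": {"type": "scroll", "y": -1}} } } }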
|
miestasmia/c2k
|
c2k.py
|
Python
|
gpl-3.0
| 10,323
|
#!/usr/bin/env python3
from tkinter import Toplevel, ACTIVE
from tkinter import ttk
class Tif_export(Toplevel):
'''
Class creates a modal window used to choose TIFF export
settings (compression) before saving the file.
'''
def __init__(self, main_widget, widget_geometries, samples, texts):
'''
Creates the top level window used for saving the tif file.
Args:
main_widget: instance - class Tk
widget_geometries: instance - class Widget_geometries.
samples: instance - class Samples
texts: instance - class Texts
'''
Toplevel.__init__(self, main_widget)
self.widget_geometries = widget_geometries[0]
self.samples = samples[0]
self.texts = texts[0]
self.transient(main_widget)
self.main_widget = main_widget
self.previous_compression = self.samples.tif_compression
self.title('Export tif')
window_geometry = '{}x{}+{}+{}'.format(
self.widget_geometries.tif_export_window_width,
self.widget_geometries.tif_export_window_height,
self.widget_geometries.tif_export_window_x,
self.widget_geometries.tif_export_window_y)
self.geometry(window_geometry)
self.resizable(width=0, height=0)
self.top_tif_export_main_frame = ttk.Frame(
self,
width=self.widget_geometries.tif_export_window_width,
height=self.widget_geometries.tif_export_window_height)
self.top_tif_export_main_frame.place(x=0, y=0)
self.build_window_elements()
self.button_box()
self.grab_set()
self.protocol('WM_DELETE_WINDOW', self.pressed_cancel)
self.top_tif_export_main_frame.focus_set()
self.wait_window(self)
def build_window_elements(self):
'''
Create additional elements used in
modal window.
'''
self.tif_quality_label = ttk.Label(
self.top_tif_export_main_frame,
text='Choose a compression of the TIFF file:')
self.tif_quality_label.place(x=15, y=10)
self.tif_compression_none = ttk.Radiobutton(
self.top_tif_export_main_frame,
text='No compression',
variable=self.texts.tif_compression_text,
value='None')
self.tif_compression_none.place(x=15, y=35)
self.tif_compression_tiff_deflate = ttk.Radiobutton(
self.top_tif_export_main_frame,
text='Tiff deflate',
variable=self.texts.tif_compression_text,
value='tiff_deflate')
self.tif_compression_tiff_deflate.place(x=15, y=60)
self.tif_compression_tiff_adobe_deflate = ttk.Radiobutton(
self.top_tif_export_main_frame,
text='Tiff adobe deflate',
variable=self.texts.tif_compression_text,
value='tiff_adobe_deflate')
self.tif_compression_tiff_adobe_deflate.place(x=15, y=85)
def button_box(self):
'''
Creates button box for the modal window.
'''
self.confirm_quality_button = ttk.Button(
self.top_tif_export_main_frame,
text='OK',
default=ACTIVE)
self.confirm_quality_button.place(x=50, y=117)
self.confirm_quality_button.bind('<Button-1>', self.pressed_ok)
self.bind('<Return>', self.pressed_ok)
self.cancel_quality_button = ttk.Button(
self.top_tif_export_main_frame,
text='Cancel')
self.cancel_quality_button.place(x=130, y=117)
self.cancel_quality_button.bind('<Button-1>', self.pressed_cancel_handler)
self.bind('<Escape>', self.pressed_cancel_handler)
def pressed_ok(self, event):
'''
Handles the events after confirmation of
save parameters.
'''
self.samples.export_status = True
self.samples.tif_compression = self.texts.tif_compression_text.get()
self.withdraw()
self.update_idletasks()
self.destroy()
def pressed_cancel_handler(self, event):
'''
Event handle for pressed cancel method.
'''
self.pressed_cancel()
def pressed_cancel(self):
'''
Method used to destroy modal window.
'''
self.samples.export_status = False
self.samples.tif_compression = self.previous_compression  # restore the value saved in __init__
self.main_widget.focus_set()
self.destroy()
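# Minimal standalone sketch (independent of the classes above) of the pattern
# the dialog relies on: several ttk.Radiobutton widgets share one Tkinter
# variable, and reading that variable with .get() yields the selected value,
# just like self.texts.tif_compression_text is read in pressed_ok().
if __name__ == "__main__":
	from tkinter import Tk, StringVar
	root = Tk()
	compression = StringVar(value="None")
	for value in ("None", "tiff_deflate", "tiff_adobe_deflate"):
		ttk.Radiobutton(root, text=value, variable=compression, value=value).pack(anchor="w")
	ttk.Button(root, text="OK", command=lambda: print(compression.get())).pack()
	root.mainloop()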
|
mariuszkowalski/BioCounter
|
gui/misc/tif_export_gui.py
|
Python
|
gpl-3.0
| 4,446
|
import unittest
from wpull.body import Body
from wpull.protocol.ftp.request import Reply, Command, Request, Response
class TestRequest(unittest.TestCase):
def test_parse_reply(self):
reply = Reply()
reply.parse(b'200 Hello\r\n')
self.assertEqual(200, reply.code)
self.assertEqual('Hello', reply.text)
reply = Reply()
reply.parse(b'200-Hello\r\n')
reply.parse(b'200 World!\r\n')
self.assertEqual(200, reply.code)
self.assertEqual('Hello\r\nWorld!', reply.text)
reply = Reply()
reply.parse(b'200-Hello\r\n')
reply.parse(b'F\r\n')
reply.parse(b' T\r\n')
reply.parse(b'200-P\r\n')
reply.parse(b'200 World!\r\n')
self.assertEqual(200, reply.code)
self.assertEqual('Hello\r\nF\r\nT\r\nP\r\nWorld!', reply.text)
self.assertRaises(AssertionError, reply.parse, b'200 Hello again')
def test_reply(self):
reply = Reply(213, 'Hello world!\nFerret transfer protocol')
self.assertEqual(
b'213-Hello world!\r\n213 Ferret transfer protocol\r\n',
reply.to_bytes()
)
self.assertEqual(213, reply.to_dict()['code'])
self.assertEqual(
'Hello world!\nFerret transfer protocol',
reply.to_dict()['text']
)
self.assertEqual((2, 1, 3), reply.code_tuple())
def test_parse_command(self):
command = Command()
command.parse(b'User narwhal@compuwhal.org\r\n')
self.assertEqual('USER', command.name)
self.assertEqual('narwhal@compuwhal.org', command.argument)
self.assertRaises(AssertionError, command.parse, b'OOPS\r\n')
command = Command()
command.parse(b'POKE\r\n')
self.assertEqual('POKE', command.name)
self.assertEqual('', command.argument)
self.assertRaises(AssertionError, command.parse, b'OOPS\r\n')
def test_command(self):
command = Command('User', 'narwhal@compuwhal.org')
self.assertEqual('USER', command.name)
self.assertEqual('narwhal@compuwhal.org', command.argument)
self.assertEqual('USER', command.to_dict()['name'])
self.assertEqual(
'narwhal@compuwhal.org', command.to_dict()['argument'])
command = Command('Poke')
self.assertEqual('POKE', command.name)
self.assertEqual('', command.argument)
self.assertEqual('POKE', command.to_dict()['name'])
self.assertEqual('', command.to_dict()['argument'])
def test_to_dict(self):
request = Request('ftp://foofle.com')
request_dict = request.to_dict()
self.assertEqual('ftp://foofle.com', request_dict['url'])
self.assertEqual('ftp', request_dict['protocol'])
response = Response()
response.request = request
response.reply = Reply(code=200, text='Success')
response_dict = response.to_dict()
self.assertEqual('ftp://foofle.com', response_dict['request']['url'])
self.assertEqual('ftp', response_dict['protocol'])
self.assertEqual(200, response_dict['reply']['code'])
self.assertEqual(200, response_dict['response_code'])
self.assertEqual('Success', response_dict['reply']['text'])
self.assertEqual('Success', response_dict['response_message'])
def test_to_dict_body(self):
response = Response()
response.body = Body()
response_dict = response.to_dict()
self.assertTrue(response_dict['body'])
response.body.close()
response = Response()
response.body = NotImplemented
response_dict = response.to_dict()
self.assertFalse(response_dict['body'])
|
chfoo/wpull
|
wpull/protocol/ftp/request_test.py
|
Python
|
gpl-3.0
| 3,726
|
# -*- coding: utf8 -*-
SQL = """select
(select count(*) FROM `af3_fond` where A100=1 )A1001,
(select count(*) FROM `af3_fond` where A100=2 )A1002,
(select count(*) FROM `af3_fond` where A100=3)A1003,
(select count(*) FROM `af3_fond` where A100=4)A1004,
(select count(*) FROM `af3_fond` where A100=5)A1005,
(select count(*) FROM `af3_fond` where A99=1)A991,
(select count(*) FROM `af3_fond` where A99=2)A992,
(select count(*) FROM `af3_fond` where A99=3)A993,
(select count(*) FROM `af3_fond` where A99=4)A994,
(select count(*) FROM `af3_fond` where A99=5)A995,
(select count(*) FROM `af3_fond` where A99=6)A996,
(select count(*) FROM `af3_fond` where A98=1)A981,
(select count(*) FROM `af3_fond` where A98=2)A982,
(select count(*) FROM `af3_fond` where A98=3)A983,
(select count(*) FROM `af3_fond` where A3=1)A31,
(select count(*) FROM `af3_fond` where A3=2)A32,
(select count(*) FROM `af3_fond` where A3=3)A33,
(select count(*) FROM `af3_fond` where A2=1)A21,
(select count(*) FROM `af3_fond` where A2=2)A22,
(select count(*) FROM `af3_fond` where A12=1)A121,
(select count(*) FROM `af3_fond` where A12=2)A122,
(select count(*) FROM `af3_fond` where A12=3)A123,
(select count(*) FROM `af3_fond` where A12=4)A124,
(select count(*) FROM `af3_fond` where A12=5)A125,
(select count(*) FROM `af3_fond` where A12=7)A127,
(select count(*) FROM `af3_fond` where A12=9)A129,
(select count(*) FROM `af3_fond` where A13=1)A131,
(select count(*) FROM `af3_fond` where A13=2)A132,
(select count(*) FROM `af3_fond` where A13=3)A133,
(select count(*) FROM `af3_fond` where A13=4)A134,
(select count(*) FROM `af3_fond` where A13=5)A135,
(select count(*) FROM `af3_fond` where A13=6)A136,
(select count(*) FROM `af3_fond` where A13=7)A137,
(select count(*) FROM `af3_fond` where A13=8)A138,
(select count(*) FROM `af3_fond` where A103=2)A1032,
(select count(*) FROM `af3_fond` where A103=3)A1033,
(select count(*) FROM `af3_fond` where A102=1)A1021,
(select count(*) FROM `af3_fond` where A102=2)A1022,
(select count(*) FROM `af3_fond` where A102=3)A1023,
(select count(*) FROM `af3_fond` where A102=4)A1024,
(select count(*) FROM `af3_fond` where A92!=0)A921,
(select count(*) FROM `af3_fond` where A93!=0)A931,
(select count(*) FROM `af3_fond` where A94!=0)A941,
(select count(*) FROM `af3_fond` where A15=1)A151,
(select count(*) FROM `af3_fond` where A39!=0)A391,
(select count(*) FROM `af3_fond` where A57!=0)A571,
(select count(*) FROM `af3_fond` where A78!=0)A781,
(select count(*) FROM `af3_fond` where A3!=1 and A3!=2 and A3!=3)A30,
(select count(*) FROM `af3_fond` where A17!=0)A171,
(select count(*) FROM `af3_fond` where A18!=0)A181,
(select count(*) FROM `af3_fond` where A19!=0)A191,
(select count(*) FROM `af3_fond` where A20!=0)A201,
(select count(*) FROM `af3_fond` where A21!=0)A211,
(select count(*) FROM `af3_fond` where A22!=0)A221,
(select count(*) FROM `af3_fond` where A23!=0)A231,
(select count(*) FROM `af3_fond` where A24!=0)A241,
(select count(*) FROM `af3_fond` where A25!=0)A251,
(select count(*) FROM `af3_fond` where A26!=0)A261,
0 as X
;"""
FOUND_ROWS = False
ROOT = "report-list"
ROOT_PREFIX = None
ROOT_POSTFIX = None
XSL_TEMPLATE = "data/af-web.xsl"
EVENT = None
WHERE = ("ID",)
PARAM = None
MESSAGE = "Страница отчётов не доступна"  # "The reports page is not available"
TITLE = "Страница списка отчётов"  # "Report list page"
ORDER = None
|
ffsdmad/af-web
|
cgi-bin/plugins2/report_list.py
|
Python
|
gpl-3.0
| 3,580
|
import numpy
from mpi4py import MPI
from ndarray import equiv_class
def centerofmass(label, pos, boxsize=1.0, comm=MPI.COMM_WORLD):
"""
Calulate the center of mass of particles of the same label.
The center of mass is defined as the mean of positions of particles,
but care has to be taken regarding to the periodic boundary.
This is a collective operation, and after the call, all ranks
will have the position of halos.
Parameters
----------
label : array_like (integers)
Halo label of particles, >=0
pos : array_like (float, 3)
position of particles.
boxsize : float or None
size of the periodic box, or None if no periodic boundary is assumed.
comm : :py:class:`MPI.Comm`
communicator for the collective operation.
Returns
-------
hpos : array_like (float, 3)
the center of mass position of the halos.
"""
Nhalo0 = max(comm.allgather(label.max())) + 1
N = numpy.bincount(label, minlength=Nhalo0)
comm.Allreduce(MPI.IN_PLACE, N, op=MPI.SUM)
if boxsize is not None:
posmin = equiv_class(label, pos, op=numpy.fmin, dense_labels=True, identity=numpy.inf,
minlength=len(N))
comm.Allreduce(MPI.IN_PLACE, posmin, op=MPI.MIN)
dpos = pos - posmin[label]
bhalf = boxsize * 0.5
dpos[dpos < -bhalf] += boxsize
dpos[dpos >= bhalf] -= boxsize
else:
dpos = pos
dpos = equiv_class(label, dpos, op=numpy.add, dense_labels=True, minlength=len(N))
comm.Allreduce(MPI.IN_PLACE, dpos, op=MPI.SUM)
dpos /= N[:, None]
if boxsize:
hpos = posmin + dpos
hpos %= boxsize
else:
hpos = dpos
return hpos
def count(label, comm=MPI.COMM_WORLD):
"""
Count the number of particles of the same label.
This is a collective operation, and after the call, all ranks
will have the particle count.
Parameters
----------
label : array_like (integers)
Halo label of particles, >=0
comm : :py:class:`MPI.Comm`
communicator for the collective operation.
Returns
-------
count : array_like
the count of number of particles in each halo
"""
Nhalo0 = max(comm.allgather(label.max())) + 1
N = numpy.bincount(label, minlength=Nhalo0)
comm.Allreduce(MPI.IN_PLACE, N, op=MPI.SUM)
return N
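# Single-process toy example (numpy only, no MPI, not part of the module) of the
# periodic trick used in centerofmass(): positions are first taken relative to
# the per-halo minimum, wrapped into [-boxsize/2, boxsize/2), and only then
# averaged, so a halo straddling the box edge is not torn apart.
if __name__ == "__main__":
	boxsize = 1.0
	# one halo straddling the periodic boundary at x = 0 / x = 1
	pos = numpy.array([0.98, 0.99, 0.01, 0.02])
	posmin = pos.min()
	dpos = pos - posmin
	dpos[dpos < -0.5 * boxsize] += boxsize
	dpos[dpos >= 0.5 * boxsize] -= boxsize
	com = (posmin + dpos.mean()) % boxsize
	print(com)  # -> 0.0, not the naive mean of 0.5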
|
DonRegan/nbodykit
|
nbodykit/halos.py
|
Python
|
gpl-3.0
| 2,417
|
__author__ = 'Rico'
from lang.language import translation
class CardDeck:
symbols = ["♥", "♦", "♣", "♠"]
valueInt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
@staticmethod
def create_deck():
from random import shuffle
deck = list(range(0, 52)) # 52 cards indexed 0-51; TODO currently only one deck ... maybe i should add another one.
shuffle(deck)
return deck[:]
def pick_one_card(self):
card = self.deck[0]
self.deck.pop(0)
return card
def get_card_name(self, card):
symbol = self.symbols[card//13]
value = self.value_str[card % 13]
card_name = "|"+symbol+" "+value+"|"
return card_name
def get_card_value(self, card):
return self.valueInt[card % 13]
def __init__(self, lang_id):
self.deck = self.create_deck()
self.value_str = [translation("ace", lang_id), "2", "3", "4", "5", "6", "7", "8", "9", "10",
translation("jack", lang_id), translation("queen", lang_id), translation("king", lang_id)]
|
d-Rickyy-b/TelegramBot
|
game/cardDeck.py
|
Python
|
gpl-3.0
| 1,060
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import contextlib
import logging
import os
import shlex
from subprocess import CalledProcessError
from typing import List
from snapcraft.project import Project
from snapcraft.internal import common, errors
from snapcraft.internal.meta.package_repository import PackageRepository
logger = logging.getLogger(__name__)
class PluginV1:
@classmethod
def schema(cls):
"""Return a json-schema for the plugin's properties as a dictionary.
Of importance to plugin authors is the 'properties' keyword and
optionally the 'requires' keyword with a list of required
'properties'.
By default the properties will be that of a standard VCS,
override in custom implementations if required.
"""
return {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"additionalProperties": False,
"properties": {},
}
@classmethod
def get_pull_properties(cls):
return []
@classmethod
def get_build_properties(cls):
return []
@classmethod
def get_required_package_repositories(self) -> List[PackageRepository]:
"""Define additional deb source lines using templates variables."""
return list()
@property
def stage_packages(self):
return self._stage_packages
@stage_packages.setter
def stage_packages(self, value):
self._stage_packages = value
def __init__(self, name, options, project=None):
self.name = name
self.build_snaps = []
self.stage_snaps = []
self.build_packages = []
self._stage_packages = []
with contextlib.suppress(AttributeError):
self._stage_packages = options.stage_packages.copy()
with contextlib.suppress(AttributeError):
self.build_packages = options.build_packages.copy()
with contextlib.suppress(AttributeError):
self.build_snaps = options.build_snaps.copy()
with contextlib.suppress(AttributeError):
self.stage_snaps = options.stage_snaps.copy()
self.project = project
self.options = options
if project:
if isinstance(project, Project) and project._get_build_base() not in (
"core",
"core16",
"core18",
):
raise errors.PluginBaseError(
part_name=self.name, base=project._get_build_base()
)
self.partdir = os.path.join(project.parts_dir, name)
else:
self.partdir = os.path.join(os.getcwd(), "parts", name)
self.sourcedir = os.path.join(self.partdir, "src")
self.installdir = os.path.join(self.partdir, "install")
self.statedir = os.path.join(self.partdir, "state")
self.build_basedir = os.path.join(self.partdir, "build")
source_subdir = getattr(self.options, "source_subdir", None)
if source_subdir:
self.builddir = os.path.join(self.build_basedir, source_subdir)
else:
self.builddir = self.build_basedir
# By default, snapcraft does an in-source build. Set this property to
# True if that's not desired.
self.out_of_source_build = False
# The API
def pull(self):
"""Pull the source code and/or internal prereqs to build the part."""
pass
def clean_pull(self):
"""Clean the pulled source for this part."""
pass
def build(self):
"""Build the source code retrieved from the pull phase."""
pass
def clean_build(self):
"""Clean the artifacts that resulted from building this part."""
pass
def get_manifest(self):
"""Return the information to record after the build of this part.
:rtype: dict
"""
pass
def snap_fileset(self):
"""Return a list of files to include or exclude in the resulting snap
The staging phase of a plugin's lifecycle may populate many things
into the staging directory in order to succeed in building a
project.
During the stripping phase and in order to have a clean snap, the
plugin can provide additional logic for stripping build components
from the final snap and alleviate the part author from doing so for
repetitive filesets.
These are the rules to honor when creating such list:
- includes can be just listed
- excludes must be preceded by -
For example::
(['bin', 'lib', '-include'])
"""
return []
def env(self, root):
"""Return a list with the execution environment for building.
Plugins often need special environment variables exported to the
system for some builds to take place. This is a list of strings
of the form key=value. The parameter root is the path to this part.
:param str root: The root for the part
"""
return []
def enable_cross_compilation(self):
"""Enable cross compilation for the plugin."""
raise errors.CrossCompilationNotSupported(part_name=self.name)
@property
def parallel_build_count(self):
"""Number of CPU's to use for building.
Number comes from `project.parallel_build_count` unless the part
has defined `disable-parallel` as `True`.
"""
if getattr(self.options, "disable_parallel", False):
return 1
else:
return self.project.parallel_build_count
# Helpers
def run(self, cmd, cwd=None, **kwargs):
if not cwd:
cwd = self.builddir
cmd_string = " ".join([shlex.quote(c) for c in cmd])
print(cmd_string)
os.makedirs(cwd, exist_ok=True)
try:
return common.run(cmd, cwd=cwd, **kwargs)
except CalledProcessError as process_error:
raise errors.SnapcraftPluginCommandError(
command=cmd, part_name=self.name, exit_code=process_error.returncode
) from process_error
def run_output(self, cmd, cwd=None, **kwargs):
if not cwd:
cwd = self.builddir
os.makedirs(cwd, exist_ok=True)
try:
return common.run_output(cmd, cwd=cwd, **kwargs)
except CalledProcessError as process_error:
raise errors.SnapcraftPluginCommandError(
command=cmd, part_name=self.name, exit_code=process_error.returncode
) from process_error
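# Hypothetical minimal plugin (illustrative only, not shipped with snapcraft):
# a v1 plugin subclasses PluginV1, optionally extends schema() with its own
# properties, and implements pull()/build() using the directories and helpers
# provided by the base class. The property name 'example-flag' is made up.
class _ExamplePlugin(PluginV1):
    @classmethod
    def schema(cls):
        schema = super().schema()
        schema["properties"]["example-flag"] = {"type": "boolean", "default": False}
        return schema

    @classmethod
    def get_build_properties(cls):
        # rebuild the part when this option changes
        return ["example-flag"]

    def build(self):
        super().build()
        # self.run() executes in self.builddir and raises
        # SnapcraftPluginCommandError on a non-zero exit code.
        self.run(["make", "DESTDIR=" + self.installdir])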
|
chipaca/snapcraft
|
snapcraft/plugins/v1/_plugin.py
|
Python
|
gpl-3.0
| 7,235
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import io
import os
import json
from crotal import logger
class Database(object):
"""
Database is the interface to the file 'db.json'.
"""
@classmethod
def from_file(cls, path):
"""
Read the database from the path argument.
:param path: The file path of the designated database file.
:return: The database object.
"""
try:
raw_db = json.loads(open(path, 'r').read())
except (ValueError, IOError):
raw_db = {}
return cls(path, content=raw_db)
def __init__(self, path, content=None):
"""
Initialize a new database object.
:param path: Indicating the path of the file that this database will
be written to when saving the database.
:param content: Content of the database if needed to predefine.
:return: None
"""
if content:
self.raw_db = content
else:
self.raw_db = {}
self._path = path
self._tables = {}
for field in self.raw_db:
self._tables[field] = Table(field, content=self.raw_db[field])
def __getitem__(self, table):
"""
Fetch a ``table`` from the database.
:param table: Name of the table acquired.
:return: A ``table`` object.
"""
return self.get_table(table)
def get_table(self, table):
"""
Get a table object, same to ``self.__getitem__``.
:param table: Name of the table acquired.
:return: A ``table`` object.
"""
if table not in self._tables:
self._tables[table] = Table(table, content={})
logger.info('New table "{0}" created.'.format(table))
return self._tables[table]
else:
return self._tables[table]
def get_item(self, table, key):
"""
Get the value directly from the database based on key and table name.
:param table: Name of the table acquired.
:param key: Name of the key in the indicated table.
:return: The corresponding value stored in the table.
"""
return self[table].get(key, {'content': None})
def set_item(self, table, key, value):
"""
Set an entry directly in the database based on key and table name.
:param table: Name of the table acquired.
:param key: Name of the key in the indicated table.
:param value: Value to be set in the table.
:return: None
"""
self[table][key] = value
def remove_item(self, table, key):
"""
Remove the entry directly from the database based on key and table name.
:param table: Name of the table acquired.
:param key: Name of the key in the indicated table.
:return: None
"""
if key in self.raw_db[table]:
del self[table][key]
else:
logger.warning("Failed to remove from database: {0}, TYPE: {1}".format(key, table))
def dumps(self):
"""
Similar to ``json.dumps``.
"""
json_output = {}
for table in self._tables:
json_output[table] = self._tables[table].content
json_string = json.dumps(json_output, ensure_ascii=False)
return json_string
def save(self):
"""
Write the content of the database to the file indicated by ``self._path``
in json format.
:return:
"""
io.open(self._path, 'w+', encoding='utf8').write(self.dumps())
class Table(object):
"""
Table is a wrapper of dictionary. The usage is the same as ``dict``.
"""
def __init__(self, table_name, content=None):
self._table_name = table_name
self._mapping = {} if not content else content
@property
def content(self):
return self._mapping
def __repr__(self):
return '<%s:%s Keys:%r>' % (
self.__class__.__name__,
self._table_name,
self._mapping.keys()
)
def __getitem__(self, item):
return self._mapping.get(item)
def __contains__(self, key):
return key in self._mapping
def __setitem__(self, key, value):
self._mapping[key] = value
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
try:
del self._mapping[key]
except KeyError:
raise KeyError(key)
def keys(self):
return self._mapping.keys()
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
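# Short usage sketch (not part of the module): the Database object is a thin
# json-backed key/value store grouped into tables; the file path below is a
# throwaway temporary location used purely for illustration.
if __name__ == "__main__":
	import tempfile
	db_path = os.path.join(tempfile.mkdtemp(), "db.json")
	db = Database.from_file(db_path)            # missing file -> empty database
	db.set_item("posts", "hello-world", {"content": "Hello!"})
	print(db.get_item("posts", "hello-world"))  # -> {'content': 'Hello!'}
	db.save()                                   # writes the json file to db_path
	print(Database.from_file(db_path)["posts"]["hello-world"])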
|
dinever/crotal
|
crotal/db.py
|
Python
|
gpl-3.0
| 4,917
|
#!/usr/bin/python3
# based on chapter 13 of Python for Kids 199
# "Adding some action making the ball move"
from tkinter import *
import random
import time
tk = Tk()
tk.title("Game")
tk.resizable(0, 0)
tk.wm_attributes("-topmost", 1)
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
class Ball:
def __init__(self,canvas,color):
self.canvas = canvas
self.id = canvas.create_oval(10,10,25,25, fill = color)
self.canvas.move(self.id,245,100)
def draw(self):
self.canvas.move(self.id,0,-1)
ball = Ball(canvas, 'red')
while 1:
ball.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.01)
|
mikef656/my-emacs-setup
|
mybin/python_4_kids/ch13/making_the_ball_move.py
|
Python
|
gpl-3.0
| 698
|
#!/usr/bin/env python
# -*- coding: UTF8 -*-
#
# ffmpeg library python wrapper.
# Copyright (C) 2011 Josiah Gordon <josiahg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" ffmpeg library python wrapper.
"""
__all__ = ['av']
|
zepto/musio-python2
|
musio/ffmpeg/__init__.py
|
Python
|
gpl-3.0
| 834
|
# coding: utf-8
from __future__ import absolute_import, division, unicode_literals, print_function
import logging
from .exceptions import APIError
from . import api
import config
class GitHubIssueHandler(logging.Handler):
"""Emit logged messages as issues on GitHub."""
headers = {'Accept': "application/vnd.github.v3+json"}
def __init__(self, owner, repo, user, access_token):
super(GitHubIssueHandler, self).__init__()
self.known_issues = set()
self.url = "https://api.github.com/repos/{}/{}/issues".format(owner, repo)
self._auth = (user, access_token)
def _detect_duplicate(self, title, labels=None):
if title in self.known_issues:
return True
params = {'state': "open", 'sort': "created", 'direction': "desc"}
if labels:
params['labels'] = ','.join(labels)
issues = []
try:
r = api.request_api(self.url, params=params, headers=self.headers)
issues.extend(i['title'] for i in r.json())
while 'next' in r.links:
r = api.request_api(r.links['next']['url'], headers=self.headers)
issues.extend(i['title'] for i in r.json())
except APIError:
pass
self.known_issues.update(issues)
return title in self.known_issues
def emit(self, record):
payload = {}
body = self.format(record)
if '\n' in body:
payload['title'], payload['body'] = body.split('\n', 1)
else:
payload['title'] = body
if hasattr(record, "gh_labels"):
payload['labels'] = record.gh_labels
if self._detect_duplicate(payload['title'], payload.get('labels', None)):
return
try:
r = api.request_api(self.url, json=payload, auth=self._auth,
headers=self.headers, method="POST")
except APIError:
self.handleError(record)
else:
if r.status_code == 201:
self.known_issues.add(payload['title'])
def setup_logging(main_handler):
logger = logging.getLogger("vmbot")
logger.setLevel(logging.DEBUG)
cc_logger = logging.getLogger("cachecontrol")
cc_logger.setLevel(logging.WARNING)
main_handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s",
"%Y-%m-%d %H:%M:%S"))
main_handler.setLevel(config.LOGLEVEL)
logger.addHandler(main_handler)
cc_logger.addHandler(main_handler)
gh = config.GITHUB
if gh['user'] and gh['token']:
esi_handler = GitHubIssueHandler("XVMX", "VMBot", gh['user'], gh['token'])
esi_handler.setLevel(logging.WARNING)
logging.getLogger("vmbot.helpers.api.esi").addHandler(esi_handler)
return logger
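# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how the handlers above might be wired together; config.LOGLEVEL and
# config.GITHUB are assumed to be provided by the package's config module.
if __name__ == '__main__':
    console = logging.StreamHandler()
    log = setup_logging(console)
    # With GitHub credentials configured, records at WARNING and above on the
    # "vmbot.helpers.api.esi" logger are also opened as issues; gh_labels is
    # optional metadata picked up by GitHubIssueHandler.emit().
    logging.getLogger("vmbot.helpers.api.esi").error(
        "ESI request failed\nDetails of the failed request go here",
        extra={"gh_labels": ["ESI"]}
    )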
|
XVMX/vmbot
|
vmbot/helpers/logging.py
|
Python
|
gpl-3.0
| 2,851
|
import sys
# example of dictionary usage
period = sys.argv[1]
# set up dictionary
period_dict={}
# add data
period_dict['Cambrian']=(541.0,485.4)
period_dict['Ordovician']=(485.4,443.4)
period_dict['Silurian']=(443.4,419.2)
period_dict['Devonian']=(419.2,358.9)
period_dict['Carboniferous']=(358.9,298.9)
period_dict['Permian']=(298.9,252.2)
period_dict['Triassic']=(252.2,201.3)
period_dict['Jurassic']=(201.3,145.0)
period_dict['Cretaceous']=(145.0,66.0)
period_dict['Paleogene']=(66.0,23.0)
period_dict['Neogene']=(23.0,2.6)
period_dict['Tertiary']=(66.0,2.6)
period_dict['Quaternary']=(2.6,0.0)
# look up data
print period
print 'duration: ' + str(period_dict[period][0] - period_dict[period][1])
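# --- Hypothetical extension sketch (not part of the original sample) ---
# The bare period_dict[period] lookup above raises a KeyError for unknown
# names; dict.get() with a default allows a friendlier message instead.
span = period_dict.get(period)
if span is None:
    print 'unknown period: ' + period
else:
    print 'start: ' + str(span[0]) + ' Ma, end: ' + str(span[1]) + ' Ma'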
|
eml11/PMPPython
|
samples/geological_periods.py
|
Python
|
gpl-3.0
| 705
|
# -*- coding: utf-8 -*-
import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, "" # desactivada verificación pq se encalla!
response = httptools.downloadpage(page_url, headers={"Referer": page_url})
global page
page = response
if "no+existe" in response.data or 'no existe.</p>' in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
if "No+tienes+permiso" in response.data:
return False, "[gvideo] No tienes permiso para acceder a este video"
if "Se ha producido un error" in response.data:
return False, "[gvideo] Se ha producido un error en el reproductor de google"
if "No+se+puede+procesar+este" in response.data:
return False, "[gvideo] No se puede procesar este video"
if response.code == 429:
return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info()
video_urls = []
urls = []
streams =[]
logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url:
url = page_url
headers_string = httptools.get_url_headers(page_url, forced=True)
quality = scrapertools.find_single_match (url, '.itag=(\d+).')
if not quality:
quality = '59'
streams.append((quality, url))
else:
data = page.data
bloque= scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map(.*)')
if bloque:
data = bloque
data = data.decode('unicode-escape', errors='replace')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = httptools.get_url_headers(page_url, forced=True)
streams = scrapertools.find_multiple_matches(data,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags.get(itag, ''), video_url])
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
return video_urls
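# --- Hypothetical illustration (not part of the original module) ---
# get_video_url() returns a list sorted by ascending quality label, e.g.:
#   [['360p', '<direct video url + header string>'],
#    ['720p', '<direct video url + header string>']]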
|
alfa-jor/addon
|
plugin.video.alfa/servers/gvideo.py
|
Python
|
gpl-3.0
| 2,665
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import parser
import bpy
from bpy.props import BoolProperty, StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (sv_Vars, updateNode, multi_socket, changable_sockets,
dataSpoil, dataCorrect, levelsOflist,
SvSetSocketAnyType, SvGetSocketAnyType)
from math import acos, acosh, asin, asinh, atan, atan2, \
atanh,ceil,copysign,cos,cosh,degrees,e, \
erf,erfc,exp,expm1,fabs,factorial,floor, \
fmod,frexp,fsum,gamma,hypot,isfinite,isinf, \
isnan,ldexp,lgamma,log,log10,log1p,log2,modf, \
pi,pow,radians,sin,sinh,sqrt,tan,tanh,trunc
class Formula2Node(bpy.types.Node, SverchCustomTreeNode):
''' Formula2 '''
bl_idname = 'Formula2Node'
bl_label = 'Formula2'
bl_icon = 'OUTLINER_OB_EMPTY'
formula = StringProperty(name='formula',
default='x+n[0]',
update=updateNode)
typ = StringProperty(name='typ',
default='')
newsock = BoolProperty(name='newsock',
default=False)
base_name = 'n'
multi_socket_type = 'StringsSocket'
def draw_buttons(self, context, layout):
layout.prop(self, "formula", text="")
def sv_init(self, context):
self.inputs.new('StringsSocket', "X", "X")
self.inputs.new('StringsSocket', "n[0]", "n[0]")
self.outputs.new('StringsSocket', "Result", "Result")
def update(self):
# inputs
multi_socket(self, min=2, start=-1, breck=True)
if self.inputs['X'].links:
            # adaptive socket
inputsocketname = 'X'
outputsocketname = ['Result']
changable_sockets(self, inputsocketname, outputsocketname)
def process(self):
if self.inputs['X'].is_linked:
vecs = SvGetSocketAnyType(self, self.inputs['X'])
else:
vecs = [[0.0]]
# outputs
if not self.outputs['Result'].is_linked:
return
list_mult = []
if self.inputs['n[0]'].is_linked:
i = 0
for socket in self.inputs[1:]:
if socket.is_linked:
list_mult.append(SvGetSocketAnyType(self, socket))
#print(list_mult)
code_formula = parser.expr(self.formula).compile()
# finding nasty levels, make equal nastyness (canonical 0,1,2,3)
levels = [levelsOflist(vecs)]
for n in list_mult:
levels.append(levelsOflist(n))
maxlevel = max(max(levels), 3)
diflevel = maxlevel - levels[0]
if diflevel:
vecs_ = dataSpoil([vecs], diflevel-1)
vecs = dataCorrect(vecs_, nominal_dept=2)
for i, lev in enumerate(levels):
if i == 0:
continue
diflevel = maxlevel-lev
if diflevel:
list_temp = dataSpoil([list_mult[i-1]], diflevel-1)
list_mult[i-1] = dataCorrect(list_temp, nominal_dept=2)
#print(list_mult)
r = self.inte(vecs, code_formula, list_mult, 3)
result = dataCorrect(r, nominal_dept=min((levels[0]-1), 2))
SvSetSocketAnyType(self, 'Result', result)
def inte(self, list_x, formula, list_n, levels, index=0):
''' calc lists in formula '''
out = []
new_list_n = self.normalize(list_n, list_x)
for j, x_obj in enumerate(list_x):
out1 = []
for k, x_lis in enumerate(x_obj):
out2 = []
for q, x in enumerate(x_lis):
out2.append(self.calc_item(x, formula, new_list_n, j, k, q))
out1.append(out2)
out.append(out1)
return out
def calc_item(self, x, formula, nlist, j, k, q):
X = x
n = []
a = []
list_vars = [w for w in sv_Vars.keys()]
for v in list_vars:
if v[:6] == 'sv_typ':
continue
abra = sv_Vars[v]
exec(str(v)+'=[]')
for i, aa_abra in enumerate(abra):
eva = str(v)+'.append('+str(aa_abra)+')'
eval(eva)
for nitem in nlist:
n.append(nitem[j][k][q])
N = n
return eval(formula)
def normalize(self, listN, listX):
Lennox = len(listX)
new_list_n = []
for ne in listN:
Lenin = len(ne)
equal = Lennox - Lenin
if equal > 0:
self.enlarge(ne, equal)
for i, obj in enumerate(listX):
Lennox = len(obj)
Lenin = len(ne[i])
equal = Lennox - Lenin
if equal > 0:
self.enlarge(ne[i], equal)
for j, list in enumerate(obj):
Lennox = len(list)
Lenin = len(ne[i][j])
equal = Lennox - Lenin
if equal > 0:
self.enlarge(ne[i][j], equal)
new_list_n.append(ne)
return new_list_n
def enlarge(self, lst, equal):
''' enlarge minor n[i] list to size of x list '''
lst.extend([lst[-1] for i in range(equal)])
#return lst
def register():
bpy.utils.register_class(Formula2Node)
def unregister():
bpy.utils.unregister_class(Formula2Node)
if __name__ == '__main__':
register()
|
kilon/sverchok
|
nodes/number/formula2.py
|
Python
|
gpl-3.0
| 6,390
|
# Functions for manipulating .sum summary files.
import re
import os.path
from StringIO import StringIO
# Helper regex for parse_sum_line.
sum_matcher = re.compile('^(.?(PASS|FAIL)): (.*)$')
# You must call set_web_base at startup to set this.
gdb_web_base = None
def set_web_base(arg):
global gdb_web_base
gdb_web_base = arg
if not os.path.isdir(gdb_web_base):
# If the parent doesn't exist, we're confused.
# So, use mkdir and not makedirs.
os.mkdir(gdb_web_base, 0755)
class DejaResults(object):
def __init__(self):
object.__init__(self)
# Parse a single line from a .sum file.
# Uniquify the name, and put the result into OUT_DICT.
# If the line does not appear to be about a test, ignore it.
def parse_sum_line(self, out_dict, line):
global sum_matcher
line = line.rstrip()
m = re.match(sum_matcher, line)
if m:
result = m.group(1)
test_name = m.group(3)
if test_name in out_dict:
i = 2
while True:
nname = test_name + ' <<' + str(i) + '>>'
if nname not in out_dict:
break
i = i + 1
test_name = nname
out_dict[test_name] = result
def _write_sum_file(self, sum_dict, subdir, filename):
global gdb_web_base
bdir = os.path.join(gdb_web_base, subdir)
if not os.path.isdir(bdir):
os.makedirs(bdir, 0755)
fname = os.path.join(bdir, filename)
keys = sum_dict.keys()
keys.sort()
f = open(fname, 'w')
for k in keys:
f.write(sum_dict[k] + ': ' + k + '\n')
f.close()
def write_sum_file(self, sum_dict, builder, filename):
self._write_sum_file(sum_dict, builder, filename)
def write_baseline(self, sum_dict, builder, branch):
self.write_sum_file(sum_dict, os.path.join(builder, branch),
'baseline')
# Read a .sum file.
# The builder name is BUILDER.
# The base file name is given in FILENAME. This should be a git
# revision; to read the baseline file for a branch, use `read_baseline'.
# Returns a dictionary holding the .sum contents, or None if the
# file did not exist.
def read_sum_file(self, builder, filename):
global gdb_web_base
fname = os.path.join(gdb_web_base, builder, filename)
if os.path.exists(fname):
result = {}
f = open(fname, 'r')
for line in f:
self.parse_sum_line (result, line)
f.close()
else:
result = None
return result
def read_baseline(self, builder, branch):
return self.read_sum_file(builder, os.path.join(branch, 'baseline'))
# Parse some text as a .sum file and return the resulting
# dictionary.
def read_sum_text(self, text):
cur_file = StringIO(text)
cur_results = {}
for line in cur_file.readlines():
self.parse_sum_line(cur_results, line)
return cur_results
# Compute regressions between RESULTS and BASELINE.
# BASELINE will be modified if any new PASSes are seen.
# Returns a regression report, as a string.
def compute_regressions(self, results, baseline):
our_keys = results.keys()
our_keys.sort()
result = ''
xfails = self.read_sum_file('', 'xfail')
if xfails is None:
xfails = {}
for key in our_keys:
# An XFAIL entry means we have an unreliable test.
if key in xfails:
continue
# A transition to PASS means we should update the baseline.
if results[key] == 'PASS':
if key not in baseline or baseline[key] != 'PASS':
baseline[key] = 'PASS'
# A regression is just a transition to FAIL.
if results[key] != 'FAIL':
continue
if key not in baseline:
result = result + 'new FAIL: ' + key + '\n'
elif baseline[key] != 'FAIL':
result = result + baseline[key] + ' -> FAIL: ' + key + '\n'
return result
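# --- Hypothetical usage sketch (not part of the original module) ---
# Parses two small .sum snippets and reports regressions between them.
# set_web_base() must be called first because compute_regressions() looks
# for an optional 'xfail' file under that directory.
if __name__ == '__main__':
    import tempfile
    set_web_base(tempfile.mkdtemp())
    deja = DejaResults()
    baseline = deja.read_sum_text('PASS: gdb.base/break.exp\n'
                                  'PASS: gdb.base/run.exp\n')
    results = deja.read_sum_text('PASS: gdb.base/break.exp\n'
                                 'FAIL: gdb.base/run.exp\n')
    # Prints "PASS -> FAIL: gdb.base/run.exp"
    print deja.compute_regressions(results, baseline)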
|
tromey/gdb-buildbot
|
lib/sumfiles.py
|
Python
|
gpl-3.0
| 4,263
|
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2015 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
"""Brooker classes to organize ldap methods.
Stuff is split in classes, like:
* Replica
* Backend
* Suffix
You will access this from:
DirSrv.backend.methodName()
"""
from . import config
from .config import *
import lib389
from lib389 import DirSrv
conn = None
added_entries = None
added_backends = None
MOCK_REPLICA_ID = '12'
MOCK_TESTREPLICA_DN = "cn=testReplica,cn=ldbm database,cn=plugins,cn=config"
def setup():
# uses an existing 389 instance
# add a suffix
# add an agreement
# This setup is quite verbose but to test DirSrv method we should
# do things manually. A better solution would be to use an LDIF.
global conn
conn = DirSrv(**config.auth)
conn.verbose = True
conn.added_entries = []
conn.added_backends = set(['o=mockbe1'])
conn.added_replicas = []
"""
# add a backend for testing ruv and agreements
addbackend_harn(conn, 'testReplica')
# add another backend for testing replica.add()
addbackend_harn(conn, 'testReplicaCreation')
"""
def teardown():
global conn
conn.config.loglevel([lib389.LOG_CACHE])
conn.config.loglevel([256], service='access')
"""
drop_added_entries(conn)
conn.delete_s(','.join(['cn="o=testreplica"', DN_MAPPING_TREE]))
drop_backend(conn, 'o=testreplica')
#conn.delete_s('o=testreplica')
"""
def loglevel_test():
vals = [lib389.LOG_CACHE, lib389.LOG_REPLICA, lib389.LOG_CONNECT]
expected = sum(vals)
assert conn.config.loglevel(vals) == expected
ret = conn.config.get('nsslapd-errorlog-level')
assert ret == str(expected), "expected: %r got: %r" % (expected, ret)
def loglevel_update_test():
vals = [lib389.LOG_CACHE, lib389.LOG_CONNECT]
e = sum(vals)
assert conn.config.loglevel(vals) == e
vals = [lib389.LOG_REPLICA]
ret = conn.config.loglevel(vals, update=True)
assert ret == (e + sum(vals)), "expected %s got %s" % (e + sum(vals), ret)
def access_loglevel_test():
vals = [lib389.LOG_CACHE, lib389.LOG_REPLICA, lib389.LOG_CONNECT]
assert conn.config.loglevel(vals, service='access') == sum(vals)
|
Ilias95/lib389
|
lib389/tests/config_test.py
|
Python
|
gpl-3.0
| 2,329
|
import numpy as np
from pyFTS.partitioners import partitioner
from pyFTS.models.nonstationary import common, perturbation
from pyFTS.common import FuzzySet as stationary_fs
class PolynomialNonStationaryPartitioner(partitioner.Partitioner):
"""
Non Stationary Universe of Discourse Partitioner
"""
def __init__(self, data, part, **kwargs):
""""""
super(PolynomialNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions,
func=part.membership_function, names=part.setnames,
prefix=part.prefix, transformation=part.transformation,
indexer=part.indexer, preprocess=False)
self.sets = {}
loc_params, wid_params = self.get_polynomial_perturbations(data, **kwargs)
if self.ordered_sets is None and self.setnames is not None:
self.ordered_sets = part.setnames
else:
self.ordered_sets = stationary_fs.set_ordered(part.sets)
for ct, key in enumerate(self.ordered_sets):
set = part.sets[key]
loc_roots = np.roots(loc_params[ct])[0]
wid_roots = np.roots(wid_params[ct])[0]
tmp = common.FuzzySet(set.name, set.mf, set.parameters,
location=perturbation.polynomial,
location_params=loc_params[ct],
location_roots=loc_roots, #**kwargs)
width=perturbation.polynomial,
width_params=wid_params[ct],
width_roots=wid_roots, **kwargs)
self.sets[set.name] = tmp
def poly_width(self, par1, par2, rng, deg):
a = np.polyval(par1, rng)
b = np.polyval(par2, rng)
diff = [b[k] - a[k] for k in rng]
tmp = np.polyfit(rng, diff, deg=deg)
return tmp
def scale_up(self,x,pct):
if x > 0: return x*(1+pct)
else: return x*pct
def scale_down(self,x,pct):
if x > 0: return x*pct
else: return x*(1+pct)
def get_polynomial_perturbations(self, data, **kwargs):
w = kwargs.get("window_size", int(len(data) / 5))
degree = kwargs.get("degree", 2)
xmax = [data[0]]
tmax = [0]
xmin = [data[0]]
tmin = [0]
l = len(data)
for i in np.arange(0, l, w):
sample = data[i:i + w]
tx = max(sample)
xmax.append(tx)
tmax.append(np.ravel(np.argwhere(data == tx)).tolist()[0])
tn = min(sample)
xmin.append(tn)
tmin.append(np.ravel(np.argwhere(data == tn)).tolist()[0])
cmax = np.polyfit(tmax, xmax, deg=degree)
cmin = np.polyfit(tmin, xmin, deg=degree)
cmed = []
for d in np.arange(0, degree + 1):
cmed.append(np.linspace(cmin[d], cmax[d], self.partitions)[1:self.partitions - 1])
loc_params = [cmin.tolist()]
for i in np.arange(0, self.partitions - 2):
tmp = [cmed[k][i] for k in np.arange(0, degree + 1)]
loc_params.append(tmp)
loc_params.append(cmax.tolist())
rng = np.arange(0, l)
clen = []
for i in np.arange(1, self.partitions-1):
tmp = self.poly_width(loc_params[i - 1], loc_params[i + 1], rng, degree)
clen.append(tmp)
tmp = self.poly_width(loc_params[0], loc_params[1], rng, degree)
clen.insert(0, tmp)
tmp = self.poly_width(loc_params[self.partitions-2], loc_params[self.partitions-1], rng, degree)
clen.append(tmp)
tmp = (loc_params, clen)
return tmp
def build(self, data):
pass
class SimpleNonStationaryPartitioner(partitioner.Partitioner):
"""
Non Stationary Universe of Discourse Partitioner
"""
def __init__(self, data, part, **kwargs):
""""""
super(SimpleNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions,
func=part.membership_function, names=part.setnames,
prefix=part.prefix, transformation=part.transformation,
indexer=part.indexer)#, preprocess=False)
self.partitions = part.partitions
for key in part.sets.keys():
set = part.sets[key]
tmp = common.FuzzySet(set.name, set.mf, set.parameters, **kwargs)
tmp.centroid = set.centroid
self.sets[key] =tmp
self.ordered_sets = stationary_fs.set_ordered(self.sets)
def build(self, data):
return {}
def simplenonstationary_gridpartitioner_builder(data, npart, transformation):
from pyFTS.partitioners import Grid
from pyFTS.models.nonstationary import perturbation, partitioners
tmp_fs = Grid.GridPartitioner(data=data, npart=npart, transformation=transformation)
fs = partitioners.SimpleNonStationaryPartitioner(data, tmp_fs,
location=perturbation.polynomial,
location_params=[1, 0],
location_roots=0,
width=perturbation.polynomial,
width_params=[1, 0],
width_roots=0)
return fs
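# --- Hypothetical usage sketch (not part of the original module) ---
# Builds a simple non-stationary grid partitioner from synthetic data;
# assumes pyFTS is installed and no data transformation is applied.
if __name__ == '__main__':
    data = np.random.normal(0, 1, 200)
    fs = simplenonstationary_gridpartitioner_builder(data, npart=7, transformation=None)
    print(fs.partitions, sorted(fs.sets.keys()))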
|
petroniocandido/pyFTS
|
pyFTS/models/nonstationary/partitioners.py
|
Python
|
gpl-3.0
| 5,675
|
#!/usr/bin/env python
from functools import partial
from math import sqrt
from netCDF4 import Dataset
import sys
import optparse
import rtree
optparse.OptionParser.format_epilog = lambda self, formatter: self.epilog
class NcFiller:
def __init__(self, options ):
self.options = options
self.ncIn = None
self.ncOut = None
def closeFiles( self ):
if self.ncIn:
self.ncIn.close()
if self.ncOut:
self.ncOut.close()
def getAverageProcessor(self):
if self.options.weighted:
return partial(computeWeightedAverage, p=self.options.weighted)
else:
return computeAverage
def initOutput(self):
# Clone dims.
for dim in self.ncIn.dimensions:
self.ncOut.createDimension( dim, len( self.ncIn.dimensions[dim] ) )
# Clone vars.
for name in self.ncIn.variables:
varIn = self.ncIn.variables[ name ]
dtype = self.options.types.get( name, varIn.dtype )
if hasattr( varIn, '_FillValue' ):
fillValue = varIn._FillValue
else:
fillValue = None
varOut = self.ncOut.createVariable( name, dtype, varIn.dimensions, fill_value=fillValue )
for att in dir( varIn ):
if att in ['_FillValue']: continue
if type(att) == unicode:
exec( 'varOut.{att} = varIn.{att}'.format(att=att) )
def openFiles( self ):
self.ncIn = Dataset( self.options.input )
self.ncOut = Dataset( self.options.output, 'w' )
def processVar(self, name):
ndims = len( self.ncIn.variables[ name ].dimensions )
varIn = self.ncIn.variables[ name ]
varOut = self.ncOut.variables[ name ]
varIn.set_auto_maskandscale(False)
# Assume static missing mask.
if 4 == ndims:
matrix = varIn[0, 0, ...]
fun = self.processVar4d
elif 3 == ndims:
matrix = varIn[0, ...]
fun = self.processVar3d
else:
matrix = varIn[...]
fun = self.processVar2d
tree, treeCoords = buildValidPixelTree( matrix, self.options.missing )
missingCoords = findMissing( matrix, self.options.missing )
avgFun = self.getAverageProcessor()
# open('/tmp/missingCoords.txt','w').write( str( missingCoords) )
fun(tree, treeCoords, missingCoords, varIn, varOut, avgFun)
def processVar4d(self, tree, treeCoords, missingCoords, varIn, varOut, avgFun):
nt = varIn.shape[0]
nz = varIn.shape[1]
for tIndex in xrange(nt):
print 'Processing t={0}/{1}'.format( tIndex+1, nt )
for zIndex in xrange(nz):
if self.options.progress:
print 'Processing z={0}/{1}'.format( zIndex+1, nz )
matrix = varIn[tIndex, zIndex, ...]
fillMissing2d(matrix, tree, treeCoords, missingCoords,
self.options.neighbors, avgFun)
varOut[ tIndex, zIndex ] = matrix
def processVar3d(self, tree, treeCoords, missingCoords, varIn, varOut, avgFun):
nz = varIn.shape[0]
for zIndex in xrange(nz):
if self.options.progress:
print 'Processing z={0}/{1}'.format( zIndex+1, nz )
            matrix = varIn[zIndex, ...]
fillMissing2d( matrix, tree, treeCoords, missingCoords, self.options.neighbors, avgFun )
varOut[ zIndex ] = matrix
def processVar2d(self, tree, treeCoords, missingCoords, varIn, varOut, avgFun):
matrix = varIn[...]
fillMissing2d( matrix, tree, treeCoords, missingCoords, self.options.neighbors, avgFun )
varOut[:] = matrix
def processVars(self):
for var in self.options.vars:
self.processVar( var )
def run(self):
self.openFiles()
self.initOutput()
self.processVars()
self.closeFiles()
def buildValidPixelTree( matrix, missingValue ):
'''
Return ij coordinate RTree for all pixels that do not have invalid values,
and the list of coords.
@param matrix 2d matrix.
@param invalid What values to ignore.
'''
idx = rtree.index.Index()
coords = [] # The inserted coords for lookup later.
ny, nx = matrix.shape[-2:]
nInserted = 0
for i in xrange( ny ):
for j in xrange( nx ):
            if missingValue != matrix[ i, j ]:
# Insert (left, bottom, right, top).
idx.insert(nInserted, (j,i,j,i) )
coords.append( (i,j) )
nInserted += 1
return idx, coords
def computeAverage( i, j, matrix, nearestCoords ):
'''
Returns single value that is average for point at
position i,j in matrix, using 'nearestCoords' for weight inputs.
'''
total = 0.
for i2,j2 in nearestCoords:
total += matrix[ i2, j2 ]
result = total / len( nearestCoords )
return result
def computeWeightedAverage( i, j, matrix, nearestCoords, p=2 ):
'''
Returns single value that is p=2 distance weighted average for point at
position i,j in matrix, using 'nearestCoords' for weight inputs.
'''
u = 0.
sumw = 0.
for i2,j2 in nearestCoords:
d = sqrt( (i-i2)**2 + (j-j2)**2 )
w = 1. / d**p
u += w * matrix[ i2, j2 ]
sumw += w
return u / sumw
def fillMissing2d(matrix, tree, treeCoords, missingCoords, nNearest, computeAvg):
'''
@param tree Rtree of valid pixel coords without missing value.
@param missingCoords List of (y,x) coords for pixels that had missing value.
'''
for i,j in missingCoords:
nearestObjs = tree.nearest( (j,i,j,i), nNearest )
nearestIds = list( nearestObjs )
nearestCoords = [ treeCoords[idx] for idx in nearestIds ]
newValue = computeAvg( i, j, matrix, nearestCoords )
matrix[ i, j ] = newValue
def findMissing( matrix, missingValue ):
'''
Returns list of (y,x) pixel coords where missing value is found.
'''
result = []
ny, nx = matrix.shape[-2:]
for i in xrange( ny ):
for j in xrange( nx ):
# print '%.17g' % matrix[ i, j ], missingValue == matrix[ i, j ]
if missingValue == matrix[ i, j ]:
result.append( (i,j) )
return result
def parseOptions( argv ):
description = 'Fill missing values with nearest neighbor (weighted) averages for Netcdf files. Assumes static grid masks.'
examples = '''
Examples:
./ncFillMissing --missing -1e20 --progress --vars u,v in.nc out.nc
'''
usage = 'Usage: %prog [options] in.nc out.nc'
parser = optparse.OptionParser(description=description,
epilog=examples,
usage=usage)
parser.add_option('-d', '--debug', help='Print debug messages.',
action="store_true")
parser.add_option('-i', '--input', help='Input file.')
parser.add_option('-m', '--missing', help='Missing value to fill.')
parser.add_option('-n', '--neighbors',
help='Number of nearest values to use. Default=1',
default=1)
parser.add_option('-o', '--output', help='Output file.')
parser.add_option('-p', '--progress', help='Print progress.',
action="store_true")
parser.add_option('-t', '--types', help='Output type override. For example, -t SST:f,TIME:d')
parser.add_option('-v', '--vars', help='Variables to process. Comma delimited.')
parser.add_option('-w', '--weighted',
help='Use inverse pixel distance weighting exponent p. Default is off.',
default=0)
opts, args = parser.parse_args( argv )
if not opts.input:
if len(args) < 2:
parser.error('--input missing')
else:
opts.input = args[0]
if not opts.output:
if len(args) < 2:
parser.error('--output missing')
else:
opts.output = args[1]
if not opts.vars:
parser.error('--vars missing')
opts.vars = opts.vars.split(',')
opts.weighted = int(opts.weighted)
types = {}
if opts.types:
for pair in opts.types.split(','):
key, val = pair.split(':')
types[key] = val
opts.types = types
if opts.debug:
print opts
try:
opts.missing = float( opts.missing )
except: pass
opts.neighbors = int( opts.neighbors )
return opts
def main( argv ):
options = parseOptions( argv )
filler = NcFiller( options )
filler.run()
if __name__ == '__main__':
main( sys.argv[1:] )
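# --- Hypothetical programmatic usage sketch (not part of the original script) ---
# Equivalent to the CLI example shown in the --help epilog; file names are placeholders.
#   opts = parseOptions(['--missing', '-1e20', '--vars', 'u,v', 'in.nc', 'out.nc'])
#   NcFiller(opts).run()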
# LICENSE BEGIN
#
# ncFillMissing
# Copyright (C) 2016 Remik Ziemlinski
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# LICENSE END
|
rsmz/ncFillMissing
|
ncFillMissing.py
|
Python
|
gpl-3.0
| 9,811
|
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011-2014 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
import os.path
import wx
import outspline.interfaces.wxgui_api as wxgui_api
def save_to_json():
return wx.FileDialog(wxgui_api.get_main_frame(),
message="Export schedule view",
defaultDir=os.path.expanduser('~'),
defaultFile="outspline_events.json",
wildcard="JSON (*.json)|*.json|All files (*)|*",
style=wx.SAVE | wx.FD_OVERWRITE_PROMPT)
def save_to_tsv():
return wx.FileDialog(wxgui_api.get_main_frame(),
message="Export schedule view",
defaultDir=os.path.expanduser('~'),
defaultFile="outspline_events.tsv",
wildcard="TSV (*.tsv)|*.tsv|All files (*)|*",
style=wx.SAVE | wx.FD_OVERWRITE_PROMPT)
def save_to_xml():
return wx.FileDialog(wxgui_api.get_main_frame(),
message="Export schedule view",
defaultDir=os.path.expanduser('~'),
defaultFile="outspline_events.xml",
wildcard="XML (*.xml)|*.xml|All files (*)|*",
style=wx.SAVE | wx.FD_OVERWRITE_PROMPT)
def warn_user_rights(filename):
return wx.MessageDialog(wxgui_api.get_main_frame(), 'You are not '
'authorized to '
'create or overwrite {}.'.format(filename),
caption="Export schedule view",
style=wx.OK | wx.ICON_EXCLAMATION)
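# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how a caller might drive one of the dialogs above; it assumes a
# running Outspline wx application and a write_callback supplied by the caller.
def _export_json_example(write_callback):
    dialog = save_to_json()
    try:
        if dialog.ShowModal() == wx.ID_OK:
            write_callback(dialog.GetPath())
    finally:
        dialog.Destroy()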
|
xguse/outspline
|
src/outspline/plugins/wxtasklist/msgboxes.py
|
Python
|
gpl-3.0
| 2,430
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import datetime
import hashlib
import pytz
import threading
import urllib2
import urlparse
from lxml import etree
from odoo import api, fields, models, tools, _
from odoo.modules import get_module_resource
from odoo.osv.expression import get_unaccent_wrapper
from odoo.exceptions import UserError, ValidationError
from odoo.osv.orm import browse_record
# Global variables used for the warning fields declared on the res.partner
# in the following modules : sale, purchase, account, stock
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.')
ADDRESS_FORMAT_CLASSES = {
'%(city)s %(state_code)s\n%(zip)s': 'o_city_state',
'%(zip)s %(city)s': 'o_zip_city'
}
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
@api.model
def _lang_get(self):
return self.env['res.lang'].get_installed()
@api.model
def _tz_get(self):
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
return [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
class FormatAddress(object):
@api.model
def fields_view_get_address(self, arch):
address_format = self.env.user.company_id.country_id.address_format or ''
for format_pattern, format_class in ADDRESS_FORMAT_CLASSES.iteritems():
if format_pattern in address_format:
doc = etree.fromstring(arch)
for address_node in doc.xpath("//div[@class='o_address_format']"):
# add address format class to address block
address_node.attrib['class'] += ' ' + format_class
if format_class.startswith('o_zip'):
zip_fields = address_node.xpath("//field[@name='zip']")
city_fields = address_node.xpath("//field[@name='city']")
if zip_fields and city_fields:
# move zip field before city field
city_fields[0].addprevious(zip_fields[0])
arch = etree.tostring(doc)
break
return arch
class PartnerCategory(models.Model):
_description = 'Partner Tags'
_name = 'res.partner.category'
_order = 'parent_left, name'
_parent_store = True
_parent_order = 'name'
name = fields.Char(string='Category Name', required=True, translate=True)
color = fields.Integer(string='Color Index')
parent_id = fields.Many2one('res.partner.category', string='Parent Category', index=True, ondelete='cascade')
child_ids = fields.One2many('res.partner.category', 'parent_id', string='Child Tags')
active = fields.Boolean(default=True, help="The active field allows you to hide the category without removing it.")
parent_left = fields.Integer(string='Left parent', index=True)
parent_right = fields.Integer(string='Right parent', index=True)
partner_ids = fields.Many2many('res.partner', column1='category_id', column2='partner_id', string='Partners')
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('Error ! You can not create recursive tags.'))
@api.multi
def name_get(self):
""" Return the categories' display name, including their direct
parent by default.
If ``context['partner_category_display']`` is ``'short'``, the short
version of the category name (without the direct parent) is used.
The default is the long version.
"""
if self._context.get('partner_category_display') == 'short':
return super(PartnerCategory, self).name_get()
res = []
for category in self:
names = []
current = category
while current:
names.append(current.name)
current = current.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
if name:
            # Be sure name_search is symmetric to name_get
name = name.split(' / ')[-1]
args = [('name', operator, name)] + args
return self.search(args, limit=limit).name_get()
class PartnerTitle(models.Model):
_name = 'res.partner.title'
_order = 'name'
name = fields.Char(string='Title', required=True, translate=True)
shortcut = fields.Char(string='Abbreviation', translate=True)
_sql_constraints = [('name_uniq', 'unique (name)', "Title name already exists !")]
class Partner(models.Model, FormatAddress):
_description = 'Partner'
_name = "res.partner"
_order = "display_name"
def _default_category(self):
return self.env['res.partner.category'].browse(self._context.get('category_id'))
def _default_company(self):
return self.env['res.company']._company_default_get('res.partner')
name = fields.Char(index=True)
display_name = fields.Char(compute='_compute_display_name', store=True, index=True)
date = fields.Date(index=True)
title = fields.Many2one('res.partner.title')
parent_id = fields.Many2one('res.partner', string='Related Company', index=True)
parent_name = fields.Char(related='parent_id.name', readonly=True, string='Parent name')
child_ids = fields.One2many('res.partner', 'parent_id', string='Contacts', domain=[('active', '=', True)]) # force "active_test" domain to bypass _search() override
ref = fields.Char(string='Internal Reference', index=True)
lang = fields.Selection(_lang_get, string='Language', default=lambda self: self.env.lang,
help="If the selected language is loaded in the system, all documents related to "
"this contact will be printed in this language. If not, it will be English.")
tz = fields.Selection(_tz_get, string='Timezone', default=lambda self: self._context.get('tz'),
help="The partner's timezone, used to output proper date and time values "
"inside printed reports. It is important to set a value for this field. "
"You should use the same timezone that is otherwise used to pick and "
"render date and time values: your computer's timezone.")
tz_offset = fields.Char(compute='_compute_tz_offset', string='Timezone offset', invisible=True)
user_id = fields.Many2one('res.users', string='Salesperson',
help='The internal user that is in charge of communicating with this contact if any.')
    vat = fields.Char(string='TIN', help="Tax Identification Number. "
                                         "Fill it in if the company is subject to taxes. "
                                         "Used by some of the legal statements.")
bank_ids = fields.One2many('res.partner.bank', 'partner_id', string='Banks')
website = fields.Char(help="Website of Partner or Company")
comment = fields.Text(string='Notes')
category_id = fields.Many2many('res.partner.category', column1='partner_id',
column2='category_id', string='Tags', default=_default_category)
credit_limit = fields.Float(string='Credit Limit')
barcode = fields.Char(oldname='ean13')
active = fields.Boolean(default=True)
customer = fields.Boolean(string='Is a Customer', default=True,
help="Check this box if this contact is a customer.")
supplier = fields.Boolean(string='Is a Vendor',
help="Check this box if this contact is a vendor. "
"If it's not checked, purchase people will not see it when encoding a purchase order.")
employee = fields.Boolean(help="Check this box if this contact is an Employee.")
function = fields.Char(string='Job Position')
type = fields.Selection(
[('contact', 'Contact'),
('invoice', 'Invoice address'),
('delivery', 'Shipping address'),
('other', 'Other address')], string='Address Type',
default='contact',
help="Used to select automatically the right address according to the context in sales and purchases documents.")
street = fields.Char()
street2 = fields.Char()
zip = fields.Char(change_default=True)
city = fields.Char()
state_id = fields.Many2one("res.country.state", string='State', ondelete='restrict')
country_id = fields.Many2one('res.country', string='Country', ondelete='restrict')
email = fields.Char()
phone = fields.Char()
fax = fields.Char()
mobile = fields.Char()
is_company = fields.Boolean(string='Is a Company', default=False,
help="Check if the contact is a company, otherwise it is a person")
# company_type is only an interface field, do not use it in business logic
company_type = fields.Selection(string='Company Type',
selection=[('person', 'Individual'), ('company', 'Company')],
compute='_compute_company_type', readonly=False)
company_id = fields.Many2one('res.company', 'Company', index=True, default=_default_company)
color = fields.Integer(string='Color Index', default=0)
user_ids = fields.One2many('res.users', 'partner_id', string='Users', auto_join=True)
contact_address = fields.Char(compute='_compute_contact_address', string='Complete Address')
# technical field used for managing commercial fields
commercial_partner_id = fields.Many2one('res.partner', compute='_compute_commercial_partner',
string='Commercial Entity', store=True)
commercial_company_name = fields.Char('Company Name Entity', compute='_compute_commercial_company_name',
store=True)
company_name = fields.Char('Company Name')
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Image", attachment=True,
help="This field holds the image used as avatar for this contact, limited to 1024x1024px",)
image_medium = fields.Binary("Medium-sized image", attachment=True,
help="Medium-sized image of this contact. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views.")
image_small = fields.Binary("Small-sized image", attachment=True,
help="Small-sized image of this contact. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required.")
_sql_constraints = [
('check_name', "CHECK( (type='contact' AND name IS NOT NULL) or (type!='contact') )", 'Contacts require a name.'),
]
@api.depends('is_company', 'name', 'parent_id.name', 'type', 'company_name')
def _compute_display_name(self):
diff = dict(show_address=None, show_address_only=None, show_email=None)
names = dict(self.with_context(**diff).name_get())
for partner in self:
partner.display_name = names.get(partner.id)
@api.depends('tz')
def _compute_tz_offset(self):
for partner in self:
partner.tz_offset = datetime.datetime.now(pytz.timezone(partner.tz or 'GMT')).strftime('%z')
@api.depends(lambda self: self._display_address_depends())
def _compute_contact_address(self):
for partner in self:
partner.contact_address = partner._display_address()
@api.depends('is_company', 'parent_id.commercial_partner_id')
def _compute_commercial_partner(self):
for partner in self:
if partner.is_company or not partner.parent_id:
partner.commercial_partner_id = partner
else:
partner.commercial_partner_id = partner.parent_id.commercial_partner_id
@api.depends('company_name', 'parent_id.is_company', 'commercial_partner_id.name')
def _compute_commercial_company_name(self):
for partner in self:
p = partner.commercial_partner_id
partner.commercial_company_name = p.is_company and p.name or partner.company_name
@api.model
def _get_default_image(self, partner_type, is_company, parent_id):
if getattr(threading.currentThread(), 'testing', False) or self._context.get('install_mode'):
return False
colorize, img_path, image = False, False, False
if partner_type in ['contact', 'other'] and parent_id:
parent_image = self.browse(parent_id).image
image = parent_image and parent_image.decode('base64') or None
if not image and partner_type == 'invoice':
img_path = get_module_resource('base', 'static/src/img', 'money.png')
elif not image and partner_type == 'delivery':
img_path = get_module_resource('base', 'static/src/img', 'truck.png')
elif not image and is_company:
img_path = get_module_resource('base', 'static/src/img', 'company_image.png')
elif not image:
img_path = get_module_resource('base', 'static/src/img', 'avatar.png')
colorize = True
if img_path:
with open(img_path, 'rb') as f:
image = f.read()
if image and colorize:
image = tools.image_colorize(image)
return tools.image_resize_image_big(image.encode('base64'))
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
if (not view_id) and (view_type == 'form') and self._context.get('force_email'):
view_id = self.env.ref('base.view_partner_simple_form').id
res = super(Partner, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self.fields_view_get_address(res['arch'])
return res
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('You cannot create recursive Partner hierarchies.'))
@api.multi
def copy(self, default=None):
self.ensure_one()
default = dict(default or {}, name=_('%s (copy)') % self.name)
return super(Partner, self).copy(default)
@api.onchange('parent_id')
def onchange_parent_id(self):
# return values in result, as this method is used by _fields_sync()
if not self.parent_id:
return
result = {}
partner = getattr(self, '_origin', self)
if partner.parent_id and partner.parent_id != self.parent_id:
result['warning'] = {
'title': _('Warning'),
'message': _('Changing the company of a contact should only be done if it '
'was never correctly set. If an existing contact starts working for a new '
'company then a new contact should be created under that new '
'company. You can use the "Discard" button to abandon this change.')}
if partner.type == 'contact' or self.type == 'contact':
# for contacts: copy the parent address, if set (aka, at least one
# value is set in the address: otherwise, keep the one from the
# contact)
address_fields = self._address_fields()
if any(self.parent_id[key] for key in address_fields):
def convert(value):
return value.id if isinstance(value, models.BaseModel) else value
result['value'] = {key: convert(self.parent_id[key]) for key in address_fields}
return result
@api.onchange('state_id')
def onchange_state(self):
if self.state_id:
self.country_id = self.state_id.country_id
@api.onchange('email')
def onchange_email(self):
if not self.image and not self._context.get('yaml_onchange') and self.email:
self.image = self._get_gravatar_image(self.email)
@api.depends('is_company')
def _compute_company_type(self):
for partner in self:
partner.company_type = 'company' if partner.is_company else 'person'
@api.onchange('company_type')
def onchange_company_type(self):
self.is_company = (self.company_type == 'company')
@api.multi
def _update_fields_values(self, fields):
""" Returns dict of write() values for synchronizing ``fields`` """
values = {}
for fname in fields:
field = self._fields[fname]
if field.type == 'many2one':
values[fname] = self[fname].id
elif field.type == 'one2many':
raise AssertionError(_('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`'))
elif field.type == 'many2many':
values[fname] = [(6, 0, self[fname].ids)]
else:
values[fname] = self[fname]
return values
@api.model
def _address_fields(self):
"""Returns the list of address fields that are synced from the parent."""
return list(ADDRESS_FIELDS)
@api.multi
def update_address(self, vals):
addr_vals = {key: vals[key] for key in self._address_fields() if key in vals}
if addr_vals:
return super(Partner, self).write(addr_vals)
@api.model
def _commercial_fields(self):
""" Returns the list of fields that are managed by the commercial entity
to which a partner belongs. These fields are meant to be hidden on
partners that aren't `commercial entities` themselves, and will be
delegated to the parent `commercial entity`. The list is meant to be
extended by inheriting classes. """
return ['vat', 'credit_limit']
@api.multi
def _commercial_sync_from_company(self):
""" Handle sync of commercial fields when a new parent commercial entity is set,
as if they were related fields """
commercial_partner = self.commercial_partner_id
if commercial_partner != self:
sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
self.write(sync_vals)
@api.multi
def _commercial_sync_to_children(self):
""" Handle sync of commercial fields to descendants """
commercial_partner = self.commercial_partner_id
sync_vals = commercial_partner._update_fields_values(self._commercial_fields())
sync_children = self.child_ids.filtered(lambda c: not c.is_company)
for child in sync_children:
child._commercial_sync_to_children()
return sync_children.write(sync_vals)
@api.multi
def _fields_sync(self, values):
""" Sync commercial fields and address fields from company and to children after create/update,
just as if those were all modeled as fields.related to the parent """
# 1. From UPSTREAM: sync from parent
if values.get('parent_id') or values.get('type', 'contact'):
# 1a. Commercial fields: sync if parent changed
if values.get('parent_id'):
self._commercial_sync_from_company()
# 1b. Address fields: sync if parent or use_parent changed *and* both are now set
if self.parent_id and self.type == 'contact':
onchange_vals = self.onchange_parent_id().get('value', {})
self.update_address(onchange_vals)
# 2. To DOWNSTREAM: sync children
if self.child_ids:
# 2a. Commercial Fields: sync if commercial entity
if self.commercial_partner_id == self:
commercial_fields = self._commercial_fields()
if any(field in values for field in commercial_fields):
self._commercial_sync_to_children()
# 2b. Address fields: sync if address changed
address_fields = self._address_fields()
if any(field in values for field in address_fields):
contacts = self.child_ids.filtered(lambda c: c.type == 'contact')
contacts.update_address(values)
@api.multi
def _handle_first_contact_creation(self):
""" On creation of first contact for a company (or root) that has no address, assume contact address
was meant to be company address """
parent = self.parent_id
address_fields = self._address_fields()
if (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
any(self[f] for f in address_fields) and not any(parent[f] for f in address_fields):
addr_vals = self._update_fields_values(address_fields)
parent.update_address(addr_vals)
def _clean_website(self, website):
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(website)
if not scheme:
if not netloc:
netloc, path = path, ''
website = urlparse.urlunparse(('http', netloc, path, params, query, fragment))
return website
@api.multi
def write(self, vals):
# res.partner must only allow to set the company_id of a partner if it
# is the same as the company of all users that inherit from this partner
# (this is to allow the code from res_users to write to the partner!) or
# if setting the company_id to False (this is compatible with any user
# company)
if vals.get('website'):
vals['website'] = self._clean_website(vals['website'])
if vals.get('parent_id'):
vals['company_name'] = False
if vals.get('company_id'):
company = self.env['res.company'].browse(vals['company_id'])
for partner in self:
if partner.user_ids:
companies = set(user.company_id for user in partner.user_ids)
if len(companies) > 1 or company not in companies:
raise UserError(_("You can not change the company as the partner/user has multiple user linked with different companies."))
tools.image_resize_images(vals)
result = super(Partner, self).write(vals)
for partner in self:
if any(u.has_group('base.group_user') for u in partner.user_ids if u != self.env.user):
self.env['res.users'].check_access_rights('write')
partner._fields_sync(vals)
return result
@api.model
def create(self, vals):
if vals.get('website'):
vals['website'] = self._clean_website(vals['website'])
if vals.get('parent_id'):
vals['company_name'] = False
# compute default image in create, because computing gravatar in the onchange
# cannot be easily performed if default images are in the way
if not vals.get('image'):
vals['image'] = self._get_default_image(vals.get('type'), vals.get('is_company'), vals.get('parent_id'))
tools.image_resize_images(vals)
partner = super(Partner, self).create(vals)
partner._fields_sync(vals)
partner._handle_first_contact_creation()
return partner
@api.multi
def create_company(self):
self.ensure_one()
if self.company_name:
# Create parent company
values = dict(name=self.company_name, is_company=True)
values.update(self._update_fields_values(self._address_fields()))
new_company = self.create(values)
# Set new company as my parent
self.write({
'parent_id': new_company.id,
'child_ids': [(1, partner_id, dict(parent_id=new_company.id)) for partner_id in self.child_ids.ids]
})
return True
@api.multi
def open_commercial_entity(self):
""" Utility method used to add an "Open Company" button in partner views """
self.ensure_one()
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'res_id': self.commercial_partner_id.id,
'target': 'current',
'flags': {'form': {'action_buttons': True}}}
@api.multi
def open_parent(self):
""" Utility method used to add an "Open Parent" button in partner views """
self.ensure_one()
address_form_id = self.env.ref('base.view_partner_address_form').id
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'views': [(address_form_id, 'form')],
'res_id': self.parent_id.id,
'target': 'new',
'flags': {'form': {'action_buttons': True}}}
@api.multi
def name_get(self):
res = []
for partner in self:
name = partner.name or ''
if partner.commercial_company_name:
if not name and partner.type in ['invoice', 'delivery', 'other']:
name = dict(self.fields_get(['type'])['type']['selection'])[partner.type]
if not partner.is_company:
name = "%s, %s" % (partner.commercial_company_name, name)
if self._context.get('show_address_only'):
name = partner._display_address(without_company=True)
if self._context.get('show_address'):
name = name + "\n" + partner._display_address(without_company=True)
name = name.replace('\n\n', '\n')
name = name.replace('\n\n', '\n')
if self._context.get('show_email') and partner.email:
name = "%s <%s>" % (name, partner.email)
if self._context.get('html_format'):
name = name.replace('\n', '<br/>')
res.append((partner.id, name))
return res
def _parse_partner_name(self, text, context=None):
""" Supported syntax:
- 'Raoul <raoul@grosbedon.fr>': will find name and email address
- otherwise: default, everything is set as the name """
emails = tools.email_split(text.replace(' ', ','))
if emails:
email = emails[0]
name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
else:
name, email = text, ''
return name, email
@api.model
def name_create(self, name):
""" Override of orm's name_create method for partners. The purpose is
to handle some basic formats to create partners using the
name_create.
If only an email address is received and that the regex cannot find
a name, the name will have the email value.
If 'force_email' key in context: must find the email address. """
name, email = self._parse_partner_name(name)
if self._context.get('force_email') and not email:
raise UserError(_("Couldn't create contact without email address!"))
if not name and email:
name = email
partner = self.create({self._rec_name: name or email, 'email': email or False})
return partner.name_get()[0]
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
""" Override search() to always show inactive children when searching via ``child_of`` operator. The ORM will
always call search() with a simple domain of the form [('parent_id', 'in', [ids])]. """
# a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions
if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \
and args[0][2] != [False]:
self = self.with_context(active_test=False)
return super(Partner, self)._search(args, offset=offset, limit=limit, order=order,
count=count, access_rights_uid=access_rights_uid)
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
if args is None:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
self.check_access_rights('read')
where_query = self._where_calc(args)
self._apply_ir_rules(where_query, 'read')
from_clause, where_clause, where_clause_params = where_query.get_sql()
where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
unaccent = get_unaccent_wrapper(self.env.cr)
query = """SELECT id
FROM res_partner
{where} ({email} {operator} {percent}
OR {display_name} {operator} {percent}
OR {reference} {operator} {percent})
-- don't panic, trust postgres bitmap
ORDER BY {display_name} {operator} {percent} desc,
{display_name}
""".format(where=where_str,
operator=operator,
email=unaccent('email'),
display_name=unaccent('display_name'),
reference=unaccent('ref'),
percent=unaccent('%s'))
where_clause_params += [search_name]*4
if limit:
query += ' limit %s'
where_clause_params.append(limit)
self.env.cr.execute(query, where_clause_params)
partner_ids = map(lambda x: x[0], self.env.cr.fetchall())
if partner_ids:
return self.browse(partner_ids).name_get()
else:
return []
return super(Partner, self).name_search(name, args, operator=operator, limit=limit)
@api.model
def find_or_create(self, email):
""" Find a partner with the given ``email`` or use :py:method:`~.name_create`
to create one
:param str email: email-like string, which should contain at least one email,
e.g. ``"Raoul Grosbedon <r.g@grosbedon.fr>"``"""
assert email, 'an email is required for find_or_create to work'
emails = tools.email_split(email)
if emails:
email = emails[0]
partners = self.search([('email', '=ilike', email)], limit=1)
return partners.id or self.name_create(email)[0]
def _get_gravatar_image(self, email):
gravatar_image = False
email_hash = hashlib.md5(email.lower()).hexdigest()
url = "https://www.gravatar.com/avatar/" + email_hash
try:
image_content = urllib2.urlopen(url + "?d=404&s=128", timeout=5).read()
gravatar_image = base64.b64encode(image_content)
except Exception:
pass
return gravatar_image
@api.multi
def _email_send(self, email_from, subject, body, on_error=None):
for partner in self.filtered('email'):
tools.email_send(email_from, [partner.email], subject, body, on_error)
return True
@api.multi
def address_get(self, adr_pref=None):
""" Find contacts/addresses of the right type(s) by doing a depth-first-search
through descendants within company boundaries (stop at entities flagged ``is_company``)
then continuing the search at the ancestors that are within the same company boundaries.
Defaults to partners of type ``'contact'`` when the exact type is not found, or to the
provided partner itself if no type ``'contact'`` is found either. """
adr_pref = set(adr_pref or [])
if 'contact' not in adr_pref:
adr_pref.add('contact')
result = {}
visited = set()
for partner in self:
current_partner = partner
while current_partner:
to_scan = [current_partner]
# Scan descendants, DFS
while to_scan:
record = to_scan.pop(0)
visited.add(record)
if record.type in adr_pref and not result.get(record.type):
result[record.type] = record.id
if len(result) == len(adr_pref):
return result
to_scan = [c for c in record.child_ids
if c not in visited
if not c.is_company] + to_scan
# Continue scanning at ancestor if current_partner is not a commercial entity
if current_partner.is_company or not current_partner.parent_id:
break
current_partner = current_partner.parent_id
# default to type 'contact' or the partner itself
default = result.get('contact', self.id or False)
for adr_type in adr_pref:
result[adr_type] = result.get(adr_type) or default
return result
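# Resolution sketch (illustrative, hypothetical records): for a company with one child
# contact of type 'invoice' and no 'delivery' child,
#
#   company.address_get(['invoice', 'delivery'])
#   # -> {'invoice': <invoice child id>, 'contact': <contact child id, or the company id>,
#   #     'delivery': <falls back to the 'contact' entry, per the default above>}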
@api.model
def view_header_get(self, view_id, view_type):
res = super(Partner, self).view_header_get(view_id, view_type)
if res: return res
if not self._context.get('category_id'):
return False
return _('Partners: ') + self.env['res.partner.category'].browse(self._context['category_id']).name
@api.model
@api.returns('self')
def main_partner(self):
''' Return the main partner '''
return self.env.ref('base.main_partner')
@api.multi
def _display_address(self, without_company=False):
'''
Build and return the partner's address, formatted according to the standards of the
country it belongs to.
:param without_company: if True, the company name is left out of the result
:returns: the address formatted in a way that fits the country's habits (or the
default format if no country is specified)
:rtype: string
'''
# get the information that will be injected into the display format
# get the address format
address_format = self.country_id.address_format or \
"%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
args = {
'state_code': self.state_id.code or '',
'state_name': self.state_id.name or '',
'country_code': self.country_id.code or '',
'country_name': self.country_id.name or '',
'company_name': self.commercial_company_name or '',
}
for field in self._address_fields():
args[field] = getattr(self, field) or ''
if without_company:
args['company_name'] = ''
elif self.commercial_company_name:
address_format = '%(company_name)s\n' + address_format
return address_format % args
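# Formatting sketch (illustrative, hypothetical values): with no country-specific
# format, the default pattern above yields
#
#   args = {'street': '123 Main St', 'street2': '', 'city': 'Springfield',
#           'state_code': 'IL', 'zip': '62704', 'country_name': 'United States'}
#   # "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s" % args
#   # -> '123 Main St\n\nSpringfield IL 62704\nUnited States'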
def _display_address_depends(self):
# field dependencies of method _display_address()
return self._address_fields() + [
'country_id.address_format', 'country_id.code', 'country_id.name',
'company_name', 'state_id.code', 'state_id.name',
]
|
ayepezv/GAD_ERP
|
openerp/addons/base/res/res_partner.py
|
Python
|
gpl-3.0
| 36,156
|
# setBonusAsklepian
#
# Used by:
# Implants named like: Grade Asklepian (16 of 16)
# Implants named like: grade Asklepian Omega (2 of 2)
runTime = "early"
type = "passive"
def handler(fit, src, context):
fit.appliedImplants.filteredItemMultiply(lambda mod: mod.item.requiresSkill("Cybernetics"),
"armorRepairBonus", src.getModifiedItemAttr("implantSetSerpentis2"))
|
Ebag333/Pyfa
|
eos/effects/setbonusasklepian.py
|
Python
|
gpl-3.0
| 416
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import operator
import optparse
import os
import subprocess
import re
import sys
import time
import yaml
from abc import ABCMeta, abstractmethod
import ansible
from ansible import constants as C
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.release import __version__
from ansible.utils.path import unfrackpath
from ansible.utils.vars import load_extra_vars, load_options_vars
from ansible.vars.manager import VariableManager
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=formatter)
# Note: Inherit from SortedOptParser so that we get our format_help method
class InvalidOptsParser(SortedOptParser):
'''Ignore invalid options.
Meant for the special case where we need to take care of help and version
but may not know the full range of options yet. (See it in use in set_action)
'''
def __init__(self, parser):
# Since this is special purposed to just handle help and version, we
# take a pre-existing option parser here and set our options from
# that. This allows us to give accurate help based on the given
# option parser.
SortedOptParser.__init__(self, usage=parser.usage,
option_list=parser.option_list,
option_class=parser.option_class,
conflict_handler=parser.conflict_handler,
description=parser.description,
formatter=parser.formatter,
add_help_option=False,
prog=parser.prog,
epilog=parser.epilog)
self.version = parser.version
def _process_long_opt(self, rargs, values):
try:
optparse.OptionParser._process_long_opt(self, rargs, values)
except optparse.BadOptionError:
pass
def _process_short_opts(self, rargs, values):
try:
optparse.OptionParser._process_short_opts(self, rargs, values)
except optparse.BadOptionError:
pass
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = []
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
self.callback = callback
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0, len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
# if we're asked for help or version, we don't need an action.
# have to use a special purpose Option Parser to figure that out as
# the standard OptionParser throws an error for unknown options and
# without knowing action, we only know of a subset of the options
# that could be legal for this command
tmp_parser = InvalidOptsParser(self.parser)
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
if not ((hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version)):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
display.vv(to_text(self.parser.get_version()))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternative' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternative']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
# warn about typing issues with configuration entries
for unable in C.config.UNABLE:
display.warning("Unable to set correct type for configuration entry: %s" % unable)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
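# Behaviour sketch (illustrative, hypothetical paths): only the first '@' splits.
#
#   CLI.split_vault_id('dev@~/.vault_pass.txt')   # -> ('dev', '~/.vault_pass.txt')
#   CLI.split_vault_id('prompt')                  # -> (None, 'prompt')
#   CLI.split_vault_id('a@b@c')                   # -> ('a', 'b@c')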
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
# note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
# if an action needs an encrypt password (create_new_password=True) and we dont
# have other secrets setup, then automatically add a password prompt as well.
if ask_vault_pass or (auto_prompt and not vault_ids):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
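# Slug-building sketch (illustrative; assumes C.DEFAULT_VAULT_IDENTITY == 'default'
# and a hypothetical password file path):
#
#   CLI.build_vault_ids([], ['~/.vault_pass.txt'], ask_vault_pass=True)
#   # -> ['default@~/.vault_pass.txt', 'default@prompt_ask_vault_pass']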
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
# certain vault password prompt format, so the 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# prompts can't/shouldn't work without a tty, so don't add prompt secrets
if not sys.stdin.isatty():
continue
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
# an empty or invalid password from the prompt will warn and continue to the next
# without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id_name=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % op.become_method.upper()
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
def _dep(which):
display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6')
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
_dep('sudo')
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
_dep('su')
# other deprecations:
if self.options.ask_sudo_pass or self.options.sudo_user:
_dep('sudo')
if self.options.ask_su_pass or self.options.su_user:
_dep('su')
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_files):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def unfrack_paths(option, opt, value, parser):
paths = getattr(parser.values, option.dest)
if paths is None:
paths = []
if isinstance(value, string_types):
paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x]
elif isinstance(value, list):
paths[:0] = [unfrackpath(x) for x in value if x]
else:
pass # FIXME: should we raise options error?
setattr(parser.values, option.dest, paths)
@staticmethod
def unfrack_path(option, opt, value, parser):
if value != '-':
setattr(parser.values, option.dest, unfrackpath(value))
else:
setattr(parser.values, option.dest, value)
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
runas_prompt_opts=False, desc=None):
''' create an options parser for most ansible scripts '''
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if inventory_opts:
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path (default=[%s]) or comma separated host list. "
"--inventory-file is deprecated" % C.DEFAULT_HOST_LIST)
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if module_opts:
parser.add_option('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
action="callback", callback=CLI.unfrack_paths, type='str')
if runtask_opts:
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
if fork_opts:
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=[], dest='vault_password_files',
help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--new-vault-password-file', default=[], dest='new_vault_password_files',
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string',
help='the vault identity to use')
parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string',
help='the new vault identity to use for rekey')
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default=[], action='append',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags', default=[], action='append',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if connect_opts:
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_option_group(connect_group)
runas_group = None
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
if runas_opts:
runas_group = rg
# priv user defaults to root later on to enable detecting when this option was given here
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
runas_group.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
if runas_opts or runas_prompt_opts:
if not runas_group:
runas_group = rg
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if runas_group:
parser.add_option_group(runas_group)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
return parser
@abstractmethod
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
self.args and self.options respectively.
Subclasses need to implement this method. They will usually create
a base_parser, add their own options to the base_parser, and then call
this method to do the actual parsing. An implementation will look
something like this::
def parse(self):
parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
parser.add_option('--my-option', dest='my_option', action='store')
self.parser = parser
super(MyCLI, self).parse()
# If some additional transformations are needed for the
# arguments and options, do it here.
"""
self.options, self.args = self.parser.parse_args(self.args[1:])
# process tags
if hasattr(self.options, 'tags') and not self.options.tags:
# optparse defaults do not do what's expected
self.options.tags = ['all']
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
for tag_set in self.options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
self.options.tags = list(tags)
# process skip_tags
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
for tag_set in self.options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
self.options.skip_tags = list(skip_tags)
# process inventory options
if hasattr(self.options, 'inventory'):
if self.options.inventory:
# should always be list
if isinstance(self.options.inventory, string_types):
self.options.inventory = [self.options.inventory]
# Ensure full paths when needed
self.options.inventory = [unfrackpath(opt) if ',' not in opt else opt for opt in self.options.inventory]
else:
self.options.inventory = C.DEFAULT_HOST_LIST
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
# expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
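# Shape sketch (illustrative, hypothetical version string): with __version__ == '2.4.0'
# and gitinfo=False,
#
#   CLI.version_info()
#   # -> {'string': '2.4.0', 'full': '2.4.0', 'major': 2, 'minor': 4, 'revision': 0}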
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# The .git file may contain an absolute path to the actual git dir.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
f.close()
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
self.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
self.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
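# Markup sketch (illustrative input): the regexes above rewrite module-doc markers
# into plain terminal text.
#
#   CLI.tty_ify("Use M(copy) to B(copy) a C(file), see U(http://docs.ansible.com)")
#   # -> "Use [copy] to *copy* a `file', see http://docs.ansible.com"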
@staticmethod
def _play_prereqs(options):
# all needs loader
loader = DataLoader()
vault_ids = options.vault_ids
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=options.vault_password_files,
ask_vault_pass=options.ask_vault_pass,
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options.inventory)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory)
# load vars from cli options
variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
|
michael-dev2rights/ansible
|
lib/ansible/cli/__init__.py
|
Python
|
gpl-3.0
| 38,491
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('thumbnails', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='thumbnail',
name=b'id',
field=models.AutoField(
verbose_name='ID', serialize=False,
auto_created=True, primary_key=True),
),
migrations.AlterField(
model_name='thumbnail',
name=b'is_automatic',
field=models.BooleanField(default=False),
),
migrations.AlterUniqueTogether(
name='thumbnail',
unique_together=set([('object_type', 'object_id')]),
),
]
|
arhote/exchange
|
exchange/thumbnails/migrations/0002_auto_20170504_1443.py
|
Python
|
gpl-3.0
| 797
|
def filter_string(s):
return int(''.join(c for c in s if c.isdigit()))
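# Quick check (illustrative): keep only digit characters, then cast to int.
#
#   filter_string('a1b2c3')      # -> 123
#   filter_string('aas5da4ad')   # -> 54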
|
VladKha/CodeWars
|
7 kyu/Filter the number/solve.py
|
Python
|
gpl-3.0
| 75
|