| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
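The `prefix`, `middle`, and `suffix` columns appear to hold three consecutive slices of a single source file, so the original file can be recovered by plain concatenation. Below is a minimal sketch of that reconstruction; it assumes each row is available as a Python dict keyed by the column names above, and the `reassemble` helper plus the abbreviated `example_row` values are illustrative rather than part of the dataset.

```python
def reassemble(row):
    """Rebuild the original file text from one row.

    Assumes `row` is a mapping with the column names shown above and that
    `prefix`, `middle`, and `suffix` are consecutive slices of the file.
    """
    return row["prefix"] + row["middle"] + row["suffix"]


# Abbreviated, illustrative row (metadata copied from the first entry below;
# the three text fields are shortened here).
example_row = {
    "repo_name": "emvecchi/mss",
    "path": "src/utils/crowdflower/retrieve_stats.py",
    "language": "Python",
    "license": "apache-2.0",
    "size": 925,
    "score": 0.025946,
    "prefix": "import sys, ",
    "middle": "csv, string\n",
    "suffix": "def generate_R_input(report_path, output_path):\n",
}

print(reassemble(example_row))
```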
| emvecchi/mss | src/utils/crowdflower/retrieve_stats.py | Python | apache-2.0 | 925 | 0.025946 |
import sys, csv, string
def generate_R_input(report_path, output_path):
confidence_values = []
report_file = open(report_path, 'r')
first_line = True
for line in report_file:
if first_line:
first_line = False
continue
line = line.strip().split(',')
if line[5] == 'Yes':
confidence_values.append(line[6])
elif line[5] == 'No':
confid = 1 - float(line[6])
confidence_values.append(confid)
output_file = open(output_path, 'w')
output_file.write('Confidence' + '\n')
first_val = True
for confid in confidence_values:
if not first_val:
output_file.write(str(confid) + '\n')
else:
output_file.write(str(confid) + '\n')
first_val = False
output_file.close()
if __name__ =="__main__":
_report_path = sys.argv[1]
_output_path = sys.argv[2]
generate_R_input(_report_path, _output_path)
| nramanan/nellie-bot | libs/rest.py | Python | mit | 833 | 0.016807 |
import requests,sys
from config_helper import get_proxies
def get_pem():
if get_proxies()==None:
return None
pem_resp = requests.get('http://curl.haxx.se/ca/cacert.pem', proxies=get_proxies())
if pem_resp.status_code != 200:
print "ERROR: Received bad response from api: %d" % pem_resp.status_code
print "API Message: "+pem_resp.text
sys.exit()
f = open('..\pemfile.pem','w')
f.write(pem_resp.text.encode('utf-8'))
f.close()
return '..\pemfile.pem'
def get(url, proxies=None, auth=None,verify = True):
resp = requests.get(url, proxies=get_proxies(), auth=auth, verify=verify)
if resp.status_code != 200:
print "ERROR: Received bad response from api: %d" % resp.status_code
print "API Message: "+resp.text
sys.exit()
return resp.json()
| cmlh/Maltego-Recorded_Future | src/RF_Maltego_Package_1.0/rf_csv_maltegoload.py | Python | apache-2.0 | 2,043 | 0.040137 |
#!/usr/bin/env python
"""Tr
|
ansform a CSV file exported from the Recorded Future UI into Maltego entities."""
import json
import sys
import csv
import Tkinter, tkFileDialog
from MaltegoTransform import *
mt = MaltegoTransform()
# Use Tkinter to open up a file dialog.
root = Tkinter.Tk()
root.lift()
root.withdraw()
sys.stderr.write("Click the Python icon to select a file.")
csvfilename = tkFileDialog.askopenfilename()
data = csv.DictReader(open(csvfilename), delimiter=',',fieldnames=('Event Id','Event Type','Event Title','Start Time','End Time','Precision','Count','First Published Time','Last Published Time','Sample Fragment','Entities','Locations','Source Count','Positive Sentiment','Negative Sentiment'))
next(data)
for row in data:
event = row['Event Type']+"-"+row['Event Id']
rfevent = mt.addEntity("recfut.RFEvent",event);
rfevent.addAdditionalFields("eid","Event ID",False,row['Event Id']);
rfevent.addAdditionalFields("etype","Event Type",False,row['Event Type']);
rfevent.addAdditionalFields("title","Event Title",False,row['Event Title']);
rfevent.addAdditionalFields("starttime","Start Time",False,row['Start Time']);
rfevent.addAdditionalFields("stoptime","Stop Time",False,row['End Time']);
rfevent.addAdditionalFields("fragment","Fragment",False,row['Sample Fragment']);
rfevent.addAdditionalFields("precision","Precision",False,row['Precision']);
rfevent.addAdditionalFields("count","Count",False,row['Count']);
rfevent.addAdditionalFields("firstpublished","First Published",False,row['First Published Time']);
rfevent.addAdditionalFields("lastpublished","Last Published",False,row['Last Published Time']);
rfevent.addAdditionalFields("sourcecount","Source Count",False,row['Source Count']);
rfevent.addAdditionalFields("pos_sentiment","Positive Sentiment",False,row['Positive Sentiment']);
rfevent.addAdditionalFields("neg_sentiment","Negative Sentiment",False,row['Negative Sentiment']);
mt.addUIMessage("RF event load completed!")
mt.returnOutput()
| kakaroto/amsn2 | amsn2/protocol/events/addressbook.py | Python | gpl-2.0 | 2,240 | 0.000893 |
# -*- coding: utf-8 -*-
#
# amsn - a python client for the WLM Network
#
# Copyright (C) 2008 Dario Freddi <drf54321@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import papyon
import papyon.event
class AddressBookEvents(papyon.event.AddressBookEventInterface):
def __init__(self, client, amsn_core):
self._amsn_core = amsn_core
self._contactlist_manager = amsn_core._contactlist_manager
papyon.event.AddressBookEventInterface.__init__(self, client)
def on_addressbook_messenger_contact_added(self, contact):
self._contactlist_manager.on_contact_added(contact)
def on_addressbook_contact_deleted(self, contact):
self._contactlist_manager.on_contact_removed(contact)
def on_addressbook_contact_blocked(self, contact):
self._contactlist_manager.on_contact_blocked(contact)
def on_addressbook_contact_unblocked(self, contact):
self._contactlist_manager.on_contact_unblocked(contact)
def on_addressbook_group_added(self, group):
self._contactlist_manager.on_group_added(group)
def on_addressbook_group_deleted(self, group):
self._contactlist_manager.on_group_deleted(group)
def on_addressbook_group_renamed(self, group):
self._contactlist_manager.on_group_renamed(group)
def on_addressbook_group_contact_added(self, group, contact):
self._contactlist_manager.on_group_contact_added(group, contact)
def on_addressbook_group_contact_deleted(self, group, contact):
self._contactlist_manager.on_group_contact_deleted(group, contact)
| peterbartha/ImmunoMod | res_mods/mods/packages/xvm_contacts/python/contacts.py | Python | mit | 4,916 | 0.00773 |
""" XVM (c) www.modxvm.com 2013-2017 """
# PUBLIC
def initialize():
return _contacts.initialize()
def isAvailable():
return _contacts.is_available
def getXvmContactData(uid):
return _contacts.getXvmContactData(uid)
def setXvmContactData(uid, value):
return _contacts.setXvmContactData(uid, value)
# PRIVATE
from random import randint
import traceback
from gui import SystemMessages
import simplejson
from xfw import *
from xvm_main.python.consts import *
from xvm_main.python.loadurl import loadUrl
from xvm_main.python.logger import *
import xvm_main.python.config as config
import xvm_main.python.utils as utils
from xvm_main.python.xvm import l10n
_CONTACTS_DATA_VERSION = '1.0'
_SYSTEM_MESSAGE_TPL = '''<textformat tabstops="[130]"><img src="img://../xvm/res/icons/xvm/16x16t.png"
vspace="-5"> <a href="#XVM_SITE#"><font color="#E2D2A2">www.modxvm.com</font></a>\n\n%VALUE%</textformat>'''
class _Contacts:
def __init__(self):
self.cached_data = None
self.cached_token = None
self.is_available = False
self.contacts_disabled = False
def initialize(self):
try:
self.is_available = False
if not self.contacts_disabled:
self.contacts_disabled = not config.networkServicesSettings.comments
if self.contacts_disabled:
return
if config.token.online:
token = config.token.token
if token is None:
raise Exception('[TOKEN_NOT_INITIALIZED] {0}'.format(l10n('Network services unavailable')))
if self.cached_data is None or self.cached_token != token:
self.cached_token = token
json_data = self._doRequest('getComments')
data = {'ver':_CONTACTS_DATA_VERSION,'players':{}} if json_data is None else simplejson.loads(json_data)
if data['ver'] != _CONTACTS_DATA_VERSION:
pass # data = convertOldVersion(data)
self.cached_data = data
self.is_available = True
except Exception as ex:
self.contacts_disabled = True
self.is_available = False
self.cached_token = None
self.cached_data = None
errstr = _SYSTEM_MESSAGE_TPL.replace('%VALUE%', '<b>{0}</b>\n\n{1}\n\n{2}'.format(
l10n('Error loading comments'),
str(ex),
l10n('Comments disabled')))
SystemMessages.pushMessage(errstr, type=SystemMessages.SM_TYPE.Warning)
warn(traceback.format_exc())
#log(self.cached_data)
def getXvmContactData(self, uid):
nick = None
comment = None
if not self.contacts_disabled and self.cached_data is None:
self.initialize()
if not self.contacts_disabled and self.cached_data is not None and 'players' in self.cached_data:
data = self.cached_data['players'].get(str(uid), None)
if data is not None:
nick = data.get('nick', None)
comment = data.get('comment', None)
return {'nick':nick,'comment':comment}
def setXvmContactData(self, uid, value):
try:
if self.cached_data is None or 'players' not in self.cached_data:
raise Exception('[INTERNAL_ERROR]')
if (value['nick'] is None or value['nick'] == '') and (value['comment'] is None or value['comment'] == ''):
self.cached_data['players'].pop(str(uid), None)
else:
self.cached_data['players'][str(uid)] = value
json_data = simplejson.dumps(self.cached_data)
#log(json_data)
self._doRequest('addComments', json_data)
return True
except Exception as ex:
self.contacts_disabled = True
self.is_available = False
self.cached_token = None
self.cached_data = None
errstr = _SYSTEM_MESSAGE_TPL.replace('%VALUE%', '<b>{0}</b>\n\n{1}\n\n{2}'.format(
l10n('Error saving comments'),
str(ex),
l10n('Comments disabled')))
SystemMessages.pushMessage(errstr, type=SystemMessages.SM_TYPE.Error)
err(traceback.format_exc())
return False
# PRIVATE
def _doRequest(self, cmd, body=None):
req = '{0}/{1}'.format(cmd, self.cached_token)
server = XVM.SERVERS[randint(0, len(XVM.SERVERS) - 1)]
(response, duration, errStr) = loadUrl(server, req, body=body, api=XVM.API_VERSION_OLD)
if errStr:
raise Exception(errStr)
response = response.strip()
if response in ('', '[]', '{}'):
response = None
# log(utils.hide_guid(response))
return response
_contacts = _Contacts()
| timlinux/watchkeeper | django_project/event_mapper/tasks/notify_priority_users.py | Python | bsd-2-clause | 3,622 | 0.003313 |
# coding=utf-8
"""Select users to be notified."""
__author__ = 'Christian Christelis <christian@kartoza.com>'
__project_name = 'watchkeeper'
__date__ = '27/05/15'
__copyright__ = 'kartoza.com'
__doc__ = ''
from celery import shared_task
from notifications.tasks.send_email import send_email_message
from notifications.tasks.send_sms import send_sms_message
from event_mapper.models.user import User
from event_mapper.models.event import Event
def generate_email_report(event):
"""Generate report for email as html
:param event: Event object
:return: A html string represent the report.
"""
html_report = """
<html>
<head>
<meta name=3D"generator" content=3D"Windows Mail 17.5.9600.20911">
<style data-externalstyle=3D"true"><!--
p.MsoListParagraph, li.MsoListParagraph, div.MsoListParagraph {
margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
}
p.MsoNormal, li.MsoNormal, div.MsoNormal {
margin:0in;
margin-bottom:.0001pt;
}
p.MsoListParagraphCxSpFirst, li.MsoListParagraphCxSpFirst, div.MsoListParagraphCxSpFirst,=20
p.MsoListParagraphCxSpMiddle, li.MsoListParagraphCxSpMiddle, div.MsoListParagraphCxSpMiddle,=20
p.MsoListParagraphCxSpLast, li.MsoListParagraphCxSpLast, div.MsoListParagraphCxSpLast {
margin-top:0in;
margin-right:0in;
margin-bottom:0in;
margin-left:.5in;
margin-bottom:.0001pt;
line-height:115%;
}
--></style></head>
<body dir=3D"ltr">
<div><h2>"
</h2> <img tabindex=3D"-1" src=3D"http://watchkeeper.kartoza.com/static/event_mapper/css/images/logo.fa285e1ad75d.png">
"""
html_report += """
<table width=3D"699" tabindex=3D"-1" style=3D"border-collapse: collapse;" cellspacing=3D"0" cellpadding=3D"0">
<tbody>"""
html_report += event.html_table_row()
html_report += """</tbody>
</table><h2 style=3D"color: rgb(149, 55, 53); font-family: trebuchet MS; font-size: 12pt; font-weight: bold; margin-bottom: 0px;">"""
html_report += """
</div>
</body>
</html>
<br>
This email and any files transmitted with it are confidential and intended solely for the use of the individual or entity to whom they are addressed. If you have received this email in error please notify the system manager. Any views or opinions presented in this email are solely those of the author and do not necessarily represent those of iMMAP. The recipient should check this email and any attachments for the presence of viruses. iMMAP accepts no liability for any damage caused by any virus transmitted by this email.</font><br><div style=3D"font-family:Arial,Helvetica,sans-serif;font-size:1.3em"><div><font size=3D"1" style=3D"background-color:white"><br></font><div><font size=3D"1" style=3D"background-color:white"><font face=3D"Arial, Helvetica, sans-serif">iMMAP, 1300 Pennsylvania Avenue, N.W.,=C2=A0</font>Suite 470=C2=A0Washington DC 20004, <a href=3D"http://www.immap.org" target=3D"_blank">www.immap.org</a></font></div></div></div>
"""
return html_report
@shared_task
def notify_priority_users(event_id):
event = Event.objects.get(id=event_id)
users = User.objects.filter(
countries_notified__polygon_geometry__contains=event.location,
notify_immediately=True)
for user in users:
send_email_message(user, event.text_report(), event.html_report())
| buzzdev/udpwatch | udpwatch.py | Python | gpl-2.0 | 6,405 | 0.012646 |
#!/usr/bin/env python
import os, sys, fcntl, socket, subprocess, struct, signal, time, logging
import datetime
from glob import glob
from ConfigParser import ConfigParser
#######################################################################
# CONFIG
#######################################################################
# Where to make a log file
LOGDIR = "/appl/logs/transcoder/"
#LOGFILE = LOGDIR + str(datetime.date.today()) + "_udpwatch.log"
#LOGFILE = LOGDIR + "udpwatch_" + str(datetime.date.today()) + ".log"
LOGFILE = LOGDIR + "udpwatch.log"
########################################################################
########################################################################
########################################################################
def script_running(lockfile):
global file_handle
file_handle = open(lockfile, 'w')
try:
fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
return False
except IOError:
return True
def setup_logging():
logging.NORMAL = 25
logging.addLevelName(logging.NORMAL, "NORMAL")
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO, filename = LOGFILE)
#logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
logger = logging.getLogger("Transcoder")
logger.normal = lambda msg, *args: logger._log(logging.NORMAL, msg, args)
return logger
def create_udp_socket(ip, port, timeout):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((ip, port))
mreq = struct.pack("=4sl", socket.inet_aton(ip), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.settimeout(timeout)
return sock
except socket.error as msg:
logger.error(msg)
sock.close()
sock = None
def get_enabled_channels(confdir):
CHANNELS = {}
for config_file in glob(confdir + "*.ini"):
config = ConfigParser()
config.read(config_file)
PATH, NAME = os.path.split(config_file)
NAME, EXT = os.path.splitext(NAME)
PID = get_ffmpeg_pid(config.get('General', 'MCAST_OUT_IP'), config.get('General', 'MCAST_OUT_PORT'))
CHANNELS[NAME] = {
"NAME": NAME,
"PID": PID,
"MCAST_IP": config.get('General', 'MCAST_OUT_IP'),
"MCAST_PORT": config.get('General', 'MCAST_OUT_PORT'),
"INPUT_STREAM": config.get('General', 'INPUT_STREAM'),
"VIDEO_BITRATE": config.get('General', 'VIDEO_BITRATE'),
"AUDIO_BITRATE": config.get('General', 'AUDIO_BITRATE'),
"CODEC": config.get('General', 'CODEC'),
"VIDEO_MAPPING": config.get('General', 'VIDEO_MAPPING'),
"AUDIO_MAPPING": config.get('General', 'AUDIO_MAPPING'),
"MUXRATE": config.get('General', 'MUXRATE'),
"LOGLEVEL": config.get('General', 'LOGLEVEL')
}
return CHANNELS
def get_ffmpeg_pid(ip, port):
p = subprocess.Popen(['pgrep', '-f' , ip+":"+str(port)], stdout=subprocess.PIPE)
pid, err = p.communicate()
if pid:
#return int(pid.rstrip())
return pid.rstrip()
else:
return False
def kill_pid(pid, channel_name):
logger.warning("%s Killing PID %s", channel_name, pid)
os.kill(int(pid), signal.SIGKILL)
def check_output(channel_name, mcast_ip, mcast_port, udp_data_timeout, probe_time):
logger.debug("Check output started")
PID = get_ffmpeg_pid(mcast_ip, mcast_port)
if PID != False:
logger.debug("%s PID %s is already running with %s:%s", channel_name, PID, mcast_ip, mcast_port)
# Create a UDP listening socket
s = create_udp_socket(mcast_ip, mcast_port, udp_data_timeout)
startTime = time.time()
bytes = 0
while time.time() - startTime < probe_time:
try:
data = False
data = s.recv(10240)
bytes += len(data)
logger.debug("%s PID %s Received %s bytes on %s:%s", channel_name, PID, len(data), mcast_ip, mcast_port)
#continue
except KeyboardInterrupt:
logger.info("Closing UDP socket")
s.close()
logger.info("Script terminated")
sys.exit(0)
except socket.timeout:
# socket receive timed out, means there's no data coming on that UDP
logger.error("%s PID %s - No mcast output on %s:%s", channel_name, PID, mcast_ip, mcast_port)
# Need to get the PID again here, to make sure there's something to kill,
# because ffmpeg might have died completely
PID = get_ffmpeg_pid(mcast_ip, mcast_port)
if PID != False:
kill_pid(PID, channel_name)
# and break out from the while loop
break
except socket.error:
# some other error happened on the socket
logger.error("%s Socket error", channel_name)
break
# END of while
if data != False:
# if there's UDP data again, let's log NORMAL message
logger.normal("%s PID %s received %s bytes on %s:%s", channel_name, PID, bytes, mcast_ip, mcast_port)
#logger.normal("%s PID %s is running with %s:%s", channel_name, PID, mcast_ip, mcast_port)
else:
logger.error("%s %s:%s is not running.", channel_name, mcast_ip, mcast_port)
def main():
# some dirty commandline argument parser ;)
if len(sys.argv) < 2:
logger.error("No arguments - Please specify command line arguments")
logger.info(sys.argv[0] + " <CHANNEL_NAME> <MCAST_IP> <MCAST_PORT> <UDP_DATA_TIMEOUT> <PROBE_TIME>")
logger.info("Example: " + sys.argv[0] + " 239.255.14.5 3199 RCKTV 5 10")
logger.info("Exiting...")
sys.exit(1)
else:
if script_running("/dev/shm/" + str(sys.argv[1]) + "_udpwatch.lock"):
logger.warning("Script is already running - exiting...")
sys.exit(0)
logger.debug("We have arguments: %s", sys.argv)
check_output(str(sys.argv[1]), str(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5]))
####################################################################
# MAIN #############################################################
####################################################################
# setup logging
logger = setup_logging()
# prevent multiple instances
file_handle = None
if __name__ == '__main__':
main()
| hangpark/kaistusc | apps/board/migrations/0007_auto_20180311_1704.py | Python | bsd-2-clause | 1,864 | 0.003476 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-03-11 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('board', '0006_merge_20180311_1702'),
]
operations = [
migrations.CreateModel(
name='MainPoster',
fields=[
('basepost_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='board.BasePost')),
('title', models.CharField(max_length=128, verbose_name='제목')),
('title_ko', models.CharField(max_length=128, null=True, verbose_name='제목')),
('title_en', models.CharField(max_length=128, null=True, verbose_name='제목')),
('image', models.ImageField(upload_to='banner', verbose_name='이미지')),
],
options={
'verbose_name': '메인포스터',
'verbose_name_plural': '메인포스터(들)',
},
bases=('board.basepost',),
),
migrations.AlterModelOptions(
name='boardbanner',
options={'verbose_name': '게시판 배너', 'verbose_name_plural': '게시판 배너(들)'},
),
migrations.AlterField(
model_name='board',
name='role',
field=models.CharField(choices=[('DEFAULT', '기본'), ('PROJECT', '사업'), ('PLANBOOK', '정책자료집'), ('DEBATE', '논의'), ('ARCHIVING', '아카이빙'), ('WORKHOUR', '상근관리'), ('SPONSOR', '제휴리스트'), ('SWIPER', '격주보고'), ('STORE', '상점'), ('CONTACT', '산하기구')], default='DEFAULT', max_length=32, verbose_name='보드 역할'),
),
]
| google/skia-buildbot | scripts/run_on_swarming_bots/delete_tmp_dirs.py | Python | bsd-3-clause | 562 | 0.003559 |
#!/usr/bin/env python
#
# Copyright 2021 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Delete files from the temporary directory on a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system(r'forfiles /P c:\users\chrome~1\appdata\local\temp '
r'/M * /C "cmd /c if @isdir==FALSE del @file"')
os.system(r'forfiles /P c:\users\chrome~1\appdata\local\temp '
r'/M * /C "cmd /c if @isdir==TRUE rmdir /S /Q @file"')
else:
os.system(r'rm -rf /tmp/*')
| kblin/plunger | plunger/plugins/md3.py | Python | gpl-2.0 | 7,657 | 0.004963 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 by Kai Blin
#
# Plunger is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
"""The plunger file handler for the MD3 format.
The module supports export only for now.
"""
import math
import struct
try:
from plunger import toolbox
except ImportError:
import sys
sys.path.append('..')
import toolbox
sys.path.pop()
format = "md3"
extension = ".md3"
needs_dir = False
does_export = True
does_import = False
# Info from http://icculus.org/homepages/phaethon/q3a/formats/md3format.html
# Augmented by the libmodelfile headers by Alistair Riddoch, as the specfile
# is kind of inaccurate.
MD3_IDENT = "IDP3"
MD3_VERSION = 15
MD3_MAX_FRAMES = 1024
MD3_MAX_TAGS = 16
MD3_MAX_SURFACES = 32
MD3_MAX_SHADERS = 256
MD3_MAX_VERTS = 4096
MD3_MAX_TRIANGLES = 8192
class Md3Frame:
def __init__(self):
self.min_bounds = [0,0,0]
self.max_bounds = [0,0,0]
self.local_origin = [0,0,0]
self.radius = 0.0
self.name = ""
self.fmt = "fff fff fff f 8s"
def packSize(self):
return struct.calcsize(self.fmt)
def pack(self):
pack_str = ""
pack_str += struct.pack("fff", self.min_bounds.split())
pack_str += struct.pack("fff", self.max_bounds.split())
pack_str += struct.pack("fff", self.local_origin.split())
pack_str += struct.pack("f", self.radius)
pack_str += struct.pack("8s", self.name)
return pack_str
class Md3Tag:
def __init__(self):
self.name = ""
self.origin = [0,0,0]
self.axis = [[1,0,0], [0,1,0], [0,0,1]]
self.fmt = "64s fff fff fff fff"
def packSize(self):
return struct.calcsize(self.fmt)
def pack(self):
pack_str = ""
pack_str += struct.pack("64s", self.name)
pack_str += struct.pack("fff", self.origin.split())
for row in self.axis:
pack_str += struct.pack("fff", row.split())
return pack_str
class Md3Shader:
def __init__(self):
self.name = ""
self.index = 0
self.fmt = "64s i"
def packSize(self):
return struct.calcsize(self.fmt)
def pack(self):
pack_str = ""
pack_str += struct.pack("64s", self.name)
pack_str += struct.pack("i", self.index)
class Md3Triangle:
def __init__(self):
self.indices = [0,0,0]
self.fmt = "iii"
def packSize(self):
return struct.calcsize(self.fmt)
def pack(self):
return struct.pack("iii", self.indices.split())
class Md3TexCoord:
def __init__(self):
self.uv_coords = [0,0]
self.fmt = "ff"
def packSize(self):
return struct.calcsize(self.fmt)
def pack(self):
return struct.pack(self.fmt, self.uv_coords.split())
class Md3Vertex:
def __init__(self):
self.coord = [0,0,0]
self.normal = [0,0]
self.factor = 1.0 / 64
self.fmt = "hhh BB"
def packSize(self):
return struct.calcsize(self.fmt)
def pack(self):
pack_str = ""
pack_str += struct.pack("hhh", self.coord.split())
pack_str += struct.pack("BB", self.normal.split())
return pack_str
def scaleDown(self, coords):
return [i * self.factor for i in coords]
class Md3Surface:
def __init__(self):
self.ident = MD3_IDENT
self.name = ""
self.num_frames = 0
self.num_shaders = 0
self.num_verts = 0
self.num_triangles = 0
self.shaders = []
self.triangles = []
self.uv_coords = []
self.vertices = []
self.fmt = "4s 68s iiiiiiiii"
def packSize(self):
size = struct.calcsize(self.fmt)
size += len(self.shaders) * Md3Shader().packSize()
size += len(self.triangles) * Md3Triangle().packSize()
size += len(self.uv_coords) * Md3TexCoord().packSize()
size += len(self.vertices) * Md3Vertex().packSize()
return size
def pack(self):
pack_str = ""
pack_str += struct.pack("4s", self.ident)
pack_str += struct.pack("68s", self.name)
pack_str += struct.pack("ii", self.num_frames, self.num_shaders)
pack_str += struct.pack("ii", self.num_verts, self.num_triangles)
ofs_shaders = struct.calcsize(self.fmt)
ofs_triangles = ofs_shaders + len(self.shaders) * Md3Shader().packSize()
ofs_uv_coords = ofs_triangles + len(self.triangles) * Md3Triangle().packSize()
ofs_vertices = ofs_uv_coords + len(self.uv_coords) * Md3TexCoord().packSize()
ofs_end = ofs_vertices + len(self.vertices) * Md3Vertex().packSize()
pack_str += struct.pack("ii", ofs_triangles, ofs_shaders)
pack_str += struct.pack("iii", ofs_uv_coords, ofs_vertices, ofs_end)
for shader in self.shaders:
pack_str += shader.pack()
for tri in self.triangles:
pack_str += tri.pack()
for texcoord in self.uv_coords:
pack_str += texcoord.pack()
for vert in self.vertices:
pack_str += vert.pack()
class MD3Object:
def __init__(self):
self.ident = MD3_IDENT
self.version = MD3_VERSION
self.name = ""
self.num_frames = 0
self.num_tags = 0
self.num_surfaces = 0
self.num_skins = 0
self.frames = []
self.tags = []
self.surfaces = []
def pack(self):
pack_str = ""
fmt = "4si68siiiiiiii"
pack_str += struct.pack("4s", self.ident)
pack_str += struct.pack("i", self.version)
pack_str += struct.pack("68s", self.name)
pack_str += struct.pack("i", self.num_frames)
pack_str += struct.pack("i", self.num_tags)
pack_str += struct.pack("i", self.num_surfaces)
pack_str += struct.pack("i", self.num_skins)
ofs_frames = struct.calcsize(fmt)
ofs_tags = ofs_frames + len(self.frames) * Md3Frame().packSize()
ofs_surfaces = ofs_tags + len(self.tags) * Md3Tag().packSize()
ofs_eof = ofs_surfaces + len(self.surfaces) * Md3Surface().packSize()
pack_str += struct.pack("i", ofs_frames)
pack_str += struct.pack("i", ofs_tags)
pack_str += struct.pack("i", ofs_surfaces)
pack_str += struct.pack("i", ofs_eof)
for frame in self.frames:
pack_str += frame.pack()
for tag in self.tags:
pack_str += tag.pack()
for surface in self.surfaces:
pack_str += surface.pack()
return pack_str
def importAsset(model, asset):
raise NotImplementedError
def exportAsset(model, asset):
out = toolbox.writeAny(asset)
md3_object = MD3Object()
meshes = model.getMeshes()
#TODO: Put stuff into the MD3Object here()
out.write(md3_object.pack())
out.close()
def encodeNormal(x,y,z):
"""Returns (azimuth, zenith) angles of the normal vector
"""
azimuth = math.atan2(y, x) * 255 / (2 * math.pi)
zenith = math.acos(z) * 255 / (2 * math.pi)
return (azimuth, zenith)
| Crompulence/cpl-library | examples/multi_send_recv/minimal_CFD.py | Python | gpl-3.0 | 946 | 0.015856 |
from mpi4py import MPI
from cplpy import CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
cart_comm = CFD_COMM.Create_cart([1, 1, 1])
CPL.setup_cfd(cart_comm, xyzL=[1.0, 1.0, 1.0],
xyz_orig=[0.0, 0.0, 0.0], ncxyz=[32, 32, 32])
recv_array, send_array = CPL.get_arrays(recv_size=4, send_size=1)
for time in range(5):
recv_array, ierr = CPL.recv(recv_array)
print("CFD", time, recv_array[0,0,0,0])
send_array[0,:,:,:] = 2.*time
CPL.send(send_array)
CPL.finalize()
#Start again
CFD_COMM = CPL.init(CPL.CFD_REALM)
CPL.setup_cfd(cart_comm, xyzL=[1.0, 1.0, 1.0],
xyz_orig=[0.0, 0.0, 0.0], ncxyz=[32, 32, 32])
recv_array, send_array = CPL.get_arrays(recv_size=4, send_size=1)
for time in range(5):
recv_array, ierr = CPL.recv(recv_array)
print("CFD", time, recv_array[0,0,0,0])
send_array[0,:,:,:] = 2.*time
CPL.send(send_array)
CPL.finalize()
MPI.Finalize()
| GeotrekCE/Geotrek-admin | geotrek/common/utils/postgresql.py | Python | bsd-2-clause | 6,597 | 0.001516 |
import logging
import traceback
from functools import wraps
import os
import re
from django.conf import settings
from django.db import connection
from django.db.models import ManyToManyField
logger = logging.getLogger(__name__)
def debug_pg_notices(f):
@wraps(f)
def wrapped(*args, **kwargs):
r = None
if connection.connection:
del connection.connection.notices[:]
try:
r = f(*args, **kwargs)
finally:
# Show triggers output
allnotices = []
current = ''
if connection.connection:
notices = []
for notice in connection.connection.notices:
try:
notice, context = notice.split('CONTEXT:', 1)
context = re.sub(r"\s+", " ", context)
except ValueError:
context = ''
notices.append((context, notice))
if context != current:
allnotices.append(notices)
notices = []
current = context
allnotices.append(notices)
current = ''
for notices in allnotices:
for context, notice in notices:
if context != current:
if context != '':
logger.debug('Context %s...:' % context.strip()[:80])
current = context
notice = notice.replace('NOTICE: ', '')
prefix = ''
logger.debug('%s%s' % (prefix, notice.strip()))
return r
return wrapped
def load_sql_files(app, stage):
"""
Look for SQL files in Django app, and load them into database.
We remove RAISE NOTICE instructions from SQL outside unit testing
since they lead to interpolation errors of '%' character in python.
"""
app_dir = app.path
sql_dir = os.path.normpath(os.path.join(app_dir, 'sql'))
custom_sql_dir = os.path.join(settings.VAR_DIR, 'conf/extra_sql', app.label)
sql_files = []
r = re.compile(r'^{}_.*\.sql$'.format(stage))
if os.path.exists(sql_dir):
sql_files += [
os.path.join(sql_dir, f) for f in os.listdir(sql_dir) if r.match(f) is not None
]
if os.path.exists(custom_sql_dir):
sql_files += [
os.path.join(custom_sql_dir, f) for f in os.listdir(custom_sql_dir) if r.match(f) is not None
]
sql_files.sort()
cursor = connection.cursor()
for sql_file in sql_files:
try:
logger.info("Loading initial SQL data from '%s'" % sql_file)
f = open(sql_file)
sql = f.read()
f.close()
if not settings.TEST and not settings.DEBUG:
# Remove RAISE NOTICE (/!\ only one-liners)
sql = re.sub(r"\n.*RAISE NOTICE.*\n", "\n", sql)
# TODO: this is the ugliest driver hack ever
sql = sql.replace('%', '%%')
# Replace curly braces with settings values
pattern = re.compile(r'{{\s*([^\s]*)\s*}}')
for m in pattern.finditer(sql):
value = getattr(settings, m.group(1))
sql = sql.replace(m.group(0), str(value))
# Replace sharp braces with schemas
pattern = re.compile(r'{#\s*([^\s]*)\s*#}')
for m in pattern.finditer(sql):
try:
value = settings.DATABASE_SCHEMAS[m.group(1)]
except KeyError:
value = settings.DATABASE_SCHEMAS.get('default', 'public')
sql = sql.replace(m.group(0), str(value))
cursor.execute(sql)
except Exception as e:
logger.critical("Failed to install custom SQL file '%s': %s\n" %
(sql_file, e))
traceback.print_exc()
raise
def set_search_path():
# Set search path with all existing schema + new ones
cursor = connection.cursor()
cursor.execute('SELECT schema_name FROM information_schema.schemata')
search_path = set([s[0] for s in cursor.fetchall() if not s[0].startswith('pg_')])
search_path |= set(settings.DATABASE_SCHEMAS.values())
search_path.discard('public')
search_path.discard('information_schema')
search_path = ('public', ) + tuple(search_path)
cursor.execute('SET search_path TO {}'.format(', '.join(search_path)))
def move_models_to_schemas(app):
"""
Move models tables to PostgreSQL schemas.
Views, functions and triggers will be moved in Geotrek app SQL files.
"""
default_schema = settings.DATABASE_SCHEMAS.get('default', 'public')
app_schema = settings.DATABASE_SCHEMAS.get(app.name, default_schema)
table_schemas = {}
for model in app.get_models():
model_name = model._meta.model_name
table_name = model._meta.db_table
model_schema = settings.DATABASE_SCHEMAS.get(model_name, app_schema)
table_schemas.setdefault(model_schema, []).append(table_name)
for field in model._meta.get_fields():
if isinstance(field, ManyToManyField):
table_schemas[model_schema].append(field.m2m_db_table())
cursor = connection.cursor()
for schema_name in table_schemas.keys():
sql = "CREATE SCHEMA IF NOT EXISTS %s;" % model_schema
cursor.execute(sql)
logger.info("Created schema %s" % model_schema)
for schema_name, tables in table_schemas.items():
for table_name in tables:
sql = "SELECT 1 FROM information_schema.tables WHERE table_name=%s AND table_schema!=%s"
cursor.execute(sql, [table_name, schema_name])
if cursor.fetchone():
sql = "ALTER TABLE %s SET SCHEMA %s;" % (table_name, schema_name)
cursor.execute(sql)
logger.info("Moved %s to schema %s" % (table_name, schema_name))
# For Django, search_path is set in connection options.
# But when accessing the database using QGis or ETL, search_path must be
# set database level (for all users, and for this database only).
if app.name == 'geotrek.common':
dbname = settings.DATABASES['default']['NAME']
dbuser = settings.DATABASES['default']['USER']
search_path = ', '.join(('public', ) + tuple(set(settings.DATABASE_SCHEMAS.values())))
sql = "ALTER ROLE %s IN DATABASE %s SET search_path=%s;" % (dbuser, dbname, search_path)
cursor.execute(sql)
| absalon-james/randomload | randomload/actions/glance/usage.py | Python | apache-2.0 | 2,200 | 0 |
"""This is just a playing around module. Please ignore it"""
import json
import six
from randomload.log import logging
from six.moves.urllib import parse
logger = logging.getLogger('randomload.actions.glance.usage')
class Controller(object):
def __init__(self, http_client):
self.http_client = http_client
def list(self, start, end, detailed=False, metadata=None):
if metadata is None:
metadata = {}
opts = {
'start': start.isoformat(),
'end': end.isoformat(),
'detailed': int(bool(detailed))
}
if isinstance(metadata, dict):
metadata = json.dumps(metadata)
if metadata:
opts['metadata'] = metadata
qparams = {}
for opt, val in opts.items():
if val:
if isinstance(val, six.text_type):
val = val.encode('utf-8')
qparams[opt] = val
query_string = '?%s' % parse.urlencode(qparams)
url = '/v2/usages%s' % query_string
return self.http_client.get(url)
def bytes_to_GB(size_in_B):
"""Return size in GB
:param size: Numeric
:returns: Float
"""
if size_in_B is None:
size_in_B = 0
return float(size_in_B) / 1024 / 1024 / 1024
def usage(clients, conf, start=None, end=None, metadata=None):
logger.info("Start: {0}".format(start))
logger.info("End: {0}".format(end))
logger.info("Metadata: {0}".format(metadata))
glance = clients.get_glance()
controller = Controller(glance.http_client)
resp, _ = controller.list(start, end, detailed=True, metadata=metadata)
for tenant_usage in resp.json().get('tenant_usages', []):
logger.info("Tenant id: {0}".format(tenant_usage.get('project_id')))
logger.info("Total GB Hours: {0}".format(
tenant_usage.get('total_gb_hours')
))
for usage in tenant_usage.get('image_usages', []):
logger.info(
"Name: {0} - Size: {1} GB - Status: {2}".format(
usage['name'],
bytes_to_GB(usage['size']),
usage['status']
)
)
| TheWardoctor/Wardoctors-repo | script.stargate.guide/resources/playwith/playwithchannel.py | Python | apache-2.0 | 2,920 | 0.008562 |
import sys
import xbmc,xbmcaddon,xbmcvfs
import sqlite3
from subprocess import Popen
import datetime,time
# from vpnapi import VPNAPI
channel = sys.argv[1]
start = sys.argv[2]
ADDON = xbmcaddon.Addon(id='script.stargate.guide')
def adapt_datetime(ts):
# http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
return time.mktime(ts.timetuple())
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
sqlite3.register_converter('timestamp', convert_datetime)
path = xbmc.translatePath('special://profile/addon_data/script.stargate.guide/source.db')
try:
conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
except Exception as detail:
xbmc.log("EXCEPTION: (script.stargate.guide) %s" % detail, xbmc.LOGERROR)
c = conn.cursor()
startDate = datetime.datetime.fromtimestamp(float(start))
c.execute('SELECT DISTINCT * FROM programs WHERE channel=? AND start_date = ?', [channel,startDate])
for row in c:
title = row["title"]
endDate = row["end_date"]
duration = endDate - startDate
before = int(ADDON.getSetting('autoplaywiths.before'))
after = int(ADDON.getSetting('autoplaywiths.after'))
extra = (before + after) * 60
#TODO start from now
#seconds = duration.seconds + extra
#if seconds > (3600*4):
seconds = 3600*4
break
# Find the channel's stream url
c.execute('SELECT stream_url FROM custom_stream_url WHERE channel=?', [channel])
row = c.fetchone()
url = ""
if row:
url = row[0]
if not url:
quit()
# Uncomment this if you want to use VPN Mgr filtering. Need to import VPNAPI.py
# else:
# if ADDON.getSetting('vpnmgr.connect') == "true":
# vpndefault = False
# if ADDON.getSetting('vpnmgr.default') == "true":
# vpndefault = True
# api = VPNAPI()
# if url[0:9] == 'plugin://':
# api.filterAndSwitch(url, 0, vpndefault, True)
# else:
# if vpndefault: api.defaultVPN(True)
# Find the actual url used to play the stream
#core = "dummy"
#xbmc.executebuiltin('PlayWith(%s)' % core)
player = xbmc.Player()
player.play(url)
count = 30
url = ""
while count:
count = count - 1
time.sleep(1)
if player.isPlaying():
url = player.getPlayingFile()
break
player.stop()
# Play with your own preferred player and paths
if url:
name = "%s = %s = %s" % (start,channel,titl
|
e)
name = name.encode("cp1252")
filename = xbmc.translatePath("special://temp/%s.ts" % name)
#filename = "/storage/recordings/%s.ts" % name
ffmpeg = r"c:\utils\ffmpeg.exe"
ffmpeg = r"/usr/bin/ffmpeg"
cmd = [ffmpeg, "-y", "-i", url, "-c", "copy", "-t", str(seconds), filename]
p = Popen(cmd,shell=True)
#p = Popen(cmd,shell=False)
| GLolol/PyLink | protocols/hybrid.py | Python | mpl-2.0 | 12,742 | 0.003375 |
"""
hybrid.py: IRCD-Hybrid protocol module for PyLink.
"""
import time
from pylinkirc import conf
from pylinkirc.classes import *
from pylinkirc.log import log
from pylinkirc.protocols.ts6 import TS6Protocol
__all__ = ['HybridProtocol']
# This protocol module inherits from the TS6 protocol.
class HybridProtocol(TS6Protocol):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.casemapping = 'ascii'
self.hook_map = {'EOB': 'ENDBURST', 'TBURST': 'TOPIC', 'SJOIN': 'JOIN'}
self.protocol_caps -= {'slash-in-hosts'}
def post_connect(self):
"""Initializes a connection to a server."""
ts = self.start_ts
f = self.send
# https://github.com/grawity/irc-docs/blob/master/server/ts6.txt#L80
# Note: according to hybrid source code, +p is paranoia, noknock,
# AND rfc1459-style private, though the last isn't documented.
cmodes = {
# TS6 generic modes:
'op': 'o', 'halfop': 'h', 'voice': 'v', 'ban': 'b', 'key': 'k',
'limit': 'l', 'moderated': 'm', 'noextmsg': 'n',
'secret': 's', 'topiclock': 't', 'private': 'p',
# hybrid-specific modes:
'blockcolor': 'c', 'inviteonly': 'i', 'noctcp': 'C',
'regmoderated': 'M', 'operonly': 'O', 'regonly': 'R',
'sslonly': 'S', 'banexception': 'e', 'noknock': 'p',
'registered': 'r', 'invex': 'I', 'paranoia': 'p',
'banexception': 'e',
# Now, map all the ABCD type modes:
'*A': 'beI', '*B': 'k', '*C': 'l', '*D': 'cimnprstCMORS'
}
self.cmodes = cmodes
umodes = {
'oper': 'o', 'invisible': 'i', 'wallops': 'w', 'locops': 'l',
'cloak': 'x', 'hidechans': 'p', 'regdeaf': 'R', 'deaf': 'D',
'callerid': 'g', 'admin': 'a', 'deaf_commonchan': 'G', 'hideoper': 'H',
'webirc': 'W', 'sno_clientconnections': 'c', 'sno_badclientconnections': 'u',
'sno_rejectedclients': 'j', 'sno_skill': 'k', 'sno_fullauthblock': 'f',
'sno_remoteclientconnections': 'F', 'sno_stats': 'y', 'sno_debug': 'd',
'sno_nickchange': 'n', 'hideidle': 'q', 'registered': 'r',
'snomask': 's', 'ssl': 'S', 'sno_serverconnects': 'e', 'sno_botfloods': 'b',
# Now, map all the ABCD type modes:
'*A': '', '*B': '', '*C': '', '*D': 'DFGHRSWabcdefgijklnopqrsuwxy'
}
self.umodes = umodes
self.extbans_matching.clear()
# halfops is mandatory on Hybrid
self.prefixmodes = {'o': '@', 'h': '%', 'v': '+'}
# https://github.com/grawity/irc-docs/blob/master/server/ts6.txt#L55
f('PASS %s TS 6 %s' % (self.serverdata["sendpass"], self.sid))
# We request the following capabilities (for hybrid):
# ENCAP: message encapsulation for certain commands
# EX: Support for ban exemptions (+e)
# IE: Support for invite exemptions (+e)
# CHW: Allow sending messages to @#channel and the like.
# KNOCK: Support for /knock
# SVS: Deal with extended NICK/UID messages that contain service IDs/stamps
# TBURST: Topic Burst command; we send this in topic_burst
# DLN: DLINE command
# UNDLN: UNDLINE command
# KLN: KLINE command
# UNKLN: UNKLINE command
# HOPS: Supports HALFOPS
# CHW: Can do channel wall (@#)
# CLUSTER: Supports server clustering
# EOB: Supports EOB (end of burst) command
f('CAPAB :TBURST DLN KNOCK UNDLN UNKLN KLN ENCAP IE EX HOPS CHW SVS CLUSTER EOB QS')
f('SERVER %s 0 :%s' % (self.serverdata["hostname"],
self.serverdata.get('serverdesc') or conf.conf['pylink']['serverdesc']))
# send endburst now
self.send(':%s EOB' % (self.sid,))
def spawn_client(self, nick, ident='null', host='null', realhost=None, modes=set(),
server=None, ip='0.0.0.0', realname=None, ts=None, opertype=None,
manipulatable=False):
"""
Spawns a new client with the given options.
Note: No nick collision / valid nickname checks are done here; it is
up to plugins to make sure they don't introduce anything invalid.
"""
server = server or self.sid
if not self.is_internal_server(server):
raise ValueError('Server %r is not a PyLink server!' % server)
uid = self.uidgen[server].next_uid()
ts = ts or int(time.time())
realname = realname or conf.conf['pylink']['realname']
realhost = realhost or host
raw_modes = self.join_modes(modes)
u = self.users[uid] = User(self, nick, ts, uid, server, ident=ident, host=host, realname=realname,
realhost=realhost, ip=ip, manipulatable=manipulatable)
self.apply_modes(uid, modes)
self.servers[server].users.add(uid)
self._send_with_prefix(server, "UID {nick} {hopcount} {ts} {modes} {ident} {host} {ip} {uid} "
"* :{realname}".format(ts=ts, host=host,
nick=nick, ident=ident, uid=uid,
modes=raw_modes, ip=ip, realname=realname,
hopcount=self.servers[server].hopcount))
return u
def update_client(self, target, field, text):
"""Updates the ident, host, or realname of a PyLink client."""
# https://github.com/ircd-hybrid/ircd-hybrid/blob/58323b8/modules/m_svsmode.c#L40-L103
# parv[0] = command
# parv[1] = nickname <-- UID works too -jlu5
# parv[2] = TS <-- Of the user, not the current time. -jlu5
# parv[3] = mode
# parv[4] = optional argument (services account, vhost)
field = field.upper()
ts = self.users[target].ts
if field == 'HOST':
self.users[target].host = text
# On Hybrid, it appears that host changing is actually just forcing umode
# "+x <hostname>" on the target. -jlu5
self._send_with_prefix(self.sid, 'SVSMODE %s %s +x %s' % (target, ts, text))
else:
raise NotImplementedError("Changing field %r of a client is unsupported by this protocol." % field)
def oper_notice(self, source, text):
"""
Send a message to all opers.
"""
self._send_with_prefix(source, 'GLOBOPS :%s' % text)
def set_server_ban(self, source, duration, user='*', host='*', reason='User banned'):
"""
Sets a server ban.
"""
# source: user
# parameters: target server mask, duration, user mask, host mask, reason
assert not (user == host == '*'), "Refusing to set ridiculous ban on *@*"
if not source in self.users:
log.debug('(%s) Forcing KLINE sender to %s as TS6 does not allow KLINEs from servers', self.name, self.pseudoclient.uid)
source = self.pseudoclient.uid
self._send_with_prefix(source, 'KLINE * %s %s %s :%s' % (duration, user, host, reason))
def topic_burst(self, numeric, target, text):
"""Sends a topic change from a PyLink server. This is usually used on burst."""
# <- :0UY TBURST 1459308205 #testchan 1459309379 dan!~d@localhost :sdf
if not self.is_internal_server(numeric):
raise LookupError('No such PyLink server exists.')
ts = self._channels[target].ts
servername = self.servers[numeric].name
self._send_with_prefix(numeric, 'TBURST %s %s %s %s :%s' % (ts, target, int(time.time()), servername, text))
self._channels[target].topic = text
self._channels[target].topicset = True
# command handlers
def handle_capab(self, numeric, command, args):
# We only get a list of keywords here. Hybrid obviously assumes that
# we know what modes it supports (indeed, this is a standard list).
# <- CAPAB :UNDLN UNKLN KLN TBURST KNOCK ENCAP DLN IE EX HOPS CHW SVS CLUSTER EOB QS
self._caps = caps = args[0].split()
for required_cap in ('SVS', 'EOB', 'HOPS', 'QS', 'TBURST'):
if required_cap not in caps:
raise Proto
| avaitla/Haskell-to-C---Bridge | pygccxml-1.0.0/unittests/call_invocation_tester.py | Python | bsd-3-clause | 4,057 | 0.027114 |
#! /usr/bin/python
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import pygccxml
from pygccxml.utils import *
from pygccxml.parser import *
from pygccxml import declarations
class tester_t( unittest.TestCase ):
def __init__(self, *args ):
unittest.TestCase.__init__( self, *args )
def __test_split_impl(self, decl_string, name, args):
self.failUnless( ( name, args ) == declarations.call_invocation.split( decl_string ) )
def __test_split_recursive_impl(self, decl_string, control_seq):
self.failUnless( control_seq == declarations.call_invocation.split_recursive( decl_string ) )
def __test_is_call_invocation_impl( self, decl_string ):
self.failUnless( declarations.call_invocation.is_call_invocation( decl_string ) )
def test_split_on_vector(self):
self.__test_is_call_invocation_impl( "vector(int,std::allocator(int) )" )
self.__test_split_impl( "vector(int,std::allocator(int) )"
, "vector"
, [ "int", "std::allocator(int)" ] )
self.__test_split_recursive_impl( "vector(int,std::allocator(int) )"
, [ ( "vector", [ "int", "std::allocator(int)" ] )
, ( "std::allocator", ["int"] ) ] )
def test_split_on_string(self):
self.__test_is_call_invocation_impl( "basic_string(char,std::char_traits(char),std::allocator(char) )" )
self.__test_split_impl( "basic_string(char,std::char_traits(char),std::allocator(char) )"
, "basic_string"
, [ "char", "std::char_traits(char)", "std::allocator(char)" ] )
def test_split_on_map(self):
self.__test_is_call_invocation_impl( "map(long int,std::vector(int, std::allocator(int) ),std::less(long int),std::allocator(std::pair(const long int, std::vector(int, std::allocator(int) ) ) ) )" )
self.__test_split_impl( "map(long int,std::vector(int, std::allocator(int) ),std::less(long int),std::allocator(std::pair(const long int, std::vector(int, std::allocator(int) ) ) ) )"
, "map"
, [ "long int"
, "std::vector(int, std::allocator(int) )"
, "std::less(long int)"
, "std::allocator(std::pair(const long int, std::vector(int, std::allocator(int) ) ) )" ] )
def test_join_on_vector(self):
self.failUnless( "vector( int, std::allocator(int) )"
== declarations.call_invocation.join("vector", ( "int", "std::allocator(int)" ) ) )
def test_find_args(self):
temp = 'x()()'
found = declarations.call_invocation.find_args( temp )
self.failUnless( (1,2) == found )
found = declarations.call_invocation.find_args( temp, found[1]+1 )
self.failUnless( (3, 4) == found )
temp = 'x(int,int)(1,2)'
found = declarations.call_invocation.find_args( temp )
self.failUnless( (1,9) == found )
found = declarations.call_invocation.find_args( temp, found[1]+1 )
self.failUnless( (10, 14) == found )
def test_bug_unmatched_brace( self ):
src = 'AlternativeName((&string("")), (&string("")), (&string("")))'
self.__test_split_impl( src
, 'AlternativeName'
, ['(&string(""))', '(&string(""))', '(&string(""))'] )
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
| bbaronSVK/plugin.video.sosac.ph | resources/lib/sosac.py | Python | gpl-2.0 | 25,065 | 0.003152 |
# -*- coding: UTF-8 -*-
# /*
# * Copyright (C) 2015 Libor Zoubek + jondas
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re
import urllib
import urllib2
import cookielib
import xml.etree.ElementTree as ET
import sys
import util
from provider import ContentProvider, cached, ResolveException
sys.setrecursionlimit(10000)
MOVIES_BASE_URL = "http://movies.prehraj.me"
TV_SHOWS_BASE_URL = "http://tv.prehraj.me"
MOVIES_A_TO_Z_TYPE = "movies-a-z"
MOVIES_GENRE = "filmyxmlzanr.php"
GENRE_PARAM = "zanr"
TV_SHOWS_A_TO_Z_TYPE = "tv-shows-a-z"
XML_LETTER = "xmlpismeno"
TV_SHOW_FLAG = "#tvshow#"
ISO_639_1_CZECH = "cs"
MOST_POPULAR_TYPE = "most-popular"
RECENTLY_ADDED_TYPE = "recently-added"
SEARCH_TYPE = "search"
class SosacContentProvider(ContentProvider):
ISO_639_1_CZECH = None
par = None
def __init__(self, username=None, password=None, filter=None, reverse_eps=False):
ContentProvider.__init__(self, name='sosac.ph', base_url=MOVIES_BASE_URL, username=username,
password=password, filter=filter)
util.init_urllib(self.cache)
cookies = self.cache.get(util.CACHE_COOKIES)
if not cookies or len(cookies) == 0:
util.request(self.base_url)
self.reverse_eps = reverse_eps
def on_init(self):
kodilang = self.lang or 'cs'
if kodilang == ISO_639_1_CZECH or kodilang == 'sk':
self.ISO_639_1_CZECH = ISO_639_1_CZECH + '/'
else:
self.ISO_639_1_CZECH = ''
def capabilities(self):
return ['resolve', 'categories', 'search']
def categories(self):
result = []
for title, url in [
("Movies", MOVIES_BASE_URL),
("TV Shows", TV_SHOWS_BASE_URL),
("Movies - by Genres", MOVIES_BASE_URL + "/" + MOVIES_GENRE),
("Movies - Most popular",
MOVIES_BASE_URL + "/" + self.ISO_639_1_CZECH + MOST_POPULAR_TYPE),
("TV Shows - Most popular",
TV_SHOWS_BASE_URL + "/" + self.ISO_639_1_CZECH + MOST_POPULAR_TYPE),
("Movies - Recently added",
MOVIES_BASE_URL + "/" + self.ISO_639_1_CZECH + RECENTLY_ADDED_TYPE),
("TV Shows - Recently added",
TV_SHOWS_BASE_URL + "/" + self.ISO_639_1_CZECH + RECENTLY_ADDED_TYPE)]:
item = self.dir_item(title=title, url=url)
if title == 'Movies' or title == 'TV Shows' or title == 'Movies - Recently added':
item['menu'] = {"[B][COLOR red]Add all to library[/COLOR][/B]": {
'action': 'add-all-to-library', 'title': title}}
result.append(item)
return result
def search(self, keyword):
return self.list_search('%s/%ssearch?%s' % (MOVIES_BASE_URL, self.ISO_639_1_CZECH,
urllib.urlencode({'q': keyword})))
def a_to_z(self, url_type):
result = []
for letter in ['0-9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'e', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']:
item = self.dir_item(title=letter.upper())
if url_type == MOVIES_A_TO_Z_TYPE:
item['url'] = self.base_url + "/filmyxmlpismeno.php?pismeno=" + letter
else:
item['url'] = self.base_url + "/" + self.ISO_639_1_CZECH + url_type + "/" + letter
result.append(item)
return result
@staticmethod
def remove_flag_from_url(url, flag):
return url.replace(flag, "", count=1)
@staticmethod
def is_xml_letter(url):
if XML_LETTER in url:
return True
return False
@staticmethod
def is_base_url(url):
if url in [MOVIES_BASE_URL, TV_SHOWS_BASE_URL]:
return True
else:
return False
@staticmethod
def is_movie_url(url):
if MOVIES_BASE_URL in url:
return True
else:
return False
@staticmethod
def is_tv_shows_url(url):
if TV_SHOWS_BASE_URL in url:
return True
else:
return False
@staticmethod
def is_most_popular(url):
if MOST_POPULAR_TYPE in url:
return True
else:
return False
@staticmethod
def is_recently_added(url):
if RECENTLY_ADDED_TYPE in url:
return True
else:
return False
@staticmethod
def is_search(url):
return SEARCH_TYPE in url
@staticmethod
def particular_letter(url):
return "a-z/" in url
def has_tv_show_flag(self, url):
return TV_SHOW_FLAG in url
def remove_flags(self, url):
return url.replace(TV_SHOW_FLAG, "", 1)
def list(self, url):
util.info("Examining url " + url)
if MOVIES_GENRE in url:
return self.list_by_genres(url)
if self.is_most_popular(url):
if "movie" in url:
return self.list_movies_by_letter(url)
if "tv" in url:
return self.list_tv_shows_by_letter(url)
if self.is_recently_added(url):
util.debug("is recently added")
if "movie" in url:
return self.list_movie_recently_added(url)
if "tv" in url:
util.debug("is TV")
return self.list_tv_recently_added(url)
if self.is_search(url):
return self.list_search(url)
if self.is_base_url(url):
self.base_url = url
if "movie" in url:
return self.a_to_z(MOVIES_A_TO_Z_TYPE)
if "tv" in url:
return self.a_to_z(TV_SHOWS_A_TO_Z_TYPE)
if self.particular_letter(url):
if "movie" in url:
return self.list_movies_by_letter(url)
if "tv" in url:
return self.list_tv_shows_by_letter(url)
if self.has_tv_show_flag(url):
return self.list_tv_show(self.remove_flags(url))
if self.is_xml_letter(url):
util.debug("xml letter")
if "movie" in url:
return self.list_xml_letter(url)
return [self.dir_item(title="I failed", url="fail")]
def list_by_genres(self, url):
if "?" + GENRE_PARAM in url:
return self.list_xml_letter(url)
else:
result = []
page = util.request(url)
data = util.substr(page, '<select name=\"zanr\">', '</select')
for s in re.finditer('<option value=\"([^\"]+)\">([^<]+)</option>', data,
re.IGNORECASE | re.DOTALL):
item = {'url': url + "?" + GENRE_PARAM + "=" +
s.group(1), 'title': s.group(2), 'type': 'dir'}
self._filter(result, item)
return result
def list_xml_letter(self, url):
result = []
data = util.request(url)
tree = ET.fromstring(data)
for film in tree.findall('film'):
item = self.video_item()
try:
if ISO_639_1_CZECH in self.ISO_639_1_CZECH:
title = film.findtext('nazevcs').encode('utf-8')
else:
title = film.findtext('nazeven').encode('utf-8')
basetitle = '%s (%s)' % (title, film.findtext('rokvydani'))
item['title'] = '%s - %s' %
| dopplershift/Scattering | scripts/test_delta_units.py | Python | bsd-2-clause | 1,224 | 0.011438 |
import matplotlib.pyplot as plt
import numpy as np
import scattering
import scipy.constants as consts
import quantities as pq
def plot_csec(scatterer, d, var, name):
lam = scatterer.wavelength.rescale('cm')
plt.plot(d, var,
label='%.1f %s' % (lam, lam.dimensionality))
plt.xlabel('Diameter (%s)' % d.dimensionality)
plt.ylabel(name)
def plot_csecs(d, scatterers):
for s in scatterers:
plt.subplot(1,1,1)
plot_csec(s, d, np.rad2deg(np.unwrap(-np.angle(-s.S_bkwd[0,0].conj() *
s.S_bkwd[1,1]).squeeze())), 'delta')
plt.gca().set_ylim(-4, 20)
d = np.linspace(0.01, 0.7, 200).reshape(200, 1) * pq.cm
sband = pq.c / (2.8 * pq.GHz)
cband = pq.c / (5.4 * pq.GHz)
xband = pq.c / (9.4 * pq.GHz)
temp = 10.0
x_fixed = scattering.scatterer(xband, temp, 'water', diameters=d, shape='oblate')
x_fixed.set_scattering_model('tmatrix')
c_fixed = scattering.scatterer(cband, temp, 'water', diameters=d, shape='oblate')
c_fixed.set_scattering_model('tmatrix')
s_fixed = scattering.scatterer(sband, temp, 'water', diameters=d, shape='oblate')
s_fixed.set_scattering_model('tmatrix')
plot_csecs(d, [x_fixed, c_fixed, s_fixed])
plt.legend(loc = 'upper left')
plt.show()
|
Vauxoo/stock-logistics-warehouse
|
stock_picking_procure_method/__manifest__.py
|
Python
|
agpl-3.0
| 587
| 0
|
# Copyright 2018 Tecnativa - David Vidal
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Stock Picking Procure Method',
'summary': 'Allows to force the procurement method from the picking',
'version': '12.0.1.0.0',
'category': 'Warehouse',
'author': 'Tecnativa,'
'Odoo Community Association (OCA)',
'website': 'https://github.com/OCA/stock-logistics-warehouse',
'license': 'AGPL-3',
'depends': [
'stock',
],
'data': [
'views/stock_picking_views.xml',
],
'installable': True,
}
|
jannon/django-allauth-api
|
src/allauth_api/socialaccount/rest_framework/authentication.py
|
Python
|
bsd-2-clause
| 1,984
| 0.00252
|
from django.utils.translation import ugettext as _
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
from allauth_api.account.rest_framework import authentication as account_auth
from allauth_api.settings import allauth_api_settings
from allauth_api.socialaccount.providers import registry
class SocialAuthentication(BaseAuthentication):
"""
An authentication method that hands the duty off to the specified provider
the settings.PROVIDER_PARAMETER_NAME must be present in the request data
"""
def authenticate(self, request):
provider_id = request.DATA.get(allauth_api_settings.PROVIDER_PARAMETER_NAME)
if provider_id:
provider = registry.by_id(provider_id)
if provider:
return provider.authneticate(request)
else:
msg = "%s %s" % (_("no provider found for"), provider_id)
raise AuthenticationFailed(msg)
else:
msg = "%s %s" % (allauth_api_settings.PROVIDER_PARAMETER_NAME,
_("parameter must be provided"))
raise AuthenticationFailed(msg)
class BasicLogin(account_auth.BasicLogin):
"""
A login class that just uses the standard Django authenticate mechanism
"""
auth_class = SocialAuthentication
class TokenLogin(account_auth.TokenLogin):
"""
A login class that returns a user authentication token. This method, in its default
configuration is only available if rest_framework.authtoken is in installed_apps
"""
auth_class = SocialAuthentication
# class OAuth2Login(account_auth.OAuth2Login):
# """
# A login class that accepts oauth2 authentication requests and returns the appropriate
# access tokens. This login method, in its default configuration is only available if
# oauth2_provider is in installed_apps
# """
#
#     auth_class = SocialAuthentication
|
Brazelton-Lab/system
|
rename.py
|
Python
|
gpl-2.0
| 5,082
| 0.007674
|
#! /usr/bin/env python
"""
This program is for renaming files (through symbolic links) using a file
conversion table. The columns should be ordered as so: new directory, new id,
old directory, old file name. Columns can be separated using any standard
ASCII character.
If files are paired-end and follow the standard conventions for discriminating
forward from reverse reads (R1 and R2), then an asterisk (*) can be used after
the file name (e.g. samplename1_R*) instead of specifying each paired file
individually. The linked pairs will be differentiated using "forward" and
"reverse" in place of "R1" and "R2".
"""
from __future__ import print_function
import argparse
import glob
import locale
import re
import sys
import textwrap
from itertools import izip
from subprocess import Popen, PIPE
def format_io(old_name, new_name, ext=''):
extensions = {'fa': 'fasta', 'fasta': 'fasta', 'fna': 'fasta',
'fq': 'fastq', 'fastq': 'fastq', 'fnq': 'fastq'}
compress = ''
if ext:
file_end = ext
else:
old_name = old_name.split('.')
filetype = old_name[-1].strip()
if filetype in ["gz", "bz2", "zip"]:
compress = ".{}".format(filetype)
filetype = old_name[-2]
try:
extension = extensions[filetype]
except KeyError:
print(textwrap.fill("Error: unknown file type {}. Please make "
"sure the filenames end in one of the supported extensions "
"(fa, fna, fasta, fq, fnq, fastq)".format(filetype), 79),
file=sys.stderr)
sys.exit(1)
file_end = extensions[filetype] + compress
return "{}.{}".format(new_name, file_end)
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile',
help="input conversion table file")
parser.add_argument('-e', '--ext',
help="extension of renamed file (optional)")
parser.add_argument('-s', '--sep',
default=',',
help="field separator character [default: ,]")
args = parser.parse_args()
with open(args.infile, 'rU') as in_h:
for line in in_h:
try:
new_dir, new_id, old_dir, old_name = line.split(args.sep)
except ValueError:
print(textwrap.fill("Error: failed to properly parse {}. The "
"conversion table should contain four columns. See usage "
"for details".format(infile), 79), file=sys.stderr)
sys.exit(1)
new_dir = new_dir.strip()
new_id = new_id.strip()
old_dir = old_dir.strip()
old_name = old_name.strip()
if old_name.strip()[-1] == '*':
strand_name = {'R1': 'forward', 'R2': 'reverse'}
                forwards = glob.glob('{}/{}*R1_*'.format(old_dir, old_name[:-1]))
reverses = glob.glob('{}/{}*R2_*'.format(old_dir, old_name[:-1]))
if len(forwards) != len(reverses):
print(textwrap.fill("Warning: missing pair in {}. The use "
"of '*' should only be used for paired-end reads in "
"separate files".format(old_name), 79), file=sys.st
|
derr)
continue
if len(forwards) > 1:
add_det = True
else:
add_det = False
for strand in (forwards, reverses):
for filename in strand:
if add_det:
seq_detail = re.search(r'L\d{3}_R[12]_\d{3}',
filename).group()
lane, pair, number = seq_detail.split('_')
new_name = format_io(filename, "{}.{}.{}_{}"
.format(new_id, strand_name[pair], lane,
number), args.ext)
else:
new_name = format_io(filename, "{}.{}"
.format(new_id, strand_name[pair]), args.ext)
ln_out, ln_err = (Popen(['ln', "-s", filename, "{}/{}"
.format(new_dir, new_name)], stdout=PIPE,
stderr=PIPE).communicate())
if ln_err:
print(ln_err.decode(locale.getdefaultlocale()[1]),
file=sys.stderr)
else:
new_name = format_io(old_name, new_id, args.ext)
new_path = new_dir + "/" + new_name
old_path = old_dir + "/" + old_name
ln_out, ln_err = (Popen(['ln', "-s", old_path, new_path],
stdout=PIPE, stderr=PIPE).communicate())
if ln_err:
print(ln_err.decode(locale.getdefaultlocale()[1]),
file=sys.stderr)
if __name__ == '__main__':
main()
sys.exit(0)
|
xu6148152/Binea_Python_Project
|
oop/student/student.py
|
Python
|
mit
| 246
| 0.012195
|
__author__ = 'xubinggui'
class Student(object):
    def __init__(self, name, score):
self.name = name
self.score = score
def print_score(self):
print(self.score)
bart = Student('Bart Simpson', 59)
bart.print_score()
|
alirizakeles/tendenci
|
tendenci/apps/news/admin.py
|
Python
|
gpl-3.0
| 1,737
| 0.01209
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from tendenci.apps.perms.admin import TendenciBaseModelAdmin
from tendenci.apps.news.models import News
from tendenci.apps.news.forms import NewsForm
class NewsAdmin(TendenciBaseModelAdmin):
list_display = ['headline', 'update_dt', 'owner_link', 'admin_perms', 'admin_status']
list_filter = ['status_detail', 'owner_username']
prepopulated_fields = {'slug': ['headline']}
search_fields = ['headline', 'body']
fieldsets = (
        (_('News Information'), {
'fields': ('headline',
'slug',
'summary',
'body',
'group',
'tags',
'source',
'website',
'release_dt',
'timezone',
)
}),
(_('Contributor'), {'fields': ('contributor_type',)}),
(_('Author'), {'fields': ('first_name',
'last_name',
'google_profile',
'phone',
'fax',
'email',
),
'classes': ('contact',),
}),
(_('Permissions'), {'fields': ('allow_anonymous_view',)}),
(_('Advanced Permissions'), {'classes': ('collapse',), 'fields': (
'user_perms',
'member_perms',
'group_perms',
)}),
(_('Publishing Status'), {'fields': (
'syndicate',
'status_detail',
)}),
)
form = NewsForm
ordering = ['-update_dt']
admin.site.register(News, NewsAdmin)
|
sh4d0/openvix
|
manage.py
|
Python
|
gpl-3.0
| 3,933
| 0.003051
|
import argparse
import os
import subprocess
import sys
SCRIPT_DIR = os.path.dirname(__file__)
class ExecException(Exception):
pass
class Exec(object):
@staticmethod
def run(cmd, workingdir=None):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=workingdir)
out = p.communicate()[0]
return out
@classmethod
def run_cmd(cls, args, workingdir=None):
return cls.run('cmd /c %s' % args, workingdir)
@classmethod
def cmake_version(cls):
cmd = 'cmake --version'
out = cls.run_cmd(cmd).decode()
if 'cmake version' not in out:
raise ExecException('Unable to find cmake, if it is installed, check your PATH variable.')
@classmethod
def vs_info(cls):
if 'VCINSTALLDIR' not in os.environ:
raise ExecException('Unable to detect build environment.')
cmd = 'cl'
out = cls.run_cmd(cmd).decode()
if 'x86' in out:
arch = 'x86'
elif 'x64' in out:
arch = 'x64'
else:
raise ExecException('Unable to detect build environment.')
if '15.00' in out:
version = 9
elif '16.00' in out:
version = 10
elif '17.00' in out:
version = 11
elif '18.00' in out:
version = 12
elif '19.00' in out:
version = 14
else:
raise ExecException('Unable to detect build environment.')
return arch, version
class CMake(object):
def __init__(self, arch, version):
assert version in [7, 8, 9, 10, 11, 12, 14], 'Unsupported version (%s)' % version
assert arch.lower() in ['x86', 'x64'], 'Unsupported arch (%s)' % arch
self.version = version
self.arch = arch
@property
def generator(self):
if self.arch.lower() == 'x64':
arch = 'Win64'
else:
arch = ''
return ('Visual Studio %s %s' % (self.version, arch)).strip()
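        # e.g. CMake(arch='x64', version=12).generator -> 'Visual Studio 12 Win64'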
def generate(arch=None, version=None):
cmake = CMake(arch=arch, version=version)
build_dir = os.path.join(SCRIPT_DIR, 'build', cmake.arch)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
result = Exec.run_cmd(
'cmake -G "%s" ../.. --debug-output' % cmake.generator, workingdir=build_dir).decode()
print(result)
def manage(generate_sln=True, build_sln=False, test_sln=False, **kwargs):
if test_sln is True:
build_sln = True
if build_sln is True:
generate_sln = True
if generate_sln is True:
generate(**kwargs)
else:
raise Exception('Did not enable at least one of "generate", "build" or "test"');
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description='Manage CMake projects')
parser.add_argument('--generate', dest='generate_sln', action='store_true', default=True,
help='Generate VS Solution file')
parser.add_argument('--build', dest='build_sln', action='store_true', default=False, help='Build source')
    parser.add_argument('--test', dest='test_sln', action='store_true', default=False, help='Run tests')
parser.add_argument('--arch', default=None, choices=['x86', 'x64'],
help='Arch of Visual Studio if not run from VS command prompt')
    parser.add_argument('--version', default=None, choices=[7, 8, 9, 10, 11, 12, 14], type=int,
help='Version of Visual Studio of not run from VS command prompt')
args = parser.parse_args(argv)
Exec.cmake_version() # Make sure we have located cmake
if args.arch is None or args.version is None:
vs_info = Exec.vs_info()
if args.arch is None:
args.arch = vs_info[0]
if args.version is None:
args.version = vs_info[1]
manage(**vars(args))
if __name__ == '__main__':
main()
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/storage_profile_py3.py
|
Python
|
mit
| 2,421
| 0.000413
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageProfile(Model):
"""Specifies the storage settings for the virtual machine disks.
:param image_reference: Specifies information about the image to use. You
can specify information about platform images, marketplace images, or
    virtual machine images. This element is required when you want to use a
platform image, marketplace image, or virtual machine image, but is not
used in other creation operations.
:type image_reference:
~azure.mgmt.compute.v2016_03_30.models.ImageReference
:param os_disk: Specifies information about the operating system disk used
by the virtual machine. <br><br> For more information about disks, see
[About disks and VHDs for Azure virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
:type os_disk: ~azure.mgmt.compute.v2016_03_30.models.OSDisk
:param data_disks: Specifies the parameters that are used to add a data
disk to a virtual machine. <br><br> For more information about disks, see
[About disks and VHDs for Azure virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
:type data_disks: list[~azure.mgmt.compute.v2016_03_30.models.DataDisk]
"""
_attribute_map = {
'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
}
def __init__(self, *, image_reference=None, os_disk=None, data_disks=None, **kwargs) -> None:
super(StorageProfile, self).__init__(**kwargs)
self.image_reference = image_reference
self.os_disk = os_disk
self.data_disks = data_disks
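    # Minimal usage sketch (not part of the generated SDK file); the None and []
    # placeholders stand in for ImageReference/OSDisk/DataDisk instances:
    #   profile = StorageProfile(image_reference=None, os_disk=None, data_disks=[])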
|
junhuac/MQUIC
|
src/mojo/public/tools/manifest/manifest_collator.py
|
Python
|
mit
| 1,675
| 0.017313
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import shutil
import sys
import urlparse
def ParseJSONFile(filename):
with open(filename) as json_file:
try:
return json.load(json_file)
except ValueError:
print "%s is not a valid JSON document" % filename
return None
def main():
parser = argparse.ArgumentParser(
description="Collate Mojo application manifests.")
parser.add_argument("--parent")
parser.add_argument("--output")
parser.add_argument("--application-name")
  args, children = parser.parse_known_args()
parent = ParseJSONFile(args.parent)
if parent == None:
return 1
app_path = parent['name'].split(':')[1]
if app_path.startswith('//'):
    raise ValueError("Application name path component '%s' must not start " \
"with //" % app_path)
if args.application_name != app_path:
raise ValueError("Application name '%s' specified in build file does not " \
"match application name '%s' specified in manifest." %
(args.application_name, app_path))
applications = []
for child in children:
application = ParseJSONFile(child)
if application == None:
return 1
applications.append(application)
if len(applications) > 0:
parent['applications'] = applications
with open(args.output, 'w') as output_file:
json.dump(parent, output_file)
return 0
if __name__ == "__main__":
sys.exit(main())
|
kikov79/scalr
|
app/python/tests/scalrpytests/load_statistics_cleaner/steps.py
|
Python
|
apache-2.0
| 3,146
| 0.005086
|
from gevent import monkey
monkey.patch_all(subprocess=True)
import os
import sys
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.normpath(os.path.join(cwd, '../../..'))
sys.path.insert(0, scalrpy_dir)
scalrpytests_dir = os.path.join(cwd, '../..')
sys.path.insert(0, scalrpytests_dir)
import time
from gevent import pywsgi
from scalrpy.util import rpc
from scalrpy.util import helper
from scalrpy.load_statistics_cleaner import LoadStatisticsCleaner
from scalrpytests.steplib import lib
from scalrpytests.steplib.steps import *
from lettuce import step, before, after
class LoadStatisticsCleanerScript(lib.Script):
app_cls = LoadStatisticsCleaner
name = 'load_statistics_cleaner'
lib.ScriptCls = LoadStatisticsCleanerScript
@step(u"White Rabbit has (\d+) farms in database")
def fill_tables(step, count):
db = dbmanager.DB(lib.world.config['connections']['mysql'])
lib.world.farms_ids = list()
for i in range(int(count)):
while True:
farm_id = random.randint(1, 9999)
query = "SELECT id FROM farms WHERE id={0}".format(farm_id)
if bool(db.execute(query)):
continue
break
query = "INSERT INTO farms (id) VALUES ({0})".format(farm_id)
db.execute(query)
try:
os.makedirs(os.path.join(lib.world.config['rrd_dir'], helper.x1x2(farm_id), str(farm_id)))
except OSError as e:
if e.args[0] != 17:
raise
lib.world.farms_ids.append(farm_id)
time.sleep(1)
@step(u"White Rabbit has (\d+) farms for delete")
def create_folder(step, count):
lib.world.farms_ids_for_delete = list()
for i in range(int(count)):
while True:
farm_id_for_delete = random.randint(1, 9999)
try:
os.makedirs('%s/%s/%s' % (
lib.world.config['rrd_dir'],
helper.x1x2(farm_id_for_delete),
farm_id_for_delete)
)
lib.world.farms_ids_for_delete.append(farm_id_for_delete)
break
except OSError as e:
if e.args[0] != 17:
raise
try:
os.makedirs('%s/wrongfolder' % lib.world.config['rrd_dir'])
except OSError as e:
if e.args[0] != 17:
raise
try:
os.makedirs('%s/x1x6/wrongfolder' % lib.world.config['rrd_dir'])
except OSError as e:
if e.args[0] != 17:
raise
@step(u"White Rabbit sees right folders were deleted")
def check_folders(step):
for farm_id_for_delete in lib.world.farms_ids_for_delete:
assert not os.path.exists('%s/%s/%s' % (
lib.world.config['rrd_dir'],
helper.x1x2(farm_id_for_delete),
farm_id_for_delete)
)
@step(u"White Rabbit sees right folders were not deleted")
def check_folders(step):
for farm_id in lib.world.farms_ids:
assert os.path.exists('%s/%s/%s' % (
lib.world.config['rrd_dir'],
helper.x1x2(farm_id),
farm_id)
), farm_id
|
nextgis-extra/tests
|
lib_gdal/gdrivers/paux.py
|
Python
|
gpl-2.0
| 2,669
| 0.010116
|
#!/usr/bin/env python
###############################################################################
# $Id: paux.py 32163 2015-12-13 17:44:50Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read/write functionality for PAux driver.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2004, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Read test of simple byte reference data.
def paux_1():
tst = gdaltest.GDALTest( 'PAux', 'small16.raw', 2, 12816 )
return tst.testOpen()
###############################################################################
# Test copying.
def paux_2():
tst = gdaltest.GDALTest( 'PAux', 'byte.tif', 1, 4672)
return tst.testCreateCopy( check_gt = 1 )
###############################################################################
# Test /vsimem based.
def paux_3():
    tst = gdaltest.GDALTest( 'PAux', 'byte.tif', 1, 4672 )
return tst.testCreateCopy( vsimem = 1 )
###############################################################################
# Cleanup.
def paux_cleanup():
gdaltest.clean_tmp()
return 'success'
gdaltest_list = [
paux_1,
paux_2,
paux_3,
paux_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'paux' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
anatolikalysch/VMAttack
|
ui/__init__.py
|
Python
|
mit
| 31
| 0
|
__author__ = 'Anatoli Kalysch'
|
mrquim/mrquimrepo
|
script.skin.helper.service/resources/lib/main_module.py
|
Python
|
gpl-2.0
| 31,819
| 0.002326
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
script.skin.helper.service
Helper service and scripts for Kodi skins
mainmodule.py
All script methods provided by the addon
'''
import xbmc
import xbmcvfs
import xbmcgui
import xbmcaddon
from skinsettings import SkinSettings
from simplecache import SimpleCache
from utils import log_msg, KODI_VERSION
from utils import log_exception, get_current_content_type, ADDON_ID, recursive_delete_dir
from dialogselect import DialogSelect
from xml.dom.minidom import parse
from metadatautils import KodiDb, process_method_on_list
import urlparse
import sys
class MainModule:
'''mainmodule provides the script methods for the skinhelper addon'''
def __init__(self):
'''Initialization and main code run'''
self.win = xbmcgui.Window(10000)
self.addon = xbmcaddon.Addon(ADDON_ID)
self.kodidb = KodiDb()
self.cache = SimpleCache()
self.params = self.get_params()
log_msg("MainModule called with parameters: %s" % self.params)
action = self.params.get("action", "")
# launch module for action provided by this script
try:
getattr(self, action)()
except AttributeError:
log_exception(__name__, "No such action: %s" % action)
except Exception as exc:
log_exception(__name__, exc)
finally:
xbmc.executebuiltin("dialog.Close(busydialog)")
# do cleanup
self.close()
def close(self):
'''Cleanup Kodi Cpython instances on exit'''
self.cache.close()
del self.win
del self.addon
del self.kodidb
log_msg("MainModule exited")
@classmethod
def get_params(self):
'''extract the params from the called script path'''
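        # e.g. script arguments such as ['action=setview', 'viewid=50'] become
        # {'action': 'setview', 'viewid': '50'} (the values are illustrative)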
params = {}
for arg in sys.argv[1:]:
paramname = arg.split('=')[0]
paramvalue = arg.replace(paramname + "=", "")
paramname = paramname.lower()
if paramname == "action":
paramvalue = paramvalue.lower()
params[paramname] = paramvalue
return params
def deprecated_method(self, newaddon):
'''
used when one of the deprecated methods is called
print warning in log and call the external script with the same parameters
'''
action = self.params.get("action")
log_msg("Deprecated method: %s. Please call %s directly" % (action, newaddon), xbmc.LOGWARNING)
paramstring = ""
for key, value in self.params.iteritems():
paramstring += ",%s=%s" % (key, value)
if xbmc.getCondVisibility("System.HasAddon(%s)" % newaddon):
xbmc.executebuiltin("RunAddon(%s%s)" % (newaddon, paramstring))
else:
# trigger install of the addon
if KODI_VERSION > 16:
xbmc.executebuiltin("InstallAddon(%s)" % newaddon)
else:
xbmc.executebuiltin("RunPlugin(plugin://%s)" % newaddon)
@staticmethod
def musicsearch():
'''helper to go directly to music search dialog'''
xbmc.executebuiltin("ActivateWindow(Music)")
xbmc.executebuiltin("SendClick(8)")
def setview(self):
'''sets the selected viewmode for the container'''
xbmc.executebuiltin("ActivateWindow(busydialog)")
content_type = get_current_content_type()
if not content_type:
content_type = "files"
current_view = xbmc.getInfoLabel("Container.Viewmode").decode("utf-8")
view_id, view_label = self.selectview(content_type, current_view)
current_forced_view = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s)" % content_type)
if view_id is not None:
# also store forced view
if (content_type and current_forced_view and current_forced_view != "None" and
xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.ForcedViews.Enabled)")):
xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s,%s)" % (content_type, view_id))
xbmc.executebuiltin("Skin.SetString(SkinHelper.ForcedViews.%s.label,%s)" % (content_type, view_label))
self.win.setProperty("SkinHelper.ForcedView", view_id)
if not xbmc.getCondVisibility("Control.HasFocus(%s)" % current_forced_view):
xbmc.sleep(100)
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
xbmc.executebuiltin("SetFocus(%s)" % view_id)
else:
self.win.clearProperty("SkinHelper.ForcedView")
# set view
xbmc.executebuiltin("Container.SetViewMode(%s)" % view_id)
def selectview(self, content_type="other", current_view=None, display_none=False):
'''reads skinfile with all views to present a dialog
|
to choose from'''
cur_view_select_id = None
label = ""
all_views = []
if display_none:
listitem = xbmcgui.ListItem(label="None")
listitem.setProperty("id", "None")
            all_views.append(listitem)
# read the special skin views file
views_file = xbmc.translatePath('special://skin/extras/views.xml').decode("utf-8")
if xbmcvfs.exists(views_file):
doc = parse(views_file)
listing = doc.documentElement.getElementsByTagName('view')
itemcount = 0
for view in listing:
label = xbmc.getLocalizedString(int(view.attributes['languageid'].nodeValue))
viewid = view.attributes['value'].nodeValue
mediatypes = view.attributes['type'].nodeValue.lower().split(",")
if label.lower() == current_view.lower() or viewid == current_view:
cur_view_select_id = itemcount
if display_none:
cur_view_select_id += 1
if (("all" in mediatypes or content_type.lower() in mediatypes) and
(not "!" + content_type.lower() in mediatypes) and not
xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.view.Disabled.%s)" % viewid)):
image = "special://skin/extras/viewthumbs/%s.jpg" % viewid
listitem = xbmcgui.ListItem(label=label, iconImage=image)
listitem.setProperty("viewid", viewid)
listitem.setProperty("icon", image)
all_views.append(listitem)
itemcount += 1
dialog = DialogSelect("DialogSelect.xml", "", listing=all_views,
windowtitle=self.addon.getLocalizedString(32012), richlayout=True)
dialog.autofocus_id = cur_view_select_id
dialog.doModal()
result = dialog.result
del dialog
if result:
viewid = result.getProperty("viewid")
label = result.getLabel().decode("utf-8")
return (viewid, label)
else:
return (None, None)
# pylint: disable-msg=too-many-local-variables
def enableviews(self):
'''show select dialog to enable/disable views'''
all_views = []
views_file = xbmc.translatePath('special://skin/extras/views.xml').decode("utf-8")
richlayout = self.params.get("richlayout", "") == "true"
if xbmcvfs.exists(views_file):
doc = parse(views_file)
listing = doc.documentElement.getElementsByTagName('view')
for view in listing:
view_id = view.attributes['value'].nodeValue
label = xbmc.getLocalizedString(int(view.attributes['languageid'].nodeValue))
desc = label + " (" + str(view_id) + ")"
image = "special://skin/extras/viewthumbs/%s.jpg" % view_id
listitem = xbmcgui.ListItem(label=label, label2=desc, iconImage=image)
listitem.setProperty("viewid", view_id)
if not xbmc.getCondVisibility("Skin.HasSetting(SkinHelper.view.Disabled.%s)" % view_id):
listitem.select(selected=True)
excludefromd
|
dstegelman/rocket-python
|
rocketchat/calls/base.py
|
Python
|
mit
| 3,656
| 0
|
import logging
import pprint
import requests
logger = logging.getLogger(__name__)
class RocketChatBase(object):
settings = None
endpoint = None
headers = {}
method = 'get'
auth_token = None
auth_user_id = None
files = None
def __init__(self, settings=None, *args, **kwargs):
self.settings = settings
# Prepare for a call by fetching an Auth Token
self.set_auth_token()
self.set_auth_headers()
def set_auth_token(self):
        if self.settings.get('token') and self.settings.get('user_id'):
self.auth_token = self.settings.get('token')
self.auth_user_id = self.settings.get('user_id')
return
        url = '{domain}/api/v1/login'.format(
domain=self.settings['domain']
)
response = requests.post(url,
data={'user': self.settings['username'],
'password': self.settings['password']})
try:
self.auth_token = response.json()['data']['authToken']
self.auth_user_id = response.json()['data']['userId']
except KeyError:
response.raise_for_status()
def set_auth_headers(self):
self.headers['X-Auth-Token'] = self.auth_token
self.headers['X-User-Id'] = self.auth_user_id
def logoff(self):
url = '{domain}/api/v1/logout'.format(
domain=self.settings['domain']
)
requests.get(url, headers=self.headers)
def post_response(self, result):
return result
def build_endpoint(self, **kwargs):
"""
Build the endpoint for the user given some kwargs
from the initial calling.
:return:
"""
raise NotImplementedError()
def build_payload(self, **kwargs):
"""
Build a payload dict that will be passed directly to the
endpoint. If you need to pass this as plain text or whatever
you'll need to the dumping here.
:return:
"""
return None
def build_files(self, **kwargs):
"""
Build files
:param kwargs:
:return:
"""
return None
def call(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
timeout = kwargs.get('timeout', None)
url = '{domain}{endpoint}'.format(
domain=self.settings['domain'],
endpoint=self.build_endpoint(**kwargs)
)
result = requests.request(method=self.method, url=url,
data=self.build_payload(**kwargs),
headers=self.headers, timeout=timeout,
files=self.build_files(**kwargs))
request_data = {
'url': url,
'method': self.method,
'payload': self.build_payload(**kwargs),
'headers': self.headers,
'files': self.files
}
logger.debug('API Request - {request}'.format(
request=pprint.pformat(request_data)
))
result.raise_for_status()
self.logoff()
try:
logger.debug('API Response - {data}'.format(
data=pprint.pformat(result.json())
))
return self.post_response(result.json())
except Exception as e:
logger.error('RESTful {classname} call failed. {message}'.format(
classname=self.__class__.__name__, message=e),
exc_info=True)
raise e
class PostMixin(object):
method = 'post'
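# Sketch of how a concrete call is typically layered on RocketChatBase (the
# endpoint and query parameter here are illustrative, not part of this module):
# class ChannelInfo(RocketChatBase):
#     endpoint = '/api/v1/channels.info'
#     def build_endpoint(self, **kwargs):
#         return '{endpoint}?roomName={room}'.format(endpoint=self.endpoint,
#                                                     room=kwargs.get('room_name'))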
|
arantebillywilson/python-snippets
|
py3/abw-things/progress.py
|
Python
|
mit
| 399
| 0.002506
|
#!/usr/bin/python3
#
# progress.py
#
# Author: Billy Wilson Arante
# Created: 2016/02/05 PHT
# Modified:  2016/08/19 PHT
#
def progress():
"""Progress icon
The Python 3 version of loading_icon.py.
"""
while True:
for i in ["/", "-", "|", "\\", "|"]:
print("%s\r" % i, end=
|
"")
def main():
"""Main"""
progress()
if __name__ == "__main__":
main()
|
andialbrecht/sqlparse
|
sqlparse/cli.py
|
Python
|
bsd-3-clause
| 5,712
| 0
|
#!/usr/bin/env python
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
"""Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will
cause problems: the code will get executed twice:
- When you run `python -m sqlparse` python will execute
``__main__.py`` as a script. That means there won't be any
``sqlparse.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``sqlparse.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import argparse
import sys
from io import TextIOWrapper
import sqlparse
from sqlparse.exceptions import SQLParseError
# TODO: Add CLI Tests
# TODO: Simplify formatter by using argparse `type` arguments
def create_parser():
_CASE_CHOICES = ['upper', 'lower', 'capitalize']
parser = argparse.ArgumentParser(
prog='sqlformat',
description='Format FILE according to OPTIONS. Use "-" as FILE '
'to read from stdin.',
usage='%(prog)s [OPTIONS] FILE, ...',
)
parser.add_argument('filename')
parser.add_argument(
'-o', '--outfile',
dest='outfile',
metavar='FILE',
help='write output to FILE (defaults to stdout)')
parser.add_argument(
'--version',
action='version',
version=sqlparse.__version__)
group = parser.add_argument_group('Formatting Options')
group.add_argument(
'-k', '--keywords',
metavar='CHOICE',
dest='keyword_case',
choices=_CASE_CHOICES,
help='change case of keywords, CHOICE is one of {}'.format(
', '.join('"{}"'.format(x) for x in _CASE_CHOICES)))
group.add_argument(
'-i', '--identifiers',
metavar='CHOICE',
dest='identifier_case',
choices=_CASE_CHOICES,
help='change case of identifiers, CHOICE is one of {}'.format(
', '.join('"{}"'.format(x) for x in _CASE_CHOICES)))
group.add_argument(
'-l', '--language',
metavar='LANG',
dest='output_format',
choices=['python', 'php'],
help='output a snippet in programming language LANG, '
'choices are "python", "php"')
group.add_argument(
'--strip-comments',
dest='strip_comments',
action='store_true',
default=False,
help='remove comments')
group.add_argument(
'-r', '--reindent',
dest='reindent',
action='store_true',
default=False,
help='reindent statements')
    group.add_argument(
'--indent_width',
dest='indent_width',
default=2,
type=int,
        help='indentation width (defaults to 2 spaces)')
group.add_argument(
'--indent_after_first',
dest='indent_after_first',
action='store_true',
default=False,
help='indent after first line of statement (e.g. SELECT)')
group.add_argument(
'--indent_columns',
dest='indent_columns',
action='store_true',
default=False,
help='indent all columns by indent_width instead of keyword length')
group.add_argument(
'-a', '--reindent_aligned',
action='store_true',
default=False,
help='reindent statements to aligned format')
group.add_argument(
'-s', '--use_space_around_operators',
action='store_true',
default=False,
help='place spaces around mathematical operators')
group.add_argument(
'--wrap_after',
dest='wrap_after',
default=0,
type=int,
help='Column after which lists should be wrapped')
group.add_argument(
'--comma_first',
dest='comma_first',
default=False,
type=bool,
help='Insert linebreak before comma (default False)')
group.add_argument(
'--encoding',
dest='encoding',
default='utf-8',
help='Specify the input encoding (default utf-8)')
return parser
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
sys.stderr.write('[ERROR] {}\n'.format(msg))
return 1
def main(args=None):
parser = create_parser()
args = parser.parse_args(args)
if args.filename == '-': # read from stdin
wrapper = TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
try:
data = wrapper.read()
finally:
wrapper.detach()
else:
try:
with open(args.filename, encoding=args.encoding) as f:
data = ''.join(f.readlines())
except OSError as e:
return _error(
'Failed to read {}: {}'.format(args.filename, e))
close_stream = False
if args.outfile:
try:
stream = open(args.outfile, 'w', encoding=args.encoding)
close_stream = True
except OSError as e:
return _error('Failed to open {}: {}'.format(args.outfile, e))
else:
stream = sys.stdout
formatter_opts = vars(args)
try:
formatter_opts = sqlparse.formatter.validate_options(formatter_opts)
except SQLParseError as e:
return _error('Invalid options: {}'.format(e))
s = sqlparse.format(data, **formatter_opts)
stream.write(s)
stream.flush()
if close_stream:
stream.close()
return 0
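# Typical invocation of the 'sqlformat' entry point built on main() (the input
# file name is hypothetical):
#   sqlformat --reindent --keywords upper query.sql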
|
fiquett/SupportLogger
|
logs/models.py
|
Python
|
gpl-2.0
| 623
| 0.020867
|
import datetime
from django.db import models
from django.utils import timezone
class Log(models.Model):
entry = models.CharField(max_length=255)
comments = models.TextField()
device_name = models.CharField(max_length=200)
classification = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __unicode__(self):
return self.device_name + " " + self.classification
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
    was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
|
fernandomr/odoo-brazil-banking
|
l10n_br_account_banking_payment_cnab/wizard/payment_order_create.py
|
Python
|
gpl-3.0
| 5,746
| 0.000696
|
# -*- coding: utf-8 -*-
# ###########################################################################
#
# Author: Luis Felipe Mileo
# Fernando Marcato Rodrigues
# Daniel Sadamo Hirayama
# Copyright 2015 KMEE - www.kmee.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class PaymentOrderCreate(models.TransientModel):
_inherit = 'payment.order.create'
@api.multi
def extend_payment_order_domain(self, payment_order, domain):
super(PaymentOrderCreate, self).extend_payment_order_domain(
payment_order, domain)
if payment_order.mode.type.code == '240':
if payment_order.mode.payment_order_type == 'cobranca':
domain += [
('debit', '>', 0)
]
                # TODO: Refactor this
index = domain.index(('invoice.payment_mode_id', '=', False))
del domain[index - 1]
domain.remove(('invoice.payment_mode_id', '=', False))
index = domain.index(('date_maturity', '<=', self.duedate))
del domain[index - 1]
domain.remove(('date_maturity', '=', False))
                domain.remove(('date_maturity', '<=', self.duedate))
elif payment_order.mode.type.code == '400':
if payment_order.mode.payment_order_type == 'cobranca':
domain += [
                    ('debit', '>', 0),
('account_id.type', '=', 'receivable'),
'&',
('payment_mode_id', '=', payment_order.mode.id),
'&',
('invoice.state', '=', 'open'),
('invoice.fiscal_category_id.property_journal.revenue_expense', '=', True)
]
                # TODO: Refactor this
                # TODO: domain for the move_line state.
# index = domain.index(('invoice.payment_mode_id', '=', False))
# del domain[index - 1]
# domain.removemove(('invoice.payment_mode_id', '=', False))
# index = domain.index(('date_maturity', '<=', self.duedate))
# del domain[index - 1]
# domain.remove(('date_maturity', '=', False))
# domain.remove(('date_maturity', '<=', self.duedate))
elif payment_order.mode.type.code == '500':
if payment_order.mode.payment_order_type == 'payment':
domain += [
'&', ('credit', '>', 0),
('account_id.type', '=', 'payable')
]
# index = domain.index(('invoice.payment_mode_id', '=', False))
# del domain[index - 1]
# domain.remove(('invoice.payment_mode_id', '=', False))
# index = domain.index(('date_maturity', '<=', self.duedate))
# del domain[index - 1]
# domain.remove(('date_maturity', '=', False))
# domain.remove(('date_maturity', '<=', self.duedate))
index = domain.index(('account_id.type', '=', 'receivable'))
del domain[index - 1]
domain.remove(('account_id.type', '=', 'receivable'))
return True
@api.multi
def _prepare_payment_line(self, payment, line):
res = super(PaymentOrderCreate, self)._prepare_payment_line(
payment, line)
# res['communication2'] = line.payment_mode_id.comunicacao_2
res['percent_interest'] = line.payment_mode_id.cnab_percent_interest
if payment.mode.type.code == '400':
# write bool to move_line to avoid it being added on cnab again
self.write_cnab_rejected_bool(line)
return res
@api.multi
def filter_lines(self, lines):
""" Filter move lines before proposing them for inclusion
in the payment order.
This implementation filters out move lines that are already
included in draft or open payment orders. This prevents the
user to include the same line in two different open payment
orders. When the payment order is sent, it is assumed that
the move will be reconciled soon (or immediately with
account_banking_payment_transfer), so it will not be
proposed anymore for payment.
See also https://github.com/OCA/bank-payment/issues/93.
:param lines: recordset of move lines
:returns: list of move line ids
"""
self.ensure_one()
payment_lines = self.env['payment.line'].\
search([('order_id.state', 'in', ('draft', 'open', 'done')),
('move_line_id', 'in', lines.ids)])
        # If it has already been exported and its cnab_rejected flag is True,
        # it can be added again
to_exclude = set([l.move_line_id.id for l in payment_lines
if not l.move_line_id.is_cnab_rejected])
return [l.id for l in lines if l.id not in to_exclude]
@api.multi
def write_cnab_rejected_bool(self, line):
line.write({'is_cnab_rejected': False})
|
KelSolaar/sIBL_GUI
|
sibl_gui/components/addons/online_updater/remote_updater.py
|
Python
|
gpl-3.0
| 31,337
| 0.003893
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**remote_updater.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines the :class:`RemoteUpdater` class and others online update related objects.
**Others:**
"""
from __future__ import unicode_literals
import os
import platform
from PyQt4.QtCore import QByteArray
from PyQt4.QtCore import QString
from PyQt4.QtCore import QUrl
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAbstractItemView
from PyQt4.QtGui import QColor
from PyQt4.QtGui import QDesktopServices
from PyQt4.QtGui import QFileDialog
from PyQt4.QtGui import QMessageBox
from PyQt4.QtGui import QPalette
from PyQt4.QtGui import QPixmap
from PyQt4.QtGui import QTableWidgetItem
import foundations.data_structures
import foundations.exceptions
import foundations.io
import foundations.ui.common
import foundations.verbose
import umbra.ui.common
import umbra.ui.widgets.message_box as message_box
from foundations.pkzip import Pkzip
from sibl_gui.components.addons.online_updater.download_manager import DownloadManager
from sibl_gui.components.addons.online_updater.views import TemplatesReleases_QTableWidget
from umbra.globals.constants import Constants
from umbra.globals.runtime_globals import RuntimeGlobals
from umbra.ui.widgets.variable_QPushButton import Variable_QPushButton
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["LOGGER", "ReleaseObject", "RemoteUpdater"]
LOGGER = foundations.verbose.install_logger()
UI_FILE = os.path.join(os.path.dirname(__file__), "ui", "Remote_Updater.ui")
class ReleaseObject(foundations.data_structures.Structure):
"""
Defines a storage object for a :class:`RemoteUpdater` class release.
"""
def __init__(self, **kwargs):
"""
Initializes the class.
:param kwargs: name, repository_version, local_version, type, url, comment.
:type kwargs: dict
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
foundations.data_structures.Structure.__init__(self, **kwargs)
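        # Illustrative only: ReleaseObject(name="sIBL_GUI", repository_version="x.y.z",
        #                                  local_version="x.y.w", type="Application",
        #                                  url="...", comment="")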
class RemoteUpdater(foundations.ui.common.QWidget_factory(ui_file=UI_FILE)):
"""
| Defines the Application remote updater.
| The remote updater is initialized with a list of available online releases
( List of :class:`ReleaseObject` class instances ).
"""
def __init__(self, parent, releases=None, *args, **kwargs):
"""
        Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param releases: Releases.
:type releases: dict
:param \*args: Arguments.
:type \*args: \*
:param \*\*kwargs: Keywords arguments.
:type \*\*kwargs: \*\*
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
super(RemoteUpdater, self).__init__(parent, *args, **kwargs)
# --- Setting class attributes. ---
self.__container = parent
self.__releases = None
self.releases = releases
self.__ui_resources_directory = "resources/"
self.__ui_resources_directory = os.path.join(os.path.dirname(__file__), self.__ui_resources_directory)
self.__ui_logo_image = "sIBL_GUI_Small_Logo.png"
self.__ui_templates_image = "Templates_Logo.png"
self.__ui_light_gray_color = QColor(240, 240, 240)
self.__ui_dark_gray_color = QColor(160, 160, 160)
self.__splitter = "|"
self.__view = None
self.__headers = ["data",
"Get it!",
"Local version",
"Repository version",
"Release type",
"Comment"]
self.__application_changes_url = "http://kelsolaar.hdrlabs.com/sIBL_GUI/Changes/Changes.html"
self.__repository_url = "http://kelsolaar.hdrlabs.com/?dir=./sIBL_GUI/Repository"
self.__download_manager = None
self.__network_access_manager = self.__container.network_access_manager
RemoteUpdater.__initialize_ui(self)
@property
def container(self):
"""
Property for **self.__container** attribute.
:return: self.__container.
:rtype: QObject
"""
return self.__container
@container.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def container(self, value):
"""
Setter for **self.__container** attribute.
:param value: Attribute value.
:type value: QObject
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "container"))
@container.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def container(self):
"""
Deleter for **self.__container** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "container"))
@property
def releases(self):
"""
Property for **self.__releases** attribute.
:return: self.__releases.
:rtype: dict
"""
return self.__releases
@releases.setter
@foundations.exceptions.handle_exceptions(AssertionError)
def releases(self, value):
"""
Setter for **self.__releases** attribute.
:param value: Attribute value.
:type value: dict
"""
if value is not None:
assert type(value) is dict, "'{0}' attribute: '{1}' type is not 'dict'!".format("releases", value)
for key, element in value.iteritems():
assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"variables", key)
assert type(element) is ReleaseObject, "'{0}' attribute: '{1}' type is not 'ReleaseObject'!".format(
"variables", element)
self.__releases = value
@releases.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def releases(self):
"""
Deleter for **self.__releases** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "releases"))
@property
def ui_resources_directory(self):
"""
Property for **self.__ui_resources_directory** attribute.
:return: self.__ui_resources_directory.
:rtype: unicode
"""
return self.__ui_resources_directory
@ui_resources_directory.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_resources_directory(self, value):
"""
Setter for **self.__ui_resources_directory** attribute.
:param value: Attribute value.
:type value: unicode
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "ui_resources_directory"))
@ui_resources_directory.deleter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
def ui_resources_directory(self):
"""
Deleter for **self.__ui_resources_directory** attribute.
"""
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "ui_resources_directory"))
@property
def ui_logo_image(self):
"""
Property for **self.__ui_logo_image** attribute.
:return: self.__ui_logo_image.
:rtype: unicode
"""
return self.__ui_logo_image
@ui_logo_image.setter
@foundations.exceptions.handle_exceptions(foundations.exceptions.ProgrammingError)
|
b29308188/Deep-Generative-Model-for-Text-Data
|
src/basic_sentiment.py
|
Python
|
mit
| 8,591
| 0.014317
|
import sys
import re
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, TimeDistributed, Dropout, Activation, LSTM, BatchNormalization
from keras.optimizers import RMSprop, Adam
from keras.layers.advanced_activations import LeakyReLU
from keras.utils import np_utils
#from seq2seq.models import SimpleSeq2Seq
from gensim import models, matutils
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
import theano
theano.config.openmp = True
from sklearn.linear_model import LogisticRegression
#OMP_NUM_THREADS=24 python sentiment.py
def read_data(file_path, w2v_model, max_len = 300, hidden_dim = 32):
X = []
with open(file_path, "r") as f:
for line in f:
x = np.zeros(hidden_dim)
tokens = line.strip().split()
index = 0
for token in map(normalize, tokens):
if token in w2v_model:
x += w2v_model[token]
index += 1
if index >= max_len:
break
x /= index
X.append(x)
return np.array(X)
def normalize(word):
word = re.sub(r'[^\w\s]' , "", word.decode("utf-8"), re.UNICODE)
word = word.lower()
return word
class Generator:
def __init__(self, max_len = 300, hidden_dim = 32):
self.max_len = max_len
self.hidden_dim = hidden_dim
self.G = self.build_generative_model()
self.D = self.build_discriminative_model()
self.GAN = self.build_GAN(self.G, self.D)
self.G.compile(loss = "mse", optimizer = Adam(lr=0.0001))
self.GAN.compile(loss = "categorical_crossentropy", optimizer = Adam(lr=0.0001))
self.D.trainable = True
self.D.compile(loss = "categorical_crossentropy", optimizer = Adam(lr=0.0001))
def build_generative_model(self):
G = Sequential()
G.add(Dense(300, input_dim = self.hidden_dim, activation = "relu"))
#G.add(BatchNormalization())
G.add(Dense(300, activation = "relu"))
G.add(Dense(300, activation = "relu"))
G.add(Dense(300, activation = "relu"))
G.add(Dense(self.hidden_dim))
return G
def build_discriminative_model(self):
D = Sequential()
D.add(Dense(300, input_dim = self.hidden_dim, activation ="relu"))
D.add(Dense(300, activation = "relu"))
D.add(Dense(300, activation = "relu"))
D.add(Dense(2, activation = "softmax"))
return D
def build_GAN(self, G, D):
GAN = Sequential()
GAN.add(G)
D.trainable = False
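        # freeze the discriminator inside the combined model so that training
        # the GAN updates only the generator (D is re-enabled and compiled
        # separately in __init__)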
GAN.add(D)
return GAN
def generate_noise(self, shape):
return np.random.uniform(-1, 1, size = shape)
def pre_trainG(self, X, batch_size = 128):
print "Pre-train G ..."
L = []
for index in range(0, len(X), batch_size):
batch = X[index:index+batch_size]
noise = self.generate_noise(batch.shape)
loss = self.G.train_on_batch(noise, batch)
L.append(loss)
print "loss = %f" % np.mean(loss)
def pre_trainD(self, X, batch_size = 128):
print "Pre-train
|
D"
L = [
|
]
for index in range(0, len(X), batch_size):
batch = X[index:index+batch_size]
noise = self.generate_noise(batch.shape)
gen_batch = self.G.predict(noise)
Y = [1]*len(batch) + [0]*len(batch)
Y = np_utils.to_categorical(Y, nb_classes = 2)
combined_batch = np.concatenate((batch, gen_batch))
loss = self.D.train_on_batch(combined_batch, Y)
L.append(loss)
print "loss = %f" % np.mean(loss)
def train(self, X, batch_size = 128):
G_loss = []
D_loss = []
for index in range(0, len(X), batch_size):
batch = X[index:index+batch_size]
Y = [1]*len(batch) + [0]*len(batch)
Y = np_utils.to_categorical(Y, nb_classes = 2)
noise = self.generate_noise(batch.shape)
gen_batch = self.G.predict(noise)
combined_batch = np.concatenate((batch, gen_batch))
d_loss = self.D.train_on_batch(combined_batch, Y)
noise = self.generate_noise(batch.shape)
g_loss = self.GAN.train_on_batch(noise, np_utils.to_categorical([1]*len(batch), nb_classes = 2))
G_loss.append(g_loss)
D_loss.append(d_loss)
print "d_loss = %f, gan_loss = %f" %(np.mean(D_loss), np.mean(G_loss))
def generate(self, shape):
noise = self.generate_noise(shape)
gen_X = self.G.predict(noise)
return gen_X
def save(self, file_path):
self.G.save_weights(file_path+".G.h5")
self.D.save_weights(file_path+ ".D.h5")
self.GAN.save_weights(file_path+ ".GAN.h5")
def load(self, file_path):
self.G.load_weights(file_path+".G.h5")
self.D.load_weights(file_path+ ".D.h5")
self.GAN.load_weights(file_path+ ".GAN.h5")
class Classifier:
def __init__(self, max_len = 300, hidden_dim = 32):
self.CLF = LogisticRegression()
def train(self, X, Y):
#X, Y = shuffle(X, Y)
self.CLF.fit(X, Y)
#print X
#print self.CLF.score(X, Y)
#print self.CLF.coef_
def evaluate(self, testX, testY):
#print testX
#print self.CLF.predict(testX)
print "Accuracy on testing data :", self.CLF.score(testX, testY)
def save(self, file_path):
pass
def load(self, file_path):
pass
if __name__ == "__main__":
#RNN
np.random.seed(0)
print "Loading word vectors ..."
w2v_model = models.Word2Vec.load("../models/word2vec.mod")
print "Reading text data ..."
trainX_pos = read_data("../data/train-pos.small", w2v_model)
trainX_neg = read_data("../data/train-neg.small", w2v_model)
testX_pos = read_data("../data/test-pos.small", w2v_model)
testX_neg = read_data("../data/test-neg.small", w2v_model)
#scaler = MinMaxScaler(feature_range = (0, 1))
#trainX_pos = np.ones((500,32))*1.5
#trainX_neg = -np.ones((500,32))*1.5
#testX_pos = np.ones((500,32))*1.5
#testX_neg = -np.ones((500,32))*1.5
"""
X = np.concatenate((trainX_pos, trainX_neg, testX_pos, testX_neg))
scaler.fit(X)
trainX_pos = scaler.transform(trainX_pos)
trainX_neg = scaler.transform(trainX_neg)
testX_pos = scaler.transform(testX_pos)
testX_neg = scaler.transform(testX_neg)
"""
trainX = np.vstack((trainX_pos, trainX_neg))
trainY = [1]*len(trainX_pos)+ [0]*len(trainX_neg)
testX = np.vstack((testX_pos, testX_neg))
testY = [1]*len(testX_pos)+ [0]*len(testX_neg)
print len(testX)
print "Building the pos generative model..."
pos_gan = Generator()
"""
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
pos_gan.pre_trainG(trainX_pos)
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
pos_gan.pre_trainD(trainX_pos)
"""
#print "Training ..."
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
pos_gan.train(trainX_pos)
posX = pos_gan.generate((50, 32))
pos_gan.save("../models/pos_basic_32")
pos_gan.load("../models/pos_basic_32")
print "Building the neg generative model..."
neg_gan = Generator()
"""
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
neg_gan.pre_trainG(trainX_neg)
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
neg_gan.pre_trainD(trainX_neg)
"""
print "Training ..."
for epoch in range(1, 30):
print "==========Epoch %d===========" % (epoch)
neg_gan.train(trainX_neg)
negX = neg_gan.generate((50, 32))
neg_gan.save("../models/neg_basic_32")
neg_gan.load("../models/neg_basic_32")
sample_trainX = np.vstack((posX, negX))
sample_trainX.dump("../sampleX.np")
sample_trainY = [1]*len(posX) + [0]*len(negX)
print "Building the basic classifier ..."
|
rigdenlab/SIMBAD
|
simbad/util/tests/test_util.py
|
Python
|
bsd-3-clause
| 2,611
| 0.000766
|
"""Test functions for simbad.util.pdb_util"""
__author__ = "Adam Simpkin"
__date__ = "19 Jan 2018"
import os
import tempfile
import unittest
import simbad.util
class Test(unittest.TestCase):
"""Unit test"""
def test_result_by_score_from_csv_1(self):
"""Test case for simbad.util.result_by_score_from_csv"""
csv_temp_file = tempfile.NamedTemporaryFile("w", delete=False)
csv_temp_file.write(
"""pdb_code,alt,a,b,c,alpha,beta,gamma,length_penalty,angle_penalty,total_penalty,volume_difference,probability_score
1DTX, ,23.15,39.06,73.53,90.0,90.0,90.0,0.418,0.0,0.418,398.847,0.842"""
)
csv_temp_file.close()
data = simbad.util.result_by_score_from_csv(csv_temp_file.name, "total_penalty")
reference_data = ["1DTX", 0.41799999999999998]
self.assertEqual(data, reference_data)
def test_result_by_score_from_csv_2(self):
"""Test case for simbad.util.result_by_score_from_csv"""
csv_temp_file = tempfile.NamedTemporaryFile("w", delete=False)
csv_temp_file.write(
"""pdb_code,ALPHA,BETA,GAMMA,CC_F,RF_F,CC_I,CC_P,Icp,CC_F_Z_score,CC_P_Z_score,Number_of_rotation_searches_producing_peak
2fbb,21.63,81.88,296.6,14.1,56.2,16.5,18.6,1.0,11.6,8.6,5.0
1f10,34.27,90.0,116.04,13.0,57.1,16.4,14.2,1.0,9.0,7.0,5.0
4w94,29.28,85.42,245.3,12.9,57.2,15.2,10.8,1.0,8.9,7.1,5.0
1xei,38.87,78.75,65.8,12.3,58.0,15.4,13.9,1.0,8.1,6.6,5.0
2z18,27.6,87.35,247.57,12.3,57.5,15.3,12.5,1.0,7.8,6.1,5.0
1ps5,33.92,86.37,67.25,12.6,57.3,15.6,14.8,1.0,7.7,7.4,5.0
1v7s,34.18,87.8,66.84,12.5,57.4,15.7,12.6,1.0,7.6,6.7,5.0
2vb1,37.1,85.56,66.78,12.1,57.3,16.2,12.3,1.0,7.6,6.6,5.0
4yeo,35.02,82.52,67.02,11.8,57.2,15.5,13.8,1.0,7.6,6.7,5.0
2b5z,1.4,38.12,229.38,12.4,57.9,15.4,10.4,1.0,7.6,6.5,5.0
1ykz,26.43,88.72,247.05,12.6,57.5,15.4,11.9,1.0,7.6,6.5,5.0
4xjf,26.78,88.44,245.77,12.9,57.8,15.4,12.7,1.0,7.6,6.5,5.0
2d4j,37.18,84.17,66.64,12.4,57.7,16.1,12.8,1.0,7.5,6.0,5.0
4p2e,29.05,83.8,246.58,12.5,56.9,15.4,12.1,1.0,7.5,7.1,5.0
3wvx,35.67,85.1,67.1,12.6,57.1,15.1,13.0,1.0,7.4,6.4,5.0
2x0a,28.59,85.11,245.89,12.3,57.4,14.8,11.4,1.0,7.4,6.5,5.0
2z19,38.05,79.03,64.98,11.8,57.7,15.9,12.8,1.0,7.1,5.9,5.0
1jj1,28.99,82.92,245.93,12.5,57.3,15.6,11.3,1.0,7.0,5.9,5.0
4j7v,28.54,86.74,246.59,12.0,57.4,14.2,10.2,1.0,7.0,5.7,5.0
2pc2,28.71,76.6,257.7,10.8,57.7,13.4,8.0,1.0,6.7,5.3,5.0"""
)
csv_temp_file.close()
data = simbad.util.result_by_score_from_csv(csv_temp_file.name, "CC_F_Z_score")
reference_data = ["2fbb", 11.6]
self.assertEqual(data, reference_data)
|
idlesign/uwsgiconf
|
uwsgiconf/options/subscriptions_algos.py
|
Python
|
bsd-3-clause
| 863
| 0
|
from ..base import ParametrizedValue
class BalancingAlgorithm(ParametrizedValue):
name_se
|
parator = ''
class BalancingAlgorithmWithBackup(BalancingAlgorithm):
def __init__(self, backup_level=None):
self.backup_level = backup_level
super().__init__()
class WeightedRoundRobin(BalancingAlgorithmWithBackup):
"""Weighted round robin algorithm with backup support.
The default algori
|
thm.
"""
name = 'wrr'
class LeastReferenceCount(BalancingAlgorithmWithBackup):
"""Least reference count algorithm with backup support."""
name = 'lrc'
class WeightedLeastReferenceCount(BalancingAlgorithmWithBackup):
"""Weighted least reference count algorithm with backup support."""
name = 'wlrc'
class IpHash(BalancingAlgorithmWithBackup):
"""IP hash algorithm with backup support."""
name = 'iphash'
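# Illustrative sketch, not part of the original module: assuming these values are
# consumed by uwsgiconf's subscription options, a weighted round robin algorithm
# with one backup level could be expressed as
#   WeightedRoundRobin(backup_level=1)
# while the parameterless form WeightedRoundRobin() keeps the default behaviour.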
|
xylsxyls/xueyelingshuang
|
src/storageMysql/scripts/rebuild_storage.py
|
Python
|
mit
| 13,961
| 0.008547
|
#!python3
# -*- coding:utf-8 -*-
import os
import sys
import time
import ctypes
import shutil
import subprocess
IsPy3 = sys.version_info[0] >= 3
if IsPy3:
import winreg
else:
import codecs
import _winreg as winreg
BuildType = 'Release'
IsRebuild = True
Build = 'Rebuild'
Update = False
Copy = False
CleanAll = False
BuildTimeout = 30*60
MSBuild = None
IncrediBuild = None
UseMSBuild = True  # build with MSBuild by default; if False, build with IncrediBuild
# Only the following 5 variables need to be changed for a different project
SlnFile = '../storage.sln'  # path relative to this .py script
UpdateDir = []  # paths relative to this script; leave empty to skip updating code
ExecBatList = []  # scripts (relative to this script) run before building; may be empty; each .bat is run after cd'ing into its directory
MSBuildFirstProjects = [r'storage']  # for MSBuild, use the project paths as they appear in the solution (.sln)
# projects MSBuild should build first; leave empty for no particular order
IncrediBuildFirstProjects = ['storage']  # IncrediBuild only needs the project names
# projects IncrediBuild should build first; leave empty for no particular order
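# Illustrative example, not part of the original script: for a hypothetical
# solution "demo.sln" the five variables above might be filled in as
#   SlnFile = '../demo.sln'
#   UpdateDir = ['../src']
#   ExecBatList = ['../tools/prebuild.bat']
#   MSBuildFirstProjects = [r'core\demo']
#   IncrediBuildFirstProjects = ['demo']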
class ConsoleColor():
'''This class defines the values of color for printing on console window'''
Black = 0
DarkBlue = 1
DarkGreen = 2
DarkCyan = 3
DarkRed = 4
DarkMagenta = 5
DarkYellow = 6
Gray = 7
DarkGray = 8
Blue = 9
Green = 10
Cyan = 11
Red = 12
Magenta = 13
Yellow = 14
White = 15
class Coord(ctypes.Structure):
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short),
]
class ConsoleScreenBufferInfo(ctypes.Structure):
_fields_ = [('dwSize', Coord),
('dwCursorPosition', Coord),
('wAttributes', ctypes.c_uint),
('srWindow', SmallRect),
('dwMaximumWindowSize', Coord),
]
class Win32API():
'''Some native methods for python calling'''
StdOutputHandle = -11
ConsoleOutputHandle = None
DefaultColor = None
@staticmethod
def SetConsoleColor(color):
'''Change the text color on console window'''
if not Win32API.DefaultColor:
if not Win32API.ConsoleOutputHandle:
Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
bufferInfo = ConsoleScreenBufferInfo()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)
@staticmethod
def ResetConsoleColor():
'''Reset the default text color on console window'''
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
LogFile = '@AutomationLog.txt'
LineSep = '\n'
@staticmethod
def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
if printToStdout:
isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
if isValidColor:
Win32API.SetConsoleColor(consoleColor)
try:
sys.stdout.write(log)
except UnicodeError as e:
Win32API.SetConsoleColor(ConsoleColor.Red)
isValidColor = True
sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
if isValidColor:
Win32API.ResetConsoleColor()
if not writeToFile:
return
if IsPy3:
logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
else:
logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
try:
logFile.write(log)
# logFile.flush() # need flush in python 3, otherwise log won't be saved
except Exception as ex:
logFile.close()
sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
@staticmethod
def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)
@staticmethod
def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
t = time.localtime()
log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
Logger.Write(log, consoleColor, writeToFile, printToStdout)
@staticmethod
def DeleteLog():
if os.path.exists(Logger.LogFile):
os.remove(Logger.LogFile)
def GetMSBuildPath():
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
ftemp = open('GetMSBuildPath.bat', 'wt')
ftemp.write(cmd)
ftemp.close()
p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
p.wait()
lines = p.stdout.read().decode().splitlines()
os.remove('GetMSBuildPath.bat')
for line in lines:
if 'MSBuild.exe' in line:
return line
def GetIncrediBuildPath():
try:
key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command')
value, typeId = winreg.QueryValueEx(key, '')
if value:
start = value.find('"')
end = value.find('"', start + 1)
path = value[start+1:end]
buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
return buildConsole
|
except FileNotFoundError as e:
Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
def UpdateCode():
# put git to path first
if not shutil.which('git.exe'):
        Logger.Log('Cannot find git.exe. Please make sure the git\\bin directory was added to the PATH environment variable when git was installed!!!\nSkipping code update!!!', ConsoleColor.Yellow)
        return False
oldDir =
|
os.getcwd()
for dir in UpdateDir:
os.chdir(dir)
ret = os.system('git pull')
os.chdir(oldDir)
if ret != 0:
Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)
            return False
return True
def BuildProject(cmd):
for i in range(6):
Logger.WriteLine(cmd, ConsoleColor.Cyan)
buildFailed = True
startTime = time.time()
        p = subprocess.Popen(cmd)  # IncrediBuild cannot use stdout=subprocess.PIPE, otherwise p.wait() never returns; probably an IncrediBuild bug
if IsPy3:
try:
buildFailed = p.wait(BuildTimeout)
except subprocess.TimeoutExpired as e:
Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
p.kill()
else:
buildFailed = p.wait()
if not UseMSBuild:
            # IncrediBuild's return code does not indicate whether the build succeeded; the output has to be parsed instead
fin = open('IncrediBuild.log')
for line in fin:
if line.startswith('=========='):
Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
if IsPy3:
start = line.find('失败') + 3 #========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 ==========
else:#为了兼容py2做的特殊处理,很恶心
start = 0
n2 = 0
while 1:
if line[start].isdigit():
|
hippke/Pulsar-HabCat
|
matchATNF-full.py
|
Python
|
mit
| 3,210
| 0.002181
|
"""Compare Pulsar and HabCat coordinates"""
import csv
import astropy.units as u
from astropy.coordinates import SkyCoord, Angle
from astropy import coordinates as coord
def flipra(coordinate):
"""Flips RA coordinates by 180 degrees"""
coordinate = coordinate + 180
if coordinate > 360:
coordinate = coordinate - 360
return coordinate
def flipde(coordinate):
"""Flips RA coordinates by 90 degrees"""
return coordinate * (-1.)
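# Illustrative sanity check, not part of the original script: flipping both
# coordinates maps a position to its antipode, e.g.
#   flipra(10.0) -> 190.0, flipra(350.0) -> 170.0, flipde(-45.0) -> 45.0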
# Load Pulsar catalogue
pulsar_id = []
pulsar_ra = []
pulsar_de = []
pulsar_period = []
with open('pulsar.csv', 'r') as csvfile:
dataset = csv.reader(csvfile, delimiter=';')
for row in dataset:
pulsar_id.append(row[0])
ra = coord.Angle(row[1], unit=u.hour) # Define as hours
pulsar_ra.append(ra.degree) # Convert to degree
de = coord.Angle(row[2], unit=u.deg)
pulsar_de.append(de.degree)
pulsar_period.append(row[3])
print(len(pulsar_id), 'Pulsar datalines loaded')
# Load HabCat
habcat_id = []
habcat_ra = []
habcat_de = []
with open('habcat.csv', 'r') as csvfile:
dataset = csv.reader(csvfile, delimiter=';')
for row in dataset:
habcat_id.append(row[0])
ra = coord.Angle(row[1], unit=u.hour) # Define as hours
habcat_ra.append(ra.degree) # Convert to degree
de = coord.Angle(row[2], unit=u.deg)
habcat_de.append(de.degree)
print(len(habcat_id), 'HabCat datalines loaded')
# Nested loop through all Pulsars to find closest 180deg HabCat for each
for currentpulsar in range(len(pulsar_id)): # Pulsar loop
shortest_distance = 180 * 60 # set to max, in arcminutes
|
for currenthabcat in range(len(habcat_id)): # HabCat loop
# Correct calculat
|
ion is very slow, thus only try the best candidates:
if (abs(habcat_ra[currenthabcat] -
flipra(pulsar_ra[currentpulsar])) < 5.
and abs(habcat_de[currenthabcat] -
flipde(pulsar_de[currentpulsar])) < 5.):
habcat_coordinate = SkyCoord(
habcat_ra[currenthabcat],
habcat_de[currenthabcat],
unit="deg")
pulsar_coordinate_flipped = SkyCoord( # flip pulsar coordinates
flipra(pulsar_ra[currentpulsar]),
flipde(pulsar_de[currentpulsar]),
unit="deg")
distance = pulsar_coordinate_flipped.separation(habcat_coordinate)
if distance.arcminute < shortest_distance:
shortest_distance = distance.arcminute # New best found
bestfit_pulsar_id = pulsar_id[currentpulsar]
bestfit_habcat_id = habcat_id[currenthabcat]
bestfit_pulsar_period = pulsar_period[currentpulsar]
print(currentpulsar, bestfit_pulsar_id, bestfit_habcat_id, shortest_distance / 60.) # deg
with open('result-atnf-full.csv', 'a') as fp: # Append each result to CSV
a = csv.writer(fp, delimiter=',')
a.writerow([
bestfit_pulsar_id,
bestfit_habcat_id,
shortest_distance / 60., # arcmin
bestfit_pulsar_period])
print('Done.')
|
vuolter/pyload
|
src/pyload/plugins/downloaders/UpleaCom.py
|
Python
|
agpl-3.0
| 2,715
| 0.001842
|
# -*- coding: utf-8 -*-
import re
import urllib.parse
from ..base.simple_downloader import SimpleDownloader
def decode_cloudflare_email(value):
email = ""
key = int(value[:2], 16)
for i in range(2, len(value), 2):
email += chr(int(value[i : i + 2], 16) ^ key)
return email
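# Illustrative round-trip, not part of the original plugin: the key is the first
# byte and every following byte is the plaintext character XORed with that key.
#   encoded = "2a" + "".join("%02x" % (ord(c) ^ 0x2a) for c in "a@b.c")
#   decode_cloudflare_email(encoded)  # -> "a@b.c"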
class UpleaCom(SimpleDownloader):
__name__ = "UpleaCom"
__type__ = "downloader"
__version__ = "0.21"
__status__ = "testing"
__pattern__ = r"https?://(?:www\.)?uplea\.com/dl/\w{15}"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Uplea.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [("Redleon", None), ("GammaC0de", None)]
PLUGIN_DOMAIN = "uplea.com"
SIZE_REPLACEMENTS = [
("ko", "KiB"),
("mo", "MiB"),
("go", "GiB"),
("Ko", "KiB"),
("Mo", "MiB"),
("Go", "GiB"),
]
NAME_PATTERN = r'<span class="gold-text">(?P<N>.+?)</span>'
SIZE_PATTERN = (
r'<span class="label label-info agmd">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
)
OFFLINE_PATTERN = r">You followed an invalid or expired l
|
ink"
LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
PREMIUM_ONLY_PATTERN = (
r"You need to have a Premium subscription to download this file"
)
WAIT_PATTERN = r"timeText: ?(\d+),"
STEP_PATTERN = r'<a href="(/step/.+)">'
NAME_REPLACEMENTS = [
(
r'(<a class="_
|
_cf_email__" .+? data-cfemail="(\w+?)".+)',
lambda x: decode_cloudflare_email(x.group(2)),
)
]
def setup(self):
self.multi_dl = False
self.chunk_limit = 1
self.resume_download = True
def handle_free(self, pyfile):
m = re.search(self.STEP_PATTERN, self.data)
if m is None:
self.error(self._("STEP_PATTERN not found"))
self.data = self.load(urllib.parse.urljoin("http://uplea.com/", m.group(1)))
m = re.search(self.WAIT_PATTERN, self.data)
if m is not None:
self.wait(m.group(1), True)
self.retry()
m = re.search(self.LINK_PATTERN, self.data)
if m is None:
self.error(self._("LINK_PATTERN not found"))
self.link = m.group(1)
m = re.search(r".ulCounter\({'timer':(\d+)}\)", self.data)
if m is not None:
self.wait(m.group(1))
|
hackebrot/pytest
|
src/_pytest/doctest.py
|
Python
|
mit
| 18,770
| 0.001332
|
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import platform
import sys
import traceback
from contextlib import contextmanager
import pytest
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest.compat import safe_getattr
from _pytest.fixtures import FixtureRequest
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
DOCTEST_REPORT_CHOICES = (
DOCTEST_REPORT_CHOICE_NONE,
DOCTEST_REPORT_CHOICE_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF,
DOCTEST_REPORT_CHOICE_UDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)
# Lazy definition of runner class
RUNNER_CLASS = None
def pytest_addoption(parser):
parser.addini(
"doctest_optionflags",
"option flags for doctests",
type="args",
default=["ELLIPSIS"],
)
parser.addini(
"doctest_encoding", "encoding used for doctest files", default="utf-8"
)
group = parser.getgroup("collect")
group.addoption(
"--doctest-modules",
action="store_true",
default=False,
help="run doctests in all .py modules",
dest="doctestmodules",
)
group.addoption(
"--doctest-report",
type=str.lower,
default="udiff",
help="choose another output format for diffs on doctest failure",
choices=DOCTEST_REPORT_CHOICES,
dest="doctestreport",
)
group.addoption(
"--doctest-glob",
action="append",
default=[],
metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob",
)
group.addoption(
"--doctest-ignore-import-errors",
action="store_true",
default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors",
)
group.addoption(
"--doctest-continue-on-failure",
action="store_true",
default=False,
help="for a given doctest, continue to run after the first failure",
dest="doctest_continue_on_failure",
)
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(config, path, parent):
return DoctestModule(path, parent)
elif _is_doctest(config, path, parent):
return DoctestTextfile(path, parent)
def _is_setup_py(config, path, parent):
if path.basename != "setup.py":
return False
contents = path.read()
return "setuptools" in contents or "distutils" in contents
def _is_doctest(config, path, parent):
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
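# Illustrative note, not part of the original module: with the default glob
# "test*.txt" a file such as "test_example.txt" is collected as a doctest file,
# while e.g. "notes.rst" is only collected when it is an explicit command-line
# argument (isinitpath) or matches a user-supplied --doctest-glob pattern.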
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation_lines):
# List of (reprlocation, lines) tuples
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw):
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
class MultipleDoctestFailures(Exception):
def __init__(self, failures):
super(MultipleDoctestFailures, self).__init__()
self.failures = failures
def _init_runner_class():
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""
Runner to collect failures. Note that the out variable in this case is
a list instead of a stdout-like object
"""
def __init__(
self, checker=None, verbose=None, optionflags=0, continue_on_failure=True
):
doctest.DebugRunner.__init__(
self, checker=checker, verbose=verbose, optionflags=optionflags
)
self.continue_on_failure = continue_on_failure
def report_failure(self, out, test, example, got):
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(self, out, test, example, exc_info):
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
|
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
return RUNNER_CLASS(
checker=checker,
verbose=verbose,
|
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"doctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures = []
self.runner.run(self.dtest, out=failures)
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self):
"""
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
"""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
def repr_failure(self, excinfo):
import doctest
failures = None
if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
failures = [excinfo.value]
elif excinfo.errisinstance(MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is not None:
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_checker()
report_choice = _get_report_choice(
self.config.getoption("doctestreport")
)
if lineno is not None:
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
lines = [
"%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
el
|
Octoberr/swmcdh
|
Schedul/unceshi.py
|
Python
|
apache-2.0
| 1,831
| 0.013318
|
# coding=utf-8
import numpy as np
from numpy import random
# a=np.array([[2,3,4,1,2],[1,3,4,5,4],[4,2,3,10,2],[3,5,6,7,9],[10,3,4,2,9]])
# # for i in xrange(5):
# # a[i,i]=1000
# #b=a.argmin(axis=1)
# print a
# a=random.randint(1,100,size=(8,8))
MAX = 1000
MIN = 0
CARSEATS = 6
distMat = np.array([[0, 1, 15, 15, 26, 72, 10, 57],
[73, 0, 44, 98, 5, 22, 31, 89],
[46, 84, 0, 13, 28, 58, 42, 32],
[ 9, 37, 45, 0, 40, 39, 4, 49],
[53, 9, 2, 34, 0, 39, 26, 28],
[12, 93, 97, 74, 37, 0, 85, 84],
[38, 63, 83, 59, 40, 74, 0, 88],
[ 6, 86, 71, 48, 70, 20, 87, 0]])
# distance from each person to the airport
airportDist = np.array([67, 72, 96, 96, 96, 9, 25, 25])
#random.randint(1,100,size=(distMat.
|
shape[0]))
print airportDist
# stores the boarding order of all passengers
idxMat = np.zeros([airportDist.shape[0]], dtype=int)
# the first person to board (the one farthest from the airport)
initialIdx = np.argmax(airportDist)
idxMat[0] = initialIdx
# the first person has boarded
airportDist[initialIdx] = MIN
|
print initialIdx
# exclude each point's distance to itself (set the diagonal to MAX)
for k in xrange(distMat.shape[0]):
distMat[k][k] = MAX
# print("airport distance matrix: ")
# print airportDist
#
# print("distance matrix:")
# print distMat
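# Summary of the greedy assignment below (added note, not in the original code):
# the passenger farthest from the airport starts a car; each remaining seat is
# given to the nearest neighbour of the previously picked passenger (argmin over
# distMat); after CARSEATS passengers a new car is started with the farthest
# passenger still waiting.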
for i in range(1,distMat.shape[0]):
if i%CARSEATS == 0:
print("new cars")
        # next person to board starts a new car
distMat[:, idxMat[i-1]] = MAX
initialIdx = np.argmax(airportDist)
idxMat[i] = initialIdx
print initialIdx
        # this person has now boarded
airportDist[initialIdx] = MIN
# print idxMat
# print distMat
else:
tmpIdx = np.argmin(distMat[initialIdx,:])
idxMat[i] = tmpIdx
distMat[:,idxMat[i-1]] = MAX
initialIdx = tmpIdx
airportDist[tmpIdx] = MIN
print tmpIdx
print airportDist
|
nirvn/QGIS
|
python/plugins/processing/algs/qgis/FixedDistanceBuffer.py
|
Python
|
gpl-2.0
| 5,246
| 0.002859
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
FixedDistanceBuffer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsWkbTypes,
QgsProcessing,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorit
|
hm
from . import Buffer as buff
pluginPath = os.path.split(os.path.split(os.path.dir
|
name(__file__))[0])[0]
class FixedDistanceBuffer(QgisAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
DISTANCE = 'DISTANCE'
SEGMENTS = 'SEGMENTS'
DISSOLVE = 'DISSOLVE'
END_CAP_STYLE = 'END_CAP_STYLE'
JOIN_STYLE = 'JOIN_STYLE'
MITER_LIMIT = 'MITER_LIMIT'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'buffer.png'))
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterNumber(self.DISTANCE,
self.tr('Distance'), type=QgsProcessingParameterNumber.Double,
defaultValue=10.0))
self.addParameter(QgsProcessingParameterNumber(self.SEGMENTS,
self.tr('Segments'), type=QgsProcessingParameterNumber.Integer,
minValue=1, defaultValue=5))
self.addParameter(QgsProcessingParameterBoolean(self.DISSOLVE,
self.tr('Dissolve result'), defaultValue=False))
self.end_cap_styles = [self.tr('Round'),
'Flat',
'Square']
self.addParameter(QgsProcessingParameterEnum(
self.END_CAP_STYLE,
self.tr('End cap style'),
options=self.end_cap_styles, defaultValue=0))
self.join_styles = [self.tr('Round'),
'Miter',
'Bevel']
self.addParameter(QgsProcessingParameterEnum(
self.JOIN_STYLE,
self.tr('Join style'),
options=self.join_styles, defaultValue=0))
self.addParameter(QgsProcessingParameterNumber(self.MITER_LIMIT,
self.tr('Miter limit'), type=QgsProcessingParameterNumber.Double,
minValue=0, defaultValue=2))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Buffer'), QgsProcessing.TypeVectorPolygon))
def name(self):
return 'fixeddistancebuffer'
def displayName(self):
return self.tr('Fixed distance buffer')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
distance = self.parameterAsDouble(parameters, self.DISTANCE, context)
dissolve = self.parameterAsBool(parameters, self.DISSOLVE, context)
segments = self.parameterAsInt(parameters, self.SEGMENTS, context)
end_cap_style = self.parameterAsEnum(parameters, self.END_CAP_STYLE, context) + 1
join_style = self.parameterAsEnum(parameters, self.JOIN_STYLE, context) + 1
miter_limit = self.parameterAsDouble(parameters, self.MITER_LIMIT, context)
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
source.fields(), QgsWkbTypes.Polygon, source.sourceCrs())
buff.buffering(feedback, context, sink, distance, None, False, source, dissolve, segments, end_cap_style,
join_style, miter_limit)
return {self.OUTPUT: dest_id}
|
Jumpscale/jumpscale6_core
|
apps/agentcontroller/jumpscripts/core/cleanup/cleanupredisac.py
|
Python
|
bsd-2-clause
| 1,530
| 0.004575
|
from JumpScale import j
descr = """
remove old redis cache from system
"""
organization = "jumpscale"
author = "deboeckj@codescalers.com"
license = "bsd"
version = "1.0"
category = "redis.cleanu
|
p"
period = 300 # always in sec
timeout = period * 0.2 # max runtime = 20% of period
order = 1
enable = True
async = True
log = False
roles = ['master']
def action():
import time
EXTRATIME = 120
now = time.time()
try:
import ujson as json
except:
import json
import JumpScale.gri
|
d.agentcontroller
acl = j.clients.agentcontroller.get()
rcl = j.clients.redis.getRedisClient('127.0.0.1', 9999)
for jobkey in rcl.keys('jobs:*'):
if jobkey == 'jobs:last':
continue
jobs = rcl.hgetall(jobkey)
for jobguid, jobstring in jobs.iteritems():
job = json.loads(jobstring)
if job['state'] in ['OK', 'ERROR', 'TIMEOUT']:
rcl.hdel(jobkey, jobguid)
elif job['timeStart'] + job['timeout'] + EXTRATIME < now:
rcl.hdel(jobkey, jobguid)
job['state'] = 'TIMEOUT'
eco = j.errorconditionhandler.getErrorConditionObject(msg='Job timed out')
j.errorconditionhandler.raiseOperationalCritical(eco=eco,die=False)
eco.tb = None
eco.jid = job['guid']
eco.type = str(eco.type)
job['result'] = json.dumps(eco.__dict__)
acl.saveJob(job)
if __name__ == '__main__':
action()
|
leapp-to/snactor
|
examples/scripts/checktarget.py
|
Python
|
apache-2.0
| 400
| 0
|
""" Run target checks using snactor """
from generic_runner import run, pprint, get_actor
def check_target():
""" Run multiple checks at target
|
machine """
targetinfo = {}
get_actor('check_target_group').execute(targetinfo)
get_actor('check_target').execute(targetinfo)
pprint(targetinfo['targetinfo'])
if __name__ == '__main__':
|
run(check_target, tags=['check_target'])
|
pombredanne/anitya
|
anitya/tests/base.py
|
Python
|
gpl-2.0
| 6,973
| 0.001291
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2017 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
"""
Base class for Anitya tests.
"""
from __future__ import print_function
from functools import wraps
import unittest
import os
import vcr
import mock
import anitya.lib
import anitya.lib.model as model
#DB_PATH = 'sqlite:///:memory:'
## A file database is required to check the integrity, don't ask
DB_PATH = 'sqlite:////tmp/anitya_test.sqlite'
FAITOUT_URL = 'http://faitout.fedorainfracloud.org/'
if os.environ.get('BUILD_ID'):
try:
import requests
req = requests.get('%s/new' % FAITOUT_URL)
if req.status_code == 200:
DB_PATH = req.text
print('Using faitout at: %s' % DB_PATH)
except:
pass
def skip_jenkins(function):
""" Decorator to skip tests if AUTH is set to False """
@wraps(function)
def decorated_function(*args, **kwargs):
""" Decorated function, actually does the work. """
## We used to skip all these tests in jenkins, but now with vcrpy, we
## don't need to. We can replay the recorded request/response pairs
## for each test from disk.
#if os.environ.get('BUILD_ID'):
# raise unittest.SkipTest('Skip backend test on jenkins')
return function(*args, **kwargs)
return decorated_function
class Modeltests(unittest.TestCase):
""" Model tests. """
maxDiff = None
def __init__(self, method_name='runTest'):
""" Constructor. """
unittest.TestCase.__init__(self, method_name)
self.session = None
# pylint: disable=C0103
def setUp(self):
""" Set up the environnment, ran before every tests. """
if ':///' in DB_PATH:
dbfile = DB_PATH.split(':///')[1]
if os.path.exists(dbfile):
os.unlink(dbfile)
self.session = anitya.lib.init(DB_PATH, create=True, debug=False)
mock_query = mock.patch.object(
model.BASE, 'query', self.session.query_property(query_cls=model.BaseQuery))
mock_query.start()
self.addCleanup(mock_query.stop)
anitya.lib.plugins.load_plugins(self.session)
cwd = os.path.dirname(os.path.realpath(__file__))
self.vcr = vcr.use_cassette(os.path.join(cwd, 'request-data/', self.id()))
self.vcr.__enter__()
# pylint: disable=C0103
def tearDown(self):
""" Remove the test.db database if there is one. """
self.vcr.__exit__()
if '///' in DB_PATH:
dbfile = DB_PATH.split('///')[1]
if os.path.exists(dbfile):
os.unlink(dbfile)
self.session.rollback()
self.session.close()
if DB_PATH.startswith('postgres'):
db_name = DB_PATH.rsplit('/', 1)[1]
req = requests.get(
'%s/clean/%s' % (FAITOUT_URL, db_name))
print(req.text)
def create_distro(session):
""" Create some basic distro for testing. """
distro = model.Distro(
name='Fedora',
)
session.add(distro)
distro = model.Distro(
name='Debian',
)
session.add(distro)
session.commit()
def create_project(session):
""" Create some basic projects to work with. """
anitya.lib.create_project(
session,
name='geany',
homepage='http://www.geany.org/',
version_url='http://www.geany.org/Download/Releases',
regex='DEFAULT',
user_id='noreply@fedoraproject.org',
)
anitya.lib.create_project(
session,
name='subsurface',
homepage='http://subsurface.hohndel.org/',
version_url='http://subsurface.hohndel.org/downloads/',
regex='DEFAULT',
user_id='noreply@fedoraproject.org',
)
anitya.lib.create_project(
session,
name='R2spec',
homepage='https://fedorahosted.org/r2spec/',
user_id='noreply@fedoraproject.org',
)
def create_ecosystem_projects(session):
""" Create some fake projects from particular upstream ecosystems
Each project name is used in two different ecosystems
"""
anitya.lib.create_project(
session,
name='pypi_and_npm',
homepage='https://example.com/not-a-real-pypi-project',
backend='PyPI',
user_id='noreply@fedoraproject.org'
)
anitya.lib.create_project(
session,
name='pypi_and_npm',
homepage='https://example.com/not-a-real-npmjs-project',
backend='npmjs',
user_id='noreply@fedoraproject.org'
)
anitya.lib.create_project(
session,
name='rubygems_and_maven',
homepage='https://example.com/not-a-real-rubygems-project',
backend='Rubygems',
user_id='noreply@fedoraproject.org'
)
anitya.lib.create_project(
session,
name='rubygems_and_maven',
homepage='https://example.com/not-a-real-maven-project',
backend='Maven Central',
user_id='noreply@fedoraproject.org'
)
def create_package(session):
""" Create some basic packages to work with. """
package = model.Packages(
project_id=1,
distro='Fedora',
|
package_name='geany',
)
session.add(package)
package = model.Packages(
project_id=2,
distro='Fed
|
ora',
package_name='subsurface',
)
session.add(package)
session.commit()
def create_flagged_project(session):
""" Create and flag a project. Returns the ProjectFlag. """
project = anitya.lib.create_project(
session,
name='geany',
homepage='http://www.geany.org/',
version_url='http://www.geany.org/Download/Releases',
regex='DEFAULT',
user_id='noreply@fedoraproject.org',
)
session.add(project)
flag = anitya.lib.flag_project(
session,
project,
"This is a duplicate.",
"dgay@redhat.com",
"user_openid_id",
)
session.add(flag)
session.commit()
return flag
if __name__ == '__main__':
unittest.main(verbosity=2)
|
cshtarkov/autobump
|
autobump/handlers/python.py
|
Python
|
gpl-3.0
| 8,380
| 0.000955
|
# Copyright 2016-2017 Christian Shtarkov
#
# This file is part of Autobump.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
Convert a Python codebase into a list of Units.
"""
import os
import ast
import sys
import codecs
import logging
import traceback
from autobump import config
from autobump.capir import Type, Field, Parameter, Signature, Function, Unit
logger = logging.getLogger(__name__)
_source_file_ext = ".py"
class _PythonType(Type):
pass
class _Dynamic(_PythonType):
def __str__(self):
return self.__repr__()
def __repr__(self):
return "dynamic"
class _StructuralType(_PythonType):
def __init__(self, attr_set):
self.name = str(attr_set)
self.attr_set = attr_set
def is_compatible(self, other):
if not isinstance(other, _StructuralType):
return False
return self.attr_set.issubset(other.attr_set)
def __str__(self):
return str(self.attr_set)
class _HintedType(_PythonType):
def __init__(self, name):
self.name = name
def is_compatible(self, other):
return self.__eq__(other)
def __eq__(self, other):
return self.name == other.name
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
_dynamic = _Dynamic()
def _is_public(member_name):
"""Determine visibility of a member based on its name."""
return not (member_name.startswith("_") and member_name != "__init__")
def _get_type_of_parameter(function, parameter):
"""Return the type of a parameter used in a function AST node.
In this case, 'type' means structural instead of nominal type.
Because Python is dynamically typed, it would be very hard to guess
what type a parameter is without looking at usage. Instead of doing that,
this walks the AST node describing the function and considers the type to be
the set of all methods called on the parameter."""
assert isinstance(function, ast.FunctionDef), "Tried to get usage of parameter in a non-function."
# Check if there is a type hint for this parameter
if config.type_hinting():
for arg in function.args.args:
if arg.arg == parameter:
if arg.annotation:
return _HintedType(arg.annotation.id)
if not config.structural_typing():
return _dynamic
    # TODO: Don't completely omit 'self' in class methods,
# it can be used to identify addition or removal of fields.
if parameter == "self":
return _StructuralType(set())
# Generators to filter out AST
def gen_no_inner_definitions(node):
"""Recursively yield all descendant nodes
without walking any function or class definitions."""
yield node
for n in ast.iter_child_nodes(node):
if isinstance(n, ast.FunctionDef) or \
isinstance(n, ast.ClassDef):
continue
yield from gen_no_inner_definitions(n)
def gen_only_attributes(node):
"""Yield only descendant nodes that represent attribute access,
without traversing any function or class definitions."""
for n in gen_no_inner_definitions(node):
if isinstance(n, ast.Attribute) and \
isinstance(n.value, ast.Name):
yield n
# Find the set of attributes for that parameter
attr_set = set()
for attr in gen_only_attributes(function):
name = attr.value.id
method = attr.attr
if name == parameter:
# TODO: Also consider method signature.
attr_set.add(method)
    # Convert the set of attributes to a structural type
return _StructuralType(attr_set)
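# Illustrative example, not part of the original module: with structural typing
# enabled, a function such as
#   def greet(person):
#       print(person.name, person.title())
# gives the parameter "person" the structural type {"name", "title"}, i.e. the
# set of attributes accessed on it inside the function body.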
def _get_signature(function):
"""Return the signature of a function AST node."""
parameters = []
args = function.args.args
# Map all None parameters to a "TrueNone" object
# because None indicates the absense of a default value.
class TrueNone(object):
pass
defaults = [TrueNone
if isinstance(a, ast.NameConstant) and a.value is None
else a
for a in function.args.defaults]
# Prepend no default values.
defaults = [None] * (len(args) - len(defaults)) + defaults
args_with_defaults = list(zip(args, defaults))
for arg_with_default in args_with_defaults:
arg, default = arg_with_default
if isinstance(default, ast.Name):
# TODO: This does not differentiate between
# "abc" and abc.
default = default.id
elif isinstance(default, ast.NameConstant):
default = default.value
elif isinstance(default, ast.Num):
default = default.n
elif isinstance(default, ast.Str):
default = default.s
type = _get_type_of_parameter(function, arg.arg)
parameters.append(Parameter(arg.arg, type, default))
# Note: we need to return a list with the signature inside
# because the common representation allows for overloading,
# which Python doesn't.
return [Signature(parameters)]
def _container_to_unit(name, container):
"""Convert a Python AST module or class to a Unit."""
fields = dict()
functions = dict()
units = dict()
for node in container.body:
if hasattr(node, "name") and not _is_public(node.name):
# Completely ignore any private things -
# they are irrelevant to the
|
API.
continue
if isinstance(node, ast.ClassD
|
ef):
units[node.name] = _container_to_unit(node.name, node)
elif isinstance(node, ast.FunctionDef):
functions[node.name] = Function(node.name, _dynamic, _get_signature(node))
elif isinstance(node, ast.Assign):
# TODO: Handle other forms of assignment.
for target in [t for t in node.targets if isinstance(t, ast.Name) and _is_public(t.id)]:
fields[target.id] = Field(target.id, _dynamic)
return Unit(name, fields, functions, units)
def _module_to_unit(name, module):
"""Convert a Python AST module to a Unit."""
return _container_to_unit(name, module)
def python_codebase_to_units(location):
"""Returns a list of Units representing a Python codebase in 'location'."""
if config.type_hinting():
# When the handler is invoked, the 'ast' module needs to start
# pointing to 'ast35' from 'typed_ast' if type hinting is to be used.
# Note that 'ast' must be changed globally, as the other functions in this
# module rely on it as well.
global ast
from typed_ast import ast35
ast = ast35
units = dict()
for root, dirs, files in os.walk(location):
dirs[:] = [d for d in dirs if not config.dir_ignored(d)]
pyfiles = [f for f in files if f.endswith(_source_file_ext) and not config.file_ignored(f)]
for pyfile in pyfiles:
pymodule = pyfile[:-(len(_source_file_ext))] # Strip extension
with codecs.open(os.path.join(root, pyfile),
"r",
encoding="utf-8",
errors="replace") as f:
try:
units[pymodule] = _module_to_unit(pymodule, ast.parse(f.read()))
except Exception:
print(traceback.format_exc(), file=sys.stderr)
msg = "Failed to parse file {}".format(os.path.join(root, pyfile))
if config.python_omit_on_error():
logger.w
|
The-Penultimate-Defenestrator/memefarm
|
memefarm/pilutil.py
|
Python
|
mit
| 2,924
| 0
|
""" Helpers for making some things in PIL easier """
from PIL import ImageDraw, ImageFont, ImageStat
from math import ceil
def dra
|
wTextWithBorder(draw, text, coords,
fontname="Impact", fontsize=80,
color="#fff", strokecolor="#000"):
""" Draw text with a border. Although PIL doesn't support this, it can be
faked by drawing the text in the border color, with offsets, and then
drawing the text in the center on top.
See http://stackoverflow.com/a/8050556/4414003 """
# 3 looks good for 80px font. This allows for adjusting proportionally.
|
strokewidth = ceil(3 * fontsize / 80.0) # Use ceiling to prevent 0
font = ImageFont.truetype(fontname, fontsize)
x, y = coords
# Draw background
for c in ((x - strokewidth, y - strokewidth),
(x + strokewidth, y - strokewidth),
(x - strokewidth, y + strokewidth),
(x + strokewidth, y + strokewidth)):
draw.text(c, text, font=font, fill=strokecolor)
draw.text(coords, text, font=font, fill=color)
def labelImage(im, text):
""" Label an image with a string in the bottom left, using a text color
that will ensure appropriate contrast. """
d = ImageDraw.Draw(im)
textsize = ImageFont.load_default().getsize(text)
coords = im.size[0] - textsize[0] - 5, im.size[1] - textsize[1] - 5
# Check color of image where the text would go
textarea = im.crop(coords + im.size)
textareabrightness = ImageStat.Stat(textarea.convert("L")).mean[0]
color = (0, 0, 0) if textareabrightness > 128 else (255, 255, 255)
# Draw text
d.text(coords, text, fill=color)
def proportionalResize(im, width):
""" Resize an image to be a specified width while keeping aspect ratio """
w, h = im.size
aspect = float(h) / float(w)
out = im.resize((width, int(width * aspect))) # Resize to fit width
return out
def findFontSize(text, width, font="Impact", margin=10):
""" Find the largest font size that will fit `text` onto `im`, given a
margin (in percent) that must be left around an image border. """
w = int(width * (1 - margin / 100.0 * 2)) # Width accounting for margin
wAt40 = ImageFont.truetype(font, 40).getsize(text)[0] # find size at 40px
return 40 * w / wAt40 # Use a proportion to adjust that for image size
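# Worked example of the proportion above (added note, not in the original code):
# for width=1024 and margin=10 the usable width is int(1024 * 0.8) = 819; if the
# text measures 410 px at the 40 px probe size, the returned size is
# 40 * 819 / 410, about 79.9, roughly double the probe size.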
if __name__ == "__main__":
from PIL import Image, ImageDraw
# Blank test image
i = Image.new("RGB", (1024, 768), "#abcdef")
d = ImageDraw.Draw(i)
# Calculate font size
text = "OMG SUCH FONT"
fontsize = findFontSize(text, 1024)
# Render font onto canvas (102px is 10% margin)
drawTextWithBorder(d, text, (102, 102), fontsize=fontsize)
# Test proportional resizing to a larger width
proportionalResize(i, 2000).show()
    # Test proportional resizing to a smaller width
proportionalResize(i, 400).show()
|
Thortoise/Super-Snake
|
Blender/animation_nodes-master/nodes/spline/evaluate_spline.py
|
Python
|
gpl-3.0
| 1,278
| 0.007042
|
import bpy
from bpy.props import *
from mathutils import Vect
|
or
from ... base_types.node import AnimationNode
from . spline_evaluation_base import SplineEvaluationBase
class EvaluateSplineNode(bpy.types.Node, AnimationNode, SplineEvaluationBase):
bl_idname = "an_EvaluateSplineNode"
bl_label = "Evaluate Spline"
|
def create(self):
self.newInput("Spline", "Spline", "spline", defaultDrawType = "PROPERTY_ONLY")
self.newInput("Float", "Parameter", "parameter", value = 0.0)
self.newOutput("Vector", "Location", "location")
self.newOutput("Vector", "Tangent", "tangent")
def draw(self, layout):
layout.prop(self, "parameterType", text = "")
def drawAdvanced(self, layout):
col = layout.column()
col.active = self.parameterType == "UNIFORM"
col.prop(self, "resolution")
def execute(self, spline, parameter):
spline.update()
if spline.isEvaluable:
if self.parameterType == "UNIFORM":
spline.ensureUniformConverter(self.resolution)
parameter = spline.toUniformParameter(parameter)
return spline.evaluate(parameter), spline.evaluateTangent(parameter)
else:
return Vector((0, 0, 0)), Vector((0, 0, 0))
|
dywisor/kernelconfig
|
kernelconfig/pm/portagevdb/overlay.py
|
Python
|
gpl-2.0
| 14,668
| 0
|
# This file is part of kernelconfig.
# -*- coding: utf-8 -*-
import abc
import errno
import os
import shutil
|
from ...abc import loggable
|
from ...util import fs
from ...util import fspath
from . import _eclass
__all__ = ["TemporaryOverlay", "TemporaryOverlayUnion"]
class AbstractTemporaryOverlayBase(loggable.AbstractLoggable):
"""
@ivar root:
@type root: C{str}
"""
# not inheriting AbstractSourceInformed,
# overlay objects do not need to know about source_info,
# only the overlay union
# copy-paste inherit FsView
@abc.abstractmethod
def is_empty(self):
"""
@return: True if the overlay is empty, else False
@rtype: C{bool}
"""
raise NotImplementedError()
@abc.abstractmethod
def add_package(self, package_info):
"""
@param package_info: package info object
@type package_info: L{PackageInfo}
@return: True if package has been added, else False
@rtype: C{bool}
"""
raise NotImplementedError()
@abc.abstractmethod
def fs_init(self, **kwargs):
"""
Creates the overlay (at its filesystem location).
@param kwargs: undefined
@return: None (implicit)
"""
raise NotImplementedError()
@abc.abstractmethod
def iter_packages(self):
"""
@return: package info object(s)
@rtype: L{PackageInfo} (genexpr)
"""
raise NotImplementedError()
def __init__(self, root, **kwargs):
super().__init__(**kwargs)
self.root = root
def get_path(self):
return self.root
def get_filepath(self, *relpath_elements):
return fspath.join_relpaths_v(self.root, relpath_elements)
@abc.abstractmethod
def assign_repo_config(self, port_iface, fallback_repo_config=True):
raise NotImplementedError()
def get_fallback_repo_config(self, port_iface):
# If the original repo of one of the overlay repos can not be found,
# the main repo is used as fallback.
#
# Usually, this is "gentoo", but it can be overridden via os.environ.
# Setting the env var to an empty str disables the fallback behavior.
#
# FIXME: DOC: special env var KERNELCONFIG_PORTAGE_MAIN_REPO
#
main_repo_name = os.environ.get(
"KERNELCONFIG_PORTAGE_MAIN_REPO", "gentoo"
)
if not main_repo_name:
self.logger.debug("main repo fallback has been disabled via env")
return None
else:
try:
main_repo_config = port_iface.get_repo_config(main_repo_name)
except KeyError:
self.logger.warning(
"Main repo fallback is unavailable, repo %s not found.",
main_repo_name
)
return None
else:
self.logger.debug(
"Using main repo '%s' as fallback", main_repo_config.name
)
return main_repo_config
# --
# --- end of get_fallback_repo_config (...) ---
# --- end of AbstractTemporaryOverlayBase (...) ---
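# Illustrative note, not part of the original module: the fallback above can be
# steered through the environment before kernelconfig runs, e.g.
#   os.environ["KERNELCONFIG_PORTAGE_MAIN_REPO"] = "my_overlay"  # use another main repo
#   os.environ["KERNELCONFIG_PORTAGE_MAIN_REPO"] = ""            # disable the fallback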
class AbstractTemporaryOverlay(AbstractTemporaryOverlayBase):
"""
@ivar tmp_name: the temporary repo name, e.g. "kernelconfig_tmp_gentoo"
@type tmp_name: C{str}
@ivar name: the original repo name, e.g. "gentoo"
@type name: C{str}
@ivar masters: either None or a list of "master" repo names
    @type masters: C{None} or C{list} of C{str}
@ivar packages: dict of all packages in this overlay
@type packages: C{dict} :: C{str} => L{PackageInfo}
@ivar categories: set of all categories
@type categories: C{set} of C{str}
"""
def __init__(self, overlay_dir, name, **kwargs):
self.name = name
self.tmp_name = "kernelconfig_tmp_{!s}".format(self.name)
self.masters = None
self.packages = {}
self.categories = set()
kwargs.setdefault("logger_name", self.name)
super().__init__(overlay_dir, **kwargs)
# --- end of __init__ (...) ---
def get_masters_str(self):
return (
" ".join(self.masters) if self.masters is not None
else self.name
)
# --- end of get_masters_str (...) ---
def is_empty(self):
return not bool(self.packages)
def add_package(self, package_info):
cpv = package_info.cpv
if cpv in self.packages:
raise KeyError("duplicate entry for package {}".format(cpv))
self.categories.add(package_info.category)
self.packages[cpv] = package_info
self.logger.debug("packaged added: %s", package_info.cpv)
return True
# --- end of add_package (...) ---
def iter_packages(self):
return self.packages.values()
# --- end of iter_packages (...) ---
# --- end of AbstractTemporaryOverlay ---
class _TemporaryOverlay(AbstractTemporaryOverlay):
def populate(self):
# initially, try to symlink ebuilds,
# and fall back to copying if symlinks are not supported
copy_or_symlink = os.symlink
copy_method_name = "symlink"
for pkg_info in self.iter_packages():
# if pkg_info.tmp_ebuild_file is not None: ignored
pkg_dir = self.get_filepath(
fspath.join_relpath(pkg_info.category, pkg_info.name)
)
ebuild_dst = fspath.join_relpath(pkg_dir, pkg_info.ebuild_name)
self.logger.debug(
"Importing ebuild for %s as %s",
pkg_info.cpv, copy_method_name
)
self.logger.debug("ebuild file: %s", pkg_info.orig_ebuild_file)
fs.dodir(pkg_dir)
# unnecessary rmfile,
# except for running mkoverlays on the same dir again
fs.rmfile(ebuild_dst)
try:
copy_or_symlink(pkg_info.orig_ebuild_file, ebuild_dst)
except OSError as oserr:
if (
copy_or_symlink is os.symlink
and oserr.errno == errno.EPERM
):
self.logger.debug(
(
'symlinks seem to be unsupported by the fs,'
' falling back to copying'
)
)
copy_or_symlink = shutil.copyfile
copy_method_name = "file"
self.logger.debug(
"Trying to import ebuild for %s as %s",
pkg_info.cpv, copy_method_name
)
# raises:
copy_or_symlink(pkg_info.orig_ebuild_file, ebuild_dst)
else:
raise
# -- end try
pkg_info.tmp_ebuild_file = ebuild_dst
# -- end for
# --- end of populate (...) ---
def fs_init(self, eclass_importer=None):
self.logger.debug("Initializing overlay directory")
try:
self.fs_init_base()
self.fs_init_profiles()
self.fs_init_metadata()
self.fs_init_eclass(eclass_importer=eclass_importer)
except (OSError, IOError):
self.logger.error("Failed to initialize overlay!")
raise
# --- end of fs_init (...) ---
def fs_init_base(self):
# reinit() or init(), i.e. mkdir with exists_ok=True or plain mkdir?
fs.dodir(self.root)
# ---
def fs_init_profiles(self):
profiles_dir = self.get_filepath("profiles")
fs.dodir(profiles_dir)
# "/repo_name"
self.logger.debug("Creating profiles/repo_name") # overly verbose
with open(fspath.join_relpath(profiles_dir, "repo_name"), "wt") as fh:
fh.write("{!s}\n".format(self.tmp_name))
# --
# "/categories"
# dedup and sort categories
self.logger.debug("Creating profiles/categories") # overly verbose
categories = sorted(self.categories)
wi
|
ultimate-pa/benchexec
|
contrib/plots/quantile-generator.py
|
Python
|
apache-2.0
| 5,188
| 0.001542
|
#!/usr/bin/env python3
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import itertools
import sys
from benchexec import result, tablegenerator
from benchexec.tablegenerator import util
sys.dont_write_bytecode = True # prevent creation of .pyc files
def get_extract_value_function(column_identifier):
"""
returns a function that extracts the value for a column.
"""
def extract_value(run_result):
pos = None
for i, column in enumerate(run_result.columns):
if column.title == column_identifier:
pos = i
break
if pos is None:
sys.exit(f"CPU time missing for task {run_result.task_id}.")
return util.to_decimal(run_result.values[pos])
return extract_value
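# Illustrative usage, not part of the original script: the returned closure acts
# as a sort key, e.g.
#   results.sort(key=get_extract_value_function("walltime"))
# which is how main() orders run results before emitting the quantile rows.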
def main(args=None):
if args is None:
args = sys.argv
parser = argparse.ArgumentParser(
fromfile_prefix_chars="@",
description="""Create CSV tables for quantile plots with the results of a benchmark execution.
The CSV tables are similar to those produced with table-generator,
but have an additional first column with the index for the quantile plot,
and they are sorted.
The output is written to stdout.
Part of BenchExec: https://github.com/sosy-lab/benchexec/""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"result",
metavar="RESULT",
type=str,
nargs="+",
help="XML files with result produced by benchexec",
)
parser.add_argument(
"--correct-only",
action="store_true",
dest="correct_only",
help="only use correct results (recommended, implied if --score-based is used)",
)
parser.add_argument(
"--score-based",
action="store_true",
dest="score_based",
help="create data for score-based quantile plot",
)
parser.add_argument(
"--sort-by",
metavar="SORT",
default="cputime",
dest="column_identifier",
type=str,
help="column identifier for sorting the values, e.g. 'cputime' or 'walltime'",
)
options = parser.parse_args(args[1:])
# load results
run_set_result = tablegenerator.RunSetResult.create_from_xml(
options.result[0], tablegenerator.parse_results_file(options.result[0])
)
for results_file in options.result[1:]:
run_set_result.append(
results_file, tablegenerator.parse_results_file(results_file)
)
run_set_result.collect_data(options.correct_only or options.score_based)
# select appropriate results
if options.score_based:
start_index = 0
index_increment = lambda run_result: run_result.score # noqa: E731
results = []
for run_result in run_set_result.results:
if run_result.score is None:
sys.exit(
f"No score available for task {run_result.task_id}, "
f"cannot produce score-based quantile data."
)
if run_result.category == result.CATEGORY_WRONG:
start_index += run_result.score
elif run_result.category == resul
|
t.CATEGORY_MISSING:
sys.exit(
f"Property missing for task {run_result.task_id}, "
f"cannot produce score-based quantile data."
)
elif run_result.category == result.CATEGORY_CORRECT:
results.append(run_result)
else:
assert run_r
|
esult.category in {
result.CATEGORY_ERROR,
result.CATEGORY_UNKNOWN,
}
else:
start_index = 0
index_increment = lambda run_result: 1 # noqa: E731
if options.correct_only:
results = [
run_result
for run_result in run_set_result.results
if run_result.category == result.CATEGORY_CORRECT
]
else:
results = run_set_result.results
# sort data for quantile plot
results.sort(key=get_extract_value_function(options.column_identifier))
# extract information which id columns should be shown
for run_result in run_set_result.results:
run_result.id = run_result.task_id
relevant_id_columns = tablegenerator.select_relevant_id_columns(results)
# write output
index = start_index
for run_result in results:
index += index_increment(run_result)
task_ids = (
task_id for task_id, show in zip(run_result.id, relevant_id_columns) if show
)
result_values = (util.remove_unit(value or "") for value in run_result.values)
print(*itertools.chain([index], task_ids, result_values), sep="\t")
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit("Script was interrupted by user.")
|
nkoech/csacompendium
|
csacompendium/csa_practice/models.py
|
Python
|
mit
| 9,828
| 0.001933
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from csacompendium.utils.abstractmodels import (
AuthUserDetail,
CreateUpdateTime,
)
from csacompendium.utils.createslug import create_slug
from csacompendium.utils.modelmanagers import (
model_instance_filter,
model_foreign_key_qs,
model_type_filter,
create_model_type,
)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
class CsaTheme(AuthUserDetail, CreateUpdateTime):
"""
CSA th
|
eme model. Creates CSA theme entity.
"""
slug = models.SlugField(max_length=12
|
0, unique=True, blank=True)
csa_theme = models.CharField(max_length=80, unique=True, verbose_name='CSA theme')
def __unicode__(self):
return self.csa_theme
def __str__(self):
return self.csa_theme
def get_api_url(self):
"""
Get CSA theme URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:csa_theme_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practice Themes'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=CsaTheme)
def pre_save_csa_theme_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, CsaTheme, instance.csa_theme)
class PracticeLevel(AuthUserDetail, CreateUpdateTime):
"""
CSA level of practice model. Creates CSA practice level entity.
"""
slug = models.SlugField(max_length=150, unique=True, blank=True)
practice_level = models.CharField(max_length=150, unique=True)
def __unicode__(self):
return self.practice_level
def __str__(self):
return self.practice_level
def get_api_url(self):
"""
Get CSA practice level URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:practice_level_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practice Levels'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=PracticeLevel)
def pre_save_practice_level_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, PracticeLevel, instance.practice_level)
class PracticeType(AuthUserDetail, CreateUpdateTime):
"""
CSA practice type model. Creates CSA practice type entity.
"""
slug = models.SlugField(max_length=120, unique=True, blank=True)
practice_type = models.CharField(max_length=120, unique=True, verbose_name='Practice category')
def __unicode__(self):
return self.practice_type
def __str__(self):
return self.practice_type
def get_api_url(self):
"""
Get CSA practice type URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:practice_type_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practice Types'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=PracticeType)
def pre_save_practice_type_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, PracticeType, instance.practice_type)
class CsaPracticeManager(models.Manager):
"""
CSA practice model manager
"""
def filter_by_model_type(self, instance):
"""
Query related objects/model type
:param instance: Object instance
:return: Matching object else none
:rtype: Object/record
"""
obj_qs = model_foreign_key_qs(instance, self, CsaPracticeManager)
if obj_qs.exists():
return model_type_filter(self, obj_qs, CsaPracticeManager)
class CsaPractice(AuthUserDetail, CreateUpdateTime):
"""
CSA practice model. Creates CSA practice entity.
"""
slug = models.SlugField(unique=True, blank=True)
practice_code = models.CharField(max_length=6, unique=True, help_text='User defined CSA practice code')
csatheme = models.ForeignKey(CsaTheme, on_delete=models.PROTECT, verbose_name='CSA theme')
practicelevel = models.ForeignKey(PracticeLevel, on_delete=models.PROTECT, verbose_name='Practice level')
sub_practice_level = models.TextField(blank=True, null=True)
sub_subpractice_level = models.TextField(blank=True, null=True)
definition = models.TextField(blank=True, null=True)
practicetype = models.ForeignKey(PracticeType, on_delete=models.PROTECT, verbose_name='Practice category')
objects = CsaPracticeManager()
def __unicode__(self):
return self.sub_practice_level
def __str__(self):
return self.sub_practice_level
def get_api_url(self):
"""
Get CSA practice URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:csa_practice_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practices'
@property
def research_csa_practice(self):
"""
Get related research CSA practice object/record
:return: Query result from the research CSA practice model
:rtype: object/record
"""
instance = self
qs = ResearchCsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=CsaPractice)
def pre_save_csa_practice_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, CsaPractice, instance.practice_code)
class ResearchCsaPracticeManager(models.Manager):
"""
Research CSA practice model manager
"""
def filter_by_instance(self, instance):
"""
Query a related research CSA practice object/record from another model's object
:param instance: Object instance
:return: Query result from content type/model
        :rtype: obj
|
bblacey/FreeCAD-MacOS-CI
|
src/Mod/Ship/WeightInstance.py
|
Python
|
lgpl-2.1
| 12,814
| 0.001795
|
#***************************************************************************
#*                                                                         *
#* Copyright (c) 2011, 2016 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#*   it under the terms of the GNU Lesser General Public License (LGPL)   *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import time
from math import *
from PySide import QtGui, QtCore
import FreeCAD
import FreeCADGui
from FreeCAD import Base, Vector
import Part
import Units
from shipUtils import Paths, Math
class Weight:
def __init__(self, obj, shapes, ship):
""" Transform a generic object to a ship instance.
Position arguments:
obj -- Part::FeaturePython created object which should be transformed
in a weight instance.
shapes -- Set of shapes which will compound the weight element.
ship -- Ship where the weight is allocated.
"""
        # Add a unique property to identify the Weight instances
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"True if it is a valid weight instance, False otherwise",
None))
obj.addProperty("App::PropertyBool",
"IsWeight",
"Weight",
tooltip).IsWeight = True
# Add the mass property for puntual weights
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Mass [kg]",
None))
obj.addProperty("App::PropertyFloat",
"Mass",
"Weight",
tooltip).Mass = 0.0
# Add the density property for linear elements
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Linear density [kg / m]",
None))
obj.addProperty("App::PropertyFloat",
"LineDens",
"Weight",
tooltip).LineDens = 0.0
# Add the area density property for surface elements
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Area density [kg / m^2]",
None))
obj.addProperty("App::PropertyFloat",
"AreaDens",
"Weight",
tooltip).AreaDens = 0.0
# Add the density property for volumetric elements
tooltip = str(QtGui.QApplication.translate(
"ship_weight",
"Density [kg / m^3]",
None))
obj.addProperty("App::PropertyFloat",
"Dens",
"Weight",
tooltip).Dens = 0.0
# Set the subshapes
obj.Shape = Part.makeCompound(shapes)
obj.Proxy = self
def onChanged(self, fp, prop):
"""Detects the ship data changes.
Position arguments:
fp -- Part::FeaturePython object affected.
prop -- Modified property name.
"""
if prop == "Mass":
pass
def execute(self, fp):
"""Detects the entity recomputations.
Position arguments:
fp -- Part::FeaturePython object affected.
"""
pass
def _getPuntualMass(self, fp, shape):
"""Compute the mass of a puntual element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Vertex shape object.
"""
return Units.parseQuantity('{0} kg'.format(fp.Mass))
def _getLinearMass(self, fp, shape):
"""Compute the mass of a linear element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Edge shape object.
"""
rho = Units.parseQuantity('{0} kg/m'.format(fp.LineDens))
l = Units.Quantity(shape.Length, Units.Length)
return rho * l
def _getAreaMass(self, fp, shape):
"""Compute the mass of an area element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Face shape object.
"""
rho = Units.parseQuantity('{0} kg/m^2'.format(fp.AreaDens))
a = Units.Quantity(shape.Area, Units.Area)
return rho * a
def _getVolumetricMass(self, fp, shape):
"""Compute the mass of a volumetric element.
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Solid shape object.
"""
rho = Units.parseQuantity('{0} kg/m^3'.format(fp.Dens))
v = Units.Quantity(shape.Volume, Units.Volume)
return rho * v
def getMass(self, fp):
"""Compute the mass of the object, already taking into account the
type of subentities.
Position arguments:
fp -- Part::FeaturePython object affected.
Returned value:
Object mass
"""
m = Units.parseQuantity('0 kg')
for s in fp.Shape.Solids:
m += self._getVolumetricMass(fp, s)
for f in fp.Shape.Faces:
m += self._getAreaMass(fp, f)
for e in fp.Shape.Edges:
m += self._getLinearMass(fp, e)
for v in fp.Shape.Vertexes:
m += self._getPuntualMass(fp, v)
return m
def _getPuntualMoment(self, fp, shape):
"""Compute the moment of a puntual element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Vertex shape object.
"""
m = self._getPuntualMass(fp, shape)
x = Units.Quantity(shape.X, Units.Length)
y = Units.Quantity(shape.Y, Units.Length)
z = Units.Quantity(shape.Z, Units.Length)
return (m * x, m * y, m * z)
def _getLinearMoment(self, fp, shape):
"""Compute the mass of a linear element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Edge shape object.
"""
m = self._getLinearMass(fp, shape)
cog = shape.CenterOfMass
x = Units.Quantity(cog.x, Units.Length)
y = Units.Quantity(cog.y, Units.Length)
z = Units.Quantity(cog.z, Units.Length)
return (m * x, m * y, m * z)
def _getAreaMoment(self, fp, shape):
"""Compute the mass of an area element (respect to 0, 0, 0).
Position arguments:
fp -- Part::FeaturePython object affected.
shape -- Face shape object.
"""
m = self._getAreaMass(fp, shape)
cog = shape.CenterOfMass
x = Units.Quantity(cog.x, Units.Length)
y =
|
cjbauer/brainphrase
|
adjectives.py
|
Python
|
agpl-3.0
| 20,322
| 0.000295
|
# Collection of adjectives
# September 19 2014
# Christian Bauer
d = { }
drev = { }
num = 0
def ins(x):
global num,d,drev
d[num] = x
drev[x] = num
num = num + 1
ins('abandoned')
ins('able')
ins('absolute')
ins('adorable')
ins('adventurous')
ins('academic')
ins('acceptable')
ins('acclaimed')
ins('accomplished')
ins('accurate')
ins('aching')
ins('acidic')
ins('acrobatic')
ins('active')
ins('actual')
ins('adept')
ins('admirable')
ins('admired')
ins('adolescent')
ins('adored')
ins('advanced')
ins('afraid')
ins('affectionate')
ins('aged')
ins('aggravating')
ins('aggressive')
ins('agile')
ins('agitated')
ins('agonizing')
ins('agreeable')
ins('ajar')
ins('alarmed')
ins('alarming')
ins('alert')
ins('alienated')
ins('alive')
ins('all')
ins('altruistic')
ins('amazing')
ins('ambitious')
ins('ample')
ins('amused')
ins('amusing')
ins('anchored')
ins('ancient')
ins('angelic')
ins('angry')
ins('anguished')
ins('animated')
ins('annual')
ins('another')
ins('antique')
ins('anxious')
ins('any')
ins('apprehensive')
ins('appropriate')
ins('apt')
ins('arctic')
ins('arid')
ins('aromatic')
ins('artistic')
ins('ashamed')
ins('assured')
ins('astonishing')
ins('athletic')
ins('attached')
ins('attentive')
ins('attractive')
ins('austere')
ins('authentic')
ins('authorized')
ins('automatic')
ins('avaricious')
ins('average')
ins('aware')
ins('awesome')
ins('awful')
ins('awkward')
ins('babyish')
ins('bad')
ins('back')
ins('baggy')
ins('bare')
ins('barren')
ins('basic')
ins('beautiful')
ins('belated')
ins('beloved')
ins('beneficial')
ins('better')
ins('best')
ins('bewitched')
ins('big')
ins('big-hearted')
ins('biodegradable')
ins('bite-sized')
ins('bitter')
ins('black')
ins('black-and-white')
ins('bland')
ins('blank')
ins('blaring')
ins('bleak')
ins('blind')
ins('blissful')
ins('blond')
ins('blue')
ins('blushing')
ins('bogus')
ins('boiling')
ins('bold')
ins('bony')
ins('boring')
ins('bossy')
ins('both')
ins('bouncy')
ins('bountiful')
ins('bowed')
ins('brave')
ins('breakable')
ins('brief')
ins('bright')
ins('brilliant')
ins('brisk')
ins('broken')
ins('bronze')
ins('brown')
ins('bruised')
ins('bubbly')
ins('bulky')
ins('bumpy')
ins('buoyant')
ins('burdensome')
ins('burly')
ins('bustling')
ins('busy')
ins('buttery')
ins('buzzing')
ins('calculating')
ins('calm')
ins('candid')
ins('canine')
ins('capital')
ins('carefree')
ins('careful')
ins('careless')
ins('caring')
ins('cautious')
ins('cavernous')
ins('celebrated')
ins('charming')
ins('cheap')
ins('cheerful')
ins('cheery')
ins('chief')
ins('chilly')
ins('chubby')
ins('circular')
ins('classic')
ins('clean')
ins('clear')
ins('clear-cut')
ins('clever')
ins('close')
ins('closed')
ins('cloudy')
ins('clueless')
ins('clumsy')
ins('cluttered')
ins('coarse')
ins('cold')
ins('colorful')
ins('colorless')
ins('colossal')
ins('comfortable')
ins('common')
ins('compassionate')
ins('competent')
ins('complete')
ins('complex')
ins('complicated')
ins('composed')
ins('concerned')
ins('concrete')
ins('confused')
ins('conscious')
ins('considerate')
ins('constant')
ins('content')
ins('conventional')
ins('cooked')
ins('cool')
ins('cooperative')
ins('coordinated')
ins('corny')
ins('corrupt')
ins('costly')
ins('courageous')
ins('courteous')
ins('crafty')
ins('crazy')
ins('creamy')
ins('creative')
ins('creepy')
ins('criminal')
ins('crisp')
ins('critical')
ins('crooked')
ins('crowded')
ins('cruel')
ins('crushing')
ins('cuddly')
ins('cultivated')
ins('cultured')
ins('cumbersome')
ins('curly')
ins('curvy')
ins('cute')
ins('cylindrical')
ins('damaged')
ins('damp')
ins('dangerous')
ins('dapper')
ins('daring')
ins('darling')
ins('dark')
ins('dazzling')
ins('dead')
ins('deadly')
ins('deafening')
ins('dear')
ins('dearest')
ins('decent')
ins('decimal')
ins('decisive')
ins('deep')
ins('defenseless')
ins('defensive')
ins('defiant')
ins('deficient')
ins('definite')
ins('definitive')
ins('delayed')
ins('delectable')
ins('delicious')
ins('delightful')
ins('delirious')
ins('demanding')
ins('dense')
ins('dental')
ins('dependable')
ins('dependent')
ins('descriptive')
ins('deserted')
ins('detailed')
ins('determined')
ins('devoted')
ins('different')
ins('difficult')
ins('digital')
ins('diligent')
ins('dim')
ins('dimpled')
ins('dimwitted')
ins('direct')
ins('disastrous')
ins('discrete')
ins('disfigured')
ins('disgusting')
ins('disloyal')
ins('dismal')
ins('distant')
ins('downright')
ins('dreary')
ins('dirty')
ins('disguised')
ins('dishonest')
ins('distinct')
ins('distorted')
ins('dizzy')
ins('dopey')
ins('doting')
ins('double')
ins('drab')
ins('drafty')
ins('dramatic')
ins('droopy')
ins('dry')
ins('dual')
ins('dull')
ins('dutiful')
ins('eager')
ins('earnest')
ins('early')
ins('easy')
ins('easy-going')
ins('ecstatic')
ins('edible')
ins('educated')
ins('elaborate')
ins('elastic')
ins('elated')
ins('elderly')
ins('electric')
ins('elegant')
ins('elementary')
ins('elliptical')
ins('embarrassed')
ins('embellished')
ins('eminent')
ins('emotional')
ins('empty')
ins('enchanted')
ins('enchanting')
ins('energetic')
ins('enlightened')
ins('enormous')
ins('enraged')
ins('entire')
ins('envious')
ins('equal')
ins('equatorial')
ins('essential')
ins('esteemed')
ins('ethical')
ins('euphoric')
ins('even')
ins('evergreen')
ins('everlasting')
ins('every')
ins('evil')
ins('exalted')
ins('excellent')
ins('exemplary')
ins('exhausted')
ins('excitable')
ins('excited')
ins('exciting')
ins('exotic')
ins('expensive')
ins('experienced')
ins('expert')
ins('extraneous')
ins('extroverted')
ins('extra-large')
ins('extra-small')
ins('fabulous')
ins('failing')
ins('faint')
ins('fair')
ins('faithful')
ins('fake')
ins('false')
ins('familiar')
ins('famous')
ins('fancy')
ins('fantastic')
ins('far')
ins('faraway')
ins('far-flung')
ins('far-off')
ins('fast')
ins('fat')
ins('fatal')
ins('fatherly')
ins('favorable')
ins('favorite')
ins('fearful')
ins('fearless')
ins('feisty')
ins('feline')
ins('female')
ins('feminine')
ins('few')
ins('fickle')
ins('filthy')
ins('fine')
ins('finished')
ins('firm')
ins('first')
ins('firsthand')
ins('fitting')
ins('fixed')
ins('flaky')
ins('flamboyant')
ins('flashy')
ins('flat')
ins('flawed')
ins('flawless')
ins('flickering')
ins('flimsy')
ins('flippant')
ins('flowery')
ins('fluffy')
ins('fluid')
ins('flustered')
ins('focused')
ins('fond')
ins('foolhardy')
ins('foolish')
ins('forceful')
ins('forked')
ins('formal')
ins('forsaken')
ins('forthright')
ins('fortunate')
ins('fragrant')
ins('frail')
ins('frank')
ins('frayed')
ins('free')
ins('French')
ins('fresh')
ins('frequent')
ins('friendly')
ins('frightened')
ins('frightening')
ins('frigid')
ins('frilly')
ins('frizzy')
ins('frivolous')
ins('front')
ins('frosty')
ins('frozen')
ins('frugal')
ins('fruitful')
ins('full')
ins('fumbling')
ins('functional')
ins('funny')
ins('fussy')
ins('fuzzy')
ins('gargantuan')
ins('gaseous')
ins('general')
ins('generous')
ins('gentle')
ins('genuine')
ins('giant')
ins('giddy')
ins('gigantic')
ins('gifted')
ins('giving')
ins('glamorous')
ins('glaring')
ins('glass')
ins('gleaming')
ins('gleeful')
ins('glistening')
ins('glittering')
ins('gloomy')
ins('glorious')
ins('glossy')
ins('glum')
ins('golden')
ins('good')
ins('good-natured')
ins('gorgeous')
ins('graceful')
ins('gracious')
ins('grand')
ins('grandiose')
ins('granular')
ins('grateful')
ins('grave')
ins('gray')
ins('great')
ins('greedy')
ins('green')
ins('gregarious')
ins('grim')
ins('grimy')
ins('gripping')
ins('grizzled')
ins('gross')
ins('grotesque')
ins('grouchy')
ins('grounded')
ins('growing')
ins('growling')
ins('grown')
ins('grubby')
ins('gruesome')
ins('grumpy')
ins('guilty')
ins('gullible')
ins('gummy')
ins('hairy')
ins('half')
ins('handmade')
ins('handsome')
ins('handy')
ins('happy')
ins('happy-go-lucky')
ins('hard')
ins('hard-to-find')
ins('harmful')
ins('harmless')
ins('harmonious')
ins('harsh')
ins('hasty')
ins('hateful')
ins('haunting')
ins('healthy')
ins('heartfelt')
ins('hearty')
ins('heavenly')
ins('heavy')
ins('hefty')
ins('helpful')
ins('helpless')
ins('hidden')
ins('hideous')
ins('high')
ins('high-level')
ins('hilarious')
ins('hoarse')
ins('hollow')
ins('homely')
ins('honest')
ins('honorable')
ins('honored')
ins('hopeful')
ins('horrible')
ins('hospitable')
ins('hot')
ins('huge')
ins('humble')
ins('humiliati
|
nitzmahone/ansible
|
test/lib/ansible_test/_internal/completion.py
|
Python
|
gpl-3.0
| 8,217
| 0.003529
|
"""Loading, parsing and storing of completion configurations."""
from __future__ import annotations
import abc
import dataclasses
import os
import typing as t
from .constants import (
CONTROLLER_PYTHON_VERSIONS,
SUPPORTED_PYTHON_VERSIONS,
)
from .util import (
ANSIBLE_TEST_DATA_ROOT,
read_lines_without_comments,
)
from .data import (
data_context,
)
@dataclasses.dataclass(frozen=True)
class CompletionConfig(metaclass=abc.ABCMeta):
"""Base class for completion configuration."""
name: str
@property
@abc.abstractmethod
def is_default(self):
"""True if the completion entry is only used for defaults, otherwise False."""
@dataclasses.dataclass(frozen=True)
class PosixCompletionConfig(CompletionConfig, metaclass=abc.ABCMeta):
"""Base class for completion configuration of POSIX environments."""
@property
@abc.abstractmethod
def supported_pythons(self): # type: () -> t.List[str]
"""Return a list of the supported Python versions."""
@abc.abstractmethod
def get_python_path(self, version): # type: (str) -> str
"""Return the path of the requested Python version."""
def get_default_python(self, controller): # type: (bool) -> str
"""Return the default Python version for a controller or target as specified."""
context_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
version = [python for python in self.supported_pythons if python in context_pythons][0]
return version
@property
def controller_supported(self): # type: () -> bool
"""True if at least one Python version is provided which supports the controller, otherwise False."""
return any(version in CONTROLLER_PYTHON_VERSIONS for version in self.supported_pythons)
@dataclasses.dataclass(frozen=True)
class PythonCompletionConfig(PosixCompletionConfig, metaclass=abc.ABCMeta):
"""Base class for completion configuration of Python environments."""
python: str = ''
python_dir: str = '/usr/bin'
@property
def supported_pythons(self): # type: () -> t.List[str]
"""Return a list of the supported Python versions."""
versions = self.python.split(',') if self.python else []
versions = [version for version in versions if version in SUPPORTED_PYTHON_VERSIONS]
return versions
def get_python_path(self, version): # type: (str) -> str
"""Return the path of the requested Python version."""
return os.path.join(self.python_dir, f'python{version}')
@dataclasses.dataclass(frozen=True)
class RemoteCompletionConfig(CompletionConfig):
"""Base class for completion configuration of remote environments provisioned through Ansible Core CI."""
provider: t.Optional[str] = None
@property
def platform(self):
"""The name of the platform."""
return self.name.partition('/')[0]
@property
def version(self):
"""The version of the platform."""
return self.name.partition('/')[2]
@property
def is_default(self):
"""True if the completion entry is only used for defaults, otherwise False."""
return not self.version
def __post_init__(self):
if not self.provider:
raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.')
@dataclasses.dataclass(frozen=True)
class InventoryCompletionConfig(CompletionConfig):
"""Configuration for inventory files."""
def __init__(self): # type: () -> None
super().__init__(name='inventory')
@property
def is_default(self): # type: () -> bool
"""True if the completion entry is only used for defaults, otherwise False."""
return False
@dataclasses.dataclass(frozen=True)
class PosixSshCompletionConfig(PythonCompletionConfig):
"""Configuration for a POSIX host reachable over SSH."""
def __init__(self, user, host): # type: (str, str) -> None
super().__init__(
name=f'{user}@{host}',
python=','.join(SUPPORTED_PYTHON_VERSIONS),
)
@property
def is_default(self): # type: () -> bool
"""True if the completion e
|
ntry is only used for defaults, otherwise False."""
return False
@dataclasses.dataclass(frozen=True)
class DockerCompletionConfig(PythonCompletionConfig):
"""Configuration for Docker containers."""
image: str = ''
seccomp: str = 'default'
placeholder: bool = False
@property
def is_default(self):
"""True if the completion entry is only used for defaults, otherwise False."""
return False
def __post_init__(self):
if not self.image:
raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.')
if not self.supported_pythons and not self.placeholder:
raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.')
@dataclasses.dataclass(frozen=True)
class NetworkRemoteCompletionConfig(RemoteCompletionConfig):
"""Configuration for remote network platforms."""
collection: str = ''
connection: str = ''
@dataclasses.dataclass(frozen=True)
class PosixRemoteCompletionConfig(RemoteCompletionConfig, PythonCompletionConfig):
"""Configuration for remote POSIX platforms."""
placeholder: bool = False
def __post_init__(self):
if not self.supported_pythons:
if self.version and not self.placeholder:
raise Exception(f'POSIX remote completion entry "{self.name}" must provide a "python" setting.')
else:
if not self.version:
raise Exception(f'POSIX remote completion entry "{self.name}" is a platform default and cannot provide a "python" setting.')
@dataclasses.dataclass(frozen=True)
class WindowsRemoteCompletionConfig(RemoteCompletionConfig):
"""Configuration for remote Windows platforms."""
TCompletionConfig = t.TypeVar('TCompletionConfig', bound=CompletionConfig)
def load_completion(name, completion_type): # type: (str, t.Type[TCompletionConfig]) -> t.Dict[str, TCompletionConfig]
"""Load the named completion entries, returning them in dictionary form using the specified completion type."""
lines = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
if data_context().content.collection:
context = 'collection'
else:
context = 'ansible-core'
items = {name: data for name, data in [parse_completion_entry(line) for line in lines] if data.get('context', context) == context}
for item in items.values():
item.pop('context', None)
item.pop('placeholder', None)
completion = {name: completion_type(name=name, **data) for name, data in items.items()}
return completion
def parse_completion_entry(value): # type: (str) -> t.Tuple[str, t.Dict[str, str]]
"""Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
values = value.split()
name = values[0]
data = {kvp[0]: kvp[1] if len(kvp) > 1 else '' for kvp in [item.split('=', 1) for item in values[1:]]}
return name, data
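# Hedged example (not part of ansible-test): given a hypothetical completion
# line, parse_completion_entry() splits off the entry name and turns the
# remaining whitespace-separated key=value pairs into a dict.
def _parse_completion_entry_example():  # type: () -> None
    """Minimal sketch; the entry below is made up for illustration."""
    name, data = parse_completion_entry('alpine/3.15 python=3.9 provider=aws')
    assert name == 'alpine/3.15'
    assert data == {'python': '3.9', 'provider': 'aws'}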
def filter_completion(
completion, # type: t.Dict[str, TCompletionConfig]
controller_only=False, # type: bool
include_defaults=False, # type: bool
): # type: (...) -> t.Dict[str, TCompletionConfig]
"""Return a the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified."""
if controller_only:
completion = {name: config for name, config in completion.items() if config.controller_supported}
if not include_defaults:
completion = {name: config for name, config in completion.items() if not config.is_default}
return completion
DOCKER_COMPLETION = load_completion('docker', DockerCompletionConfig)
REMOTE_COMPLETION = load_completion('remote', PosixRemoteCompletionConfig)
WINDOWS_COMPLETION = load_completion('windows', WindowsRemoteCompletionConfig)
NETWORK_COMPLETION = load_completion('netw
|
chrisspen/homebot
|
src/test/max_column/test_max_column.py
|
Python
|
mit
| 690
| 0.008696
|
#!../../../.env/bin/python
import os
import numpy as np
import time
a = np.array([
[1,0,3],
[0,2,1],
[0.1,0,0],
])
print a
row = 1
col = 2
print a[row][col]
assert a[row][col] == 1
expected_max_rows = [0, 1, 0]
expected_max_values = [1, 2, 3]
print 'expected_max_rows:', expected_max_rows
print 'expected_max_values:', expected_max_values
t0 = time.time()
actual_max_rows = list(np.argmax(a, axis=0))
td = time.time() - t0
actual_max_values = list(np.amax(a, axis=0))
print 'td:', round(td, 4)
print 'actual_max_rows:', actual_max_rows
print 'actual_max_values:', actual_max_values
assert actual_max_rows == expected_max_rows
assert actual_max_values == expected_max_values
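# Hedged addition (not in the original script): the same argmax/amax pattern
# works row-wise with axis=1; quick sanity check on the matrix above.
expected_rowmax_cols = [2, 1, 0]
expected_rowmax_values = [3, 2, 0.1]
assert list(np.argmax(a, axis=1)) == expected_rowmax_cols
assert list(np.amax(a, axis=1)) == expected_rowmax_values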
|
liquidkarma/pyneat
|
pyNEAT/Mutator.py
|
Python
|
gpl-2.0
| 790
| 0.005063
|
"""
pyNEAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
class Mutator:
GAUSSIAN = 0
COLDGAUSSIAN = 1
|
kosystem/PythonGlutWrapper
|
GlutViewController.py
|
Python
|
mit
| 2,389
| 0.000837
|
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GLUT.freeglut import *
import GlutWrapper
import math
ESCAPE = b'\033'
class GlutViewController(GlutWrapper.GlutWrapper):
"""docstring for GlutViewController"""
def __init__(self):
super(GlutViewController, self).__init__()
self.count = 0.0
def display(self, deltaTime):
self.drawAxis(50)
self.count += 1.0
glRotate(self.count, 0, 1, 0)
glutSolidTeapot(10)
if deltaTime > 0.0:
fpsString = "FPS: %.1f" % (1.0/deltaTime)
self.overlayString(fpsString, 0.0, 0.0)
self.overlayString("LB", 0.0, -1.0)
self.overlayString("RT", -20.0, 0.0)
self.overlayString("RB", -20.0, -1.0)
# User interface -----------------------------------
def mouse(self, button, state, x, y):
# print("MousePress: button: %d, x: %d, y:%d" % (button, x, y))
self.mouseState.button = button
self.mouseState.pressed = ~state
self.mouseState.x = x
self.mouseState.y = y
if button == 3:
self.camera.distance *= 0.875
elif button == 4:
self.camera.distance *= 1.125
def motion(self, x, y):
# print("MouseMove: x: %d, y: %d" % (x, y))
movedX = x - self.mouseState.x
movedY = y - self.mouseState.y
if self.mouseState.button == 0 & self.mouseState.pressed:
self.camera.pan += float(-movedX)/100.0
|
self.camera.tilt += float(movedY)/100.0
if self.camera.tilt > math.pi/2.0:
self.camera.tilt = math.pi/2.0-0.01
if self.camera.tilt < -math.pi/2.0:
self.camera.tilt = -(math.pi/2.0-0.01)
        self.mouseState.x = x
self.mouseState.y = y
def keyboard(self, key, x, y):
print("KeyboardPress: %s" % key)
if key == ESCAPE:
sys.exit()
elif key == b'p':
self.camera.distance *= 0.875
elif key == b'n':
self.camera.distance *= 1.125
def setColor(self, color):
glColor(color[0], color[1], color[2])
glMaterial(GL_FRONT, GL_AMBIENT, color)
glMaterial(GL_FRONT, GL_DIFFUSE, color)
if __name__ == '__main__':
print("Hit ESC key to quit.")
view = GlutViewController()
view.frameTime = 1.0/60.0
view.startFramework()
|
rdkls/django-audit-mongodb
|
tests/test_models.py
|
Python
|
bsd-3-clause
| 25,093
| 0.009923
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010, 2degrees Limited <egoddard@tech.2degreesnetwork.com>.
# All Rights Reserved.
#
# This file is part of djangoaudit <https://launchpad.net/django-audit/>,
# which is subject to the provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for djangoaudit"""
from datetime import datetime, timedelta, date
from decimal import Decimal
import os
# Have to set this here to ensure this is Django-like
os.environ['DJANGO_SETTINGS_MODULE'] = "tests.fixtures.sampledjango.settings"
from django.conf import settings
from django.db.models import Sum
from nose.tools import (eq_, ok_, assert_false, assert_not_equal, assert_raises,
raises)
from pymongo.errors import PyMongoError
from fixture.django_testcase import FixtureTestCase
#from mongofixture import MongoFixtureTestCase
from djangoaudit.models import (_coerce_data_to_model_types, _audit_model,
_coerce_to_bson_compatible, AuditedModel)
from djangoaudit.connection import MONGO_CONNECTION
from tests.fixtures.sampledjango.bsg.models import *
from tests.fixtures.sampledjango.bsg.fixtures import *
class TestEnsureBSONCompatible(object):
"""Test for :func:`_coerce_to_bson_compatible`"""
def test_decimal_to_float(self):
"""Ensure that :class:`Decimal` is converted to :class:`float`"""
got = _coerce_to_bson_compatible(Decimal('1234.5678'))
expected = 1234.5678
eq_(got, expected,
"Expected %r, got %r for Decimal to float conversion" %
(expected, got))
def test_date_to_datetime(self):
"""Ensure that :class:`date` is converted to :class:`datetime`"""
got = _coerce_to_bson_compatible(date(2001, 9, 11))
expected = datetime(2001, 9, 11)
eq_(got, expected,
"Expected %r, got %r for date to datetime conversion" %
(expected, got))
class MockModelMeta(object):
""" Mock of :class:`django.db.options.Options` """
def __init__(self, app_label, model_name):
self.app_label = app_label
self.object_name = model_name
class MockModel(object):
""" Mock of :class:`django.db.models.base.Model` """
def __init__(self, app_label, model_name, pk):
self._meta = MockModelMeta(app_label, model_name)
self.pk = pk
class TestAuditModel(object):
""" Tests for :func:`djangoaudit.models.audit_model` """
def setup(self):
self.audit_collection_name = "audit_data"
self.auditing_collection = MONGO_CONNECTION\
.get_collection(self.audit_collection_name)
self.profile = MockModel("profiles", "Profile", 123)
def fetch_record_by_id(self, id):
return self.auditing_collection.find_one({"_id":id})
def test_no_changes_empty_dicts(self):
"""Check that passing two empty value dicts results in a no-op"""
result = _audit_model(self.profile, {}, {})
eq_(result, None, "No changes should not result in anything being "
"written to the database")
def test_no_changes_same_values(self):
"""Check that passing two identical dicts results in a no-op"""
result = _audit_model(self.profile,
{'foo': 1, 'bar': 'wibble', 'empty': None,
'my_date': datetime(2001, 1, 1, 9, 12)},
{'foo': 1, 'bar': 'wibble', 'empty': None,
'my_date': datetime(2001, 1, 1, 9, 12)})
eq_(result, None, "No changes should not result in anything being "
"written to the database")
def test_single_change_no_other_diff(self):
"""Check that a single changed value is correctly recorded"""
result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
assert_not_equal(result, None,
|
"A change should result in a database object being "
"created")
saved_record = self.fetch_record_by_id(result)
eq_(saved_record['foo'], 'bar',
"The saved record should contain a sing
|
le difference key")
def test_model_data_write_out(self):
"""Check the correct data is written out for the model"""
result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
assert_not_equal(result, None,
"A change should result in a database object being "
"created")
saved_record = self.fetch_record_by_id(result)
eq_(saved_record['object_app'], self.profile._meta.app_label)
eq_(saved_record['object_model'], self.profile._meta.object_name)
eq_(saved_record['object_pk'], self.profile.pk)
def test_date_stamping(self):
"""Check that a date stamp is stored in along with the record"""
result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
assert_not_equal(result, None,
"A change should result in a database object being "
"created")
saved_record = self.fetch_record_by_id(result)
record_date_stamp = saved_record['audit_date_stamp']
now = datetime.utcnow()
ok_((now - timedelta(seconds=1)) < record_date_stamp < now,
"Date stamp should be almost the same as now (now: %s, got: %s"
% (now, record_date_stamp))
def test_addition_parameter_write_out(self):
"""Check that additional parameters are correctly stored"""
result = _audit_model(self.profile, dict(foo=None), dict(foo='bar'))
assert_not_equal(result, None,
"A change should result in a database object being "
"created")
saved_record = self.fetch_record_by_id(result)
def test_single_change_others_same(self):
"""Check that a single changed value is correctly recorded when there are no other differences"""
result = _audit_model(self.profile, dict(foo=None, wibble=0),
dict(foo='bar', wibble=0))
assert_not_equal(result, None,
"A change should result in a database object being "
"created")
saved_record = self.fetch_record_by_id(result)
eq_(saved_record['foo'], 'bar',
"The saved record should contain a single difference key")
ok_('wibble' not in saved_record, "There should be no "
"record of changes to the `wibble` key")
def test_multi_change_no_others(self):
"""Check that multiple changed values are correctly recorded when there are no other items"""
result = _audit_model(self.profile, dict(foo=None, wibble=0),
dict(foo='bar', wibble=1))
assert_not_equal(result, None,
"A change should result in a database object being "
"created")
saved_record = self.fetch_record_by_id(result)
eq_(saved_record['foo'], 'bar',
"The saved record should contain a difference for key `foo`")
eq_(saved_record['wibble'], 1,
"The saved record should contain a difference for key `wibble`")
def test_multi_change
|
Parallel-in-Time/pySDC
|
pySDC/implementations/sweeper_classes/verlet.py
|
Python
|
bsd-2-clause
| 7,323
| 0.002048
|
import numpy as np
from pySDC.core.Sweeper import sweeper
from pySDC.implementations.collocation_classes.gauss_lobatto import CollGaussLobatto
class verlet(sweeper):
"""
Custom sweeper class, implements Sweeper.py
Second-order sweeper using velocity-Verlet as base integrator
Attributes:
QQ: 0-to-node collocation matrix (second order)
QT: 0-to-node trapezoidal matrix
Qx: 0-to-node Euler half-step for position update
qQ: update rule for final value (if needed)
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'QI' not in params:
params['QI'] = 'IE'
if 'QE' not in params:
params['QE'] = 'EE'
# call parent's initialization routine
super(verlet, self).__init__(params)
# Trapezoidal rule, Qx and Double-Q as in the Boris-paper
[self.QT, self.Qx, self.QQ] = self.__get_Qd()
self.qQ = np.dot(self.coll.weights, self.coll.Qmat[1:, 1:])
def __get_Qd(self):
"""
Get integration matrices for 2nd-order SDC
Returns:
            QT: 0-to-node trapezoidal matrix
            Qx: 0-to-node Euler half-step for position update
            QQ: 0-to-node collocation matrix (second order)
"""
# set implicit and explicit Euler matrices
QI = self.get_Qdelta_implicit(self.coll, self.params.QI)
QE = self.get_Qdelta_explicit(self.coll, self.params.QE)
# trapezoidal rule
QT = 0.5 * (QI + QE)
# QT = QI
# Qx as in the paper
Qx = np.dot(QE, QT) + 0.5 * QE * QE
QQ = np.zeros(np.shape(self.coll.Qmat))
# if we have Gauss-Lobatto nodes, we can do a magic trick from the Book
# this takes Gauss-Lobatto IIIB and create IIIA out of this
if isinstance(self.coll, CollGaussLobatto):
for m in range(self.coll.num_nodes):
for n in range(self.coll.num_nodes):
QQ[m + 1, n + 1] = self.coll.weights[n] * (1.0 - self.coll.Qmat[n + 1, m + 1] /
self.coll.weights[m])
QQ = np.dot(self.coll.Qmat, QQ)
# if we do not have Gauss-Lobatto, just multiply Q (will not get a symplectic method, they say)
else:
QQ = np.dot(self.coll.Qmat, self.coll.Qmat)
return [QT, Qx, QQ]
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# gather all terms which are known already (e.g. from the previous iteration)
# get QF(u^k)
integral = self.integrate()
for m in range(M):
# get -QdF(u^k)_m
for j in range(1, M + 1):
integral[m].pos -= L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
integral[m].vel -= L.dt * self.QT[m + 1, j] * L.f[j]
# add initial value
integral[m].pos += L.u[0].pos
integral[m].vel += L.u[0].vel
# add tau if associated
if L.tau[m] is not None:
integral[m] += L.tau[m]
# do the sweep
for m in range(0, M):
# build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
L.u[m + 1] = P.dtype_u(integral[m])
for j in range(1, m + 1):
# add QxF(u^{k+1})
L.u[m + 1].pos += L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
L.u[m + 1].vel += L.dt * self.QT[m + 1, j] * L.f[j]
# get RHS with new positions
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
L.u[m + 1].vel += L.dt * self.QT[m + 1, m + 1] * L.f[m + 1]
# indicate presence of new values at this level
L.status.updated = True
# # do the sweep (alternative description)
# for m in range(0, M):
# # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
# L.u[m + 1] = P.dtype_u(integral[m])
# for j in range(1, m + 1):
# # add QxF(u^{k+1})
# L.u[m + 1].pos += L.dt * (L.dt * self.Qx[m + 1, j] * L.f[j])
#
# # get RHS with new positions
# L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
#
# for m in range(0, M):
# for n in range(0, M):
# L.u[m + 1].vel += L.dt * self.QT[m + 1, n + 1] * L.f[n + 1]
#
# # indicate presence of new values at this level
# L.status.updated = True
return None
def integrate(self):
"""
        Integrates the right-hand side
Returns:
list of dtype_u: containing the integral as values
"""
# get current level and problem description
L = self.level
P = L.prob
# create new instance of dtype_u, initialize values with 0
p = []
|
for m in range(1, self.coll.num_nodes + 1):
p.append(P.dtype_u(P.init, val=0.0))
# integrate RHS over all collocation nodes, RHS is here only f(x)!
for j in range(1, self.coll.num_nodes + 1):
p[-1].pos += L.dt * (L.dt * self.QQ[m, j] * L.f[j]) + L.dt * self.coll.Qmat[m, j] * L.u[0].vel
p[-1].vel += L.dt * self.coll.Qmat[m, j] * L.f[j]
# we need to set mass and charge here, too, since the code uses the integral to create new particles
p[-1].m = L.u[0].m
p[-1].q = L.u[0].q
return p
def compute_end_point(self):
"""
Compute u at the right point of the interval
The value uend computed here is a full evaluation of the Picard formulation (always!)
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# start with u0 and add integral over the full interval (using coll.weights)
if (self.coll.right_is_node and not self.params.do_coll_update):
# a copy is sufficient
L.uend = P.dtype_u(L.u[-1])
else:
L.uend = P.dtype_u(L.u[0])
for m in range(self.coll.num_nodes):
L.uend.pos += L.dt * (L.dt * self.qQ[m] * L.f[m + 1]) + L.dt * self.coll.weights[m] * L.u[0].vel
L.uend.vel += L.dt * self.coll.weights[m] * L.f[m + 1]
# remember to set mass and charge here, too
L.uend.m = L.u[0].m
L.uend.q = L.u[0].q
# add up tau correction of the full interval (last entry)
if L.tau[-1] is not None:
L.uend += L.tau[-1]
return None
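# Illustrative sketch only (not part of pySDC): the trapezoidal preconditioner
# built in __get_Qd() is simply the average of an implicit-Euler and an
# explicit-Euler matrix; hypothetical 2x2 lower-triangular stand-ins are used.
def _trapezoidal_rule_example():
    QI = np.array([[0.3, 0.0], [0.3, 0.4]])   # assumed implicit-Euler matrix
    QE = np.array([[0.0, 0.0], [0.3, 0.0]])   # assumed explicit-Euler matrix
    QT = 0.5 * (QI + QE)                       # same rule as in __get_Qd()
    assert np.allclose(QT, [[0.15, 0.0], [0.3, 0.2]])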
|
tleonhardt/machine_learning
|
optimize_me.py
|
Python
|
apache-2.0
| 437
| 0.011442
|
#!/usr/bin/env python
import numpy as np
import scipy.optimize as spo
def integer_optimize():
x = np.arange(1,101)
f = (x % 6)**2 % 7 - np.sin(x)
return x[np.argmax(f)]
def f(x):
return -x**4 + 1000 * x**3 - 20 * x**2 + 4*x -6
if __name__ == '__main__':
print("Integer optimium: x = {}\n".format(integer_optimize()))
max_x = spo.fmin(lambda x: -f(x), 0)
print("Rational optimum: x = {}\n".format(max_x))
|
kfdm/django-simplestats
|
quickstats/permissions.py
|
Python
|
mit
| 1,370
| 0.00073
|
from rest_framework import permissions
from . import models
class IsOwnerOrPublic(permissions.IsAuthenticatedOrReadOnly):
message = "Not object owner or public"
    def has_object_permission(self, request, view, obj):
        if request.user == obj.owner:
return True
if request.method in permissions.SAFE_METHODS:
return obj.public
return False
class IsWidgetOwnerOrPublic(permissions.IsAuthenticatedOrReadOnly):
message = "Not object owner or public"
def has_permission(self, request, view):
widget = models.Widget.objects.get(pk=view.kwargs["widget_pk"])
return IsOwnerOrPublic.has_object_permission(self, request, view, widget)
def has_object_permission(self, request, view, obj):
if request.user == obj.widget.owner:
return True
if request.method in permissions.SAFE_METHODS:
return obj.widget.public
return False
class CanSubscribe(permissions.IsAuthenticated):
def has_object_permission(self, request, view, obj):
if request.user == obj.owner:
return True
if obj.public:
return True
return False
class IsOwner(permissions.IsAuthenticated):
def has_object_permission(self, request, view, obj):
return request.user == obj.owner
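# Illustrative sketch only (not part of quickstats): these classes are meant
# to be attached to DRF views via `permission_classes`; the view and
# serializer names below are assumptions.
#
#     class WidgetViewSet(viewsets.ModelViewSet):
#         queryset = models.Widget.objects.all()
#         serializer_class = WidgetSerializer
#         permission_classes = [IsOwnerOrPublic]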
|
sebbASF/infrastructure-puppet
|
modules/git_self_serve/files/githubcron.py
|
Python
|
apache-2.0
| 4,335
| 0.00692
|
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, re, urllib, json, subprocess, base64  # base64 is needed by getJSON for the basic-auth header
import time
import urllib.request
import smtplib
from email.mime.text import MIMEText
# Function for fetching JSON via HTTPS
def getJSON(url, creds = None, cookie = None):
headers = {}
if creds and len(creds) > 0:
xcreds = creds.encode(encoding='ascii', errors='replace')
auth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
headers = {"Content-type": "application/json",
"Accept": "*/*",
"Authorization": "Basic %s" % auth
}
request = urllib.request.Request(url, headers = headers)
result = urllib.request.urlopen(request)
return json.loads(result.read().decode('utf-8', errors = 'replace'))
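# Hedged usage note (not in the original): an authenticated call would pass
# creds as "username:token"; getJSON base64-encodes it into the Authorization
# header, e.g. getJSON("https://api.github.com/user", creds="user:token").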
# Get the current queue
js = getJSON("https://reporeq.apache.org/queue.json")
created = 0
# If queue is valid:
if js:
print("analysing %u items" % len(js))
# For each item:
# - Check that it hasn't been mirrored yet
# - Check that a repo with this name doesn't exist already
# - Check that name is valid
# - Mirror repo if all is okay
for item in js:
# Make sure this is a GH integration request AND it's been mirrored more than a day ago, so GH caught up.
if not 'githubbed' in item and item['github'] == True and 'mirrordate' in item and item['mirrordate'] < (time.time()-86400):
reponame = item['name']
# Check valid name
if len(reponame) < 5 or reponame.find("..") != -1 or reponame.find("/") != -1:
print("Invalid repo name!")
continue
# Set some vars
notify = item['notify']
description = item['description'] if 'description' in item else "Unknown"
# Make sure the repo exists!
            if os.path.exists("/x1/git/mirrors/%s" % reponame):
print("%s is there, adding web hooks" % reponame)
try:
                    xreponame = reponame.replace(".git", "") # Cut off the .git part, so GH will not bork
inp = subprocess.check_output("/usr/local/etc/git_self_serve/add-webhook.sh %s" % xreponame, shell = True).decode('ascii', 'replace')
except subprocess.CalledProcessError as err:
print("Borked: %s" % err.output)
continue
else:
print("Repo doesn't exist, ignoring this request...sort of")
# Notify reporeq that we've GH'ed this repository!
print("Notifying https://reporeq.apache.org/ss.lua?githubbed=%s" % reponame)
request = urllib.request.Request("https://reporeq.apache.org/ss.lua?githubbed=%s" % reponame)
result = urllib.request.urlopen(request)
# Inform infra@ and private@$pmc that the mirror has been set up
msg = MIMEText("New repository %s has now had GitHub integration enabled!\n\nWith regards,\nApache Infrastructure." % (reponame))
msg['Subject'] = 'Github integration set up: %s' % reponame
msg['From'] = "git@apache.org"
msg['Reply-To'] = "users@infra.apache.org"
msg['To'] = "users@infra.apache.org, private@%s.apache.org" % item['pmc']
s = smtplib.SMTP(host='mail.apache.org', port=2025)
s.send_message(msg)
s.quit()
# We made a thing!
created += 1
print("All done for today! Made %u new repos" % created)
|
SanketDG/networkx
|
networkx/generators/degree_seq.py
|
Python
|
bsd-3-clause
| 27,099
| 0.005573
|
# -*- coding: utf-8 -*-
"""Generate graphs with a given degree sequence or expected degree sequence.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import heapq
from itertools import combinations
import math
from operator import itemgetter
import random
import networkx as nx
from networkx.utils import random_weighted_sample
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart <swart@lanl.gov>',
'Dan Schult <dschult@colgate.edu>'
'Joel Miller <joel.c.miller.research@gmail.com>',
'Nathan Lemons <nlemons@gmail.com>'
'Brian Cloteaux <brian.cloteaux@nist.gov>'])
__all__ = ['configuration_model',
'directed_configuration_model',
'expected_degree_graph',
'havel_hakimi_graph',
'directed_havel_hakimi_graph',
'degree_sequence_tree',
'random_degree_sequence_graph']
def configuration_model(deg_sequence,create_using=None,seed=None):
"""Return a random graph with the given degree sequence.
The configuration model generates a random pseudograph (graph with
parallel edges and self loops) by randomly assigning edges to
match the given degree sequence.
Parameters
----------
deg_sequence : list of integers
Each list entry corresponds to the degree of a node.
create_using : graph, optional (default MultiGraph)
Return graph of this type. The instance will be cleared.
seed : hashable object, optional
Seed for random number generator.
Returns
-------
G : MultiGraph
A graph with the specified degree sequence.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence.
Raises
------
NetworkXError
If the degree sequence does not have an even sum.
See Also
--------
is_valid_degree_sequence
Notes
-----
As described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
sequence does not have an even sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified.
The density of self-loops and parallel edges tends to decrease
as the number of nodes increases. However, typically the number
of self-loops will approach a Poisson distribution with a nonzero
mean, and similarly for the number of parallel edges. Consider a
node with k stubs. The probability of being joined to another stub of
the same node is basically (k-1)/N where k is the degree and N is
the number of nodes. So the probability of a self-loop scales like c/N
for some constant c. As N grows, this means we expect c self-loops.
Similarly for parallel edges.
References
----------
.. [1] M.E.J. Newman, "The structure and function of complex networks",
SIAM REVIEW 45-2, pp 167-256, 2003.
Examples
--------
>>> from networkx.utils import powerlaw_sequence
>>> z=nx.utils.create_degree_sequence(100,powerlaw_sequence)
>>> G=nx.configuration_model(z)
To remove parallel edges:
>>> G=nx.Graph(G)
To remove self loops:
>>> G.remove_edges_from(G.selfloop_edges())
"""
if sum(deg_sequence) % 2 != 0:
msg = 'Invalid degree sequence: sum of degrees must be even, not odd'
raise nx.NetworkXError(msg)
if create_using is None:
create_using = nx.MultiGraph()
elif create_using.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
if not seed is None:
random.seed(seed)
# start with empty N-node graph
N=len(deg_sequence)
# allow multiedges and selfloops
G=nx.empty_graph(N,create_using)
if N==0 or max(deg_sequence)==0: # done if no edges
return G
# build stublist, a list of available degree-repeated stubs
# e.g. for deg_sequence=[3,2,1,1,1]
# initially, stublist=[1,1,1,2,2,3,4,5]
# i.e., node 1 has degree=3 and is repeated 3 times, etc.
stublist=[]
for n in G:
for i in range(deg_sequence[n]):
stublist.append(n)
# shuffle stublist and assign pairs by removing 2 elements at a time
random.shuffle(stublist)
while stublist:
n1 = stublist.pop()
n2 = stublist.pop()
G.add_edge(n1,n2)
G.name="configuration_model %d nodes %d edges"%(G.order(),G.size())
return G
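# Illustrative sketch only (not part of networkx): the stub-list construction
# used in configuration_model(), isolated for the degree sequence [3, 2, 1, 1, 1].
def _stub_list_example():
    deg_sequence = [3, 2, 1, 1, 1]
    stublist = [n for n, d in enumerate(deg_sequence) for _ in range(d)]
    # node 0 contributes 3 stubs, node 1 two, nodes 2-4 one each
    assert stublist == [0, 0, 0, 1, 1, 2, 3, 4]
    return stublist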
def directed_configuration_model(in_degree_sequence,
out_degree_sequence,
create_using=None,seed=None):
"""Return a directed_random graph with the given degree sequences.
The configuration model generates a random directed pseudograph
(graph with parallel edges and self loops) by randomly assigning
edges to match the given degree sequences.
Parameters
----------
in_degree_sequence : list of integers
Each list entry corresponds to the in-degree of a node.
out_degree_sequence : list of integers
Each list entry corresponds to the out-degree of a node.
create_using : graph, optional (default MultiDiGraph)
Return graph of this type. The instance will be cleared.
seed : hashable object, optional
Seed for random number generator.
Returns
-------
G : MultiDiGraph
A graph with the specified degree sequences.
Nodes are labeled starting at 0 with an index
corresponding to the position in deg_sequence.
Raises
------
NetworkXError
If the degree sequences do not have the same sum.
See Also
--------
configuration_model
Notes
-----
Algorithm as described by Newman [1]_.
A non-graphical degree sequence (not realizable by some simple
graph) is allowed since this function returns graphs with self
loops and parallel edges. An exception is raised if the degree
sequences does not have the same sum.
This configuration model construction process can lead to
duplicate edges and loops. You can remove the self-loops and
parallel edges (see below) which will likely result in a graph
that doesn't have the exact degree sequence specified. This
"finite-size effect" decreases as the size of the graph increases.
References
----------
.. [1] Newman, M. E. J. and Strogatz, S. H. and Watts, D. J.
Random graphs with arbitrary degree distributions and their applications
Phys. Rev. E, 64, 026118 (2001)
Examples
--------
>>> D=nx.DiGraph([(0,1),(1,2),(2,3)]) # directed path graph
>>> din=list(d for n, d in D.in_degree())
>>> dout=list(d for n, d in D.out_degree())
>>> din.append(1)
>>> dout[0]=2
>>> D=nx.directed_configuration_model(din,dout)
To remove parallel edges:
>>> D=nx.DiGraph(D)
    To remove self loops:
>>> D.remove_edges_from(D.selfloop_edges())
"""
if not sum(in_degree_sequence) == sum(out_degree_sequence):
raise nx.NetworkXError('Invalid degree sequences. '
'Sequences must have equal sums.')
if create_using is None:
create_using = nx.MultiDiGraph()
if not seed is None:
random.seed(seed)
    nin=len(in_degree_sequence)
nout=len(out_degree_sequence)
# pad in- or out-degree sequence with zeros to match lengths
if nin>nout:
out_degree_sequence.extend((nin-nout)*[0])
else:
in_degree_sequence.extend((nout-nin)*[0])
# start with empty N-node graph
N=len(in_degree_sequence)
# allow multiedges and selfloops
G=nx.empty_graph(N,create_using)
if N==0 or ma
|
ml-lab/pylearn2
|
setup.py
|
Python
|
bsd-3-clause
| 3,408
| 0
|
import sys
import warnings
from setuptools import setup, find_packages, Extension
import numpy
if 'develop' not in sys.argv:
raise NotImplementedError("since Pylearn2 is under rapid, active "
"development, `python setup.py install` is "
"intentionally disabled to prevent other "
"problem
|
s. Run `python setup.py develop` to "
"install Pylearn2.")
# Detailed notes:
# This modification of setup.py is designed to prevent two problems
# novice users frequently encountered:
# 1) Novice users frequently used "git clone" to get a copy of Pylearn2,
#    then ran setup.py install, then would use "git pull" to get a bug fix
# but would forget to run "setup.py install" again.
# 2) Novice users frequently used "sudo" to make an "installed" copy of
# Pylearn2, then try to use the tutorials in the "scripts" directory in
# the "installed" copy. Since the tutorials are then in a directory owned
# by root and need to create files in the local directory, some users
# would run the tutorials using "sudo". Besides being dangerous, this
# created additional problems because "sudo" does not just run the script
# with root privileges, it actually changes the user to root, and thus
# pylearn2-related environment variables configured in the user's
# .bashrc would no longer be available.
# Installing only in development mode avoids both problems because there
# is now only a single copy of the code and it is stored in a directory
# editable by the user.
# Note that none of the Pylearn2 installation documentation recommends
# using setup.py install or pip. Most of the Pylearn2 developers just
# obtain Pylearn2 via git clone and then add it to their PYTHONPATH
# manually.
# Because many people neglected to run the pylearn2/utils/setup.py script
# separately, we compile the necessary Cython extensions here but because
# Cython is not a strict dependency, we issue a warning when it is not
# available.
try:
from Cython.Distutils import build_ext
cython_available = True
except ImportError:
warnings.warn("Cython was not found and hence pylearn2.utils._window_flip "
"and pylearn2.utils._video and classes that depend on them "
"(e.g. pylearn2.train_extensions.window_flip) will not be "
"available")
cython_available = False
if cython_available:
cmdclass = {'build_ext': build_ext}
ext_modules = [Extension("pylearn2.utils._window_flip",
["pylearn2/utils/_window_flip.pyx"],
include_dirs=[numpy.get_include()]),
Extension("pylearn2.utils._video",
["pylearn2/utils/_video.pyx"],
include_dirs=[numpy.get_include()])]
else:
cmdclass = {}
ext_modules = []
setup(
cmdclass=cmdclass,
ext_modules=ext_modules,
name='pylearn2',
version='0.1dev',
packages=find_packages(),
description='A machine learning library built on top of Theano.',
license='BSD 3-clause license',
long_description=open('README.rst').read(),
install_requires=['numpy>=1.5', 'theano', 'pyyaml', 'argparse'],
package_data={
'': ['*.cu', '*.cuh', '*.h'],
},
)
|
thomasdunton/python-html-assert
|
pha/__init__.py
|
Python
|
mit
| 351
| 0
|
from matchers import (
linear_match as html_match,
prune_unmatched_elements
)
from spec import (
a,
accordion,
acc_body,
acc_group,
acc_heading,
div,
elem,
heading,
html,
img,
input,
option,
option_xhtml,
select,
text,
)
from formatters import (
pretty_html,
pretty_spec
)
|
Avinash-Raj/appengine-django-skeleton
|
todo/tests.py
|
Python
|
bsd-3-clause
| 692
| 0
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import http
from django.test import TestCase
from . import views
# Tests go here
|
rmoskal/e-springpad
|
collection_cache.py
|
Python
|
mit
| 845
| 0.014201
|
import uuid
from google.appengine.api import memcache
class CollectionCache:
def __init__(self, timeout=480, hash=None):
self.contents = [];
if hash:
self.contents = memcache.get(hash)
        self.timeout = timeout
def add(self, item):
hash = uuid.uuid1().hex
memcache.add(hash, item, time = self.timeout)
self.contents.append(hash)
return hash
def commit(self):
hash = uuid.uuid1().hex
memcache.add(hash, self.contents, time = self.timeout)
return hash
def fetchAll(self):
        if not self.contents:
return []
return [[key,memcache.get(key)] for key in self.contents]
def fetch(self):
for key in self.contents:
item = memcache.get(key)
if item:
yield key,item
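# Illustrative usage of the cache above (a hedged sketch, not part of the
# original module; the item value and timeout are placeholders):
#
#   cache = CollectionCache(timeout=480)
#   key = cache.add({'title': 'note'})       # item stored in memcache under a uuid hex key
#   commit_key = cache.commit()              # list of item keys stored under its own key
#   restored = CollectionCache(hash=commit_key)
#   restored.fetchAll()                      # -> [[key, item], ...] for every cached key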
|
HoussemCharf/FunUtils
|
Fun Scripts/Player.py
|
Python
|
mit
| 351
| 0.005698
|
from pygame import *
'''
The music must be in the same folder/project to work
You have to install pygame
command: pip install pygame
'''
mixer.init()
msc = input('Song Name: ')
mixer.music.load('{}.mp3'.format(msc))
mixer.music.play()
while mixer.music.get_busy():
time.Clock().tick(10)
    if input() == 'pause':
break;
|
roadmapper/ansible
|
lib/ansible/modules/network/cloudengine/ce_mlag_interface.py
|
Python
|
gpl-3.0
| 36,992
| 0.001892
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_mlag_interface
version_added: "2.4"
short_description: Manages MLAG interfaces on HUAWEI CloudEngine switches.
description:
- Manages MLAG interface attributes on HUAWEI CloudEngine switches.
author:
- Li Yanfeng (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
eth_trunk_id:
description:
- Name of the local M-LAG interface. The value is ranging from 0 to 511.
dfs_group_id:
description:
- ID of a DFS group.The value is 1.
default: present
mlag_id:
description:
- ID of the M-LAG. The value is an integer that ranges from 1 to 2048.
mlag_system_id:
description:
- M-LAG global LACP system MAC address. The value is a string of 0 to 255 characters. The default value
is the MAC address of the Ethernet port of MPU.
mlag_priority_id:
description:
- M-LAG global LACP system priority. The value is an integer ranging from 0 to 65535.
The default value is 32768.
interface:
description:
- Name of the interface that enters the Error-Down state when the peer-link fails.
The value is a string of 1 to 63 characters.
mlag_error_down:
description:
- Configure the interface on the slave device to enter the Error-Down state.
choices: ['enable','disable']
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: mlag interface module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Set interface mlag error down
ce_mlag_interface:
interface: 10GE2/0/1
mlag_error_down: enable
provider: "{{ cli }}"
- name: Create mlag
ce_mlag_interface:
eth_trunk_id: 1
dfs_group_id: 1
mlag_id: 4
provider: "{{ cli }}"
- name: Set mlag global attribute
ce_mlag_interface:
mlag_system_id: 0020-1409-0407
mlag_priority_id: 5
provider: "{{ cli }}"
- name: Set mlag interface attribute
ce_mlag_interface:
eth_trunk_id: 1
mlag_system_id: 0020-1409-0400
mlag_priority_id: 3
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: { "interface": "eth-trunk1",
"mlag_error_down": "disable",
"state": "present"
}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: { "mlagErrorDownInfos": [
{
"dfsgroupId": "1",
"portName": "Eth-Trunk1"
}
]
}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {}
updates:
description: command sent to the device
returned: always
type: list
sample: { "interface eth-trunk1",
"undo m-lag unpaired
|
-port suspend"}
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import load_config
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_MLAG_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance>
%s
</mlagInstance>
</mlagInstances>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="merge">
<dfsgroupId>%s</dfsgroupId>
<mlagId>%s</mlagId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="delete">
<dfsgroupId>%s</dfsgroupId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_GET_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</filter>
"""
CE_NC_SET_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf operation="merge">
"""
CE_NC_SET_LACP_MLAG_INFO_TAIL = """
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</config>
"""
CE_NC_GET_GLOBAL_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</filter>
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal operation="merge">
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL = """
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</config>
"""
CE_NC_GET_MLAG_ERROR_DOWN_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown>
<dfsgroupId></dfsgroupId>
<portName></portName>
<portState></portState>
</errordown>
</errordowns>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="merge">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="delete">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
def get_interface_
|
jamespcole/home-assistant
|
homeassistant/components/serial_pm/sensor.py
|
Python
|
apache-2.0
| 2,805
| 0
|
"""
Support for particulate matter sensors connected to a serial port.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.serial_pm/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
REQUIREMENTS = ['pmsensor==0.4']
_LOGGER = logging.getLogger(__name__)
CONF_SERIAL_DEVICE = 'serial_device'
CONF_BRAND = 'brand'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_BRAND): cv.string,
vol.Required(CONF_SERIAL_DEVICE): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
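# Illustrative configuration.yaml entry for this platform (a sketch; the
# serial device path and brand value are placeholders -- the brand must match
# a key of pmsensor's SUPPORTED_SENSORS):
#
# sensor:
#   - platform: serial_pm
#     serial_device: /dev/ttyUSB0
#     brand: <supported brand>
#     name: Particulate matter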
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available PM sensors."""
from pmsensor import serial_pm as pm
try:
coll = pm.PMDataCollector(
config.get(CONF_SERIAL_DEVICE),
pm.SUPPORTED_SENSORS[config.get(CONF_BRAND)]
)
except KeyError:
_LOGGER.error("Brand %s not supported\n supported brands: %s",
config.get(CONF_BRAND), pm.SUPPORTED_SENSORS.keys())
return
except OSError as err:
_LOGGER.error("Could not open serial connection to %s (%s)",
config.get(CONF_SERIAL_DEVICE), err)
return
dev = []
for pmname in coll.supported_values():
if config.get(CONF_NAME) is not None:
name = '{} PM{}'.format(config.get(CONF_NAME), pmname)
else:
name = 'PM{}'.format(pmname)
dev.append(ParticulateMatterSensor(coll, name, pmname))
add_entities(dev)
class ParticulateMatterSensor(Entity):
"""Representation of an Particulate matter sensor."""
def __init__(self, pmDataCollector, name, pmname):
"""Initialize a new PM sensor."""
self._name = name
self._pmname = pmname
self._state = None
self._collector = pmDataCollector
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return "µg/m³"
def update(self):
"""Read from sensor and update the state."""
_LOGGER.debug("Reading data from PM sensor")
try:
self._state = self._collector.read_data()[self._pmname]
except KeyError:
_LOGGER.error("Could not read PM%s value", self._pmname)
def should_poll(self):
"""Sensor needs polling."""
return True
|
Mezgrman/mezgrmanDE
|
mezgrman/views.py
|
Python
|
agpl-3.0
| 434
| 0.013825
|
from django.http import HttpResponse
from django.conf import settings
import json
def javascript_variables(request):
variables = {
'STATIC_PREFIX': settings.STATIC_URL
}
var_catalog = "// VARIABLE CATALOG FOR DJANGO VARIABLES\n\n"
    var_catalog += "\n".join(("%s = %s;" % (key, json.dumps(value)) for key, value in variables.items()))
    return HttpResponse(var_catalog, content_type = 'text/javascript')
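# Illustrative response body produced by the view above, assuming
# settings.STATIC_URL == "/static/" (the value is a placeholder):
#
#   // VARIABLE CATALOG FOR DJANGO VARIABLES
#
#   STATIC_PREFIX = "/static/";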
|
kahowell/sixoclock
|
sixoclock/cli.py
|
Python
|
gpl-3.0
| 6,455
| 0.003098
|
# Copyright 2017 Kevin Howell
#
# This file is part of sixoclock.
#
# sixoclock is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sixoclock is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sixoclock. If not, see <http://www.gnu.org/licenses/>.
import argparse
import humanize
import logging
import os.path
import time
from sixoclock.config import Configuration
from sixoclock.backends.file import FileBackend
from sixoclock.file import File
class Cli:
def __init__(self):
config = os.path.join(os.path.expanduser('~'), '.sixoclock.yml')
self.configuration = Configuration(config)
parser = argparse.ArgumentParser(description='Simple personal backups.')
parser.add_argument('--no-log', action='store_true', help='do not log')
parser.add_argument('--log-file', help='log file')
parser.set_defaults(function=lambda args: parser.print_usage(), log_file=None)
subparsers = parser.add_subparsers(title='commands')
backup_parser = subparsers.add_parser('backup', help='perform a backup')
backup_parser.add_argument('-c', '--collection', help='backup a specific collection')
backup_parser.add_argument('--dry-run', action='store_true', help='do not backup, show what would happen')
backup_parser.set_defaults(function=self.backup)
query_parser = subparsers.add_parser('query', help='find a file in configured sources or mirrors')
query_parser.add_argument('-c', '--collection', help='look only in a specific collection')
query_parser.add_argument('-m', '--mirror', help='look only in a specific mirror')
query_parser.add_argument('--path', help='relative path of the file')
query_parser.add_argument('--filename', help='base filename (ex. foo.txt)')
query_parser.add_argument('--file', help='file to use as a basis')
query_parser.add_argument('--md5', help='md5 hash')
query_parser.add_argument('--sha1', help='sha1 hash')
query_parser.add_argument('--sha256', help='sha256 hash')
query_parser.add_argument('--size', help='file size in bytes')
query_parser.set_defaults(function=self.query)
status_parser = subparsers.add_parser('status', help='show backup status')
status_parser.add_argument('-c', '--collection', help='show status of a specific collection')
status_parser.set_defaults(function=self.status)
refresh_parser = subparsers.add_parser('refresh-cache', help='refresh cache')
refresh_parser.add_argument('-c', '--collection', help='refresh mirror caches for a specific collection')
refresh_parser.add_argument('-m', '--mirror', help='refresh mirror caches for a specific mirror')
refresh_parser.add_argument('--rebuild', action='store_true', help='remove entries and rebuild the cache')
refresh_parser.set_defaults(function=self.refresh_cache)
for name, backend in self.configuration.backends.items():
if backend.has_subparser():
backend_parser = subparsers.add_parser(name, help='{} backend subcommands'.format(name))
backend.contribute_to_subparser(backend_parser)
self.parser = parser
def main(self):
args = self.parser.parse_args()
log_filename = args.log_file or 'sixoclock.{}.log'.format(int(time.time()))
if not args.no_log:
logging.basicConfig(filename=log_filename, level=logging.INFO)
args.function(args)
def backup(self, args):
for name, collection in self.configuration.collections.items():
if args.collection and name != collection:
continue
print('Backing up collection: {}'.format(name))
actions = collection.backup(args.dry_run)
if args.dry_run:
for action in actions:
print('Would back up {} to {}'.format(action.file, action.destination))
def query(self, args):
filters = []
if args.path:
filters.append(File.path == args.path)
if args.file:
filebackend = FileBackend()
file = filebackend.get(args.file)
filters.append(File.sha1 == file.sha1)
filters.append(File.path.like('%/{}'.format(os.path.basename(args.file))))
if args.filename:
filters.append(File.path.like('%/{}'.format(args.filename)))
if args.md5:
filters.append(File.md5 == args.md5)
if args.sha1:
filters.append(File.sha1 == args.sha1)
if args.sha256:
filters.append(File.sha256 == args.sha256)
if args.size:
            filters.append(File.size == args.size)
collections = self.configuration.collections.values()
if args.collection:
collections = [self.configuration.collections[args.collection]]
if args.mirror:
filters.append(File.mirror_uri == args.mirror)
for collection in collections:
collection.refresh_cache()
for match in collection.query(*filters):
print('Match: {}'.format(match.uri))
def status(self, args):
for name, collection in self.configuration.collections.items():
if args.collection and name != args.collection:
continue
print('Collection: {}'.format(name))
stats = collection.stats()
print(' # Source files: {}'.format(stats.source_file_count))
size = humanize.naturalsize(stats.size)
percentage = 100.0
if stats.size > 0:
percentage = stats.backed_up_size / stats.size
print(' Total size: {}, {}% backed up'.format(size, percentage))
def refresh_cache(self, args):
for name, collection in self.configuration.collections.items():
if args.collection and name != args.collection:
continue
            collection.refresh_cache(mirror=args.mirror, reset=args.rebuild)
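# Illustrative command-line invocations (a sketch; they assume an entry point
# that instantiates Cli and calls main(), and use only the subcommands and
# flags defined in __init__ above):
#
#   sixoclock backup --dry-run
#   sixoclock query --filename foo.txt
#   sixoclock status -c documents
#   sixoclock refresh-cache --rebuild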
|
bdh1011/wau
|
app.py
|
Python
|
mit
| 4,092
| 0.000244
|
import os
import random
import time
from flask import Flask, request, render_template, session, flash, redirect, \
url_for, jsonify
from flask.ext.mail import Mail, Message
from flask.ext.sqlalchemy import SQLAlchemy
from celery import Celery
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top-secret!'
# Flask-Mail configuration
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = "bdh931101@gmail.com"
app.config['MAIL_PASSWORD'] = "1Alzkdpf*^^*go"
app.config['MAIL_DEFAULT_SENDER'] = 'bdh931101@gmail.com'
# Celery configuration
app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
app.config.from_object(os.environ['APP_SETTINGS'])
db = SQLAlchemy(app)
from .models import MapInfo
# Initialize extensions
mail = Mail(app)
# Initialize Celery
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
@celery.task
def send_async_email(msg):
"""Background task to send an email with Flask-Mail."""
with app.app_context():
mail.send(msg)
@celery.task(bind=True)
def long_task(self):
"""Background task that runs a long function with progress reports."""
verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']
adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']
noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']
message = ''
total = random.randint(10, 50)
for i in range(total):
if not message or random.random() < 0.25:
message = '{0} {1} {2}...'.format(random.choice(verb),
random.choice(adjective),
random.choice(noun))
self.update_state(state='PROGRESS',
meta={'current': i, 'total': total,
'status': message})
time.sleep(1)
return {'current': 100, 'total': 100, 'status': 'Task completed!',
'result': 42}
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html', email=session.get('email', ''))
email = request.form['email']
session['email'] = email
# send the email
msg = Message('Hello from Flask',
recipients=[request.form['email']])
msg.body = 'This is a test email sent from a background Celery task.'
if request.form['submit'] == 'Send':
# send right away
send_async_email.delay(msg)
flash('Sending email to {0}'.format(email))
else:
# send in one minute
send_async_email.apply_async(args=[msg], countdown=60)
flash('An email will be sent to {0} in one minute'.format(email))
return redirect(url_for('index'))
@app.route('/longtask', methods=['POST'])
def longtask():
task = long_task.apply_async()
return jsonify({}), 202, {'Location': url_for('taskstatus',
task_id=task.id)}
@app.route('/status/<task_id>')
def taskstatus(task_id):
task = long_task.AsyncResult(task_id)
if task.state == 'PENDING':
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
        # something went wrong in the background job
response = {
'state': task.state,
            'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return jsonify(response)
if __name__ == '__main__':
app.run(debug=True)
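# Illustrative client flow against the routes defined above (a sketch; the
# task id is whatever Celery assigns):
#
#   POST /longtask          -> 202 Accepted with a Location: /status/<task_id> header
#   GET  /status/<task_id>  -> JSON such as {"state": "PROGRESS", "current": 7,
#                              "total": 30, "status": "Loading silent orbiter..."}
#
# Polling the status URL until the state is no longer PENDING or PROGRESS
# exposes the updates emitted by long_task above.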
|
lukecwik/incubator-beam
|
sdks/python/apache_beam/io/gcp/gcsio_overrides.py
|
Python
|
apache-2.0
| 2,063
| 0.003393
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import logging
import math
import time
from apache_beam.metrics.metric import Metrics
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
from apitools.base.py import util
_LOGGER = logging.getLogger(__name__)
class GcsIOOverrides(object):
"""Functions for overriding Google Cloud Storage I/O client."""
  _THROTTLED_SECS = Metrics.counter('StorageV1', "cumulativeThrottlingSeconds")
@classmethod
def retry_func(cls, retry_args):
# handling GCS download throttling errors (BEAM-7424)
if (isinstance(retry_args.exc, exceptions.BadStatusCodeError) and
retry_args.exc.status_code == http_wrapper.TOO_MANY_REQUESTS):
_LOGGER.debug(
'Caught GCS quota error (%s), retrying.', retry_args.exc.status_code)
else:
return http_wrapper.HandleExceptionsAndRebuildHttpConnections(retry_args)
http_wrapper.RebuildHttpConnections(retry_args.http)
_LOGGER.debug(
'Retrying request to url %s after exception %s',
retry_args.http_request.url,
retry_args.exc)
sleep_seconds = util.CalculateWaitForRetry(
retry_args.num_retries, max_wait=retry_args.max_retry_wait)
cls._THROTTLED_SECS.inc(math.ceil(sleep_seconds))
time.sleep(sleep_seconds)
|
rolisz/hw3
|
LFTC/L2/lexer.py
|
Python
|
bsd-3-clause
| 6,145
| 0.002929
|
#!/usr/bin/python
from fsm import parse_automaton, accept
import re
__author__ = 'Roland'
import sys
keywords = ['float', 'char', 'print', 'input', 'break', 'continue', 'return', 'def', 'if', 'elif',
'else', 'while', 'or', 'and', 'not']
operators = ['=', '<', '>', '==', '>=', '<=', '!=', '+', '-', '*', '/', '%']
separators = ['[', ']', '(', ')', ',', ':']
codif = ['var', 'const', '\n', 'indent', 'dedent'] + keywords + operators + separators
def error(line_nr, msg):
"""
Show an error message `msg` found at line number `line_nr`
"""
print("Lexical error at line %d: %s" % (line_nr, msg))
def value_or_none(tree):
"""
Helper function to return string, even if given a tree, string or None
"""
if tree is None:
return 'None'
else:
if type(tree) == str:
return tree
return str(tree.value)
class binary_tree(object):
"""
Binary search tree. It remembers the order in which elements were added.
"""
def __init__(self, value):
"""
Constructor
"""
self.value = value
if self.value:
self.elements = [value]
else:
self.elements = []
self.left = None
self.right = None
def add(self, value):
"""
Add `value` to the tree to the correct place
"""
if self.value is None:
self.value = value
elif value < self.value:
if self.left:
self.left.add(value)
else:
self.left = binary_tree(value)
else:
if self.right:
self.right.add(value)
else:
self.right = binary_tree(value)
def __contains__(self, value):
"""
Search for `value` in the tree.
"""
if value == self.value:
return True
return (self.left and value in self.left) or (self.right and value in self.right)
def index(self, value):
"""
Return the parent and sibling node of `value`. Return None if it is not found,
and (None, None) for root node.
"""
if self.value == value:
return (None, None)
if self.right and value == self.right.value:
return self.value, self.left
if self.left and value == self.left.value:
return self.value, self.right
if self.left and value in self.left:
return self.left.index(value)
if self.right and value in self.right:
return self.right.index(value)
def __str__(self):
"""
String representation of the tree, using a table with parent and sibling relations.
"""
s = ""
for i, element in enumerate(self.elements):
parent, sibling = self.index(element)
s += (str(i) + " | " + str(element) + " | " + value_or_none(parent) + " | " + value_or_none(sibling) + "\n")
return s
def get_poz(atom, ts):
"""
Get the position of `atom` in the tree `ts`, and insert it if it's not in the tree.
"""
if atom not in ts:
ts.add(atom)
ts.elements.append(atom)
parent, sibling = ts.index(atom)
    return ts.elements.index(atom)
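# Illustrative behaviour of get_poz and the binary_tree symbol table above
# (a sketch; the atoms are arbitrary examples):
#
#   >>> ts = binary_tree(None)
#   >>> get_poz('x', ts)
#   0
#   >>> get_poz('a', ts)
#   1
#   >>> get_poz('x', ts)        # already present, same position is returned
#   0
#   >>> print(ts)               # index | value | parent | sibling table
#   0 | x | None | None
#   1 | a | x | None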
var_lang = ["i a-z s B",
"i A-Z s B",
"s a-z s F",
"s A-z s F",
"s 0-9 s F",
"s [ t",
"t 0-9 f",
"f 0-9 f",
"f ] l F"]
var_aut = parse_automaton(var_lang)
num_lang = ["i 0 s B",
"i 1-9 t B",
"s . n",
"t 0-9 f", "t . n", "f 0-9 f", "f . n", "n 0-9 n F"]
num_aut = parse_automaton(num_lang)
def lexer(program):
"""
Function to do the actual lexing.
"""
ts_const = binary_tree(None)
ts_ident = binary_tree(None)
fip = []
indentation = [0]
for i, line in enumerate(program.splitlines()):
indent_level = len(line) - len(line.lstrip())
if indent_level != indentation[-1]:
if indent_level > indentation[-1]:
indentation.append(indent_level)
fip.append((codif.index('indent'), 0))
else:
while len(indentation) and indentation[-1] != indent_level:
fip.append((codif.index('dedent'), 0))
indentation.pop()
if len(indentation) == 0:
error(i, "incorrect indentation")
in_string = ""
for atom in re.split("( |=|<|>|==|>=|<=|!=|\+|-|\*|/|%|\[|\]|\(|\)|,|:)", line):
if len(atom.strip()) == 0 and not in_string:
continue
if '"' in atom:
if in_string:
in_string += atom
if re.search('[^ "a-zA-Z0-9]', in_string):
error(i, " invalid character in string constant")
continue
fip.append((1, get_poz(in_string, ts_const)))
in_string = ""
continue
else:
in_string = atom
continue
if in_string:
in_string += atom
continue
if atom in keywords or atom in operators or atom in separators:
fip.append((codif.index(atom), 0))
else:
if accept(*var_aut, string=atom) == True:
fip.append((0, get_poz(atom, ts_ident)))
elif accept(*num_aut, string=atom) == True:
fip.append((1, get_poz(atom, ts_const)))
else:
error(i, " unidentified expression " + atom)
if in_string:
error(i, " unterminated string constant ")
fip.append((codif.index('\n'), 0))
return fip, ts_const, ts_ident
if __name__ == "__main__":
if len(sys.argv) == 1:
print("You must give file to analyze as argument")
file = sys.argv[1]
f = open(file, "rb")
fip, ts_const, ts_ident = lexer(f.read())
print(fip)
print(ts_const)
print(ts_ident)
|
wglass/lighthouse
|
lighthouse/haproxy/stanzas/stanza.py
|
Python
|
apache-2.0
| 2,087
| 0
|
import logging
from ..directives import directives_by_section
logger = logging.getLogger(__name__)
class Stanza(object):
"""
Subclass for config file stanzas.
In an HAProxy config file, a stanza is in the form of::
stanza header
directive
directive
directive
Stanza instances have a `header` attribute for the header and a list of
`lines`, one for each directive line.
"""
def __init__(self, section_name):
self.section_name = section_name
self.header = section_name
self.lines = []
def add_lines(self, lines):
"""
Simple helper method for adding multiple lines at once.
"""
for line in lines:
self.add_line(line)
def add_line(self, line):
"""
Adds a given line string to the list of lines, validating the line
first.
"""
if not self.is_valid_line(line):
logger.warn(
"Invalid line for %s section: '%s'",
self.section_name, line
)
return
self.lines.append(line)
def is_valid_line(self, line):
"""
Validates a given line against the associated "section" (e.g. 'global'
or 'frontend', etc.) of a stanza.
If a line represents a directive that shouldn't be within the stanza
it is rejected. See the `directives.json` file for a condensed look
at valid directives based on section.
"""
adjusted_line = line.strip().lower()
return any([
adjusted_line.startswith(directive)
            for directive in directives_by_section[self.section_name]
])
def __str__(self):
"""
Returns the string representation of a Stanza, meant for use in
config file content.
if no lines are defined an empty string is returned.
""
|
"
if not self.lines:
return ""
return self.header + "\n" + "\n".join([
"\t" + line
for line in self.lines
])
|
mudler/entropy
|
lib/entropy/fetchers.py
|
Python
|
gpl-2.0
| 52,462
| 0.003298
|
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Transceivers Fetchers submodule}.
"""
import os
import errno
import sys
import time
try:
import httplib
except ImportError:
# python 3.x
import http.client as httplib
import hashlib
import socket
import pty
import subprocess
import threading
import contextlib
import base64
import ssl
from entropy.const import const_is_python3, const_file_readable
if const_is_python3():
import urllib.request as urlmod
import urllib.error as urlmod_error
else:
import urllib2 as urlmod
import urllib2 as urlmod_error
from entropy.exceptions import InterruptError
from entropy.tools import print_traceback, \
convert_seconds_to_fancy_output, bytes_into_human, spliturl, \
add_proxy_opener, md5sum
from entropy.const import etpConst, const_isfileobj, const_debug_write
from entropy.output import TextInterface, darkblue, darkred, purple, blue, \
brown, darkgreen, red
from entropy.i18n import _, ngettext
from entropy.misc import ParallelTask
from entropy.core.settings.base import SystemSettings
class UrlFetcher(TextInterface):
"""
Entropy single URL fetcher. It supports what Python's urllib2 supports,
plus resuming, proxies and custom user agents. No external tools
dependencies are required (including wget).
"""
# this dict must be kept in sync with
# the __supported_uris variable below
# until plugins support is implemented
_supported_differential_download = {
'file': False,
'http': False,
'https': False,
'ftp': False,
'ftps': False,
'rsync': True,
'ssh': True,
}
GENERIC_FETCH_ERROR = "-3"
TIMEOUT_FETCH_ERROR = "-4"
GENERIC_FETCH_WARN = "-2"
def __init__(self, url, path_to_save, checksum = True,
show_speed = True, resume = True,
abort_check_func = None, disallow_redirect = False,
thread_stop_func = None, speed_limit = None,
timeout = None, download_context_func = None,
pre_download_hook = None, post_download_hook = None,
http_basic_user = None, http_basic_pwd = None,
https_validate_cert = True):
"""
Entropy URL downloader constructor.
@param url: download URL (do not URL-encode it!)
@type url: string
@param path_to_save: file path where to save downloaded data
@type path_to_save: string
@keyword checksum: return md5 hash instead of status code
@type checksum: bool
@keyword show_speed: show download speed
@type show_speed: bool
@keyword resume: enable resume support
@type resume: bool
@keyword abort_check_func: callback used to stop download, it has to
raise an exception that has to be caught by provider application.
This exception will be considered an "abort" request.
@type abort_check_func: callable
@keyword disallow_redirect: disallow automatic HTTP redirects
@type disallow_redirect: bool
@keyword thread_stop_func: callback used to stop download, it has to
raise an exception that has to be caught by provider application.
This exception will be considered a "stop" request.
@type thread_stop_func: callable
@keyword speed_limit: speed limit in kb/sec
@type speed_limit: int
@keyword timeout: custom request timeout value (in seconds), if None
the value is read from Entropy configuration files.
@type timeout: int
@keyword download_context_func: if not None, it must be a function
exposing a context manager and taking a path (the download path)
as argument. This can be used to implement locking on files to be
downloaded.
@type download_context_func: callable
@keyword pre_download_hook: hook called before starting the download
process, inside the download_context_func context. This can be
used to verify if the download is actually needed or just return.
            If the returned value is not None, the download method will return
that value. The function takes a path (the download path) and the
download id as arguments.
@type pre_download_hook: callable
        @keyword post_download_hook: hook called after the download is complete,
inside the download_context_func context. This can be used to verify
the integrity of the downloaded data.
The function takes a path (the download path) and the download
status and the download id as arguments.
@type post_download_hook: callable
"""
self.__supported_uris = {
'file': self._urllib_download,
'http': self._urllib_download,
'https': self._urllib_download,
'ftp': self._urllib_download,
'ftps': self._urllib_download,
'rsync': self._rsync_download,
'ssh': self._rsync_download,
}
self.__system_settings = SystemSettings()
if speed_limit == None:
speed_limit = \
self.__system_settings['repositories']['transfer_limit']
if timeout is None:
self.__timeout = \
self.__system_settings['repositories']['timeout']
else:
self.__timeout = timeout
self.__th_id = 0
if download_context_func is None:
@contextlib.contextmanager
def download_context_func(path):
yield
self.__download_context_func = download_context_func
self.__pre_download_hook = pre_download_hook
self.__post_download_hook = post_download_hook
self.__resume = resume
self.__url = url
self.__path_to_save = path_to_save
self.__checksum = checksum
self.__show_speed = show_speed
self.__abort_check_func = abort_check_func
self.__thread_stop_func = thread_stop_func
self.__disallow_redirect = disallow_redirect
self.__speedlimit = speed_limit # kbytes/sec
# HTTP Basic Authentication parameters
self.__http_basic_user = http_basic_user
self.__http_basic_pwd = http_basic_pwd
# SSL Context options
self.__https_validate_cert = https_validate_cert
self._init_vars()
self.__init_urllib()
@staticmethod
def _get_url_protocol(url):
return url.split(":")[0]
def __init_urllib(self):
# this will be moved away soon anyway
self.__localfile = None
def _init_vars(self):
self.__use_md5_checksum = False
self.__md5_checksum = hashlib.new("md5")
self.__resumed = False
self.__buffersize = 8192
self.__status = None
self.__remotefile = None
self.__downloadedsize = 0
self.__average = 0
self.__remotesize = 0
self.__oldaverage = 0.0
self.__last_output_time = time.time()
# transfer status data
self.__startingposition = 0
self.__datatransfer = 0
self.__time_remaining = "(infinite)"
self.__time_remaining_secs = 0
self.__elapsed = 0.0
self.__updatestep = 0.2
self.__starttime = time.time()
self.__last_update_time = self.__starttime
self.__last_downloadedsize = 0
self.__existed_before = False
if os.path.lexists(self.__path_to_save):
self.__existed_before = True
def __setup_urllib_resume_support(self):
# resume support
if const_file_readable(self.__path_to_save) and self.__resume:
self.__urllib_open_local_file("ab")
self.__localfile.seek(0, os.SEEK_END)
self.__startingposition = int(self.__localfile.tell())
self.__last_downloadedsize = self.__startingposition
else:
self.__urllib_open_local_file("wb")
def __urllib_open_local_
|
karacos/karacos-wsgi
|
py/karacos/core/mail.py
|
Python
|
lgpl-3.0
| 2,688
| 0.007068
|
"""
KaraCos - web platform engine - http://karacos.org/
Copyright (C) 2009-2010 Nicolas Karageuzian - Cyril Gratecis
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__license__ = 'AGPL'
import smtplib
import karacos
import random
import string
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
def valid_email(email):
import re
    reg = re.compile("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$")
return reg.match(email)
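# Illustrative results of valid_email above (addresses are arbitrary examples):
#
#   valid_email("user@example.com")   # -> match object (truthy)
#   valid_email("not-an-email")       # -> None (no "@" present)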
def send_mail(destmail, msg):
"""
"""
try:
server = smtplib.SMTP(karacos.config.get('mail','smtp_server'),
karacos.config.get('mail','smtp_server_port'))
server.ehlo()
if karacos.config.has_option('mail', 'smtp_ssl'):
if karacos.config.get('mail', 'smtp_ssl') == "True" or karacos.config.get('mail', 'smtp_ssl'):
server.starttls()
server.ehlo()
if karacos.config.has_option('mail', 'smtp_password'):
src = karacos.config.get('mail','from_addr')
password = karacos.config.get('mail','smtp_password')
server.login(src, password)
server.sendmail(karacos.config.get('mail','from_addr'), destmail, msg)
print "mail sent"
server.close()
except Exception,e:
import sys
print sys.exc_info()
raise e
def send_domain_mail(domain, destmail, msg):
server = smtplib.SMTP(domain['site_email_service_host'],
domain['site_email_service_port'])
server.ehlo()
if 'site_email_service_secure' in domain:
if domain['site_email_service_secure'] or domain['site_email_service_secure'] == True:
server.starttls()
server.ehlo()
if 'site_email_service_password' in domain:
server.login(domain['site_email_service_username'], domain['site_email_service_password'])
server.sendmail(domain['site_email_from'], destmail, msg)
server.close()
|
vegitron/ansible
|
lib/ansible/executor/task_executor.py
|
Python
|
gpl-3.0
| 31,968
| 0.003191
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import subprocess
import sys
import time
import traceback
from ansible.compat.six import iteritems, string_types, binary_type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure
from ansible.executor.task_result import TaskResult
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.template import Templar
from ansible.utils.encrypt import key_for_hostname
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode, to_bytes
from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskExecutor']
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
# Modules that we optimize by squashing loop items into a single call to
# the module
SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, rslt_q):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
self._connection = None
self._rslt_q = rslt_q
def run(self):
'''
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with self._run_loop()
or self._execute(). After that, the returned results are parsed and
returned as a dict.
'''
display.debug("in run()")
try:
# lookup plugins need to know if this task is executing from
# a role, so that it can properly find files/templates/etc.
roledir = None
if self._task._role:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
# loop through the item results, and remember the changed/failed
# result flags based on any item there.
changed = False
failed = False
for item in item_results:
if 'changed' in item and item['changed']:
changed = True
if 'failed' in item and item['failed']:
failed = True
# create the overall result item, and set the changed/failed
# flags there to reflect the overall result of the loop
res = dict(results=item_results)
if changed:
res['changed'] = True
if failed:
res['failed'] = True
res['msg'] = 'One or more items failed'
else:
res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
display.debug("calling self._execute()")
res = self._execute()
display.debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
def _clean_res(res):
if isinstance(res, dict):
for k in res.keys():
res[k] = _clean_res(res[k])
elif isinstance(res, list):
for idx,item in enumerate(res):
res[idx] = _clean_res(item)
elif isinstance(res, UnsafeProxy):
return res._obj
elif isinstance(res, binary_type):
return to_unicode(res, errors='strict')
return res
display.debug("dumping result to json")
res = _clean_res(res)
display.debug("done dumping result, returning")
return res
except AnsibleError as e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
except Exception as e:
return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_unicode(traceback.format_exc()), stdout='')
finally:
try:
self._connection.close()
except AttributeError:
pass
except Exception as e:
display.debug(u"error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
# save the play context variables to a temporary dictionary,
# so that we can modify the job vars without doing a full copy
# and later restore them to avoid modifying things too early
play_context_vars = dict()
self._play_context.update_vars(play_context_vars)
old_vars = dict()
for k in play_context_vars.keys():
if k in self._job_vars:
old_vars[k] = self._job_vars[k]
self._job_vars[k] = play_context_vars[k]
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
items = None
if self._task.loop:
if self._task.loop in self._shared_loader_obj.lookup_loader:
#TODO: remove convert_bare true and deprecate this in with_
if self._task.loop == 'first_found':
# first_found loops are special. If the item is undefined
# then we want to fall through to the next value rather
# than failing.
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=True)
                    loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
else:
try:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
display.deprecated("Skipping task due to undefined Error, in the future this will be a fatal error.: %s" % to_bytes(e))
return None
items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._lo
|
sacovo/brainfuck
|
setup.py
|
Python
|
gpl-2.0
| 262
| 0.003817
|
__author__ = 'sandro'
from distutils.core import setup
setup(
author='Sandro Covo',
    author_email="sandro@covo.ch",
packages=['brainfuck'],
scripts=['scripts/pyfuck'],
name="Pyfuck",
    description="Brainfuck interpreter written in python"
)
|
hzlf/openbroadcast
|
website/shop/shop_ajax/admin.py
|
Python
|
gpl-3.0
| 24
| 0.083333
|
# -*- coding: utf-8 -*-
|
Florents-Tselai/PyCarGr
|
pycargr/parser.py
|
Python
|
mit
| 5,387
| 0.000931
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
__author__ = 'Florents Tselai'
from datetime import datetime
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from pycargr.model import Car
class SearchResultPageParser:
def __init__(self, search_page_url):
self.search_page_url = search_page_url
req = Request(
search_page_url,
data=None,
headers={
'User-Agent': UserAgent().chrome
}
)
self.html = urlopen(req).read().decode('utf-8')
self.soup = BeautifulSoup(self.html, 'html.parser')
self.num_results = None
for f in self.soup.find_all('strong'):
if 'αγγελίες' in f.text:
if f.text.split()[0].isdigit():
self.num_results = int(f.text.split()[0])
def parse(self):
for a in self.soup.find_all('a', class_='vehicle list-group-item clsfd_list_row'):
yield str(int(a.get('href').replace('/', '').split('-')[0].replace('classifiedscarsview', '')))
def __len__(self):
return self.num_results
class CarItemParser:
def __init__(self, car_id):
self.car_id = car_id
self.req = Request(
'https://www.car.gr/%s' % self.car_id,
data=None,
headers={
'User-Agent': UserAgent().chrome
}
)
self.html = urlopen(self.req).read().decode('utf-8')
self.soup = BeautifulSoup(self.html, 'html.parser')
def parse_km(self):
try:
for td in self.soup.find_all('td'):
if 'χλμ' in td.text:
return float(td.text.replace('.', '').replace('χλμ', ''))
except Exception:
return None
return None
def parse_bhp(self):
try:
for td in self.soup.find_all('td'):
if 'bhp' in td.text:
return int(td.text.replace(' bhp', ''))
except Exception:
return None
return None
def parse_title(self):
try:
return self.soup.find('title').text
except Exception:
return None
def parse_price(self):
try:
return float(self.soup.find(itemprop='price').text.replace('.', '').replace('€ ', ''))
except Exception:
return None
def parse_release_date(self):
try:
date_str = self.soup.find(itemprop='releaseDate').text.strip()
return datetime.strptime(date_str, "%m / %Y").strftime("%b %Y")
except Exception:
return None
def parse_engine(self):
try:
return int(self.soup.find(id='clsfd_engine_%s' % self.car_id).text.replace(' cc', '').replace('.', ''))
except Exception:
return None
def parse_color(self):
try:
return self.soup.find(itemprop='color').text
except Exception:
return None
def parse_fueltype(self):
try:
return self.soup.find(id='clsfd_fueltype_%s' % self.car_id).text
except Exception:
return None
def parse_description(self):
try:
return self.soup.find(itemprop='description').text
except Exception:
return None
def parse_city(self):
try:
return self.soup.find('span', itemprop='addressLocality').text
except Exception:
return None
def parse_region(self):
try:
return self.soup.find('span', itemprop='addressRegion').text
except Exception:
return None
def parse_postal_code(self):
try:
return int(self.soup.find('span', itemprop='postalCode').text)
except Exception:
return None
def parse_transmission(self):
try:
return self.soup.find(id='clsfd_transmision_%s' % self.car_id).text
except Exception:
return None
def parse_images(self):
try:
images_urls = []
for img in self.soup.find_all('img', class_='bigphoto'):
images_urls.append(img.get('src').replace(r'//', 'https://').replace('_v', '_b'))
return images_urls
except Exception:
return None
def parse(self):
c = Car(self.car_id)
c.title = self.parse_title()
c.price = self.parse_price()
c.release_date = self.parse_release_date()
c.engine = self.parse_engine()
c.km = self.parse_km()
c.bhp = self.parse_bhp()
c.url = self.req.full_url
c.color = self.parse_color()
c.fueltype = self.parse_fueltype()
c.description = self.parse_description()
c.city = self.parse_city()
c.region = self.parse_region()
c.postal_code = self.parse_postal_code()
c.transmission = self.parse_transmission()
c.images = self.parse_images()
c.html = self.html
c.scraped_at = datetime.now().isoformat()
return c
# Utility methods
def parse_search_results(search_url):
car_ids = SearchResultPageParser(search_url).parse()
for car_id in car_ids:
yield parse_car_page(car_id)
def parse_car_page(car_id):
car = CarItemParser(car_id).parse()
return car
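# Illustrative usage of the helpers above (a sketch; the search URL is a
# placeholder for a real car.gr classifieds search):
#
#   for car in parse_search_results('https://www.car.gr/classifieds/cars/?...'):
#       print(car.title, car.price, car.km)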
|
Veblin/pythonDemo
|
spiders/logins.py
|
Python
|
mit
| 401
| 0.009975
|
import requests
import time
from selenium import webdriver
# file path
import os
BASE_DIR = os.path.dirname(__file__)
phjs_path = os.path.join(BASE_DIR,'login.phjs.js')
print ('*******'+phjs_path+'*******')
driver = webdriver.PhantomJS(executable_path=phjs_path)
driver.get('http://autoinsights.autodmp.com/user/login')
time.sleep(3)
print(driver.find_element_by_tag_name('form').text)
driver.close()
|
cjaymes/pyscap
|
src/scap/model/xccdf_1_2/ModelType.py
|
Python
|
gpl-3.0
| 6,764
| 0.003999
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
logger = logging.getLogger(__name__)
class ModelType(Model):
MODEL_MAP = {
'attributes': {
'system': {'required': True, 'type': 'AnyUriType'},
},
'elements': [
{'tag_name': 'param', 'class': 'ParamType', 'dict': 'params', 'key': 'name', 'min': 0, 'max': None},
],
}
def score(self, host, benchmark, profile_id):
from scap.model.xccdf_1_1.GroupType import GroupType
from scap.model.xccdf_1_1.RuleType import RuleType
if self.system == 'urn:xccdf:scoring:default':
### Score.Group.Init
# If the node is a Group or the Benchmark, assign a count of 0, a
# score s of 0.0, and an accumulator a of 0.0.
count = 0
score = 0.0
accumulator = 0.0
### Score.Group.Recurse
# For each selected child of this Group or Benchmark, do the following:
# (1) compute the count and weighted score for the child using this
# algorithm,
# (2) if the child’s count value is not 0, then add the child’s
# weighted score to this node’s score s, add 1 to this node’s count,
# and add the child’s weight value to the accumulator a.
for item_id in benchmark.items:
item = benchmark.items[item_id]
if not isinstance(item, GroupType) \
and not isinstance(item, RuleType):
continue
if not item.selected:
continue
item_score = item.score(host, benchmark, profile_id, self.system)
if item_score[item_id]['score'] is None:
continue
if item_score[item_id]['count'] != 0:
score += item_score[item_id]['score'] * item_score[item_id]['weight']
count += 1
accumulator += item_score[item_id]['weight']
### Score.Group.Normalize
# Normalize this node’s score: compute s = s / a.
if accumulator == 0.0:
if score != 0.0:
raise ValueError('Got to score normalization with score ' + str(score) + ' / ' + str(accumulator))
else:
score = 0.0
else:
score = score / accumulator
logger.debug(self.system + ' score: ' + str(score))
host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'system': self.system})
elif self.system == 'urn:xccdf:scoring:flat':
scores = {}
for item_id in benchmark.items:
item = benchmark.items[item_id]
if not isinstance(item, GroupType) \
and not isinstance(item, RuleType):
continue
# just pass the scores upstream for processing
scores.update(item.score(host, benchmark, profile_id, self.system))
score = 0.0
max_score = 0.0
for rule_id in scores:
if scores[rule_id]['result'] in ['notapplicable', 'notchecked', 'informational', 'notselected']:
continue
max_score += scores[rule_id]['weight']
if scores[rule_id]['result'] in ['pass', 'fixed']:
score += scores[rule_id]['weight']
logger.debug(self.system + ' score: ' + str(score) + ' / ' + str(max_score))
host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'max_score': max_score, 'system': self.system})
elif self.system == 'urn:xccdf:scoring:flat-unweighted':
scores = {}
for item_id in benchmark.items:
item = benchmark.items[item_id]
if not isinstance(item, GroupType) \
and not isinstance(item, RuleType):
continue
# just pass the scores upstream for processing
scores.update(item.score(host, benchmark, profile_id, self.system))
score = 0.0
max_score = 0.0
for rule_id in scores:
if scores[rule_id]['result'] in ['notapplicable', 'notchecked', 'informational', 'notselected']:
continue
max_score += 1.0
if scores[rule_id]['result'] in ['pass', 'fixed']:
score += 1.0
logger.debug(self.system + ' score: ' + str(score) + ' / ' + str(max_score))
host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'max_score': max_score, 'system': self.system})
elif self.system == 'urn:xccdf:scoring:absolute':
scores = {}
for item_id in benchmark.items:
item = benchmark.items[item_id]
if not isinstance(item, GroupType) \
and not isinstance(item, RuleType):
continue
# just pass the scores upstream for processing
scores.update(item.score(host, benchmark, profile_id, self.system))
score = 0.0
max_score = 0.0
            for rule_id in scores:
if scores[rule_id]['result'] in ['notapplicable', 'notchecked', 'informational', 'notselected']:
continue
max_score += scores[rule_id]['weight']
if scores[rule_id]['result'] in ['pass', 'fixed']:
score += scores[rule_id]['weight']
if score == max_score:
score = 1.0
else:
score = 0.0
            logger.debug(self.system + ' score: ' + str(score))
host.facts['checklist'][benchmark.id]['profile'][profile_id]['scores'].append({'score': score, 'system': self.system})
else:
raise NotImplementedError('Scoring model ' + self.system + ' is not implemented')
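# Worked example for the flat scoring model handled above (illustrative
# numbers): three applicable selected rules with weights 1.0, 2.0 and 1.0,
# of which only the first two pass, give score = 1.0 + 2.0 = 3.0 against
# max_score = 4.0.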
|
sujithvm/skynet
|
code/__init__.py
|
Python
|
mit
| 41
| 0
|
__author__ = 'Archana V Menon, Sujith V'
|
MadManRises/Madgine
|
shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env.py
|
Python
|
mit
| 5,864
| 0.00648
|
"""This file implements the gym environment of minitaur.
"""
import math
import random
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from gym import spaces
import numpy as np
from pybullet_envs.minitaur.envs import minitaur_gym_env
import pybullet_data
GOAL_DISTANCE_THRESHOLD = 0.8
GOAL_REWARD = 1000.0
REWARD_SCALING = 1e-3
INIT_BALL_ANGLE = math.pi / 3
INIT_BALL_DISTANCE = 5.0
ACTION_EPS = 0.01
class MinitaurBallGymEnv(minitaur_gym_env.MinitaurGymEnv):
"""The gym environment for the minitaur and a ball.
It simulates a minitaur (a quadruped robot) and a ball. The state space
includes the angle and distance of the ball relative to minitaur's base.
The action space is a steering command. The reward function is based
on how far the ball is relative to the minitaur's base.
"""
def __init__(self,
urdf_root=pybullet_data.getDataPath(),
self_collision_enabled=True,
pd_control_enabled=False,
leg_model_enabled=True,
on_rack=False,
render=False):
"""Initialize the minitaur and ball gym environment.
Args:
urdf_root: The path to the urdf data folder.
self_collision_enabled: Whether to enable self collision in the sim.
pd_control_enabled: Whether to use PD controller for each motor.
leg_model_enabled: Whether to use a leg motor to reparameterize the action
space.
on_rack: Whether to place the minitaur on rack. This is only used to debug
the walking gait. In this mode, the minitaur's base is hanged midair so
that its walking gait is clearer to visualize.
render: Whether to render the simulation.
"""
super(MinitaurBallGymEnv, self).__init__(urdf_root=urdf_root,
self_collision_enabled=self_collision_enabled,
pd_control_enabled=pd_control_enabled,
leg_model_enabled=leg_model_enabled,
on_rack=on_rack,
render=render)
self._cam_dist = 2.0
self._cam_yaw = -70
self._cam_pitch = -30
self.action_space = spaces.Box(np.array([-1]), np.array([1]))
self.observation_space = spaces.Box(np.array([-math.pi, 0]), np.array([math.pi, 100]))
def reset(self):
self._ball_id = 0
super(MinitaurBallGymEnv, self).reset()
self._init_ball_theta = random.uniform(-INIT_BALL_ANGLE, INIT_BALL_ANGLE)
self._init_ball_distance = INIT_BALL_DISTANCE
self._ball_pos = [
self._init_ball_distance * math.cos(self._init_ball_theta),
self._init_ball_distance * math.sin(self._init_ball_theta), 1
]
self._ball_id = self._pybullet_client.loadURDF(
"%s/sphere_with_restitution.urdf" % self._urdf_root, self._ball_pos)
return self._get_observation()
def _get_observation(self):
world_translation_minitaur, world_rotation_minitaur = (
self._pybullet_client.getBasePositionAndOrientation(self.minitaur.quadruped))
world_translation_ball, world_rotation_ball = (
self._pybullet_client.getBasePositionAndOrientation(self._ball_id))
minitaur_translation_world, minitaur_rotation_world = (self._pybullet_client.invertTransform(
world_translation_minitaur, world_rotation_minitaur))
minitaur_translation_ball, _ = (self._pybullet_client.multiplyTransforms(
minitaur_translation_world, minitaur_rotation_world, world_translation_ball,
world_rotation_ball))
distance = math.sqrt(minitaur_translation_ball[0]**2 + minitaur_translation_ball[1]**2)
angle = math.atan2(minitaur_translation_ball[0], minitaur_translation_ball[1])
self._observation = [angle - math.pi / 2, distance]
return self._observation
def _transform_action_to_motor_command(self, action):
if self._leg_model_enabled:
for i, action_component in enumerate(action):
if not (-self._action_bound - ACTION_EPS <= action_component <=
self._action_bound + ACTION_EPS):
raise ValueError("{}th action {} out of bounds.".format(i, action_component))
action = self._apply_steering_to_locomotion(action)
action = self.minitaur.ConvertFromLegModel(action)
return action
def _apply_steering_to_locomotion(self, action):
# A hardcoded feedforward walking controller based on sine functions.
amplitude_swing = 0.5
amplitude_extension = 0.5
speed = 200
steering_amplitude = 0.5 * action[0]
t = self.minitaur.GetTimeSinceReset()
a1 = math.sin(t * speed) * (amplitude_swing + steering_amplitude)
a2 = math.sin(t * speed + math.pi) * (amplitude_swing - steering_amplitude)
a3 = math.sin(t * speed) * amplitude_extension
a4 = math.sin(t * speed + math.pi) * amplitude_extension
action = [a1, a2, a2, a1, a3, a4, a4, a3]
return action
def _distance_to_ball(self):
world_translation_minitaur, _ = (self._pybullet_client.getBasePositionAndOrientation(
self.minitaur.quadruped))
world_translation_ball, _ = (self._pybullet_client.getBasePositionAndOrientation(
self._ball_id))
distance = math.sqrt((world_translation_ball[0] - world_translation_minitaur[0])**2 +
(world_translation_ball[1] - world_translation_minitaur[1])**2)
return distance
def _goal_state(self):
return self._observation[1] < GOAL_DISTANCE_THRESHOLD
def _reward(self):
reward = -self._observation[1]
if self._goal_state():
reward += GOAL_REWARD
return reward * REWARD_SCALING
def _termination(self):
if self._goal_state():
return True
return False
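# Hypothetical usage sketch (not part of the original file); it assumes
# pybullet and pybullet_data are installed and uses the classic gym API in
# which reset() returns the observation and step() returns
# (observation, reward, done, info). The steering value is arbitrary.
if __name__ == "__main__":
  env = MinitaurBallGymEnv(render=False)
  observation = env.reset()  # [angle to ball, distance to ball]
  for _ in range(100):
    steering = [0.3]  # single steering command in [-1, 1]
    observation, reward, done, _ = env.step(steering)
    if done:  # minitaur got within GOAL_DISTANCE_THRESHOLD of the ball
      break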
|
jirafe/pyleus
|
tests/cli/storm_cluster_test.py
|
Python
|
apache-2.0
| 1,989
| 0.001508
|
import os
import pytest
from pyleus.cli.storm_cluster import _get_storm_cmd_env
from pyleus.cli.storm_cluster import STORM_JAR_JVM_OPTS
from pyleus.cli.storm_cluster import StormCluster
from pyleus.cli.storm_cluster import TOPOLOGY_BUILDER_CLASS
from pyleus.testing import mock
class TestGetStormCmdEnd(object):
@pytest.fixture(autouse=True)
def mock_os_environ(self, monkeypatch):
monkeypatch.setattr(os, 'environ', {})
def test_jvm_opts_unset(self):
assert _get_storm_cmd_env(None) is None
def test_jvm_opts_set(self):
jvm_opts = "-Dfoo=bar"
env = _get_storm_cmd_env(jvm_opts)
assert env[STORM_JAR_JVM_OPTS] == jvm_opts
class TestStormCluster(object):
@pytest.fixture
def cluster(self):
return StormCluster(
mock.sentinel.storm_cmd_path,
mock.sentinel.nimbus_host,
mock.sentinel.nimbus_port,
mock.sentinel.verbose,
mock.sentinel.jvm_opts,
)
def test__build_storm_cmd_no_port(self, cluster):
cluster.nimbus_host = "test-host"
cluster.nimbus_port = None
storm_cmd = cluster._build_storm_cmd(["a", "cmd"])
assert storm_cmd == [mock.sentinel.storm_cmd_path, "a", "cmd", "-c",
"nimbus.host=test-host"]
def test__build_storm_cmd_with_port(self, cluster):
cluster.nimbus_host = "test-host"
cluster.nimbus_port = 4321
storm_cmd = cluster._build_storm_cmd(["another", "cmd"])
assert storm_cmd == [mock.sentinel.storm_cmd_path, "another", "cmd", "-c",
"nimbus.host=test-host", "-c",
"nimbus.thrift.port=4321"]
def test_submit(self, cluster):
with mock.patch.object(cluster, '_exec_storm_cmd', autospec=True) as mock_exec:
cluster.submit(mock.sentinel.jar_path)
mock_exec.assert_called_once_with(["jar", mock.sentinel.jar_path, TOPOLOGY_BUILDER_CLASS])
|
longde123/MultiversePlatform
|
tools/Machinima/sendXmlJobs.py
|
Python
|
mit
| 4,886
| 0.00614
|
#
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
#!/usr/bin/python
import socket
import sys
import os
import xml.dom
import xml.dom.minidom
import httplib
import time
def parse_spooler(spooler_node):
answer_node = spooler_node.getElementsByTagName('answer')[0]
ok_node = answer_node.getElementsByTagName('ok')[0]
task_node = ok_node.getElementsByTagName('task')[0]
task_id = task_node.getAttribute('id')
return task_id
def sendJob(filename):
"""
Send a job to the render system, and parse the job number from the reply
@param filename: the name of the xml file with the request data
@return: the job id from the render machine or None if there was an error
@rtype: string
"""
addOrderStart = "<add_order job_chain=\"renderscenechain\"><xml_payload>"
addOrderEnd = "</xml_payload></add_order>"
hostname = "render1"
port = 4446
f = file(filename)
command = addOrderStart + f.read() + addOrderEnd
#create an INET, STREAMing socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((hostname, port))
s.send(command)
reply = ""
while reply.find('</spooler>') == -1:
reply = reply + s.recv(4096)
s.close()
# Right now, the reply contains a null
# do some hackery to remove it
null_index = reply.find('\0')
if null_index != -1:
reply = reply[0:null_index]
print reply
try:
replyDom = xml.dom.minidom.parseString(reply)
spooler_node = replyDom.getElementsByTagName('spooler')[0]
answer_node = spooler_node.getElementsByTagName('answer')[0]
ok_node = answer_node.getElementsByTagName('ok')[0]
task_node = ok_node.getElementsByTagName('order')[0]
task_id = task_node.getAttribute('id')
return task_id
except:
print 'Unable to parse reply:'
print reply
return None
def sendRequests(folder, output_folder):
result_hostname = 'facebook.multiverse.net'
result_port = 8087
result_folder = 'machinima'
files = os.listdir(folder)
tasks = {}
for filename in files:
if filename.endswith('.xml'):
task_id = sendJob(os.path.join(folder, filename))
if task_id is not None:
tasks[task_id] = filename[0:-4] # strip off the .xml
print 'Render job %s submitted with task id %s' % (filename, task_id)
# TODO: Automatically check the status of the postcards, and when they are ready,
# pull them locally
# sleep for 30 seconds, plus 20 seconds per postcard
sleep_time = 30 + 20 * len(tasks)
print 'Sleeping for %d seconds' % sleep_time
time.sleep(sleep_time)
conn = httplib.HTTPConnection(result_hostname, result_port)
conn.connect()
for key, value in tasks.items():
conn.request('GET', '/%s/%s.png' % (result_folder, key))
response = conn.getresponse()
if response.status == 200:
output_file = os.path.join(output_folder, '%s.png' % value)
imgData = response.read()
out = open(output_file, 'w')
out.write(imgData)
out.close()
print 'Wrote image: %s' % output_file
else:
print 'Status = %d' % response.status
print response.reason
conn.close()
source_folder = ''
dest_folder = ''
if len(sys.argv) >= 2:
source_folder = sys.argv[1]
# default to setting dest folder to source folder
dest_folder = sys.argv[1]
if len(sys.argv) >= 3:
dest_folder = sys.argv[2]
# To generate sample poses:
# sendXmlJobs.py sample_poses
# To generate sample postcards:
# sendXmlJobs.py sample_postcards
sendRequests(source_folder, dest_folder)
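# Hypothetical usage sketch (not part of the original file); the folder names
# are placeholders, and the render/result hosts hard-coded above must be
# reachable for either call to succeed.
#   task_id = sendJob('sample_postcards/postcard1.xml')  # scheduler order id
#   sendRequests('sample_postcards', 'rendered_postcards')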
|
ddboline/Garmin-Forerunner-610-Extractor_fork
|
ant/easy/node.py
|
Python
|
mit
| 4,653
| 0.004083
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import threading
import logging
import Queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("garmin.ant.easy.node")
class Node():
def __init__(self):
self._responses_cond = threading.Condition()
self._responses = collections.deque()
self._event_cond = threading.Condition()
self._events = collections.deque()
self._datas = Queue.Queue()
self.channels = {}
self.ant = Ant()
self._running = True
self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
self._worker_thread.start()
def new_channel(self, ctype):
channel = Channel(0, self, self.ant)
self.channels[0] = channel
channel._assign(ctype, 0x00)
return channel
def request_message(self, messageId):
_logger.debug("requesting message %#02x", messageId)
self.ant.request_message(0, messageId)
_logger.debug("done requesting message %#02x", messageId)
return self.wait_for_special(messageId)
def set_network_key(self, network, key):
self.ant.set_network_key(network, key)
return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
def wait_for_event(self, ok_codes):
return wait_for_event(ok_codes, self._events, self._event_cond)
def wait_for_response(self, event_id):
return wait_for_response(event_id, self._responses, self._responses_cond)
def wait_for_special(self, event_id):
return wait_for_special(event_id, self._responses, self._responses_cond)
def _worker_response(self, channel, event, data):
self._responses_cond.acquire()
self._responses.append((channel, event, data))
self._responses_cond.notify()
self._responses_cond.release()
def _worker_event(self, channel, event, data):
if event == Message.Code.EVENT_RX_BURST_PACKET:
self._datas.put(('burst', channel, data))
elif event == Message.Code.EVENT_RX_BROADCAST:
self._datas.put(('broadcast', channel, data))
else:
self._event_cond.acquire()
self._events.append((channel, event, data))
self._event_cond.notify()
self._event_cond.release()
def _worker(self):
self.ant.response_function = self._worker_response
self.ant.channel_event_function = self._worker_event
# TODO: check capabilities
self.ant.start()
def _main(self):
while self._running:
try:
(data_type, channel, data) = self._datas.get(True, 1.0)
self._datas.task_done()
if data_type == 'broadcast':
self.channels[channel].on_broadcast_data(data)
elif data_type == 'burst':
self.channels[channel].on_burst_data(data)
else:
_logger.warning("Unknown data type '%s': %r", data_type, data)
except Queue.Empty as e:
pass
def start(self):
self._main()
def stop(self):
if self._running:
_logger.debug("Stoping ant.easy")
self._running = False
self.ant.stop()
self._worker_thread.join()
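# Hypothetical usage sketch (not part of the original file); it assumes an
# ANT USB stick is attached, that ant.easy.channel.Channel exposes
# Channel.Type.BIDIRECTIONAL_RECEIVE, and uses a placeholder all-zero network
# key. Channel period/frequency/id configuration is omitted.
if __name__ == "__main__":
    node = Node()
    node.set_network_key(0x00, [0x00] * 8)
    channel = node.new_channel(Channel.Type.BIDIRECTIONAL_RECEIVE)
    channel.on_broadcast_data = lambda data: _logger.info("broadcast: %r", data)
    try:
        node.start()  # blocks; dispatches broadcast/burst data to the channel
    finally:
        node.stop()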
|
giggsey/SickRage
|
sickbeard/providers/xthor.py
|
Python
|
gpl-3.0
| 8,500
| 0.004471
|
# -*- coding: latin-1 -*-
# Author: adaur <adaur.underground@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import datetime
import sickbeard
import generic
import cookielib
import urllib
import requests
from sickbeard.bs4_parser import BS4Parser
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import show_name_helpers
from sickbeard import db
from sickbeard import helpers
from unidecode import unidecode
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
class XthorProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "Xthor")
self.supportsBacklog = True
self.public = False
self.cj = cookielib.CookieJar()
self.url = "https://xthor.bz"
self.urlsearch = "https://xthor.bz/browse.php?search=\"%s\"%s"
self.categories = "&searchin=title&incldead=0"
self.enabled = False
self.username = None
self.password = None
self.ratio = None
def isEnabled(self):
return self.enabled
def imageName(self):
return 'xthor.png'
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + '.' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + '.' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + '.S%02d' % int(ep_obj.scene_season) # 1) showName.SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
if self.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + '.' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + '.' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + '.' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + '.' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', '.', ep_string))
return [search_string]
def _get_title_and_url(self, item):
title, url = item
if title:
title = u'' + title
title = title.replace(' ', '.')
if url:
url = str(url).replace('&', '&')
return (title, url)
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'username': self.username,
'password': self.password,
'submitme': 'X'
}
logger.log('Performing authentication to Xthor', logger.DEBUG)
response = self.getURL(self.url + '/takelogin.php', post_data=login_params, timeout=30)
if not response:
logger.log(u'Unable to connect to ' + self.name + ' provider.', logger.ERROR)
return False
if re.search('donate.php', response):
logger.log(u'Login to ' + self.name + ' was successful.', logger.DEBUG)
return True
else:
logger.log(u'Login to ' + self.name + ' was unsuccessful.', logger.DEBUG)
return False
return True
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
logger.log(u"_doSearch started with ..." + str(search_params), logger.DEBUG)
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
# check for auth
if not self._doLogin():
return results
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
searchURL = self.urlsearch % (urllib.quote(search_string), self.categories)
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
resultsTable = html.find("table", { "class" : "table2 table-bordered2" })
if resultsTable:
rows = resultsTable.findAll("tr")
for row in rows:
link = row.find("a",href=re.compile("details.php"))
if link:
title = link.text
logger.log(u"Xthor title : " + title, logger.DEBUG)
downloadURL = self.url + '/' + row.find("a",href=re.compile("download.php"))['href']
logger.log(u"Xthor download URL : " + downloadURL, logger.DEBUG)
item = title, downloadURL
items[mode].append(item)
results += items[mode]
return results
def seedRatio(self):
return self.ratio
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return results
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
search_params = self._get_episode_search_st
|
toofar/qutebrowser
|
tests/unit/utils/test_urlmatch.py
|
Python
|
gpl-3.0
| 17,880
| 0
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.urlmatch.
The tests are mostly inspired by Chromium's:
https://cs.chromium.org/chromium/src/extensions/common/url_pattern_unittest.cc
Currently not tested:
- The match_effective_tld attribute as it doesn't exist yet.
- Nested filesystem:// URLs as we don't have those.
- Unicode matching because QUrl doesn't like those URLs.
- Any other features we don't need, such as .GetAsString() or set operations.
"""
import re
import sys
import string
import pytest
import hypothesis
import hypothesis.strategies as hst
from PyQt5.QtCore import QUrl
from qutebrowser.utils import urlmatch
@pytest.mark.parametrize('pattern, error', [
# Chromium: PARSE_ERROR_MISSING_SCHEME_SEPARATOR
# ("http", "No scheme given"),
("http:", "Invalid port: Port is empty"),
("http:/", "Invalid port: Port is empty"),
("about://", "Pattern without path"),
("http:/bar", "Invalid port: Port is empty"),
# Chromium: PARSE_ERROR_EMPTY_HOST
("http://", "Pattern without host"),
("http:///", "Pattern without host"),
("http:// /", "Pattern without host"),
("http://:1234/", "Pattern without host"),
# Chromium: PARSE_ERROR_EMPTY_PATH
# We deviate from Chromium and allow this for ease of use
# ("http://bar", "..."),
# Chromium: PARSE_ERROR_INVALID_HOST
("http://\0ww
|
w/", "May not contain NUL byte"),
# Chromium: PARSE_ERROR_INVALID_HOST_WILDCARD
("http://*foo/bar", "Invalid host wildcard"),
("http://foo.*.bar/baz", "Invalid host wildcard"),
("http://fo.*.ba:123/baz", "Invalid host wildcard"),
("http://foo.*/bar", "TLD wildcards ar
|
e not implemented yet"),
# Chromium: PARSE_ERROR_INVALID_PORT
("http://foo:/", "Invalid port: Port is empty"),
("http://*.foo:/", "Invalid port: Port is empty"),
("http://foo:com/",
"Invalid port: invalid literal for int() with base 10: 'com'"),
pytest.param("http://foo:123456/",
"Invalid port: Port out of range 0-65535",
marks=pytest.mark.skipif(
sys.hexversion < 0x03060000,
reason="Doesn't show an error on Python 3.5")),
("http://foo:80:80/monkey",
"Invalid port: invalid literal for int() with base 10: '80:80'"),
("chrome://foo:1234/bar", "Ports are unsupported with chrome scheme"),
# Additional tests
("http://[", "Invalid IPv6 URL"),
])
def test_invalid_patterns(pattern, error):
with pytest.raises(urlmatch.ParseError, match=re.escape(error)):
urlmatch.UrlPattern(pattern)
@pytest.mark.parametrize('pattern, port', [
("http://foo:1234/", 1234),
("http://foo:1234/bar", 1234),
("http://*.foo:1234/", 1234),
("http://*.foo:1234/bar", 1234),
("http://*:1234/", 1234),
("http://*:*/", None),
("http://foo:*/", None),
("file://foo:1234/bar", None),
# Port-like strings in the path should not trigger a warning.
("http://*/:1234", None),
("http://*.foo/bar:1234", None),
("http://foo/bar:1234/path", None),
# We don't implement ALLOW_WILDCARD_FOR_EFFECTIVE_TLD yet.
# ("http://*.foo.*/:1234", None),
])
def test_port(pattern, port):
up = urlmatch.UrlPattern(pattern)
assert up._port == port
@pytest.mark.parametrize('pattern, path', [
("http://foo/", '/'),
("http://foo/*", None),
])
def test_parse_path(pattern, path):
up = urlmatch.UrlPattern(pattern)
assert up._path == path
@pytest.mark.parametrize('pattern, scheme, host, path', [
("http://example.com", 'http', 'example.com', None), # no path
("example.com/path", None, 'example.com', '/path'), # no scheme
("example.com", None, 'example.com', None), # no scheme and no path
("example.com:1234", None, 'example.com', None), # no scheme/path but port
("data:monkey", 'data', None, 'monkey'), # existing scheme
])
def test_lightweight_patterns(pattern, scheme, host, path):
"""Make sure we can leave off parts of an URL.
This is a deviation from Chromium to make patterns more user-friendly.
"""
up = urlmatch.UrlPattern(pattern)
assert up._scheme == scheme
assert up._host == host
assert up._path == path
class TestMatchAllPagesForGivenScheme:
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("http://*/*")
def test_attrs(self, up):
assert up._scheme == 'http'
assert up._host is None
assert up._match_subdomains
assert not up._match_all
assert up._path is None
@pytest.mark.parametrize('url, expected', [
("http://google.com", True),
("http://yahoo.com", True),
("http://google.com/foo", True),
("https://google.com", False),
("http://74.125.127.100/search", True),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchAllDomains:
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("https://*/foo*")
def test_attrs(self, up):
assert up._scheme == 'https'
assert up._host is None
assert up._match_subdomains
assert not up._match_all
assert up._path == '/foo*'
@pytest.mark.parametrize('url, expected', [
("https://google.com/foo", True),
("https://google.com/foobar", True),
("http://google.com/foo", False),
("https://google.com/", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchSubdomains:
@pytest.fixture
def up(self):
return urlmatch.UrlPattern("http://*.google.com/foo*bar")
def test_attrs(self, up):
assert up._scheme == 'http'
assert up._host == 'google.com'
assert up._match_subdomains
assert not up._match_all
assert up._path == '/foo*bar'
@pytest.mark.parametrize('url, expected', [
("http://google.com/foobar", True),
# FIXME The ?bar seems to be treated as path by GURL but as query by
# QUrl.
# ("http://www.google.com/foo?bar", True),
("http://monkey.images.google.com/foooobar", True),
("http://yahoo.com/foobar", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchGlobEscaping:
@pytest.fixture
def up(self):
return urlmatch.UrlPattern(r"file:///foo-bar\*baz")
def test_attrs(self, up):
assert up._scheme == 'file'
assert up._host is None
assert not up._match_subdomains
assert not up._match_all
assert up._path == r'/foo-bar\*baz'
@pytest.mark.parametrize('url, expected', [
# We use - instead of ? so it doesn't get treated as query
(r"file:///foo-bar\hellobaz", True),
(r"file:///fooXbar\hellobaz", False),
])
def test_urls(self, up, url, expected):
assert up.matches(QUrl(url)) == expected
class TestMatchIpAddresses:
@pytest.mark.parametrize('pattern, host, match_subdomains', [
("http://127.0.0.1/*", "127.0.0.1", False),
("http://*.0.0.1/*", "0.0.1", True),
])
def test_attrs(self, pattern, host, match_subdomains):
up = urlmatch.UrlPattern(pattern)
assert up._scheme == 'http'
assert up._host == host
assert up._match_subdomains == match_subdomains
assert not up._ma
|
DarKnight24/owtf
|
plugins/web/passive/Testing_for_SSL-TLS@OWTF-CM-001.py
|
Python
|
bsd-3-clause
| 489
| 0.00409
|
"""
PASSIVE Plugin for Testing_for_SSL-TLS_(OWASP-CM-001)
"""
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Third party resources"
def run(PluginInfo):
# Vuln search box to be built in core and reused in different plugins:
resource = ServiceLocator.get_component("resource").GetResources('PassiveSSL')
Content = ServiceLocator.get_component("plugin_helper").ResourceLinkList('Online Resources', resource)
return Content
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_countries_greece.py
|
Python
|
gpl-3.0
| 1,109
| 0.00271
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Countries_Greece():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
countries=["Greece"]))
|
jrspruitt/jkent-pybot
|
pybot/plugins/anyurl.py
|
Python
|
mit
| 2,136
| 0.004682
|
# -*- coding: utf-8 -*-
# vim: set ts=4 et
import cgi
import requests
from six.moves.html_parser import HTMLParser
from plugin import *
content_types = (
'text/html',
'text/xml',
'application/xhtml+xml',
'application/xml'
)
class TitleParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.match = False
self.title = ''
def handle_starttag(self, tag, attrs):
if tag == 'meta':
og_title = False
for attr in attrs:
if attr == ('property', 'og:title'):
og_title = True
if og_title:
for attr in attrs:
if attr[0] == 'content':
self.title = attr[1]
self.match = True if not self.title and tag == 'title' else False
def handle_data(self, data):
if self.match:
self.title = data.strip()
self.match = False
class Plugin(BasePlugin):
default_priority = 1
@hook
def any_url(self, msg, domain, url):
default_ua = ('Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; '
'compatible; Googlebot/2.1; +http://www.google.com/bot.html) '
'Safari/537.36')
user_agent = self.bot.config.get(self.name, 'user-agent', fallback=default_ua)
headers = {
'User-Agent': user_agent
}
try:
r = requests.get(url, stream=True, headers=headers, timeout=10)
except requests.exceptions.ReadTimeout:
msg.reply('URL Timeout')
return
content_type, params = cgi.parse_header(r.headers['Content-Type'])
if not content_type in content_types:
return
r.encoding = 'utf-8'
if 'charset' in params:
r.encoding = params['charset'].strip("'\"")
parser = TitleParser()
for line in r.iter_lines(chunk_size=1024, decode_unicode=True):
parser.feed(line)
if parser.title:
break
msg.reply('\x031,0URL\x03 %s' % parser.title)
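# Hypothetical self-check (not part of the original file): TitleParser prefers
# an og:title meta tag and falls back to the <title> element. It exercises only
# this module's parser class; running the file directly still requires the
# bot's plugin framework import above to resolve.
if __name__ == '__main__':
    _p = TitleParser()
    _p.feed('<html><head><title>Example Domain</title></head></html>')
    assert _p.title == 'Example Domain'
    _p = TitleParser()
    _p.feed('<head><meta property="og:title" content="OG Title"></head>')
    assert _p.title == 'OG Title'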
|
meghana0507/grpc-java-poll
|
lib/netty/protobuf/python/google/protobuf/internal/api_implementation.py
|
Python
|
bsd-3-clause
| 4,621
| 0.004112
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Determine which implementation of the protobuf API is used in this process.
"""
import os
import sys
try:
# pylint: disable=g-import-not-at-top
from google.protobuf.internal import _api_implementation
# The compile-time constants in the _api_implementation module can be used to
# switch to a certain implementation of the Python API at build time.
_api_version = _api_implementation.api_version
_proto_extension_modules_exist_in_build = True
except ImportError:
_api_version = -1 # Unspecified by compiler flags.
_proto_extension_modules_exist_in_build = False
if _api_version == 1:
raise ValueError('api_version=1 is no longer supported.')
if _api_version < 0: # Still unspecified?
try:
# The presence of this module in a build allows the proto implementation to
# be upgraded merely via build deps rather than a compiler flag or the
# runtime environment variable.
# pylint: disable=g-import-not-at-top
from google.protobuf import _use_fast_cpp_protos
# Work around a known issue in the classic bootstrap .par import hook.
if not _use_fast_cpp_protos:
raise ImportError('_use_fast_cpp_protos import succeeded but was None')
del _use_fast_cpp_protos
_api_version = 2
except ImportError:
if _proto_extension_modules_exist_in_build:
if sys.version_info[0] >= 3: # Python 3 defaults to C++ impl v2.
_api_version = 2
# TODO(b/17427486): Make Python 2 default to C++ impl v2.
_default_implementation_type = (
'python' if _api_version <= 0 else 'cpp')
# This environment variable can be used to switch to a certain implementation
# of the Python API, overriding the compile-time constants in the
# _api_implementation module. Right now only 'python' and 'cpp' are valid
# values. Any other value will be ignored.
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
_default_implementation_type)
if _implementation_type != 'python':
_implementation_type = 'cpp'
# This environment variable can be used to switch between the two
# 'cpp' implementations, overriding the compile-time constants in the
# _api_implementation module. Right now only 1 and 2 are valid values. Any other
# value will be ignored.
_implementation_version_str = os.getenv(
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', '2')
if _implementation_version_str != '2':
raise ValueError(
'unsupported PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION: "' +
_implementation_version_str + '" (supported versions: 2)'
)
_implementation_version = int(_implementation_version_str)
# Usage of this function is discouraged. Clients shouldn't care which
# implementation of the API is in use. Note that there is no guarantee
# that differences between APIs will be maintained.
# Please don't use this function if possible.
def Type():
return _implementation_type
# See comment on 'Type' above.
def Version():
return _implementation_version
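# Hypothetical usage sketch (not part of the original file): the override
# environment variable is read at import time, so it must be set before the
# protobuf package is first imported.
#   os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
#   from google.protobuf.internal import api_implementation
#   api_implementation.Type()     # 'python' or 'cpp'
#   api_implementation.Version()  # always 2 here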
|
joopert/home-assistant
|
tests/components/binary_sensor/test_device_condition.py
|
Python
|
apache-2.0
| 8,656
| 0.001733
|
"""The test for binary_sensor device automation."""
from datetime import timedelta
import pytest
from unittest.mock import patch
from homeassistant.components.binary_sensor import DOMAIN, DEVICE_CLASSES
from homeassistant.components.binary_sensor.device_condition import ENTITY_CONDITIONS
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.helpers import device_registry
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
async_get_device_automations,
async_get_device_automation_capabilities,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a binary_sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for condition in ENTITY_CONDITIONS[device_class]
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a binary_sensor condition."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_bat_low",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_on event - test_event1"
hass.states.async_set(sensor1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_off event - test_event2"
async def test_if_fires_on_for_condition(hass, calls):
"""Test for firing if condition is on with delay."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=10)
point3 = point2 + timedelta(seconds=10)
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
sensor1 = platform.ENTITIES["battery"]
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point1
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "event.event_type")
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
# Time travel 10 secs into the future
mock_utcnow.return_value = point2
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
|
mike-perdide/pcoords-gui
|
pcoordsgui/utils.py
|
Python
|
gpl-3.0
| 292
| 0
|
from PyQt4.QtGui import QFileDialog
def get_pcv_filename():
"""Opens t
|
he PCV file with a QFileDialog."""
return QFileDialog.getOpenFileName(None,
"Open Pcoords graph", "",
|
"Pcoords Files (*.pgdl *.pcv)")
|