| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from openerp import models, fields
class VolunteerUWStatus(models.Model):
_name = 'volunteer.uw_status'
name = fields.Char(required=True, string=u"nazwa")
class Volunteer(models.Model):
_inherit = 'res.users'
uw_status = fields.Many2one(
'volunteer.uw_status',
string=u"status na UW",
ondelete='restrict',
)
def __init__(self, pool, cr):
super(Volunteer, self).__init__(pool, cr)
self._add_permitted_fields(level='privileged', fields={'uw_status'})
self._add_permitted_fields(level='owner', fields={'uw_status'})
self._remove_permitted_fields(level='privileged', fields={
'email', 'phone', 'birthdate', 'place_of_birth',
'citizenship', 'street_gov', 'street_number_gov', 'apt_number_gov', 'zip_code_gov', 'city_gov',
'voivodeship_gov', 'country_gov', 'different_addresses', 'street', 'street_number', 'apt_number',
'zip_code', 'city', 'voivodeship', 'country', 'document_id_kind', 'document_id'
})
| KrzysiekJ/bestja | addons/bestja_volunteer_ucw/models.py | Python | agpl-3.0 | 1,064 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility functions for efficiently processing with the job API
"""
from __future__ import absolute_import
import json
from google.protobuf import json_format
from google.protobuf import struct_pb2
def dict_to_struct(dict_obj):
return json_format.ParseDict(dict_obj, struct_pb2.Struct())
def struct_to_dict(struct_obj):
return json.loads(json_format.MessageToJson(struct_obj))
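# Illustrative usage sketch (added for clarity, not part of the original Beam
# module); the example values are hypothetical.
if __name__ == '__main__':
    example = {'job_name': 'wordcount', 'num_workers': 3}
    as_struct = dict_to_struct(example)
    # protobuf Struct stores every number as a double, so ints round-trip as floats.
    print(struct_to_dict(as_struct))  # {'job_name': 'wordcount', 'num_workers': 3.0}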
| rangadi/beam | sdks/python/apache_beam/runners/job/utils.py | Python | apache-2.0 | 1,174 |
from docker.utils import create_host_config
from common_fixtures import * # NOQA
import websocket as ws
from test_container import assert_execute, assert_stats, assert_ip_inject
CONTAINER_APPEAR_TIMEOUT_MSG = 'Timed out waiting for container ' \
'to appear. Name: [%s].'
NATIVE_TEST_IMAGE = 'cattle/test-agent'
@pytest.fixture(scope='module')
def host(client):
hosts = client.list_host(kind='docker', removed_null=True, state='active')
assert len(hosts) >= 1
host = hosts[0]
return host
@pytest.fixture(scope='module')
def pull_images(client, socat_containers):
docker_client = get_docker_client(host(client))
images = [(NATIVE_TEST_IMAGE, 'latest'), ('busybox', 'latest')]
for image in images:
docker_client.pull(image[0], image[1])
@pytest.fixture(scope='module', autouse=True)
def native_cleanup(client, request):
def fin():
containers = client.list_container()
for c in containers:
try:
if c.name.startswith('native-'):
client.delete(c)
except:
# Tried our best
pass
request.addfinalizer(fin)
@pytest.fixture()
def native_name(random_str):
return 'native-' + random_str
def test_native_net_blank(socat_containers, client, native_name, pull_images):
docker_client = get_docker_client(host(client))
docker_container = docker_client.create_container(NATIVE_TEST_IMAGE,
name=native_name)
rancher_container, docker_container = start_and_wait(client,
docker_container,
docker_client,
native_name)
common_network_asserts(rancher_container, docker_container, 'default')
def test_native_net_bridge(socat_containers, client, native_name, pull_images):
docker_client = get_docker_client(host(client))
host_config = create_host_config(network_mode='bridge')
docker_container = docker_client.create_container(NATIVE_TEST_IMAGE,
name=native_name,
host_config=host_config)
rancher_container, docker_container = start_and_wait(client,
docker_container,
docker_client,
native_name)
common_network_asserts(rancher_container, docker_container, 'bridge')
def test_native_net_host(socat_containers, client, native_name, pull_images):
docker_client = get_docker_client(host(client))
host_config = create_host_config(network_mode='host')
docker_container = docker_client.create_container(NATIVE_TEST_IMAGE,
name=native_name,
host_config=host_config)
rancher_container, docker_container = start_and_wait(client,
docker_container,
docker_client,
native_name)
common_network_asserts(rancher_container, docker_container, 'host')
def test_native_net_container(socat_containers, client, native_name,
pull_images):
docker_client = get_docker_client(host(client))
target_name = 'target-%s' % native_name
target_docker_con = docker_client.create_container(NATIVE_TEST_IMAGE,
name=target_name)
target_container, target_docker_con = start_and_wait(client,
target_docker_con,
docker_client,
target_name)
host_config = create_host_config(
network_mode='container:%s' % target_name)
docker_container = docker_client.create_container('busybox',
stdin_open=True,
tty=True,
name=native_name,
host_config=host_config)
container, docker_container = start_and_wait(client, docker_container,
docker_client,
native_name)
common_network_asserts(container, docker_container, 'container')
assert container['networkContainerId'] == target_container.id
def test_native_lifecycle(socat_containers, client, native_name, pull_images):
docker_client = get_docker_client(host(client))
docker_container = docker_client.create_container(NATIVE_TEST_IMAGE,
name=native_name)
rancher_container, _ = start_and_wait(client, docker_container,
docker_client,
native_name)
c_id = rancher_container.id
assert rancher_container.state == 'running'
docker_client.stop(docker_container)
wait_for_state(client, 'stopped', c_id)
docker_client.start(docker_container)
wait_for_state(client, 'running', c_id)
docker_client.kill(docker_container)
wait_for_state(client, 'stopped', c_id)
docker_client.start(docker_container)
wait_for_state(client, 'running', c_id)
docker_client.remove_container(docker_container, force=True)
wait_for_state(client, 'removed', c_id)
def test_native_managed_network(socat_containers, client, native_name,
pull_images):
docker_client = get_docker_client(host(client))
docker_container = docker_client. \
create_container(NATIVE_TEST_IMAGE,
name=native_name,
labels={'io.rancher.container.network': 'true'})
container, docker_container = start_and_wait(client, docker_container,
docker_client,
native_name)
assert container.externalId == docker_container['Id']
assert container.state == 'running'
assert container.primaryIpAddress != docker_container['NetworkSettings'][
'IPAddress']
assert container.networkMode == 'managed'
def wait_for_state(client, expected_state, c_id):
def stopped_check():
c = client.by_id_container(c_id)
return c.state == expected_state
wait_for(stopped_check,
'Timeout waiting for container to stop. Id: [%s]' % c_id)
def test_native_volumes(socat_containers, client, native_name, pull_images):
docker_client = get_docker_client(host(client))
docker_container = docker_client. \
create_container(NATIVE_TEST_IMAGE,
name=native_name,
volumes=['/foo',
'/host/var',
'/host/tmpreadonly'])
docker_client.start(docker_container,
binds={'/var': {'bind': '/host/var'},
'/tmp1': {'bind': '/host/tmpreadonly',
'ro': True}})
rancher_container = wait_on_rancher_container(client, native_name)
assert rancher_container.externalId == docker_container['Id']
assert rancher_container.state == 'running'
mounts = rancher_container.mounts
assert len(mounts) == 3
foo_mount, var_mount, tmp_mount = None, None, None
for m in mounts:
if m.path == '/foo':
foo_mount = m
elif m.path == '/host/var':
var_mount = m
elif m.path == '/host/tmpreadonly':
tmp_mount = m
assert foo_mount.path == '/foo'
assert var_mount.path == '/host/var'
assert var_mount.permission == 'rw'
assert var_mount.volumeName == '/var'
assert tmp_mount.path == '/host/tmpreadonly'
assert tmp_mount.permission == 'ro'
assert tmp_mount.volumeName == '/tmp1'
def test_native_logs(client, socat_containers, native_name, pull_images):
docker_client = get_docker_client(host(client))
test_msg = 'LOGS_WORK'
docker_container = docker_client. \
create_container(NATIVE_TEST_IMAGE,
name=native_name,
tty=True,
stdin_open=True,
detach=True,
command=['/bin/bash', '-c', 'echo ' + test_msg])
rancher_container, _ = start_and_wait(client, docker_container,
docker_client,
native_name)
found_msg = search_logs(rancher_container, test_msg)
assert found_msg
def test_native_exec(client, socat_containers, native_name, pull_images):
docker_client = get_docker_client(host(client))
test_msg = 'EXEC_WORKS'
docker_container = docker_client. \
create_container(NATIVE_TEST_IMAGE,
name=native_name,
tty=True,
stdin_open=True,
detach=True,
command=['/bin/bash'])
rancher_container, _ = start_and_wait(client, docker_container,
docker_client,
native_name)
assert_execute(rancher_container, test_msg)
def test_native_ip_inject(client, socat_containers, native_name,
pull_images):
docker_client = get_docker_client(host(client))
docker_container = docker_client. \
create_container(NATIVE_TEST_IMAGE,
name=native_name,
labels={'io.rancher.container.network': 'true'},
tty=True,
stdin_open=True,
detach=True,
command=['/bin/bash', '-c', 'until $(ip addr show | '
'grep -q 10.42); '
'do sleep 1 && echo .; '
'done; ip addr show'])
rancher_container, _ = start_and_wait(client, docker_container,
docker_client, native_name)
assert_ip_inject(client.reload(rancher_container))
def test_native_container_stats(client, socat_containers, native_name,
pull_images):
docker_client = get_docker_client(host(client))
docker_container = docker_client. \
create_container(NATIVE_TEST_IMAGE,
name=native_name,
tty=True,
stdin_open=True,
detach=True,
command=['/bin/bash'])
rancher_container, _ = start_and_wait(client, docker_container,
docker_client,
native_name)
assert_stats(rancher_container)
def search_logs(container, test_msg):
logs = container.logs()
conn = ws.create_connection(logs.url + '?token=' + logs.token, timeout=10)
count = 0
found_msg = False
while count <= 100:
count += 1
try:
result = conn.recv()
if test_msg in result:
found_msg = True
break
except ws.WebSocketConnectionClosedException:
break
return found_msg
def start_and_wait(client, docker_container, docker_client, native_name):
docker_client.start(docker_container)
docker_container = docker_client.inspect_container(docker_container)
rancher_container = wait_on_rancher_container(client, native_name,
timeout=180)
return rancher_container, docker_container
def common_network_asserts(rancher_container, docker_container,
expected_net_mode):
assert rancher_container.externalId == docker_container['Id']
assert rancher_container.state == 'running'
if rancher_container.primaryIpAddress is None:
ip_address = ""
else:
ip_address = rancher_container.primaryIpAddress
assert ip_address == \
docker_container['NetworkSettings']['IPAddress']
assert rancher_container.networkMode == expected_net_mode
def wait_on_rancher_container(client, name, timeout=None):
def check():
containers = client.list_container(name=name)
return len(containers) > 0 and containers[0].state != 'requested'
wait_for(check, timeout_message=CONTAINER_APPEAR_TIMEOUT_MSG % name)
r_containers = client.list_container(name=name)
assert len(r_containers) == 1
container = r_containers[0]
kwargs = {}
if timeout:
kwargs['timeout'] = timeout
container = client.wait_success(container, **kwargs)
return container
def test_native_fields(socat_containers, client, pull_images):
docker_client = get_docker_client(host(client))
name = 'native-%s' % random_str()
host_config = create_host_config(
privileged=True,
publish_all_ports=True,
dns=['1.2.3.4'], dns_search=['search.dns.com'],
cap_add=['SYSLOG'], cap_drop=['KILL', 'LEASE'],
restart_policy={'MaximumRetryCount': 5,
'Name': 'on-failure'},
devices=['/dev/null:/dev/xnull:rw'])
docker_container = docker_client.create_container(NATIVE_TEST_IMAGE,
name=name,
hostname='hostname1',
domainname='domainname1',
user='root',
mem_limit='16MB',
memswap_limit='32MB',
cpu_shares=1024,
cpuset='0',
tty=True,
stdin_open=True,
working_dir='/root',
environment={
'FOO': 'BA'},
command=['-c',
'sleep 3'],
entrypoint=['/bin/sh'],
host_config=host_config)
rancher_container, _ = start_and_wait(client, docker_container,
docker_client, name)
assert rancher_container.hostname == 'hostname1'
assert rancher_container.domainName == 'domainname1'
assert rancher_container.user == 'root'
assert rancher_container.memory == 16777216
assert rancher_container.cpuShares == 1024
assert rancher_container.cpuSet == '0'
assert rancher_container.tty is True
assert rancher_container.stdinOpen is True
assert rancher_container.image == NATIVE_TEST_IMAGE
assert rancher_container.workingDir == '/root'
assert rancher_container.environment['FOO'] == 'BA'
assert rancher_container.command == ['-c', 'sleep 3']
assert rancher_container.entryPoint == ['/bin/sh']
assert rancher_container.privileged is True
assert rancher_container.publishAllPorts is True
assert rancher_container.dns == ['1.2.3.4']
assert rancher_container.dnsSearch == ['search.dns.com']
assert rancher_container.capAdd == ['SYSLOG']
assert rancher_container.capDrop == ['KILL', 'LEASE']
assert rancher_container.restartPolicy["name"] == u"on-failure"
assert rancher_container.restartPolicy["maximumRetryCount"] == 5
assert rancher_container.devices == ['/dev/null:/dev/xnull:rw']
| rancher/validation-tests | tests/v3_validation/cattlevalidationtest/core/test_native_docker.py | Python | apache-2.0 | 16,437 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
import gtk,gtk.glade,gobject
import sys, os, time
try:
import utils, TLV_utils, cards, readers
except ImportError, e:
try:
sys.path.append(".")
import utils, TLV_utils, cards, readers
except ImportError:
raise e
from smartcard.CardMonitoring import CardMonitor, CardObserver
from smartcard.ReaderMonitoring import ReaderMonitor, ReaderObserver
import smartcard
class FileLikeTextBuffer(object):
def __init__(self):
self.had_newline = True
self.buffer = gtk.TextBuffer()
self.endmark = self.buffer.create_mark("The End", self.buffer.get_end_iter(), False)
self.views = []
def add_view(self, v):
self.views.append(v)
v.scroll_mark_onscreen( self.endmark )
def writelines(self, sequence):
for s in sequence: self.write(s)
def write(self, s):
d = "%s: " % time.strftime("%F %T")
parts = s.split("\n")
if self.had_newline:
self.had_newline = False
s = d
else:
s = ""
if parts[-1] == '':
del parts[-1]
self.had_newline = True
s = s + ("\n"+d).join(parts)
if self.had_newline: s = s + "\n"
self.buffer.insert( self.buffer.get_end_iter(), s)
for v in self.views:
v.scroll_mark_onscreen( self.endmark )
def flush(self): pass
def for_stream(self, stream):
class stream_to_buf(object):
def __init__(self, parent, stream):
self.parent = parent
self.stream = stream
def flush(self):
self.parent.flush()
self.stream.flush()
def write(self, s):
self.parent.write(s)
self.stream.write(s)
def writelines(self, s):
self.parent.writelines(s)
self.stream.writelines(s)
return stream_to_buf(self, stream)
class ireadyou(CardObserver,ReaderObserver):
GLADE_FILE = "gui/ireadyou/ireadyou.glade"
def __init__(self, ticket = None):
"Create and show main window."
self.main_window_xml = gtk.glade.XML(self.GLADE_FILE, "main")
self.main_window = self.main_window_xml.get_widget("main")
self.card_tabs = self.main_window_xml.get_widget("card_tabs")
while self.card_tabs.get_n_pages() > 0:
self.card_tabs.remove_page(0)
for t in self.CARD_TYPES:
a, b, l = gtk.Alignment(yscale=1,xscale=1,xalign=0.5,yalign=0.5), gtk.VBox(), gtk.Label(t[1])
a.add(b)
a.show()
b.show()
l.show()
self.card_tabs.append_page(a, tab_label=l)
self.ticket_button_group = gtk.RadioButton()
self.ticket_button_group._ticket = None
self.status_area = self.main_window_xml.get_widget("status_area")
self.known_readers = []
self.known_cards = {} # Note stupid: the keys to this dict are not objects from the known_readers list but rather reader name strings
self.connected_cards = {} # Again: the keys are not cards but repr(card)
self.tickets = {} # ditto
self.ticket_displayed = None # This is either None or a tuple (card object, ticket object)
self._update_status()
self.logbuf = FileLikeTextBuffer()
sys.stdout = self.logbuf.for_stream(sys.stdout)
sys.stderr = self.logbuf.for_stream(sys.stderr)
self.logview = self.main_window_xml.get_widget("logview")
self.logview.set_buffer(self.logbuf.buffer)
self.logbuf.add_view(self.logview)
signals = {
"on_exit_clicked": self.exit_clicked,
"on_main_delete_event": self.exit_clicked,
"on_main_destroy": gtk.main_quit,
}
self.main_window_xml.signal_autoconnect(signals)
self._clear_display()
self.rmon = ReaderMonitor()
self.cmon = CardMonitor()
self.rmon.addObserver(self)
self.cmon.addObserver(self)
def _clear_display(self):
self.card_tabs.set_current_page(0)
for i in range(self.card_tabs.get_n_pages()):
a = self.card_tabs.get_nth_page(i)
vbox = a.get_child()
for c in vbox.get_children():
vbox.remove(c)
label = self.card_tabs.get_tab_label(a)
label.set_property("sensitive", False)
def _update_status(self):
for c in self.status_area.get_children():
self.status_area.remove(c)
if len(self.known_readers) == 0:
self.status_area.add( gtk.Label(u"Keine Lesegeräte angeschlossen.") )
else:
for reader in self.known_readers:
frame = gtk.Frame(label=str(reader))
if len(self.known_cards[ reader.name ]) == 0:
frame.add( gtk.Label(u"Keine Karten verbunden.") )
else:
vbox = gtk.VBox()
for card in self.known_cards[ reader.name ]:
if self.connected_cards.has_key(repr(card)):
card_ = self.connected_cards[ repr(card) ]
cardname = card_.get_driver_name()
else:
cardname = str(card)
hbox = gtk.HBox()
cardlabel = gtk.Label( "<b>%s</b>: " % cardname )
cardlabel.set_use_markup(True)
hbox.pack_start(cardlabel, expand=False)
vbox2 = gtk.VBox()
hbox.pack_start(vbox2, expand=True)
for ticket in self.tickets[ repr(card) ]:
button = gtk.RadioButton(group=self.ticket_button_group, label=str(ticket), use_underline=False)
vbox2.pack_start(button, expand=False)
button.connect("toggled", self._ticket_button_toggled)
button._ticket = (card, ticket)
if self.ticket_displayed is not None and ticket == self.ticket_displayed[1]:
button.set_active(True)
vbox.add(hbox)
frame.add(vbox)
self.status_area.add(frame)
self.status_area.show_all()
def _format_datum(d):
return d.strftime("%x")
CARD_TYPES = [
(("SCHUL_T",),
"Schulticket", (
("Name", "name_klar", None),
("Alter", "alter", None),
("Geburtsdatum", "geburtsdatum", _format_datum),
("Schule", "schule", None),
(u"Kartengültigkeit", "gueltigkeit", None),
),
),
(("JOBT_ERW",),
"Jobticket", (
("Name", "name_klar", None),
("Geburtsdatum", "geburtsdatum", _format_datum),
(u"Kartengültigkeit", "gueltigkeit", None),
),
),
(("MT_ABO",),
"Monatsabo", (
("Abo-Nummer", "abonr", None),
(u"Kartengültigkeit", "gueltigkeit", None),
),
),
(None,
"Anderes", (
),
),
]
def _ticket_button_toggled(self, togglebutton):
self.ticket_displayed = None
for b in togglebutton.get_group():
if b.get_active():
if hasattr(b, "_ticket"):
self.ticket_displayed = b._ticket
self._update_ticket_display()
def _update_ticket_display(self):
self._clear_display()
if self.ticket_displayed is None:
return
todisplay = self.ticket_displayed[1]
for i,t in enumerate(self.CARD_TYPES):
if todisplay.tickettyp in t[0]:
break
# Note: implicit selection of the last card type when no match is found
self.card_tabs.set_current_page(i)
a = self.card_tabs.get_nth_page(i)
vbox = a.get_child()
label = self.card_tabs.get_tab_label(a)
label.set_property("sensitive", True)
for labeltext, propertyname, transformation in t[2]:
frame = gtk.Frame(label=labeltext)
content = getattr(todisplay, propertyname, None)
contenttext = str( transformation is not None and transformation(content) or content )
contentlabel = gtk.Label("<b><tt><big>%s</big></tt></b>" % contenttext)
contentlabel.set_use_markup(True)
contentlabel.show()
frame.add( contentlabel )
frame.show()
vbox.add(frame)
def exit_clicked(self, widget, event=None, data=None):
gtk.main_quit()
return True
def run(self):
gtk.main()
# From the CardObserver and ReaderObserver classes
def update( self, observable, (added, removed) ):
try:
gtk.gdk.threads_enter()
#print observable, added, removed
if observable is self.rmon.instance:
self.reader_update(observable, (added, removed) )
elif observable is self.cmon.instance:
self.card_update(observable, (added, removed) )
self._update_status()
self._update_ticket_display()
finally:
gtk.gdk.threads_leave()
def reader_update( self, observable, (added, removed) ):
for r in removed:
if r in self.known_readers:
for card in list(self.known_cards[ r.name ]):
self._remove_card(card)
assert len(self.known_cards[ r.name ]) == 0
del self.known_cards[ r.name ]
self.known_readers.remove(r)
for a in added:
if a not in self.known_readers:
self.known_readers.append(a)
self.known_cards[ a.name ] = []
def card_update( self, observable, (added, removed) ):
for r in removed:
if not self.known_cards.has_key(r.reader): continue
if r in self.known_cards[r.reader]:
self._remove_card(r)
for a in added:
if not self.known_cards.has_key(a.reader): continue
if a not in self.known_cards[a.reader]:
self._add_card(a)
def _add_card(self, card):
self.known_cards[ card.reader ].append(card)
if not self.tickets.has_key( repr(card) ):
self.tickets[ repr(card) ] = []
conn = card.createConnection()
connected = False
try:
conn.connect()
connected = True
except smartcard.Exceptions.NoCardException, e:
pass
if connected:
card_ = cards.new_card_object(conn)
cards.generic_card.DEBUG = False
self.connected_cards[ repr(card) ] = card_
for i in range(1,9):
try:
ticket = cards.vrs_application.VrsTicket.from_card(card_, record_no = i)
print "Loaded ticket '%s' from record %i" % (ticket, i)
self._add_ticket(card, ticket)
except (KeyboardInterrupt, SystemExit):
raise
except Exception,e:
if not str(e).startswith("'No ticket in record no."):
print e
if not isinstance(card_, cards.vrs_application.VRS_Application):
break
def _remove_card(self, card):
if self.tickets.has_key( repr(card) ):
for t in list(self.tickets[ repr(card) ]):
self._remove_ticket(card, t)
assert len(self.tickets[ repr(card) ]) == 0
del self.tickets[ repr(card) ]
if self.connected_cards.has_key( repr(card) ):
try:
self.connected_cards[ repr(card) ].close_card()
except smartcard.Exceptions.CardConnectionException, e:
pass
del self.connected_cards[ repr(card) ]
self.known_cards[ card.reader ].remove(card)
def _add_ticket(self, card, ticket):
self.tickets[ repr(card) ].append( ticket )
if self.ticket_displayed is None:
self.ticket_displayed = ( card, ticket )
def _remove_ticket(self, card, ticket):
if self.ticket_displayed is not None and self.ticket_displayed[1] == ticket:
self.ticket_displayed = None
# TODO: Find a different ticket to display
self.tickets[ repr(card) ].remove(ticket)
OPTIONS = ""
LONG_OPTIONS = []
if __name__ == "__main__":
## c = readers.CommandLineArgumentHelper()
##
## (options, arguments) = c.getopt(sys.argv[1:], OPTIONS, LONG_OPTIONS)
##
## card_object = c.connect()
## card = cards.new_card_object(card_object)
## #cards.generic_card.DEBUG = False
##
## print >>sys.stderr, "Using %s" % card.DRIVER_NAME
##
## if len(arguments) > 0:
## ticket = cards.vrs_application.VrsTicket.from_card(card, record_no = int(arguments[0], 0))
## else:
## ticket = cards.vrs_application.VrsTicket.from_card(card)
gtk.gdk.threads_init()
g = ireadyou()
g.run()
| 12019/cyberflex-shell | gui/ireadyou.py | Python | gpl-2.0 | 13,861 |
import json
from .attributesholder import AttributesHolder
class SerializableAttributesHolder(AttributesHolder):
"""AttributesHolder with methods handling serialization and
deserialization according to the PPP datamodel specification."""
def as_dict(self):
"""Returns a JSON-serializeable object representing this tree."""
def conv(v):
if isinstance(v, SerializableAttributesHolder):
return v.as_dict()
elif isinstance(v, list):
return [conv(x) for x in v]
elif isinstance(v, dict):
return {x:conv(y) for (x,y) in v.items()}
else:
return v
return {k.replace('_', '-'): conv(v) for (k, v) in self._attributes.items()}
def as_json(self):
"""Return a JSON dump of the object."""
return json.dumps(self.as_dict())
@staticmethod
def _test_can_import_json(data):
"""Sanity check on input JSON data"""
pass
@classmethod
def from_json(cls, data):
"""Decode a JSON string and inflate a node instance."""
# Decode JSON string
assert isinstance(data, str)
data = json.loads(data)
assert isinstance(data, dict)
return cls.from_dict(data)
@classmethod
def from_dict(cls, data):
cls._test_can_import_json(data)
# Find a class that will deserialize the dict as specifically
# as possible
while True:
cls2 = cls._select_class(data)
if cls is cls2:
break
cls = cls2
conv = (lambda k,v: cls.deserialize_attribute(k, v)
if isinstance(v, dict) else v)
data = {k.replace('-', '_'): conv(k,v) for (k, v) in data.items()}
return cls(**data)
@classmethod
def deserialize_attribute(cls, key, value):
return cls.from_dict(value)
@classmethod
def _select_class(cls, data):
return cls
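# Illustrative note (added for clarity, not part of the original module): for a
# concrete subclass Foo, Foo.from_json(foo.as_json()) rebuilds an equivalent
# node; attribute names swap '_' for '-' when dumped and back again on load.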
| ProjetPP/PPP-datamodel-Python | ppp_datamodel/utils/serializableattributesholder.py | Python | agpl-3.0 | 1,989 |
# Generated by Django 3.2.10 on 2021-12-29 17:55
import django.contrib.postgres.indexes
import django.utils.timezone
import model_utils.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('company', '0023_auto_20210215_0756'),
]
operations = [
migrations.AlterModelOptions(
name='brand',
options={
'ordering': ['-created'],
'permissions': (),
'verbose_name': 'Marka',
'verbose_name_plural': 'Marki',
},
),
migrations.AlterModelOptions(
name='company',
options={
'ordering': ['-created'],
'permissions': (),
'verbose_name': 'Producent',
'verbose_name_plural': 'Producenci',
},
),
migrations.RemoveIndex(
model_name='company',
name='company_com_created_54f6ef_brin',
),
migrations.RenameField(
model_name='brand',
old_name='created_at',
new_name='created',
),
migrations.RenameField(
model_name='company',
old_name='created_at',
new_name='created',
),
migrations.AddField(
model_name='brand',
name='modified',
field=model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name='modified'
),
),
migrations.AddField(
model_name='company',
name='modified',
field=model_utils.fields.AutoLastModifiedField(
default=django.utils.timezone.now, editable=False, verbose_name='modified'
),
),
migrations.AlterField(
model_name='brand',
name='created',
field=model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name='created'
),
),
migrations.AlterField(
model_name='company',
name='created',
field=model_utils.fields.AutoCreatedField(
default=django.utils.timezone.now, editable=False, verbose_name='created'
),
),
migrations.AddIndex(
model_name='company',
index=django.contrib.postgres.indexes.BrinIndex(
fields=['created'], name='company_com_created_b4a129_brin', pages_per_range=16
),
),
]
| KlubJagiellonski/pola-backend | pola/company/migrations/0024_auto_20211229_1855.py | Python | bsd-3-clause | 2,609 |
#!/usr/bin/env python
"""
Copyright 2012 Wordnik, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Sentence:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'hasScoredWords': 'bool',
'id': 'long',
'scoredWords': 'list[ScoredWord]',
'display': 'str',
'rating': 'int',
'documentMetadataId': 'long'
}
self.hasScoredWords = None # bool
self.id = None # long
self.scoredWords = None # list[ScoredWord]
self.display = None # str
self.rating = None # int
self.documentMetadataId = None # long
| 27himanshu/vocabList-kivy | wordnik/wordnik/models/Sentence.py | Python | gpl-3.0 | 1,281 |
#!/usr/bin/python
import paho.mqtt.client as mqtt
def on_connect(client, userdata, flags, rc):
client.subscribe("BeeeOn/#")
client = mqtt.Client()
client.on_connect = on_connect
client.connect("localhost", 1883, 60)
client.loop_start()
command_topic = "BeeeOn/set_command"
print 'Type "quit" to exit.'
while True:
    msg = ""
    line = raw_input('PROMPT> ')
    if line.strip() == "quit":  # the prompt above advertises "quit" to exit
        break
    line = line.replace(' ', '')
    for i in line:
        msg += str(ord(i)) + ','
    if len(msg) > 0:
        client.publish(command_topic, msg)
client.loop_stop()
client.disconnect()
| BeeeOn/sensors | scripts/run.py | Python | bsd-3-clause | 509 |
# -*- coding: utf-8 -*-
from gettext import gettext as _
APP_NAME = u'BBCalc'
APP_VERSION = u'0.8.1'
# List of authors
APP_AUTHORS = [u'Basil Shubin <bashu@users.sourceforge.net>']
APP_YEAR = u'2005-2009'
APP_WEBSITE = u'http://bbcalc.sf.net/'
APP_COPYRIGHT = _(u"Copyright © %s Basil Shubin") % (APP_YEAR)
APP_DESCRIPTION = _(u"""BBCalc (Body Building Calculators) is a set of calculators
related to body building and fitness topics.""")
# license text of this application
APP_LICENSE = _(u"""BBCalc is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
BBCalc is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with BBCalc; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""")
try:
from kiwi.environ import Application
except ImportError:
raise SystemExit("Could not find kiwi")
app = Application(APP_NAME.lower())
if app.uninstalled:
app.add_global_resource('pixmaps', 'data/pixmaps')
app.add_global_resource('glade', 'data/glade')
app.enable_translation()
app.set_application_domain(APP_NAME.lower())
| bashu/bbcalc | bbcalc/__init__.py | Python | gpl-3.0 | 1,541 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateMuteConfig
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-securitycenter
# [START securitycenter_v1_generated_SecurityCenter_UpdateMuteConfig_async]
from google.cloud import securitycenter_v1
async def sample_update_mute_config():
# Create a client
client = securitycenter_v1.SecurityCenterAsyncClient()
# Initialize request argument(s)
mute_config = securitycenter_v1.MuteConfig()
mute_config.filter = "filter_value"
request = securitycenter_v1.UpdateMuteConfigRequest(
mute_config=mute_config,
)
# Make the request
response = await client.update_mute_config(request=request)
# Handle the response
print(response)
# [END securitycenter_v1_generated_SecurityCenter_UpdateMuteConfig_async]
| googleapis/python-securitycenter | samples/generated_samples/securitycenter_v1_generated_security_center_update_mute_config_async.py | Python | apache-2.0 | 1,620 |
from txtobjs.schema.TextObjectSchema import TextObjectSchema
from txtobjs.schema.SimpleTextField import SimpleTextField
from txtobjs.schema.SubObjectDict import SubObjectDict
from txtobjs.schema.ValueListField import ValueListField
# Assumed imports (missing from the file as captured), following the layout above:
from txtobjs.schema.SubObjectDictKey import SubObjectDictKey
from txtobjs.schema.ObjIdList import ObjIdList
from txtobjs_test.servers_use_case.ServiceFirewallSchema import ServiceFirewallSchema
class ServiceSchema(TextObjectSchema):
text_class = 'Service'
name = SubObjectDictKey()
role_name = SimpleTextField('role_name')
hosts = ObjIdList('hosts', text_class='Machine')
firewall = SubObjectDict('firewall', schema=ServiceFirewallSchema())
| shearern/python-text-objects | src/txtobjs_test/servers_use_case/ServiceSchema.py | Python | gpl-2.0 | 519 |
from __future__ import with_statement
from ofxclient.account import Account
from configparser import ConfigParser
import os
import os.path
try:
import keyring
KEYRING_AVAILABLE = True
except:
KEYRING_AVAILABLE = False
try:
DEFAULT_CONFIG = os.path.expanduser(os.path.join('~', 'ofxclient.ini'))
except:
DEFAULT_CONFIG = None
class SecurableConfigParser(ConfigParser):
""":py:class:`ConfigParser.ConfigParser` subclass that knows how to store
options marked as secure into the OS specific
keyring/keychain.
To mark an option as secure, the caller must call
'set_secure' at least one time for the particular
option and from then on it will be seen as secure
and will be stored / retrieved from the keychain.
Example::
from ofxclient.config import SecurableConfigParser
# password will not be saved in the config file
c = SecurableConfigParser()
c.add_section('Info')
c.set('Info','username','bill')
c.set_secure('Info','password','s3cre7')
with open('config.ini','w') as fp:
c.write(fp)
"""
_secure_placeholder = '%{secured}'
def __init__(self, keyring_name='ofxclient',
keyring_available=KEYRING_AVAILABLE, **kwargs):
ConfigParser.__init__(self, interpolation = None)
self.keyring_name = keyring_name
self.keyring_available = keyring_available
self._unsaved = {}
self.keyring_name = keyring_name
def is_secure_option(self, section, option):
"""Test an option to see if it is secured or not.
:param section: section id
:type section: string
:param option: option name
:type option: string
        :rtype: boolean
        :returns: True if the option is secure, False otherwise.
"""
if not self.has_section(section):
return False
if not self.has_option(section, option):
return False
if ConfigParser.get(self, section, option) == self._secure_placeholder:
return True
return False
def has_secure_option(self, section, option):
"""See is_secure_option"""
return self.is_secure_option(section, option)
def items(self, section):
"""Get all items for a section. Subclassed, to ensure secure
items come back with the unencrypted data.
:param section: section id
:type section: string
"""
items = []
for k, v in ConfigParser.items(self, section):
if self.is_secure_option(section, k):
v = self.get(section, k)
items.append((k, v))
return items
def secure_items(self, section):
"""Like items() but only return secure items.
:param section: section id
:type section: string
"""
return [x
for x in self.items(section)
if self.is_secure_option(section, x[0])]
def set(self, section, option, value):
"""Set an option value. Knows how to set options properly marked
as secure."""
if self.is_secure_option(section, option):
self.set_secure(section, option, value)
else:
ConfigParser.set(self, section, option, value)
def set_secure(self, section, option, value):
"""Set an option and mark it as secure.
Any subsequent uses of 'set' or 'get' will also
now know that this option is secure as well.
"""
if self.keyring_available:
s_option = "%s%s" % (section, option)
self._unsaved[s_option] = ('set', value)
value = self._secure_placeholder
ConfigParser.set(self, section, option, value)
def get(self, section, option, *args):
"""Get option value from section. If an option is secure,
populates the plain text."""
if self.is_secure_option(section, option) and self.keyring_available:
s_option = "%s%s" % (section, option)
if self._unsaved.get(s_option, [''])[0] == 'set':
return self._unsaved[s_option][1]
else:
return keyring.get_password(self.keyring_name, s_option)
return ConfigParser.get(self, section, option, *args)
def remove_option(self, section, option):
"""Removes the option from ConfigParser as well as
the secure storage backend
"""
if self.is_secure_option(section, option) and self.keyring_available:
s_option = "%s%s" % (section, option)
self._unsaved[s_option] = ('delete', None)
ConfigParser.remove_option(self, section, option)
def write(self, *args):
"""See ConfigParser.write(). Also writes secure items to keystore."""
ConfigParser.write(self, *args)
if self.keyring_available:
for key, thing in self._unsaved.items():
action = thing[0]
value = thing[1]
if action == 'set':
keyring.set_password(self.keyring_name, key, value)
elif action == 'delete':
try:
keyring.delete_password(self.keyring_name, key)
except:
pass
self._unsaved = {}
class OfxConfig(object):
"""Default config file handler for other tools to use.
This can read and write from the default config which is
$USERS_HOME/ofxclient.ini
:param file_name: absolute path to a config file (optional)
:type file_name: string or None
Example usage::
from ofxclient.config import OfxConfig
from ofxclient import Account
a = Account()
c = OfxConfig(file_name='/tmp/new.ini')
c.add_account(a)
c.save()
account_list = c.accounts()
one_account = c.account( a.local_id() )
"""
def __init__(self, file_name=None):
self.secured_field_names = [
'institution.username',
'institution.password'
]
f = file_name or DEFAULT_CONFIG
if f is None:
raise ValueError('file_name is required')
self._load(f)
def reload(self):
"""Reload the config file from disk"""
return self._load()
def accounts(self):
"""List of confgured :py:class:`ofxclient.Account` objects"""
return [self._section_to_account(s)
for s in self.parser.sections()]
def encrypted_accounts(self):
return [a
for a in self.accounts()
if self.is_encrypted_account(a.local_id())]
def unencrypted_accounts(self):
return [a
for a in self.accounts()
if not self.is_encrypted_account(a.local_id())]
def account(self, id):
"""Get :py:class:`ofxclient.Account` by section id"""
if self.parser.has_section(id):
return self._section_to_account(id)
return None
def add_account(self, account):
"""Add Account to config (does not save)"""
serialized = account.serialize()
section_items = flatten_dict(serialized)
section_id = section_items['local_id']
if not self.parser.has_section(section_id):
self.parser.add_section(section_id)
for key in sorted(section_items):
self.parser.set(section_id, key, section_items[key])
self.encrypt_account(id=section_id)
return self
def encrypt_account(self, id):
"""Make sure that certain fields are encrypted."""
for key in self.secured_field_names:
value = self.parser.get(id, key)
self.parser.set_secure(id, key, value)
return self
def is_encrypted_account(self, id):
"""Are all fields for the account id encrypted?"""
for key in self.secured_field_names:
if not self.parser.is_secure_option(id, key):
return False
return True
def remove_account(self, id):
"""Add Account from config (does not save)"""
if self.parser.has_section(id):
self.parser.remove_section(id)
return True
return False
def save(self):
"""Save changes to config file"""
with open(self.file_name, 'w') as fp:
self.parser.write(fp)
return self
def _load(self, file_name=None):
self.parser = None
file_name = file_name or self.file_name
if not os.path.exists(file_name):
with open(file_name, 'a'):
os.utime(file_name, None)
self.file_name = file_name
conf = SecurableConfigParser()
        with open(self.file_name) as fp:
            conf.read_file(fp)
self.parser = conf
return self
def _section_to_account(self, section):
section_items = dict(self.parser.items(section))
serialized = unflatten_dict(section_items)
return Account.deserialize(serialized)
def unflatten_dict(dict_, prefix=None, separator='.'):
    ret = {}
    for k, v in dict_.items():
key_parts = k.split(separator)
if len(key_parts) == 1:
ret[k] = v
else:
first = key_parts[0]
rest = key_parts[1:]
temp = ret.setdefault(first, {})
for idx, part in enumerate(rest):
if (idx+1) == len(rest):
temp[part] = v
else:
temp = temp.setdefault(part, {})
return ret
def flatten_dict(dict_, prefix=None, separator='.'):
ret = {}
for k, v in dict_.items():
if prefix:
flat_key = separator.join([prefix, k])
else:
flat_key = k
if isinstance(v, dict):
deflated = flatten_dict(v, prefix=flat_key)
for dk, dv in deflated.items():
ret[dk] = dv
else:
ret[flat_key] = v
return ret
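# Illustrative round-trip of the two helpers above (added for clarity, not part
# of the original ofxclient module); the sample data is made up.
if __name__ == '__main__':
    nested = {'institution': {'username': 'bill', 'id': '1'}, 'local_id': 'abc'}
    flat = flatten_dict(nested)
    # -> {'institution.username': 'bill', 'institution.id': '1', 'local_id': 'abc'}
    assert unflatten_dict(flat) == nested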
| jbms/ofxclient | ofxclient/config.py | Python | mit | 9,882 |
# (c) Crown Copyright 2014 Defence Science and Technology Laboratory UK
# Author: Rich Brantingham
"""
Placeholder - django barfs if you don't have a models.py
"""
| dstl/ideaworks | backend/ideaworks/protective_marking_app/models.py | Python | agpl-3.0 | 174 |
# -*- coding: utf-8 -*-
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['admin']
| Karaage-Cluster/karaage-debian | karaage/legacy/admin/south_migrations/0004_auto__del_logentry.py | Python | gpl-3.0 | 237 |
#
# James Laska <jlaska@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.base import DeprecatedCommand
from pykickstart.commands.monitor import FC6_Monitor
class FC3_TestCase(CommandTest):
command = "monitor"
def runTest(self):
# pass
self.assert_parse("monitor", "")
self.assert_parse("monitor --hsync=HSYNC", "monitor --hsync=HSYNC\n")
self.assert_parse("monitor --vsync=VSYNC", "monitor --vsync=VSYNC\n")
self.assert_parse("monitor --monitor=MONITOR", "monitor --monitor=\"MONITOR\"\n")
self.assert_parse("monitor --hsync=HSYNC --monitor=MONITOR",
"monitor --hsync=HSYNC --monitor=\"MONITOR\"\n")
self.assert_parse("monitor --monitor=MONITOR --vsync=VSYNC",
"monitor --monitor=\"MONITOR\" --vsync=VSYNC\n")
self.assert_parse("monitor --hsync=HSYNC --monitor=MONITOR --vsync=VSYNC",
"monitor --hsync=HSYNC --monitor=\"MONITOR\" --vsync=VSYNC\n")
self.assert_parse_error("monitor BOGUS")
self.assert_parse_error("monitor --monitor=SOMETHING GREAT")
if "--noprobe" not in self.optionList:
self.assert_parse_error("monitor --noprobe")
class FC6_TestCase(FC3_TestCase):
def runTest(self):
FC3_TestCase.runTest(self)
# pass
self.assert_parse("monitor --noprobe", "monitor --noprobe\n")
# fail
self.assert_parse_error("monitor --noprobe 1")
# assert default values
self.assertTrue(FC6_Monitor().probe)
class F10_TestCase(FC6_TestCase):
def runTest(self):
# make sure we've been deprecated
parser = self.getParser("monitor")
self.assertEqual(issubclass(parser.__class__, DeprecatedCommand), True)
parser = parser._getParser()
self.assertIsNotNone(parser)
self.assertTrue(parser.description.find('deprecated:: Fedora10') > -1)
if __name__ == "__main__":
unittest.main()
| bcl/pykickstart | tests/commands/monitor.py | Python | gpl-2.0 | 2,924 |
from conans import ConanFile, CMake
import os
channel = os.getenv("CONAN_CHANNEL", "testing")
username = os.getenv("CONAN_USERNAME", "flier")
class ZipkinTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = "zipkin/0.2.0@%s/%s" % (username, channel)
generators = "cmake"
def build(self):
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def test(self):
self.run(os.sep.join([".", "bin", "test"]))
| flier/zipkin-cpp | conan/test_package/conanfile.py | Python | apache-2.0 | 591 |
# -*- coding: utf-8 -*-
"""
flask_rq2.functions
~~~~~~~~~~~~~~~~~~~
"""
from datetime import datetime, timedelta
class JobFunctions(object):
"""
Some helper functions that are added to a function decorated
with a :meth:`~flask_rq2.app.RQ.job` decorator.
"""
#: the methods to add to jobs automatically
functions = ['queue', 'schedule', 'cron']
def __init__(self, rq, wrapped, queue_name, timeout, result_ttl, ttl,
depends_on, at_front, meta, description):
self.rq = rq
self.wrapped = wrapped
self._queue_name = queue_name
self._timeout = timeout
self._result_ttl = result_ttl
# job TTLs don't have a default value
# https://github.com/nvie/rq/issues/873
self.ttl = ttl
self._depends_on = depends_on
self._at_front = at_front
self._meta = meta
self._description = description
def __repr__(self):
full_name = '.'.join([self.wrapped.__module__, self.wrapped.__name__])
return '<JobFunctions %s>' % full_name
@property
def queue_name(self):
# Catch empty strings and None
return self._queue_name or self.rq.default_queue
@queue_name.setter
def queue_name(self, value):
self._queue_name = value
@property
def timeout(self):
return self._timeout or self.rq.default_timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
@property
def result_ttl(self):
# Allow a result TTL of 0
if self._result_ttl is None:
return self.rq.default_result_ttl
else:
return self._result_ttl
@result_ttl.setter
def result_ttl(self, value):
self._result_ttl = value
def queue(self, *args, **kwargs):
"""
A function to queue a RQ job, e.g.::
@rq.job(timeout=60)
def add(x, y):
return x + y
add.queue(1, 2, timeout=30)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
        :param queue: Name of the queue to queue in, defaults to
                      the queue of the job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param depends_on: A job instance or id that the new job depends on.
:type depends_on: ~flask_rq2.job.FlaskJob or str
:param job_id: A custom ID for the new job. Defaults to an
:mod:`UUID <uuid>`.
:type job_id: str
:param at_front: Whether or not the job is queued in front of all other
enqueued jobs.
:type at_front: bool
:param meta: Additional meta data about the job.
:type meta: dict
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
depends_on = kwargs.pop('depends_on', self._depends_on)
job_id = kwargs.pop('job_id', None)
at_front = kwargs.pop('at_front', self._at_front)
meta = kwargs.pop('meta', self._meta)
description = kwargs.pop('description', self._description)
return self.rq.get_queue(queue_name).enqueue_call(
self.wrapped,
args=args,
kwargs=kwargs,
timeout=timeout,
result_ttl=result_ttl,
ttl=ttl,
depends_on=depends_on,
job_id=job_id,
at_front=at_front,
meta=meta,
description=description,
)
def schedule(self, time_or_delta, *args, **kwargs):
"""
A function to schedule running a RQ job at a given time
or after a given timespan::
@rq.job
def add(x, y):
return x + y
add.schedule(timedelta(hours=2), 1, 2, timeout=10)
add.schedule(datetime(2016, 12, 31, 23, 59, 59), 1, 2)
add.schedule(timedelta(days=14), 1, 2, repeat=1)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
        :param queue: Name of the queue to queue in, defaults to
                      the queue of the job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param result_ttl: The result TTL in seconds. If not provided
uses the job's result TTL or
:attr:`~flask_rq2.RQ.default_result_ttl`.
:type result_ttl: int
:param ttl: The job TTL in seconds. If not provided
uses the job's TTL or no TTL at all.
:type ttl: int
:param repeat: The number of times the job needs to be repeatedly
queued. Requires setting the ``interval`` parameter.
:type repeat: int
:param interval: The interval of repetition as defined by the
``repeat`` parameter in seconds.
:type interval: int
:param job_id: A custom ID for the new job. Defaults to a UUID.
:type job_id: str
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
result_ttl = kwargs.pop('result_ttl', self.result_ttl)
ttl = kwargs.pop('ttl', self.ttl)
repeat = kwargs.pop('repeat', None)
interval = kwargs.pop('interval', None)
job_id = kwargs.pop('job_id', None)
if isinstance(time_or_delta, timedelta):
time = datetime.utcnow() + time_or_delta
else:
time = time_or_delta
return self.rq.get_scheduler().schedule(
time,
self.wrapped,
args=args,
kwargs=kwargs,
interval=interval,
repeat=repeat,
result_ttl=result_ttl,
ttl=ttl,
timeout=timeout,
id=job_id,
description=description,
queue_name=queue_name,
)
def cron(self, pattern, name, *args, **kwargs):
"""
A function to setup a RQ job as a cronjob::
@rq.job('low', timeout=60)
def add(x, y):
return x + y
add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)
:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param pattern: A Crontab pattern.
:type pattern: str
:param name: The name of the cronjob.
:type name: str
        :param queue: Name of the queue to queue in, defaults to
                      the queue of the job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds.
If not provided uses the job's timeout or
:attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param repeat: The number of times the job needs to be repeatedly
                       queued via the cronjob. Take care to only use this for
                       cronjobs that don't already repeat themselves natively
due to their crontab.
:type repeat: int
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
"""
queue_name = kwargs.pop('queue', self.queue_name)
timeout = kwargs.pop('timeout', self.timeout)
description = kwargs.pop('description', None)
repeat = kwargs.pop('repeat', None)
return self.rq.get_scheduler().cron(
pattern,
self.wrapped,
args=args,
kwargs=kwargs,
repeat=repeat,
queue_name=queue_name,
id='cron-%s' % name,
timeout=timeout,
description=description,
)
| jezdez/Flask-RQ2 | src/flask_rq2/functions.py | Python | mit | 9,320 |
import time
import RPi.GPIO as GPIO
# use Broadcom pin numbers
GPIO.setmode(GPIO.BCM)
BUTTON_PIN = 4
GPIO.setup(BUTTON_PIN, GPIO.IN)
try:
    while True:
        # print pin state
        print GPIO.input(BUTTON_PIN)
        time.sleep(1)
except KeyboardInterrupt:
    pass
finally:
    # without the try/finally the cleanup below was unreachable
    GPIO.cleanup()
| lukaszo/rpitips-examples | RPi.GPIO/button.py | Python | apache-2.0 | 233 |
import os
import ccnet
from pysearpc import SearpcError
from seaf_utils import CCNET_CONF_DIR, SEAFILE_CENTRAL_CONF_DIR, multi_tenancy_enabled
import wsgidav.util as util
_logger = util.getModuleLogger(__name__)
class SeafileDomainController(object):
def __init__(self):
pool = ccnet.ClientPool(CCNET_CONF_DIR, central_config_dir=SEAFILE_CENTRAL_CONF_DIR)
self.ccnet_threaded_rpc = ccnet.CcnetThreadedRpcClient(pool, req_pool=True)
def __repr__(self):
return self.__class__.__name__
def getDomainRealm(self, inputURL, environ):
return "Seafile Authentication"
    def requireAuthentication(self, realmname, environ):
return True
def isRealmUser(self, realmname, username, environ):
return True
def getRealmUserPassword(self, realmname, username, environ):
"""
Not applicable to seafile.
"""
return ""
def authDomainUser(self, realmname, username, password, environ):
if "'" in username:
return False
try:
if self.ccnet_threaded_rpc.validate_emailuser(username, password) != 0:
return False
except:
return False
try:
user = self.ccnet_threaded_rpc.get_emailuser_with_import(username)
if user.role == 'guest':
environ['seafile.is_guest'] = True
else:
environ['seafile.is_guest'] = False
except Exception as e:
_logger.exception('get_emailuser')
if multi_tenancy_enabled():
try:
orgs = self.ccnet_threaded_rpc.get_orgs_by_user(username)
if orgs:
environ['seafile.org_id'] = orgs[0].org_id
except Exception, e:
_logger.exception('get_orgs_by_user')
pass
return True
| saukrIppl/seahub | thirdpart/wsgidav/addons/seafile/domain_controller.py | Python | apache-2.0 | 1,875 |
# coding=utf-8
from finnish_functions import *
from finnish_syllables import initialize_presyllabified, make_syllables
from finnish_weight import make_weights
from finnish_sonority import make_sonorities
from finnish_stress import make_stresses
from copy import deepcopy
import os.path
# location in list of user files for each file
PRESYLL = 0
INITIAL = 1
SUFFIX = 2
COMPOUND = 3
user_files = ['dicts/fi/syllabifier/presyllabified.txt', 'dicts/fi/syllabifier/initial.txt', 'dicts/fi/syllabifier/suffix.txt', 'dicts/fi/syllabifier/compound.txt'] # default values, in case user input is ill-formed or unavailable
config_file = 'dicts/fi/syllabifier/config.txt'
initial_compounds = []
suffixes = []
compound_dict = {}
# initialize list l with words from filename, a file with words on individual lines
def initialize_list(l, filename):
try:
f = open(filename, 'r')
entries = f.readlines()
f.close()
for i in range(len(entries)-1):
l += [entries[i][:-1].lower()] # remove final newline character
l += [entries[-1].lower()] # final line has no newline
except IOError:
print "Error: File not found."
# initialize dict with entries, where key is entry from entries in lowercase without separator, and value is list of words in entry split at separator
def initialize_dict(dict, entries, separator):
for entry in entries:
entry = entry.lower()
hyphen_free = entry.replace(separator, '')
words = entry.split(separator)
dict[hyphen_free] = words
# initialize a dictionary from a file
# the first line of the file is the separator character
# the remaining lines are words with separations marked by the separator character
def initialize_dict_from_file(dict, filename):
try:
f = open(filename, 'r')
entries = f.readlines()
f.close()
for i in range(len(entries)-1):
entries[i] = entries[i][:-1] # remove final newline character
separator = entries[0]
entries = entries[1:]
initialize_dict(dict, entries, separator)
except IOError:
print "Error: File not found."
# initialize configuration
def initialize_config():
try:
f = open(config_file, 'r')
entries = f.readlines()
f.close()
if len(entries) != len(user_files):
return
for i in range(len(user_files)-1): # last word does not end in newline
entries[i] = entries[i][:-1]
for i in range(len(user_files)):
if os.path.isfile(entries[i]):
user_files[i] = entries[i]
except IOError:
print "Error: Config file not found."
initialize_presyllabified(user_files[PRESYLL])
initialize_list(initial_compounds, user_files[INITIAL])
initialize_list(suffixes, user_files[SUFFIX])
initialize_dict_from_file(compound_dict, user_files[COMPOUND])
initialize_config()
# a class representing an annotation
# the constructor assumes that the word contains no compounds
class Annotation:
def __init__(self, word):
self.word = word
self.syllables = make_syllables(word)
self.split_sylls = [split_syllable(syll) for syll in self.syllables]
self.weights = make_weights(self.split_sylls)
self.sonorities = make_sonorities(self.split_sylls)
self.stresses = make_stresses(self.weights)
def join(self, annotation):
self.word += annotation.word
self.syllables += annotation.syllables
self.weights += annotation.weights
self.sonorities += annotation.sonorities
# only concatenate stresses if there is something to concatenate
if len(annotation.stresses[0]) > 0:
total_stresses = []
for i in range(len(self.stresses)):
for j in range(len(annotation.stresses)):
total_stresses += [deepcopy(self.stresses[i])]
total_stresses[-1] += [Stress.secondary]
# replace initial (primary) stress of annotation with secondary stress
total_stresses[-1] += annotation.stresses[j][1:]
self.stresses = total_stresses
# if the final word in the list of words starts with a word in the list of compound-initial words, split the word and apply the function again
# (i.e., split off all initial words in initial_compounds)
def split_initial_compounds(words):
for word in initial_compounds:
if words[-1].lower().startswith(word):
return split_initial_compounds(words[:-1] + [words[-1][:len(word)]] + [words[-1][len(word):]])
return words
# if the final word in the list of words ends with a suffix in suffixes, split the word at the suffix
def split_suffix(words):
for suffix in suffixes:
if words[-1].lower().endswith(suffix):
boundary = len(words[-1]) - len(suffix)
return words[:-1] + [words[-1][:boundary]] + [words[-1][boundary:]]
return words
# split each word in words apart if it appears in the dictionary of compounds
def split_preannotated_compounds(words):
result = []
for i in range(len(words)):
if words[i].lower() in compound_dict:
result += compound_dict[words[i].lower()]
else:
result += [words[i]]
return result
ORTHOGRAPHIC_COMPOUND_MARKER = '-' # the symbol in Finnish orthography marking compound boundaries
# make an annotation for a word
def make_annotation(word):
words = [word]
words = split_initial_compounds(words)
words = words[:-1] + words[-1].split(ORTHOGRAPHIC_COMPOUND_MARKER)
words = split_suffix(words)
words = split_preannotated_compounds(words)
annotations = [Annotation(word) for word in words]
for i in range(1, len(annotations)):
annotations[0].join(annotations[i])
return annotations[0]
# print a representation of an annotation for a word
def print_annotation(word_annotation):
print annotation_string(word_annotation)
print pattern_string(word_annotation)
print
# annotate and print the annotation for a word
def mark(word):
print_annotation(make_annotation(word))
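# Illustrative usage (the input word is hypothetical):
# mark('nyky-ennuste')
# splits the word at the orthographic compound marker, annotates each part, joins
# the parts into one Annotation and prints its syllabification and patterns.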
def annotation_string(word_annotation):
result = ''
for i in range(len(word_annotation.stresses)):
result += SYLLABLE_SEPARATOR
for j in range(len(word_annotation.syllables)):
# mark stresses
if word_annotation.stresses[i][j] == Stress.primary:
result += '´'
elif word_annotation.stresses[i][j] == Stress.secondary:
result += '`'
# add syllable content and separator
result += word_annotation.syllables[j] + SYLLABLE_SEPARATOR
result += '\n'
return result[:-1] # remove final newline
# return a string representing the weight pattern
# e.g. the weights for ".´ny.ky.`en.nus.te." are represented 'LLHHL'
def syll_pattern(weights):
result = ''
for w in weights:
result += Weight.dict[w]
return result
# return a string representing the stress pattern
# e.g. the stresses for ".´ny.ky.`en.nus.te." are represented 'PUSUU'
def stress_pattern(stresses):
result = ''
for i in range(len(stresses)):
for s in stresses[i]:
result += Stress.dict[s]
result += ', '
return result[:-2] # remove last comma and space
# return a string representing the sonority pattern
# e.g. the sonority for taloiden is represented 'AAI'
def sonority_pattern(sonorities):
result = ''
for s in sonorities:
result += s
return result
# print a representation of the weights and stresses
def pattern_string(word_annotation):
return 'Weight: ' + syll_pattern(word_annotation.weights) + ' Stress: ' + stress_pattern(word_annotation.stresses) + ' Sonority: ' + sonority_pattern(word_annotation.sonorities)
|
quadrismegistus/litlab-poetry
|
prosodic/dicts/fi/syllabifier/finnish_annotator.py
|
Python
|
mit
| 8,106
|
# import os
# import utils
# import logging
|
makiolo/cmaki_generator
|
feed.py
|
Python
|
mit
| 44
|
""" URLs for save_for_later """
from django.conf.urls import include
from django.urls import path
urlpatterns = [
path('api/', include(('lms.djangoapps.save_for_later.api.urls', 'api'), namespace='api')),
]
|
eduNEXT/edx-platform
|
lms/djangoapps/save_for_later/urls.py
|
Python
|
agpl-3.0
| 213
|
"""
Direction of Arrival Finding
============================
This sub-package provides implementations of popular direction of arrival finding algorithms.
MUSIC
| Multiple Signal Classification [1]_
| :py:obj:`pyroomacoustics.doa.music`
NormMUSIC
| MUSIC with frequency normalization [2]_
| :py:obj:`pyroomacoustics.doa.normmusic`
SRP-PHAT
| Steered Response Power -- Phase Transform [3]_
| :py:obj:`pyroomacoustics.doa.srp`
CSSM
| Coherent Signal Subspace Method [4]_
| :py:obj:`pyroomacoustics.doa.cssm`
WAVES
| Weighted Average of Signal Subspaces [5]_
| :py:obj:`pyroomacoustics.doa.waves`
TOPS
| Test of Orthogonality of Projected Subspaces [6]_
| :py:obj:`pyroomacoustics.doa.tops`
FRIDA
| Finite Rate of Innovation Direction of Arrival [7]_
| :py:obj:`pyroomacoustics.doa.frida`
All these classes derive from the abstract base class
:py:obj:`pyroomacoustics.doa.doa.DOA` that offers generic methods for finding
and visualizing the locations of acoustic sources.
The constructor can be called once to build the DOA finding object. Then, the
method :py:obj:`pyroomacoustics.doa.doa.DOA.locate_sources` performs DOA
finding based on the time-frequency snapshots passed to it as an argument. Extra arguments
can be supplied to indicate which frequency bands should be used for
localization.
How to use the DOA module
-------------------------
Here ``R`` is a 2xQ ndarray that contains the locations of the Q microphones
in the columns, ``fs`` is the sampling frequency of the input signal, and
``nfft`` the length of the FFT used.
The STFT snapshots are passed to the localization methods in the X ndarray of
shape ``Q x (nfft // 2 + 1) x n_snapshots``, where ``n_snapshots`` is the
number of STFT frames to use for the localization. The option ``freq_bins``
can be provided to specify which frequency bins to use for the localization.
>>> doa = pyroomacoustics.doa.MUSIC(R, fs, nfft)
>>> doa.locate_sources(X, freq_bins=np.arange(20, 40))
Other Available Subpackages
---------------------------
:py:obj:`pyroomacoustics.doa.grid`
this provides abstractions for computing functions on regular or irregular
grids defined on circles and spheres with peak finding methods
:py:obj:`pyroomacoustics.doa.plotters`
a few methods to plot functions and points on circles or spheres
:py:obj:`pyroomacoustics.doa.detect_peaks`
1D peak detection routine from Marcos Duarte
:py:obj:`pyroomacoustics.doa.tools_frid_doa_plane`
routines implementing FRIDA algorithm
Utilities
---------
:py:obj:`pyroomacoustics.doa.algorithms`
a dictionary containing all the DOA object subclasses available, indexed by
keys ``['MUSIC', 'NormMUSIC', 'SRP', 'CSSM', 'WAVES', 'TOPS', 'FRIDA']``
Note on MUSIC
-----------------
Since NormMUSIC performs more robustly, we recommend using NormMUSIC over MUSIC. When MUSIC is used as a baseline for publications, we recommend reporting both NormMUSIC and MUSIC. For more information, you may have a look at our jupyter notebook at ``https://github.com/LCAV/pyroomacoustics/tree/master/notebooks/norm_music_demo.ipynb``
References
----------
.. [1] R. Schmidt, *Multiple emitter location and signal parameter estimation*,
IEEE Trans. Antennas Propag., Vol. 34, Num. 3, pp 276--280, 1986
.. [2] D. Salvati, C. Drioli, G. L. Foresti, *Incoherent Frequency Fusion for Broadband Steered Response Power Algorithms in
Noisy Environments*, IEEE Signal Process. Lett., Vol. 21, Num. 5, pp 581-585, 2014
.. [3] J. H. DiBiase, *A high-accuracy, low-latency technique for talker localization
in reverberant environments using microphone arrays*, PHD Thesis, Brown University, 2000
.. [4] H. Wang, M. Kaveh, *Coherent signal-subspace processing for the detection and
estimation of angles of arrival of multiple wide-band sources*, IEEE Trans. Acoust.,
Speech, Signal Process., Vol. 33, Num. 4, pp 823--831, 1985
.. [5] E. D. di Claudio, R. Parisi, *WAVES: Weighted average of signal subspaces for
robust wideband direction finding*, IEEE Trans. Signal Process., Vol. 49, Num. 10,
2179--2191, 2001
.. [6] Y. Yeo-Sun, L. M. Kaplan, J. H. McClellan, *TOPS: New DOA estimator for wideband
signals*, IEEE Trans. Signal Process., Vol. 54, Num 6., pp 1977--1989, 2006
.. [7] H. Pan, R. Scheibler, E. Bezzam, I. Dokmanic, and M. Vetterli, *FRIDA:
FRI-based DOA estimation for arbitrary array layouts*, Proc. ICASSP,
pp 3186-3190, 2017
"""
from .doa import *
from .srp import *
from .music import *
from .normmusic import *
from .cssm import *
from .waves import *
from .tops import *
from .frida import *
from .grid import *
from .utils import *
# Create this dictionary as a shortcut to different algorithms
algorithms = {
"SRP": SRP,
"MUSIC": MUSIC,
"NormMUSIC": NormMUSIC,
"CSSM": CSSM,
"WAVES": WAVES,
"TOPS": TOPS,
"FRIDA": FRIDA,
}
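# A minimal usage sketch (assuming ``R``, ``fs``, ``nfft`` and the STFT snapshots
# ``X`` are prepared as described in the module docstring): the dictionary lets the
# estimator be selected by name at runtime.
#
# doa = algorithms['NormMUSIC'](R, fs, nfft)
# doa.locate_sources(X, freq_bins=np.arange(20, 40))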
|
LCAV/pyroomacoustics
|
pyroomacoustics/doa/__init__.py
|
Python
|
mit
| 4,912
|
'''
Takes a positional data set (time, x, y, z) and applies the Savitzky-Golay filter to it based on the
polynomial and window parameters we input
'''
from math import *
import numpy as np
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from scipy.signal import savgol_filter
from util import read_data
def golay(data, window, degree):
'''
Apply the Savitzky-Golay filter to a positional data set.
Args:
data (numpy array): containing all of the positional data in the format of (time, x, y, z)
window (int): window size of the Savitzky-Golay filter
degree (int): degree of the polynomial in the Savitzky-Golay filter
Returns:
numpy array: filtered data in the same format
'''
x = data[:, 1]
y = data[:, 2]
z = data[:, 3]
x_new = savgol_filter(x, window, degree)
y_new = savgol_filter(y, window, degree)
z_new = savgol_filter(z, window, degree)
new_positions = np.zeros((len(data), 4))
new_positions[:, 1] = x_new
new_positions[:, 2] = y_new
new_positions[:, 3] = z_new
new_positions[:, 0] = data[:, 0]
return new_positions
#
# if __name__ == "__main__":
#
# pd.set_option('display.width', 1000)
# my_data = read_data.load_data('../example_data/orbit.csv')
# window = 21 # it's better to select it as len(data)/3 and it needs to be an odd number
# degree = 6
# positions_filtered = golay(my_data, window, degree)
# print(positions_filtered - my_data)
#
|
aerospaceresearch/orbitdeterminator
|
orbitdeterminator/filters/sav_golay.py
|
Python
|
mit
| 1,556
|
def showInstructions():
#print a main menu and the commands
print("RPG Game")
print("========")
print("Commands:")
print(" go [direction]")
print(" get [item]")
def showStatus():
#print the player's current status
print("---------------------------")
print("You are in the " + rooms[currentRoom]["name"])
#print the current inventory
print("Inventory : " + str(inventory))
#print an item if there is one
if "item" in rooms[currentRoom]:
print("You see a " + rooms[currentRoom]["item"])
print("---------------------------")
#an inventory, which is initially empty
inventory = []
#a dictionary linking a room to other room positions
rooms = {
1 : { "name" : "Hall" ,
"south" : 2 ,
"east" : 4 ,
"item" : "key"
} ,
2 : { "name" : "Kitchen" ,
"north" : 1 ,
"east" : 3
} ,
3 : { "name" : "Dining Room" ,
"west" : 2 ,
"east" : 5 ,
"south" : 6 ,
"item" : "shield"
} ,
4 : { "name" : "Living Room" ,
"west" : 1
} ,
5 : { "name" : "Study" ,
"west" : 3
} ,
6 : { "name" : "Garden" ,
"north" : 3
}
}
#start the player in room 1
currentRoom = 1
showInstructions()
#loop infinitely
while True:
showStatus()
#get the player's next 'move'
#.split() breaks it up into a list
#eg typing 'go east' would give the list:
#['go','east']
move = input(">").lower().split()
#if they type 'go' first
if move[0] == "go":
#check that they are allowed wherever they want to go
if move[1] in rooms[currentRoom]:
#set the current room to the new room
currentRoom = rooms[currentRoom][move[1]]
#there is no door (link) to the new room
else:
print("You can't go that way!")
#if they type 'get' first
if move[0] == "get" :
#if the room contains an item, and the item is the one they want to get
if "item" in rooms[currentRoom] and move[1] in rooms[currentRoom]["item"]:
#add the item to their inventory
inventory += [move[1]]
#display a helpful message
print(move[1] + " got!")
#delete the item from the room
del rooms[currentRoom]["item"]
#otherwise, if the item isn't there to get
else:
#tell them they can't get it
print("Can't get " + move[1] + "!")
|
arve0/example_lessons
|
src/python/lessons/RPG/RPG-5-items.py
|
Python
|
cc0-1.0
| 2,873
|
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import logging
import os
import subprocess
import unittest
import environment
import utils
import tablet
# single shard / 2 tablets
shard_0_master = tablet.Tablet()
shard_0_slave = tablet.Tablet()
cert_dir = environment.tmproot + '/certs'
def openssl(cmd):
result = subprocess.call(['openssl'] + cmd, stderr=utils.devnull)
if result != 0:
raise utils.TestError('OpenSSL command failed: %s' % ' '.join(cmd))
def setUpModule():
try:
environment.topo_server().setup()
logging.debug('Creating certificates')
os.makedirs(cert_dir)
# Create CA certificate
ca_key = cert_dir + '/ca-key.pem'
ca_cert = cert_dir + '/ca-cert.pem'
openssl(['genrsa', '-out', cert_dir + '/ca-key.pem'])
ca_config = cert_dir + '/ca.config'
with open(ca_config, 'w') as fd:
fd.write("""
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = US
ST = California
L = Mountain View
O = Google
OU = Vitess
CN = Mysql CA
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
""")
openssl(['req', '-new', '-x509', '-nodes', '-days', '3600', '-batch',
'-config', ca_config,
'-key', ca_key,
'-out', ca_cert])
# Create mysql server certificate, remove passphrase, and sign it
server_key = cert_dir + '/server-key.pem'
server_cert = cert_dir + '/server-cert.pem'
server_req = cert_dir + '/server-req.pem'
server_config = cert_dir + '/server.config'
with open(server_config, 'w') as fd:
fd.write("""
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = US
ST = California
L = Mountain View
O = Google
OU = Vitess
CN = Mysql Server
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
""")
openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch',
'-config', server_config,
'-keyout', server_key, '-out', server_req])
openssl(['rsa', '-in', server_key, '-out', server_key])
openssl(['x509', '-req',
'-in', server_req,
'-days', '3600',
'-CA', ca_cert,
'-CAkey', ca_key,
'-set_serial', '01',
'-out', server_cert])
# Create mysql client certificate, remove passphrase, and sign it
client_key = cert_dir + '/client-key.pem'
client_cert = cert_dir + '/client-cert.pem'
client_req = cert_dir + '/client-req.pem'
client_config = cert_dir + '/client.config'
with open(client_config, 'w') as fd:
fd.write("""
[ req ]
default_bits = 1024
default_keyfile = keyfile.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
prompt = no
output_password = mypass
[ req_distinguished_name ]
C = US
ST = California
L = Mountain View
O = Google
OU = Vitess
CN = Mysql Client
emailAddress = test@email.address
[ req_attributes ]
challengePassword = A challenge password
""")
openssl(['req', '-newkey', 'rsa:2048', '-days', '3600', '-nodes', '-batch',
'-config', client_config,
'-keyout', client_key, '-out', client_req])
openssl(['rsa', '-in', client_key, '-out', client_key])
openssl(['x509', '-req',
'-in', client_req,
'-days', '3600',
'-CA', ca_cert,
'-CAkey', ca_key,
'-set_serial', '02',
'-out', client_cert])
extra_my_cnf = cert_dir + '/secure.cnf'
fd = open(extra_my_cnf, 'w')
fd.write('ssl-ca=' + ca_cert + '\n')
fd.write('ssl-cert=' + server_cert + '\n')
fd.write('ssl-key=' + server_key + '\n')
fd.close()
setup_procs = [
shard_0_master.init_mysql(extra_my_cnf=extra_my_cnf),
shard_0_slave.init_mysql(extra_my_cnf=extra_my_cnf),
]
utils.wait_procs(setup_procs)
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
shard_0_master.init_tablet('master', 'test_keyspace', '0')
shard_0_slave.init_tablet('replica', 'test_keyspace', '0')
# create databases so vttablet can start behaving normally
shard_0_master.create_db('vt_test_keyspace')
shard_0_slave.create_db('vt_test_keyspace')
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
shard_0_master.kill_vttablet()
shard_0_slave.kill_vttablet()
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_slave.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_slave.remove_tree()
class TestSecure(unittest.TestCase):
"""This test makes sure that we can use SSL replication with Vitess.
"""
def test_secure(self):
# start the tablets
shard_0_master.start_vttablet()
shard_0_slave.start_vttablet(wait_for_state='NOT_SERVING',
repl_extra_flags={
'flags': '2048',
'ssl-ca': cert_dir + '/ca-cert.pem',
'ssl-cert': cert_dir + '/client-cert.pem',
'ssl-key': cert_dir + '/client-key.pem',
})
# Reparent using SSL (this will also check replication works)
for t in [shard_0_master, shard_0_slave]:
t.reset_replication()
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
shard_0_master.tablet_alias], auto_log=True)
if __name__ == '__main__':
utils.main()
|
danielmt/vshard
|
vendor/github.com/youtube/vitess/test/encrypted_replication.py
|
Python
|
mit
| 6,693
|
# uid.py - functions for handling Swiss business identifiers
# coding: utf-8
#
# Copyright (C) 2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""UID (Unternehmens-Identifikationsnummer, Swiss business identifier).
The Swiss UID is used to uniquely identify businesses for taxation purposes.
The number consists of a fixed "CHE" prefix, followed by 9 digits that are
protected with a simple checksum.
This module only supports the "new" format that was introduced in 2011 which
completely replaced the "old" 6-digit format in 2014.
More information is available at:
https://www.uid.admin.ch/
https://de.wikipedia.org/wiki/Unternehmens-Identifikationsnummer
>>> validate('CHE-100.155.212')
'CHE100155212'
>>> validate('CHE-100.155.213')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('CHE100155212')
'CHE-100.155.212'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips
surrounding whitespace and separators."""
return clean(number, ' -.').strip().upper()
def calc_check_digit(number):
"""Calculate the check digit for organisations. The number passed should
not have the check digit included."""
weights = (5, 4, 3, 2, 7, 6, 5, 4)
s = sum(w * int(n) for w, n in zip(weights, number))
return str((11 - s) % 11)
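# Worked example (using the number from the module doctest): for 'CHE-100.155.212'
# the check digit is computed over '10015521' with weights (5, 4, 3, 2, 7, 6, 5, 4):
# 5*1 + 4*0 + 3*0 + 2*1 + 7*5 + 6*5 + 5*2 + 4*1 = 86 and (11 - 86) % 11 = 2, which
# matches the final digit of the number.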
def validate(number):
"""Checks to see if the number provided is a valid number. This checks
the length, formatting and check digit."""
number = compact(number)
if len(number) != 12:
raise InvalidLength()
if not number.startswith('CHE'):
raise InvalidComponent()
if not number[3:].isdigit():
raise InvalidFormat()
if number[-1] != calc_check_digit(number[3:-1]):
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid number. This checks
the length, formatting and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return number[:3] + '-' + '.'.join(
number[i:i + 3] for i in range(3, len(number), 3))
|
cgstudiomap/cgstudiomap
|
main/eggs/python_stdnum-1.2-py2.7.egg/stdnum/ch/uid.py
|
Python
|
agpl-3.0
| 3,006
|
import httplib
from flask import request, session, make_response
from flask.ext.restful import Resource
from flask_restful_swagger import swagger
from data.RoleDAO import RoleDAO
from tools.JsonConverter import json_serialize
from tools.MessageDefinitions import RoleMessage
from tools.ModelDefinitions import RoleModel, RoleEnvironmentPropertiesModel
from tools.SessionValidator import get_session_id
__author__ = 'Robin Quetin'
class RolesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all roles',
responseClass=RoleModel.__name__,
nickname='roles-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
},
{
"name": "constraint_id",
"description": "An ID used to filter the roles",
"required": False,
"default": -1,
"allowMultiple": False,
"dataType": int.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self):
session_id = get_session_id(session, request)
constraint_id = request.args.get('constraint_id', -1)
dao = RoleDAO(session_id)
roles = dao.get_roles(constraint_id)
dao.close()
resp = make_response(json_serialize(roles, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
# region Swagger Doc
@swagger.operation(
notes='Creates a new role',
nickname='role-post',
parameters=[
{
"name": "body",
"description": "The serialized version of the new role to be added",
"required": True,
"allowMultiple": False,
"type": RoleMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def post(self):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
new_role = dao.from_json(request)
role_id = dao.add_role(new_role)
dao.close()
resp_dict = {'role_id': role_id}
resp = make_response(json_serialize(resp_dict, session_id=session_id), httplib.OK)
resp.headers['Content-Type'] = 'application/json'
return resp
class RolesByIdAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get a role by ID',
responseClass=RoleModel.__name__,
nickname='role-by-id-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, id):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
found_role = dao.get_role_by_id(id)
dao.close()
resp = make_response(json_serialize(found_role, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
# region Swagger Doc
@swagger.operation(
notes='Updates an existing role',
nickname='role-put',
parameters=[
{
"name": "body",
"description": "The session ID and the serialized version of the role to be updated",
"required": True,
"allowMultiple": False,
"type": RoleMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided role name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def put(self, id):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
upd_role = dao.from_json(request)
dao.update_role(upd_role, role_id=id)
dao.close()
resp_dict = {'message': 'Update successful'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), httplib.OK)
resp.headers['Content-Type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing role',
nickname='role-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided role name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, id):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
dao.delete_role(role_id=id)
dao.close()
resp_dict = {'message': 'Role successfully deleted'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), httplib.OK)
resp.headers['Content-Type'] = 'application/json'
return resp
class RolesByNameAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get a role by name',
responseClass=RoleModel.__name__,
nickname='role-by-name-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
found_role = dao.get_role_by_name(name)
dao.close()
resp = make_response(json_serialize(found_role, session_id=session_id))
resp.headers['Content-Type'] = "application/json"
return resp
# region Swagger Doc
@swagger.operation(
notes='Updates an existing role',
nickname='role-put',
parameters=[
{
"name": "body",
"description": "The session ID and the serialized version of the role to be updated",
"required": True,
"allowMultiple": False,
"type": RoleMessage.__name__,
"paramType": "body"
},
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided role name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def put(self, name):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
upd_role = dao.from_json(request)
dao.update_role(upd_role, name=name)
dao.close()
resp_dict = {'message': 'Update successful'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), httplib.OK)
resp.headers['Content-Type'] = 'application/json'
return resp
# region Swagger Doc
@swagger.operation(
notes='Deletes an existing role',
nickname='role-delete',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
'code': httplib.BAD_REQUEST,
'message': 'One or more attributes are missing'
},
{
'code': httplib.CONFLICT,
'message': 'Some problems were found during the name check'
},
{
'code': httplib.NOT_FOUND,
'message': 'The provided role name could not be found in the database'
},
{
'code': httplib.CONFLICT,
'message': 'A database error has occurred'
}
]
)
# endregion
def delete(self, name):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
dao.delete_role(name=name)
dao.close()
resp_dict = {'message': 'Role successfully deleted'}
resp = make_response(json_serialize(resp_dict, session_id=session_id), httplib.OK)
resp.headers['Content-Type'] = 'application/json'
return resp
class RoleEnvironmentPropertiesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get the environment properties for a specific role',
nickname='role-envprops-by-name-get',
responseClass=RoleEnvironmentPropertiesModel.__name__,
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, name):
session_id = get_session_id(session, request)
dao = RoleDAO(session_id)
props = dao.get_role_props(name)
dao.close()
resp = make_response(json_serialize(props, session_id=session_id), httplib.OK)
resp.headers['Content-Type'] = 'application/json'
return resp
|
RobinQuetin/CAIRIS-web
|
cairis/cairis/controllers/RoleController.py
|
Python
|
apache-2.0
| 13,254
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral import exceptions as exc
from mistral.lang import types
from mistral.lang.v2 import base
class PublishSpec(base.BaseSpec):
_schema = {
"type": "object",
"properties": {
"branch": types.NONEMPTY_DICT,
"global": types.NONEMPTY_DICT,
"atomic": types.NONEMPTY_DICT
},
"additionalProperties": False
}
def __init__(self, data):
super(PublishSpec, self).__init__(data)
self._branch = self._data.get('branch')
self._global = self._data.get('global')
self._atomic = self._data.get('atomic')
@classmethod
def get_schema(cls, includes=['definitions']):
return super(PublishSpec, cls).get_schema(includes)
def validate_semantics(self):
if not self._branch and not self._global and not self._atomic:
raise exc.InvalidModelException(
"Either 'branch', 'global' or 'atomic' must be specified: "
% self._data
)
self.validate_expr(self._branch)
self.validate_expr(self._global)
self.validate_expr(self._atomic)
def get_branch(self):
return self._branch
def get_global(self):
return self._global
def get_atomic(self):
return self._atomic
|
StackStorm/mistral
|
mistral/lang/v2/publish.py
|
Python
|
apache-2.0
| 1,940
|
def gen():
i = 0
funky()
yield 1
i += 1
def funky():
print "cheese"
g = gen()
print g.next()
|
ArcherSys/ArcherSys
|
skulpt/test/run/t187.py
|
Python
|
mit
| 115
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A task scheduler for Resolver system node."""
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_scheduler
from tfx.utils import status as status_lib
class ResolverTaskScheduler(task_scheduler.TaskScheduler[task_lib.ExecNodeTask]):
"""A task scheduler for Resolver system node."""
def schedule(self) -> task_scheduler.TaskSchedulerResult:
return task_scheduler.TaskSchedulerResult(
status=status_lib.Status(code=status_lib.Code.OK),
output=task_scheduler.ResolverNodeOutput(
resolved_input_artifacts=self.task.input_artifacts))
def cancel(self) -> None:
pass
|
tensorflow/tfx
|
tfx/orchestration/experimental/core/task_schedulers/resolver_task_scheduler.py
|
Python
|
apache-2.0
| 1,300
|
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import json
import sys
import copy
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, BOOLEANS_TRUE, BOOLEANS_FALSE
from ansible.module_utils.six.moves.urllib.parse import urlparse
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_ERROR = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, TLSParameterError, NotFound
from docker.tls import TLSConfig
from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
from docker.types import Ulimit, LogConfig
else:
from docker import Client
from docker.utils.types import Ulimit, LogConfig
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_PY = False
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
MIN_DOCKER_VERSION = "1.7.0"
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', aliases=['docker_url']),
tls_hostname=dict(type='str'),
api_version=dict(type='str', aliases=['docker_api_version']),
timeout=dict(type='int'),
cacert_path=dict(type='str', aliases=['tls_ca_cert']),
cert_path=dict(type='str', aliases=['tls_client_cert']),
key_path=dict(type='str', aliases=['tls_client_key']),
ssl_version=dict(type='str'),
tls=dict(type='bool'),
tls_verify=dict(type='bool'),
debug=dict(type='bool', default=False),
filter_logger=dict(type='bool', default=False),
)
DOCKER_MUTUALLY_EXCLUSIVE = [
['tls', 'tls_verify']
]
DOCKER_REQUIRED_TOGETHER = [
['cert_path', 'key_path']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if not HAS_DOCKER_PY:
# No docker-py. Create a placeholder client to allow
# instantiation of AnsibleModule and proper error handling
class Client(object):
def __init__(self, **kwargs):
pass
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
class AnsibleDockerClient(Client):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None):
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if)
if not HAS_DOCKER_PY:
self.fail("Failed to import docker-py - %s. Try `pip install docker-py`" % HAS_DOCKER_ERROR)
if LooseVersion(docker_version) < LooseVersion(MIN_DOCKER_VERSION):
self.fail("Error: docker-py version is %s. Minimum version required is %s." % (docker_version,
MIN_DOCKER_VERSION))
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
self._connect_params = self._get_connect_params()
try:
super(AnsibleDockerClient, self).__init__(**self._connect_params)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def fail(self, msg):
self.module.fail_json(msg=msg)
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value):
if param_value is not None:
# take module parameter value
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return env_value
# take the default
return default_value
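# Illustrative example: with no module parameter supplied and DOCKER_HOST absent
# from the environment,
# self._get_value('docker_host', None, 'DOCKER_HOST', DEFAULT_DOCKER_HOST)
# falls through to the default 'unix://var/run/docker.sock'.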
@property
def auth_params(self):
# Get authentication credentials.
# Precedence: module parameters-> environment variables-> defaults.
self.log('Getting credentials')
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = self.module.params.get(key)
if self.module.params.get('use_tls'):
# support use_tls option in docker_image.py. This will be deprecated.
use_tls = self.module.params.get('use_tls')
if use_tls == 'encrypt':
params['tls'] = True
if use_tls == 'verify':
params['tls_verify'] = True
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', 'localhost'),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto'),
cacert_path=self._get_value('cacert_path', params['cacert_path'], 'DOCKER_CERT_PATH', None),
cert_path=self._get_value('cert_path', params['cert_path'], 'DOCKER_CERT_PATH', None),
key_path=self._get_value('key_path', params['key_path'], 'DOCKER_CERT_PATH', None),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
tls_verify=self._get_value('tls_verify', params['tls_verify'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS),
)
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
if ':' in parsed_url.netloc:
result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
result['tls_hostname'] = parsed_url.netloc
return result
def _get_tls_config(self, **kwargs):
self.log("get_tls_config:")
for key in kwargs:
self.log(" %s: %s" % (key, kwargs[key]))
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
self.fail("TLS config error: %s" % exc)
def _get_connect_params(self):
auth = self.auth_params
self.log("connection params:")
for key in auth:
self.log(" %s: %s" % (key, auth[key]))
if auth['tls'] or auth['tls_verify']:
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
if auth['tls'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and no host verification
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls']:
# TLS with no certs and no host verification
tls_config = self._get_tls_config(verify=False,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
# TLS with certs and host verification
if auth['cacert_path']:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
ca_cert=auth['cacert_path'],
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
else:
tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify'] and auth['cacert_path']:
# TLS with cacert only
tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
assert_hostname=auth['tls_hostname'],
verify=True,
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
if auth['tls_verify']:
# TLS with verify and no certs
tls_config = self._get_tls_config(verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'])
return dict(base_url=auth['docker_host'],
tls=tls_config,
version=auth['api_version'],
timeout=auth['timeout'])
# No TLS
return dict(base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'])
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker host name matches %s. The actual hostname is %s. "
"Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. "
"You may also use TLS without verification by setting the tls parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is not None:
try:
self.log("Inspecting container Id %s" % result['Id'])
result = self.inspect_container(container=result['Id'])
self.log("Completed container inspection")
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if len(images) == 0:
# In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# the name does not contain a registry, so let's see if docker.io works
lookup = "docker.io/%s" % name
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the docker-py images method does not
work consistently. Instead, get the result set for name and manually check if the tag
exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
if tags and lookup in tags:
images = [image]
break
return images
def pull_image(self, name, tag="latest"):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, tag=tag, stream=True, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
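# Illustrative sketch (the argument spec and option name are hypothetical): a
# module built on this helper would typically construct the client and look up a
# container like this, letting DOCKER_COMMON_ARGS supply the connection options.
#
# client = AnsibleDockerClient(
# argument_spec=dict(name=dict(type='str', required=True)),
# supports_check_mode=True,
# )
# container = client.get_container(client.module.params['name'])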
|
tux-00/ansible
|
lib/ansible/module_utils/docker_common.py
|
Python
|
gpl-3.0
| 18,196
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeFieldResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'SignatureEnvelopeFieldResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # SignatureEnvelopeFieldResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # int
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/SignatureEnvelopeFieldResponse.py
|
Python
|
apache-2.0
| 1,182
|
from django.conf.urls import patterns
from . import views
urlpatterns = patterns(
'',
(r'^race', views.APIRace.as_view())
)
|
MGXRace/website
|
racesowold/urls.py
|
Python
|
gpl-2.0
| 133
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ManagementConfig(AppConfig):
name = 'management'
|
Liongold/crash
|
django/crashreport/management/apps.py
|
Python
|
mpl-2.0
| 136
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net/websockets.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
# TODO(ricea): Remove this once the old implementation has been removed and the
# list of files in the README file is no longer needed.
def _CheckReadMeComplete(input_api, output_api):
"""Verifies that any new files have been added to the README file.
Checks that if any source files were added in this CL, that they were
also added to the README file. We do not warn about pre-existing
errors, as that would be annoying.
Args:
input_api: The InputApi object provided by the presubmit framework.
output_api: The OutputApi object provided by the framework.
Returns:
A list of zero or more PresubmitPromptWarning objects.
"""
# None passed to AffectedSourceFiles means "use the default filter", which
# does what we want, ie. returns files in the CL with filenames that look like
# source code.
added_source_filenames = set(input_api.basename(af.LocalPath())
for af in input_api.AffectedSourceFiles(None)
if af.Action().startswith('A'))
if not added_source_filenames:
return []
readme = input_api.AffectedSourceFiles(
lambda af: af.LocalPath().endswith('/README'))
if not readme:
return [output_api.PresubmitPromptWarning(
'One or more files were added to net/websockets without being added\n'
'to net/websockets/README.\n', added_source_filenames)]
readme_added_filenames = set(line.strip() for line in readme[0].NewContents()
if line.strip() in added_source_filenames)
if readme_added_filenames < added_source_filenames:
return [output_api.PresubmitPromptWarning(
'One or more files added to net/websockets but not found in the README '
'file.\n', added_source_filenames - readme_added_filenames)]
else:
return []
def CheckChangeOnUpload(input_api, output_api):
return _CheckReadMeComplete(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CheckReadMeComplete(input_api, output_api)
|
Gateworks/platform-external-chromium_org
|
net/websockets/PRESUBMIT.py
|
Python
|
bsd-3-clause
| 2,375
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import yaml
from qgis.core import Qgis, QgsWkbTypes
from qgis.PyQt.QtCore import QSettings, QLocale
def loadShortHelp():
h = {}
path = os.path.dirname(__file__)
for f in os.listdir(path):
if f.endswith("yaml"):
filename = os.path.join(path, f)
with open(filename) as stream:
h.update(yaml.safe_load(stream))
version = ".".join(Qgis.QGIS_VERSION.split(".")[0:2])
overrideLocale = QSettings().value('locale/overrideFlag', False, bool)
if not overrideLocale:
locale = QLocale.system().name()[:2]
else:
locale = QSettings().value('locale/userLocale', '')
locale = locale.split("_")[0]
def replace(s):
if s is not None:
return s.replace("{qgisdocs}", "https://docs.qgis.org/%s/%s/docs" % (version, locale))
else:
return None
h = {k: replace(v) for k, v in list(h.items())}
return h
shortHelp = loadShortHelp()
|
wonder-sk/QGIS
|
python/plugins/processing/algs/help/__init__.py
|
Python
|
gpl-2.0
| 2,059
|
#!/usr/bin/env python
from __future__ import print_function
print("Hello World")
|
manashmndl/LearningPyQt
|
pyqt/chap01/hello.py
|
Python
|
mit
| 83
|
# -*- coding: utf-8 -*-
"""
"""
from TeleMir.gui import ScanningOscilloscope,KurtosisGraphics,SpectrumGraphics,spaceShipLauncher,Topoplot
from pyacq import StreamHandler, FakeMultiSignals, EmotivMultiSignals
from pyacq.gui import Oscilloscope, TimeFreq
import msgpack
from PyQt4 import QtCore,QtGui
import zmq
import time
def main():
streamhandler = StreamHandler()
dev = EmotivMultiSignals(streamhandler = streamhandler)
dev.configure(buffer_length = 1800, device_path = '',)  # must be a multiple of the packet size
# Configure and start
#dev = FakeMultiSignals(streamhandler = streamhandler)
#dev.configure( #name = 'Test dev',
# nb_channel = 14,
# sampling_rate =128.,
# buffer_length = 10.,
# packet_size = 1,
# )
dev.initialize()
dev.start()
app = QtGui.QApplication([])
# w1=ScanningOscilloscope(dev.streams[2],2.,channels=[0,1])
w1=spaceShipLauncher(dev.streams[2])
# w1=SpectrumGraphics(dev.streams[0],3.,channels=[11,12])
# w2=KurtosisGraphics(dev.streams[0],3.,channels=range(2,8))
# w2=Topoplot(stream = dev.streams[1], type_Topo ='imp')
w1.run()
# w2.show()
#w1.showFullScreen()
app.exec_()
w1.connect(w1,QtCore.SIGNAL("fermeturefenetre()"),dev.stop)
dev.close()
if __name__ == '__main__':
main()
|
Hemisphere-Project/Telemir-DatabitMe
|
Telemir-EEG/examples/example_gui.py
|
Python
|
gpl-2.0
| 1,516
|
import numpy
queries = {
'10': 'SELECT {names} FROM results WHERE seed in ({seedsSeq}) ' +
'AND angle in ({anglesSeq}) ORDER BY amp1',
'else': 'SELECT {names} FROM six_post_results WHERE betx>0 '+
'AND bety>0 AND emitx>0 AND emity>0 AND seed{seedsSeq} ' + 'AND angle{anglesSeq} '+
'AND tunex{tunexSeq} '+'AND tuney{tuneySeq} AND turn_max={turns} ORDER BY amp1'
}
dataQueried = {
'10': [('six_input_id','int'),
('row_num','int'),
('turn_max', 'int'),
('sflag', 'int'),
('qx', 'float'),
('qy', 'float'),
('betx', 'float'),
('bety', 'float'),
('sigx1', 'float'),
('sigy1', 'float'),
('deltap', 'float'),
('dist', 'float'),
('distp', 'float'),
('qx_det', 'float'),
('qx_spread', 'float'),
('qy_det', 'float'),
('qy_spread', 'float'),
('resxfact', 'float'),
('resyfact', 'float'),
('resorder', 'int'),
('smearx', 'float'),
('smeary', 'float'),
('smeart', 'float'),
('sturns1', 'int'),
('sturns2', 'int'),
('sseed', 'float'),
('qs', 'float'),
('sigx2', 'float'),
('sigy2', 'float'),
('sigxmin', 'float'),
('sigxavg', 'float'),
('sigxmax', 'float'),
('sigymin', 'float'),
('sigyavg', 'float'),
('sigymax', 'float'),
('sigxminld', 'float'),
('sigxavgld', 'float'),
('sigxmaxld', 'float'),
('sigyminld', 'float'),
('sigyavgld', 'float'),
('sigymaxld', 'float'),
('sigxminnld', 'float'),
('sigxavgnld', 'float'),
('sigxmaxnld', 'float'),
('sigyminnld', 'float'),
('sigyavgnld', 'float'),
('sigymaxnld', 'float'),
('emitx', 'float'),
('emity', 'float'),
('betx2', 'float'),
('bety2', 'float'),
('qpx', 'float'),
('qpy', 'float'),
('version', 'float'),
('cx', 'float'),
('cy', 'float'),
('csigma', 'float'),
('xp', 'float'),
('yp', 'float'),
('delta', 'float'),
('dnms', 'float'),
('trttime', 'float'),
('mtime','float')],
'11': [('achaos', 'float'),
('al', numpy.dtype([('arr','f8',(48,))])),
('amin', 'float'),
('amax', 'float'),
('achaos1', 'float')],
'12': [('rad','float'),
('distp','float')],
'13': [('rad','float'),
('dist','float')],
'14': [('achaos', 'float'),
('alost3', 'float'),
('turn_max', 'float'),
('f14', 'int')],
'15': [('rad','float'),
('sturns1','float'),
('sturns2','float')],
'16': [('deltap','float'),
('qx','float'),
('qy','float')],
'17': [('deltap','float'),
('qx','float'),
('qy','float')],
'18': [('rad','float'),
('smearx','float')],
'19': [('rad','float'),
('smeary','float')],
'20': [('rad','float'),
('qx_det','float')],
'21': [('rad','float'),
('qy_det','float')],
'22': [('rad','float'),
('(rad1*sigxminnld)','float')],
'23': [('rad','float'),
('(rad1*sigxavgnld)','float')],
'24': [('rad','float'),
('(rad1*sigxmaxnld)','float')],
'25': [('qx_det+qx', 'float'),
('qy_det+qy', 'float'),
('qx_det', 'float'),
('qy_det', 'float')],
'26': [('achaos', 'float'),
( 'alost2', 'float'),
( 'amax', 'float')],
'27': [('al', numpy.dtype([('arr','f8',(48,))]))],
'28': [('al', numpy.dtype([('arr','f8',(48,))]))],
'40': [('achaos', 'float'),
('al', numpy.dtype([('arr','f8',(48,))])),
('amin', 'float'),
('amax', 'float'),
('achaos1', 'float')]
}
|
mfittere/SixDeskDB
|
sixdeskdb/queries.py
|
Python
|
lgpl-2.1
| 3,716
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
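# Editor's note (illustrative, not part of the original file): for
# ['cl.exe', '/c', 'my file.cc'] this returns 'cl.exe "/c" "my file.cc"' --
# the command itself stays unquoted, every following argument is quoted, and a
# leading 'call some.bat' argument only has the batch path normalized.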
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
self.dxsdk_dir = _FindDirectXInstallation()
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
self.wdk_dir = os.environ.get('WDK_DIR')
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
replacements = {
'$(OutDir)\\': target_dir,
'$(TargetDir)\\': target_dir,
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(InputPath)': '${source}',
'$(InputName)': '${root}',
'$(ProjectName)': self.spec['target_name'],
'$(TargetName)': target_name,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if self.vs_version.Path():
replacements['$(VSInstallDir)'] = self.vs_version.Path()
replacements['$(VCInstallDir)'] = os.path.join(self.vs_version.Path(),
'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
replacements['$(DXSDK_DIR)'] = self.dxsdk_dir if self.dxsdk_dir else ''
replacements['$(WDK_DIR)'] = self.wdk_dir if self.wdk_dir else ''
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special):
"""Gets the explicitly overridden pdb name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def GetPrecompiledHeader(self, config, gyp_to_build_path):
"""Returns an object that handles the generation of precompiled header
build steps."""
config = self._TargetConfig(config)
return _PchHelper(self, config, gyp_to_build_path)
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special)
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def HasExplicitIdlRules(self, spec):
"""Determine if there's an explicit rule for idl files. When there isn't we
need to generate implicit rules to build MIDL .idl files."""
return self._HasExplicitRuleForExtension(spec, 'idl')
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
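# Editor's note (illustrative, not part of the original file): '.c' only pairs
# with '.c', while '.cc', '.cxx' and '.cpp' pair with one another, so a '.cpp'
# precompiled source applies to '.cc' files but never to '.c' files.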
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
    additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
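# Editor's note (illustrative, not part of the original file): expansion is a
# plain string replace over the dict built by GetVSMacroEnv, e.g. with
# {'$(ProjectName)': 'base', '$(PlatformName)': 'x64'} the string
# '$(ProjectName)_$(PlatformName).dll' expands to 'base_x64.dll'.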
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
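# Editor's note (illustrative, not part of the original file): for a dict such
# as {'PATH': 'C:\\tools', 'TMP': 'C:\\temp'} the block is the string
# 'PATH=C:\\tools' + NUL + 'TMP=C:\\temp' + NUL + NUL, where NUL is '\0' and
# the key order follows dict iteration order.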
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
meet your requirement (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to the gyp to suppress file
generation and use custom environment files prepared by yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
|
yinquan529/platform-external-chromium_org-tools-gyp
|
pylib/gyp/msvs_emulation.py
|
Python
|
bsd-3-clause
| 42,351
|
import os
class TailLog:
def __init__(self):
print "\n"
@staticmethod
def tail(f, lines=1, _buffer=4098):
# place holder for the lines found
lines_found = []
# block counter will be multiplied by buffer
# to get the block size from the end
block_counter = -1
# loop until we find X lines
while len(lines_found) < lines:
try:
f.seek(block_counter * _buffer, os.SEEK_END)
except IOError: # either file is too small, or too many lines requested
f.seek(0)
lines_found = f.readlines()
break
lines_found = f.readlines()
# we found enough lines, get out
if len(lines_found) > lines:
break
# decrement the block counter to get the
# next X bytes
block_counter -= 1
return lines_found[-lines:]
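# --- Editor's illustrative sketch, not part of the original file. A minimal
# usage example; '/var/log/syslog' is only a placeholder for any readable
# text file.
if __name__ == '__main__':
    with open('/var/log/syslog') as log_file:
        for line in TailLog.tail(log_file, lines=3):
            print(line.rstrip())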
|
Ardjan-Aalberts/showMe
|
showMe/bin/tail.py
|
Python
|
mit
| 955
|
import math
import pytest
import random
import compas
from compas.geometry import Point
from compas.geometry import Polyline
from compas.utilities import pairwise
if not compas.IPY:
def test_data():
p = Polyline([Point(random.random(), random.random(), random.random()) for i in range(10)])
assert p.data == p.validate_data()
o = Polyline.from_data(p.data)
assert p == o
assert not (p is o)
assert o.data == o.validate_data()
def test_polyline():
points = [[0, 0, x] for x in range(5)]
polyline = Polyline(points)
assert polyline.points == points
assert polyline.lines == [(a, b) for a, b in pairwise(points)]
def test_equality():
points1 = [[0, 0, x] for x in range(5)]
polyline1 = Polyline(points1)
points2 = [[0, 0, x] for x in range(6)]
polyline2 = Polyline(points2)
assert polyline1 == polyline1
assert polyline1 == points1
assert points1 == polyline1
assert polyline1 != polyline2
assert polyline2 != polyline1
assert polyline1 != points2
assert points2 != polyline1
assert polyline1 != 1
def test___repr__():
points = [[0, 0, x] for x in range(5)]
polyline = Polyline(points)
assert polyline == eval(repr(polyline))
def test___getitem__():
points = [[0, 0, x] for x in range(5)]
polyline = Polyline(points)
for x in range(5):
assert polyline[x] == [0, 0, x]
with pytest.raises(IndexError):
polyline[6] = [0, 0, 6]
def test___setitem__():
points = [[0, 0, x] for x in range(5)]
polyline = Polyline(points)
point = [1, 1, 4]
polyline[4] = point
assert polyline[4] == point
assert isinstance(polyline[4], Point)
assert polyline.lines[-1].end == point
@pytest.mark.parametrize('coords,expected', [
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [20.0, 0.0, 0.0], [40.0, 0.0, 0.0], [60.0, 0.0, 0.0], [80.0, 0.0, 0.0], [100.0, 0.0, 0.0]]),
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0], [300.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [60.0, 0.0, 0.0], [120.0, 0.0, 0.0], [180.0, 0.0, 0.0], [240.0, 0.0, 0.0], [300.0, 0.0, 0.0]]),
([[0.0, 0.0, 0.0], [200.0, 0.0, 0.0], [200.0, 200.0, 0.0], [0.0, 200.0, 0.0], [0.0, 0.0, 0.0]], [
[0.0, 0.0, 0.0], [160.0, 0.0, 0.0], [200.0, 120.0, 0.0], [120.0, 200.0, 0.0], [0.0, 160.0, 0.0], [0.0, 0.0, 0.0]])
])
def test_polyline_divide(coords, expected):
assert expected == Polyline(coords).divide_polyline(5)
@pytest.mark.parametrize('coords,expected', [
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]]),
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0], [300.0, 0.0, 0.0]], [[0, 0, 0], [100, 0, 0], [200, 0, 0], [300, 0, 0]]),
([[0.0, 0.0, 0.0], [200.0, 0.0, 0.0], [200.0, 200.0, 0.0], [0.0, 200.0, 0.0], [0.0, 0.0, 0.0]], [[0, 0, 0], [
100, 0, 0], [200, 0, 0], [200, 100, 0], [200, 200, 0], [100.0, 200, 0], [0, 200, 0], [0, 100.0, 0], [0, 0, 0]])
])
def test_polyline_divide_length(coords, expected):
assert expected == Polyline(coords).divide_polyline_by_length(100)
@pytest.mark.parametrize('coords,expected', [
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [80.0, 0.0, 0.0]]),
])
def test_polyline_divide_length_strict1(coords, expected):
assert expected == Polyline(coords).divide_polyline_by_length(80)
@pytest.mark.parametrize('coords,expected', [
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]], [[0.0, 0.0, 0.0], [80.0, 0.0, 0.0], [100.0, 0.0, 0.0]]),
])
def test_polyline_divide_length_strict2(coords, expected):
assert expected == Polyline(coords).divide_polyline_by_length(80, False)
@pytest.mark.parametrize('coords,input,expected', [
([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], math.pi/2, [Polyline([[0.0, 0.0, 0.0], [1, 0.0, 0.0]]),
Polyline([[1, 0.0, 0.0], [1, 1, 0.0]]), Polyline([[1, 1, 0.0], [0.0, 1, 0.0]]), Polyline([[0.0, 1, 0.0], [0.0, 0.0, 0.0]])]),
])
def test_polyline_split_at_corners(coords, input, expected):
assert expected == Polyline(coords).split_at_corners(input)
@pytest.mark.parametrize('coords,input,expected', [
([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]], [50, 0, 0], [1.0, 0.0, 0.0]),
([[0.0, 0.0, 0.0], [50.0, 0.0, 0.0], [100.0, 100.0, 0.0]], [50, 0, 0], [1.0, 0.0, 0.0]),
])
def test_polyline_tangent_at_point(coords, input, expected):
assert expected == Polyline(coords).tangent_at_point_on_polyline(input)
|
compas-dev/compas
|
tests/compas/geometry/test_polyline.py
|
Python
|
mit
| 4,531
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import unittest
import six
from wechatpy.replies import TextReply, create_reply
class CreateReplyTestCase(unittest.TestCase):
def test_create_reply_with_text_not_render(self):
text = 'test'
reply = create_reply(text, render=False)
self.assertEqual('text', reply.type)
self.assertEqual(text, reply.content)
reply.render()
def test_create_reply_with_text_render(self):
text = 'test'
reply = create_reply(text, render=True)
self.assertTrue(isinstance(reply, six.text_type))
def test_create_reply_with_message(self):
from wechatpy.messages import TextMessage
msg = TextMessage({
'FromUserName': 'user1',
'ToUserName': 'user2',
})
reply = create_reply('test', msg, render=False)
self.assertEqual('user1', reply.target)
self.assertEqual('user2', reply.source)
reply.render()
def test_create_reply_with_reply(self):
_reply = TextReply(content='test')
reply = create_reply(_reply, render=False)
self.assertEqual(_reply, reply)
reply.render()
def test_create_reply_with_articles(self):
articles = [
{
'title': 'test 1',
'description': 'test 1',
'image': 'http://www.qq.com/1.png',
'url': 'http://www.qq.com/1'
},
{
'title': 'test 2',
'description': 'test 2',
'image': 'http://www.qq.com/2.png',
'url': 'http://www.qq.com/2'
},
{
'title': 'test 3',
'description': 'test 3',
'image': 'http://www.qq.com/3.png',
'url': 'http://www.qq.com/3'
},
]
reply = create_reply(articles, render=False)
self.assertEqual('news', reply.type)
reply.render()
def test_create_reply_with_more_than_ten_articles(self):
articles = [
{
'title': 'test 1',
'description': 'test 1',
'image': 'http://www.qq.com/1.png',
'url': 'http://www.qq.com/1'
},
{
'title': 'test 2',
'description': 'test 2',
'image': 'http://www.qq.com/2.png',
'url': 'http://www.qq.com/2'
},
{
'title': 'test 3',
'description': 'test 3',
'image': 'http://www.qq.com/3.png',
'url': 'http://www.qq.com/3'
},
{
'title': 'test 4',
'description': 'test 4',
'image': 'http://www.qq.com/4.png',
'url': 'http://www.qq.com/4'
},
{
'title': 'test 5',
'description': 'test 5',
'image': 'http://www.qq.com/5.png',
'url': 'http://www.qq.com/5'
},
{
'title': 'test 6',
'description': 'test 6',
'image': 'http://www.qq.com/6.png',
'url': 'http://www.qq.com/6'
},
{
'title': 'test 7',
'description': 'test 7',
'image': 'http://www.qq.com/7.png',
'url': 'http://www.qq.com/7'
},
{
'title': 'test 8',
'description': 'test 8',
'image': 'http://www.qq.com/8.png',
'url': 'http://www.qq.com/8'
},
{
'title': 'test 9',
'description': 'test 9',
'image': 'http://www.qq.com/9.png',
'url': 'http://www.qq.com/9'
},
{
'title': 'test 10',
'description': 'test 10',
'image': 'http://www.qq.com/10.png',
'url': 'http://www.qq.com/10'
},
{
'title': 'test 11',
'description': 'test 11',
'image': 'http://www.qq.com/11.png',
'url': 'http://www.qq.com/11'
},
]
self.assertRaises(AttributeError, create_reply, articles)
def test_create_empty_reply(self):
from wechatpy.replies import EmptyReply
reply = create_reply('')
self.assertTrue(isinstance(reply, EmptyReply))
reply = create_reply(None)
self.assertTrue(isinstance(reply, EmptyReply))
reply = create_reply(False)
self.assertTrue(isinstance(reply, EmptyReply))
|
hunter007/wechatpy
|
tests/test_create_reply.py
|
Python
|
mit
| 4,773
|
import pyglet
import random
from pyglet.gl import *
from pyglet.window import key as keys
WIDTH = 500
HEIGHT = 500
rows = 100
columns = 100
RUNNING = False
cell_width = float(WIDTH) / rows
cell_height = float(HEIGHT) / columns
INITAL_POPULATION = 10
ACTIVE_TILE_INDEX = 0
STATE = []
win = pyglet.window.Window(width=WIDTH, height=HEIGHT)
# A probably unnecessary initial attempt at creating a clean little API for checking keyboard state
class Keyboard:
def __init__(self):
self.storage = {
keys.UP: False,
keys.DOWN: False,
keys.LEFT: False,
keys.RIGHT: False,
keys.SPACE: False,
keys.ENTER: False
}
def up(self):
if self.storage[keys.UP]:
return True
def down(self):
if self.storage[keys.DOWN]:
return True
def left(self):
if self.storage[keys.LEFT]:
return True
def right(self):
if self.storage[keys.RIGHT]:
return True
def enter(self):
if self.storage[keys.ENTER]:
return True
keyboard = Keyboard()
def reset_state():
global STATE, INITAL_POPULATION
STATE = []
last_i = 0
pop = 0
for i in range(rows * columns):
STATE.append(0)
if random.randint(0, 300) < 50 and pop <= INITAL_POPULATION:
pop += 1
STATE.append(1)
else:
STATE.append(0)
@win.event
def on_draw():
global ACTIVE_TILE_INDEX
glClear(GL_COLOR_BUFFER_BIT)
x = (WIDTH / 2) - (cell_width / 2)
y = (HEIGHT / 2) - (cell_height / 2)
i = 0
tau = 0
for j, row in enumerate(STATE):
if j % rows == 0:
i += 1
tau = 0
if STATE[j] == 1:
y = cell_height * i
x = cell_width * tau
verts = (
x, y,
x, y + cell_height,
x + cell_width, y + cell_height,
x + cell_width, y
)
pyglet.graphics.draw(4, pyglet.gl.GL_QUADS, ('v2f', verts))
tau += 1
def neighbor_count(i):
global STATE
row = (i - (i % rows)) / rows
top_left = (i + rows) - 1
mid_left = (i + rows)
top_right = (i + rows) + 1
left = (i - 1) # + (row * rows)
right = (i + 1) #+ (row * rows)
bot_left = (i - rows) - 1
bot_mid = (i - rows)
bot_right = (i - rows) + 1
to_check = [
top_left, mid_left, top_right,
left, right,
bot_left, bot_mid, bot_right
]
alive = STATE[i] == 1
num_alive = 0
for cell in to_check:
try:
if STATE[cell] == 1: num_alive += 1
except: pass
return num_alive
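# Editor's note (illustrative, not part of the original file): with rows = 100,
# neighbor_count(205) checks indices 104, 105, 106, 204, 206, 304, 305 and 306,
# i.e. the eight cells adjacent to cell 205 in the flattened row-major grid;
# indices past the end of STATE are skipped by the try/except, while negative
# indices silently wrap around to the end of the list.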
@win.event
def on_mouse_motion(x, y, dx, dy):
global ACTIVE_TILE_INDEX
row = int(y / float(cell_height)) - 1 # TODO: Why subtract 1?
column = int(x / float(cell_width))
i = (row * rows) + column
ACTIVE_TILE_INDEX = i
print 'cell %d has %d live neighbors' % (i, neighbor_count(ACTIVE_TILE_INDEX))
@win.event
def on_mouse_press(x, y, dx, dy):
global STATE
val = STATE[ACTIVE_TILE_INDEX]
if val == 1:
new = 0
else:
new = 1
STATE[ACTIVE_TILE_INDEX] = new
@win.event
def on_key_press(sym, mod):
keyboard.storage[sym] = True
global RUNNING
if sym == keys.ENTER and not RUNNING:
print 'STARTING THE Game'
RUNNING = True
elif sym == keys.ENTER and RUNNING:
print 'STOPPING THE GAME'
RUNNING = False
if sym == 111: # r
print '~~~~~~~~~~~~~~~~~~'
        print 'LOL RESETTING'
reset_state()
@win.event
def on_key_release(sym, mod):
keyboard.storage[sym] = False
gen = 1
def update(dt):
global gen, RUNNING, STATE
next_generation = list(STATE)
if not RUNNING: return
for i, cell in enumerate(STATE):
row = (i - (i % rows)) / rows
alive = cell == 1
num_alive = neighbor_count(i)
if alive:
if num_alive < 2:
next_generation[i] = 0
if num_alive == 2 or num_alive == 3:
next_generation[i] = 1
if num_alive > 3:
next_generation[i] = 0
elif num_alive == 3:
next_generation[i] = 1
gen += 1
print 'computed generation %d in %f seconds' % (gen, dt)
STATE = next_generation
def main():
glColor3f(1.0, 1.0, 0)
reset_state()
pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run()
if __name__ == '__main__':
main()
|
woodbridge/life
|
game.py
|
Python
|
mit
| 4,698
|
import random
import json
from scipy.sparse import lil_matrix
from bidict import bidict
class Corpus:
def __init__(self, filename):
corpus_file = open(filename)
corpus_data = json.load(corpus_file)
corpus_file.close()
if not isinstance(corpus_data, dict):
raise Exception("Invalid Corpus Format")
num_keys = len(corpus_data)
self.mapping = bidict()
self.matrix = lil_matrix((num_keys, num_keys))
highest_empty_index = 0
for cur_key, cur_dict in corpus_data.items():
if not isinstance(cur_dict, dict):
raise Exception("Invalid Corpus Format")
if cur_key not in self.mapping:
self.mapping[cur_key] = highest_empty_index
highest_empty_index += 1
start_index = self.mapping[cur_key]
for target, probability in cur_dict.items():
if target not in self.mapping:
self.mapping[target] = highest_empty_index
highest_empty_index += 1
target_index = self.mapping[target]
self.matrix[start_index, target_index] = probability
def get_random_next(self, key):
"""Selects a random follower based on the key and the corpus probabilities"""
start_index = self.mapping[key]
        column = self.matrix.getrowview(start_index)
        nonzero_indices = column.nonzero()[1]
rand = random.random()
for target_index in nonzero_indices:
transition_prob = self.matrix[start_index, target_index]
if transition_prob > rand:
return self.mapping.inv[target_index]
else:
rand -= transition_prob
# one should never land here
raise Exception("Not enough possibilities for " + str(key))
|
Beta-Alf/friendly-carnival
|
corpus.py
|
Python
|
mit
| 1,866
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
* File Name : wikimacro.py
* Purpose : Fetch macros list from FreeCAD wiki, macro URL and author.
* Creation Date : 18-06-2016
* Copyright (c) 2016 Mandeep Singh <mandeeps708@gmail.com>
"""
import requests, bs4
# FreeCAD Macro page.
link = "http://www.freecadweb.org/wiki/index.php?title=Macros_recipes"
req = requests.get(link)
soup = bs4.BeautifulSoup(req.text, 'html.parser')
# Selects the spans with class MacroLink enclosing the macro links.
output = soup.select("span.MacroLink")
for x in output:
# Prints macro name
print x.a.getText()
# Macro URL.
url = "http://freecadweb.org" + x.a.get("href")
print url
req = requests.get(url)
soup = bs4.BeautifulSoup(req.text, 'html.parser')
    # Use the same URL to fetch macro description and macro author
desc = soup.select(".macro-description")[0].getText()
author = soup.select(".macro-author")[0].getText()
print desc, author
|
mandeeps708/scripts
|
FC_getMacro/wikimacro.py
|
Python
|
mit
| 987
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
from random import random
from time import time
from os.path import join
from swift import gettext_ as _
import hashlib
from eventlet import sleep, Timeout
from eventlet.greenpool import GreenPool
from swift.common.daemon import Daemon
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, dump_recon_cache
from swift.common.http import HTTP_NOT_FOUND, HTTP_CONFLICT, \
HTTP_PRECONDITION_FAILED
from swift.container.reconciler import direct_delete_container_entry
class ObjectExpirer(Daemon):
"""
Daemon that queries the internal hidden expiring_objects_account to
discover objects that need to be deleted.
:param conf: The daemon configuration.
"""
def __init__(self, conf, logger=None, swift=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-expirer')
self.interval = int(conf.get('interval') or 300)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
conf_path = conf.get('__file__') or '/etc/swift/object-expirer.conf'
request_tries = int(conf.get('request_tries') or 3)
self.swift = swift or InternalClient(
conf_path, 'Swift Object Expirer', request_tries)
self.report_interval = int(conf.get('report_interval') or 300)
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = join(self.recon_cache_path, 'object.recon')
self.concurrency = int(conf.get('concurrency', 1))
if self.concurrency < 1:
raise ValueError("concurrency must be set to at least 1")
self.processes = int(self.conf.get('processes', 0))
self.process = int(self.conf.get('process', 0))
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
def report(self, final=False):
"""
Emits a log line report of the progress so far, or the final progress
        if final=True.
:param final: Set to True for the last report once the expiration pass
has completed.
"""
if final:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass completed in %ds; %d objects expired') %
(elapsed, self.report_objects))
dump_recon_cache({'object_expiration_pass': elapsed,
'expired_last_pass': self.report_objects},
self.rcache, self.logger)
elif time() - self.report_last_time >= self.report_interval:
elapsed = time() - self.report_first_time
self.logger.info(_('Pass so far %ds; %d objects expired') %
(elapsed, self.report_objects))
self.report_last_time = time()
def run_once(self, *args, **kwargs):
"""
Executes a single pass, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon accepts processes and process keyword args.
These will override the values from the config file if
provided.
"""
processes, process = self.get_process_values(kwargs)
pool = GreenPool(self.concurrency)
containers_to_delete = []
self.report_first_time = self.report_last_time = time()
self.report_objects = 0
try:
self.logger.debug('Run begin')
containers, objects = \
self.swift.get_account_info(self.expiring_objects_account)
self.logger.info(_('Pass beginning; %s possible containers; %s '
'possible objects') % (containers, objects))
for c in self.swift.iter_containers(self.expiring_objects_account):
container = c['name']
timestamp = int(container)
if timestamp > int(time()):
break
containers_to_delete.append(container)
for o in self.swift.iter_objects(self.expiring_objects_account,
container):
obj = o['name'].encode('utf8')
if processes > 0:
obj_process = int(
hashlib.md5('%s/%s' % (container, obj)).
hexdigest(), 16)
if obj_process % processes != process:
continue
timestamp, actual_obj = obj.split('-', 1)
timestamp = int(timestamp)
if timestamp > int(time()):
break
pool.spawn_n(
self.delete_object, actual_obj, timestamp,
container, obj)
pool.waitall()
for container in containers_to_delete:
try:
self.swift.delete_container(
self.expiring_objects_account,
container,
acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
except (Exception, Timeout) as err:
self.logger.exception(
_('Exception while deleting container %s %s') %
(container, str(err)))
self.logger.debug('Run end')
self.report(final=True)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
def run_forever(self, *args, **kwargs):
"""
Executes passes forever, looking for objects to expire.
:param args: Extra args to fulfill the Daemon interface; this daemon
has no additional args.
:param kwargs: Extra keyword args to fulfill the Daemon interface; this
daemon has no additional keyword args.
"""
sleep(random() * self.interval)
while True:
begin = time()
try:
self.run_once(*args, **kwargs)
except (Exception, Timeout):
self.logger.exception(_('Unhandled exception'))
elapsed = time() - begin
if elapsed < self.interval:
sleep(random() * (self.interval - elapsed))
def get_process_values(self, kwargs):
"""
Gets the processes, process from the kwargs if those values exist.
Otherwise, return processes, process set in the config file.
:param kwargs: Keyword args passed into the run_forever(), run_once()
methods. They have values specified on the command
line when the daemon is run.
"""
if kwargs.get('processes') is not None:
processes = int(kwargs['processes'])
else:
processes = self.processes
if kwargs.get('process') is not None:
process = int(kwargs['process'])
else:
process = self.process
if process < 0:
raise ValueError(
'process must be an integer greater than or equal to 0')
if processes < 0:
raise ValueError(
'processes must be an integer greater than or equal to 0')
if processes and process >= processes:
raise ValueError(
'process must be less than or equal to processes')
return processes, process
def delete_object(self, actual_obj, timestamp, container, obj):
start_time = time()
try:
try:
self.delete_actual_object(actual_obj, timestamp)
except UnexpectedResponse as err:
if err.resp.status_int != HTTP_NOT_FOUND:
raise
if float(timestamp) > time() - self.reclaim_age:
# we'll have to retry the DELETE later
raise
self.pop_queue(container, obj)
self.report_objects += 1
self.logger.increment('objects')
except (Exception, Timeout) as err:
self.logger.increment('errors')
self.logger.exception(
_('Exception while deleting object %s %s %s') %
(container, obj, str(err)))
self.logger.timing_since('timing', start_time)
self.report()
def pop_queue(self, container, obj):
"""
Issue a delete object request to the container for the expiring object
queue entry.
"""
direct_delete_container_entry(self.swift.container_ring,
self.expiring_objects_account,
container, obj)
def delete_actual_object(self, actual_obj, timestamp):
"""
Deletes the end-user object indicated by the actual object name given
'<account>/<container>/<object>' if and only if the X-Delete-At value
of the object is exactly the timestamp given.
:param actual_obj: The name of the end-user object to delete:
'<account>/<container>/<object>'
:param timestamp: The timestamp the X-Delete-At value must match to
perform the actual delete.
"""
path = '/v1/' + urllib.quote(actual_obj.lstrip('/'))
self.swift.make_request('DELETE', path,
{'X-If-Delete-At': str(timestamp)},
(2, HTTP_PRECONDITION_FAILED))
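# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream daemon): how entries in the
# hidden expiring_objects_account are laid out and how run_once() shards them
# across processes. Containers are named by a Unix timestamp and objects are
# named "<timestamp>-<account>/<container>/<object>"; an entry is handled by
# the process whose number equals md5("<container>/<obj>") modulo the process
# count. The container and object names below are made up for demonstration.
def _demo_expirer_sharding(container='1420070400',
                           obj='1420070400-AUTH_test/photos/cat.jpg',
                           processes=3):
    import hashlib
    # Same hashing scheme as run_once(): pick which process owns this entry.
    obj_process = int(
        hashlib.md5(('%s/%s' % (container, obj)).encode('utf8')).hexdigest(),
        16) % processes
    # Same split as run_once(): the part before the first '-' is the
    # X-Delete-At timestamp, the rest is the end-user object path.
    timestamp, actual_obj = obj.split('-', 1)
    return obj_process, int(timestamp), actual_obj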
|
Khushbu27/Tutorial
|
swift/obj/expirer.py
|
Python
|
apache-2.0
| 10,550
|
# -*- coding: utf-8 -*-
# Scrapy settings for scraper_propiedades project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scraper_propiedades'
SPIDER_MODULES = ['scraper_propiedades.spiders']
NEWSPIDER_MODULE = 'scraper_propiedades.spiders'
LOG_LEVEL = 'INFO'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scraper_propiedades (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN=16
# CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
# COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED=False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'scraper_propiedades.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'scraper_propiedades.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'scraper_propiedades.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
# AUTOTHROTTLE_ENABLED=True
# The initial download delay
# AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED=True
# HTTPCACHE_EXPIRATION_SECS=0
# HTTPCACHE_DIR='httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES=[]
# HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
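# Illustrative sketch (not part of the original project settings): what a
# polite-crawling configuration could look like if the throttling options
# documented above were enabled. The values are examples only; uncomment and
# tune them for the target site before relying on them.
# DOWNLOAD_DELAY = 1.0
# CONCURRENT_REQUESTS_PER_DOMAIN = 4
# AUTOTHROTTLE_ENABLED = True
# AUTOTHROTTLE_START_DELAY = 2
# AUTOTHROTTLE_MAX_DELAY = 30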
|
abenassi/scraper-propiedades
|
scraper_propiedades/scraper_propiedades/settings.py
|
Python
|
gpl-3.0
| 3,131
|
#!/usr/bin/env python
# Rekall
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <scudette@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Installation and deployment script."""
from __future__ import print_function
__author__ = "Michael Cohen <scudette@gmail.com>"
import os
import subprocess
import setuptools
from setuptools import find_packages, setup, Command
rekall_description = "Rekall Memory Forensic Framework"
current_directory = os.path.dirname(__file__)
ENV = {"__file__": __file__}
exec(open("rekall/_version.py").read(), ENV)
VERSION = ENV["get_versions"]()
def find_data_files(source):
result = []
for directory, _, files in os.walk(source):
files = [os.path.join(directory, x) for x in files]
result.append((directory, files))
return result
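# Illustrative sketch (not part of the upstream build script): the shape of the
# value find_data_files() produces and hands to setuptools' data_files argument.
# The helper below is never called during installation; the directory and file
# names in its docstring are hypothetical.
def _show_data_files_layout(source="resources"):
    """Print one (directory, [files]) tuple per walked directory, e.g.
    ('resources', ['resources/a.json']) followed by
    ('resources/profiles', ['resources/profiles/b.json'])."""
    for directory, files in find_data_files(source):
        print(directory, files)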
# These versions are fixed to the exact tested configuration. Prior to release,
# please use "setup.py pip_upgrade" to test with the latest version. This
# approach ensures that any Rekall version will always work as tested - even
# when external packages are upgraded in an incompatible way.
install_requires = [
'PyYAML',
'acora==2.1',
'arrow==0.10.0',
'artifacts==20170909',
'future==0.16.0',
'intervaltree==2.1.0',
'ipaddr==2.2.0',
'parsedatetime==2.4',
"psutil >= 5.0, < 6.0",
'pyaff4 ==0.26.post6',
'pycryptodome==3.6.6',
'pyelftools==0.24',
'pyparsing==2.1.5',
'python-dateutil==2.6.1',
'pytsk3==20170802',
'pytz==2017.3',
'rekall-capstone==3.0.5.post2',
"rekall-efilter >= 1.6, < 1.7",
'pypykatz>=0.3.5;python_version>="3.5"',
# Should match exactly the version of this package.
'rekall-lib',
'rekall-yara==3.6.3.1',
]
if "VIRTUAL_ENV" not in os.environ:
print("*****************************************************")
print(" WARNING: You are not installing Rekall in a virtual")
print(" environment. This configuration is not supported!!!")
print(" Expect breakage.")
print("*****************************************************")
if int(setuptools.__version__.split(".")[0]) < 8:
raise RuntimeError("Rekall requires at least setuptool version 8.0. "
"Please upgrade with 'pip install --upgrade setuptools'")
class PIPUpgrade(Command):
description = "Upgrade all the dependencies in the current virtualenv."
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
required = [x.split()[0] for x in install_requires]
output = subprocess.check_output(
["pip", "install", "--upgrade"] + required)
# Print the current versions.
output = subprocess.check_output(
["pip", "freeze"], errors="ignore")
result = []
for package in required:
try:
result.append(
[x for x in output.splitlines()
if package in x][0])
except IndexError:
pass
print("\n".join(sorted(result)))
class CleanCommand(Command):
description = ("custom clean command that forcefully removes "
"dist/build directories")
user_options = []
def initialize_options(self):
self.cwd = None
def finalize_options(self):
self.cwd = os.getcwd()
def run(self):
if os.getcwd() != self.cwd:
raise RuntimeError('Must be in package root: %s' % self.cwd)
os.system('rm -rf ./build ./dist')
commands = {}
commands["pip_upgrade"] = PIPUpgrade
commands["clean"] = CleanCommand
setup(
name="rekall-core",
version=VERSION["pep440"],
cmdclass=commands,
description=rekall_description,
long_description=open(os.path.join(current_directory, "README.rst")).read(),
license="GPL",
url="https://www.rekall-forensic.com/",
author="The Rekall team",
author_email="rekall-discuss@googlegroups.com",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
scripts=["rekall/rekal.py"],
package_dir={'rekall': 'rekall'},
packages=find_packages('.'),
data_files=find_data_files("resources"),
entry_points="""
[rekall.plugins]
plugins=rekall.plugins
[console_scripts]
rekal = rekall.rekal:main
rekall = rekall.rekal:main
""",
install_requires=install_requires,
extras_require={
# The following requirements are needed in Windows.
':sys_platform=="win32"': [
# Just grab the latest since it is not the same version on
# both python2 and python3.
"pypiwin32",
],
}
)
|
google/rekall
|
rekall-core/setup.py
|
Python
|
gpl-2.0
| 5,477
|
from django.conf.urls.defaults import patterns
from satchmo_store.shop.satchmo_settings import get_satchmo_setting
ssl = get_satchmo_setting('SSL', default_value=False)
urlpatterns = patterns('',
(r'^$', 'payment.modules.paypal.views.pay_ship_info', {'SSL': ssl}, 'PAYPAL_satchmo_checkout-step2'),
(r'^confirm/$', 'payment.modules.paypal.views.confirm_info', {'SSL': ssl}, 'PAYPAL_satchmo_checkout-step3'),
(r'^success/$', 'payment.modules.paypal.views.success', {'SSL': ssl}, 'PAYPAL_satchmo_checkout-success'),
(r'^ipn/$', 'payment.modules.paypal.views.ipn', {'SSL': ssl}, 'PAYPAL_satchmo_checkout-ipn'),
(r'^confirmorder/$', 'payment.views.confirm.confirm_free_order',
{'SSL' : ssl, 'key' : 'PAYPAL'}, 'PAYPAL_satchmo_checkout_free-confirm')
)
|
thoreg/satchmo
|
satchmo/apps/payment/modules/paypal/urls.py
|
Python
|
bsd-3-clause
| 783
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import logging
import re
import uuid
from urlparse import urljoin
from collections import Counter, OrderedDict
from itertools import product
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import UserError, ValidationError
from odoo.addons.website.models.website import slug
email_validator = re.compile(r"[^@]+@[^@]+\.[^@]+")
_logger = logging.getLogger(__name__)
def dict_keys_startswith(dictionary, string):
"""Returns a dictionary containing the elements of <dict> whose keys start with <string>.
.. note::
This function uses dictionary comprehensions (Python >= 2.7)
"""
matched_keys = [key for key in dictionary.keys() if key.startswith(string)]
return dict((k, dictionary[k]) for k in matched_keys)
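# Illustrative sketch (not part of the upstream module): how dict_keys_startswith()
# pulls one question's answers out of a posted form. The answer_tag format
# "<survey_id>_<page_id>_<question_id>" and the sample values are assumptions
# for demonstration; only the filtering behaviour is the point here.
def _demo_dict_keys_startswith():
    post = {
        '1_2_3': 'a',          # the question's own value
        '1_2_3_comment': 'b',  # its optional comment sub-field
        '1_2_4': 'c',          # another question, filtered out
    }
    # Returns only the keys belonging to question '1_2_3'.
    return dict_keys_startswith(post, '1_2_3')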
class SurveyStage(models.Model):
"""Stages for Kanban view of surveys"""
_name = 'survey.stage'
_description = 'Survey Stage'
_order = 'sequence,id'
name = fields.Char(required=True, translate=True)
sequence = fields.Integer(default=1)
closed = fields.Boolean(help="If closed, people won't be able to answer to surveys in this column.")
fold = fields.Boolean(string="Folded in kanban view")
_sql_constraints = [
('positive_sequence', 'CHECK(sequence >= 0)', 'Sequence number MUST be a natural')
]
class Survey(models.Model):
""" Settings for a multi-page/multi-question survey.
Each survey can have one or more attached pages, and each page can display
one or more questions.
"""
_name = 'survey.survey'
_description = 'Survey'
_rec_name = 'title'
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _default_stage(self):
return self.env['survey.stage'].search([], limit=1).id
title = fields.Char('Title', required=True, translate=True)
page_ids = fields.One2many('survey.page', 'survey_id', string='Pages', copy=True)
stage_id = fields.Many2one('survey.stage', string="Stage", default=_default_stage,
ondelete="set null", copy=False, group_expand='_read_group_stage_ids')
auth_required = fields.Boolean('Login required', help="Users with a public link will be requested to login before taking part to the survey",
oldname="authenticate")
users_can_go_back = fields.Boolean('Users can go back', help="If checked, users can go back to previous pages.")
tot_sent_survey = fields.Integer("Number of sent surveys", compute="_compute_survey_statistic")
tot_start_survey = fields.Integer("Number of started surveys", compute="_compute_survey_statistic")
tot_comp_survey = fields.Integer("Number of completed surveys", compute="_compute_survey_statistic")
description = fields.Html("Description", translate=True, help="A long description of the purpose of the survey")
color = fields.Integer('Color Index', default=0)
user_input_ids = fields.One2many('survey.user_input', 'survey_id', string='User responses', readonly=True)
designed = fields.Boolean("Is designed?", compute="_is_designed")
public_url = fields.Char("Public link", compute="_compute_survey_url")
public_url_html = fields.Char("Public link (html version)", compute="_compute_survey_url")
print_url = fields.Char("Print link", compute="_compute_survey_url")
result_url = fields.Char("Results link", compute="_compute_survey_url")
email_template_id = fields.Many2one('mail.template', string='Email Template', ondelete='set null')
thank_you_message = fields.Html("Thanks Message", translate=True, help="This message will be displayed when survey is completed")
quizz_mode = fields.Boolean("Quizz Mode")
active = fields.Boolean("Active", default=True)
is_closed = fields.Boolean("Is closed", related='stage_id.closed')
def _is_designed(self):
for survey in self:
if not survey.page_ids or not [page.question_ids for page in survey.page_ids if page.question_ids]:
survey.designed = False
else:
survey.designed = True
@api.multi
def _compute_survey_statistic(self):
UserInput = self.env['survey.user_input']
sent_survey = UserInput.search([('survey_id', 'in', self.ids), ('type', '=', 'link')])
start_survey = UserInput.search(['&', ('survey_id', 'in', self.ids), '|', ('state', '=', 'skip'), ('state', '=', 'done')])
complete_survey = UserInput.search([('survey_id', 'in', self.ids), ('state', '=', 'done')])
for survey in self:
survey.tot_sent_survey = len(sent_survey.filtered(lambda user_input: user_input.survey_id == survey))
survey.tot_start_survey = len(start_survey.filtered(lambda user_input: user_input.survey_id == survey))
survey.tot_comp_survey = len(complete_survey.filtered(lambda user_input: user_input.survey_id == survey))
def _compute_survey_url(self):
""" Computes a public URL for the survey """
base_url = '/' if self.env.context.get('relative_url') else self.env['ir.config_parameter'].get_param('web.base.url')
for survey in self:
survey.public_url = urljoin(base_url, "survey/start/%s" % (slug(survey)))
survey.print_url = urljoin(base_url, "survey/print/%s" % (slug(survey)))
survey.result_url = urljoin(base_url, "survey/results/%s" % (slug(survey)))
survey.public_url_html = '<a href="%s">%s</a>' % (survey.public_url, _("Click here to start survey"))
@api.model
def _read_group_stage_ids(self, stages, domain, order):
""" Read group customization in order to display all the stages in the
kanban view, even if they are empty
"""
stage_ids = stages._search([], order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
# Public methods #
def copy_data(self, default=None):
title = _("%s (copy)") % (self.title)
default = dict(default or {}, title=title)
return super(Survey, self).copy_data(default)
@api.model
def next_page(self, user_input, page_id, go_back=False):
""" The next page to display to the user, knowing that page_id is the id
of the last displayed page.
If page_id == 0, it will always return the first page of the survey.
If all the pages have been displayed and go_back == False, it will
return None
If go_back == True, it will return the *previous* page instead of the
next page.
.. note::
It is assumed here that a careful user will not try to set go_back
to True if she knows that the page to display is the first one!
(doing this will probably cause a giant worm to eat her house)
"""
survey = user_input.survey_id
pages = list(enumerate(survey.page_ids))
# First page
if page_id == 0:
return (pages[0][1], 0, len(pages) == 1)
current_page_index = pages.index((filter(lambda p: p[1].id == page_id, pages))[0])
# All the pages have been displayed
if current_page_index == len(pages) - 1 and not go_back:
return (None, -1, False)
# Let's get back, baby!
elif go_back and survey.users_can_go_back:
return (pages[current_page_index - 1][1], current_page_index - 1, False)
else:
# This will show the last page
if current_page_index == len(pages) - 2:
return (pages[current_page_index + 1][1], current_page_index + 1, True)
# This will show a regular page
else:
return (pages[current_page_index + 1][1], current_page_index + 1, False)
@api.multi
def filter_input_ids(self, filters, finished=False):
"""If user applies any filters, then this function returns list of
filtered user_input_id and label's strings for display data in web.
:param filters: list of dictionary (having: row_id, ansewr_id)
:param finished: True for completely filled survey,Falser otherwise.
:returns list of filtered user_input_ids.
"""
self.ensure_one()
if filters:
domain_filter, choice = [], []
for current_filter in filters:
row_id, answer_id = current_filter['row_id'], current_filter['answer_id']
if row_id == 0:
choice.append(answer_id)
else:
domain_filter.extend(['|', ('value_suggested_row.id', '=', row_id), ('value_suggested.id', '=', answer_id)])
if choice:
domain_filter.insert(0, ('value_suggested.id', 'in', choice))
else:
domain_filter = domain_filter[1:]
input_lines = self.env['survey.user_input_line'].search(domain_filter)
filtered_input_ids = [input_line.user_input_id.id for input_line in input_lines]
else:
filtered_input_ids = []
if finished:
UserInput = self.env['survey.user_input']
if not filtered_input_ids:
user_inputs = UserInput.search([('survey_id', '=', self.id)])
else:
user_inputs = UserInput.browse(filtered_input_ids)
return user_inputs.filtered(lambda input_item: input_item.state == 'done').ids
return filtered_input_ids
@api.model
def get_filter_display_data(self, filters):
"""Returns data to display current filters
:param filters: list of dictionary (having: row_id, answer_id)
        :returns list of dicts with the data needed to display the current filters.
"""
filter_display_data = []
if filters:
Label = self.env['survey.label']
for current_filter in filters:
row_id, answer_id = current_filter['row_id'], current_filter['answer_id']
label = Label.browse(answer_id)
question = label.question_id
if row_id == 0:
labels = label
else:
labels = Label.browse([row_id, answer_id])
filter_display_data.append({'question_text': question.question,
'labels': labels.mapped('value')})
return filter_display_data
@api.model
def prepare_result(self, question, current_filters=None):
""" Compute statistical data for questions by counting number of vote per choice on basis of filter """
current_filters = current_filters if current_filters else []
result_summary = {}
# Calculate and return statistics for choice
if question.type in ['simple_choice', 'multiple_choice']:
answers = {}
comments = []
[answers.update({label.id: {'text': label.value, 'count': 0, 'answer_id': label.id}}) for label in question.labels_ids]
for input_line in question.user_input_line_ids:
if input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters):
answers[input_line.value_suggested.id]['count'] += 1
if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
comments.append(input_line)
result_summary = {'answers': answers.values(), 'comments': comments}
# Calculate and return statistics for matrix
if question.type == 'matrix':
rows = OrderedDict()
answers = OrderedDict()
res = dict()
comments = []
[rows.update({label.id: label.value}) for label in question.labels_ids_2]
[answers.update({label.id: label.value}) for label in question.labels_ids]
for cell in product(rows.keys(), answers.keys()):
res[cell] = 0
for input_line in question.user_input_line_ids:
if input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters) and input_line.value_suggested_row:
res[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1
if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
comments.append(input_line)
result_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments}
# Calculate and return statistics for free_text, textbox, datetime
if question.type in ['free_text', 'textbox', 'datetime']:
result_summary = []
for input_line in question.user_input_line_ids:
if not(current_filters) or input_line.user_input_id.id in current_filters:
result_summary.append(input_line)
# Calculate and return statistics for numerical_box
if question.type == 'numerical_box':
result_summary = {'input_lines': []}
all_inputs = []
for input_line in question.user_input_line_ids:
if not(current_filters) or input_line.user_input_id.id in current_filters:
all_inputs.append(input_line.value_number)
result_summary['input_lines'].append(input_line)
if all_inputs:
result_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2),
'max': round(max(all_inputs), 2),
'min': round(min(all_inputs), 2),
'sum': sum(all_inputs),
'most_common': Counter(all_inputs).most_common(5)})
return result_summary
@api.model
def get_input_summary(self, question, current_filters=None):
""" Returns overall summary of question e.g. answered, skipped, total_inputs on basis of filter """
current_filters = current_filters if current_filters else []
result = {}
if question.survey_id.user_input_ids:
total_input_ids = current_filters or [input_id.id for input_id in question.survey_id.user_input_ids if input_id.state != 'new']
result['total_inputs'] = len(total_input_ids)
question_input_ids = []
for user_input in question.user_input_line_ids:
if not user_input.skipped:
question_input_ids.append(user_input.user_input_id.id)
result['answered'] = len(set(question_input_ids) & set(total_input_ids))
result['skipped'] = result['total_inputs'] - result['answered']
return result
# Actions
@api.multi
def action_start_survey(self):
""" Open the website page with the survey form """
self.ensure_one()
token = self.env.context.get('survey_token')
trail = "/%s" % token if token else ""
return {
'type': 'ir.actions.act_url',
'name': "Start Survey",
'target': 'self',
'url': self.with_context(relative_url=True).public_url + trail
}
@api.multi
def action_send_survey(self):
""" Open a window to compose an email, pre-filled with the survey message """
# Ensure that this survey has at least one page with at least one question.
if not self.page_ids or not [page.question_ids for page in self.page_ids if page.question_ids]:
raise UserError(_('You cannot send an invitation for a survey that has no questions.'))
if self.stage_id.closed:
raise UserError(_("You cannot send invitations for closed surveys."))
template = self.env.ref('survey.email_template_survey', raise_if_not_found=False)
local_context = dict(
self.env.context,
default_model='survey.survey',
default_res_id=self.id,
default_survey_id=self.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment'
)
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'survey.mail.compose.message',
'target': 'new',
'context': local_context,
}
@api.multi
def action_print_survey(self):
""" Open the website page with the survey printable view """
self.ensure_one()
token = self.env.context.get('survey_token')
trail = "/" + token if token else ""
return {
'type': 'ir.actions.act_url',
'name': "Print Survey",
'target': 'self',
'url': self.with_context(relative_url=True).print_url + trail
}
@api.multi
def action_result_survey(self):
""" Open the website page with the survey results view """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': self.with_context(relative_url=True).result_url
}
@api.multi
def action_test_survey(self):
""" Open the website page with the survey form into test mode"""
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': self.with_context(relative_url=True).public_url + "/phantom"
}
@api.multi
def action_survey_user_input(self):
action_rec = self.env.ref('survey.action_survey_user_input')
action = action_rec.read()[0]
ctx = dict(self.env.context)
ctx.update({'search_default_survey_id': self.ids[0],
'search_default_completed': 1})
action['context'] = ctx
return action
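# Illustrative sketch (not part of the upstream module): the page-navigation
# arithmetic that Survey.next_page() performs on survey.page_ids, reduced to a
# plain list of page identifiers so it can be read (and run) without an Odoo
# environment. The third element of the returned triple mirrors next_page()'s
# third value, i.e. whether the page about to be displayed is the final one.
def _demo_next_page_index(page_ids, current_page_id, go_back=False, can_go_back=False):
    pages = list(enumerate(page_ids))
    if current_page_id == 0:                       # first call: show the first page
        return pages[0][1], 0, len(pages) == 1
    current = [idx for idx, pid in pages if pid == current_page_id][0]
    if current == len(pages) - 1 and not go_back:  # everything has been shown
        return None, -1, False
    if go_back and can_go_back:                    # step one page backwards
        return pages[current - 1][1], current - 1, False
    nxt = current + 1                              # otherwise step forwards
    return pages[nxt][1], nxt, nxt == len(pages) - 1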
class SurveyPage(models.Model):
""" A page for a survey.
Pages are essentially containers, allowing to group questions by ordered
screens.
.. note::
A page should be deleted if the survey it belongs to is deleted.
"""
_name = 'survey.page'
_description = 'Survey Page'
_rec_name = 'title'
_order = 'sequence,id'
# Model Fields #
title = fields.Char('Page Title', required=True, translate=True)
survey_id = fields.Many2one('survey.survey', string='Survey', ondelete='cascade', required=True)
question_ids = fields.One2many('survey.question', 'page_id', string='Questions', copy=True)
sequence = fields.Integer('Page number', default=10)
description = fields.Html('Description', translate=True, oldname="note", help="An introductory text to your page")
class SurveyQuestion(models.Model):
""" Questions that will be asked in a survey.
Each question can have one of more suggested answers (eg. in case of
dropdown choices, multi-answer checkboxes, radio buttons...).
"""
_name = 'survey.question'
_description = 'Survey Question'
_rec_name = 'question'
_order = 'sequence,id'
# Model fields #
# Question metadata
page_id = fields.Many2one('survey.page', string='Survey page',
ondelete='cascade', required=True, default=lambda self: self.env.context.get('page_id'))
survey_id = fields.Many2one('survey.survey', related='page_id.survey_id', string='Survey')
sequence = fields.Integer('Sequence', default=10)
# Question
question = fields.Char('Question Name', required=True, translate=True)
description = fields.Html('Description', help="Use this field to add \
additional explanations about your question", translate=True,
oldname='descriptive_text')
# Answer
type = fields.Selection([
('free_text', 'Multiple Lines Text Box'),
('textbox', 'Single Line Text Box'),
('numerical_box', 'Numerical Value'),
('datetime', 'Date and Time'),
('simple_choice', 'Multiple choice: only one answer'),
('multiple_choice', 'Multiple choice: multiple answers allowed'),
('matrix', 'Matrix')], string='Type of Question', default='free_text', required=True)
matrix_subtype = fields.Selection([('simple', 'One choice per row'),
('multiple', 'Multiple choices per row')], string='Matrix Type', default='simple')
labels_ids = fields.One2many('survey.label', 'question_id', string='Types of answers', oldname='answer_choice_ids', copy=True)
labels_ids_2 = fields.One2many('survey.label', 'question_id_2', string='Rows of the Matrix', copy=True)
# labels are used for proposed choices
# if question.type == simple choice | multiple choice
# -> only labels_ids is used
# if question.type == matrix
# -> labels_ids are the columns of the matrix
# -> labels_ids_2 are the rows of the matrix
# Display options
column_nb = fields.Selection([('12', '1'),
('6', '2'),
('4', '3'),
('3', '4'),
('2', '6')],
'Number of columns', default='12')
# These options refer to col-xx-[12|6|4|3|2] classes in Bootstrap
display_mode = fields.Selection([('columns', 'Radio Buttons'),
('dropdown', 'Selection Box')],
default='columns')
# Comments
comments_allowed = fields.Boolean('Show Comments Field',
oldname="allow_comment")
comments_message = fields.Char('Comment Message', translate=True, default=lambda self: _("If other, please specify:"))
comment_count_as_answer = fields.Boolean('Comment Field is an Answer Choice',
oldname='make_comment_field')
# Validation
validation_required = fields.Boolean('Validate entry', oldname='is_validation_require')
validation_email = fields.Boolean('Input must be an email')
validation_length_min = fields.Integer('Minimum Text Length')
validation_length_max = fields.Integer('Maximum Text Length')
validation_min_float_value = fields.Float('Minimum value')
validation_max_float_value = fields.Float('Maximum value')
validation_min_date = fields.Datetime('Minimum Date')
validation_max_date = fields.Datetime('Maximum Date')
validation_error_msg = fields.Char('Validation Error message', oldname='validation_valid_err_msg',
translate=True, default=lambda self: _("The answer you entered has an invalid format."))
# Constraints on number of answers (matrices)
constr_mandatory = fields.Boolean('Mandatory Answer', oldname="is_require_answer")
constr_error_msg = fields.Char('Error message', oldname='req_error_msg', translate=True, default=lambda self: _("This question requires an answer."))
user_input_line_ids = fields.One2many('survey.user_input_line', 'question_id', string='Answers', domain=[('skipped', '=', False)])
_sql_constraints = [
('positive_len_min', 'CHECK (validation_length_min >= 0)', 'A length must be positive!'),
('positive_len_max', 'CHECK (validation_length_max >= 0)', 'A length must be positive!'),
('validation_length', 'CHECK (validation_length_min <= validation_length_max)', 'Max length cannot be smaller than min length!'),
('validation_float', 'CHECK (validation_min_float_value <= validation_max_float_value)', 'Max value cannot be smaller than min value!'),
('validation_date', 'CHECK (validation_min_date <= validation_max_date)', 'Max date cannot be smaller than min date!')
]
@api.onchange('validation_email')
def onchange_validation_email(self):
if self.validation_email:
self.validation_required = False
# Validation methods
@api.multi
def validate_question(self, post, answer_tag):
""" Validate question, depending on question type and parameters """
self.ensure_one()
try:
checker = getattr(self, 'validate_' + self.type)
except AttributeError:
_logger.warning(self.type + ": This type of question has no validation method")
return {}
else:
return checker(post, answer_tag)
@api.multi
def validate_free_text(self, post, answer_tag):
self.ensure_one()
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if self.constr_mandatory and not answer:
errors.update({answer_tag: self.constr_error_msg})
return errors
@api.multi
def validate_textbox(self, post, answer_tag):
self.ensure_one()
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if self.constr_mandatory and not answer:
errors.update({answer_tag: self.constr_error_msg})
# Email format validation
# Note: this validation is very basic:
# all the strings of the form
# <something>@<anything>.<extension>
# will be accepted
if answer and self.validation_email:
if not email_validator.match(answer):
errors.update({answer_tag: _('This answer must be an email address')})
# Answer validation (if properly defined)
# Length of the answer must be in a range
if answer and self.validation_required:
if not (self.validation_length_min <= len(answer) <= self.validation_length_max):
errors.update({answer_tag: self.validation_error_msg})
return errors
@api.multi
def validate_numerical_box(self, post, answer_tag):
self.ensure_one()
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if self.constr_mandatory and not answer:
errors.update({answer_tag: self.constr_error_msg})
# Checks if user input is a number
if answer:
try:
floatanswer = float(answer)
except ValueError:
errors.update({answer_tag: _('This is not a number')})
# Answer validation (if properly defined)
if answer and self.validation_required:
# Answer is not in the right range
with tools.ignore(Exception):
floatanswer = float(answer) # check that it is a float has been done hereunder
if not (self.validation_min_float_value <= floatanswer <= self.validation_max_float_value):
errors.update({answer_tag: self.validation_error_msg})
return errors
@api.multi
def validate_datetime(self, post, answer_tag):
self.ensure_one()
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if self.constr_mandatory and not answer:
errors.update({answer_tag: self.constr_error_msg})
# Checks if user input is a datetime
if answer:
try:
dateanswer = fields.Datetime.from_string(answer)
except ValueError:
errors.update({answer_tag: _('This is not a date/time')})
return errors
# Answer validation (if properly defined)
if answer and self.validation_required:
# Answer is not in the right range
try:
datetime_from_string = fields.Datetime.from_string
dateanswer = datetime_from_string(answer)
min_date = datetime_from_string(self.validation_min_date)
max_date = datetime_from_string(self.validation_max_date)
if min_date and max_date and not (min_date <= dateanswer <= max_date):
# If Minimum and Maximum Date are entered
errors.update({answer_tag: self.validation_error_msg})
elif min_date and not min_date <= dateanswer:
# If only Minimum Date is entered and not Define Maximum Date
errors.update({answer_tag: self.validation_error_msg})
elif max_date and not dateanswer <= max_date:
# If only Maximum Date is entered and not Define Minimum Date
errors.update({answer_tag: self.validation_error_msg})
except ValueError: # check that it is a datetime has been done hereunder
pass
return errors
@api.multi
def validate_simple_choice(self, post, answer_tag):
self.ensure_one()
errors = {}
if self.comments_allowed:
comment_tag = "%s_%s" % (answer_tag, 'comment')
# Empty answer to mandatory self
if self.constr_mandatory and answer_tag not in post:
errors.update({answer_tag: self.constr_error_msg})
if self.constr_mandatory and answer_tag in post and not post[answer_tag].strip():
errors.update({answer_tag: self.constr_error_msg})
# Answer is a comment and is empty
if self.constr_mandatory and answer_tag in post and post[answer_tag] == "-1" and self.comment_count_as_answer and comment_tag in post and not post[comment_tag].strip():
errors.update({answer_tag: self.constr_error_msg})
return errors
@api.multi
def validate_multiple_choice(self, post, answer_tag):
self.ensure_one()
errors = {}
if self.constr_mandatory:
answer_candidates = dict_keys_startswith(post, answer_tag)
comment_flag = answer_candidates.pop(("%s_%s" % (answer_tag, -1)), None)
if self.comments_allowed:
comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
# Preventing answers with blank value
            if answer_candidates and not any(answer.strip() for answer in answer_candidates.values()):
errors.update({answer_tag: self.constr_error_msg})
# There is no answer neither comments (if comments count as answer)
if not answer_candidates and self.comment_count_as_answer and (not comment_flag or not comment_answer):
errors.update({answer_tag: self.constr_error_msg})
# There is no answer at all
if not answer_candidates and not self.comment_count_as_answer:
errors.update({answer_tag: self.constr_error_msg})
return errors
@api.multi
def validate_matrix(self, post, answer_tag):
self.ensure_one()
errors = {}
if self.constr_mandatory:
lines_number = len(self.labels_ids_2)
answer_candidates = dict_keys_startswith(post, answer_tag)
answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
# Number of lines that have been answered
if self.matrix_subtype == 'simple':
answer_number = len(answer_candidates)
elif self.matrix_subtype == 'multiple':
answer_number = len(set([sk.rsplit('_', 1)[0] for sk in answer_candidates.keys()]))
else:
raise RuntimeError("Invalid matrix subtype")
# Validate that each line has been answered
if answer_number != lines_number:
errors.update({answer_tag: self.constr_error_msg})
return errors
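# Illustrative sketch (not part of the upstream module): how validate_matrix()
# counts answered rows from the posted keys. For the 'simple' subtype each row
# posts one key "<answer_tag>_<row_id>"; for the 'multiple' subtype each ticked
# cell posts "<answer_tag>_<row_id>_<col_id>", so rows are counted after
# stripping the trailing column id. The tag and ids passed in are made up by
# the caller; this helper only mirrors the counting logic.
def _demo_matrix_answer_count(post, answer_tag, matrix_subtype):
    candidates = dict_keys_startswith(post, answer_tag)
    candidates.pop('%s_%s' % (answer_tag, 'comment'), None)
    if matrix_subtype == 'simple':
        return len(candidates)
    # 'multiple': several keys may share the same "<answer_tag>_<row_id>" prefix.
    return len(set(key.rsplit('_', 1)[0] for key in candidates))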
class SurveyLabel(models.Model):
""" A suggested answer for a question """
_name = 'survey.label'
_rec_name = 'value'
_order = 'sequence,id'
_description = 'Survey Label'
question_id = fields.Many2one('survey.question', string='Question', ondelete='cascade')
question_id_2 = fields.Many2one('survey.question', string='Question 2', ondelete='cascade')
sequence = fields.Integer('Label Sequence order', default=10)
value = fields.Char('Suggested value', translate=True, required=True)
quizz_mark = fields.Float('Score for this choice', help="A positive score indicates a correct choice; a negative or null score indicates a wrong answer")
@api.constrains('question_id', 'question_id_2')
def _check_question_not_empty(self):
"""Ensure that field question_id XOR field question_id_2 is not null"""
if not bool(self.question_id) != bool(self.question_id_2):
raise ValidationError("A label must be attached to one and only one question")
class SurveyUserInput(models.Model):
""" Metadata for a set of one user's answers to a particular survey """
_name = "survey.user_input"
_rec_name = 'date_create'
_description = 'Survey User Input'
survey_id = fields.Many2one('survey.survey', string='Survey', required=True, readonly=True, ondelete='restrict')
date_create = fields.Datetime('Creation Date', default=fields.Datetime.now, required=True, readonly=True, copy=False)
deadline = fields.Datetime('Deadline', help="Date by which the person can open the survey and submit answers", oldname="date_deadline")
type = fields.Selection([('manually', 'Manually'), ('link', 'Link')], string='Answer Type', default='manually', required=True, readonly=True, oldname="response_type")
state = fields.Selection([
('new', 'Not started yet'),
('skip', 'Partially completed'),
('done', 'Completed')], string='Status', default='new', readonly=True)
test_entry = fields.Boolean(readonly=True)
token = fields.Char('Identification token', default=lambda self: str(uuid.uuid4()), readonly=True, required=True, copy=False)
# Optional Identification data
partner_id = fields.Many2one('res.partner', string='Partner', readonly=True)
email = fields.Char('E-mail', readonly=True)
# Displaying data
last_displayed_page_id = fields.Many2one('survey.page', string='Last displayed page')
# The answers !
user_input_line_ids = fields.One2many('survey.user_input_line', 'user_input_id', string='Answers', copy=True)
# URLs used to display the answers
result_url = fields.Char("Public link to the survey results", related='survey_id.result_url')
print_url = fields.Char("Public link to the empty survey", related='survey_id.print_url')
quizz_score = fields.Float("Score for the quiz", compute="_compute_quizz_score", default=0.0)
@api.depends('user_input_line_ids.quizz_mark')
def _compute_quizz_score(self):
for user_input in self:
user_input.quizz_score = sum(user_input.user_input_line_ids.mapped('quizz_mark'))
_sql_constraints = [
('unique_token', 'UNIQUE (token)', 'A token must be unique!'),
('deadline_in_the_past', 'CHECK (deadline >= date_create)', 'The deadline cannot be in the past')
]
@api.model
def do_clean_emptys(self):
""" Remove empty user inputs that have been created manually
(used as a cronjob declared in data/survey_cron.xml)
"""
an_hour_ago = fields.Datetime.to_string(datetime.datetime.now() - datetime.timedelta(hours=1))
self.search([('type', '=', 'manually'), ('state', '=', 'new'),
('date_create', '<', an_hour_ago)]).unlink()
@api.multi
def action_survey_resend(self):
""" Send again the invitation """
self.ensure_one()
local_context = {
'survey_resent_token': True,
'default_partner_ids': self.partner_id and [self.partner_id.id] or [],
'default_multi_email': self.email or "",
'default_public': 'email_private',
}
return self.survey_id.with_context(local_context).action_send_survey()
@api.multi
def action_view_answers(self):
""" Open the website page with the survey form """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': "View Answers",
'target': 'self',
'url': '%s/%s' % (self.print_url, self.token)
}
@api.multi
def action_survey_results(self):
""" Open the website page with the survey results """
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': "Survey Results",
'target': 'self',
'url': self.result_url
}
class SurveyUserInputLine(models.Model):
_name = 'survey.user_input_line'
_description = 'Survey User Input Line'
_rec_name = 'date_create'
user_input_id = fields.Many2one('survey.user_input', string='User Input', ondelete='cascade', required=True)
question_id = fields.Many2one('survey.question', string='Question', ondelete='restrict', required=True)
page_id = fields.Many2one(related='question_id.page_id', string="Page")
survey_id = fields.Many2one(related='user_input_id.survey_id', string='Survey', store=True)
date_create = fields.Datetime('Create Date', default=fields.Datetime.now, required=True)
skipped = fields.Boolean('Skipped')
answer_type = fields.Selection([
('text', 'Text'),
('number', 'Number'),
('date', 'Date'),
('free_text', 'Free Text'),
('suggestion', 'Suggestion')], string='Answer Type')
value_text = fields.Char('Text answer')
value_number = fields.Float('Numerical answer')
value_date = fields.Datetime('Date answer')
value_free_text = fields.Text('Free Text answer')
value_suggested = fields.Many2one('survey.label', string="Suggested answer")
value_suggested_row = fields.Many2one('survey.label', string="Row answer")
quizz_mark = fields.Float('Score given for this choice')
@api.constrains('skipped', 'answer_type')
def _answered_or_skipped(self):
for uil in self:
if not uil.skipped != bool(uil.answer_type):
raise ValidationError(_('A question cannot be unanswered and skipped'))
@api.constrains('answer_type')
def _check_answer_type(self):
for uil in self:
fields_type = {
'text': bool(uil.value_text),
'number': (bool(uil.value_number) or uil.value_number == 0),
'date': bool(uil.value_date),
'free_text': bool(uil.value_free_text),
'suggestion': bool(uil.value_suggested)
}
if not fields_type.get(uil.answer_type, True):
raise ValidationError(_('The answer must be in the right type'))
def _get_mark(self, value_suggested):
label = self.env['survey.label'].browse(int(value_suggested))
mark = label.quizz_mark if label.exists() else 0.0
return mark
@api.model
def create(self, vals):
value_suggested = vals.get('value_suggested')
if value_suggested:
vals.update({'quizz_mark': self._get_mark(value_suggested)})
return super(SurveyUserInputLine, self).create(vals)
@api.multi
def write(self, vals):
value_suggested = vals.get('value_suggested')
if value_suggested:
vals.update({'quizz_mark': self._get_mark(value_suggested)})
return super(SurveyUserInputLine, self).write(vals)
@api.model
def save_lines(self, user_input_id, question, post, answer_tag):
""" Save answers to questions, depending on question type
If an answer already exists for question and user_input_id, it will be
overwritten (in order to maintain data consistency).
"""
try:
saver = getattr(self, 'save_line_' + question.type)
except AttributeError:
_logger.error(question.type + ": This type of question has no saving function")
return False
else:
saver(user_input_id, question, post, answer_tag)
@api.model
def save_line_free_text(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False,
}
if answer_tag in post and post[answer_tag].strip():
vals.update({'answer_type': 'free_text', 'value_free_text': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
if old_uil:
old_uil.write(vals)
else:
old_uil.create(vals)
return True
@api.model
def save_line_textbox(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip():
vals.update({'answer_type': 'text', 'value_text': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
if old_uil:
old_uil.write(vals)
else:
old_uil.create(vals)
return True
@api.model
def save_line_numerical_box(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip():
vals.update({'answer_type': 'number', 'value_number': float(post[answer_tag])})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
if old_uil:
old_uil.write(vals)
else:
old_uil.create(vals)
return True
@api.model
def save_line_datetime(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip():
vals.update({'answer_type': 'date', 'value_date': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
if old_uil:
old_uil.write(vals)
else:
old_uil.create(vals)
return True
@api.model
def save_line_simple_choice(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
old_uil.sudo().unlink()
if answer_tag in post and post[answer_tag].strip():
vals.update({'answer_type': 'suggestion', 'value_suggested': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
# '-1' indicates 'comment count as an answer' so do not need to record it
if post.get(answer_tag) and post.get(answer_tag) != '-1':
self.create(vals)
comment_answer = post.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer, 'skipped': False, 'value_suggested': False})
self.create(vals)
return True
@api.model
def save_line_multiple_choice(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
old_uil.sudo().unlink()
ca_dict = dict_keys_startswith(post, answer_tag + '_')
comment_answer = ca_dict.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if len(ca_dict) > 0:
for key in ca_dict:
# '-1' indicates 'comment count as an answer' so do not need to record it
if key != ('%s_%s' % (answer_tag, '-1')):
vals.update({'answer_type': 'suggestion', 'value_suggested': ca_dict[key]})
self.create(vals)
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer, 'value_suggested': False})
self.create(vals)
if not ca_dict and not comment_answer:
vals.update({'answer_type': None, 'skipped': True})
self.create(vals)
return True
@api.model
def save_line_matrix(self, user_input_id, question, post, answer_tag):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search([
('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)
])
old_uil.sudo().unlink()
no_answers = True
ca_dict = dict_keys_startswith(post, answer_tag + '_')
comment_answer = ca_dict.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer})
self.create(vals)
no_answers = False
if question.matrix_subtype == 'simple':
for row in question.labels_ids_2:
a_tag = "%s_%s" % (answer_tag, row.id)
if a_tag in ca_dict:
no_answers = False
vals.update({'answer_type': 'suggestion', 'value_suggested': ca_dict[a_tag], 'value_suggested_row': row.id})
self.create(vals)
elif question.matrix_subtype == 'multiple':
for col in question.labels_ids:
for row in question.labels_ids_2:
a_tag = "%s_%s_%s" % (answer_tag, row.id, col.id)
if a_tag in ca_dict:
no_answers = False
vals.update({'answer_type': 'suggestion', 'value_suggested': col.id, 'value_suggested_row': row.id})
self.create(vals)
if no_answers:
vals.update({'answer_type': None, 'skipped': True})
self.create(vals)
return True
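# Illustrative sketch (not part of the upstream module): the dispatch pattern
# shared by validate_question() and save_lines(), which build a handler name
# from the question type ('validate_' + type, respectively 'save_line_' + type)
# and fall back gracefully when no handler exists. The class and question type
# below are stand-ins, not the real survey models.
class _DemoDispatcher(object):
    def handle(self, question_type):
        try:
            handler = getattr(self, 'handle_' + question_type)
        except AttributeError:
            return 'no handler for %s' % question_type
        return handler()
    def handle_textbox(self):
        return 'textbox handled'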
|
hip-odoo/odoo
|
addons/survey/models/survey.py
|
Python
|
agpl-3.0
| 47,761
|
from time import mktime
from datetime import datetime
import feedparser
def getNotify(pastHours):
current = datetime.utcnow()
d = feedparser.parse('http://www.cwb.gov.tw/rss/Data/cwb_warning.xml')
timeRange = 60 * 60 * pastHours
result = []
for e in d['entries']:
#dt = datetime(*e['updated_parsed'][:6])
dt = datetime(*e['published_parsed'][:6])
diffseconds = (current - dt).total_seconds()
        if diffseconds <= timeRange:
result.append(e['summary'])
return result
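# Illustrative sketch (not part of the original script): the time-window test
# that getNotify() applies to each feed entry, separated out so it can be run
# without fetching the CWB feed. The timestamps passed in are plain datetime
# objects supplied by the caller.
def _demo_within_past_hours(published, now, past_hours):
    """Return True when `published` falls within the last `past_hours` hours."""
    return (now - published).total_seconds() <= 60 * 60 * past_hours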
if __name__ == '__main__':
print('in past 1 hours')
print(getNotify(1))
print('in past 3 hours')
print(getNotify(3))
|
taosheng/jarvis
|
feedNotifier/weatherFeed.py
|
Python
|
apache-2.0
| 677
|
from collections import defaultdict
from asyncio import Future
from functools import wraps
def as_future(func):
def wrapper(*args, **kwargs):
x = Future()
res = func(*args, **kwargs)
x.set_result(res)
return x
return wrapper
def for_all_methods(decorator):
def decorate(cls):
        for attr in cls.__dict__:  # there's probably a better way to do this
if callable(getattr(cls, attr)) and not attr.startswith('_'):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate
@for_all_methods(as_future)
class MockRedis(object):
"""Imitate a Redis object so unit tests can run on our Hudson CI server without needing a real
Redis server."""
# The 'Redis' store
redis = defaultdict(dict)
def __init__(self):
"""Initialize the object."""
pass
@classmethod
def _reinit(cls):
cls.redis = defaultdict(dict)
def delete(self, key): # pylint: disable=R0201
"""Emulate delete."""
if key in MockRedis.redis:
del MockRedis.redis[key]
def exists(self, key): # pylint: disable=R0201
"""Emulate get."""
return key in MockRedis.redis
def get(self, key): # pylint: disable=R0201
"""Emulate get."""
# Override the default dict
result = None if key not in MockRedis.redis else MockRedis.redis[key]
return result
def hget(self, hashkey, attribute): # pylint: disable=R0201
"""Emulate hget."""
# Return '' if the attribute does not exist
result = MockRedis.redis[hashkey][attribute] if attribute in MockRedis.redis[hashkey] \
else None
return result
def hgetall(self, hashkey): # pylint: disable=R0201
"""Emulate hgetall."""
return MockRedis.redis[hashkey]
def hlen(self, hashkey): # pylint: disable=R0201
"""Emulate hlen."""
return len(MockRedis.redis[hashkey])
def hmset(self, hashkey, value): # pylint: disable=R0201
"""Emulate hmset."""
# Iterate over every key:value in the value argument.
for attributekey, attributevalue in value.items():
MockRedis.redis[hashkey][attributekey] = attributevalue
def hset(self, hashkey, attribute, value): # pylint: disable=R0201
"""Emulate hset."""
MockRedis.redis[hashkey][attribute] = value
def keys(self, pattern): # pylint: disable=R0201
"""Emulate keys."""
import re
# Make a regex out of pattern. The only special matching character we look for is '*'
regex = '^' + pattern.replace('*', '.*') + '$'
# Find every key that matches the pattern
result = [key for key in MockRedis.redis.keys() if re.match(regex, key)]
return result
def sadd(self, key, value): # pylint: disable=R0201
"""Emulate sadd."""
# Does the set at this key already exist?
if key in MockRedis.redis:
# Yes, add this to the set
MockRedis.redis[key].add(value)
else:
# No, override the defaultdict's default and create the set
MockRedis.redis[key] = set([value])
def smembers(self, key): # pylint: disable=R0201
"""Emulate smembers."""
return MockRedis.redis[key]
def lindex(self, key, index):
if key not in MockRedis.redis:
return None
else:
data = MockRedis.redis[key]
if not isinstance(data, list):
return None
else:
if index > len(data) - 1:
return None
else:
return data[index]
def lpush(self, key, value):
assert isinstance(value, list)
if key in MockRedis.redis:
MockRedis.redis[key] = value + MockRedis.redis[key]
else:
MockRedis.redis[key] = value
def lpop(self, key):
if key not in MockRedis.redis:
return None
elif len(MockRedis.redis[key]) < 1:
return None
else:
return MockRedis.redis[key].pop(0)
def rpush(self, key, value):
assert isinstance(value, list)
if key in MockRedis.redis:
MockRedis.redis[key] += value
else:
MockRedis.redis[key] = value
@as_future
def mock_redis_connection(*args, **kwargs):
return MockRedis()
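# Illustrative sketch (not part of the original test helper): because every
# public MockRedis method is wrapped by as_future(), test code can await the
# mock exactly like an asyncio Redis client. Assumes Python 3.7+ for
# asyncio.run(); the key and field names below are made up.
def _demo_mock_redis_usage():
    import asyncio
    async def scenario():
        redis = await mock_redis_connection()
        await redis.hset('user:1', 'name', 'alice')
        return await redis.hget('user:1', 'name')
    return asyncio.run(scenario())  # -> 'alice'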
|
etataurov/SFX-tracker
|
tests/mocked_redis.py
|
Python
|
mit
| 4,464
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for Runner module
"""
import unittest
import sys
from mock import Mock, patch
from croutera import runner
class RunnerTest(unittest.TestCase):
@patch('croutera.runner.show_help')
def test_it_runs_with_no_argument(self, show_help):
sys.args = []
runner.run()
show_help.assert_called_with()
@patch('croutera.runner.Cli')
@patch('croutera.runner.ArgsParserBuilder')
def test_it_does_not_execute_invalid_commands(self, ArgsParserBuilder, Cli):
command = Mock()
command.valid.return_value = False
Cli.command.return_value = command
runner.run()
assert not command.execute.called
@patch('croutera.runner.Cli')
@patch('croutera.runner.ArgsParserBuilder')
def test_it_does_execute_valid_commands(self, ArgsParserBuilder, Cli):
command = Mock()
command.valid.return_value = True
Cli.command.return_value = command
runner.run()
assert command.execute.called
|
cristianoliveira/croutera
|
tests/test_runner.py
|
Python
|
mit
| 1,056
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import pytest
from django.contrib.auth import get_user_model
from shoop.admin.utils.picotable import (
ChoicesFilter, Column, DateRangeFilter, Filter, MultiFieldTextFilter,
Picotable, RangeFilter, TextFilter
)
from shoop_tests.utils import empty_iterable
from shoop_tests.utils.fixtures import regular_user
class PicoContext(object):
def superuser_display(self, instance): # Test indirect `display` callable
return "very super" if instance.is_superuser else "-"
def instance_id(instance): # Test direct `display` callable
return instance.id
def false_and_true():
return [(False, "False"), (True, "True")]
def get_pico(rf):
return Picotable(
request=rf.get("/"),
columns=[
Column("id", "Id", filter_config=Filter(), display=instance_id),
Column("username", "Username", sortable=False, filter_config=MultiFieldTextFilter(filter_fields=("username", "email"), operator="iregex")),
Column("email", "Email", sortable=False, filter_config=TextFilter()),
Column("is_superuser", "Is Superuser", display="superuser_display", filter_config=ChoicesFilter(choices=false_and_true())),
Column("is_active", "Is Active", filter_config=ChoicesFilter(choices=false_and_true)), # `choices` callable
Column("date_joined", "Date Joined", filter_config=DateRangeFilter())
],
queryset=get_user_model().objects.all(),
context=PicoContext()
)
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_basic(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1})
assert len(data["items"]) == get_user_model().objects.count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_display(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1})
for item in data["items"]:
if item["id"] == admin_user.pk:
assert item["is_superuser"] == "very super"
if item["id"] == regular_user.pk:
assert item["is_superuser"] == "-"
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_sort(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "sort": "-id"})
id = None
for item in data["items"]:
if id is not None:
assert item["id"] <= id, "sorting does not work"
id = item["id"]
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_invalid_sort(rf, admin_user, regular_user):
pico = get_pico(rf)
with pytest.raises(ValueError):
data = pico.get_data({"perPage": 100, "page": 1, "sort": "-email"})
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_choice_filter(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "filters": {"is_superuser": True}})
assert len(data["items"]) == get_user_model().objects.filter(is_superuser=True).count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_text_filter(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "filters": {"email": admin_user.email}})
assert len(data["items"]) == get_user_model().objects.filter(is_superuser=True).count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_multi_filter(rf, admin_user, regular_user):
pico = get_pico(rf)
data = pico.get_data({"perPage": 100, "page": 1, "filters": {"username": "."}})
assert len(data["items"]) == get_user_model().objects.count()
@pytest.mark.django_db
@pytest.mark.usefixtures("regular_user")
def test_picotable_range_filter(rf, regular_user):
pico = get_pico(rf)
one_day = datetime.timedelta(days=1)
assert not empty_iterable(pico.get_data({"perPage": 100, "page": 1, "filters": {"date_joined": {"min": regular_user.date_joined - one_day}}})["items"])
assert not empty_iterable(pico.get_data({"perPage": 100, "page": 1, "filters": {"date_joined": {"max": regular_user.date_joined + one_day}}})["items"])
# TODO: a false test for this
def test_column_is_user_friendly():
with pytest.raises(NameError):
Column(id="foo", title="bar", asdf=True)
|
akx/shoop
|
shoop_tests/admin/test_picotable.py
|
Python
|
agpl-3.0
| 4,680
|
from django.contrib.contenttypes.models import ContentType
import json
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from guardian.shortcuts import get_objects_for_user
from account.models import DepartmentGroup
from backend.tasks import TestConnectionTask
from event.models import NotificationPreferences
from .models import Application, Department, Environment, Server, ServerRole
from task.models import Execution
@login_required
def index(request):
data = {}
executions = Execution.objects.filter(task__application__department_id=request.current_department_id)
if not executions.count():
return redirect(reverse('first_steps_page'))
return render(request, 'page/index.html', data)
@permission_required('core.view_application', (Application, 'id', 'application_id'))
def application_page(request, application_id):
data = {}
data['application'] = get_object_or_404(Application, pk=application_id)
return render(request, 'page/application.html', data)
@permission_required('core.view_environment', (Environment, 'id', 'environment_id'))
def environment_page(request, environment_id):
data = {}
data['environment'] = get_object_or_404(Environment, pk=environment_id)
data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles'))
return render(request, 'page/environment.html', data)
@permission_required('core.view_environment', (Environment, 'servers__id', 'server_id'))
def server_test(request, server_id):
data = {}
data['server'] = get_object_or_404(Server, pk=server_id)
data['task_id'] = TestConnectionTask().delay(server_id).id
return render(request, 'partial/server_test.html', data)
@login_required
def server_test_ajax(request, task_id):
data = {}
task = TestConnectionTask().AsyncResult(task_id)
if task.status == 'SUCCESS':
status, output = task.get()
data['status'] = status
data['output'] = output
elif task.status == 'FAILED':
data['status'] = False
else:
data['status'] = None
return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def first_steps_page(request):
data = {}
return render(request, 'page/first_steps.html', data)
@login_required
def settings_page(request, section='user', subsection='profile'):
data = {}
data['section'] = section
data['subsection'] = subsection
data['department'] = Department(pk=request.current_department_id)
data['on_settings'] = True
handler = '_settings_%s_%s' % (section, subsection)
if section == 'system' and request.user.is_superuser is not True:
return redirect('index')
if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']):
return redirect('index')
if handler in globals():
data = globals()[handler](request, data)
else:
raise Http404
return render(request, 'page/settings.html', data)
def _settings_account_profile(request, data):
data['subsection_template'] = 'partial/account_profile.html'
from account.forms import account_create_form
form = account_create_form('user_profile', request, request.user.id)
form.fields['email'].widget.attrs['readonly'] = True
data['form'] = form
if request.method == 'POST':
if form.is_valid():
form.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_password(request, data):
data['subsection_template'] = 'partial/account_password.html'
from account.forms import account_create_form
form = account_create_form('user_password', request, request.user.id)
data['form'] = form
if request.method == 'POST':
if form.is_valid():
user = form.save(commit=False)
user.set_password(user.password)
user.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_notifications(request, data):
data['subsection_template'] = 'partial/account_notifications.html'
data['applications'] = get_objects_for_user(request.user, 'core.view_application')
content_type = ContentType.objects.get_for_model(Application)
if request.method == 'POST':
for application in data['applications']:
key = 'notification[%s]' % application.id
notification, created = NotificationPreferences.objects.get_or_create(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type,
object_id=application.id)
if notification.is_active != (key in request.POST):
notification.is_active = key in request.POST
notification.save()
messages.success(request, 'Saved')
data['notifications'] = NotificationPreferences.objects.filter(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type.id).values_list('object_id', 'is_active')
data['notifications'] = dict(data['notifications'])
return data
def _settings_department_applications(request, data):
data['subsection_template'] = 'partial/application_list.html'
data['applications'] = Application.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['applications'].count())
return data
def _settings_department_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
from guardian.shortcuts import get_users_with_perms
department = Department.objects.get(pk=request.current_department_id)
data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name')
data['department_user_list'] = True
data['form_name'] = 'user'
return data
def _settings_department_groups(request, data):
data['subsection_template'] = 'partial/group_list.html'
data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id)
return data
def _settings_department_serverroles(request, data):
data['subsection_template'] = 'partial/serverrole_list.html'
data['serverroles'] = ServerRole.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['serverroles'].count())
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_departments(request, data):
data['subsection_template'] = 'partial/department_list.html'
data['departments'] = Department.objects.all()
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related('groups__departmentgroup__department').order_by('name')
data['form_name'] = 'usersystem'
return data
def department_switch(request, id):
department = get_object_or_404(Department, pk=id)
if request.user.has_perm('core.view_department', department):
request.session['current_department_id'] = int(id)
else:
messages.error(request, 'Access forbidden')
return redirect('index')
def handle_403(request):
messages.error(request, 'Access forbidden')
return redirect('index')
|
gunnery/gunnery
|
gunnery/core/views.py
|
Python
|
apache-2.0
| 7,964
|
from mist.mist_job import *
class SimpleStreaming(MistJob, WithStreamingContext, WithPublisher):
def execute(self, parameters):
import time
def takeAndPublish(time, rdd):
taken = rdd.take(11)
self.publisher.publish("-------------------------------------------")
self.publisher.publish("Time: %s" % time)
self.publisher.publish("-------------------------------------------")
self.publisher.publish(str(taken))
ssc = self.streaming_context
type(ssc)
rddQueue = []
for i in range(500):
rddQueue += [ssc.sparkContext.parallelize([j for j in range(1, 1001)], 10)]
# Create the QueueInputDStream and use it do some processing
inputStream = ssc.queueStream(rddQueue)
mappedStream = inputStream.map(lambda x: (x % 10, 1))
reducedStream = mappedStream.reduceByKey(lambda a, b: a + b)
#reducedStream.pprint()
reducedStream.foreachRDD(takeAndPublish)
ssc.start()
time.sleep(15)
ssc.stop(stopSparkContext=False, stopGraceFully=False)
result = "success"
return {"result": result}
|
KineticCookie/mist
|
examples-python/simple_streaming.py
|
Python
|
apache-2.0
| 1,188
|
from jbot import simulator
def diagonal_moving(robot):
robot.clear_messages()
robot.send_message("moving diagonally")
for m in range(0, 100):
robot.move_left(1)
robot.move_up(1)
def directional_moving(robot):
robot.send_message("moving down")
robot.move_down(30)
robot.send_message("moving up")
robot.move_up(60)
robot.send_message("moving left")
robot.move_left(25)
robot.send_message("moving right")
robot.move_right(600)
def main():
my_robot = simulator.get_robot()
directional_moving(my_robot)
diagonal_moving(my_robot)
my_robot.send_message("All Done!")
if __name__ == "__main__":
simulator.simulate(main)
|
frozenjava/RobotSimulator
|
examples/robotFunctionality.py
|
Python
|
gpl-2.0
| 704
|
#!/usr/bin/env python
import os
import sys
from .global_config import GlobalConfig
def FindMetadataBaseDir():
dirs = GlobalConfig.metadataDirs
return BaseDirFromList(dirs, "ENCODE metadata", "METADATA_BASEDIR")
def FindJobmonitorBaseDir():
dirs = [os.getenv("JOBMONITOR_BASEDIR"),
"/project/umw_zhiping_weng/0_jobmonitor/",
"/nfs/0_jobmonitor@bib5/"]
return BaseDirFromList(dirs, "job monitor", "JOBMONITOR_BASEDIR", False)
def FindRepositoryDir():
dirs = [os.getenv("WENG_LAB"),
os.path.expanduser("~/weng-lab")]
return BaseDirFromList(dirs, "weng lab repository", "WENG_LAB", False)
def BaseDirFromList(dirs, dirname, envsearched, exit_on_failure=True):
dirs = [d for d in dirs if d] # remove "None"s
for d in dirs:
if os.path.exists(d):
return d
print("missing %s base folder; searched " % dirname, ";".join(dirs))
print("check directory or %s environment variable." % envsearched)
if exit_on_failure:
sys.exit(1)
return "/tmp"
class Dirs:
metadata_base = FindMetadataBaseDir()
jobmonitor_base = FindJobmonitorBaseDir()
wenglab_base = FindRepositoryDir()
wenglab_metadata = os.path.join(wenglab_base, "metadata")
wenglab_encyclopedia = os.path.join(wenglab_base, "encyclopedia")
encode_base = os.path.join(metadata_base, "encode")
encode_data = os.path.join(encode_base, "data")
encode_json = os.path.join(encode_base, "json")
encode_experiment_json = os.path.join(encode_json, "exps")
encode_project_json = os.path.join(encode_json, "project")
encode_dataset_json = os.path.join(encode_json, "datasets")
encode_validation_data = os.path.join(metadata_base, "tools/ENCODE/validation/encValData")
mean_data = os.path.join(encode_base, "mean")
roadmap_base = os.path.join(metadata_base, "roadmap", "data", "consolidated")
tools = os.path.join(metadata_base, "tools")
genomes = os.path.join(metadata_base, "genome")
gencode_m8 = os.path.join(genomes, "gencode.m8")
encyclopedia = os.path.join(metadata_base, "encyclopedia")
dbsnps = os.path.join(genomes, "dbsnps")
enhancerTracksBase = os.path.join("Enhancer-Prediction-Tracks", "March-2016")
enhancerTracks = os.path.join(encyclopedia,
enhancerTracksBase)
promoterTracksBase = os.path.join("Promoter-Prediction-Tracks")
promoterTracks = os.path.join(encyclopedia,
promoterTracksBase)
targetGeneTracksBase = os.path.join("Target-Gene-Prediction-Tracks",
"convert")
targetGeneTracks = os.path.join(encyclopedia,
targetGeneTracksBase)
job_output_base = os.path.join(jobmonitor_base, "joboutput")
liftOverChainFiles = os.path.join(tools, "ucsc.liftOver")
@staticmethod
def ToolsFnp(fn):
fnp = os.path.join(Dirs.tools, fn)
if not os.path.exists(fnp):
print("WARN: tool missing:", fnp)
return fnp
@staticmethod
def GenomeFnp(fn):
fnp = os.path.join(Dirs.genomes, fn)
if not os.path.exists(fnp):
print("genome file missing:", fnp)
raise Exception("file missing: " + fnp)
return fnp
@staticmethod
def LiftOverChainFnp(fn):
fnp = os.path.join(Dirs.liftOverChainFiles, fn)
if not os.path.exists(fnp):
print("liftOver chain file missing:", fnp)
raise Exception("file missing: " + fnp)
return fnp
class Genome:
hg19_chr_lengths = Dirs.GenomeFnp("hg19.chromInfo")
hg19_2bit = Dirs.GenomeFnp("hg19.2bit")
hg38_chr_lengths = Dirs.GenomeFnp("hg38.chrom.sizes")
hg38_2bit = Dirs.GenomeFnp("hg38.2bit")
GRCh38_chr_lengths = Dirs.GenomeFnp("GRCh38.chrom.sizes")
GRCh38_2bit = Dirs.GenomeFnp("hg38.2bit")
mm9_chr_lengths = Dirs.GenomeFnp("mm9.chromInfo")
mm9_2bit = Dirs.GenomeFnp("mm9.2bit")
mm10_chr_lengths = Dirs.GenomeFnp("mm10.chromInfo")
mm10_minimal_chr_lengths = Dirs.GenomeFnp("mm10-minimal.chromInfo")
mm10_2bit = Dirs.GenomeFnp("mm10.2bit")
human_gencode_tss = Dirs.GenomeFnp("gencode.v19.annotation.tss.bed")
mouse_gencode_m1_tss = Dirs.GenomeFnp("gencode.vM1.annotation.tss.bed")
mouse_gencode_m8_tss = Dirs.GenomeFnp("gencode.m8.annotation.tss.bed")
mouse_gencode_m8_gtf_url = ("ftp://ftp.sanger.ac.uk/pub/gencode/" +
"Gencode_mouse/release_M8/gencode.vM8.annotation.gtf.gz")
mouse_gencode_m8_gff_url = ("ftp://ftp.sanger.ac.uk/pub/gencode/" +
"Gencode_mouse/release_M8/gencode.vM8.annotation.gff3.gz")
hg19_idr_blacklist = Dirs.GenomeFnp("blacklist/hg19/" +
"wgEncodeDacMapabilityConsensusExcludable.bed")
mm9_idr_blacklist = Dirs.GenomeFnp("blacklist/mm9/mm9-blacklist.bed")
mm10_idr_blacklist = Dirs.GenomeFnp("blacklist/mm10/mm10-blacklist.bed")
hg19_mm10_liftOver_chain = Dirs.GenomeFnp("hg19ToMm10.over.chain.gz")
mm9_mm10_liftOver_chain = Dirs.GenomeFnp("mm9ToMm10.over.chain.gz")
@staticmethod
def ChrLenByAssembly(a):
files = {"hg19": Genome.hg19_chr_lengths,
"hg38": Genome.hg38_chr_lengths,
"GRCh38": Genome.GRCh38_chr_lengths,
"mm9": Genome.mm9_chr_lengths,
"mm10": Genome.mm10_chr_lengths,
"mm10-minimal": Genome.mm10_chr_lengths}
return files[a]
@staticmethod
def BlacklistByAssembly(a):
files = {"hg19": Genome.hg19_idr_blacklist,
"mm9": Genome.mm9_idr_blacklist,
"mm10": Genome.mm10_idr_blacklist,
"mm10-minimal": Genome.mm10_idr_blacklist}
return files[a]
@staticmethod
def GencodeTSSByAssembly(a):
files = {"hg19": Genome.human_gencode_tss,
"mm9": Genome.mouse_gencode_m1_tss,
"mm10": Genome.mouse_gencode_m8_tss,
"mm10-minimal": Genome.mouse_gencode_m8_tss}
return files[a]
@staticmethod
def TwoBitByAssembly(a):
files = {"hg19": Genome.hg19_2bit,
"mm9": Genome.mm9_2bit,
"mm10": Genome.mm10_2bit,
"mm10-minimal": Genome.mm10_2bit}
return files[a]
class Tools:
ASdnaseTrack = Dirs.ToolsFnp("ucsc.v287/as/dnase.track.as")
CLIPper = Dirs.ToolsFnp("clipper/bin/clipper")
bedClip = Dirs.ToolsFnp("ucsc.v287/bedClip")
bedGraphToBigWig = Dirs.ToolsFnp("ucsc.v287/bedGraphToBigWig")
bedToBigBed = Dirs.ToolsFnp("ucsc.v287/bedToBigBed")
bedtools = "bedtools"
bigWigAverageOverBed = Dirs.ToolsFnp("ucsc.v287/bigWigAverageOverBed")
bigWigToBedGraph = Dirs.ToolsFnp("ucsc.v287/bigWigToBedGraph")
ceqlogo = Dirs.ToolsFnp("meme_4.10.2/bin/ceqlogo")
dfilter = Dirs.ToolsFnp("DFilter1.6/run_dfilter.sh")
fastaCenter = Dirs.ToolsFnp("meme_4.10.2/bin/fasta-center")
fimo = Dirs.ToolsFnp("meme_4.10.2/bin/fimo")
headRest = os.path.join(Dirs.ToolsFnp("ucsc.v287"), "headRest")
liftOver = os.path.join(Dirs.ToolsFnp("ucsc.v287"), "liftOver")
meme = Dirs.ToolsFnp("meme_4.10.2/bin/meme")
# randomLines = os.path.join(Dirs.ToolsFnp("ucsc.v287"), "randomLines")
randomLines = Dirs.ToolsFnp("randomLines")
tomtom = Dirs.ToolsFnp("meme_4.10.2/bin/tomtom")
twoBitToFa = Dirs.ToolsFnp("ucsc.v287/twoBitToFa")
validateFiles = Dirs.ToolsFnp("ENCODE/validation/validateFiles")
wigToBigWig = Dirs.ToolsFnp("ucsc.v287/wigToBigWig")
wiggleTools = Dirs.ToolsFnp("wiggletools.static.git.7579e66")
class Urls:
base = "https://www.encodeproject.org"
class Webservice:
urlBase = "http://bib7.umassmed.edu/ws/metadata/"
jobmonitorBase = "http://bib7.umassmed.edu/ws/job_monitor/"
localhostJMBase = "http://127.0.0.1:9191/job_monitor/"
localhostBase = "http://127.0.0.1:9191/metadata/"
rsyncBase = "http://bib7.umassmed.edu:%d/rsync/"
@staticmethod
def rsyncUrl(localpath, remotepath, rsync_port):
return (Webservice.rsyncBase + "/request_rsync?local-path=%s&remote-path=%s") % (rsync_port,
localpath,
remotepath)
@staticmethod
def localExp(encodeID, localhost=False):
if localhost:
return os.path.join(Webservice.localhostBase, "exp", encodeID)
return os.path.join(Webservice.urlBase, "exp", encodeID)
@staticmethod
def localUrl(uri):
return Webservice.localhostBase + uri
@staticmethod
def JobMonitorPutUrl(uri, localhost=False):
return os.path.join(Webservice.localhostJMBase
if localhost else Webservice.jobmonitorBase,
"output", uri)
@staticmethod
def JobMonitorSelectUrl(localhost=False):
return os.path.join(Webservice.localhostJMBase
if localhost else Webservice.jobmonitorBase,
"select")
@staticmethod
def JobMonitorActionUrl(localhost=False):
return os.path.join(Webservice.localhostJMBase
if localhost else Webservice.jobmonitorBase,
"db_act")
@staticmethod
def JobMonitorUrl(uri, localhost=False):
return os.path.join(Webservice.localhostJMBase
if localhost else Webservice.jobmonitorBase, uri)
class AllHumanDataset:
url = (Urls.base + "/search/?type=Experiment" +
"&replicates.library.biosample.donor.organism.scientific_name=Homo" +
"+sapiens&limit=all&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "all_human.json")
species = "human"
chr_lengths = Genome.hg19_chr_lengths
twoBit = Genome.hg19_2bit
genome = "hg19"
assemblies = ["hg19", "GRCh38"]
webserviceAll = os.path.join(Webservice.urlBase, "encode/all_human/")
webserviceTF = os.path.join(Webservice.urlBase, "encode/all_human/chipseq/tf")
webserviceHistone = os.path.join(Webservice.urlBase, "encode/all_human/chipseq/histone")
webserviceDNase = os.path.join(Webservice.urlBase, "encode/all_human/dnase")
webserviceMNase = os.path.join(Webservice.urlBase, "encode/all_human/mnase")
webserviceMethylation = os.path.join(Webservice.urlBase, "encode/all_human/methylation")
webserviceDnaseChipHg19 = os.path.join(Webservice.urlBase, "encode/all_human/dnaseAndChip/hg19")
webserviceAllBeds = os.path.join(Webservice.urlBase, "encode/all_human/beds")
webservice_eCLIP = os.path.join(Webservice.urlBase, "encode/all_human/eCLIP")
Webservice_biosample_term_name = os.path.join(Webservice.urlBase,
"encode/biosample_term_name/")
class RoadmapConsolidatedDataset:
url = (Urls.base + "/search/?type=ReferenceEpigenome" +
"&organism.scientific_name=Homo+sapiens&lab.title=Anshul" +
"+Kundaje%2C+Stanford&limit=all&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "roadmap.consolidated.json")
species = "human"
chr_lengths = Genome.hg19_chr_lengths
twoBit = Genome.hg19_2bit
genome = "hg19"
assemblies = ["hg19", "GRCh38"]
class RoadmapDataset:
url = (Urls.base + "/search/?searchTerm=roadmap&type=Experiment" +
"&award.project=Roadmap&limit=all&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "roadmap.json")
species = "human"
chr_lengths = Genome.hg19_chr_lengths
twoBit = Genome.hg19_2bit
genome = "hg19"
assemblies = ["hg19", "GRCh38"]
class AllMouseDataset:
url = (Urls.base + "/search/?type=experiment" +
"&replicates.library.biosample.donor.organism.scientific_name=Mus%20musculus" +
"&limit=all&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "all_mouse.json")
species = "mouse"
assemblies = ["mm9", "mm10-minimal", "mm10"]
webserviceAll = os.path.join(Webservice.urlBase, "encode/all_mouse/")
webserviceDnaseChip = os.path.join(Webservice.urlBase, "encode/all_mouse/dnaseAndChip")
webserviceDnaseChipMm9 = os.path.join(Webservice.urlBase, "encode/all_mouse/dnaseAndChip/mm9")
webserviceDnaseChipMm10 = os.path.join(Webservice.urlBase, "encode/all_mouse/dnaseAndChip/mm10")
webserviceTF = os.path.join(Webservice.urlBase, "encode/all_mouse/chipseq/tf")
webserviceHistone = os.path.join(Webservice.urlBase, "encode/all_mouse/chipseq/histone")
webserviceDNase = os.path.join(Webservice.urlBase, "encode/all_mouse/dnase")
webserviceMethylation = os.path.join(Webservice.urlBase, "encode/all_mouse/methylation")
webserviceAllBeds = os.path.join(Webservice.urlBase, "encode/all_mouse/beds")
webservice_eCLIP = os.path.join(Webservice.urlBase, "encode/all_mouse/eCLIP")
class smallRNAdataset:
url = ("https://www.encodeproject.org/search/?type=experiment" +
"&assay_term_name=RNA-seq" +
"&replicates.library.biosample.donor.organism.scientific_name=Homo+sapiens" +
"&replicates.library.size_range=%3C200&status=released&limit=all" +
"&files.file_type=fastq&files.read_length=101&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "junko.smallRNA.json")
species = "NA"
class cricketDataset:
url = ("https://www.encodeproject.org/search/?type=project" +
"&lab.title=Zhiping%20Weng,%20UMass&status=released&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "cricket.json")
species = "NA"
class encode3DNaseHuman:
# this is a (authenticated) search for datasets using the "'award.rfa=ENCODE3' trick"
# to select for ENCODE3 data only: all human ENCODE3 DNase-seq datasets from John Stam's lab.
url = ("https://www.encodeproject.org/search/?type=experiment&award.rfa=ENCODE3" +
"&assay_term_name=DNase-seq" +
"&replicates.library.biosample.donor.organism.scientific_name=Homo%20sapiens" +
"&limit=all&lab.title=John%20Stamatoyannopoulos,%20UW&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "encode3_human_dnase.json")
species = "human"
class RoadmapFromEncode:
url = ("https://www.encodeproject.org/search/?award.project=Roadmap" +
"&type=experiment&limit=all&format=json")
jsonFnp = os.path.join(Dirs.encode_json, "datasets", "roadmap.json")
species = "NA"
class ENCODE3MouseForChromHMM:
# all histone mods for mouse as defined in listed projects.
webserviceAll = os.path.join(Webservice.urlBase,
"encode/byDataset/ENCSR215KPY/ENCSR557UVG/" +
"ENCSR837UJN/ENCSR846VTS/ENCSR647ZZB/ENCSR392ERD")
class Datasets:
all_human = AllHumanDataset
all_mouse = AllMouseDataset
roadmap = RoadmapDataset
smallRNA = smallRNAdataset
cricket = cricketDataset
encode3DNaseHuman = encode3DNaseHuman
roadmapFromEncode = RoadmapFromEncode
ENCODE3MouseForChromHMM = ENCODE3MouseForChromHMM
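# --- Illustrative lookups (not part of the original module) ---
# A sketch of how the per-assembly helpers above are typically consulted. Importing
# this module already requires the metadata base directory (or the METADATA_BASEDIR /
# WENG_LAB environment variables) to point at a real checkout, so this only runs in
# such an environment; "ENCSR000AAA" is a placeholder accession.
if __name__ == '__main__':
    print(Genome.ChrLenByAssembly("hg19"))      # chromosome-length file for hg19
    print(Genome.TwoBitByAssembly("mm10"))      # 2bit sequence for mm10
    print(Tools.bedToBigBed)                    # resolved path under the tools dir
    print(Webservice.localExp("ENCSR000AAA", localhost=True))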
|
weng-lab/SnoPlowPy
|
snoPlowPy/files_and_paths.py
|
Python
|
mit
| 15,275
|
import os
import tempfile
import unittest
import py_dep_analysis as pda
class TestPyDepAnalysis(unittest.TestCase):
def create_tmp_file(self, path: str, content: str):
f = open(path, "w")
f.write(content)
f.close()
def test_full_module_path(self):
self.assertEqual(pda._full_module_path("aa.bb.cc", "__init__.py"), "aa.bb.cc")
self.assertEqual(pda._full_module_path("aa.bb.cc", "dd.py"), "aa.bb.cc.dd")
self.assertEqual(pda._full_module_path("", "dd.py"), "dd")
def test_bazel_path_to_module_path(self):
self.assertEqual(
pda._bazel_path_to_module_path("//python/ray/rllib:xxx/yyy/dd"),
"ray.rllib.xxx.yyy.dd",
)
self.assertEqual(
pda._bazel_path_to_module_path("python:ray/rllib/xxx/yyy/dd"),
"ray.rllib.xxx.yyy.dd",
)
self.assertEqual(
pda._bazel_path_to_module_path("python/ray/rllib:xxx/yyy/dd"),
"ray.rllib.xxx.yyy.dd",
)
def test_file_path_to_module_path(self):
self.assertEqual(
pda._file_path_to_module_path("python/ray/rllib/env/env.py"),
"ray.rllib.env.env",
)
self.assertEqual(
pda._file_path_to_module_path("python/ray/rllib/env/__init__.py"),
"ray.rllib.env",
)
def test_import_line_continuation(self):
graph = pda.DepGraph()
graph.ids["ray"] = 0
with tempfile.TemporaryDirectory() as tmpdir:
src_path = os.path.join(tmpdir, "continuation1.py")
self.create_tmp_file(
src_path,
"""
import ray.rllib.env.\\
mock_env
b = 2
""",
)
pda._process_file(graph, src_path, "ray")
self.assertEqual(len(graph.ids), 2)
print(graph.ids)
            # Should pick up the full module name.
self.assertEqual(graph.ids["ray.rllib.env.mock_env"], 1)
self.assertEqual(graph.edges[0], {1: True})
def test_import_line_continuation_parenthesis(self):
graph = pda.DepGraph()
graph.ids["ray"] = 0
with tempfile.TemporaryDirectory() as tmpdir:
src_path = os.path.join(tmpdir, "continuation1.py")
self.create_tmp_file(
src_path,
"""
from ray.rllib.env import (ClassName,
module1, module2)
b = 2
""",
)
pda._process_file(graph, src_path, "ray")
self.assertEqual(len(graph.ids), 2)
print(graph.ids)
            # Should pick up the full module name without the trailing (.
self.assertEqual(graph.ids["ray.rllib.env"], 1)
self.assertEqual(graph.edges[0], {1: True})
def test_from_import_file_module(self):
graph = pda.DepGraph()
graph.ids["ray"] = 0
with tempfile.TemporaryDirectory() as tmpdir:
src_path = "multi_line_comment_3.py"
self.create_tmp_file(
os.path.join(tmpdir, src_path),
"""
from ray.rllib.env import mock_env
a = 1
b = 2
""",
)
# Touch ray/rllib/env/mock_env.py in tmpdir,
# so that it looks like a module.
module_dir = os.path.join(tmpdir, "python", "ray", "rllib", "env")
os.makedirs(module_dir, exist_ok=True)
f = open(os.path.join(module_dir, "mock_env.py"), "w")
f.write("print('hello world!')")
            f.close()
pda._process_file(graph, src_path, "ray", _base_dir=tmpdir)
self.assertEqual(len(graph.ids), 2)
self.assertEqual(graph.ids["ray.rllib.env.mock_env"], 1)
# Only 1 edge from ray to ray.rllib.env.mock_env
# ray.tune.tune is ignored.
self.assertEqual(graph.edges[0], {1: True})
def test_from_import_class_object(self):
graph = pda.DepGraph()
graph.ids["ray"] = 0
with tempfile.TemporaryDirectory() as tmpdir:
src_path = "multi_line_comment_3.py"
self.create_tmp_file(
os.path.join(tmpdir, src_path),
"""
from ray.rllib.env import MockEnv
a = 1
b = 2
""",
)
# Touch ray/rllib/env.py in tmpdir,
# MockEnv is a class on env module.
module_dir = os.path.join(tmpdir, "python", "ray", "rllib")
os.makedirs(module_dir, exist_ok=True)
f = open(os.path.join(module_dir, "env.py"), "w")
f.write("print('hello world!')")
            f.close()
pda._process_file(graph, src_path, "ray", _base_dir=tmpdir)
self.assertEqual(len(graph.ids), 2)
# Should depend on env.py instead.
self.assertEqual(graph.ids["ray.rllib.env"], 1)
            # Only 1 edge from ray to ray.rllib.env
# ray.tune.tune is ignored.
self.assertEqual(graph.edges[0], {1: True})
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
ray-project/ray
|
ci/travis/py_dep_analysis_test.py
|
Python
|
apache-2.0
| 4,960
|
import tests.periodicities.period_test as per
per.buildModel((5 , 'S' , 400));
|
antoinecarme/pyaf
|
tests/periodicities/Second/Cycle_Second_400_S_5.py
|
Python
|
bsd-3-clause
| 81
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import math
import unittest
from tracing.value import histogram_serializer
from tracing.value.diagnostics import breakdown
from tracing.value.diagnostics import diagnostic
from tracing.value import histogram_deserializer
class BreakdownUnittest(unittest.TestCase):
def testRoundtrip(self):
bd = breakdown.Breakdown()
bd.Set('one', 1)
bd.Set('m1', -1)
bd.Set('inf', float('inf'))
bd.Set('nun', float('nan'))
bd.Set('ninf', float('-inf'))
bd.Set('long', 2**65)
d = bd.AsDict()
clone = diagnostic.Diagnostic.FromDict(d)
self.assertEqual(json.dumps(d), json.dumps(clone.AsDict()))
self.assertEqual(clone.Get('one'), 1)
self.assertEqual(clone.Get('m1'), -1)
self.assertEqual(clone.Get('inf'), float('inf'))
self.assertTrue(math.isnan(clone.Get('nun')))
self.assertEqual(clone.Get('ninf'), float('-inf'))
self.assertEqual(clone.Get('long'), 2**65)
def testDeserialize(self):
d = histogram_deserializer.HistogramDeserializer([
'a', 'b', 'c', [0, 1, 2], 'colors'])
b = breakdown.Breakdown.Deserialize([4, 3, 1, 2, 3], d)
self.assertEqual(b.color_scheme, 'colors')
self.assertEqual(b.Get('a'), 1)
self.assertEqual(b.Get('b'), 2)
self.assertEqual(b.Get('c'), 3)
def testSerialize(self):
s = histogram_serializer.HistogramSerializer()
b = breakdown.Breakdown.FromEntries({'a': 10, 'b': 20})
self.assertEqual(b.Serialize(s), [0, 3, 10, 20])
self.assertEqual(s.GetOrAllocateId(''), 0)
self.assertEqual(s.GetOrAllocateId('a'), 1)
self.assertEqual(s.GetOrAllocateId('b'), 2)
self.assertEqual(s.GetOrAllocateId([1, 2]), 3)
|
catapult-project/catapult
|
tracing/tracing/value/diagnostics/breakdown_unittest.py
|
Python
|
bsd-3-clause
| 1,818
|
########################################################################
#
# File Name: HTMLHeadingElement
#
# Documentation: http://docs.4suite.com/4DOM/HTMLHeadingElement.html
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLHeadingElement(HTMLElement):
def __init__(self, ownerDocument, nodeName):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_align(self):
return string.capitalize(self.getAttribute("ALIGN"))
def _set_align(self, value):
self.setAttribute("ALIGN", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"align" : _get_align
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"align" : _set_align
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
iCarto/siga
|
extScripting/scripts/jython/Lib/xml/dom/html/HTMLHeadingElement.py
|
Python
|
gpl-3.0
| 1,390
|
# -*- coding: utf-8 -*-
"""
InformationMachineAPILib.Models.Cart
"""
from InformationMachineAPILib.APIHelper import APIHelper
from InformationMachineAPILib.Models.CartItem import CartItem
class Cart(object):
"""Implementation of the 'Cart' model.
TODO: type model description here.
Attributes:
cart_id (string): TODO: type description here.
cart_name (string): TODO: type description here.
cart_items (list of CartItem): TODO: type description here.
created_at (string): TODO: type description here.
updated_at (string): TODO: type description here.
"""
def __init__(self,
**kwargs):
"""Constructor for the Cart class
Args:
**kwargs: Keyword Arguments in order to initialise the
object. Any of the attributes in this object are able to
be set through the **kwargs of the constructor. The values
that can be supplied and their types are as follows::
cart_id -- string -- Sets the attribute cart_id
cart_name -- string -- Sets the attribute cart_name
cart_items -- list of CartItem -- Sets the attribute cart_items
created_at -- string -- Sets the attribute created_at
updated_at -- string -- Sets the attribute updated_at
"""
# Set all of the parameters to their default values
self.cart_id = None
self.cart_name = None
self.cart_items = None
self.created_at = None
self.updated_at = None
# Create a mapping from API property names to Model property names
replace_names = {
"cart_id": "cart_id",
"cart_name": "cart_name",
"cart_items": "cart_items",
"created_at": "created_at",
"updated_at": "updated_at",
}
# Parse all of the Key-Value arguments
if kwargs is not None:
for key in kwargs:
# Only add arguments that are actually part of this object
if key in replace_names:
setattr(self, replace_names[key], kwargs[key])
# Other objects also need to be initialised properly
if "cart_items" in kwargs:
# Parameter is an array, so we need to iterate through it
self.cart_items = list()
for item in kwargs["cart_items"]:
self.cart_items.append(CartItem(**item))
def resolve_names(self):
"""Creates a dictionary representation of this object.
This method converts an object to a dictionary that represents the
format that the model should be in when passed into an API Request.
Because of this, the generated dictionary may have different
property names to that of the model itself.
Returns:
dict: The dictionary representing the object.
"""
# Create a mapping from Model property names to API property names
replace_names = {
"cart_id": "cart_id",
"cart_name": "cart_name",
"cart_items": "cart_items",
"created_at": "created_at",
"updated_at": "updated_at",
}
retval = dict()
return APIHelper.resolve_names(self, replace_names, retval)
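# --- Usage sketch (not part of the generated model) ---
# Shows how a Cart is typically built from an API-style payload and converted back
# with resolve_names(); every value below is made up.
if __name__ == "__main__":
    payload = {
        "cart_id": "cart-123",
        "cart_name": "weekly shop",
        "cart_items": [],            # each entry would normally be a CartItem dict
        "created_at": "2016-01-01T00:00:00Z",
        "updated_at": "2016-01-02T00:00:00Z",
    }
    cart = Cart(**payload)
    print(cart.cart_name)            # "weekly shop"
    print(cart.resolve_names())      # dict keyed by the API property names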
|
information-machine/information-machine-api-python
|
InformationMachineAPILib/Models/Cart.py
|
Python
|
mit
| 3,435
|
from pig_util import outputSchema
COFFEE_SNOB_PHRASES = set((\
'espresso', 'cappucino', 'macchiato', 'latte', 'cortado', 'pour over', 'barista',
'flat white', 'siphon pot', 'woodneck', 'french press', 'arabica', 'chemex',
'frothed', 'la marzocco', 'mazzer', 'la pavoni', 'nespresso', 'rancilio silvia', 'hario',
'intelligentsia', 'counter culture', 'barismo', 'sightglass', 'blue bottle', 'stumptown',
'single origin', 'coffee beans', 'coffee grinder', 'lavazza', 'coffeegeek'\
))
@outputSchema('is_coffee_tweet:int')
def is_coffee_tweet(text):
"""
Is the given text indicative of coffee snobbery?
"""
if not text:
return 0
    lowered = text.lower()
    # Substring check so multi-word phrases such as 'pour over' can match as well.
    return 1 if any(phrase in lowered for phrase in COFFEE_SNOB_PHRASES) else 0
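# --- Local sanity check (not part of the original UDF) ---
# Assumes the module can be imported outside Pig (pig_util must be importable);
# the sample tweets are made up.
if __name__ == '__main__':
    assert is_coffee_tweet('just dialed in my espresso grinder') == 1
    assert is_coffee_tweet('stuck in traffic again') == 0
    assert is_coffee_tweet(None) == 0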
|
pombredanne/jkarn-pub-test
|
udfs/python/coffee.py
|
Python
|
apache-2.0
| 813
|
# coding=utf-8
# qingfanyi - Chinese to English translation tool
# Copyright (C) 2016 Rohan McGovern <rohan@mcgovern.id.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
import qingfanyi
@contextmanager
def debug_enabled():
oldval = qingfanyi._DEBUG
try:
qingfanyi._DEBUG = True
yield
finally:
qingfanyi._DEBUG = oldval
def test_debug_can_output_bytes():
with debug_enabled():
qingfanyi.debug(b'test message: \x00\x01\xff\xfe')
assert 'did not crash'
def test_debug_can_output_unicode():
with debug_enabled():
qingfanyi.debug(u'test message: 你好')
assert 'did not crash'
def test_debug_can_invoke():
with debug_enabled():
qingfanyi.debug(lambda: 'test message: foo!')
assert 'did not crash'
|
rohanpm/qingfanyi
|
tests/test_debug.py
|
Python
|
gpl-3.0
| 1,415
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import testtools
from neutron.db import api as db_api
# Import all data models
from neutron.db.migration.models import head # noqa
from neutron.db import model_base
from neutron.tests import base
from neutron import wsgi
class ExpectedException(testtools.ExpectedException):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if super(ExpectedException, self).__exit__(exc_type,
exc_value,
traceback):
self.exception = exc_value
return True
return False
def create_request(path, body, content_type, method='GET',
query_string=None, context=None):
if query_string:
url = "%s?%s" % (path, query_string)
else:
url = path
req = wsgi.Request.blank(url)
req.method = method
req.headers = {}
req.headers['Accept'] = content_type
req.body = body
if context:
req.environ['neutron.context'] = context
return req
class SqlFixture(fixtures.Fixture):
# flag to indicate that the models have been loaded
_TABLES_ESTABLISHED = False
def setUp(self):
super(SqlFixture, self).setUp()
# Register all data models
engine = db_api.get_engine()
if not SqlFixture._TABLES_ESTABLISHED:
model_base.BASEV2.metadata.create_all(engine)
SqlFixture._TABLES_ESTABLISHED = True
def clear_tables():
with engine.begin() as conn:
for table in reversed(
model_base.BASEV2.metadata.sorted_tables):
conn.execute(table.delete())
self.addCleanup(clear_tables)
class SqlTestCase(base.BaseTestCase):
def setUp(self):
super(SqlTestCase, self).setUp()
self.useFixture(SqlFixture())
class WebTestCase(SqlTestCase):
fmt = 'json'
def setUp(self):
super(WebTestCase, self).setUp()
json_deserializer = wsgi.JSONDeserializer()
self._deserializers = {
'application/json': json_deserializer,
}
def deserialize(self, response):
ctype = 'application/%s' % self.fmt
data = self._deserializers[ctype].deserialize(response.body)['body']
return data
def serialize(self, data):
ctype = 'application/%s' % self.fmt
result = wsgi.Serializer().serialize(data, ctype)
return result
class SubDictMatch(object):
def __init__(self, sub_dict):
self.sub_dict = sub_dict
def __eq__(self, super_dict):
return all(item in super_dict.items()
for item in self.sub_dict.items())
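# --- Behaviour sketch (not part of the original helpers) ---
# SubDictMatch compares equal to any dict that is a superset of the expected
# sub-dict, which is handy in mock call assertions; the dicts below are made up.
if __name__ == '__main__':
    assert SubDictMatch({'a': 1}) == {'a': 1, 'b': 2}
    assert not (SubDictMatch({'a': 1}) == {'a': 2, 'b': 2})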
|
yuewko/neutron
|
neutron/tests/unit/testlib_api.py
|
Python
|
apache-2.0
| 3,380
|
#!/usr/bin/env python
import re, os, os.path, sys, tempfile
import subprocess
# This script dumps its stdin into an Emacs buffer (using emacsclient).
# cat foo | nom # creates a buffer called *nom*
# cat foo | nom python # creates a buffer called *nom* in python-mode
# cat foo | nom python yo # creates a buffer called *nom-yo* in python-mode
esc = re.compile(r'["\\]')
q = lambda s: esc.sub(lambda m: '\\'+m.group(0), s)
if __name__ == '__main__':
fd, fname = tempfile.mkstemp()
subprocess.Popen(["dd"], stdout=fd).wait()
os.close(fd)
name = ""
mode = ""
if len(sys.argv) > 1:
mode = "(" + sys.argv[1] + "-mode)"
if len(sys.argv) > 2:
name = "-" + sys.argv[2]
ff = """(progn
(switch-to-buffer (generate-new-buffer "*nom%s*"))
(insert-file-contents "%s")%s)""" % (q(name), q(fname), mode)
subprocess.Popen(['emacsclient', '-n', '-e', ff]).wait()
os.unlink(fname)
|
RichardBarrell/snippets
|
nom.py
|
Python
|
isc
| 925
|
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import json
import re
import datetime
import pandas as pd
sys.path.insert(0,'../')
from elasticsearch import Elasticsearch, ConnectionTimeout
from models.article import Article
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=['http://controcurator.org:80/ess'])
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
query = {
"query": {
"bool": {
"must": [
{
"constant_score": {
"filter": {
"missing": {
"field": "features.controversy.polarity"
}
}
}
}
],
"must_not": [
{
"constant_score": {
"filter": {
"missing": {
"field": "features.controversy.value"
}
}
}
}
]
}
},
"from": 0,
"size": 1000
}
res = es.search(index="controcurator", doc_type="article", body=query)
data = res['hits']['hits']
directory = '/'
results = {}
for hit in data:
if 'comments' not in hit['_source']:
print "SKIPPING"
continue
print hit['_id']
features = hit['_source']['features']
comments = hit['_source']['comments']
if len([c for c in comments if 'sentiment' not in c]) > 0:
continue
features['controversy']['openness'] = len(comments)
features['controversy']['actors'] = len(set(map(lambda x: x['author'], comments)))
# get datetime of all comments
times = list(map(lambda x: datetime.datetime.strptime(x['timestamp'][:-6], "%Y-%m-%dT%H:%M:%S"), comments))
# add article post time to times
times.append(datetime.datetime.strptime(hit['_source']['published'][:-6], "%Y-%m-%dT%H:%M:%S"))
times = sorted(times)
#r['times'] = times
features['controversy']['duration'] = (times[-1] - times[0]).days + 1
pos = list(filter(lambda x: x > 0, map(lambda x: x['sentiment']['sentiment'], comments)))
neg = list(filter(lambda x: x < 0, map(lambda x: x['sentiment']['sentiment'], comments)))
pos = sum(pos)
neg = sum(map(abs, neg))
intensity = list(map(lambda x: x['sentiment']['intensity'], comments))
# compute polarity
if neg <= 0 or pos <= 0:
balance = 0
else:
balance = float(neg) / pos if pos > neg else float(pos) / neg
features['controversy']['polarity'] = balance
features['controversy']['emotion'] = sum(intensity) / len(intensity)
Article.get(id=hit['_id']).update(features=features)
#df = pd.DataFrame(results).T
#df.to_csv('articlestats.csv')
|
ControCurator/controcurator
|
cronjobs/exportArticleStats.py
|
Python
|
mit
| 2,548
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import subprocess
import sys
import tempfile
from subprocess import CalledProcessError, check_output
from programs.subprocutils import which
class EmptyTempFile(object):
def __init__(self, prefix=None, dir=None, closed=True):
self.file, self.name = tempfile.mkstemp(prefix=prefix, dir=dir)
if closed:
os.close(self.file)
self.closed = closed
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
os.remove(self.name)
def close(self):
if not self.closed:
os.close(self.file)
self.closed = True
def fileno(self):
return self.file
def is_vcs(dirpath): # type: (str) -> bool
dot_git = os.path.join(dirpath, ".git")
if which("git") and sys.platform != "cygwin":
if os.path.exists(dot_git) and os.path.isdir(dot_git):
return True
try:
with open(os.devnull, "w") as devnull:
output = check_output(
["git", "rev-parse", "--is-inside-work-tree"],
cwd=dirpath,
stderr=devnull,
).decode("utf-8")
return output.strip() == "true"
except CalledProcessError:
pass
return False
def is_dirty(dirpath): # type: (str) -> bool
# Ignore any changes under these paths for the purposes of forcing a rebuild
# of Buck itself.
IGNORE_PATHS = ["test"]
IGNORE_PATHS_RE_GROUP = "|".join([re.escape(e) for e in IGNORE_PATHS])
IGNORE_PATHS_RE = re.compile("^.. (?:" + IGNORE_PATHS_RE_GROUP + ")")
if not is_vcs(dirpath):
return False
output = check_output(["git", "status", "--porcelain"], cwd=dirpath).decode("utf-8")
output = "\n".join(
[line for line in output.splitlines() if not IGNORE_PATHS_RE.search(line)]
)
return bool(output.strip())
def get_vcs_revision(dirpath): # type: (str) -> str
output = check_output(["git", "rev-parse", "HEAD", "--"], cwd=dirpath).decode(
"utf-8"
)
return output.splitlines()[0].strip()
def get_vcs_revision_timestamp(dirpath): # type: (str) -> str
return (
check_output(
["git", "log", "--pretty=format:%ct", "-1", "HEAD", "--"], cwd=dirpath
)
.decode("utf-8")
.strip()
)
def get_clean_buck_version(dirpath, allow_dirty=False): # type: (str, bool) -> str
if not is_vcs(dirpath):
return "N/A"
if allow_dirty or not is_dirty(dirpath):
return get_vcs_revision(dirpath)
def get_dirty_buck_version(dirpath): # type: (str) -> str
git_tree_in = (
check_output(
["git", "log", "-n1", "--pretty=format:%T", "HEAD", "--"], cwd=dirpath
)
.decode("utf-8")
.strip()
)
with EmptyTempFile(prefix="buck-git-index") as index_file:
new_environ = os.environ.copy()
new_environ["GIT_INDEX_FILE"] = index_file.name
subprocess.check_call(
["git", "read-tree", git_tree_in], cwd=dirpath, env=new_environ
)
subprocess.check_call(["git", "add", "-A"], cwd=dirpath, env=new_environ)
git_tree_out = (
check_output(["git", "write-tree"], cwd=dirpath, env=new_environ)
.decode("utf-8")
.strip()
)
with EmptyTempFile(prefix="buck-version-uid-input", closed=False) as uid_input:
subprocess.check_call(
["git", "ls-tree", "--full-tree", git_tree_out],
cwd=dirpath,
stdout=uid_input,
)
return (
check_output(["git", "hash-object", uid_input.name], cwd=dirpath)
.decode("utf-8")
.strip()
)
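# --- Usage sketch (not part of the original module) ---
# How the helpers above are typically combined to derive a version string for a
# checkout: use the commit hash when the working tree is clean, otherwise hash the
# dirty tree. The path below is a placeholder and git must be on the PATH.
if __name__ == "__main__":
    repo_dir = "."  # hypothetical path to a Buck git checkout
    version = get_clean_buck_version(repo_dir)
    if version is None:
        version = get_dirty_buck_version(repo_dir)
    print("buck version:", version)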
|
facebook/buck
|
programs/buck_version.py
|
Python
|
apache-2.0
| 4,402
|
#!/usr/bin/env python
import os.path as osp
import numpy as np
import PIL.Image
import skimage.io
import skimage.transform
from fcn import utils
here = osp.dirname(osp.abspath(__file__))
def test_label_accuracy_score():
img_file = osp.join(here, '../data/2007_000063.jpg')
lbl_file = osp.join(here, '../data/2007_000063.png')
img = skimage.io.imread(img_file)
lbl_gt = np.array(PIL.Image.open(lbl_file), dtype=np.int32, copy=False)
lbl_gt[lbl_gt == 255] = -1
lbl_pred = lbl_gt.copy()
lbl_pred[lbl_pred == -1] = 0
lbl_pred = skimage.transform.rescale(lbl_pred, 1 / 16., order=0,
preserve_range=True)
lbl_pred = skimage.transform.resize(lbl_pred, lbl_gt.shape, order=0,
preserve_range=True)
lbl_pred = lbl_pred.astype(lbl_gt.dtype)
viz = utils.visualize_segmentation(
lbl_pred=lbl_pred, img=img, n_class=21, lbl_true=lbl_gt)
img_h, img_w = img.shape[:2]
assert isinstance(viz, np.ndarray)
assert viz.shape == (img_h * 2, img_w * 3, 3)
assert viz.dtype == np.uint8
return viz
if __name__ == '__main__':
import matplotlib.pyplot as plt
import skimage.color
viz = test_label_accuracy_score()
plt.imshow(viz)
plt.show()
|
wkentaro/fcn
|
tests/utils_tests/test_visualize_segmentation.py
|
Python
|
mit
| 1,303
|
"""
sentry.models.eventmapping
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db import models
from django.utils import timezone
from sentry.db.models import (BoundedBigIntegerField, Model, sane_repr)
class EventMapping(Model):
__core__ = False
project_id = BoundedBigIntegerField()
group_id = BoundedBigIntegerField()
event_id = models.CharField(max_length=32)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_eventmapping'
unique_together = (('project_id', 'event_id'), )
__repr__ = sane_repr('project_id', 'group_id', 'event_id')
# Implement a ForeignKey-like accessor for backwards compat
def _set_group(self, group):
self.group_id = group.id
self._group_cache = group
def _get_group(self):
from sentry.models import Group
if not hasattr(self, '_group_cache'):
self._group_cache = Group.objects.get(id=self.group_id)
return self._group_cache
group = property(_get_group, _set_group)
# Implement a ForeignKey-like accessor for backwards compat
def _set_project(self, project):
self.project_id = project.id
self._project_cache = project
def _get_project(self):
from sentry.models import Project
if not hasattr(self, '_project_cache'):
self._project_cache = Project.objects.get(id=self.project_id)
return self._project_cache
project = property(_get_project, _set_project)
|
looker/sentry
|
src/sentry/models/eventmapping.py
|
Python
|
bsd-3-clause
| 1,684
|
# coding: utf-8
from __future__ import division, unicode_literals
'''
Created on Jun 27, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 27, 2012"
import unittest
import os
import json
from pymatgen.entries.exp_entries import ExpEntry
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ExpEntryTest(unittest.TestCase):
def setUp(self):
thermodata = json.load(open(os.path.join(test_dir, "Fe2O3_exp.json"),
"r"), cls=MontyDecoder)
self.entry = ExpEntry("Fe2O3", thermodata)
def test_energy(self):
self.assertAlmostEqual(self.entry.energy, -825.5)
def test_to_from_dict(self):
d = self.entry.as_dict()
e = ExpEntry.from_dict(d)
self.assertAlmostEqual(e.energy, -825.5)
def test_str(self):
self.assertIsNotNone(str(self.entry))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Dioptas/pymatgen
|
pymatgen/entries/tests/test_exp_entries.py
|
Python
|
mit
| 1,189
|
#!/usr/bin/env python2
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
            pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
            for instance_filter in config.get('ec2', 'instance_filters').split(','):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--boto-profile', action='store',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = {}
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
            # CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
            # Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(dest):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(dest):
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, dest)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', dest)
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
            # Redis single-node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
            elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
            elif value is None:
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
        if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
            if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
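    # Illustrative only (not executed): after push(inv, 'us-east-1', 'host1')
    # and push_group(inv, 'regions', 'us-east-1'), the inventory looks roughly
    # like {'us-east-1': ['host1'], 'regions': {'children': ['us-east-1']}, ...}.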
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
        with open(self.cache_path_cache, 'r') as cache:
            json_inventory = cache.read()
        return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
        with open(self.cache_path_index, 'r') as cache:
            json_index = cache.read()
        self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as cache:
            cache.write(json_data)
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
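    # For example, to_safe("tag_Name=web-server") returns "tag_Name_web_server",
    # or "tag_Name_web-server" when replace_dash_in_groups is disabled.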
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/inventory/aws/ec2.py
|
Python
|
apache-2.0
| 55,406
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import strict_discrete_set, truncated_discrete_set
from .adapters import LakeShoreUSBAdapter
from time import sleep
import numpy as np
class LakeShore425(Instrument):
""" Represents the LakeShore 425 Gaussmeter and provides
a high-level interface for interacting with the instrument
To allow user access to the LakeShore 425 Gaussmeter in Linux,
create the file:
:code:`/etc/udev/rules.d/52-lakeshore425.rules`, with contents:
.. code-block:: none
SUBSYSTEMS=="usb",ATTRS{idVendor}=="1fb9",ATTRS{idProduct}=="0401",MODE="0666",SYMLINK+="lakeshore425"
Then reload the udev rules with:
.. code-block:: bash
sudo udevadm control --reload-rules
sudo udevadm trigger
The device will be accessible through :code:`/dev/lakeshore425`.
"""
field = Instrument.measurement(
"RDGFIELD?",
""" Returns the field in the current units """
)
unit = Instrument.control(
"UNIT?", "UNIT %d",
""" A string property that controls the units of the instrument,
which can take the values of G, T, Oe, or A/m. """,
validator=strict_discrete_set,
values={'G':1, 'T':2, 'Oe':3, 'A/m':4},
map_values=True
)
range = Instrument.control(
"RANGE?", "RANGE %d",
""" A floating point property that controls the field range in
units of Gauss, which can take the values 35, 350, 3500, and
35,000 G. """,
validator=truncated_discrete_set,
values={35:1, 350:2, 3500:3, 35000:4},
map_values=True
)
def __init__(self, port):
super(LakeShore425, self).__init__(
LakeShoreUSBAdapter(port),
"LakeShore 425 Gaussmeter",
)
def auto_range(self):
""" Sets the field range to automatically adjust """
self.write("AUTO")
def dc_mode(self, wideband=True):
""" Sets up a steady-state (DC) measurement of the field """
if wideband:
self.mode = (1, 0, 1)
else:
            self.mode = (1, 0, 2)
def ac_mode(self, wideband=True):
""" Sets up a measurement of an oscillating (AC) field """
if wideband:
self.mode = (2, 1, 1)
else:
self.mode = (2, 1, 2)
@property
def mode(self):
return tuple(self.values("RDGMODE?"))
@mode.setter
def mode(self, value):
""" Provides access to directly setting the mode, filter, and
bandwidth settings
"""
mode, filter, band = value
self.write("RDGMODE %d,%d,%d" % (mode, filter, band))
def zero_probe(self):
""" Initiates the zero field sequence to calibrate the probe """
self.write("ZPROBE")
def measure(self, points, has_aborted=lambda: False, delay=1e-3):
"""Returns the mean and standard deviation of a given number
of points while blocking
"""
data = np.zeros(points, dtype=np.float32)
for i in range(points):
if has_aborted():
break
data[i] = self.field
sleep(delay)
return data.mean(), data.std()
|
ralph-group/pymeasure
|
pymeasure/instruments/lakeshore/lakeshore425.py
|
Python
|
mit
| 4,398
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
import re
from codequick import Listitem, Resolver, Route
import urlquick
from resources.lib import resolver_proxy
from resources.lib.menu_utils import item_post_treatment
URL_ROOT = 'https://www.lachainemeteo.com'
URL_VIDEOS = URL_ROOT + '/videos-meteo/videos-la-chaine-meteo'
URL_BRIGHTCOVE_DATAS = URL_ROOT + '/jsdyn/lcmjs.js'
@Route.register
def list_programs(plugin, item_id, **kwargs):
"""
    Build the programs listing (one entry per video series found on the
    La Chaîne Météo videos page)
"""
resp = urlquick.get(URL_VIDEOS)
root = resp.parse()
for program_datas in root.iterfind(".//div[@class='viewVideosSeries']"):
program_title = program_datas.find(
".//div[@class='title']").text.strip() + ' ' + program_datas.find(
".//div[@class='title']").find('.//strong').text.strip()
item = Listitem()
item.label = program_title
item.set_callback(list_videos,
item_id=item_id,
program_title_value=program_title)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, program_title_value, **kwargs):
resp = urlquick.get(URL_VIDEOS)
root = resp.parse()
for program_datas in root.iterfind(".//div[@class='viewVideosSeries']"):
program_title = program_datas.find(
".//div[@class='title']").text.strip() + ' ' + program_datas.find(
".//div[@class='title']").find('.//strong').text.strip()
if program_title == program_title_value:
list_videos_datas = program_datas.findall('.//a')
for video_datas in list_videos_datas:
video_title = video_datas.find(".//div[@class='txt']").text
video_image = video_datas.find('.//img').get('data-src')
video_url = video_datas.get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item,
is_playable=True,
is_downloadable=True)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
resp = urlquick.get(video_url)
data_video_id = re.compile('data-video-id=\'(.*?)\'').findall(resp.text)[0]
data_player = re.compile('data-player=\'(.*?)\'').findall(resp.text)[0]
resp2 = urlquick.get(URL_BRIGHTCOVE_DATAS)
data_account = re.compile('players.brightcove.net/(.*?)/').findall(
resp2.text)[0]
return resolver_proxy.get_brightcove_video_json(plugin, data_account,
data_player, data_video_id,
download_mode)
|
Catch-up-TV-and-More/plugin.video.catchuptvandmore
|
resources/lib/channels/fr/lachainemeteo.py
|
Python
|
gpl-2.0
| 3,308
|
# coding: utf8
# Copyright 2018 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
Project-independent library for Taskcluster decision tasks
"""
import base64
import datetime
import hashlib
import json
import os
import re
import subprocess
import sys
import taskcluster
# Public API
__all__ = [
"CONFIG", "SHARED", "Task", "DockerWorkerTask",
"GenericWorkerTask", "WindowsGenericWorkerTask", "MacOsGenericWorkerTask",
]
class Config:
"""
Global configuration, for users of the library to modify.
"""
def __init__(self):
self.task_name_template = "%s"
self.index_prefix = "garbage.servo-decisionlib"
self.index_read_only = False
self.scopes_for_all_subtasks = []
self.routes_for_all_subtasks = []
self.docker_image_build_worker_type = None
self.docker_images_expire_in = "1 month"
self.repacked_msi_files_expire_in = "1 month"
self.treeherder_repository_name = None
# Set by docker-worker:
# https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/environment
self.decision_task_id = os.environ.get("TASK_ID")
# Set in the decision task’s payload, such as defined in .taskcluster.yml
self.task_owner = os.environ.get("TASK_OWNER")
self.task_source = os.environ.get("TASK_SOURCE")
self.git_url = os.environ.get("GIT_URL")
self.git_ref = os.environ.get("GIT_REF")
self.git_sha = os.environ.get("GIT_SHA")
def task_id(self):
if hasattr(self, "_task_id"):
return self._task_id
# If the head commit is a merge, we want to generate a unique task id which incorporates
        # the merge parents rather than the actual sha of the merge commit. This ensures that tasks
# can be reused if the tree is in an identical state. Otherwise, if the head commit is
# not a merge, we can rely on the head commit sha for that purpose.
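        # For instance (illustrative sha values), a merge commit with parents
        # abc123 and def456 yields the task id "abc123-def456", while a
        # non-merge head commit reuses its own sha.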
raw_commit = subprocess.check_output(["git", "cat-file", "commit", "HEAD"])
parent_commits = [
value.decode("utf8")
for line in raw_commit.split(b"\n")
for key, _, value in [line.partition(b" ")]
if key == b"parent"
]
if len(parent_commits) > 1:
self._task_id = "-".join(parent_commits) # pragma: no cover
else:
self._task_id = self.git_sha # pragma: no cover
return self._task_id
def git_sha_is_current_head(self):
output = subprocess.check_output(["git", "rev-parse", "HEAD"])
self.git_sha = output.decode("utf8").strip()
class Shared:
"""
Global shared state.
"""
def __init__(self):
self.now = datetime.datetime.utcnow()
self.found_or_created_indexed_tasks = {}
# taskclusterProxy URLs:
# https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/features
self.queue_service = taskcluster.Queue(options={"baseUrl": "http://taskcluster/queue/v1/"})
self.index_service = taskcluster.Index(options={"baseUrl": "http://taskcluster/index/v1/"})
def from_now_json(self, offset):
"""
Same as `taskcluster.fromNowJSON`, but uses the creation time of `self` for “now”.
"""
return taskcluster.stringDate(taskcluster.fromNow(offset, dateObj=self.now))
CONFIG = Config()
SHARED = Shared()
def chaining(op, attr):
def method(self, *args, **kwargs):
op(self, attr, *args, **kwargs)
return self
return method
def append_to_attr(self, attr, *args): getattr(self, attr).extend(args)
def prepend_to_attr(self, attr, *args): getattr(self, attr)[0:0] = list(args)
def update_attr(self, attr, **kwargs): getattr(self, attr).update(kwargs)
class Task:
"""
A task definition, waiting to be created.
    The typical usage is to chain the `with_*` methods to set or extend this object’s attributes,
    then call the `create` or `find_or_create` method to schedule a task.
This is an abstract class that needs to be specialized for different worker implementations.
"""
def __init__(self, name):
self.name = name
self.description = ""
self.scheduler_id = "taskcluster-github"
self.provisioner_id = "aws-provisioner-v1"
self.worker_type = "github-worker"
self.deadline_in = "1 day"
self.expires_in = "1 year"
self.index_and_artifacts_expire_in = self.expires_in
self.dependencies = []
self.scopes = []
self.routes = []
self.extra = {}
self.treeherder_required = False
# All `with_*` methods return `self`, so multiple method calls can be chained.
with_description = chaining(setattr, "description")
with_scheduler_id = chaining(setattr, "scheduler_id")
with_provisioner_id = chaining(setattr, "provisioner_id")
with_worker_type = chaining(setattr, "worker_type")
with_deadline_in = chaining(setattr, "deadline_in")
with_expires_in = chaining(setattr, "expires_in")
with_index_and_artifacts_expire_in = chaining(setattr, "index_and_artifacts_expire_in")
with_dependencies = chaining(append_to_attr, "dependencies")
with_scopes = chaining(append_to_attr, "scopes")
with_routes = chaining(append_to_attr, "routes")
with_extra = chaining(update_attr, "extra")
def with_treeherder_required(self):
self.treeherder_required = True
return self
def with_treeherder(self, category, symbol=None):
symbol = symbol or self.name
assert len(symbol) <= 25, symbol
self.name = "%s: %s" % (category, self.name)
        # The message schema does not allow spaces in the platform or in labels,
# but the UI shows them in that order separated by spaces.
# So massage the metadata to get the UI to show the string we want.
# `labels` defaults to ["opt"] if not provided or empty,
# so use a more neutral underscore instead.
parts = category.split(" ")
platform = parts[0]
labels = parts[1:] or ["_"]
# https://docs.taskcluster.net/docs/reference/integrations/taskcluster-treeherder/docs/task-treeherder-config
self.with_extra(treeherder={
"machine": {"platform": platform},
"labels": labels,
"symbol": symbol,
})
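        # e.g. a category of "windows x86_64 debug" yields platform "windows"
        # and labels ["x86_64", "debug"]; a bare "linux" category falls back to
        # labels ["_"].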
if CONFIG.treeherder_repository_name:
assert CONFIG.git_sha
suffix = ".v2._/%s.%s" % (CONFIG.treeherder_repository_name, CONFIG.git_sha)
self.with_routes(
"tc-treeherder" + suffix,
"tc-treeherder-staging" + suffix,
)
self.treeherder_required = False # Taken care of
return self
def build_worker_payload(self): # pragma: no cover
"""
Overridden by sub-classes to return a dictionary in a worker-specific format,
which is used as the `payload` property in a task definition request
passed to the Queue’s `createTask` API.
<https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#createTask>
"""
raise NotImplementedError
def create(self):
"""
Call the Queue’s `createTask` API to schedule a new task, and return its ID.
<https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#createTask>
"""
worker_payload = self.build_worker_payload()
assert not self.treeherder_required, \
"make sure to call with_treeherder() for this task: %s" % self.name
assert CONFIG.decision_task_id
assert CONFIG.task_owner
assert CONFIG.task_source
queue_payload = {
"taskGroupId": CONFIG.decision_task_id,
"dependencies": [CONFIG.decision_task_id] + self.dependencies,
"schedulerId": self.scheduler_id,
"provisionerId": self.provisioner_id,
"workerType": self.worker_type,
"created": SHARED.from_now_json(""),
"deadline": SHARED.from_now_json(self.deadline_in),
"expires": SHARED.from_now_json(self.expires_in),
"metadata": {
"name": CONFIG.task_name_template % self.name,
"description": self.description,
"owner": CONFIG.task_owner,
"source": CONFIG.task_source,
},
"payload": worker_payload,
}
scopes = self.scopes + CONFIG.scopes_for_all_subtasks
routes = self.routes + CONFIG.routes_for_all_subtasks
if any(r.startswith("index.") for r in routes):
self.extra.setdefault("index", {})["expires"] = \
SHARED.from_now_json(self.index_and_artifacts_expire_in)
dict_update_if_truthy(
queue_payload,
scopes=scopes,
routes=routes,
extra=self.extra,
)
task_id = taskcluster.slugId().decode("utf8")
SHARED.queue_service.createTask(task_id, queue_payload)
print("Scheduled %s: %s" % (task_id, self.name))
return task_id
@staticmethod
def find(index_path):
full_index_path = "%s.%s" % (CONFIG.index_prefix, index_path)
task_id = SHARED.index_service.findTask(full_index_path)["taskId"]
print("Found task %s indexed at %s" % (task_id, full_index_path))
return task_id
def find_or_create(self, index_path=None):
"""
Try to find a task in the Index and return its ID.
The index path used is `{CONFIG.index_prefix}.{index_path}`.
`index_path` defaults to `by-task-definition.{sha256}`
with a hash of the worker payload and worker type.
If no task is found in the index,
it is created with a route to add it to the index at that same path if it succeeds.
<https://docs.taskcluster.net/docs/reference/core/taskcluster-index/references/api#findTask>
"""
if not index_path:
worker_type = self.worker_type
index_by = json.dumps([worker_type, self.build_worker_payload()]).encode("utf-8")
index_path = "by-task-definition." + hashlib.sha256(index_by).hexdigest()
task_id = SHARED.found_or_created_indexed_tasks.get(index_path)
if task_id is not None:
return task_id
try:
task_id = Task.find(index_path)
except taskcluster.TaskclusterRestFailure as e:
if e.status_code != 404: # pragma: no cover
raise
if not CONFIG.index_read_only:
self.routes.append("index.%s.%s" % (CONFIG.index_prefix, index_path))
task_id = self.create()
SHARED.found_or_created_indexed_tasks[index_path] = task_id
return task_id
class GenericWorkerTask(Task):
"""
Task definition for a worker type that runs the `generic-worker` implementation.
This is an abstract class that needs to be specialized for different operating systems.
<https://github.com/taskcluster/generic-worker>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_run_time_minutes = 30
self.env = {}
self.features = {}
self.mounts = []
self.artifacts = []
with_max_run_time_minutes = chaining(setattr, "max_run_time_minutes")
with_mounts = chaining(append_to_attr, "mounts")
with_env = chaining(update_attr, "env")
def build_command(self): # pragma: no cover
"""
Overridden by sub-classes to return the `command` property of the worker payload,
in the format appropriate for the operating system.
"""
raise NotImplementedError
def build_worker_payload(self):
"""
Return a `generic-worker` worker payload.
<https://docs.taskcluster.net/docs/reference/workers/generic-worker/docs/payload>
"""
worker_payload = {
"command": self.build_command(),
"maxRunTime": self.max_run_time_minutes * 60
}
return dict_update_if_truthy(
worker_payload,
env=self.env,
mounts=self.mounts,
features=self.features,
artifacts=[
{
"type": type_,
"path": path,
"name": "public/" + url_basename(path),
"expires": SHARED.from_now_json(self.index_and_artifacts_expire_in),
}
for type_, path in self.artifacts
],
)
def with_artifacts(self, *paths, type="file"):
"""
Add each path in `paths` as a task artifact
that expires in `self.index_and_artifacts_expire_in`.
`type` can be `"file"` or `"directory"`.
Paths are relative to the task’s home directory.
"""
self.artifacts.extend((type, path) for path in paths)
return self
def with_features(self, *names):
"""
Enable the given `generic-worker` features.
<https://github.com/taskcluster/generic-worker/blob/master/native_windows.yml>
"""
self.features.update({name: True for name in names})
return self
def _mount_content(self, url_or_artifact_name, task_id, sha256):
if task_id:
content = {"taskId": task_id, "artifact": url_or_artifact_name}
else:
content = {"url": url_or_artifact_name}
if sha256:
content["sha256"] = sha256
return content
def with_file_mount(self, url_or_artifact_name, task_id=None, sha256=None, path=None):
"""
Make `generic-worker` download a file before the task starts
and make it available at `path` (which is relative to the task’s home directory).
If `sha256` is provided, `generic-worker` will hash the downloaded file
and check it against the provided signature.
If `task_id` is provided, this task will depend on that task
and `url_or_artifact_name` is the name of an artifact of that task.
"""
return self.with_mounts({
"file": path or url_basename(url_or_artifact_name),
"content": self._mount_content(url_or_artifact_name, task_id, sha256),
})
def with_directory_mount(self, url_or_artifact_name, task_id=None, sha256=None, path=None):
"""
Make `generic-worker` download an archive before the task starts,
and uncompress it at `path` (which is relative to the task’s home directory).
`url_or_artifact_name` must end in one of `.rar`, `.tar.bz2`, `.tar.gz`, or `.zip`.
The archive must be in the corresponding format.
If `sha256` is provided, `generic-worker` will hash the downloaded archive
and check it against the provided signature.
If `task_id` is provided, this task will depend on that task
and `url_or_artifact_name` is the name of an artifact of that task.
"""
supported_formats = ["rar", "tar.bz2", "tar.gz", "zip"]
for fmt in supported_formats:
suffix = "." + fmt
if url_or_artifact_name.endswith(suffix):
return self.with_mounts({
"directory": path or url_basename(url_or_artifact_name[:-len(suffix)]),
"content": self._mount_content(url_or_artifact_name, task_id, sha256),
"format": fmt,
})
raise ValueError(
"%r does not appear to be in one of the supported formats: %r"
% (url_or_artifact_name, ", ".join(supported_formats))
) # pragma: no cover
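    # Example (illustrative sketch only; the URL and sha256 are hypothetical):
    #   task.with_directory_mount(
    #       "https://example.com/tools.zip",
    #       sha256="...",
    #       path="tools",
    #   )
    # makes generic-worker download tools.zip, check its hash, and unpack it into ./tools.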
class WindowsGenericWorkerTask(GenericWorkerTask):
"""
Task definition for a `generic-worker` task running on Windows.
Scripts are written as `.bat` files executed with `cmd.exe`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scripts = []
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
def build_command(self):
return [deindent(s) for s in self.scripts]
def with_path_from_homedir(self, *paths):
"""
Interpret each path in `paths` as relative to the task’s home directory,
and add it to the `PATH` environment variable.
"""
for p in paths:
self.with_early_script("set PATH=%HOMEDRIVE%%HOMEPATH%\\{};%PATH%".format(p))
return self
def with_repo(self, sparse_checkout=None, shallow=True):
"""
        Make a shallow clone of the git repository at the start of the task.
This uses `CONFIG.git_url`, `CONFIG.git_ref`, and `CONFIG.git_sha`,
and creates the clone in a `repo` directory in the task’s home directory.
If `sparse_checkout` is given, it must be a list of path patterns
to be used in `.git/info/sparse-checkout`.
See <https://git-scm.com/docs/git-read-tree#_sparse_checkout>.
"""
git = """
git init repo
cd repo
"""
if sparse_checkout:
self.with_mounts({
"file": "sparse-checkout",
"content": {"raw": "\n".join(sparse_checkout)},
})
git += """
git config core.sparsecheckout true
copy ..\\sparse-checkout .git\\info\\sparse-checkout
type .git\\info\\sparse-checkout
"""
git += """
git fetch {depth} %GIT_URL% %GIT_REF%
git reset --hard %GIT_SHA%
""".format(depth="--depth 1" if shallow else "")
return self \
.with_git() \
.with_script(git) \
.with_env(**git_env())
def with_git(self):
"""
Make the task download `git-for-windows` and make it available for `git` commands.
This is implied by `with_repo`.
"""
return self \
.with_path_from_homedir("git\\cmd") \
.with_directory_mount(
"https://github.com/git-for-windows/git/releases/download/" +
"v2.19.0.windows.1/MinGit-2.19.0-64-bit.zip",
sha256="424d24b5fc185a9c5488d7872262464f2facab4f1d4693ea8008196f14a3c19b",
path="git",
)
def with_rustup(self):
"""
Download rustup.rs and make it available to task commands,
        without downloading any default toolchain.
"""
return self \
.with_path_from_homedir(".cargo\\bin") \
.with_early_script(
"%HOMEDRIVE%%HOMEPATH%\\rustup-init.exe --default-toolchain none -y"
) \
.with_file_mount(
"https://static.rust-lang.org/rustup/archive/" +
"1.17.0/x86_64-pc-windows-msvc/rustup-init.exe",
sha256="002127adeaaee6ef8d82711b5c2881a1db873262f63aea60cee9632f207e8f29",
)
def with_repacked_msi(self, url, sha256, path):
"""
Download an MSI file from `url`, extract the files in it with `lessmsi`,
and make them available in the directory at `path` (relative to the task’s home directory).
`sha256` is required and the MSI file must have that hash.
The file extraction (and recompression in a ZIP file) is done in a separate task,
        which is indexed based on `sha256` and cached for `CONFIG.repacked_msi_files_expire_in`.
<https://github.com/activescott/lessmsi>
"""
repack_task = (
WindowsGenericWorkerTask("MSI repack: " + url)
.with_worker_type(self.worker_type)
.with_max_run_time_minutes(20)
.with_file_mount(url, sha256=sha256, path="input.msi")
.with_directory_mount(
"https://github.com/activescott/lessmsi/releases/download/" +
"v1.6.1/lessmsi-v1.6.1.zip",
sha256="540b8801e08ec39ba26a100c855898f455410cecbae4991afae7bb2b4df026c7",
path="lessmsi"
)
.with_directory_mount(
"https://www.7-zip.org/a/7za920.zip",
sha256="2a3afe19c180f8373fa02ff00254d5394fec0349f5804e0ad2f6067854ff28ac",
path="7zip",
)
.with_path_from_homedir("lessmsi", "7zip")
.with_script("""
lessmsi x input.msi extracted\\
cd extracted\\SourceDir
7za a repacked.zip *
""")
.with_artifacts("extracted/SourceDir/repacked.zip")
.with_index_and_artifacts_expire_in(CONFIG.repacked_msi_files_expire_in)
.find_or_create("repacked-msi." + sha256)
)
return self \
.with_dependencies(repack_task) \
.with_directory_mount("public/repacked.zip", task_id=repack_task, path=path)
def with_python2(self):
"""
Make Python 2, pip, and virtualenv accessible to the task’s commands.
For Python 3, use `with_directory_mount` and the "embeddable zip file" distribution
from python.org.
You may need to remove `python37._pth` from the ZIP in order to work around
<https://bugs.python.org/issue34841>.
"""
return self \
.with_repacked_msi(
"https://www.python.org/ftp/python/2.7.15/python-2.7.15.amd64.msi",
sha256="5e85f3c4c209de98480acbf2ba2e71a907fd5567a838ad4b6748c76deb286ad7",
path="python2"
) \
.with_early_script("""
python -m ensurepip
pip install virtualenv==16.0.0
""") \
.with_path_from_homedir("python2", "python2\\Scripts")
class UnixTaskMixin(Task):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.curl_scripts_count = 0
def with_repo(self, shallow=True):
"""
        Make a shallow clone of the git repository at the start of the task.
This uses `CONFIG.git_url`, `CONFIG.git_ref`, and `CONFIG.git_sha`
* generic-worker: creates the clone in a `repo` directory
in the task’s directory.
* docker-worker: creates the clone in a `/repo` directory
at the root of the Docker container’s filesystem.
`git` and `ca-certificate` need to be installed in the Docker image.
"""
return self \
.with_env(**git_env()) \
.with_early_script("""
git init repo
cd repo
git fetch {depth} "$GIT_URL" "$GIT_REF"
git reset --hard "$GIT_SHA"
""".format(depth="--depth 1" if shallow else ""))
def with_curl_script(self, url, file_path):
self.curl_scripts_count += 1
n = self.curl_scripts_count
return self \
.with_env(**{
"CURL_%s_URL" % n: url,
"CURL_%s_PATH" % n: file_path,
}) \
.with_script("""
mkdir -p $(dirname "$CURL_{n}_PATH")
curl --retry 5 --connect-timeout 10 -Lf "$CURL_{n}_URL" -o "$CURL_{n}_PATH"
""".format(n=n))
def with_curl_artifact_script(self, task_id, artifact_name, out_directory=""):
return self \
.with_dependencies(task_id) \
.with_curl_script(
"https://queue.taskcluster.net/v1/task/%s/artifacts/public/%s"
% (task_id, artifact_name),
os.path.join(out_directory, url_basename(artifact_name)),
)
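    # Example (illustrative sketch; `build_task_id` is hypothetical):
    #   task.with_curl_artifact_script(build_task_id, "build.tar.gz", out_directory="artifacts")
    # adds a dependency on that task and a script that downloads its
    # public/build.tar.gz artifact into ./artifacts/build.tar.gz.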
class MacOsGenericWorkerTask(UnixTaskMixin, GenericWorkerTask):
"""
Task definition for a `generic-worker` task running on macOS.
Scripts are interpreted with `bash`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scripts = []
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
def build_command(self):
# generic-worker accepts multiple commands, but unlike on Windows
# the current directory and environment variables
# are not preserved across commands on macOS.
# So concatenate scripts and use a single `bash` command instead.
return [
[
"/bin/bash", "--login", "-x", "-e", "-c",
deindent("\n".join(self.scripts))
]
]
def with_python2(self):
return self.with_early_script("""
export PATH="$HOME/Library/Python/2.7/bin:$PATH"
python -m ensurepip --user
pip install --user virtualenv
""")
def with_rustup(self):
return self.with_early_script("""
export PATH="$HOME/.cargo/bin:$PATH"
which rustup || curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain none -y
""")
class DockerWorkerTask(UnixTaskMixin, Task):
"""
    Task definition for a worker type that runs the `docker-worker` implementation.
Scripts are interpreted with `bash`.
<https://github.com/taskcluster/docker-worker>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.docker_image = "ubuntu:bionic-20180821"
self.max_run_time_minutes = 30
self.scripts = []
self.env = {}
self.caches = {}
self.features = {}
self.capabilities = {}
self.artifacts = []
with_docker_image = chaining(setattr, "docker_image")
with_max_run_time_minutes = chaining(setattr, "max_run_time_minutes")
with_artifacts = chaining(append_to_attr, "artifacts")
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
with_caches = chaining(update_attr, "caches")
with_env = chaining(update_attr, "env")
with_capabilities = chaining(update_attr, "capabilities")
def build_worker_payload(self):
"""
Return a `docker-worker` worker payload.
<https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/payload>
"""
worker_payload = {
"image": self.docker_image,
"maxRunTime": self.max_run_time_minutes * 60,
"command": [
"/bin/bash", "--login", "-x", "-e", "-c",
deindent("\n".join(self.scripts))
],
}
return dict_update_if_truthy(
worker_payload,
env=self.env,
cache=self.caches,
features=self.features,
capabilities=self.capabilities,
artifacts={
"public/" + url_basename(path): {
"type": "file",
"path": path,
"expires": SHARED.from_now_json(self.index_and_artifacts_expire_in),
}
for path in self.artifacts
},
)
def with_features(self, *names):
"""
Enable the given `docker-worker` features.
<https://github.com/taskcluster/docker-worker/blob/master/docs/features.md>
"""
self.features.update({name: True for name in names})
return self
def with_dockerfile(self, dockerfile):
"""
Build a Docker image based on the given `Dockerfile`, and use it for this task.
`dockerfile` is a path in the filesystem where this code is running.
Some non-standard syntax is supported, see `expand_dockerfile`.
The image is indexed based on a hash of the expanded `Dockerfile`,
and cached for `CONFIG.docker_images_expire_in`.
Images are built without any *context*.
<https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#understand-build-context>
"""
basename = os.path.basename(dockerfile)
suffix = ".dockerfile"
assert basename.endswith(suffix)
image_name = basename[:-len(suffix)]
dockerfile_contents = expand_dockerfile(dockerfile)
digest = hashlib.sha256(dockerfile_contents).hexdigest()
image_build_task = (
DockerWorkerTask("Docker image: " + image_name)
.with_worker_type(CONFIG.docker_image_build_worker_type or self.worker_type)
.with_max_run_time_minutes(30)
.with_index_and_artifacts_expire_in(CONFIG.docker_images_expire_in)
.with_features("dind")
.with_env(DOCKERFILE=dockerfile_contents)
.with_artifacts("/image.tar.lz4")
.with_script("""
echo "$DOCKERFILE" | docker build -t taskcluster-built -
docker save taskcluster-built | lz4 > /image.tar.lz4
""")
.with_docker_image(
# https://github.com/servo/taskcluster-bootstrap-docker-images#image-builder
"servobrowser/taskcluster-bootstrap:image-builder@sha256:" \
"0a7d012ce444d62ffb9e7f06f0c52fedc24b68c2060711b313263367f7272d9d"
)
.find_or_create("docker-image." + digest)
)
return self \
.with_dependencies(image_build_task) \
.with_docker_image({
"type": "task-image",
"path": "public/image.tar.lz4",
"taskId": image_build_task,
})
def expand_dockerfile(dockerfile):
"""
Read the file at path `dockerfile`,
and transitively expand the non-standard `% include` header if it is present.
"""
with open(dockerfile, "rb") as f:
dockerfile_contents = f.read()
include_marker = b"% include"
if not dockerfile_contents.startswith(include_marker):
return dockerfile_contents
include_line, _, rest = dockerfile_contents.partition(b"\n")
included = include_line[len(include_marker):].strip().decode("utf8")
path = os.path.join(os.path.dirname(dockerfile), included)
return b"\n".join([expand_dockerfile(path), rest])
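# Example of the non-standard `% include` header handled above (illustrative only;
# the file names are hypothetical):
#   % include base.dockerfile
#   RUN apt-get install -y curl
# expand_dockerfile() splices the (recursively expanded) contents of base.dockerfile
# in place of the first line, before the rest of the Dockerfile.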
def git_env():
assert CONFIG.git_url
assert CONFIG.git_ref
assert CONFIG.git_sha
return {
"GIT_URL": CONFIG.git_url,
"GIT_REF": CONFIG.git_ref,
"GIT_SHA": CONFIG.git_sha,
}
def dict_update_if_truthy(d, **kwargs):
for key, value in kwargs.items():
if value:
d[key] = value
return d
def deindent(string):
return re.sub("\n +", "\n ", string).strip()
def url_basename(url):
return url.rpartition("/")[-1]
|
ecoal95/servo
|
etc/taskcluster/decisionlib.py
|
Python
|
mpl-2.0
| 30,707
|
"""
Utility functions for the API. This includes generation of meta and error messages.
All overridden classes and methods of Flask should go here.
Author: Lalit Jain, lalitkumarj@gmail.com
"""
import time
def timeit(f):
"""
    Utility decorator used to time the duration of code execution. It can be composed with any other function.
Usage::\n
def f(n):
return n**n
def g(n):
return n,n**n
answer0,dt = timeit(f)(3)
answer1,answer2,dt = timeit(g)(3)
"""
def timed(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
if type(result)==tuple:
return result + ((te-ts),)
else:
return result,(te-ts)
return timed
def attach_meta(response, meta, **kwargs):
"""
Attach a meta dictionary to a response dictionary.
Inputs: :\n
    (dict) response, (dict) meta, (key-value pairs) kwargs - optional messages to add to meta
    Usage: see the example below.
"""
for k, v in kwargs.iteritems():
meta[k] = v
response["meta"] = meta
return response
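# Example (illustrative):
#   response = attach_meta({'answer': 42}, {'code': 200, 'status': 'OK'}, message='done')
#   # => {'answer': 42, 'meta': {'code': 200, 'status': 'OK', 'message': 'done'}}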
verification_error = {
'message':'Failed to Authenticate',
'status':'FAIL',
'code':401
}
from flask_restful import Api
import sys, traceback
class NextBackendApi(Api):
"""
    Subclass of the default Api class of Flask-Restful with custom error handling for 500 errors.
All other errors are passed onto the default handle_error.
"""
def handle_error(self, e, **kwargs):
exc_type, exc_value, tb = sys.exc_info()
backend_error = traceback.format_exc(tb)
print "backend_error", backend_error,exc_type, exc_value, tb, traceback.format_exc(tb)
# Catch internal system errors
code = getattr(e, 'code', 500)
if code == 500:
response = {
'meta':{
'status': 'FAIL',
'code': 500,
'message': 'Internal Server Error',
'backend_error': backend_error
}
}
return self.make_response(response, 500)
return super(NextBackendApi, self).handle_error(e)
from flask_restful.reqparse import Argument
from flask_restful import abort
class APIArgument(Argument):
"""
Subclass of the standard flask restful Argument class to provide a custom meta message.
Passes up a 400 message if arguments are not correctly parsed.
"""
def __init__(self, *args, **kwargs):
"""
Pass up the default arguments.
"""
super(APIArgument, self).__init__(*args, **kwargs)
def handle_validation_error(self, error, bundle_errors):
"""
Called when an error is raised while parsing. Aborts the request
with a 400 status and a meta error dictionary.
Can I do this through the exception handling system?
:param error: the error that was raised
"""
help_str = '(%s) ' % self.help if self.help else ''
msg = '[%s]: %s%s' % (self.name, help_str, str(error))
if bundle_errors:
return error, msg
return abort(400, meta={'message':msg, 'code':400, 'status':'FAIL'})
# Custom Exception types for the api. These should just pass.
class DatabaseException(Exception):
pass
class BackendConnectionException(Exception):
pass
|
nextml/NEXT
|
next/api/api_util.py
|
Python
|
apache-2.0
| 3,426
|
default_app_config = 'ghswebsite.apps.admin.apps.AdminConfig'
|
tjghs/ghstracker
|
ghswebsite/apps/admin/__init__.py
|
Python
|
mit
| 62
|
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba.core import types
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
no_pyobj_flags = Flags()
def isnan(x):
return x != x
def isequal(x):
return x == x
class TestNaN(unittest.TestCase):
def test_nans(self, flags=enable_pyobj_flags):
pyfunc = isnan
cr = compile_isolated(pyfunc, (types.float64,), flags=flags)
cfunc = cr.entry_point
self.assertTrue(cfunc(float('nan')))
self.assertFalse(cfunc(1.0))
pyfunc = isequal
cr = compile_isolated(pyfunc, (types.float64,), flags=flags)
cfunc = cr.entry_point
self.assertFalse(cfunc(float('nan')))
self.assertTrue(cfunc(1.0))
def test_nans_npm(self):
self.test_nans(flags=no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
|
sklam/numba
|
numba/tests/test_nan.py
|
Python
|
bsd-2-clause
| 895
|
import socket
import os
import os.path
import urllib.request
print("############################################")
print("# I Wish You Like it #")
print("# Simple & online Md5 HashCracker #")
print("# I wrote These Codes just in 15m #")
print("# Contact Me:mostafa-zarei@hotmail.com #")
print("############################################\n\n")
while 1 :
path = (os.getcwd())
    flpath = os.path.join(path, 'Cracked Hashes')
if not os.path.exists(flpath): os.makedirs(flpath)
Staticurl = "http://md5cracker.com/qkhash.php?option=plaintext&pass="
Hash = input("Enter The Md5 Hash Here = ")
Url = Staticurl+Hash
name = Hash
    urllib.request.urlretrieve(Url, '1.txt')
    # Read the plaintext result returned by the cracking service
    with open('1.txt', 'r') as infile:
        result = infile.read().strip()
    # Save the cracked hash to the 'Cracked Hashes' folder and echo it to the console
    with open(os.path.join(flpath, "%s.txt" % name), 'w') as outfile:
        outfile.write(result)
    print(result)
print("The Cracked Hash Saved in Cracked Hashes 'Folder' Also! \n")
s = input("Press Any key to continue")
|
MR-HIDDEN/online-Md5-Cracker-python3-
|
Simple Md5 Cracker.py
|
Python
|
lgpl-3.0
| 1,079
|
#!/usr/bin/env python
import os
import sys
import textwrap
import setuptools
from setuptools.command.install import install
here = os.path.dirname(__file__)
package_data = dict(
setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],
)
force_windows_specific_files = (
os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES", "1").lower()
not in ("", "0", "false", "no")
)
include_windows_files = sys.platform == 'win32' or force_windows_specific_files
if include_windows_files:
package_data.setdefault('setuptools', []).extend(['*.exe'])
package_data.setdefault('setuptools.command', []).extend(['*.xml'])
def pypi_link(pkg_filename):
"""
Given the filename, including md5 fragment, construct the
dependency link for PyPI.
"""
root = 'https://files.pythonhosted.org/packages/source'
name, sep, rest = pkg_filename.partition('-')
parts = root, name[0], name, pkg_filename
return '/'.join(parts)
class install_with_pth(install):
"""
Custom install command to install a .pth file for distutils patching.
This hack is necessary because there's no standard way to install behavior
on startup (and it's debatable if there should be one). This hack (ab)uses
the `extra_path` behavior in Setuptools to install a `.pth` file with
implicit behavior on startup to give higher precedence to the local version
of `distutils` over the version from the standard library.
Please do not replicate this behavior.
"""
_pth_name = 'distutils-precedence'
_pth_contents = textwrap.dedent("""
import os
var = 'SETUPTOOLS_USE_DISTUTILS'
enabled = os.environ.get(var, 'local') == 'local'
enabled and __import__('_distutils_hack').add_shim()
""").lstrip().replace('\n', '; ')
def initialize_options(self):
install.initialize_options(self)
self.extra_path = self._pth_name, self._pth_contents
def finalize_options(self):
install.finalize_options(self)
self._restore_install_lib()
def _restore_install_lib(self):
"""
Undo secondary effect of `extra_path` adding to `install_lib`
"""
suffix = os.path.relpath(self.install_lib, self.install_libbase)
if suffix.strip() == self._pth_contents.strip():
self.install_lib = self.install_libbase
setup_params = dict(
cmdclass={'install': install_with_pth},
package_data=package_data,
)
if __name__ == '__main__':
# allow setup.py to run from another directory
here and os.chdir(here)
dist = setuptools.setup(**setup_params)
|
pypa/setuptools
|
setup.py
|
Python
|
mit
| 2,625
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Petru Paler
from django.utils.datastructures import SortedDict
class BTFailure(Exception):
pass
def decode_int(x, f):
f += 1
newf = x.index('e', f)
n = int(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_string(x, f):
colon = x.index(':', f)
n = int(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != 'e':
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
while x[f] != 'e':
k, f = decode_string(x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
def bdecode(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError, ValueError):
raise BTFailure("not a valid bencoded string")
if l != len(x):
raise BTFailure("invalid bencoded value (data after valid prefix)")
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
__slots__ = ['bencoded']
def __init__(self, s):
self.bencoded = s
def encode_bencached(x,r):
r.append(x.bencoded)
def encode_int(x, r):
r.extend(('i', str(x), 'e'))
def encode_bool(x, r):
if x:
encode_int(1, r)
else:
encode_int(0, r)
def encode_string(x, r):
r.extend((str(len(x)), ':', x))
def encode_list(x, r):
r.append('l')
for i in x:
encode_func[type(i)](i, r)
r.append('e')
def encode_dict(x,r):
r.append('d')
ilist = x.items()
ilist.sort()
for k, v in ilist:
r.extend((str(len(k)), ':', k))
encode_func[type(v)](v, r)
r.append('e')
encode_func = {}
encode_func[Bencached] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[SortedDict] = encode_dict
try:
from types import BooleanType
encode_func[BooleanType] = encode_bool
except ImportError:
pass
def bencode(x):
r = []
encode_func[type(x)](x, r)
return ''.join(r)
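# Example (illustrative round-trip):
#   bencode({'spam': ['a', 'b']})  # -> 'd4:spaml1:a1:bee'
#   bdecode('d4:spaml1:a1:bee')    # -> {'spam': ['a', 'b']}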
|
abshkd/benzene
|
torrents/utils/bencode.py
|
Python
|
bsd-3-clause
| 3,405
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Lexicographic permutations" – Project Euler Problem No. 24
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=24
#
def get_permutations(str):
if len(str) == 1:
return [str]
ret = []
for i in range(0, len(str)):
char = str[i]
sub_str = str[0:i] + str[i+1::]
sub_perm = get_permutations(sub_str)
ret.extend(map(lambda str: char+str, sub_perm))
return ret
# Testcase
str = "012"
perm = get_permutations(str)
assert (6 == len(set(perm))), "Testcase failed"
assert ("012" == perm[0]), "Testcase failed"
assert ("021" == perm[1]), "Testcase failed"
assert ("102" == perm[2]), "Testcase failed"
assert ("120" == perm[3]), "Testcase failed"
assert ("201" == perm[4]), "Testcase failed"
assert ("210" == perm[5]), "Testcase failed"
# Solve
str = "0123456789" # digits must be in sorted order to preserve lexicograpic order
perm = get_permutations(str)
nth = 1000000
print "There are %d permutation of the digits '%s'" % (len(perm), str)
print "Solution: The %d-th permutation of the digits '%s' : %s" % (nth, str, perm[nth - 1])
|
fbcom/project-euler
|
024_lexicographic_permutations.py
|
Python
|
mit
| 1,226
|
from typing import Any
from typing import Tuple
from poetry.core.packages.constraints.base_constraint import BaseConstraint
from poetry.core.packages.constraints.constraint import Constraint
class MultiConstraint(BaseConstraint):
def __init__(self, *constraints: Constraint) -> None:
if any(c.operator == "==" for c in constraints):
raise ValueError(
"A multi-constraint can only be comprised of negative constraints"
)
self._constraints = constraints
@property
def constraints(self) -> Tuple[Constraint, ...]:
return self._constraints
def allows(self, other: "BaseConstraint") -> bool:
return all(constraint.allows(other) for constraint in self._constraints)
def allows_all(self, other: "BaseConstraint") -> bool:
if other.is_any():
return False
if other.is_empty():
return True
if not isinstance(other, MultiConstraint):
return self.allows(other)
our_constraints = iter(self._constraints)
their_constraints = iter(other.constraints)
our_constraint = next(our_constraints, None)
their_constraint = next(their_constraints, None)
while our_constraint and their_constraint:
if our_constraint.allows_all(their_constraint):
their_constraint = next(their_constraints, None)
else:
our_constraint = next(our_constraints, None)
return their_constraint is None
def allows_any(self, other: "BaseConstraint") -> bool:
if other.is_any():
return True
if other.is_empty():
return True
if isinstance(other, Constraint):
return self.allows(other)
if isinstance(other, MultiConstraint):
return any(
c1.allows(c2) for c1 in self.constraints for c2 in other.constraints
)
return False
def intersect(self, other: BaseConstraint) -> "BaseConstraint":
if not isinstance(other, Constraint):
raise ValueError("Unimplemented constraint intersection")
constraints = self._constraints
if other not in constraints:
constraints += (other,)
else:
constraints = (other,)
if len(constraints) == 1:
return constraints[0]
return MultiConstraint(*constraints)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, MultiConstraint):
return False
return sorted(
self._constraints, key=lambda c: (c.operator, c.version)
) == sorted(other.constraints, key=lambda c: (c.operator, c.version))
def __str__(self) -> str:
constraints = []
for constraint in self._constraints:
constraints.append(str(constraint))
return ", ".join(constraints)
|
python-poetry/poetry-core
|
src/poetry/core/packages/constraints/multi_constraint.py
|
Python
|
mit
| 2,900
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import groupby
string = raw_input()
for key, value in groupby(string):
print (len(list(value)), int(key)),
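# Example (illustrative): for the input string "1222311" this prints
#   (1, 1) (3, 2) (1, 3) (2, 1)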
|
ugaliguy/HackerRank
|
Python/Itertools/compress-the-string.py
|
Python
|
mit
| 203
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetMetrics
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-recaptcha-enterprise
# [START recaptchaenterprise_v1_generated_RecaptchaEnterpriseService_GetMetrics_async]
from google.cloud import recaptchaenterprise_v1
async def sample_get_metrics():
# Create a client
client = recaptchaenterprise_v1.RecaptchaEnterpriseServiceAsyncClient()
# Initialize request argument(s)
request = recaptchaenterprise_v1.GetMetricsRequest(
name="name_value",
)
# Make the request
response = await client.get_metrics(request=request)
# Handle the response
print(response)
# [END recaptchaenterprise_v1_generated_RecaptchaEnterpriseService_GetMetrics_async]
|
googleapis/python-recaptcha-enterprise
|
samples/generated_samples/recaptchaenterprise_v1_generated_recaptcha_enterprise_service_get_metrics_async.py
|
Python
|
apache-2.0
| 1,553
|
import sys
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
lista = test.strip().split(',')
        largo = len(lista) / 2
        salida = None
        for i in lista:
            if lista.count(i) > largo:
                salida = i
                break
        print (salida)
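# Example (illustrative): for the line "1,2,1,1" the major element 1 is printed;
# for "1,2,3" no element occurs more than half the time, so None is printed.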
|
palaxi00/palaxi00.github.io
|
Codeeval/the_major_element.py
|
Python
|
mit
| 353
|
# data file for 8 period problem
p=8
a=10 # minimum academic load allowed per period
b=24 # maximum academic load allowed per period
c=2 # minimum amount of courses allowed per period, 4 to make problem tighter
d=10 # maximum amount of courses allowed per period 8 to make problem tighter
courses8 = [
'dew100','fis100','hcw310','iwg101','mat190','mat192', 'dew101',
'fis101', 'iwi131', 'mat191' , 'mat193' , 'fis102' , 'hxwxx1',
'iei134' , 'iei141' , 'mat194',
'dewxx0', 'hcw311' ,'iei132' ,'iei133', 'iei142', 'iei162',
'iwn170', 'mat195', 'hxwxx2', 'iei231', 'iei241', 'iei271', 'iei281', 'iwn261',
'hfw120', 'iei233', 'iei238', 'iei261', 'iei272', 'iei273', 'iei161', 'iei232',
'iei262', 'iei274', 'iwi365', 'iwn270' , 'hrw130' , 'iei218' , 'iei219' ,'iei248' ]
credit8 = [
1, 3, 1, 2, 4,
4, 1, 5, 3, 4,
4, 5, 1, 3, 3,
4, 1, 1, 3, 3,
3, 3, 3, 3, 1,
4, 4, 3, 3, 3,
2, 4, 3, 3, 3,
3, 3, 3, 3, 3,
3, 3, 2, 3, 3,
3
]
prereq8= [
('dew101','dew100'),
('fis101', 'fis100'),
('fis101', 'mat192'),
('mat191', 'mat190'),
('mat193','mat190'),
('mat193','mat192'),
('fis102','fis101'),
('fis102', 'mat193'),
('iei134' , 'iwi131'),
('iei141' , 'iwi131'),
('mat194' , 'mat191'),
('mat194' , 'mat193'),
('dewxx0' , 'dew101'),
('hcw311' , 'hcw310'),
('iei132' , 'iei134'),
('iei133' , 'iei134'),
('iei142' , 'iei141'),
('mat195' , 'mat194'),
('iei231' , 'iei134'),
('iei241' , 'iei142'),
('iei271' , 'iei162'),
('iei281' , 'mat195'),
('iei233' , 'iei231'),
('iei238' , 'iei231'),
('iei261' , 'iwn261'),
('iei272' , 'iei271'),
('iei273' , 'iei271'),
('iei273' , 'iei271'),
('iei161' , 'iwn261'),
('iei161' , 'iwn261'),
('iei232' , 'iei273'),
('iei232' , 'iei273'),
('iei262' , 'iwn261'),
('iei274' , 'iei273'),
('iei274' , 'iei273'),
('iei219' , 'iei232'),
('iei248' , 'iei233'),
('iei248' , 'iei233')
]
|
LYZhelloworld/Courses
|
50.021/03/csp/bacp8.py
|
Python
|
mit
| 1,880
|
#!/bin/python3
#
# This file is part of Simpleline Text UI library.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Simpleline is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Simpleline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Simpleline. If not, see <https://www.gnu.org/licenses/>.
#
# Simple divider screen.
#
# User input processing example.
#
#
import re
from simpleline import App
from simpleline.render.screen import UIScreen, InputState
from simpleline.render.screen_handler import ScreenHandler
from simpleline.render.widgets import TextWidget
class DividerScreen(UIScreen):
def __init__(self):
# Set title of the screen.
super().__init__(title="Divider")
self._message = 0
def refresh(self, args=None):
# Fill the self.window attribute by the WindowContainer and set screen title as header.
super().refresh()
widget = TextWidget("Result: " + str(self._message))
self.window.add_with_separator(widget)
def prompt(self, args=None):
# Change user prompt
prompt = super().prompt()
        # Set message to the user prompt. Give the user a hint about how to control the application.
prompt.set_message("Pass numbers to divider in a format: 'num / num'")
# Remove continue option from the control. There is no need for that
# when we have only one screen.
prompt.remove_option('c')
return prompt
def input(self, args, key):
"""Process input from user and catch numbers with '/' symbol."""
# Test if user passed valid input for divider.
        # It accepts only 'number / number' with non-negative integers and nothing else.
groups = re.match(r'(\d+) *\/ *(\d+)$', key)
if groups:
num1 = int(groups[1])
num2 = int(groups[2])
# Dividing by zero is not valid so we won't accept this input from the user. New
# input is then required from the user.
if num2 == 0:
return InputState.DISCARDED
self._message = int(num1 / num2)
# Because this input is processed we need to show this screen (show the result).
# This will call refresh so our new result will be processed inside of the refresh()
# method.
return InputState.PROCESSED_AND_REDRAW
# Not input for our screen, try other default inputs. This will result in the
# same state as DISCARDED when no default option is used.
return key
if __name__ == "__main__":
# Initialize application (create scheduler and event loop).
App.initialize()
# Create our screen.
screen = DividerScreen()
# Schedule screen to the screen scheduler.
# This can be called only after App.initialize().
ScreenHandler.schedule_screen(screen)
# Run the application. You must have some screen scheduled
# otherwise it will end in an infinite loop.
App.run()
|
rhinstaller/python-simpleline
|
examples/07_divider/07_divider.py
|
Python
|
gpl-2.0
| 3,443
|
# Copyright (C) 2018, Henrique Miranda
# All rights reserved.
#
# This file is part of yambopy
#
"""
submodule with classes to read RT calculations
"""
|
alexmoratalla/yambopy
|
yambopy/rt/__init__.py
|
Python
|
bsd-3-clause
| 152
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from glyphsLib.util import build_ufo_path
from glyphsLib.classes import WEIGHT_CODES, GSCustomParameter
from .constants import GLYPHS_PREFIX, GLYPHLIB_PREFIX, UFO_FILENAME_CUSTOM_PARAM
from .names import build_stylemap_names
from .axes import (
get_axis_definitions,
is_instance_active,
interp,
WEIGHT_AXIS_DEF,
WIDTH_AXIS_DEF,
AxisDefinitionFactory,
)
from .custom_params import to_ufo_custom_params
EXPORT_KEY = GLYPHS_PREFIX + "export"
WIDTH_KEY = GLYPHS_PREFIX + "width"
WEIGHT_KEY = GLYPHS_PREFIX + "weight"
FULL_FILENAME_KEY = GLYPHLIB_PREFIX + "fullFilename"
MANUAL_INTERPOLATION_KEY = GLYPHS_PREFIX + "manualInterpolation"
INSTANCE_INTERPOLATIONS_KEY = GLYPHS_PREFIX + "intanceInterpolations"
CUSTOM_PARAMETERS_KEY = GLYPHS_PREFIX + "customParameters"
logger = logging.getLogger(__name__)
def to_designspace_instances(self):
"""Write instance data from self.font to self.designspace."""
for instance in self.font.instances:
if self.minimize_glyphs_diffs or (
is_instance_active(instance)
and _is_instance_included_in_family(self, instance)
):
_to_designspace_instance(self, instance)
def _to_designspace_instance(self, instance):
ufo_instance = self.designspace.newInstanceDescriptor()
# FIXME: (jany) most of these customParameters are actually attributes,
# at least according to https://docu.glyphsapp.com/#fontName
for p in instance.customParameters:
param, value = p.name, p.value
if param == "postscriptFontName":
# Glyphs uses "postscriptFontName", not "postScriptFontName"
ufo_instance.postScriptFontName = value
elif param == "fileName":
fname = value + ".ufo"
if self.instance_dir is not None:
fname = self.instance_dir + "/" + fname
ufo_instance.filename = fname
# Read either from properties or custom parameter or the font
ufo_instance.familyName = instance.familyName
ufo_instance.styleName = instance.name
fname = (
instance.customParameters[UFO_FILENAME_CUSTOM_PARAM]
or instance.customParameters[FULL_FILENAME_KEY]
)
if fname is not None:
if self.instance_dir:
fname = self.instance_dir + "/" + os.path.basename(fname)
ufo_instance.filename = fname
if not ufo_instance.filename:
instance_dir = self.instance_dir or "instance_ufos"
ufo_instance.filename = build_ufo_path(
instance_dir, ufo_instance.familyName, ufo_instance.styleName
)
designspace_axis_tags = {a.tag for a in self.designspace.axes}
location = {}
for axis_def in get_axis_definitions(self.font):
# Only write locations along defined axes
if axis_def.tag in designspace_axis_tags:
location[axis_def.name] = axis_def.get_design_loc(instance)
ufo_instance.location = location
# FIXME: (jany) should be the responsibility of ufo2ft?
# Anyway, only generate the styleMap names if the Glyphs instance already
# has a linkStyle set up, or if we're not round-tripping (i.e. generating
# UFOs for fontmake, the traditional use-case of glyphsLib.)
if instance.linkStyle or not self.minimize_glyphs_diffs:
styleMapFamilyName, styleMapStyleName = build_stylemap_names(
family_name=ufo_instance.familyName,
style_name=ufo_instance.styleName,
is_bold=instance.isBold,
is_italic=instance.isItalic,
linked_style=instance.linkStyle,
)
ufo_instance.styleMapFamilyName = styleMapFamilyName
ufo_instance.styleMapStyleName = styleMapStyleName
ufo_instance.name = " ".join(
(ufo_instance.familyName or "", ufo_instance.styleName or "")
)
if self.minimize_glyphs_diffs:
ufo_instance.lib[EXPORT_KEY] = instance.active
ufo_instance.lib[WEIGHT_KEY] = instance.weight
ufo_instance.lib[WIDTH_KEY] = instance.width
ufo_instance.lib[INSTANCE_INTERPOLATIONS_KEY] = instance.instanceInterpolations
ufo_instance.lib[MANUAL_INTERPOLATION_KEY] = instance.manualInterpolation
# Strategy: dump all custom parameters into the InstanceDescriptor.
# Later, when using `apply_instance_data`, we will dig out those custom
# parameters using `InstanceDescriptorAsGSInstance` and apply them to the
# instance UFO with `to_ufo_custom_params`.
# NOTE: customParameters are not a dict! One key can have several values
params = []
for p in instance.customParameters:
if p.name in (
"familyName",
"postscriptFontName",
"fileName",
FULL_FILENAME_KEY,
UFO_FILENAME_CUSTOM_PARAM,
):
# These will be stored in the official descriptor attributes
continue
if p.name in ("weightClass", "widthClass"):
# No need to store these ones because we can recover them by
# reading the mapping backward, because the mapping is built from
# where the instances are.
continue
params.append((p.name, p.value))
if params:
ufo_instance.lib[CUSTOM_PARAMETERS_KEY] = params
self.designspace.addInstance(ufo_instance)
def _is_instance_included_in_family(self, instance):
if not self._do_filter_instances_by_family:
return True
return instance.familyName == self.family_name
# TODO: function is too complex (35), split it up
def to_glyphs_instances(self): # noqa: C901
if self.designspace is None:
return
for ufo_instance in self.designspace.instances:
instance = self.glyphs_module.GSInstance()
try:
instance.active = ufo_instance.lib[EXPORT_KEY]
except KeyError:
# If not specified, the default is to export all instances
instance.active = True
instance.name = ufo_instance.styleName
for axis_def in get_axis_definitions(self.font):
design_loc = None
try:
design_loc = ufo_instance.location[axis_def.name]
axis_def.set_design_loc(instance, design_loc)
except KeyError:
# The location does not have this axis?
pass
if axis_def.tag in ("wght", "wdth"):
# Retrieve the user location (weightClass/widthClass)
# Generic way: read the axis mapping backwards.
user_loc = design_loc
mapping = None
for axis in self.designspace.axes:
if axis.tag == axis_def.tag:
mapping = axis.map
if mapping:
reverse_mapping = [(dl, ul) for ul, dl in mapping]
user_loc = interp(reverse_mapping, design_loc)
if user_loc is not None:
axis_def.set_user_loc(instance, user_loc)
try:
# Restore the original weight name when there is an ambiguity based
# on the value, e.g. Thin, ExtraLight, UltraLight all map to 250.
# No problem with width, because 1:1 mapping in WIDTH_CODES.
weight = ufo_instance.lib[WEIGHT_KEY]
# Only use the lib value if:
# 1. we don't have a weight for the instance already
# 2. the value from lib is not "stale", i.e. it still maps to
# the current userLocation of the instance. This is in case the
# user changes the instance location of the instance by hand but
# does not update the weight value in lib.
if (
not instance.weight
or WEIGHT_CODES[instance.weight] == WEIGHT_CODES[weight]
):
instance.weight = weight
except KeyError:
# FIXME: what now
pass
try:
if not instance.width:
instance.width = ufo_instance.lib[WIDTH_KEY]
except KeyError:
# FIXME: what now
pass
if ufo_instance.familyName is not None:
if ufo_instance.familyName != self.font.familyName:
instance.familyName = ufo_instance.familyName
smfn = ufo_instance.styleMapFamilyName
if smfn is not None:
if smfn.startswith(ufo_instance.familyName):
smfn = smfn[len(ufo_instance.familyName) :].strip()
instance.linkStyle = smfn
if ufo_instance.styleMapStyleName is not None:
style = ufo_instance.styleMapStyleName
instance.isBold = "bold" in style
instance.isItalic = "italic" in style
if ufo_instance.postScriptFontName is not None:
instance.fontName = ufo_instance.postScriptFontName
try:
instance.manualInterpolation = ufo_instance.lib[MANUAL_INTERPOLATION_KEY]
except KeyError:
pass
try:
instance.instanceInterpolations = ufo_instance.lib[
INSTANCE_INTERPOLATIONS_KEY
]
except KeyError:
# TODO: (jany) compute instanceInterpolations from the location
# if instance.manualInterpolation: warn about data loss
pass
if CUSTOM_PARAMETERS_KEY in ufo_instance.lib:
for name, value in ufo_instance.lib[CUSTOM_PARAMETERS_KEY]:
instance.customParameters.append(GSCustomParameter(name, value))
if ufo_instance.filename and self.minimize_ufo_diffs:
instance.customParameters[UFO_FILENAME_CUSTOM_PARAM] = ufo_instance.filename
# FIXME: (jany) cannot `.append()` because no proxy => no parent
self.font.instances = self.font.instances + [instance]
class InstanceDescriptorAsGSInstance:
"""Wraps a designspace InstanceDescriptor and makes it behave like a
GSInstance, just enough to use the descriptor as a source of custom
parameters for `to_ufo_custom_parameters`
"""
def __init__(self, descriptor):
self._descriptor = descriptor
# Having a simple list is enough because `to_ufo_custom_params` does
# not use the fake dictionary interface.
self.customParameters = []
if CUSTOM_PARAMETERS_KEY in descriptor.lib:
for name, value in descriptor.lib[CUSTOM_PARAMETERS_KEY]:
self.customParameters.append(GSCustomParameter(name, value))
def _set_class_from_instance(ufo, designspace, instance, axis_tag):
# FIXME: (jany) copy-pasted from above, factor into method?
assert axis_tag in ("wght", "wdth")
factory = AxisDefinitionFactory()
for axis in designspace.axes:
if axis.tag == axis_tag:
axis_def = factory.get(axis.tag, axis.name)
mapping = axis.map
break
else:
# axis not found, try use the default axis definition
axis_def = WEIGHT_AXIS_DEF if axis_tag == "wght" else WIDTH_AXIS_DEF
mapping = []
try:
design_loc = instance.location[axis_def.name]
except KeyError:
user_loc = axis_def.default_user_loc
else:
if mapping:
# Retrieve the user location (weightClass/widthClass)
# by going through the axis mapping in reverse.
reverse_mapping = sorted({dl: ul for ul, dl in mapping}.items())
user_loc = interp(reverse_mapping, design_loc)
else:
# no mapping means user space location is same as design space
user_loc = design_loc
axis_def.set_ufo_user_loc(ufo, user_loc)
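# Example for _set_class_from_instance above (illustrative): with a "wght" axis
# mapping of [(400, 80), (700, 100)] (user -> design pairs) and an instance at
# design location 100, reading the mapping backwards yields a user location
# (openTypeOS2WeightClass) of 700.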
def set_weight_class(ufo, designspace, instance):
""" Set ufo.info.openTypeOS2WeightClass according to the user location
of the designspace instance, as calculated from the axis mapping.
"""
_set_class_from_instance(ufo, designspace, instance, "wght")
def set_width_class(ufo, designspace, instance):
""" Set ufo.info.openTypeOS2WidthClass according to the user location
of the designspace instance, as calculated from the axis mapping.
"""
_set_class_from_instance(ufo, designspace, instance, "wdth")
def apply_instance_data(designspace, include_filenames=None, Font=None):
"""Open UFO instances referenced by designspace, apply Glyphs instance
data if present, re-save UFOs and return updated UFO Font objects.
Args:
designspace: DesignSpaceDocument object or path (str or PathLike) to
a designspace file.
include_filenames: optional set of instance filenames (relative to
the designspace path) to be included. By default all instaces are
processed.
Font: a callable(path: str) -> Font, used to load a UFO, such as
defcon.Font class (default: ufoLib2.Font.open).
Returns:
List of opened and updated instance UFOs.
"""
from fontTools.designspaceLib import DesignSpaceDocument
from os.path import normcase, normpath
if Font is None:
import ufoLib2
Font = ufoLib2.Font.open
if hasattr(designspace, "__fspath__"):
designspace = designspace.__fspath__()
if isinstance(designspace, str):
designspace = DesignSpaceDocument.fromfile(designspace)
basedir = os.path.dirname(designspace.path)
instance_ufos = []
if include_filenames is not None:
include_filenames = {normcase(normpath(p)) for p in include_filenames}
for designspace_instance in designspace.instances:
fname = designspace_instance.filename
assert fname is not None, "instance %r missing required filename" % getattr(
designspace_instance, "name", designspace_instance
)
if include_filenames is not None:
fname = normcase(normpath(fname))
if fname not in include_filenames:
continue
logger.debug("Applying instance data to %s", fname)
# fontmake <= 1.4.0 compares the ufo paths returned from this function
# to the keys of a dict of designspace locations that have been passed
# through normpath (but not normcase). We do the same.
ufo = Font(normpath(os.path.join(basedir, fname)))
apply_instance_data_to_ufo(ufo, designspace_instance, designspace)
ufo.save()
instance_ufos.append(ufo)
return instance_ufos
def apply_instance_data_to_ufo(ufo, instance, designspace):
"""Apply Glyphs instance data to UFO object.
Args:
ufo: a defcon-like font object.
instance: a fontTools.designspaceLib.InstanceDescriptor.
designspace: a fontTools.designspaceLib.DesignSpaceDocument.
Returns:
None.
"""
if any(axis.tag == "wght" for axis in designspace.axes):
set_weight_class(ufo, designspace, instance)
if any(axis.tag == "wdth" for axis in designspace.axes):
set_width_class(ufo, designspace, instance)
glyphs_instance = InstanceDescriptorAsGSInstance(instance)
to_ufo_custom_params(None, ufo, glyphs_instance)
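# Example (hypothetical usage sketch; the designspace path is made up):
#   from glyphsLib.builder.instances import apply_instance_data
#   instance_ufos = apply_instance_data("MyFamily.designspace")
# opens each instance UFO referenced by the designspace, applies any stored Glyphs
# instance data, re-saves the UFOs and returns them.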
|
googlefonts/glyphsLib
|
Lib/glyphsLib/builder/instances.py
|
Python
|
apache-2.0
| 15,647
|
#!/usr/bin/env python
import sys
sys.path.insert(0, '../RAID')
sys.path.insert(0, '../workloads')
sys.path.insert(0, '../Disks')
import math
import matplotlib.pyplot as plt
import RAIDinterface
import ssdsim
import hddsim
import posixsim as psx
import numpy as np
import random
#from posix import Posix as psx
def simulate():
# New RAID interface
R0 = RAIDinterface.RAID0(RAIDtype=0, disknumber=10, flashpercent=0.2, SSDcapacity=250, HDDcapacity=250, blocksize=4096)
R5 = RAIDinterface.RAID0(RAIDtype=0, disknumber=10, flashpercent=0.2, SSDcapacity=250, HDDcapacity=500, blocksize=4096)
# Distribution of possible operations
action_set = ["read","read","read","write","read","read","write","read","delete"]
#Loop
# Number of iterations to run
N = 1000000000
#Empty list to store filenames so as to read from existing files
files = []
for ii in xrange(1,N):
#create workload
# tw random number in [0,9]. To choose workload based on balance.
        # Set boundary to 5 if balanced, or to 3 or 7 for unbalanced workload
#tw = random.randint(0,9)
current_action = random.choice(action_set)
if current_action == "write":
#if (tw < Bound):
#work = 'w'
# A good random name is the number of the iteration: ii
f = psx.Posix(str(ii))
files.append(f)
randomSize = random.randint(100,1000000000)
f.set_size(randomSize)
            ans = R5.write(f)
            print ii, 'w', f.get_size(), np.array_str(ans)
elif current_action == "read":
#work = 'r'
#Choose random file from list
#randfileind = random.randint(0,len(files)-1)
#f = files[randfileind]
            if not files:
                continue
            f = random.choice(files)
            ans = R5.read(f)
            print ii, 'r', f.get_size(), np.array_str(ans)
elif current_action == "delete":
            if not files:
                continue
            f = random.choice(files)
            R5.delete(f)
else:
print "I shouldn't be here"
# After each execution, check if any disk needs to change
        for i in range(10):  # 10 disks, matching disknumber used when building the RAID above
R5.checkDisks("ChangeDisksLog.csv", ii)
|
KentonMurray/HYDE
|
workloads/read_heavy.py
|
Python
|
gpl-2.0
| 1,928
|
from django.core.urlresolvers import reverse
from webtest import TestApp
from treeherder.webapp.wsgi import application
def test_pending_job_available(jm, initial_data, pending_jobs_stored):
webapp = TestApp(application)
resp = webapp.get(
reverse("jobs-list", kwargs={"project": jm.project})
)
jobs = resp.json
assert len(jobs['results']) == 1
assert jobs['results'][0]['state'] == 'pending'
def test_running_job_available(jm, initial_data, running_jobs_stored):
webapp = TestApp(application)
resp = webapp.get(
reverse("jobs-list", kwargs={"project": jm.project})
)
jobs = resp.json
assert len(jobs['results']) == 1
assert jobs['results'][0]['state'] == 'running'
def test_completed_job_available(jm, initial_data, completed_jobs_stored):
webapp = TestApp(application)
resp = webapp.get(
reverse("jobs-list", kwargs={"project": jm.project})
)
jobs = resp.json
assert len(jobs['results']) == 1
assert jobs['results'][0]['state'] == 'completed'
def test_pending_stored_to_running_loaded(jm, initial_data, pending_jobs_stored, running_jobs_stored):
"""
    Tests a job transition from pending to running:
    given a stored pending job, if the same job is stored again with status running,
    the running state is shown in the jobs endpoint.
"""
webapp = TestApp(application)
resp = webapp.get(
reverse("jobs-list", kwargs={"project": jm.project})
)
jobs = resp.json
assert len(jobs['results']) == 1
assert jobs['results'][0]['state'] == 'running'
def test_finished_job_to_running(jm, initial_data, completed_jobs_stored, running_jobs_stored):
"""
    Tests that a finished job cannot change state.
"""
webapp = TestApp(application)
resp = webapp.get(
reverse("jobs-list", kwargs={"project": jm.project})
)
jobs = resp.json
assert len(jobs['results']) == 1
assert jobs['results'][0]['state'] == 'completed'
def test_running_job_to_pending(jm, initial_data, running_jobs_stored, pending_jobs_stored):
"""
    Tests that a job cannot transition from running back to pending.
"""
webapp = TestApp(application)
resp = webapp.get(
reverse("jobs-list", kwargs={"project": jm.project})
)
jobs = resp.json
assert len(jobs['results']) == 1
assert jobs['results'][0]['state'] == 'running'
|
moijes12/treeherder
|
tests/e2e/test_jobs_loaded.py
|
Python
|
mpl-2.0
| 2,412
|
from distutils.core import setup
setup(name='btcp',
version='0.1.2',
      description='Python BitTorrent copy. Can be used to copy files using the BitTorrent framework',
author='Sergey Sergeev',
author_email='zhirafovod@gmail.com',
#url='',
packages=['btcp','PythonBittorrent'],
data_files=[ # http://docs.python.org/2/distutils/setupscript.html#installing-package-data
('/etc/init.d', ['files/init.d/python-btcp-daemon']),
('/usr/bin', ['files/bin/btcp-copy.py']),
('/etc/btcp', ['btcp/btcp.tac']),
],
)
|
zhirafovod/btcp
|
files/setup.btcp-daemon.py
|
Python
|
mit
| 528
|
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dms_endpoint
short_description: Creates or destroys a data migration services endpoint
description:
- Creates or destroys a data migration services endpoint,
that can be used to replicate data.
version_added: "2.9"
options:
state:
description:
- State of the endpoint.
default: present
choices: ['present', 'absent']
type: str
endpointidentifier:
description:
- An identifier name for the endpoint.
type: str
required: true
endpointtype:
description:
- Type of endpoint we want to manage.
choices: ['source', 'target']
type: str
required: true
enginename:
description:
- Database engine that we want to use, please refer to
        the AWS DMS documentation for more information on the supported
engines and their limitations.
choices: ['mysql', 'oracle', 'postgres', 'mariadb', 'aurora',
'redshift', 's3', 'db2', 'azuredb', 'sybase',
'dynamodb', 'mongodb', 'sqlserver']
type: str
required: true
username:
description:
- Username our endpoint will use to connect to the database.
type: str
password:
description:
      - Password used to connect to the database.
        This attribute can only be written; the AWS API does not return this parameter.
type: str
servername:
description:
- Servername that the endpoint will connect to.
type: str
port:
description:
- TCP port for access to the database.
type: int
databasename:
description:
- Name for the database on the origin or target side.
type: str
extraconnectionattributes:
description:
- Extra attributes for the database connection, the AWS documentation
states " For more information about extra connection attributes,
see the documentation section for your data store."
type: str
kmskeyid:
description:
- Encryption key to use to encrypt replication storage and
connection information.
type: str
tags:
description:
- A list of tags to add to the endpoint.
type: dict
certificatearn:
description:
- Amazon Resource Name (ARN) for the certificate.
type: str
sslmode:
description:
- Mode used for the SSL connection.
default: none
choices: ['none', 'require', 'verify-ca', 'verify-full']
type: str
serviceaccessrolearn:
description:
- Amazon Resource Name (ARN) for the service access role that you
want to use to create the endpoint.
type: str
externaltabledefinition:
description:
- The external table definition.
type: str
dynamodbsettings:
description:
- Settings in JSON format for the target Amazon DynamoDB endpoint
if source or target is dynamodb.
type: dict
s3settings:
description:
- S3 buckets settings for the target Amazon S3 endpoint.
type: dict
dmstransfersettings:
description:
- The settings in JSON format for the DMS transfer type of
source endpoint.
type: dict
mongodbsettings:
description:
- Settings in JSON format for the source MongoDB endpoint.
type: dict
kinesissettings:
description:
- Settings in JSON format for the target Amazon Kinesis
Data Streams endpoint.
type: dict
elasticsearchsettings:
description:
- Settings in JSON format for the target Elasticsearch endpoint.
type: dict
wait:
description:
- Whether Ansible should wait for the object to be deleted when I(state=absent).
type: bool
default: false
timeout:
description:
      - Time in seconds to wait for a resource to be deleted.
- Required when I(wait=true).
type: int
retries:
description:
      - Number of times we should retry when deleting a resource.
- Required when I(wait=true).
type: int
author:
- "Rui Moreira (@ruimoreira)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details
# Endpoint Creation
- dms_endpoint:
    state: present
endpointidentifier: 'testsource'
endpointtype: source
enginename: aurora
username: testing1
password: testint1234
servername: testing.domain.com
port: 3306
databasename: 'testdb'
sslmode: none
wait: false
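# A hedged sketch, not part of the upstream examples: removing the same
# endpoint and waiting for the deletion to finish. timeout and retries are
# required whenever wait is true; the identifier, type and engine name are
# assumed to match the endpoint created above, and the timeout/retry values
# are arbitrary.
- dms_endpoint:
    state: absent
    endpointidentifier: 'testsource'
    endpointtype: source
    enginename: aurora
    wait: true
    timeout: 60
    retries: 10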
'''
RETURN = ''' # '''
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, HAS_BOTO3, \
camel_dict_to_snake_dict, get_aws_connection_info, AWSRetry
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
backoff_params = dict(tries=5, delay=1, backoff=1.5)
@AWSRetry.backoff(**backoff_params)
def describe_endpoints(connection, endpoint_identifier):
""" checks if the endpoint exists """
try:
endpoint_filter = dict(Name='endpoint-id',
Values=[endpoint_identifier])
return connection.describe_endpoints(Filters=[endpoint_filter])
except botocore.exceptions.ClientError:
return {'Endpoints': []}
@AWSRetry.backoff(**backoff_params)
def dms_delete_endpoint(client, **params):
"""deletes the DMS endpoint based on the EndpointArn"""
if module.params.get('wait'):
return delete_dms_endpoint(client)
else:
return client.delete_endpoint(**params)
@AWSRetry.backoff(**backoff_params)
def dms_create_endpoint(client, **params):
""" creates the DMS endpoint"""
return client.create_endpoint(**params)
@AWSRetry.backoff(**backoff_params)
def dms_modify_endpoint(client, **params):
""" updates the endpoint"""
return client.modify_endpoint(**params)
@AWSRetry.backoff(**backoff_params)
def get_endpoint_deleted_waiter(client):
return client.get_waiter('endpoint_deleted')
def endpoint_exists(endpoint):
""" Returns boolean based on the existence of the endpoint
:param endpoint: dict containing the described endpoint
:return: bool
"""
return bool(len(endpoint['Endpoints']))
def get_dms_client(aws_connect_params, client_region, ec2_url):
client_params = dict(
module=module,
conn_type='client',
resource='dms',
region=client_region,
endpoint=ec2_url,
**aws_connect_params
)
return boto3_conn(**client_params)
def delete_dms_endpoint(connection):
try:
endpoint = describe_endpoints(connection,
module.params.get('endpointidentifier'))
endpoint_arn = endpoint['Endpoints'][0].get('EndpointArn')
delete_arn = dict(
EndpointArn=endpoint_arn
)
if module.params.get('wait'):
delete_output = connection.delete_endpoint(**delete_arn)
delete_waiter = get_endpoint_deleted_waiter(connection)
delete_waiter.wait(
Filters=[{
'Name': 'endpoint-arn',
'Values': [endpoint_arn]
}],
WaiterConfig={
'Delay': module.params.get('timeout'),
'MaxAttempts': module.params.get('retries')
}
)
return delete_output
else:
return connection.delete_endpoint(**delete_arn)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to delete the DMS endpoint.",
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to delete the DMS endpoint.",
exception=traceback.format_exc())
def create_module_params():
"""
Reads the module parameters and returns a dict
:return: dict
"""
endpoint_parameters = dict(
EndpointIdentifier=module.params.get('endpointidentifier'),
EndpointType=module.params.get('endpointtype'),
EngineName=module.params.get('enginename'),
Username=module.params.get('username'),
Password=module.params.get('password'),
ServerName=module.params.get('servername'),
Port=module.params.get('port'),
DatabaseName=module.params.get('databasename'),
SslMode=module.params.get('sslmode')
)
if module.params.get('EndpointArn'):
endpoint_parameters['EndpointArn'] = module.params.get('EndpointArn')
if module.params.get('certificatearn'):
endpoint_parameters['CertificateArn'] = \
module.params.get('certificatearn')
if module.params.get('dmstransfersettings'):
endpoint_parameters['DmsTransferSettings'] = \
module.params.get('dmstransfersettings')
if module.params.get('extraconnectionattributes'):
endpoint_parameters['ExtraConnectionAttributes'] =\
module.params.get('extraconnectionattributes')
if module.params.get('kmskeyid'):
endpoint_parameters['KmsKeyId'] = module.params.get('kmskeyid')
if module.params.get('tags'):
endpoint_parameters['Tags'] = module.params.get('tags')
if module.params.get('serviceaccessrolearn'):
endpoint_parameters['ServiceAccessRoleArn'] = \
module.params.get('serviceaccessrolearn')
if module.params.get('externaltabledefinition'):
endpoint_parameters['ExternalTableDefinition'] = \
module.params.get('externaltabledefinition')
if module.params.get('dynamodbsettings'):
endpoint_parameters['DynamoDbSettings'] = \
module.params.get('dynamodbsettings')
if module.params.get('s3settings'):
endpoint_parameters['S3Settings'] = module.params.get('s3settings')
if module.params.get('mongodbsettings'):
endpoint_parameters['MongoDbSettings'] = \
module.params.get('mongodbsettings')
if module.params.get('kinesissettings'):
endpoint_parameters['KinesisSettings'] = \
module.params.get('kinesissettings')
if module.params.get('elasticsearchsettings'):
endpoint_parameters['ElasticsearchSettings'] = \
module.params.get('elasticsearchsettings')
if module.params.get('wait'):
endpoint_parameters['wait'] = module.boolean(module.params.get('wait'))
if module.params.get('timeout'):
endpoint_parameters['timeout'] = module.params.get('timeout')
if module.params.get('retries'):
endpoint_parameters['retries'] = module.params.get('retries')
return endpoint_parameters
def compare_params(param_described):
"""
Compares the dict obtained from the describe DMS endpoint and
what we are reading from the values in the template We can
never compare the password as boto3's method for describing
a DMS endpoint does not return the value for
the password for security reasons ( I assume )
"""
modparams = create_module_params()
changed = False
    for paramname in modparams:
        if paramname == 'Password':
            # the describe call never returns the password, so it cannot be compared
            continue
        described_value = param_described.get(paramname)
        if described_value == modparams[paramname]:
            continue
        if str(described_value).lower() == str(modparams[paramname]).lower():
            continue
        changed = True
    return changed
def modify_dms_endpoint(connection):
try:
params = create_module_params()
return dms_modify_endpoint(connection, **params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to update DMS endpoint.",
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to update DMS endpoint.",
exception=traceback.format_exc())
def create_dms_endpoint(connection):
"""
Function to create the dms endpoint
:param connection: boto3 aws connection
:return: information about the dms endpoint object
"""
try:
params = create_module_params()
return dms_create_endpoint(connection, **params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Failed to create DMS endpoint.",
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Failed to create DMS endpoint.",
exception=traceback.format_exc())
def main():
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
endpointidentifier=dict(required=True),
endpointtype=dict(choices=['source', 'target'], required=True),
enginename=dict(choices=['mysql', 'oracle', 'postgres', 'mariadb',
'aurora', 'redshift', 's3', 'db2', 'azuredb',
'sybase', 'dynamodb', 'mongodb', 'sqlserver'],
required=True),
username=dict(),
password=dict(no_log=True),
servername=dict(),
port=dict(type='int'),
databasename=dict(),
extraconnectionattributes=dict(),
kmskeyid=dict(),
tags=dict(type='dict'),
certificatearn=dict(),
sslmode=dict(choices=['none', 'require', 'verify-ca', 'verify-full'],
default='none'),
serviceaccessrolearn=dict(),
externaltabledefinition=dict(),
dynamodbsettings=dict(type='dict'),
s3settings=dict(type='dict'),
dmstransfersettings=dict(type='dict'),
mongodbsettings=dict(type='dict'),
kinesissettings=dict(type='dict'),
elasticsearchsettings=dict(type='dict'),
wait=dict(type='bool', default=False),
timeout=dict(type='int'),
retries=dict(type='int')
)
global module
module = AnsibleAWSModule(
argument_spec=argument_spec,
required_if=[
["state", "absent", ["wait"]],
["wait", "True", ["timeout"]],
["wait", "True", ["retries"]],
],
supports_check_mode=False
)
exit_message = None
changed = False
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
aws_config_region, ec2_url, aws_connect_params = \
get_aws_connection_info(module, boto3=True)
dmsclient = get_dms_client(aws_connect_params, aws_config_region, ec2_url)
endpoint = describe_endpoints(dmsclient,
module.params.get('endpointidentifier'))
if state == 'present':
if endpoint_exists(endpoint):
module.params['EndpointArn'] = \
endpoint['Endpoints'][0].get('EndpointArn')
params_changed = compare_params(endpoint["Endpoints"][0])
if params_changed:
updated_dms = modify_dms_endpoint(dmsclient)
exit_message = updated_dms
changed = True
else:
module.exit_json(changed=False, msg="Endpoint Already Exists")
else:
dms_properties = create_dms_endpoint(dmsclient)
exit_message = dms_properties
changed = True
elif state == 'absent':
if endpoint_exists(endpoint):
delete_results = delete_dms_endpoint(dmsclient)
exit_message = delete_results
changed = True
else:
changed = False
exit_message = 'DMS Endpoint does not exist'
module.exit_json(changed=changed, msg=exit_message)
if __name__ == '__main__':
main()
|
Lujeni/ansible
|
lib/ansible/modules/cloud/amazon/dms_endpoint.py
|
Python
|
gpl-3.0
| 16,453
|
#!/usr/bin/env python
# The source code contained in this file is licensed under the MIT license.
# See LICENSE.txt in the main project directory, for more information.
# For the exact contribution history, see the git revision log.
from setuptools import setup
setup(
name = 'libkne',
version = '0.2.5dev',
license = 'MIT license',
description = 'Python library to read/write KNE files',
author = 'Felix Schwarz',
author_email = 'felix.schwarz@schwarz.eu',
url = 'http://www.schwarz.eu',
packages = ['libkne'],
)
|
FelixSchwarz/libkne
|
setup.py
|
Python
|
mit
| 595
|
# -*- coding: ISO-8859-1 -*-
# std library
from struct import unpack
# uhh I don't really like this, but there are so many constants to
# import otherwise
from constants import *
from EventDispatcher import EventDispatcher
class MidiFileParser:
"""
    The MidiFileParser is the lowest level parser that sees the data as
    midi data. It generates events that get triggered on the outstream.
"""
def __init__(self, raw_in, outstream):
"""
raw_data is the raw content of a midi file as a string.
"""
# internal values, don't mess with 'em directly
self.raw_in = raw_in
self.dispatch = EventDispatcher(outstream)
# Used to keep track of stuff
self._running_status = None
def parseMThdChunk(self):
"Parses the header chunk"
raw_in = self.raw_in
header_chunk_type = raw_in.nextSlice(4)
        header_chunk_size = raw_in.readBew(4)
# check if it is a proper midi file
if header_chunk_type != 'MThd':
            raise TypeError("It is not a valid midi file!")
# Header values are at fixed locations, so no reason to be clever
self.format = raw_in.readBew(2)
self.nTracks = raw_in.readBew(2)
self.division = raw_in.readBew(2)
        # Theoretically a header larger than 6 bytes can exist,
        # although none has been seen in the wild.
        # Correctly skip any unknown extra data if it does occur.
        if header_chunk_size > 6:
            raw_in.moveCursor(header_chunk_size-6)
# call the header event handler on the stream
self.dispatch.header(self.format, self.nTracks, self.division)
def parseMTrkChunk(self):
"Parses a track chunk. This is the most important part of the parser."
# set time to 0 at start of a track
self.dispatch.reset_time()
dispatch = self.dispatch
raw_in = self.raw_in
# Trigger event at the start of a track
dispatch.start_of_track(self._current_track)
# position cursor after track header
raw_in.moveCursor(4)
# unsigned long is 4 bytes
tracklength = raw_in.readBew(4)
track_endposition = raw_in.getCursor() + tracklength # absolute position!
while raw_in.getCursor() < track_endposition:
# find relative time of the event
time = raw_in.readVarLen()
dispatch.update_time(time)
# be aware of running status!!!!
peak_ahead = raw_in.readBew(move_cursor=0)
if (peak_ahead & 0x80):
# the status byte has the high bit set, so it
# was not running data but proper status byte
status = raw_in.readBew()
if not peak_ahead in [META_EVENT, SYSTEM_EXCLUSIVE]:
self._running_status = status
else:
# use that darn running status
status = self._running_status
# could it be illegal data ?? Do we need to test for that?
            # I need more example midi files to be sure.
# Also, while I am almost certain that no realtime
# messages will pop up in a midi file, I might need to
# change my mind later.
# we need to look at nibbles here
hi_nible, lo_nible = status & 0xF0, status & 0x0F
# match up with events
# Is it a meta_event ??
# these only exists in midi files, not in transmitted midi data
# In transmitted data META_EVENT (0xFF) is a system reset
if status == META_EVENT:
meta_type = raw_in.readBew()
meta_length = raw_in.readVarLen()
meta_data = raw_in.nextSlice(meta_length)
if not meta_length: return
dispatch.meta_event(meta_type, meta_data)
if meta_type == END_OF_TRACK: return
# Is it a sysex_event ??
elif status == SYSTEM_EXCLUSIVE:
# ignore sysex events
sysex_length = raw_in.readVarLen()
# don't read sysex terminator
sysex_data = raw_in.nextSlice(sysex_length-1)
# only read last data byte if it is a sysex terminator
                # It should always be there, but better safe than sorry
if raw_in.readBew(move_cursor=0) == END_OFF_EXCLUSIVE:
eo_sysex = raw_in.readBew()
dispatch.sysex_event(sysex_data)
# the sysex code has not been properly tested, and might be fishy!
# is it a system common event?
elif hi_nible == 0xF0: # Hi bits are set then
data_sizes = {
MTC:1,
SONG_POSITION_POINTER:2,
SONG_SELECT:1,
}
data_size = data_sizes.get(hi_nible, 0)
common_data = raw_in.nextSlice(data_size)
common_type = lo_nible
dispatch.system_common(common_type, common_data)
# Oh! Then it must be a midi event (channel voice message)
else:
data_sizes = {
PATCH_CHANGE:1,
CHANNEL_PRESSURE:1,
NOTE_OFF:2,
NOTE_ON:2,
AFTERTOUCH:2,
CONTINUOUS_CONTROLLER:2,
PITCH_BEND:2,
}
data_size = data_sizes.get(hi_nible, 0)
channel_data = raw_in.nextSlice(data_size)
event_type, channel = hi_nible, lo_nible
dispatch.channel_messages(event_type, channel, channel_data)
def parseMTrkChunks(self):
"Parses all track chunks."
for t in range(self.nTracks):
self._current_track = t
self.parseMTrkChunk() # this is where it's at!
self.dispatch.eof()
if __name__ == '__main__':
import sys
# get data
test_file = 'test/midifiles/minimal.mid'
test_file = 'test/midifiles/cubase-minimal.mid'
test_file = 'test/midifiles/Lola.mid'
test_file = sys.argv[1]
# f = open(test_file, 'rb')
# raw_data = f.read()
# f.close()
#
#
# # do parsing
from MidiToText import MidiToText
from RawInstreamFile import RawInstreamFile
midi_in = MidiFileParser(RawInstreamFile(test_file), MidiToText())
midi_in.parseMThdChunk()
midi_in.parseMTrkChunks()
|
west2554/fofix
|
src/midi/MidiFileParser.py
|
Python
|
gpl-2.0
| 6,644
|
"""
To create an Attribute Editor template using python, do the following:
1. create a subclass of `uitypes.AETemplate`
2. set its ``_nodeType`` class attribute to the name of the desired node type, or name the class using the
convention ``AE<nodeType>Template``
3. import the module
AETemplates which do not meet one of the two requirements listed in step 2 will be ignored. To ensure that your
Template's node type is being detected correctly, use the ``AETemplate.nodeType()`` class method::
import AETemplates
AETemplates.AEmib_amb_occlusionTemplate.nodeType()
As a convenience, when pymel is imported it will automatically import the module ``AETemplates``, if it exists,
thereby causing any AETemplates within it or its sub-modules to be registered. Be sure to import pymel
or modules containing your ``AETemplate`` classes before opening the Attribute Editor for the node types in question.
To check which python templates are loaded::
from pymel.core.uitypes import AELoader
print AELoader.loadedTemplates()
The classes below define localized base templates that concrete node-type templates can subclass;
a minimal example subclass is sketched at the end of this module.
"""
from pymel.core import *
class LocalizedTemplate(ui.AETemplate):
"automatically apply language localizations to template arguments"
def _applyLocalization(self, name):
if name is not None and len(name)>2 and name[0] == 'k' and name[1].isupper():
return mel.uiRes('m_' + self.__class__.__name__ + '.' + name)
return name
def addControl(self, control, label=None, **kwargs):
label = self._applyLocalization(label)
ui.AETemplate.addControl(self, control, label=label, **kwargs)
def beginLayout(self, name, collapse=True):
name = self._applyLocalization(name)
ui.AETemplate.beginLayout(self, name, collapse=collapse)
class mmbTemplateBase(LocalizedTemplate):
def __init__(self, nodeName):
LocalizedTemplate.__init__(self,nodeName)
self.beginScrollLayout()
self.buildBody(nodeName)
self.endScrollLayout()
def AEswatchDisplay(self, nodeName):
mel.AEswatchDisplay(nodeName)
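# A minimal sketch, not part of the original file: a concrete template for a
# hypothetical node type 'myNode', following the naming convention described in
# the module docstring. The attribute name 'myAttr' and its label are assumptions.
class AEmyNodeTemplate(mmbTemplateBase):
    def buildBody(self, nodeName):
        # lay out the assumed attribute in its own collapsible section
        self.beginLayout('My Attributes', collapse=False)
        self.addControl('myAttr', label='My Attribute')
        self.endLayout()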
|
yaoyansi/mymagicbox
|
common/mymagicbox/AETemplateBase.py
|
Python
|
mit
| 2,176
|