repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
klmitch/nova | nova/conf/novnc.py | Python | apache-2.0 | 2,021 | 0 | # Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Configuration options for the noVNC (websocket-proxy) console service.
# They are registered under the [DEFAULT] group by the helpers below.
novnc_opts = [
    # Optional path used to record websocket traffic for debugging/replay.
    cfg.StrOpt('record',
               help="""
Filename that will be used for storing websocket frames received
and sent by a proxy service (like VNC, spice, serial) running on this host.
If this is not set, no recording will be done.
"""),
    # Run the proxy as a daemon instead of in the foreground.
    cfg.BoolOpt('daemon',
                default=False,
                help="Run as a background process."),
    # Require TLS for every client connection (uses 'cert' and 'key').
    cfg.BoolOpt('ssl_only',
                default=False,
                help="""
Disallow non-encrypted connections.
Related options:
* cert
* key
"""),
    # Interpret the source host address as IPv6.
    cfg.BoolOpt('source_is_ipv6',
                default=False,
                help="Set to True if source host is addressed with IPv6."),
    # TLS certificate used when 'ssl_only' is enabled.
    cfg.StrOpt('cert',
               default='self.pem',
               help="""
Path to SSL certificate file.
Related options:
* key
* ssl_only
* [console] ssl_ciphers
* [console] ssl_minimum_version
"""),
    # Private key matching 'cert' when it is stored separately.
    cfg.StrOpt('key',
               help="""
SSL key file (if separate from cert).
Related options:
* cert
"""),
    # Static web content (e.g. the noVNC/spice HTML client) to serve.
    cfg.StrOpt('web',
               default='/usr/share/spice-html5',
               help="""
Path to directory with content which will be served by a web server.
"""),
]
def register_opts(conf):
    """Register the noVNC options on *conf* as regular config options."""
    conf.register_opts(novnc_opts)


def register_cli_opts(conf):
    """Register the noVNC options on *conf* as command-line options."""
    conf.register_cli_opts(novnc_opts)


def list_opts():
    """Return this module's options keyed by their config group name."""
    return {'DEFAULT': novnc_opts}
|
sodzawic/tk | doc/extcap_example.py | Python | gpl-2.0 | 9,150 | 0.036066 | #!/usr/bin/env python
# Copyright 2014 Roland Knall <rknall [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This is a generic example, which produces pcap packages every n seconds, and
is configurable via extcap options.
@note
{
To use this script on Windows, please generate an extcap_example.bat inside
the extcap folder, with the following content:
-------
@echo off
<Path to python interpreter> <Path to script file> $*
-------
Windows is not able to execute Python scripts directly, which also goes for all
other script-based formats besides VBScript
}
"""
import os
import sys
import signal
import re
import argparse
import time
import struct
import binascii
from threading import Thread
# Exit/status codes used by the extcap command-line interface.
ERROR_USAGE = 0
ERROR_ARG = 1
ERROR_INTERFACE = 2
ERROR_FIFO = 3

# Set to True by signalHandler() to ask the capture loop to stop.
doExit = False
globalinterface = 0


def signalHandler(signal, frame):
    """Request a clean shutdown of the capture loop (SIGINT/SIGTERM hook)."""
    global doExit
    doExit = True
#### EXTCAP FUNCTIONALITY
"""@brief Extcap configuration
This method prints the extcap configuration, which will be picked up by the
interface in Wireshark to present a interface specific configuration for
this extcap plugin
"""
def extcap_config(interface):
    """Print the extcap configuration records for *interface*.

    Wireshark parses these "arg"/"value" lines to build the per-interface
    configuration dialog for this extcap plugin.
    """
    option_args = (
        (0, '--delay', 'Time delay', 'Time delay between packages', 'integer', '{range=1,15}'),
        (1, '--message', 'Message', 'Package message content', 'string', ''),
        (2, '--verify', 'Verify', 'Verify package content', 'boolflag', ''),
        (3, '--remote', 'Remote Channel', 'Remote Channel Selector', 'selector', ''),
    )
    option_values = (
        (3, "if1", "Remote1", "true"),
        (3, "if2", "Remote2", "false"),
    )
    for entry in option_args:
        print("arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s" % entry)
    for entry in option_values:
        print("value {arg=%d}{value=%s}{display=%s}{default=%s}" % entry)
def extcap_interfaces():
    """List the capture interfaces this extcap plugin provides."""
    print ("interface {value=example1}{display=Example interface usage for extcap}")
def extcap_dlts(interface):
    """Print the data-link-type record for the given interface."""
    if interface == 'example1':
        print("dlt {number=147}{name=USER0}{display=Demo Implementation for Extcap}")
"""
### FAKE DATA GENERATOR
Extcap capture routine
This routine simulates a capture by any kind of user defined device. The parameters
are user specified and must be handled by the extcap.
The data captured inside this routine is fake, so change this routine to present
your own input data, or call your own capture program via Popen for example. See
for more details.
"""
def unsigned(n):
    """Coerce *n* to its unsigned 32-bit value (two's-complement wrap)."""
    return int(n) % 0x100000000
def append_bytes(ba, blist):
    """Append every byte of *blist* to the bytearray *ba* and return *ba*.

    Uses bytearray.extend, the idiomatic (and faster) equivalent of the
    original element-by-element append loop.
    """
    ba.extend(blist)
    return ba
def pcap_fake_header():
    """Build the 24-byte little-endian pcap global header.

    Layout: magic 0xa1b2c3d4, version 2.4, thiszone 0, sigfigs 0,
    snaplen 0xffff, linktype 1 (Ethernet).
    """
    return bytearray(
        struct.pack('<LHHIILL',
                    0xa1b2c3d4,  # magic number
                    2,           # pcap major version
                    4,           # pcap minor version
                    0,           # timezone offset
                    0,           # accuracy of timestamps
                    0x0000ffff,  # max length of a captured frame
                    1))          # data link type: Ethernet
# Calculates and returns the IP checksum based on the given IP Header
def ip_checksum(iph):
    """Compute the RFC 791/1071 header checksum of an IPv4 header.

    *iph* is the header as a hex string; whitespace is ignored.  The header
    is summed as 16-bit words, the carry is folded back in once, and the
    result is the one's complement of the low 16 bits.

    Bug fix: the original called an undefined helper ``splitN``; the word
    split is now done inline.
    """
    hexstr = ''.join(iph.split())
    # Split into groups of 4 hex digits, i.e. 16-bit words.
    words = [hexstr[i:i + 4] for i in range(0, len(hexstr), 4)]
    csum = 0
    for word in words:
        csum += int(word, base=16)
    csum += (csum >> 16)           # fold the carry back in (single pass)
    csum = csum & 0xFFFF ^ 0xFFFF  # mask to 16 bits, take one's complement
    return csum
def pcap_fake_package ( message ):
    """Build one fake pcap record: record header + Ethernet + IPv4 + *message*.

    The MAC addresses are zeroed, the IPv4 header points 127.0.0.1 at itself
    and *message* becomes the raw IP payload.
    """
    pcap = bytearray()
    #length = 14 bytes [ eth ] + 20 bytes [ ip ] + messagelength
    caplength = len(message) + 14 + 20
    timestamp = int(time.time())
    # pcap per-record header
    pcap = append_bytes(pcap, struct.pack('<L', unsigned(timestamp) ) ) # timestamp seconds
    pcap = append_bytes(pcap, struct.pack('<L', 0x00 ) ) # timestamp nanoseconds
    pcap = append_bytes(pcap, struct.pack('<L', unsigned(caplength) ) ) # length captured
    pcap = append_bytes(pcap, struct.pack('<L', unsigned(caplength) ) ) # length in frame
    # ETH (all-zero MAC addresses)
    pcap = append_bytes(pcap, struct.pack('h', 0 )) # source mac
    pcap = append_bytes(pcap, struct.pack('h', 0 )) # source mac
    pcap = append_bytes(pcap, struct.pack('h', 0 )) # source mac
    pcap = append_bytes(pcap, struct.pack('h', 0 )) # dest mac
    pcap = append_bytes(pcap, struct.pack('h', 0 )) # dest mac
    pcap = append_bytes(pcap, struct.pack('h', 0 )) # dest mac
    pcap = append_bytes(pcap, struct.pack('<h', unsigned(8) )) # ethertype 0x0800 (IP), already byte-swapped
    # IP
    pcap = append_bytes(pcap, struct.pack('b', int ( '45', 16) )) # IP version 4, IHL 5
    pcap = append_bytes(pcap, struct.pack('b', int ( '0', 16) )) # DSCP/ECN
    pcap = append_bytes(pcap, struct.pack('>H', unsigned(len(message)+20) )) # length of data + payload
    pcap = append_bytes(pcap, struct.pack('<H', int ( '0', 16) )) # Identification
    pcap = append_bytes(pcap, struct.pack('b', int ( '40', 16) )) # Don't fragment
    pcap = append_bytes(pcap, struct.pack('b', int ( '0', 16) )) # Fragment Offset
    pcap = append_bytes(pcap, struct.pack('b', int ( '40', 16) )) # TTL (64)
    pcap = append_bytes(pcap, struct.pack('B', 0xFE )) # Protocol 0xFE (reserved/experimental)
    pcap = append_bytes(pcap, struct.pack('<H', int ( '0000', 16) )) # Checksum (left zero)
    pcap = append_bytes(pcap, struct.pack('>L', int ( '7F000001', 16) )) # Source IP 127.0.0.1
    pcap = append_bytes(pcap, struct.pack('>L', int ( '7F000001', 16) )) # Dest IP 127.0.0.1
    pcap = append_bytes(pcap, message)
    return pcap
def extcap_capture(interface, fifo, delay, verify, message, remote):
    """Run the fake capture loop, writing pcap data to *fifo* until stopped.

    A pcap global header is written first, then one fake packet every
    *delay* seconds (default 5) until SIGINT/SIGTERM or a broken pipe.
    """
    global doExit
    signal.signal(signal.SIGINT, signalHandler)
    signal.signal(signal.SIGTERM , signalHandler)
    tdelay = delay if delay != 0 else 5
    try:
        os.stat(fifo)
    except OSError:
        doExit = True
        print ( "Fifo does not exist, exiting!" )
    # Unbuffered binary handle onto the fifo Wireshark reads from.
    fh = open(fifo, 'w+b', 0 )
    fh.write (pcap_fake_header())
    while doExit == False:
        # Payload: remote id, 4-hex-digit message length, message, verify flag.
        out = str( "%s|%04X%s|%s" % ( remote.strip(), len(message), message, verify ) )
        try:
            fh.write (pcap_fake_package(out))
            time.sleep(tdelay)
        except IOError:
            # Reader closed the fifo; stop capturing.
            doExit = True
    fh.close()
####
def usage():
    """Print a one-line summary of the supported command line options."""
    print ( "Usage: %s <--extcap-interfaces | --extcap-dlts | --extcap-interface | --extcap-config | --capture | --fifo>" % sys.argv[0] )
if __name__ == '__main__':
interface = ""
# Capture options
delay = 0
message = ""
parser = argparse.ArgumentParser(
prog="Extcap Example",
description="Extcap example program for python"
)
# Extcap Arguments
parser.add_argument("--capture", help="Start the capture routine", action="store_true" )
parser.add_argument("--extcap-interfaces", help="Provide a list of interfaces to capture from", action="store_true")
parser.add_argument("--extcap-interface", help="Provide the interface to capture from")
parser.add_argument("--extcap-dlts", help="Provide a list of dlts for the given interface", action="store_true")
parser.add_argument("--extcap-config", help="Provide a list of configurations for the given interface", action="store_true")
parser.add_argument("--fifo", help="Use together with capture to provide the fifo to dump data to")
# Interface Arguments
parser.add_argument("--verify", help="Demonstrates a verification bool flag", action="store_true" )
parser.add_argument("--delay", help="Demonstrates an integer variable", type=int, default=0, choices=[0, 1, 2, 3, 4, 5] )
parser.add_argument("--remote", help="Demonstrates a selector choice", default="if1", choices=["if1", "if2"] )
parser.ad |
googleapis/python-dialogflow | samples/generated_samples/dialogflow_generated_dialogflow_v2_session_entity_types_create_session_entity_type_async.py | Python | apache-2.0 | 1,957 | 0.002555 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSessionEntityType
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_SessionEntityTypes_CreateSessionEntityType_async]
from google.cloud import dialogflow_v2
async def sample_create_session_entity_type():
    """Create a session entity type with the Dialogflow v2 async client.

    Builds a SessionEntityType, wraps it in a CreateSessionEntityTypeRequest
    and awaits the RPC.  (Fixes extraction-garbled lines in the generated
    snippet; logic is unchanged.)
    """
    # Create a client
    client = dialogflow_v2.SessionEntityTypesAsyncClient()

    # Initialize request argument(s)
    session_entity_type = dialogflow_v2.SessionEntityType()
    session_entity_type.name = "name_value"
    session_entity_type.entity_override_mode = "ENTITY_OVERRIDE_MODE_SUPPLEMENT"
    session_entity_type.entities.value = "value_value"
    session_entity_type.entities.synonyms = ['synonyms_value_1', 'synonyms_value_2']

    request = dialogflow_v2.CreateSessionEntityTypeRequest(
        parent="parent_value",
        session_entity_type=session_entity_type,
    )

    # Make the request
    response = await client.create_session_entity_type(request=request)

    # Handle the response
    print(response)
# [END dialogflow_generated_dialogflow_v2_SessionEntityTypes_CreateSessionEntityType_async]
|
cctaylor/googleads-python-lib | examples/dfp/v201502/activity_group_service/get_active_activity_groups.py | Python | apache-2.0 | 2,353 | 0.008075 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all active activity groups.
To create activity groups, run create_activity_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: ActivityGroupService.getActivityGroupsByStatement
"""
# Example maintainers.
__author__ = ('Nicholas Chen',
              'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Fetch and print every ACTIVE activity group, paging through results.

    Args:
      client: an initialized dfp.DfpClient.
    """
    # Initialize appropriate service.
    activity_group_service = client.GetService('ActivityGroupService',
                                               version='v201502')

    # Create statement object to only select active activity groups.
    values = [{
        'key': 'status',
        'value': {
            'xsi_type': 'TextValue',
            'value': 'ACTIVE'
        }
    }]
    query = 'WHERE status = :status'

    # Create a filter statement.
    statement = dfp.FilterStatement(query, values)

    # Get activity groups by statement, one page at a time.
    while True:
        response = activity_group_service.getActivityGroupsByStatement(
            statement.ToStatement())
        if 'results' in response:
            # Display results.
            for activity_group in response['results']:
                print ('Activity group with ID \'%s\' and name \'%s\' was found.'
                       % (activity_group['id'], activity_group['name']))
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break

    # Parenthesized form works on both Python 2 and 3 (fixes the Py2-only
    # print statement the original used here).
    print ('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
    # Initialize client object from the googleads.yaml credentials file.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
|
bidossessi/brss | brss/view.py | Python | gpl-2.0 | 7,267 | 0.004679 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# view.py
#
# Copyright 2011 Bidossessi Sodonon <bidossessi.sodonon@yahoo.fr>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import GObject
from gi.repository import WebKit
from brss.common import make_date
class View (Gtk.VBox, GObject.GObject):
    """
    The feedview displays the currently selected feed item.
    It redirects clicks to the user's preferred browser and
    allows a basic navigation between feed items.
    """
    # Custom signals emitted by this widget.
    __gsignals__ = {
        "article-loaded" : (
            GObject.SignalFlags.RUN_FIRST,
            None,
            ()),
        "link-clicked" : (
            GObject.SignalFlags.RUN_FIRST,
            None,
            (GObject.TYPE_STRING,)),
        "link-hovered-in" : (
            GObject.SignalFlags.RUN_FIRST,
            None,
            (GObject.TYPE_STRING,)),
        "link-hovered-out" : (
            GObject.SignalFlags.RUN_FIRST,
            None,
            ()),
    }

    def __init__(self, logger):
        """Build the widget tree: title row, status icons and WebKit view."""
        self.log = logger
        Gtk.VBox.__init__(self, spacing=3)
        GObject.GObject.__init__(self)
        # top navi
        self.set_no_show_all(True)
        tbox = Gtk.HBox(spacing=3)
        # navigation buttons
        self.link_button = Gtk.LinkButton('', label='Article Title')
        self.link_button.set_relief(Gtk.ReliefStyle.NONE)
        tbox.pack_start(self.link_button, True, True, 0)
        self.file_img = Gtk.Image().new_from_stock('gtk-file', Gtk.IconSize.BUTTON)
        tbox.pack_start(self.file_img, False, False, 0)
        self.star_img = Gtk.Image().new_from_stock('gtk-about', Gtk.IconSize.BUTTON)
        tbox.pack_start(self.star_img, False, False, 0)
        self.feed_img = Gtk.Image().new_from_stock('missing', Gtk.IconSize.BUTTON)
        tbox.pack_start(self.feed_img, False, False, 0)
        # webkit view
        self.feedview = WebKit.WebView()
        self.feedview.set_full_content_zoom(True)
        self.feedview.connect("navigation-policy-decision-requested", self.__override_clicks)
        self.feedview.connect("hovering-over-link", self.__hover_webview)
        self.link_button.connect("enter-notify-event", self.__hover_link, "in")
        self.link_button.connect("leave-notify-event", self.__hover_link)
        # containers
        tal = Gtk.Alignment.new(0.5, 0.5, 1, 1)
        tal.show()
        tal.add(tbox)
        tbox.show()
        msc = Gtk.ScrolledWindow()
        msc.show()
        msc.set_shadow_type(Gtk.ShadowType.IN)
        msc.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        msc.add(self.feedview)
        mal = Gtk.Alignment.new(0.5, 0.5, 1, 1)
        mal.show()
        mal.add(msc)
        self.pack_start(tal, False, False, 0)
        self.pack_start(mal, True, True, 0)
        GObject.type_register(View)
        # Only URIs listed here may be followed inside the embedded view.
        self.valid_links = ['file:']
        self.show()
        self.link_button.show()
        self.feedview.show()
        self.feed_img.show()
        self.file_img.show()
        self.__art_id = None

    def __repr__(self):
        return "View"

    def show_article(self, art_tuple):
        """Load an (article-dict, valid-links) tuple into the view."""
        art, links = art_tuple
        self.log.debug("{0}: loading article {1}".format(self, art['id']))
        self.__art_id = art['id']
        try:
            self.log.debug('{0}: Showing feed icon {1}'.format(self, art['feed_id']))
            self.feed_img.set_from_stock(art['feed_id'], Gtk.IconSize.BUTTON)
        except:
            # Missing/unregistered feed icon: fall back to the placeholder.
            self.log.debug('{0}: Showing default feed icon'.format(self))
            self.feed_img.set_from_stock('missing', Gtk.IconSize.BUTTON)
        self.star_this(art)
        self.valid_links = links
        self.valid_links.append("file:")
        self.link_button.set_label("[{0}] - {1}".format(
            make_date(art['date']), art['title'].encode('utf-8')))
        self.link_button.set_uri(art['link'])
        # A stray Python-2 debug "print art['content']" used to live here;
        # the content is now routed to the debug log instead of stdout.
        self.log.debug(art['content'])
        self.feedview.load_html_string(art['content'], "file:///")
        self.emit('article-loaded')

    def __hover_webview(self, caller, alt, url):
        """Relay WebKit hover events as link-hovered-in/out signals."""
        if url:
            self.emit('link-hovered-in', url)
        else:
            self.emit('link-hovered-out')

    def __hover_link(self, button, event, io="out"):
        """Relay title-button enter/leave events as hover signals."""
        if io == "in":
            self.emit('link-hovered-in', button.get_uri())
        else:
            self.emit('link-hovered-out')

    def __override_clicks(self, frame, request, navigation_action, policy_decision, data=None):
        """Allow in-view navigation only for whitelisted links."""
        uri = navigation_action.get_uri()
        if uri in self.valid_links:
            return 1  # Let browse
        else:
            #self.emit('link-clicked', uri)
            return 0  # Don't let browse

    def clear(self, caller):
        """Show the placeholder page used when no article is selected."""
        self.link_button.set_label("No Article")
        nd = """
        <html>
        <h1>No Article to show</h1>
        <p>The item you selected doesn't seem to have any articles available.</p>
        <p><em>If this doesn't change after a while, maybe you should check
        your feeds' validity.</em></p>
        </html>"""
        self.feedview.load_string(nd, "text/html", "utf-8", "file:")
        #~ self.hide()

    def no_engine(self, caller):
        """Show the error page displayed when the feed engine is down."""
        self.link_button.set_label("No Engine")
        nd = """
        <html>
        <head>
        <style>
        .red {color:red}
        </style>
        </head>
        <h1>The engine is not responding</h1>
        <p>For some reason, BRss' <strong class="red">Feed Engine</strong> is not responding.</p>
        <p>Please try and restart the engine.<br/>
        You can then reconnect the reader from the <strong>Feeds</strong> menu.</p>
        <p><em>Tip: You can also simply restart the reader, as it tries
        to launch the engine at startup.</em></p>
        </html>"""
        self.feedview.load_string(nd, "text/html", "utf-8", "file:")

    def star_this(self, article):
        """Toggle the star/file icons to reflect *article*'s starred state."""
        if article['id'] == self.__art_id:
            # starred articles feedback
            if article['starred'] == True:
                self.file_img.hide()
                self.star_img.show()
            else:
                self.file_img.show()
                self.star_img.hide()

    # NOTE(review): no 'link-hovered' signal is declared in __gsignals__ --
    # this handler name may have been meant as do_link_hovered_in; confirm
    # before renaming, as GObject resolves handlers by name.
    def do_link_hovered(self, url):
        self.log.debug("{0}: Hovered on {1}".format(self, url))

    def do_link_clicked(self, url):
        self.log.debug("{0}: Clicked on {1}".format(self, url))
|
tensorflow/probability | tensorflow_probability/python/distributions/beta_quotient_test.py | Python | apache-2.0 | 8,738 | 0.003319 | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Dependency imports
import numpy as np
from scipy import special as sp_special
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
class _BetaQuotientTest(object):
# Since the BetaQuotient distribution is the ratio distribution of two Betas,
# we should be able to approximate the density through quadrature.
def _compute_logpdf_quadrature(self, alpha0, beta0, alpha1, beta1, z):
def _log_integrand(y):
# Pad the axes to allow for vectorized computation
return (
np.log(y) + sp_stats.beta.logpdf(
y * z[..., np.newaxis],
alpha0[..., np.newaxis],
beta0[..., np.newaxis]) +
sp_stats.beta.logpdf(
y,
alpha1[..., np.newaxis],
beta1[..., np.newaxis]))
roots, weights = sp_special.roots_legendre(8000)
# We need to account for the change of interval from [-1, 1] to [0, 1]
shifted_roots = 0.5 * roots + 0.5
return -np.log(2.) + sp_special.logsumexp(
_log_integrand(shifted_roots) + np.log(weights), axis=-1)
  def testBetaQuotientShape(self):
    """Batch shape should follow the shared parameter shape; event is scalar."""
    a = tf.ones([5], dtype=self.dtype)
    b = tf.ones([5], dtype=self.dtype)
    c = tf.ones([5], dtype=self.dtype)
    d = tf.ones([5], dtype=self.dtype)
    beta_quotient = tfd.BetaQuotient(a, b, c, d, validate_args=True)
    self.assertEqual(self.evaluate(beta_quotient.batch_shape_tensor()), (5,))
    self.assertEqual(beta_quotient.batch_shape, tf.TensorShape([5]))
    self.assertAllEqual(self.evaluate(beta_quotient.event_shape_tensor()),
                        [])
    self.assertEqual(beta_quotient.event_shape, tf.TensorShape([]))
  def testBetaQuotientShapeBroadcast(self):
    """The four parameter shapes should broadcast into the batch shape."""
    a = tf.ones([3, 1, 1, 1], dtype=self.dtype)
    b = tf.ones([1, 2, 1, 1], dtype=self.dtype)
    c = tf.ones([1, 1, 5, 1], dtype=self.dtype)
    d = tf.ones([1, 1, 1, 7], dtype=self.dtype)
    beta_quotient = tfd.BetaQuotient(a, b, c, d, validate_args=True)
    self.assertAllEqual(
        self.evaluate(beta_quotient.batch_shape_tensor()), (3, 2, 5, 7))
    self.assertEqual(beta_quotient.batch_shape, tf.TensorShape([3, 2, 5, 7]))
    self.assertAllEqual(
        self.evaluate(beta_quotient.event_shape_tensor()), [])
    self.assertEqual(beta_quotient.event_shape, tf.TensorShape([]))
def testInvalidConcentration(self):
with self.assertRaisesOpError('`concentration` must be positive'):
beta_quotient = tfd.BetaQuotient(-1., 1., 1., 1., validate_args=True)
self.evaluate(beta_quotient.sample())
with self.assertRaisesOpError('`concentration` must be positive'):
beta_quotient = tfd.BetaQuotient(1., -1., 1., 1., validate_args=True)
self.evaluate(beta_quotient.sample())
with self.assertRaisesOpError('`concentration` must be positive'):
beta_quotient = tfd.BetaQuotient(1., 1., -1., 1., validate_args=True)
self.evaluate(beta_quotient.sample())
with self.assertRaisesOpError('`concentration` must be positive'):
beta_quotient = tfd.BetaQuotient(1., 1., 1., -1., validate_args=True)
self.evaluate(beta_quotient.sample())
  def testLogPdf(self):
    """log_prob should agree with the quadrature approximation of the pdf."""
    # Keep the `concentration`'s above 1 since quadrature has problems
    # otherwise.
    a = np.array([3., 2., 8.], dtype=self.dtype)[..., np.newaxis]
    b = np.array([1.8, 2.4, 3.2], dtype=self.dtype)[..., np.newaxis]
    c = np.array([5.5, 2., 4.3], dtype=self.dtype)[..., np.newaxis]
    d = np.array([1.6, 2.9, 6.4], dtype=self.dtype)[..., np.newaxis]
    beta_quotient = tfd.BetaQuotient(a, b, c, d, validate_args=True)
    x = np.linspace(0.1, 10., 50).astype(self.dtype)
    self.assertAllClose(
        self._compute_logpdf_quadrature(a, b, c, d, x),
        self.evaluate(beta_quotient.log_prob(x)), rtol=1e-4)
  def testLogPdfBroadcast(self):
    """log_prob should broadcast across the four parameter batch axes."""
    # Keep the `concentration`'s above 1 since quadrature has problems
    # otherwise.
    a = tf.random.uniform(
        shape=[2, 1, 1, 1],
        minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
    b = tf.random.uniform(
        shape=[1, 3, 1, 1],
        minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
    c = tf.random.uniform(
        shape=[1, 1, 5, 1],
        minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
    d = tf.random.uniform(
        shape=[1, 1, 1, 7],
        minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
    beta_quotient = tfd.BetaQuotient(a, b, c, d, validate_args=True)
    x = np.linspace(0.1, 5., 7).astype(self.dtype)
    log_prob, a, b, c, d = self.evaluate(
        [beta_quotient.log_prob(x), a, b, c, d])
    self.assertAllClose(
        self._compute_logpdf_quadrature(a, b, c, d, x),
        log_prob, rtol=4e-4)
def testBetaQuotientSample(self):
a = tf.random.uniform(
shape=[2, 1, 1, 1],
minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
b = tf.random.uniform(
shape=[1, 3, 1, 1],
minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
c = tf.random.uniform(
shape=[1, 1, 5, 1],
minval=1., maxval=5., seed=test_util.test_seed(), | dtype=self.dtype)
d = tf.random.uniform(
shape=[1, 1, 1, 7],
minval=1., maxval=5., seed=test_util.test_seed(), dtype=self.dtype)
beta_quotient = tfd.BetaQuotient(a, b, c, d, validate_args=True)
# TODO(b/179283344): Increase this to 3e5 when CPU-only gamma s | ampler is
# fixed.
n = int(3e4)
samples = beta_quotient.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (n, 2, 3, 5, 7))
self.assertFalse(np.any(sample_values < 0.0))
self.evaluate(
st.assert_true_mean_equal_by_dkwm(
samples,
low=self.dtype(0.),
high=self.dtype(np.inf),
expected=beta_quotient.mean(),
false_fail_rate=self.dtype(1e-6)))
  @test_util.numpy_disable_gradient_test
  def testBetaQuotientFullyReparameterized(self):
    """Gradients w.r.t. all four concentrations should exist and be nonzero."""
    a = tf.constant(1.0)
    b = tf.constant(2.0)
    c = tf.constant(3.0)
    d = tf.constant(4.0)
    _, [grad_a, grad_b, grad_c, grad_d] = tfp.math.value_and_gradient(
        lambda a_, b_, c_, d_: tfd.BetaQuotient(  # pylint: disable=g-long-lambda
            a_, b_, c_, d_, validate_args=True).sample(
                10, seed=test_util.test_seed()), [a, b, c, d])
    self.assertIsNotNone(grad_a)
    self.assertIsNotNone(grad_b)
    self.assertIsNotNone(grad_c)
    self.assertIsNotNone(grad_d)
    self.assertNotAllZero(grad_a)
    self.assertNotAllZero(grad_b)
    self.assertNotAllZero(grad_c)
    self.assertNotAllZero(grad_d)
  def testBetaQuotientMeanNoNanStats(self):
    """With allow_nan_stats=False an undefined mean should raise."""
    # Mean will not be defined for the first entry.
    a = np.array([2.0, 3.0, 2.5])
    b = np.array([2.0, 4.0, 5.0])
    c = np.array([1.0, 3.0, 2.5])
    d = np.array([3.0, 4.0, 5.0])
    beta_quotient = tfd.BetaQuotient(
        a, b, c, d, allow_nan_stats=False, validate_args=True)
    with self.assertRaisesOpError('mean undefined'):
      self.evaluate(beta_quotient.mean())
def testBetaQuotientMeanAllowNanStats(self):
# Mean will not be defined for the first entry.
a = np.array([2.0, 3.0, 2.5])
b = np.array([2.0, 4.0, 5.0])
c = np.array([1.0, 3.0, 2.5])
d = n |
Elyjax/Snake | main.py | Python | gpl-2.0 | 2,389 | 0.002093 | # Importe la biblioteque pygame
import pygame
# La syntaxe from x import y permet de ne pas devoir ecrire x.y mais juste y
# L'etoile indique que l'on importe tout
from constantes import *
from Sauvegarde import *
from jouer import *
from options import *
pygame.init()

# Load saved settings; create the save file on first run.
# (Python 2 script: `file(...)` and pickle-style load/dump come from the
# star-imported Sauvegarde module.)
sauvegarde = Sauvegarde()
try:
    sauvegarde = load(file("sauvegarde", "rb"))
except IOError:  # If the file does not exist yet, create it.
    dump(sauvegarde, file("sauvegarde", "wb"))

# Open a window whose width and height depend on the saved options.
fenetre = pygame.display.set_mode((tailleCase * (sauvegarde.largeur + 2 * tailleBord),
                                   tailleCase * (sauvegarde.hauteur + 2 * tailleBord)))
# Set the window title.
pygame.display.set_caption("Snake")

# Fonts for the normal and the highlighted menu entries.
font1 = pygame.font.Font('Fonts/font1.ttf', 40)
font2 = pygame.font.Font("Fonts/font1.ttf", 80)

selectionActuelle = 0
menu = ["Jouer", "Options", "Quitter"]
nbMenu = len(menu)

ouvert = True
while ouvert:
    # Handle input: quit, menu navigation and selection.
    for event in pygame.event.get():
        if event.type == QUIT:
            ouvert = False
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                ouvert = False
            if event.key == K_RETURN:
                if menu[selectionActuelle] == "Jouer":
                    jouer(fenetre)
                if menu[selectionActuelle] == "Options":
                    options(fenetre)
                if menu[selectionActuelle] == "Quitter":
                    ouvert = False
            if event.key == K_UP:
                selectionActuelle -= 1
                if selectionActuelle < 0:
                    selectionActuelle = 0
            if event.key == K_DOWN:
                selectionActuelle += 1
                if selectionActuelle >= nbMenu:
                    selectionActuelle = nbMenu - 1

    fenetre.fill(couleurBlanche)  # Clear the screen.

    # Draw the menu, highlighting the current selection.
    espacement = fenetre.get_rect().height / (nbMenu + 1)
    for i in range(0, nbMenu):
        if i == selectionActuelle:
            text = font2.render(menu[i], 1, couleurRouge)
        else:
            text = font1.render(menu[i], 1, couleurNoire)
        position = text.get_rect()
        position.centerx = fenetre.get_rect().centerx
        position.centery = espacement * (i + 1)
        fenetre.blit(text, position)
    pygame.display.flip()

pygame.quit()
|
strin/yaotalk | yaoTalkSpeech/yaoTalkSpeech/lm/make_dic.py | Python | gpl-3.0 | 1,489 | 0.030893 | import jieba
# cmu = file('cmu.dic').readlines()
# cmu_dict = dict()
# for line in cmu:
# line = line.decode('utf-8')
# line = line[:len(line)-1]
# pos = line.find(u' ')
# keyword = line[:pos].lower()
# content = line[pos:].lower()
# cmu_dict[keyword] = content
# Python 2 script: build a pronunciation dictionary for the user's word list.
# Each line of the reference dictionary is "<word> <phoneme string>".
chinese = file('zh_broadcastnews_utf8.dic').readlines()
chinese_dict = dict()
for line in chinese:
    line = line.decode('utf-8')
    line = line[:len(line) - 1]  # strip the trailing newline
    pos = line.find(u' ')
    keyword = line[:pos].lower()
    content = line[pos:].lower()
    chinese_dict[keyword] = content

dic = file('user_ch.dic').readlines()
output = open('speech.dic', 'w')
word_dict = dict()  # words already written, for de-duplication
for line in dic:
    line = line.decode('utf-8')
    line = line.split(u' ')
    if word_dict.has_key(line[0]):  # duplication.
        continue
    else:
        word_dict[line[0]] = True
    if chinese_dict.has_key(line[0]):
        # Exact dictionary hit: reuse the known pronunciation.
        # (Fixes an extraction-garbled "chi | nese_dict" reference here.)
        output.write((line[0] + chinese_dict[line[0]] + '\n').encode('utf-8'))
    elif line[0][0] >= u'\u4e00' and line[0][0] <= u'\u9fa5':  # is chinese
        # Unknown word: segment with jieba and look up each segment.
        words = list(jieba.cut(line[0]))
        res = list()
        for word in words:
            if chinese_dict.has_key(word):
                res.append(chinese_dict[word])
            else:
                res = None
                break
        if res != None:
            output.write((line[0] + ' '.join(res) + '\n').encode('utf-8'))
        else:
            # Fall back to a character-by-character lookup.
            res = list()
            for uchar in line[0]:
                if chinese_dict.has_key(uchar):
                    res.append(chinese_dict[uchar])
                else:
                    res = None
                    break
            if res != None:
                output.write((line[0] + ' '.join(res) + '\n').encode('utf-8'))
output.close()
nesl/LabSense | Devices/LabSenseModbus/common/modbus.py | Python | bsd-3-clause | 4,834 | 0.004344 | import argparse # For reading command line arguments
import socket # For TCP Socket: create_connection and htons()
import struct # For reading/writing binary data
import crc16 # For calculating crc-16 for modbus msgs
import logging # For logging events
import sys # For printing out response bytes
import time # For timestamping data retrieval
class TCPModbusClient(object):
    def __init__(self, IP, PORT):
        """Store the Modbus/TCP server address and open the connection.

        IP/PORT identify the Modbus TCP gateway to talk to.
        """
        self.IP = IP
        self.PORT = PORT
        self.server_addr = (str(IP), int(PORT))
        self.connect()
def connect(self):
try:
# Try connecting with a timeout of 5 seconds
self.sock = socket.create_connection(self.server_addr, 10)
except socket.error:
print ("Could not establish modbus"
"connection. Retrying in 5 seconds...")
time.sleep(5)
self.connect()
def modbusReadReg(self, addr, modbus_func, reg_addr, reg_qty):
# Create request with network endianness
struct_format = ("!BBHH")
packed_data = struct.pack(struct_format, addr, modbus_func, reg_addr, reg_qty)
packed_data_size = struct.calcsize(struct_format)
# Calculate the CRC16 and append to the end
crc = crc16.calcCRC(packed_data)
crc = socket.htons(crc)
struct_format = ("!BBHHH")
packed_data = struct.pack(struct_format, addr, modbus_func, reg_addr, reg_qty, crc)
#print "Packed data: " + repr(packed_data)
sent = False
while sent == False:
try:
# Send data
self.sock.sendall(packed_data)
response = self.getResponse(reg_qty)
sent = True
except socket.error:
print ("Modbus Connection was closed by Modbus Server. "
"Retrying in 5 seconds...")
time.sleep(5)
self.connect()
return response
def getResponse(self, reg_qty):
# Response size is:
# Modbus Address 1 byte
# Function Code 1 byte
# Number of data bytes to follow 1 byte
# Register contents reg_qty * 2 b/c they are 16 bit values
# CRC 2 bytes
response_size = 5 + 2*reg_qty
response = self.sock.recv(response_size)
struct_format = "!BBB" + "f" * (reg_qty/2) + "H"
try:
data = struct.unpack(struct_format, response)
except struct.error:
print "Received bad data. Skipping..."
return []
# Remove first 3 bytes and last two bytes (See
# above)
start = 3
end | = start + (reg_qty/2)
data = data[start:end]
#sys.stdout.write("Response: ")
#for num in data:
| #sys.stdout.write(str(num) + " " )
print "\n"
return data
""" Channel-level calls for getting
data from meters """
# Checks if channels given are valid
def checkValidChannel(self, channels):
if not all([channel in self.Valid_channels for channel in channels]):
raise KeyError("Channels given were not recognized")
return True
# Gets data from the meter
def getData(self):
current_time = time.time()
device_data = {}
channel_data = self.getDeviceData()
if channel_data:
device_data = {"devicename": self.name,
"timestamp": current_time,
"channels": channel_data
}
return device_data
""" Functions that must be implemented by child classes. """
def getDeviceData(self):
raise NotImplementedError("getDeviceData must be implemented by all child classes of TCPModbusClient.")
def parseData(self, data, modbus_address):
raise NotImplementedError("ParseData must be implemented by all child classes of TCPModbusClient.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("IP", help="IP address for device")
parser.add_argument("PORT", help="Port for device")
parser.add_argument("Modbus_address", help="Modbus\
address")
parser.add_argument("Modbus_funct_code", help="Modbus function code")
parser.add_argument("Modbus_start_reg", help="Modbus\
starting register")
parser.add_argument("Modbus_num_regs", help="Number of registers")
args = parser.parse_args()
client = TCPModbusClient(args.IP, args.PORT)
client.connect()
data = client.modbusReadReg(int(args.Modbus_address),
int(args.Modbus_funct_code),
int(args.Modbus_start_reg),
int(args.Modbus_num_regs))
|
christabor-archive/DjangoKonstrukt | konstrukt/konstrukt/forms.py | Python | mit | 940 | 0 | from django.db import models as _models
from django import forms
import models
import inspect
# Hax, inspect ftw
# Get the args and varargs of a given module
# for use in the form.
DJANGO_FIELD_ARGS = (
(str(name), str(inspect.getargspec(obj.__init__)))
for name, obj in inspect.getmembers(_models)
if name.endswith('Field')
)
class ModelForm(forms.ModelForm):
value = _models.CharField(
choices=DJANGO_FIELD_ARGS,
blank=False, null=False, max_length=10000)
class Meta:
model = models.Model
class ViewForm(form | s.ModelForm):
class Meta:
model = models.View
class FieldValueForm(forms.ModelForm):
ref_name = _models.CharField(
help_text='A reference name for this key',
max_length=500)
blank = _models.BooleanField()
null = _models.BooleanField()
class Meta:
| model = models.FieldValue
class AttachModelsViewForm(forms.Form):
pass
|
LaurentClaessens/phystricks | testing/demonstration/phystricksQEPZooNndwiS.py | Python | gpl-3.0 | 671 | 0.059613 | # -*- coding: utf8 -*-
from phystricks import *
def QEPZooNndwiS():
pspict,fig = SinglePicture("QEPZooNndwiS")
pspict.dilatation_X(1)
pspict.dilatation_Y(1)
h=2
A=Point(0,0)
B=Point(4,0)
C=Point(1,-h)
D=Point(6,-h)
d1=Segment(A,B).dilatation(1.5)
d2=Segment(D,C).dilatatio | n(1.5)
I=d1 | .midpoint()
J=d2.midpoint()
seg=Segment(I,J).dilatation(1.3)
a1=AngleAOB(A,I,J)
a2=AngleAOB(D,J,I)
a1.put_mark(0.2,angle=None,text="\( a\)",pspict=pspict)
a2.put_mark(0.2,angle=None,text="\( q\)",pspict=pspict)
pspict.DrawGraphs(d1,d2,seg,a1,a2)
fig.no_figure()
fig.conclude()
fig.write_the_file()
|
eladnoor/optslope | src/html_writer.py | Python | mit | 9,669 | 0.006412 | #!/usr/bin/python
"""
html_writer.py - Construct HTML pages
"""
import datetime
import os
import types
import numpy as np
import xml.dom.minidom
class BaseHtmlWriter:
def __init__(self):
self.div_counter = 0
pass
def relative_to_full_path(self, relpath):
raise Exception("class not implemented")
def write(self, s):
raise Exception("class not implemented")
def write_header(self):
self.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
self.write('<head>\n')
self.write('<script type="text/javascript" src="expandCollapse.js"></script>\n')
self.write('</head>\n')
self.write('<html>\n<body>\n')
now = datetime.datetime.now()
self.write('<div>Written at %s</div>' % now)
def write_js(self, path):
if os.path.exists(path + '/expandCollapse.js'):
return
fp = open(path + '/expandCollapse.js', 'w')
fp.write("""function toggleMe(a){
var e=document.getElementById(a);
if(!e)return true;
if(e.style.display=="none"){
e.style.display="block"
} else {
e.style.display="none"
}
return true;
}
""")
fp.close()
def write_ol(self, l):
self.write("<ol>\n")
for mem in l:
self.write(" <li>%s</li>\n" % str(mem))
self.write("</ol>\n")
def write_ul(self, l):
self.write("<ul>\n")
for mem in l:
self.write(" <li>%s</li>\n" % str(mem))
self.write("</ul>\n")
def write_table(self, rowdicts, headers=None, border=1, decimal=None, rowcolors=None):
"""
In order to print the row number, use the title '#' in headers and
write_table() will automatically fill that column with the row numbers.
"""
def to_string(x, decimal=None):
if type(x) == types.StringType:
return x
if type(x) in (types.IntType, np.int16, np.int32, np.int64):
return '%d' % x
if type(x) in (types.FloatType, np.float32, np.float64):
if np.isnan(x):
return 'N/A'
if decimal is not None:
return eval("'%%.%df' %% x" % decimal)
return "%g" % x
return str(x)
if not headers:
headers = set()
for rowdict in rowdicts:
for key in rowdict.keys():
headers.add(to_string(key))
headers = sorted(headers)
self.write('<table border=%d>\n' % border)
self.write('<tr><td><b>' + '</b></td><td><b>'.join(headers) + '</b></td></tr>\n')
for i, rowdict in enumerate(rowdicts):
rowdict['#'] = '%d' % i
values = [to_string(rowdict.get(key, ""), decimal) for key in headers]
if rowcolors != None:
self.write('<tr bgcolor=#%s>' % rowcolors[i])
else:
self.write('<tr>')
self.write('<td>' + '</td><td>'.join(values) + '</td></tr>\n')
self.write('</table>\n')
def table_start(self, border=1):
self.write('<table border=%d>\n' % border)
def table_writerow(self, values):
self.write('<tr><td>' + '</td><td>'.join(values) + '</td></tr>\n')
def table_end(self):
self.write("</table>\n")
def insert_toggle(self, div_id=None, start_here=False, label='Show'):
if not div_id:
div_id = "DIV%05d" % self.div_counter
self.div_counter += 1
elif type(div_id) != types.StringType:
raise ValueError("HTML div ID must be a string")
self.write('<input type="button" class="button" onclick="return toggleMe(\'%s\')" value="%s">\n'
% (div_id, label))
if start_here:
self.div_start(div_id)
return div_id
def div_start(self, div_id):
self.write('<div id="%s" style="display:none">' % div_id)
def div_end(self):
self.write('</div>\n')
def embed_img(self, fig_fname, alternative_string=""):
self.write('<img src="' + fig_fname + '" atl="' + alternative_string + '" />')
def embed_svg(self, fig_fname, width=320, height=240, name=''):
self.write('<a href="%s.svg">' % name)
self.extract_svg_from_file(fig_fname, width=width, height=height)
self.write('</a>')
#self.write('<object data="%s" type="image/s | vg+xml" width="%dpt" height="%dpt" name="%s" frameborder="0" marginwidth="0" marginheight="0"/></object>'
# % (fig_fname, width, height, name))
def embed_matplotlib_figure(self, fig, width=None, height=None, name=None):
"""
Adds a matplotlib figure into the HTML as an inline SVG
Arguments:
fig - a matplotlib Figure object
width - the desired width of the fig | ure in pixels
height - the desired height of the figure in pixels
name - if not None, the SVG will be written to a file with that name will
be linked to from the inline figure
"""
if name:
svg_filename = self.relative_to_full_path(name + '.svg')
self.write('<a href="%s.svg">' % name)
else:
svg_filename = '.svg'
width = width or (fig.get_figwidth() * fig.get_dpi())
height = height or (fig.get_figheight() * fig.get_dpi())
fig.savefig(svg_filename, format='svg')
self.extract_svg_from_file(svg_filename, width=width, height=height)
if name:
self.write('</a>')
else:
os.remove(svg_filename)
def embed_dot_inline(self, Gdot, width=320, height=240, name=None):
"""
Converts the DOT graph to an SVG DOM and uses the inline SVG option to
add it directly into the HTML (without creating a separate SVG file).
"""
if name:
svg_filename = self.relative_to_full_path(name + '.svg')
self.write('<a href="%s.svg">' % name)
else:
svg_filename = '.svg'
Gdot.write(svg_filename, prog='dot', format='svg')
self.extract_svg_from_file(svg_filename, width=width, height=height)
if name:
self.write('</a>')
else:
os.remove(svg_filename)
def embed_dot(self, Gdot, name, width=320, height=240):
"""
Converts the DOT graph to an SVG DOM and uses the inline SVG option to
add it directly into the HTML (without creating a separate SVG file).
"""
svg_filename = self.relative_to_full_path(name + '.svg')
Gdot.write(svg_filename, prog='dot', format='svg')
self.embed_svg(svg_filename, width=width, height=height, name=name)
def extract_svg_from_xmldom(self, dom, width=320, height=240):
svg = dom.getElementsByTagName("svg")[0]
svg.setAttribute('width', '%dpt' % width)
svg.setAttribute('height', '%dpt' % height)
self.write(svg.toprettyxml(indent=' ', newl=''))
def extract_svg_from_file(self, fname, width=320, height=240):
xmldom = xml.dom.minidom.parse(fname)
self.extract_svg_from_xmldom(xmldom, width, height)
def branch(self, relative_path, link_text=None):
"""
Branches the HTML file by creating a new HTML and adding a link to it with the desired text
"""
if link_text is None:
link_text = relative_path
self.write("<a href=\"" + relative_path + ".html\">" + link_text + "</a>")
return HtmlWriter(os.path.join(self.filepath, relative_path + ".html"))
def close(self):
self.write("</body>\n</html>\n")
class NullHtmlWriter(BaseHtmlWriter):
def __init__(self):
BaseHtmlWriter.__init__(self)
self.filename = None
def write(self, s):
pa |
Pink-Silver/PyDoom | resourcezip/scripts/doom2/__init__.py | Python | bsd-3-clause | 410 | 0.007317 | # Cop | yright (c) 2016, Kate Fox
# All rights reserved.
#
# This file is covered by the 3-clause BSD license.
# See the LICENSE file in this program's distribution for details.
"""Game Library for DOOM II: Hell on Earth (doom2.exe, or close to it, maybe)"""
game_shortname = "doom2"
game_title = "DOOM II: Hell on Earth"
game_description = "This time, they want your lunch money."
game_files = [ "doom2.w | ad" ]
|
mrkiwi-nz/django-helpdesk | helpdesk/urls.py | Python | bsd-3-clause | 5,961 | 0.001174 | """
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
urls.py - Mapping of URL's to our various views. Note we always used NAMED
views for simplicity in linking later on.
"""
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from helpdesk import settings as helpdesk_settings
from helpdesk.views import feeds, staff, public, kb
class DirectTemplateView(TemplateView):
extra_context = None
def get_context_data(self, **kwargs):
context = super(self.__class__, self).get_context_data(**kwargs)
if self.extra_context is not None:
for key, value in self.extra_context.items():
if callable(value):
context[key] = | value()
else:
context[key] = value
return context
app_name = 'helpdesk'
urlpatterns = [
url(r'^dashboard/$',
staff.dashboard,
name='dashboard'),
url(r'^tickets/$',
staff.ticket_list,
name='list'),
url(r'^tickets/update/$',
staff.mass_update,
name='mass_update'),
url(r'^tickets/submit/$',
staff.create_ticket,
name='submit'),
url(r'^tickets/(?P<tic | ket_id>[0-9]+)/$',
staff.view_ticket,
name='view'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_edit/(?P<followup_id>[0-9]+)/$',
staff.followup_edit,
name='followup_edit'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/followup_delete/(?P<followup_id>[0-9]+)/$',
staff.followup_delete,
name='followup_delete'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/edit/$',
staff.edit_ticket,
name='edit'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/update/$',
staff.update_ticket,
name='update'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/delete/$',
staff.delete_ticket,
name='delete'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/hold/$',
staff.hold_ticket,
name='hold'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/unhold/$',
staff.unhold_ticket,
name='unhold'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/$',
staff.ticket_cc,
name='ticket_cc'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/add/$',
staff.ticket_cc_add,
name='ticket_cc_add'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/cc/delete/(?P<cc_id>[0-9]+)/$',
staff.ticket_cc_del,
name='ticket_cc_del'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/add/$',
staff.ticket_dependency_add,
name='ticket_dependency_add'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/dependency/delete/(?P<dependency_id>[0-9]+)/$',
staff.ticket_dependency_del,
name='ticket_dependency_del'),
url(r'^tickets/(?P<ticket_id>[0-9]+)/attachment_delete/(?P<attachment_id>[0-9]+)/$',
staff.attachment_del,
name='attachment_del'),
url(r'^raw/(?P<type>\w+)/$',
staff.raw_details,
name='raw'),
url(r'^rss/$',
staff.rss_list,
name='rss_index'),
url(r'^reports/$',
staff.report_index,
name='report_index'),
url(r'^reports/(?P<report>\w+)/$',
staff.run_report,
name='run_report'),
url(r'^save_query/$',
staff.save_query,
name='savequery'),
url(r'^delete_query/(?P<id>[0-9]+)/$',
staff.delete_saved_query,
name='delete_query'),
url(r'^settings/$',
staff.user_settings,
name='user_settings'),
url(r'^ignore/$',
staff.email_ignore,
name='email_ignore'),
url(r'^ignore/add/$',
staff.email_ignore_add,
name='email_ignore_add'),
url(r'^ignore/delete/(?P<id>[0-9]+)/$',
staff.email_ignore_del,
name='email_ignore_del'),
]
urlpatterns += [
url(r'^$',
public.homepage,
name='home'),
url(r'^view/$',
public.view_ticket,
name='public_view'),
url(r'^change_language/$',
public.change_language,
name='public_change_language'),
]
urlpatterns += [
url(r'^rss/user/(?P<user_name>[^/]+)/$',
login_required(feeds.OpenTicketsByUser()),
name='rss_user'),
url(r'^rss/user/(?P<user_name>[^/]+)/(?P<queue_slug>[A-Za-z0-9_-]+)/$',
login_required(feeds.OpenTicketsByUser()),
name='rss_user_queue'),
url(r'^rss/queue/(?P<queue_slug>[A-Za-z0-9_-]+)/$',
login_required(feeds.OpenTicketsByQueue()),
name='rss_queue'),
url(r'^rss/unassigned/$',
login_required(feeds.UnassignedTickets()),
name='rss_unassigned'),
url(r'^rss/recent_activity/$',
login_required(feeds.RecentFollowUps()),
name='rss_activity'),
]
urlpatterns += [
url(r'^login/$',
auth_views.login,
{'template_name': 'helpdesk/registration/login.html'},
name='login'),
url(r'^logout/$',
auth_views.logout,
{'template_name': 'helpdesk/registration/login.html', 'next_page': '../'},
name='logout'),
]
if helpdesk_settings.HELPDESK_KB_ENABLED:
urlpatterns += [
url(r'^kb/$',
kb.index,
name='kb_index'),
url(r'^kb/(?P<item>[0-9]+)/$',
kb.item,
name='kb_item'),
url(r'^kb/(?P<item>[0-9]+)/vote/$',
kb.vote,
name='kb_vote'),
url(r'^kb/(?P<slug>[A-Za-z0-9_-]+)/$',
kb.category,
name='kb_category'),
]
urlpatterns += [
url(r'^help/context/$',
TemplateView.as_view(template_name='helpdesk/help_context.html'),
name='help_context'),
url(r'^system_settings/$',
DirectTemplateView.as_view(template_name='helpdesk/system_settings.html'),
name='system_settings'),
]
|
ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/tests/functional/modules/pyi_get_datadir.py | Python | gpl-3.0 | 1,315 | 0.003042 | #-----------------------------------------------------------------------------
# Copyright (c) 2015-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# This module assists in determining the location of the tests/functional/data
# directory.
# Library imports
# ---------------
import sys
import os.path
# Local imports
# -------------
from pyi_testmod_gettemp import gettemp
# Functions
# ---------
# This function returns the location of the
# tests/functional/data directory when run from a test, either built | by
# Pyinstaller or simply run from the Python interpreter. |
def get_data_dir():
# Some tests need input files to operate on. There are two cases:
if getattr(sys, 'frozen', False):
# 1. Frozen: rely on gettemp to find the correct directory both in
# onefile and in onedir modes.
return gettemp('data')
else:
# 2. Not frozen: rely on the filesystem layout of this git repository.
return os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', 'data')
|
Sutto/cloud-custodian | c7n/resources/iam.py | Python | apache-2.0 | 66,755 | 0.000599 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import csv
import datetime
import functools
import json
import io
from datetime import timedelta
import itertools
import time
from concurrent.futures import as_completed
from dateutil.tz import tzutc
from dateutil.parser import parse as parse_date
import six
from botocore.exceptions import ClientError
from c7n.actions import BaseAction
from c7n.exceptions import PolicyValidationError
from c7n.filters import ValueFilter, Filter
from c7n.filters.multiattr import MultiAttrFilter
from c7n.filters.iamaccess import CrossAccountAccessFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, TypeInfo
from c7n.resolver import ValuesFrom
from c7n.tags import TagActionFilter, TagDelayedAction, Tag, RemoveTag
from c7n.utils import local_session, type_schema, chunks, filter_empty, QueryParser
from c7n.resources.aws import Arn
from c7n.resources.securityhub import OtherResourcePostFinding
@resources.register('iam-group')
class Group(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'group'
enum_spec = ('list_groups', 'Groups', None)
id = name = 'GroupName'
date = 'CreateDate'
config_type = "AWS::IAM::Group"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
def get_resources(self, resource_ids, cache=True):
"""For IAM Groups on events, resource ids are Group Names."""
client = local_session(self.session_factory).client('iam')
resources = []
for rid in resource_ids:
try:
result = client.get_group(GroupName=rid)
except client.exceptions.NoSuchEntityException:
continue
group = result.pop('Group')
group['c7n:Users'] = result['Users']
resources.append(group)
return resources
@resources.register('iam-role')
class Role(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'role'
enum_spec = ('list_roles', 'Roles', None)
detail_spec = ('get_role', 'RoleName', 'RoleName', 'Role')
id = name = 'RoleName'
date = 'CreateDate'
config_type = "AWS::IAM::Role"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
@Role.action_registry.register('tag')
class RoleTag(Tag):
"""Tag an iam role."""
permissions = ('iam:TagRole',)
def process_resource_set(self, client, roles, tags):
for role in roles:
try:
self.manager.retry(
client.tag_role, RoleName=role['RoleName'], Tags=tags)
except client.exceptions.NoSuchEntityException:
continue
@Role.action_registry.register('remove-tag')
class RoleRemoveTag(RemoveTag):
"""Remove tags from an iam role."""
permissions = ('iam:UntagRole',)
def process_resource_set(self, client, roles, tags):
for role in roles:
try:
self.manager.retry(
client.untag_role, RoleName=role['RoleName'], TagKeys=tags)
except client.exceptions.NoSuchEntityException:
continue
@resources.register('iam-user')
class User(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'user'
detail_spec = ('get_user', 'UserName', 'UserName', 'User')
enum_spec = ('list_users', 'Users', None)
id = name = 'UserName'
date = 'CreateDate'
config_type = "AWS::IAM::User"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
def get_source(self, source_type):
if source_type == 'describe':
return DescribeUser(self)
return super(User, self).get_source(source_type)
class DescribeUser(DescribeSource):
def get_resources(self, resource_ids, cache=True):
client = local_session(self.manager.session_factory).client('iam')
results = []
for r in resource_ids:
try:
results.append(client.get_user(UserName=r)['User'])
except client.exceptions.NoSuchEntityException:
continue
return results
@User.action_registry.register('tag')
class UserTag(Tag):
"""Tag an iam user."""
permissions = ('iam:TagUser',)
def process_resource_set(self, client, users, tags):
for u in users:
try:
self.manager.retry(
client.tag_user, UserName=u['UserName'], Tags=tags)
except client.exceptions.NoSuchEntityException:
continue
@User.action_registry.register('remove-tag')
class UserRemoveTag(RemoveTag):
"""Remove tags from an iam user."""
permissions = ('iam:UntagUser',)
def process_resource_set(self, client, users, tags):
for u in users:
try:
self.manager.retry(
client.untag_user, UserName=u['UserName'], TagKeys=tags)
except client.exceptions.NoSuchEntityException:
continue
User.action_registry.register('mark-for-op', TagDelayedAction)
User.filter_registry.register('marked-for-op', TagActionFilter)
@User.action_registry.register('set-groups')
class SetGroups(BaseAction):
"""Set a specific IAM user as added/removed from a group
:example:
.. code-block:: yaml
- name: iam-user-add-remove
resource: iam-user
filters:
- type: value
key: UserName
value: Bob
actions:
- type: set-groups
state: remove
group: Admin
"""
schema = type_schema(
'set-groups',
state={'enum': ['add', 'remove']},
group={'type': 'string'},
required=['state', 'group']
)
permissions = ('iam:AddUserToGroup', 'iam:RemoveUserFromGroup',)
def validate(self):
if self.data.get('group') == '':
raise PolicyValidationError('group cannot be empty on %s'
% (self.manager.data))
def process(self, resources):
group_name = self.data['group']
state = self.data['state']
client = local_session(self.manager.session_factory).client('iam')
op_map = {
'add': client.add_user_to_group,
| 'remove': client.remove_user_from_group
}
for r in resources:
try:
op_map[state](GroupName=group_name, UserName=r['UserName'])
except client.exceptions.NoSuchEntityException:
continue
@resources.regist | er('iam-policy')
class Policy(QueryResourceManager):
class resource_type(TypeInfo):
service = 'iam'
arn_type = 'policy'
enum_spec = ('list_policies', 'Policies', None)
id = 'PolicyId'
name = 'PolicyName'
date = 'CreateDate'
config_type = "AWS::IAM::Policy"
# Denotes this resource type exists across regions
global_resource = True
arn = 'Arn'
def get_source(self, source_type):
if source_type == 'describe':
return DescribePolicy(self)
return super(Policy, self).get_source(source_type)
class DescribePolicy(DescribeSource):
def resources(self, query=None):
qfilters = PolicyQueryParser.parse( |
uniite/pyirc | gcm/gcm.py | Python | mit | 4,198 | 0.005002 | import urllib
import urllib2
import json
GCM_URL = 'https://android.googleapis.com/gcm/send'
class GCMException(Exception): pass
class GCMMalformedJsonException(GCMException): pass
class GCMConnectionException(GCMException): pass
class GCMAuthenticationException(GCMException): pass
class GCMTooManyRegIdsException(GCMException): pass
# Exceptions from Google responses
class GCMMissingRegistrationException(GCMException): pass
class GCMMismatchSenderIdException(GCMException): pass
class GCMNotRegisteredException(GCMException): pass
class GCMMessageTooBigException(GCMException): pass
class GCM(object):
def __init__(self, api_key):
self.api_key = api_key
def construct_payload(self, registration_ids, data=None, collapse_key=None,
delay_while_idle=False, time_to_live=None, is_json=True):
if is_json:
payload = {'registration_ids': registration_ids}
if data:
payload['data'] = data
else:
payload = {'registration_id': registration_ids}
if data:
for k in data.keys():
data['data.%s' % k] = data.pop(k)
payload.updat | e(data)
if time_to_live:
payload['time_to_live'] = time_to_live
payload['collapse_key'] = collapse_key
if json:
payload = json.dumps(payload)
return payload
def make_request(self, data, is_json=True):
headers = {
'Authorization': 'key=%s' % self.api_key,
}
# Default Content-Type is defaulted to application/x-www-form-urlencoded;charset=UTF-8 |
if is_json:
headers['Content-Type'] = 'application/json'
if not is_json:
data = urllib.quote_plus(data)
req = urllib2.Request(GCM_URL, data, headers)
try:
response = urllib2.urlopen(req).read()
except urllib2.HTTPError as e:
if e.code == 400:
raise GCMMalformedJsonException("The request could not be parsed as JSON")
elif e.code == 401:
raise GCMAuthenticationException("There was an error authenticating the sender account")
# TODO: handle 503 and Retry-After
except urllib2.URLError as e:
raise GCMConnectionException("There was an internal error in the GCM server while trying to process the request")
if is_json:
response = json.loads(response)
return response
def handle_response(self, response):
error = response['results']['error']
if error == 'MissingRegistration':
raise GCMMissingRegistrationException("Missing registration_ids")
elif error == 'InvalidRegistration':
raise GCMMismatchSenderIdException("A registration ID is tied to a certain group of senders")
elif error == 'NotRegistered':
raise GCMNotRegisteredException("Registration id is not valid anymore")
elif error == 'MessageTooBig':
raise GCMMessageTooBigException("Message can't exceed 4096 bytes")
def plaintext_request(self, registration_id, data=None, collapse_key=None,
delay_while_idle=False, time_to_live=None):
if not registration_id:
raise GCMMissingRegistrationException("Missing registration_id")
payload = self.construct_payload(
registration_id,
data, collapse_key,
delay_while_idle,
time_to_live,
False
)
return self.make_request(payload, json=False)
def json_request(self, registration_ids, data=None, collapse_key=None,
delay_while_idle=False, time_to_live=None):
if not registration_ids:
raise GCMMissingRegistrationException("Missing registration_ids")
if len(registration_ids) > 1000:
raise GCMTooManyRegIdsException("Exceded number of registration_ids")
payload = self.construct_payload(
registration_ids, data, collapse_key,
delay_while_idle, time_to_live
)
return self.make_request(payload, is_json=True)
|
atados/atados-ovp | api/channels/default/tests/test_tasks.py | Python | agpl-3.0 | 4,585 | 0.002621 | from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.core import mail
from ovp.apps.users.tests.fixture import UserFactory
from ovp.apps.organizations.tests.fixture import OrganizationFactory
from ovp.apps.projects.models import Project, Apply, Job, Work
from ovp.apps.core.helpers import get_email_subject
from server.celery_tasks import app
@override_settings(DEFAULT_SEND_EMAIL='sync',
CELERY_TASK_EAGER_PROPAGATES_EXCEPTIONS=True,
CELERY_TASK_ALWAYS_EAGER=True)
class TestEmailTriggers(TestCase):
def setUp(self):
self.user = UserFactory.create(
email='testmail-projects@test.com',
password='test_returned',
object_channel='default'
)
self.organization = OrganizationFactory.create(
name='test org', owner=self.user,
type=0, object_channel='default'
)
self.project = Project.objects.create(
name='test project', slug='test-slug',
details='abc', description='abc',
owner=self.user, organization=self.organization,
published=False, object_channel='default'
)
self.project.published = True
self.project.save()
app.control.purge()
def test_applying_schedules_interaction_confirmation_email(self):
"""
Assert cellery task to ask about interaction
is created when user applies to project
"""
mail.outbox = []
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, get_email_subject(
'default', 'atados-askProjectInteractionConfirmation-toVolunteer', 'Ask project confirmation'
))
self.assertIn('vaga test project', mail.outbox[0].body)
def test_applying_schedules_reminder_email(self):
"""
Assert cellery task to remind volunteer
is created when user applies to project
"""
mail.outbox = []
Job.objects.create(
start_date=timezone.now(), end_date=timezone.now(),
project=self.project, object_channel='default'
)
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(len(mail.outbox), 4)
self.assertEqual(mail.outbox[1].subject, 'Uma ação está chegando... estamos ansiosos para te ver.')
self.assertIn('test project', mail.outbox[1].body)
def test_applying_schedules_ask_about_project_experience_to_volunteer(self):
"""
Assert cellery task to ask volunteer about project
experience is created when user applies to project
"""
mail.outbox = []
work = Work.objects.create(project=self.project, object_channel='default')
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(mail.outbox[1].subject, 'Conta pra gente como foi sua experiência?')
self.assertIn('>test project<', mail.outbox[1].alternatives[0][0])
mail.outbox = []
work.delete()
Job.objects.create(
| start_date=timezone.now(), end_date=timezone.now(),
project=self.project, object_channel='default'
)
Apply.objects.create(user=self.user, project=self.project, object_channel='default')
| self.assertEqual(mail.outbox[2].subject, 'Conta pra gente como foi sua experiência?')
self.assertIn('>test project<', mail.outbox[2].alternatives[0][0])
def test_publishing_project_schedules_ask_about_experience_to_organization(self):
"""
Assert cellery task to ask organization about project
experience is created when user project is published
"""
mail.outbox = []
project = Project.objects.create(
name='test project', slug='test-slug', details='abc',
description='abc', owner=self.user, published=False,
organization=self.organization, object_channel='default'
)
Work.objects.create(project=project, object_channel='default')
project.published = True
project.save()
self.assertEqual(len(mail.outbox), 3)
self.assertEqual(mail.outbox[2].subject, 'Tá na hora de contar pra gente como foi')
self.assertIn('>test project<', mail.outbox[2].alternatives[0][0])
|
pypingou/pagure | pagure/hooks/pagure_hook.py | Python | gpl-2.0 | 8,617 | 0 | # -*- coding: utf-8 -*-
"""
(c) 2014-2018 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import logging
import pygit2
import sqlalchemy as sa
import wtforms
try:
from flask_wtf import FlaskForm
except ImportError:
from flask_wtf import Form as FlaskForm
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import relation
from sqlalchemy.orm import backref
import pagure.config
import pagur | e.lib.query
import pagure.lib.git
from pagure.hooks import BaseHook, BaseRunner
from pagure.lib.model import BASE, Project
_log = logging.getLogger(__name__)
pagure_config = pagure.config.reload_config()
class PagureTable(BASE):
""" Stores information about the pagure hook deployed on a project.
Table -- hook_pagure
"""
__tablename__ = "hook_pagure"
id = sa.Column(sa.Integer, primary_key=True)
project_id = sa.Colum | n(
sa.Integer,
sa.ForeignKey("projects.id", onupdate="CASCADE", ondelete="CASCADE"),
nullable=False,
unique=True,
index=True,
)
active = sa.Column(sa.Boolean, nullable=False, default=False)
project = relation(
"Project",
remote_side=[Project.id],
backref=backref(
"pagure_hook",
cascade="delete, delete-orphan",
single_parent=True,
uselist=False,
),
)
def generate_revision_change_log(
session, project, username, repodir, new_commits_list
):
print("Detailed log of new commits:\n\n")
commitid = None
for line in pagure.lib.git.read_git_lines(
["log", "--no-walk"] + new_commits_list + ["--"], repodir
):
if line.startswith("commit"):
commitid = line.split("commit ")[-1]
line = line.strip()
print("*", line)
for issue_or_pr in pagure.lib.link.get_relation(
session,
project.name,
project.username if project.is_fork else None,
project.namespace,
line,
"fixes",
include_prs=True,
):
if pagure_config.get("HOOK_DEBUG", False):
print(commitid, relation)
fixes_relation(
session,
username,
commitid,
issue_or_pr,
pagure_config.get("APP_URL"),
)
for issue in pagure.lib.link.get_relation(
session,
project.name,
project.username if project.is_fork else None,
project.namespace,
line,
"relates",
):
if pagure_config.get("HOOK_DEBUG", False):
print(commitid, issue)
relates_commit(
session,
username,
commitid,
issue,
pagure_config.get("APP_URL"),
)
def relates_commit(session, username, commitid, issue, app_url=None):
""" Add a comment to an issue that this commit relates to it. """
url = "../%s" % commitid[:8]
if app_url:
if app_url.endswith("/"):
app_url = app_url[:-1]
project = issue.project.fullname
if issue.project.is_fork:
project = "fork/%s" % project
url = "%s/%s/c/%s" % (app_url, project, commitid[:8])
comment = """ Commit [%s](%s) relates to this ticket""" % (
commitid[:8],
url,
)
try:
pagure.lib.query.add_issue_comment(
session, issue=issue, comment=comment, user=username
)
session.commit()
except pagure.exceptions.PagureException as err:
print(err)
except SQLAlchemyError as err: # pragma: no cover
session.rollback()
_log.exception(err)
def fixes_relation(session, username, commitid, relation, app_url=None):
""" Add a comment to an issue or PR that this commit fixes it and update
the status if the commit is in the master branch. """
url = "../c/%s" % commitid[:8]
if app_url:
if app_url.endswith("/"):
app_url = app_url[:-1]
project = relation.project.fullname
if relation.project.is_fork:
project = "fork/%s" % project
url = "%s/%s/c/%s" % (app_url, project, commitid[:8])
comment = """ Commit [%s](%s) fixes this %s""" % (
commitid[:8],
url,
relation.isa,
)
try:
if relation.isa == "issue":
pagure.lib.query.add_issue_comment(
session, issue=relation, comment=comment, user=username
)
elif relation.isa == "pull-request":
pagure.lib.query.add_pull_request_comment(
session,
request=relation,
commit=None,
tree_id=None,
filename=None,
row=None,
comment=comment,
user=username,
)
session.commit()
except pagure.exceptions.PagureException as err:
print(err)
except SQLAlchemyError as err: # pragma: no cover
session.rollback()
_log.exception(err)
try:
if relation.isa == "issue":
pagure.lib.query.edit_issue(
session,
relation,
user=username,
status="Closed",
close_status="Fixed",
)
elif relation.isa == "pull-request":
pagure.lib.query.close_pull_request(
session, relation, user=username, merged=True
)
session.commit()
except pagure.exceptions.PagureException as err:
print(err)
except SQLAlchemyError as err: # pragma: no cover
session.rollback()
print("ERROR", err)
_log.exception(err)
class PagureRunner(BaseRunner):
""" Runner for the pagure's specific git hook. """
@staticmethod
def post_receive(session, username, project, repotype, repodir, changes):
""" Run the default post-receive hook.
For args, see BaseRunner.runhook.
"""
if repotype != "main":
print("The pagure hook only runs on the main git repo.")
return
for refname in changes:
(oldrev, newrev) = changes[refname]
# Retrieve the default branch
repo_obj = pygit2.Repository(repodir)
default_branch = None
if not repo_obj.is_empty and not repo_obj.head_is_unborn:
default_branch = repo_obj.head.shorthand
# Skip all branch but the default one
refname = refname.replace("refs/heads/", "")
if refname != default_branch:
continue
if set(newrev) == set(["0"]):
print(
"Deleting a reference/branch, so we won't run the "
"pagure hook"
)
return
generate_revision_change_log(
session,
project,
username,
repodir,
pagure.lib.git.get_revs_between(
oldrev, newrev, repodir, refname
),
)
session.close()
class PagureForm(FlaskForm):
""" Form to configure the pagure hook. """
active = wtforms.BooleanField("Active", [wtforms.validators.Optional()])
DESCRIPTION = """
Pagure specific hook to add a comment to issues or pull requests if the pushed
commits fix them
or relate to them. This is determined based on the commit message.
To reference an issue/PR you need to use one of recognized keywords followed by
a reference to the issue or PR, separated by whitespace and and optional colon.
Such references can be either:
* The issue/PR number preceded by the `#` symbol
* The full URL of the issue or PR
If using the full URL, it is possible to reference issues in other projects.
The recognized keywords are:
* fix/fixed/fixes
* relate/related/relates
* merge/merges/merged
* close/closes/closed
Examples:
* Fixes #21
* related: https://pa |
balloob/netdisco | netdisco/discoverables/cambridgeaudio.py | Python | mit | 473 | 0 | """ Discover Cambridge Audio StreamMagic devices. """
from . import SSDPDiscover | able
class Discoverable( | SSDPDiscoverable):
"""Add support for discovering Cambridge Audio StreamMagic devices."""
def get_entries(self):
"""Get all Cambridge Audio MediaRenderer uPnP entries."""
return self.find_by_device_description({
"manufacturer": "Cambridge Audio",
"deviceType": "urn:schemas-upnp-org:device:MediaRenderer:1"
})
|
thomasmauerhofer/search-engine | src/setup/check_for_currupt_references.py | Python | mit | 2,124 | 0.000942 | from engine.api import API
from engine.utils.printing_utils import progressBar
from setup.utils.datastore_utils import repair_corrupt_reference, link_references_to_paper
def remove_duplicates_from_cited_by():
print("\nRemove Duplicates")
api = API()
papers = api.get_all_paper()
for i, paper in enumerate(papers):
progressBar(i, len(papers))
paper.cited_by = list(dict.fromkeys(paper.cited_by))
api.client.update_paper(paper)
def check_references():
print("\nCheck References")
api = API()
papers = api.get_all_paper()
for i, paper in enumerate(papers):
progressBar(i, len(papers))
other_papers = [p for p in papers if p.id != paper.id]
for reference in paper.references:
if not reference.get_paper_id():
continue
ref_paper = api.get_paper(reference.get_paper_id())
if ref_paper.cited_by.count(paper.id) == 0:
print()
reference.paper_id = []
api.client.update_paper(paper)
repair_corrupt_reference(reference, paper, other_papers, api)
def check_cited_by():
print("\nCheck Cited by")
api = API()
papers = api.get_all_paper()
for i, paper in enumerate(papers):
progressBar(i, len(papers))
for cited_paper_id in paper.cited_by:
if not api.contains_paper(cited_paper_id):
paper.cited_by.remove(cited_paper_ | id)
api.client.update_paper(paper)
continue
cited_paper = api.get_paper(cited_paper_id)
cited_paper_refs = [ref.get_paper_id() for ref | in cited_paper.references if ref.get_paper_id()]
if cited_paper_refs.count(paper.id) == 0:
print()
paper.cited_by.remove(cited_paper_id)
api.client.update_paper(paper)
link_references_to_paper(cited_paper, paper, api)
def perform_checks():
check_cited_by()
remove_duplicates_from_cited_by()
check_references()
if __name__ == "__main__":
perform_checks()
exit(0)
|
Madpilot0/EVE-Farm | app.py | Python | gpl-3.0 | 14,554 | 0.032431 | #!/usr/bin/env python
from flask import Flask, render_template, request, jsonify, session, redirect, escape, url_for
import MySQLdb
import bcrypt
from esipy import App
from esipy import EsiClient
from esipy import EsiSecurity
from esipy.exceptions import APIException
import time
import json
import requests
import datetime
import math
app = Flask(__name__)
class ServerError(Exception):pass
class DB:
conn = None
def connect(self):
config = {}
execfile("config.conf",config)
self.conn = MySQLdb.connect(
host=config['dbHost'],
user=config['dbUser'],
passwd=config['dbPass'],
db=config['dbBase']
)
self.conn.autocommit(True)
self.conn.set_character_set('utf8')
def query(self, sql, args=None):
try:
cursor = self.conn.cursor()
cursor.execute(sql,args)
except (AttributeError, MySQLdb.OperationalError):
self.connect()
cursor = self.conn.cursor()
cursor.execute(sql,args)
return cursor
if __name__ == '__main__':
config = {}
execfile("config.conf",config)
serverIP = config['serverIP']
serverPort = config['serverPort']
rounds = 10
debug = config['debug']
cer = config['ssl_cer']
key = config['ssl_key']
context = (cer,key)
app.secret_key = config['appKey']
esi_app = App.create('https://esi.tech.ccp.is/latest/swagger.json?datasource=tranquility')
security = EsiSecurity(
app=esi_app,
redirect_uri=config['callbackURL'],
client_id=config['clientID'],
secret_key=config['secretKey']
)
client = EsiClient(security=security)
scopes = ['esi-location.read_location.v1','esi-skills.read_skillqueue.v1','esi-skills.read_skills.v1','esi-clones.read_clones.v1']
db = DB()
def profit():
extractorID = "40519"
injectorID = "40520"
plexID = "44992"
priceList = []
url = "http://api.eve-central.com/api/marketstat/json?regionlimit=10000002&typeid="
try:
prices = requests.get(url+extractorID).json()[0]
extractorPrice = prices['buy']['fivePercent']
extractorPricen= prices['sell']['fivePercent']
prices = requests.get(url+injectorID).json()[0]
injectorPrice = prices['sell']['fivePercent']
injectorPricen= prices['buy']['fivePercent']
prices = requests.get(url+plexID).json()[0]
plexPrice = prices['buy']['fivePercent']
plexPricen= prices['sell']['fivePercent']
injectorsMonth = 3.888
profit = round(((injectorsMonth * (injectorPrice - extractorPrice)) - (plexPrice * 500))/1000000,2)
nonoptimal = round(((injectorsMonth * (injectorPricen - extractorPricen)) - | (plexPricen * 500))/1000000,2)
return "<a href='https://market.madpilot.nl/static/graph/farm-month.png'>Projected pro | fits: (min)"+str(nonoptimal)+"mil - (max)"+str(profit)+"mil </a>"
except:
return "<a href='https://market.madpilot.nl/static/graph/farm-month.png'>Projected profits: (min)"+str(0)+"mil - (max)"+str(0)+"mil </a>"
def isk(extractors):
extractorID = "40519"
injectorID = "40520"
plexID = "44992"
priceList = []
url = "http://api.eve-central.com/api/marketstat/json?regionlimit=10000002&typeid="
try:
prices = requests.get(url+extractorID).json()[0]
extractorPrice = prices['buy']['fivePercent']
extractorPricen= prices['sell']['fivePercent']
prices = requests.get(url+injectorID).json()[0]
injectorPrice = prices['sell']['fivePercent']
injectorPricen= prices['buy']['fivePercent']
prices = requests.get(url+plexID).json()[0]
plexPrice = prices['buy']['fivePercent']
plexPricen= prices['sell']['fivePercent']
maxProfit = round(((injectorPrice - extractorPrice) * extractors)/1000000,2)
minProfit = round(((injectorPricen - extractorPricen) * extractors)/1000000,2)
except:
maxProfit = 0
minProfit = 0
return [maxProfit, minProfit]
def isReady(char_id):
checkDelay = 1800
cur = db.query("SELECT UNIX_TIMESTAMP(updated) FROM cache_table WHERE character_id = %s",[char_id])
lastChecked = cur.fetchone()
curTime = int(time.time())
if lastChecked:
lastCheckedEpoch = lastChecked[0]
if (curTime - lastCheckedEpoch) < checkDelay:
print("Checktime is less than "+str(checkDelay)+" Seconds (current: "+str(curTime - lastCheckedEpoch)+"). Skipping")
return False
return True
return True
@app.route('/')
def index():
error = None
if 'username' not in session:
error = "Not logged in"
return redirect(url_for('login', error=error))
secure = security.get_auth_uri(scopes=scopes)
cur = db.query("SELECT id FROM users WHERE user = %s;", [session['username']])
for row in cur.fetchall():
userID = row[0]
characters = []
cur = db.query("SELECT character_id, access_token, refresh_token, expires, expires_in, added, updated FROM characters WHERE owner_id = %s;", [userID])
allSP = 0
extractableSP = 0
numExtract = 0
for row in cur.fetchall():
epoch = round(time.time())
expires = row[3] - row[4] - epoch
if expires < 0:
expires = 0
refresh = {u'access_token': row[1], u'refresh_token': row[2], u'expires_in': expires}
security.update_token(refresh)
ready = isReady(row[0])
if not ready:
cur = db.query("SELECT * FROM cache_table WHERE character_id=%s",[row[0]])
cache = cur.fetchall()[0]
#Get character name
charName = esi_app.op['get_characters_names'](character_ids=[row[0]])
result = client.request(charName)
charName = json.loads(result.raw)[0].get('character_name')
print "Character "+charName
#Get character location
if ready:
charLocation = esi_app.op['get_characters_character_id_location'](character_id=row[0])
result = client.request(charLocation)
location = json.loads(result.raw)
sol = esi_app.op['get_universe_systems_system_id'](system_id=location.get('solar_system_id'))
sol = json.loads(client.request(sol).raw).get('name')
cur = db.query("INSERT INTO cache_table (character_id,char_location) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_location=%s",[row[0],result.raw,result.raw])
else:
location = json.loads(cache[3])
sol = esi_app.op['get_universe_systems_system_id'](system_id=location.get('solar_system_id'))
sol = json.loads(client.request(sol).raw).get('name')
#Get current training skill + queue
if ready:
charTrain = esi_app.op['get_characters_character_id_skillqueue'](character_id=row[0])
result = client.request(charTrain)
training = json.loads(result.raw)
cur = db.query("INSERT INTO cache_table (character_id,char_queue) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_queue=%s",[row[0],result.raw,result.raw])
else:
training = json.loads(cache[4])
currentlyTrainingStart = training[0].get('start_date')
currentlyTrainingEnd = training[0].get('finish_date')
startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
if endTrainEpoch < epoch:
while endTrainEpoch < epoch and len(training)>1:
del training[0]
currentlyTrainingStart = training[0].get('start_date')
currentlyTrainingEnd = training[0].get('finish_date')
startTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingStart, "%Y-%m-%dT%H:%M:%SZ")))
endTrainEpoch = int(time.mktime(time.strptime(currentlyTrainingEnd, "%Y-%m-%dT%H:%M:%SZ")))
trainedSpCur = training[0].get('training_start_sp') - training[0].get('level_start_sp')
endQueue = training[-1].get('finish_date')
currentlyTraining = training[0].get('skill_id')
currentlyTrainingLevel = training[0].get('finished_level')
curSkillStartSP = training[0].get('level_start_sp')
curSkillEndSP = training[0].get('level_end_sp')
curSkillSP = curSkillEndSP - curSkillStartSP
#Get currently training name
skillName = esi_app.op['get_universe_types_type_id'](type_id=currentlyTraining)
result = client.request(skillName)
skillName = json.loads(result.raw).get('name')
#Get character total sp
if ready:
charSkill = esi_app.op['get_characters_character_id_skills'](character_id=row[0])
result = client.request(charSkill)
sp = json.loads(result.raw)
totalSp = sp.get('total_sp')
cur = db.query("INSERT INTO cache_table (character_id,char_skills) VALUES (%s,%s) ON DUPLICATE KEY UPDATE char_skills=%s",[row[0],result.raw,result.raw])
else:
sp = json.loads(cache[5])
totalSp = sp.get('total_s |
Brickstertwo/pretty-markdown | utils/whitespace_utils.py | Python | mit | 542 | 0.005535 | import re
def trim_nonbreaking_whitespace(text):
"""Trims non-breaking whitespace from the end of eac | h line in the given text.
Non-breaking whitespace refers to the markdown syntax of places two spaces at the end of a line to signify a break.
"""
pattern = re.compile(r'^.*\S $')
text = text.split('\n') # use split(...) since splitlines() doesn't keep trailing blank lines if any are present
text = [line if pattern.match(line) else line.rstrip() for line in text]
| text = '\n'.join(text)
return text
|
loandy/billy | billy/tests/importers/test_merge.py | Python | bsd-3-clause | 1,328 | 0 | from __future__ import print_function
from billy.importers.utils import merge_legislators
import json
import os
def _load_test_data(test_name):
test_data = os.path.join(os.path.dirname(os.path.dirname(__file__)),
"leg_merge_test_data")
folder = "%s/%s/" % (test_data, test_name)
leg1 = json.loads(open(folder + "1.json", 'r').read())
leg2 = json.loads(open(folder + "2.json", 'r').read())
mrgd = json.loads(open(folder + "merged.json", 'r').read())
return (leg1, leg2, mrgd)
def _check_results(one, two):
if one != two:
print('\n', one, '\n', two, '\n', sep='')
return one == two
def _test_logic(name):
leg1, leg2, compare = _load_test_data(name)
produced, to_del = merge_legislators(leg1, leg2)
assert _check_results(produced, compare)
##########
# Cut below the line
def test_legid_sanity():
_test_logic("leg_id_sanity")
def test_scraped_name_sanity():
_test_logic("scraped_name_sanity")
def test_locked_sanity():
_test_logic("locked_field_sanity")
def test_role_migration():
_test_logic("role_conflict")
def test_role_migration_two():
_test_logic("role_conflict_with_prev_roles")
def | test_vanishing_photo():
_test_logic("vanishing_photo_url")
def test_order():
_test_logic(" | test_legi_order")
|
NonVolatileComputing/arrow | python/pyarrow/filesystem.py | Python | apache-2.0 | 8,926 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os.path import join as pjoin
import os
import posixpath
from pyarrow.util import implements
class FileSystem(object):
"""
Abstract filesystem interface
"""
def cat(self, path):
"""
Return contents of file as a bytes object
Returns
-------
contents : bytes
"""
with self.open(path, 'rb') as f:
return f.read()
def ls(self, path):
"""
Return list of file paths
"""
raise NotImplementedError
def delete(self, path, recursive=False):
"""
Delete the indicated file or directory
Parameters
----------
path : string
recursive : boolean, default False
If True, also delete child paths for directories
"""
raise NotImplementedError
def disk_usage(self, path):
"""
Compute bytes used by all contents under indicated path in file tree
Parameters
----------
path : string
Can be a file path or directory
Returns
-------
usage : int
"""
path_info = self.stat(path)
if path_info['kind'] == 'file':
return path_info['size']
total = 0
for root, directories, files in self.walk(path):
for child_path in files:
abspath = self._path_join(root, child_path)
total += self.stat(abspath)['size']
return total
def _path_join(self, *args):
return sel | f.pathsep.join(args)
def stat(self, path):
"""
Returns
-------
stat : dict
"""
raise NotImplementedError('FileSystem.stat')
def rm(self, path, recursive=False):
"""
Alias for FileSystem.delete
"""
retur | n self.delete(path, recursive=recursive)
def mv(self, path, new_path):
"""
Alias for FileSystem.rename
"""
return self.rename(path, new_path)
def rename(self, path, new_path):
"""
Rename file, like UNIX mv command
Parameters
----------
path : string
Path to alter
new_path : string
Path to move to
"""
raise NotImplementedError('FileSystem.rename')
def mkdir(self, path, create_parents=True):
raise NotImplementedError
def exists(self, path):
raise NotImplementedError
def isdir(self, path):
"""
Return True if path is a directory
"""
raise NotImplementedError
def isfile(self, path):
"""
Return True if path is a file
"""
raise NotImplementedError
def read_parquet(self, path, columns=None, metadata=None, schema=None,
nthreads=1, use_pandas_metadata=False):
"""
Read Parquet data from path in file system. Can read from a single file
or a directory of files
Parameters
----------
path : str
Single file path or directory
columns : List[str], optional
Subset of columns to read
metadata : pyarrow.parquet.FileMetaData
Known metadata to validate files against
schema : pyarrow.parquet.Schema
Known schema to validate files against. Alternative to metadata
argument
nthreads : int, default 1
Number of columns to read in parallel. If > 1, requires that the
underlying file source is threadsafe
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded
Returns
-------
table : pyarrow.Table
"""
from pyarrow.parquet import ParquetDataset
dataset = ParquetDataset(path, schema=schema, metadata=metadata,
filesystem=self)
return dataset.read(columns=columns, nthreads=nthreads,
use_pandas_metadata=use_pandas_metadata)
def open(self, path, mode='rb'):
"""
Open file for reading or writing
"""
raise NotImplementedError
@property
def pathsep(self):
return '/'
class LocalFileSystem(FileSystem):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = LocalFileSystem()
return cls._instance
@implements(FileSystem.ls)
def ls(self, path):
return sorted(pjoin(path, x) for x in os.listdir(path))
@implements(FileSystem.mkdir)
def mkdir(self, path, create_parents=True):
if create_parents:
os.makedirs(path)
else:
os.mkdir(path)
@implements(FileSystem.isdir)
def isdir(self, path):
return os.path.isdir(path)
@implements(FileSystem.isfile)
def isfile(self, path):
return os.path.isfile(path)
@implements(FileSystem.exists)
def exists(self, path):
return os.path.exists(path)
@implements(FileSystem.open)
def open(self, path, mode='rb'):
"""
Open file for reading or writing
"""
return open(path, mode=mode)
@property
def pathsep(self):
return os.path.sep
def walk(self, top_dir):
"""
Directory tree generator, see os.walk
"""
return os.walk(top_dir)
class DaskFileSystem(FileSystem):
"""
Wraps s3fs Dask filesystem implementation like s3fs, gcsfs, etc.
"""
def __init__(self, fs):
self.fs = fs
@implements(FileSystem.isdir)
def isdir(self, path):
raise NotImplementedError("Unsupported file system API")
@implements(FileSystem.isfile)
def isfile(self, path):
raise NotImplementedError("Unsupported file system API")
@implements(FileSystem.delete)
def delete(self, path, recursive=False):
return self.fs.rm(path, recursive=recursive)
@implements(FileSystem.mkdir)
def mkdir(self, path):
return self.fs.mkdir(path)
@implements(FileSystem.open)
def open(self, path, mode='rb'):
"""
Open file for reading or writing
"""
return self.fs.open(path, mode=mode)
def ls(self, path, detail=False):
return self.fs.ls(path, detail=detail)
def walk(self, top_path):
"""
Directory tree generator, like os.walk
"""
return self.fs.walk(top_path)
class S3FSWrapper(DaskFileSystem):
@implements(FileSystem.isdir)
def isdir(self, path):
try:
contents = self.fs.ls(path)
if len(contents) == 1 and contents[0] == path:
return False
else:
return True
except OSError:
return False
@implements(FileSystem.isfile)
def isfile(self, path):
try:
contents = self.fs.ls(path)
return len(contents) == 1 and contents[0] == path
except OSError:
return False
def walk(self, path, refresh=False):
"""
Directory tree generator, like os.walk
Generator version of what is in s3fs, which yields a flattened list of
files
"""
path = path.replace('s3://', '')
directories = set()
files = |
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/internet/_dumbwin32proc.py | Python | gpl-3.0 | 12,502 | 0.00272 | # -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
http://isometri.cc/strips/gates_in_the_head
"""
import os
# Win32 imports
import win32api
import win32con
import win32event
import win32file
import win32pipe
import win32process
import win32security
import pywintypes
# security attributes for pipes
PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
from zope.interface import implementer
from twisted.internet.interfaces import IProcessTransport, IConsumer, IProducer
from twisted.python.win32 import quoteArguments
from twisted.internet import error
from twisted.internet import _pollingfile
from twisted.internet._baseprocess import BaseProcess
def debug(msg):
import sys
print msg
sys.stdout.flush()
class _Reaper(_pollingfile._PollableResource):
def __init__(self, proc):
self.proc = proc
def checkWork(self):
if win32event.WaitForSingleObject(self.proc.hProcess, 0) != win32event.WAIT_OBJECT_0:
return 0
exitCode = win32process.GetExitCodeProcess(self.proc.hProcess)
self.deactivate()
self.proc.processEnded(exitCode)
return 0
def _findShebang(filename):
"""
Look for a #! line, and return the value following the #! if one exists, or
None if this file is not a script.
I don't know if there are any conventions for quoting in Windows shebang
lines, so this doesn't support any; therefore, you may not pass any
arguments to scripts invoked as filters. That's probably wrong, so if
somebody knows more about the cultural expectations on Windows, please feel
free to fix.
This shebang line support was added in support of the CGI tests;
appropriately enough, I determined that shebang lines are culturally
accepted in the Windows world through this page::
http://www.cgi101.com/learn/connect/winxp.html
@param filename: str representing a filename
@return: a str representing another filename.
"""
f = file(filename, 'rU')
if f.read(2) == '#!':
exe = f.readline(1024).strip('\n')
return exe
def _invalidWin32App(pywinerr):
"""
Determine if a pywintypes.error is telling us that the given process is
'not a valid win32 application', i.e. not a PE format executable.
@param pywinerr: a pywintypes.error instance raised by CreateProcess
@return: a boolean
"""
# Let's do this better in the future, but I have no idea what this error
# is; MSDN doesn't mention it, and there is no symbolic constant in
# win32process module that represents 193.
return pywinerr.args[0] == 193
@implementer(IProcessTransport, IConsumer, IProducer)
class Process(_pollingfile._PollingTimer, BaseProcess):
"""A process that integrates with the Twisted event loop.
If your subprocess is a python program, you need to:
- Run python.exe with the '-u' command line option - this turns on
unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903
- If you don't want Windows messing with data passed over
stdin/out/err, set the pipes to be in binary mode::
import os, sys, mscvrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
"""
closedNotifies = 0
def __init__(self, reactor, protocol, command, args, environment, path):
"""
Create a new child process.
"""
_pollingfile._PollingTimer.__init__(self, reactor)
BaseProcess.__init__(self, protocol)
# security attributes for pipes
sAttrs = win32security.SECURITY_ATTRIBUTES()
sAttrs.bInheritHandle = 1
# create the pipes which will connect to the secondary process
self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)
win32pipe.SetNamedPipeHandleState(self.hStdinW,
win32pipe.PIPE_NOWAIT,
None,
None)
# set the info structure for the new process.
StartupInfo = win32process.STARTUPINFO()
StartupInfo.hStdOutput = hStdoutW
StartupInfo.hStdError = hStderrW
StartupInfo.hStdInput = hStdinR
StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES
# Create new handles whose inheritance property is false
currentPid = win32api.GetCurrentProcess()
tmp = win32api.DuplicateHandle(currentPid, self.hStdoutR, currentPid, 0, 0,
win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(self.hStdoutR)
self.hStdoutR = tmp
tmp = win32api.DuplicateHandle(currentPid, self.hStderrR, currentPid, 0, 0,
win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(self.hStderrR)
self.hStderrR = tmp
tmp = win32api.DuplicateHandle(currentPid, self.hStdinW, currentPid, 0, 0,
win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(self.hStdinW)
self.hStdinW = tmp
# Add the specified en | vironment to the current environment - this is
# necessary because certain | operations are only supported on Windows
# if certain environment variables are present.
env = os.environ.copy()
env.update(environment or {})
cmdline = quoteArguments(args)
# TODO: error detection here. See #2787 and #4184.
def doCreate():
self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
command, cmdline, None, None, 1, 0, env, path, StartupInfo)
try:
try:
doCreate()
except TypeError, e:
# win32process.CreateProcess cannot deal with mixed
# str/unicode environment, so we make it all Unicode
if e.args != ('All dictionary items must be strings, or '
'all must be unicode',):
raise
newenv = {}
for key, value in env.items():
newenv[unicode(key)] = unicode(value)
env = newenv
doCreate()
except pywintypes.error, pwte:
if not _invalidWin32App(pwte):
# This behavior isn't _really_ documented, but let's make it
# consistent with the behavior that is documented.
raise OSError(pwte)
else:
# look for a shebang line. Insert the original 'command'
# (actually a script) into the new arguments list.
sheb = _findShebang(command)
if sheb is None:
raise OSError(
"%r is neither a Windows executable, "
"nor a script with a shebang line" % command)
else:
args = list(args)
args.insert(0, command)
cmdline = quoteArguments(args)
origcmd = command
command = sheb
try:
# Let's try again.
doCreate()
except pywintypes.error, pwte2:
# d'oh, failed again!
if _invalidWin32App(pwte2):
raise OSError(
"%r has an invalid shebang line: "
"%r is not a valid executable" % (
origcmd, sheb))
raise OSError(pwte2)
# close handles which only the child will use
win32file.CloseHandle(hStderrW)
win32file.CloseHa |
nburn42/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py | Python | apache-2.0 | 5,248 | 0.002668 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from tensorflow.contrib.boosted_trees.estimator_batch import estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
def _train_input_fn():
  """Return (features, labels) for the three-example training set."""
  x_values = constant_op.constant([[2.], [1.], [1.]])
  labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
  return {"x": x_values}, labels
def _eval_input_fn():
  """Return (features, labels) for the three-example evaluation set."""
  x_values = constant_op.constant([[1.], [2.], [2.]])
  labels = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
  return {"x": x_values}, labels
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
  """Smoke tests for the GBDT estimators.

  Each test builds a tiny model and checks that fit/evaluate/export run
  without raising.  (Two lines of this class were corrupted by stray ' | '
  artifacts; they are restored here, and the four near-identical test
  bodies are factored into private helpers.)
  """

  def setUp(self):
    self._export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(self._export_dir_base)

  def _make_learner_config(self):
    """Binary-classification config with depth-1 trees, shared by all tests."""
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    return learner_config

  def _fit_evaluate_export(self, model):
    """Train briefly, evaluate once, then export; any exception fails the test."""
    model.fit(input_fn=_train_input_fn, steps=15)
    model.evaluate(input_fn=_eval_input_fn, steps=1)
    model.export(self._export_dir_base)

  def testFitAndEvaluateDontThrowException(self):
    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=self._make_learner_config(),
        num_trees=1,
        examples_per_layer=3,
        model_dir=tempfile.mkdtemp(),
        config=run_config.RunConfig(),
        feature_columns=[contrib_feature_column.real_valued_column("x")])
    self._fit_evaluate_export(classifier)

  def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
    # Use a core (tf.estimator) head instead of the contrib default.
    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
    model = estimator.GradientBoostedDecisionTreeEstimator(
        head=head_fn,
        learner_config=self._make_learner_config(),
        num_trees=1,
        examples_per_layer=3,
        model_dir=tempfile.mkdtemp(),
        config=run_config.RunConfig(),
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)
    self._fit_evaluate_export(model)

  def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=self._make_learner_config(),
        num_trees=1,
        examples_per_layer=3,
        model_dir=tempfile.mkdtemp(),
        config=run_config.RunConfig(),
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)
    self._fit_evaluate_export(classifier)

  def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
    regressor = estimator.GradientBoostedDecisionTreeRegressor(
        learner_config=self._make_learner_config(),
        num_trees=1,
        examples_per_layer=3,
        model_dir=tempfile.mkdtemp(),
        config=run_config.RunConfig(),
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)
    self._fit_evaluate_export(regressor)
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  googletest.main()
|
rfleschenberg/django-shop | shop/admin/order.py | Python | bsd-3-clause | 8,117 | 0.002957 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import admin
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models.fields import Field, FieldDoesNotExist
from django.forms import widgets
from django.http import HttpResponse
from django.template import RequestContext
from django.template.loader import select_template
from django.utils.html import format_html
from django.utils.formats import number_format
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from fsm_admin.mixins import FSMTransitionMixin
from shop.models.customer import CustomerModel
from shop.models.order import OrderItemModel, OrderPayment
from shop.modifiers.pool import cart_modifiers_pool
from shop.rest import serializers
class OrderPaymentInline(admin.TabularInline):
    """Inline table of payments attached to an order."""
    model = OrderPayment
    extra = 0
    fields = ('amount', 'transaction_id', 'payment_method', 'created_at',)
    readonly_fields = ('created_at',)

    def get_formset(self, request, obj=None, **kwargs):
        """
        Convert the field `payment_method` into a select box with all possible payment methods.
        """
        payment_choices = [modifier.get_choice()
                           for modifier in cart_modifiers_pool.get_payment_modifiers()]
        kwargs.update(widgets={'payment_method': widgets.Select(choices=payment_choices)})
        return super(OrderPaymentInline, self).get_formset(request, obj, **kwargs)

    def has_delete_permission(self, request, obj=None):
        # Payment rows are bookkeeping records and must never be deleted here.
        return False
class OrderItemInline(admin.StackedInline):
    """Read-only stacked inline listing the items belonging to an order."""
    model = OrderItemModel
    extra = 0
    fields = (
        ('product_code', 'unit_price', 'line_total',),
        ('quantity',),
        'get_extra_data',
    )
    readonly_fields = ('product_code', 'quantity', 'unit_price', 'line_total', 'get_extra_data',)

    def has_add_permission(self, request, obj=None):
        # Items are fixed once the order exists; nothing may be added here.
        return False

    def has_delete_permission(self, request, obj=None):
        return False

    def get_max_num(self, request, obj=None, **kwargs):
        # Show exactly as many forms as the order has items.
        return self.model.objects.filter(order=obj).count()

    def get_extra_data(self, obj):
        return obj.extra # TODO: use a template to format this data
    get_extra_data.short_description = pgettext_lazy('admin', "Extra data")
class StatusListFilter(admin.SimpleListFilter):
    """Sidebar filter on the order status, hiding internal FSM states."""
    title = pgettext_lazy('admin', "Status")
    parameter_name = 'status'

    def lookups(self, request, model_admin):
        # Offer every transition target except the internal 'new'/'created'.
        choices = dict(model_admin.model._transition_targets)
        del choices['new']
        del choices['created']
        return choices.items()

    def queryset(self, request, queryset):
        selected_status = self.value()
        if selected_status:
            return queryset.filter(status=selected_status)
        return queryset
class BaseOrderAdmin(FSMTransitionMixin, admin.ModelAdmin):
    """
    Base admin for Order models: read-only display of totals, status and
    customer, with FSM-driven status transitions.  Two lines corrupted by
    stray ' | ' artifacts ("Outstanding amount" label, `except NoReverseMatch`)
    are restored here.
    """
    list_display = ('get_number', 'customer', 'status_name', 'total', 'created_at',)
    list_filter = (StatusListFilter,)
    fsm_field = ('status',)
    date_hierarchy = 'created_at'
    inlines = (OrderItemInline, OrderPaymentInline,)
    readonly_fields = ('get_number', 'status_name', 'get_total', 'get_subtotal', 'get_customer_link',
                       'get_outstanding_amount', 'created_at', 'updated_at', 'extra', 'stored_request',)
    fields = ('get_number', 'status_name', ('created_at', 'updated_at'),
              ('get_subtotal', 'get_total', 'get_outstanding_amount',), 'get_customer_link', 'extra', 'stored_request',)
    actions = None

    def get_number(self, obj):
        return obj.get_number()
    get_number.short_description = pgettext_lazy('admin', "Order number")

    def get_total(self, obj):
        # Render amounts through the active locale's number format.
        return number_format(obj.total)
    get_total.short_description = pgettext_lazy('admin', "Total")

    def get_subtotal(self, obj):
        return number_format(obj.subtotal)
    get_subtotal.short_description = pgettext_lazy('admin', "Subtotal")

    def get_outstanding_amount(self, obj):
        return number_format(obj.outstanding_amount)
    get_outstanding_amount.short_description = pgettext_lazy('admin', "Outstanding amount")

    def has_add_permission(self, request):
        # Orders are created through the shop checkout, never in the admin.
        return False

    def has_delete_permission(self, request, obj=None):
        return False

    def get_customer_link(self, obj):
        try:
            url = reverse('admin:shop_customerproxy_change', args=(obj.customer.pk,))
            return format_html('<a href="{0}" target="_new">{1}</a>', url, obj.customer.get_username())
        except NoReverseMatch:
            # No customer proxy admin registered: fall back to plain text.
            return format_html('<strong>{0}</strong>', obj.customer.get_username())
    get_customer_link.short_description = pgettext_lazy('admin', "Customer")
    get_customer_link.allow_tags = True

    def get_search_fields(self, request):
        fields = super(BaseOrderAdmin, self).get_search_fields(request) + \
            ('customer__user__email', 'customer__user__last_name',)
        try:
            # if CustomerModel contains a number field, let search for it
            if isinstance(CustomerModel._meta.get_field('number'), Field):
                fields += ('customer__number',)
        except FieldDoesNotExist:
            pass
        return fields
class PrintOrderAdminMixin(object):
    """
    A customized OrderAdmin class shall inherit from this mixin class, to add
    methods for printing the delivery note and the invoice.
    """
    def __init__(self, *args, **kwargs):
        # Expose the print button as an extra read-only admin field.
        self.fields += ('print_out',)
        self.readonly_fields += ('print_out',)
        super(PrintOrderAdminMixin, self).__init__(*args, **kwargs)

    def get_urls(self):
        # Admin-only endpoints that render the printable documents.
        my_urls = patterns('',
            url(r'^(?P<pk>\d+)/print_confirmation/$', self.admin_site.admin_view(self.render_confirmation),
                name='print_confirmation'),
            url(r'^(?P<pk>\d+)/print_invoice/$', self.admin_site.admin_view(self.render_invoice),
                name='print_invoice'),
        )
        return my_urls + super(PrintOrderAdminMixin, self).get_urls()

    def _render_letter(self, request, pk, template):
        # Serialize the order and render it through the given print template.
        order = self.get_object(request, pk)
        context = {'request': request, 'render_label': 'print'}
        order_serializer = serializers.OrderDetailSerializer(order, context=context)
        content = template.render(RequestContext(request, {
            'customer': serializers.CustomerSerializer(order.customer).data,
            'data': order_serializer.data,
            'order': order,
        }))
        return HttpResponse(content)

    def render_confirmation(self, request, pk=None):
        # A project-level template overrides the default shop template.
        template = select_template([
            '{}/print/order-confirmation.html'.format(settings.SHOP_APP_LABEL.lower()),
            'shop/print/order-confirmation.html'
        ])
        return self._render_letter(request, pk, template)

    def render_invoice(self, request, pk=None):
        template = select_template([
            '{}/print/invoice.html'.format(settings.SHOP_APP_LABEL.lower()),
            'shop/print/invoice.html'
        ])
        return self._render_letter(request, pk, template)

    def print_out(self, obj):
        # Offer the document that matches the order's current workflow state.
        if obj.status == 'pick_goods':
            button = reverse('admin:print_confirmation', args=(obj.id,)), _("Order Confirmation")
        elif obj.status == 'pack_goods':
            button = reverse('admin:print_invoice', args=(obj.id,)), _("Invoice")
        else:
            button = None
        if button:
            return format_html(
                '<span class="object-tools"><a href="{0}" class="viewsitelink" target="_new">{1}</a></span>',
                *button)
        return ''
    print_out.short_description = _("Print out")
    print_out.allow_tags = True
class OrderAdmin(BaseOrderAdmin):
    """
    Admin class to be used with `shop.models.defauls.order`
    """
    fields = BaseOrderAdmin.fields + (('shipping_address_text', 'billing_address_text',),)

    def get_search_fields(self, request):
        extra_fields = ('number', 'shipping_address_text', 'billing_address_text',)
        return super(OrderAdmin, self).get_search_fields(request) + extra_fields
Southpaw-TACTIC/TACTIC | src/pyasm/biz/clipboard.py | Python | epl-1.0 | 4,282 | 0.003503 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["Clipboard"]
from pyasm.common import Container
from pyasm.search import SearchType, SObject, Search, SearchKey
class Clipboard(SObject):
    """Per-user clipboard of sobjects stored in the sthpw/clipboard table.

    Items with category 'select' make up the user's current selection.
    (Restores two lines corrupted by stray ' | ' artifacts; replaces
    ``== None`` with ``is None``; ``_get_cache`` now filters on its
    ``category`` argument instead of a hard-coded 'select'.)
    """

    SEARCH_TYPE = "sthpw/clipboard"

    @classmethod
    def get_count(cls, where=None, category=None):
        """Count this user's clipboard items (the selection by default)."""
        search = Search(Clipboard)
        search.add_user_filter()
        if not category:
            search.add_filter("category", "select")
        # NOTE(review): when a category IS given, no category filter is
        # applied at all (unlike get_search) -- confirm this is intended.
        if where:
            search.add_where(where)
        return search.get_count()

    @classmethod
    def get_search(cls, category=None):
        """Return a Search over this user's clipboard, optionally by category."""
        search = Search(cls.SEARCH_TYPE)
        search.add_user_filter()
        if category:
            search.add_filter("category", category)
        return search

    @classmethod
    def get_all(cls, category=None):
        '''get all or all items in a category'''
        search = Clipboard.get_search(category=category)
        return search.get_sobjects()

    # special selected methods

    @classmethod
    def add_to_selected(cls, search_keys):
        """Append the sobjects behind ``search_keys`` to the user's selection."""
        for search_key in search_keys:
            # Resolve the key first so only existing sobjects get an item.
            sobject = SearchKey.get_by_search_key(search_key)
            item = SearchType.create("sthpw/clipboard")
            item.set_user()
            item.add_related_sobject(sobject)
            item.set_value("category", "select")
            item.commit()

    @classmethod
    def clear_selected(cls):
        """Delete every item in the user's current selection."""
        search = Search("sthpw/clipboard")
        search.add_filter("category", "select")
        search.add_user_filter()
        for item in search.get_sobjects():
            item.delete()

    @classmethod
    def get_selected(cls):
        """Return the parent sobjects of the current selection, skipping orphans."""
        search = Search("sthpw/clipboard")
        search.add_filter("category", "select")
        search.add_user_filter()
        items = search.get_sobjects()
        parents = []
        for item in items:
            parent = item.get_parent()
            if parent:
                parents.append(parent)
            else:
                print("WARNING: parent to clipboard item [%s] does not exist" % item.get_code())
        return parents

    @classmethod
    def is_selected(cls, sobject):
        """Return True if ``sobject`` is part of the current selection."""
        clipboard_cache = cls._get_cache("select")
        if not sobject:
            return False
        search_key = sobject.get_search_key()
        return bool(clipboard_cache.get(search_key))

    @classmethod
    def reference_selected(cls, sobject):
        """Connect every selected item to ``sobject``."""
        items = cls.get_selected()
        # get all of the items already connected to this sobject
        from pyasm.biz import SObjectConnection
        # create a connection for each item
        for item in items:
            SObjectConnection.create(sobject, item)

    @classmethod
    def _get_cache(cls, category):
        '''preselect all the clipboard items of a particular category'''
        key = "clipboard:%s" % category
        clipboard_cache = Container.get(key)
        if clipboard_cache is not None:
            return clipboard_cache
        clipboard_cache = {}
        Container.put(key, clipboard_cache)
        search = Search(Clipboard)
        search.add_user_filter()
        # Filter on the requested category so the cache matches its key
        # (the original hard-coded "select" here).
        search.add_filter("category", category)
        items = search.get_sobjects()
        for item in items:
            search_type = item.get_value("search_type")
            search_id = item.get_value("search_id")
            search_key = "%s|%s" % (search_type, search_id)
            clipboard_cache[search_key] = item
        return clipboard_cache
|
Agent007/deepchem | contrib/dragonn/models.py | Python | mit | 16,267 | 0.001967 | from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from dragonn.metrics import ClassificationResult
from keras.layers.core import (Activation, Dense, Dropout, Flatten, Permute,
Reshape, TimeDistributedDense)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
from keras.layers.core import (Activation, Dense, Flatten,
TimeDistributedDense)
from keras.layers.recurrent import GRU
from keras.callbacks import EarlyStopping
#class SequenceDNN(Model):
# """
# Sequence DNN models.
#
# Parameters
# ----------
# seq_length : int, optional
# length of input sequence.
# keras_model : instance of keras.models.Sequential, optional
# seq_length or keras_model must be specified.
# num_tasks : int, optional
# number of tasks. Default: 1.
# num_filters : list[int] | tuple[int]
# number of convolutional filters in each layer. Default: (15,).
# conv_width : list[int] | tuple[int]
# width of each layer's convolutional filters. Default: (15,).
# pool_width : int
# width of max pooling after the last layer. Default: 35.
# L1 : float
# strength of L1 penalty.
# dropout : float
# dropout probability in every convolutional layer. Default: 0.
# verbose: int
# Verbosity level during training. Valida values: 0, 1, 2.
#
# Returns
# -------
# Compiled DNN model.
# """
#
# def __init__(self,
# seq_length=None,
# keras_model=None,
# use_RNN=False,
# num_tasks=1,
# num_filters=(15, 15, 15),
# conv_width=(15, 15, 15),
# pool_width=35,
# GRU_size=35,
# TDD_size=15,
# L1=0,
# dropout=0.0,
# num_epochs=100,
# verbose=1):
# self.num_tasks = num_tasks
# self.num_epochs = num_epochs
# self.verbose = verbose
# self.train_metrics = []
# self.valid_metrics = []
# if keras_model is not None and seq_length is None:
# self.model = keras_model
# self.num_tasks = keras_model.layers[-1].output_shape[-1]
# elif seq_length is not None and keras_model is None:
# self.model = Sequential()
# assert len(num_filters) == len(conv_width)
# for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
# conv_height = 4 if i == 0 else 1
# self.model.add(
# Convolution2D(
# nb_filter=nb_filter,
# nb_row=conv_height,
# nb_col=nb_col,
# activation='linear',
# init='he_normal',
# input_shape=(1, 4, seq_length),
# W_regularizer=l1(L1),
# b_regularizer=l1(L1)))
# self.model.add(Activation('relu'))
# self.model.add(Dropout(dropout))
# self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
# if use_RNN:
# num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
# self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
# self.model.add(Permute((2, 1)))
# self.model.add(GRU(GRU_size, return_sequences=True))
# self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
# self.model.add(Flatten())
# self.model.add(Dense(output_dim=self.num_tasks))
# self.model.add(Activation('sigmoid'))
# self.model.compile(optimizer='adam', loss='binary_crossentropy')
# else:
# raise ValueError(
# "Exactly one of seq_length or keras_model must be specified!")
#
# def train(self,
# X,
# y,
# validation_data,
# early_stopping_metric='Loss',
# early_stopping_patience=5,
# save_best_model_to_prefix=None):
# if y.dtype != bool:
# assert set(np.unique(y)) == {0, 1}
# y = y.astype(bool)
# multitask = y.shape[1] > 1
# if not multitask:
# num_positives = y.sum()
# num_sequences = len(y)
# num_negatives = num_sequences - num_positives
# if self.verbose >= 1:
# print('Training model (* indicates new best result)...')
# X_valid, y_valid = validation_data
# early_stopping_wait = 0
# best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
# for epoch in range(1, self.num_epochs + 1):
# self.model.fit(
# X,
# y,
# batch_size=128,
# nb_epoch=1,
# class_weight={
# True: num_sequences / num_positives,
# False: num_sequences / num_negatives
# } if not multitask else None,
# verbose=self.verbose >= 2)
# epoch_train_metrics = self.test(X, y)
# epoch_valid_metrics = self.test(X_valid, y_valid)
# self.train_metrics.append(epoch_train_metrics)
# self.valid_metrics.append(epoch_valid_metrics)
# if self.verbose >= 1:
# print('Epoch {}:'.format(epoch))
# print('Train {}'.format(epoch_train_metrics))
# print('Valid {}'.format(epoch_valid_metrics), end='')
# current_metric = epoch_valid_metrics[early_stopping_metric].mean()
# if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
# if self.verbose >= 1:
# print(' *')
# best_metric = current_metric
# best_epoch = epoch
# early_stopping_wait = 0
# if save_best_model_to_prefix is not None:
# self.save(save_best_model_to_prefix)
# else:
# if self.verbose >= 1:
# print()
# if early_stopping_wait >= early_stopping_patience:
# break
# early_stopping_wait += 1
# if self.verbose >= 1:
# print('Finished training after {} epochs.'.format(epoch))
# if save_best_model_to_prefix is not None:
# print("The best model's architecture and weights (from epoch {0}) "
# 'were saved to {1}.arch.json and {1}.weights.h5'.format(
# best_epoch, save_best_model_to_prefix))
#
# def predict(self, X):
# return self.model.predict(X, batch_size=128, verbose=False)
#
# def get_sequence_filters(self):
# """
# Returns 3D array of 2D sequence filters.
# """
# return self.model.layers[0].get_weights()[0].squeeze(axis=1)
#
# def deeplift(self, X, batch_size=200):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) deeplift score array.
# """
# assert len(np.shape(X)) == 4 and np.shape(X)[1] == 1
# from deeplift.conversion import keras_conversion as kc
#
# # convert to deeplift model and get scoring function
# deeplift_model = kc.convert_sequential_model(self.model, verbose=False)
# score_func = deeplift_model.get_target_contribs_func(
# find_scores_layer_idx=0)
# # use a 40% GC reference
# input_references = [np.array([0.3, 0.2, 0.2, 0.3])[None, None, :, None]]
# # get deeplift scores
# deeplift_scores = np.zeros((self.num_tasks,) + X.shape)
# for i in range(self.num_tasks):
# deeplift_scores[i] = score_func(
# task_idx=i,
# input_data_list=[X],
# batch_size=batch_size,
# progress_update=None,
| # input_references_list=input_references)
# return deeplift_scores
#
# def in_silico_mutagenesis(self, X):
# """
# Returns (num_task, num_samples, 1, num_bases, sequence_length) ISM score array.
# """
# mutagenesis_scores = np.empty(X.shape + (self.num_tasks,), dtype=np.float32)
# wild_type_predictions = self.predict(X)
# wild_type_predictions = wild_type_predictions[:, np.newaxis, np.newaxis,
# | np.newaxis]
# for sequence_index, (sequence, wild_type_prediction) in enumerate(
# zip(X, wild_type_predictions)):
# mutated_sequences = np.repeat(
# sequence[np.newaxis], np.prod(sequence.shape), axis=0)
# # remove wild-type
# arange = np.arange(len(mutated_sequences))
# horizontal_cycle = np.tile(
# |
simeonf/sfpython | sfpython/flatpage_admin_override/admin.py | Python | apache-2.0 | 783 | 0.005109 | from django.contrib import admin
from django import forms
from django.core.urlresolvers import reverse
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from tinymce.widgets import | TinyMCE
class TinyMCEFlatPageAdmin(FlatPageAdmin):
    """FlatPage admin that edits the `content` field through a TinyMCE widget.

    Restores the ``'external_link_list_url'`` key, which was corrupted by a
    stray ' | ' artifact.
    """

    def formfield_for_dbfield(self, db_field, **kwargs):
        # Swap the plain textarea for TinyMCE on the content field only;
        # every other field keeps the default admin form field.
        if db_field.name == 'content':
            return db_field.formfield(widget=TinyMCE(
                attrs={'cols': 80, 'rows': 30},
                mce_attrs={'external_link_list_url': reverse('tinymce.views.flatpages_link_list')},
            ))
        return super(TinyMCEFlatPageAdmin, self).formfield_for_dbfield(db_field, **kwargs)
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, TinyMCEFlatPageAdmin)
|
jeffrobots/senseRF | app/add_data.py | Python | bsd-2-clause | 2,377 | 0.002524 | from datetime import datetime
import peewee
import itertools
import random
from time import sleep
from models import Sensor, SensorData, init_db
# Seed data describing known sensor nodes.  Not referenced by the code below,
# which creates its own Sensor records via SensorNode.
# NOTE(review): the key "late_updated" looks like a typo for "last_updated"
# (cf. SensorNode.last_updated) -- confirm against the Sensor model schema.
sensors = [{"id": 1,
            "location": "Living Room",
            "late_updated": datetime.now(),
            "name": "greenpi",
            "refresh_rate": 30}
           ]
class SensorNode:
    """A fake sensor node that writes random readings into the database.

    Creating an instance persists a ``Sensor`` row; ``update_values`` then
    appends ``SensorData`` rows with random temperature/humidity/light
    readings.  Fixes: restores two lines corrupted by stray ' | ' artifacts;
    the original ``new_id = next(itertools.count())`` advanced a throwaway
    counter once, so every node received id 0 -- ids are now unique; string
    comparisons use ``==`` instead of ``is``; the mutable default argument
    is replaced by an (equally empty) tuple.
    """

    # Sensor channels this node can produce.
    types = {"temperature", "humidity", "light"}
    # Shared counter handing out one unique id per instantiated node.
    _id_counter = itertools.count()
    # Kept for backward compatibility; mirrors the last id issued (starts at 0
    # as in the original code).
    new_id = 0

    def __init__(self, name, location, refresh_rate=30):
        self.name = name
        self.location = location
        self.refresh_rate = refresh_rate
        self.id = SensorNode.new_unique_id()
        self.last_updated = 0
        # Persist this node as a Sensor record immediately.
        self.record = Sensor.create(unique_id=self.id, location=self.location,
                                    last_updated=self.last_updated, name=self.name,
                                    refresh_rate=self.refresh_rate)
        self.record.save()

    def update_values(self, sensors_to_update=()):
        """Record one new reading for each requested channel.

        ``sensors_to_update`` may be a single channel name, the string
        "all", or an iterable of channel names; unknown names are ignored.
        """
        for channel in self._normalize_channels(sensors_to_update):
            self._update(channel)

    @classmethod
    def _normalize_channels(cls, arg):
        # Accept "all", a single channel name, or an iterable of names, and
        # return the subset of valid channels as a set.
        if type(arg) is str:
            if arg == "all":
                return cls.types
            return {arg} & cls.types
        return set(arg) & cls.types

    def _update(self, sensor_type):
        # == (not `is`) for string comparison: identity of equal string
        # literals is an implementation detail.
        if sensor_type == "temperature":
            value = self.get_rand_temperature()
        elif sensor_type == "humidity":
            value = self.get_rand_humidity()
        elif sensor_type == "light":
            value = self.get_rand_light()
        data = {"sensor_type": sensor_type, "sensor": self.record,
                "sensor_val": value, "updated_at": datetime.now()}
        record = SensorData(**data)
        record.save()

    @staticmethod
    def get_rand_temperature():
        """Random temperature reading (degrees, 62-64)."""
        return random.uniform(62.0, 64.0)

    @staticmethod
    def get_rand_humidity():
        """Random relative-humidity reading (37-40)."""
        return random.uniform(37.0, 40.0)

    @staticmethod
    def get_rand_light():
        """Random light-level reading (300-305)."""
        return random.uniform(300.0, 305.0)

    @classmethod
    def new_unique_id(cls):
        """Return a fresh id, advancing the shared counter."""
        cls.new_id = next(cls._id_counter)
        return cls.new_id
if __name__ == "__main__":
    # Demo entry point: set up the DB, create one node, and write ten rounds
    # of readings for every channel, pausing briefly between rounds.
    init_db()
    sensor = SensorNode("greenpi", "Living Room")
    for number in range(10):
        sensor.update_values("all")
        sleep(.01)
|
scieloorg/access_stats | setup.py | Python | bsd-2-clause | 1,792 | 0.001116 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages

# Resolve paths relative to this setup.py so builds work from any CWD.
here = os.path.abspath(os.path.dirname(__file__))

# Long description sources: README plus the changelog.
with open(os.path.join(here, 'README.rst')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
    CHANGES = f.read()

install_requires = [
    'requests>=2.8.1',
    'elasticsearch>=1.5.0',
    'cython>=0.22',
    'thriftpy>=0.3.1',
    'thriftpywrap',
    'xylose>=1.16.5',
    'pyramid>=1.5.7',
    'pyramid_chameleon',
    'pyramid_mako',
    'pyramid_debugtoolbar',
    'waitress',
]

test_requires = []

# NOTE: two classifier/dependency lines below were corrupted by stray ' | '
# artifacts and are restored here.
setup(
    name="access",
    version='0.6.4',
    description="A SciELO RPC server and API to retrieve access statistics from the SciELO Network ",
    author="SciELO",
    author_email="scielo-dev@googlegroups.com",
    license="BSD 2-clause",
    url="http://docs.scielo.org",
    keywords='scielo statistics',
    packages=['access'],
    classifiers=[
        "Development Status :: 1 - Planning",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python",
        "Operating System :: POSIX :: Linux",
        "Topic :: System",
        "Topic :: Services",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
    ],
    dependency_links=[
        "git+https://github.com/scieloorg/thriftpy-wrap@0.1.1#egg=thriftpywrap"
    ],
    include_package_data=True,
    zip_safe=False,
    setup_requires=["nose>=1.0", "coverage"],
    tests_require=test_requires,
    install_requires=install_requires,
    test_suite="nose.collector",
    entry_points="""\
    [paste.app_factory]
    main = access:main
    [console_scripts]
    accessstats_thriftserver = access.thrift.server:main
    """,
)
DavidTingley/ephys-processing-pipeline | installation/klustaviewa-0.3.0/klustaviewa/stats/tests/test_indexed_matrix.py | Python | gpl-3.0 | 7,015 | 0.010549 | """Unit tests for stats.cache module."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from nose.tools import raises
import numpy as np
from klustaviewa.stats.indexed_matrix import IndexedMatrix, CacheMatrix
# -----------------------------------------------------------------------------
# Indexed matrix tests
# -----------------------------------------------------------------------------
def test_indexed_matrix_0():
    """to_absolute/to_relative translate between positions and indices."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    assert np.array_equal(matrix.to_absolute([0, 3, 1]), [2, 7, 3])
    assert np.array_equal(matrix.to_absolute(2), 5)
    assert np.array_equal(matrix.to_relative([2, 7, 3]), [0, 3, 1])
    assert np.array_equal(matrix.to_relative(5), 2)
@raises(IndexError)
def test_indexed_matrix_1():
    """Indexing with a value outside the declared indices raises."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    # This should raise an IndexError.
    matrix[0, 0]
def test_indexed_matrix_2():
    """A fresh matrix reads as zeros for scalar, slice and fancy indexing."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    assert matrix[2, 2] == 0.
    assert np.array_equal(matrix[:, 2], np.zeros(4))
    assert np.array_equal(matrix[7, :], np.zeros(4))
    assert np.array_equal(matrix[[2, 5], :], np.zeros((2, 4)))
    assert np.array_equal(matrix[[2, 5, 3], [2]], np.zeros((3, 1)))
    assert np.array_equal(matrix[[2, 5, 3], 2], np.zeros(3))
    assert np.array_equal(matrix[[2], [2, 5, 3]], np.zeros((1, 3)))
    assert np.array_equal(matrix[2, [2, 5, 3]], np.zeros(3))
    assert np.array_equal(matrix[[5, 7], [3, 2]], np.zeros((2, 2)))
def test_indexed_matrix_3():
    """add_indices grows the matrix and keeps the index list sorted."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    matrix.add_indices(4)
    assert matrix.shape == (5, 5)
    assert np.array_equal(matrix.indices, [2, 3, 4, 5, 7])
def test_indexed_matrix_4():
    """Adding an already-present index is a no-op."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    matrix.add_indices(7)
    assert np.array_equal(matrix.indices, indices)
def test_indexed_matrix_5():
    """Indices can be added as a list and removed individually."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    matrix.add_indices([6, 10])
    assert matrix.shape == (6, 6)
    assert np.array_equal(matrix.indices, [2, 3, 5, 6, 7, 10])
    matrix.remove_indices(7)
    assert matrix.shape == (5, 5)
    assert np.array_equal(matrix.indices, [2, 3, 5, 6, 10])
@raises(IndexError)
def test_indexed_matrix_6():
    """Removing a list that contains an unknown index raises."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    matrix.add_indices([6, 10])
    # One of the indices does not exist, so this raises an Exception.
    matrix.remove_indices([5, 6, 9])
def test_indexed_matrix_7():
    """Scalar, row/column-slice and fancy assignments broadcast into the matrix.

    Restores two statements corrupted by stray ' | ' artifacts
    (``matrix[:, 7]`` and the assignment of ``50``, cf. the following assert).
    """
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices)
    matrix[2, 3] = 10
    assert np.all(matrix[2, 3] == 10)
    matrix[5, :] = 20
    assert np.all(matrix[5, :] == 20)
    matrix[:, 7] = 30
    assert np.all(matrix[:, 7] == 30)
    matrix[[2, 3], 5] = 40
    assert np.all(matrix[[2, 3], 5] == 40)
    matrix[[2, 3], [5, 7]] = 50
    assert np.all(matrix[[2, 3], [5, 7]] == 50)
def test_indexed_matrix_8():
    """3D storage (indices x indices x depth) survives reads and removals."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices, shape=(4, 4, 10))
    x = np.random.rand(10)
    matrix[7, 7] = x
    assert np.array_equal(matrix[7, 7], x)
    assert np.array_equal(matrix[7, :][-1, :], x)
    assert np.array_equal(matrix[[2, 7], 7][-1, :], x)
    assert np.array_equal(matrix[[2, 5, 3], [2]], np.zeros((3, 1, 10)))
    assert np.array_equal(matrix[[2, 5, 3], 2], np.zeros((3, 10)))
    assert np.array_equal(matrix[[2], [2, 5, 3]], np.zeros((1, 3, 10)))
    assert np.array_equal(matrix[2, [2, 5, 3]], np.zeros((3, 10)))
    assert np.array_equal(matrix[[5, 7], [3, 2]], np.zeros((2, 2, 10)))
    # Removing an unrelated index must not disturb the stored vector.
    matrix.remove_indices(5)
    assert matrix.to_array().shape == (3, 3, 10)
    assert np.array_equal(matrix[7, 7], x)
def test_indexed_matrix_9():
    """not_in_indices reports which of the given indices are absent."""
    matrix = IndexedMatrix()
    indices = [10, 20]
    matrix.add_indices(10)
    assert np.array_equal(matrix.not_in_indices(indices), [20])
    # Writing values does not change which indices exist.
    matrix[10, 10] = 1
    assert np.array_equal(matrix.not_in_indices(indices), [20])
    matrix.add_indices(20)
    assert np.array_equal(matrix.not_in_indices(indices), [])
    matrix[20, :] = 0
    matrix[:, 20] = 0
    assert np.array_equal(matrix.not_in_indices(indices), [])
def test_indexed_matrix_10():
    """submatrix extracts the rows/columns for a subset of indices."""
    indices = [2, 3, 5, 7]
    matrix = IndexedMatrix(indices=indices, shape=(4, 4, 10))
    matrix[3, 7] = np.ones(10)
    matrix[2, 5] = 2 * np.ones(10)
    submatrix = matrix.submatrix([3,7])
    assert submatrix.shape == (2, 2, 10)
    assert np.array_equal(submatrix.to_array()[0, 1, ...], np.ones(10))
    submatrix = matrix.submatrix([2,5])
    assert submatrix.shape == (2, 2, 10)
    assert np.array_equal(submatrix.to_array()[0, 1, ...], 2 * np.ones(10))
# -----------------------------------------------------------------------------
# Cache matrix tests
# -----------------------------------------------------------------------------
def test_cache_matrix_1():
    """update() fills the cache from a {(i, j): value} dict."""
    indices = [2, 3, 5, 7]
    matrix = CacheMatrix(shape=(0, 0, 10))
    assert np.array_equal(matrix.not_in_indices(indices), indices)
    d = {(i, j): i + j for i in indices for j in indices}
    matrix.update(indices, d)
    # Expected content: the outer sum i + j over the index vector.
    matrix_actual = (np.array(indices).reshape((-1, 1)) +
                     np.array(indices).reshape((1, -1)))
    assert np.array_equal(matrix.to_array()[:, :, 0], matrix_actual)
    assert np.array_equal(matrix.not_in_indices(indices), [])
def test_cache_matrix_2():
    """invalidate() marks previously cached indices as missing again."""
    indices = [2, 3, 5, 7]
    matrix = CacheMatrix(shape=(0, 0, 10))
    d = {(i, j): i + j for i in indices for j in indices}
    matrix.update(indices, d)
    assert np.array_equal(matrix.not_in_indices(indices), [])
    matrix.invalidate([2, 5])
    assert np.array_equal(matrix.not_in_indices(indices), [2, 5])
def test_cache_matrix_3():
    """not_in_key_indices() tracks which key rows have been cached.

    Renamed from a duplicate ``test_cache_matrix_2``: the second definition
    shadowed the first, so the earlier invalidate() test was never collected.
    """
    indices = [2, 3, 5, 7]
    matrix = CacheMatrix()
    assert np.array_equal(matrix.not_in_key_indices(indices), indices)
    # A partial update only registers the keys it was told about.
    matrix.update(2, {(2, 2): 0, (2, 3): 0, (3, 2): 0})
    assert np.array_equal(matrix.not_in_key_indices(indices), [3, 5, 7])
    matrix.update([2, 3], {(2, 2): 0, (2, 3): 0, (3, 2): 0, (3, 3): 0})
    assert np.array_equal(matrix.not_in_key_indices(indices), [5, 7])
    # invalidate() re-adds keys to the missing set.
    matrix.invalidate([2, 5])
    assert np.array_equal(matrix.not_in_key_indices(indices), [2, 5, 7])
    d = {(i, j): i + j for i in indices for j in indices}
    matrix.update(indices, d)
    assert np.array_equal(matrix.not_in_key_indices(indices), [])
|
vnsofthe/odoo | addons/base_geolocalize/models/res_partner.py | Python | agpl-3.0 | 3,783 | 0.002379 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013_Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
import simplejson as json
except ImportError:
import json # noqa
import urllib2
from openerp.osv import osv, fields
from openerp import tools
from openerp.tools.translate import _
def geo_find(addr):
if not addr:
return None
url = 'https://maps.googleapis.com/maps/api/geocode/json?sensor=false&address='
url += urllib2.quote(addr.encode('utf8'))
try:
result = json.load(urllib2.urlopen(url))
ex | cept Exception, e:
raise osv.except_osv(_('Network error'),
_('Cannot contact geolocation servers. Please make sure that your internet connection is up and running (%s).') % e)
if result['status'] != 'OK':
return None
| try:
geo = result['results'][0]['geometry']['location']
return float(geo['lat']), float(geo['lng'])
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
    """Assemble a single geocoding query string from the address parts.

    Empty components are dropped; zip and city are merged into one token.
    """
    if country and ',' in country and country.endswith((' of', ' of the')):
        # Put the country qualifier in front, otherwise GMap gives wrong
        # results, e.g. 'Congo, Democratic Republic of the'
        # => 'Democratic Republic of the Congo'
        name, qualifier = country.split(',', 1)
        country = '%s %s' % (qualifier, name)
    zip_city = ("%s %s" % (zip or '', city or '')).strip()
    parts = [part for part in (street, zip_city, state, country) if part]
    return tools.ustr(', '.join(parts))
class res_partner(osv.osv):
    """Extend partners with geolocation fields and a geocoding action."""
    _inherit = "res.partner"
    _columns = {
        # Coordinates stored with 5 decimal places (digits=(16, 5)).
        'partner_latitude': fields.float('Geo Latitude', digits=(16, 5)),
        'partner_longitude': fields.float('Geo Longitude', digits=(16, 5)),
        # Date the coordinates were last resolved.
        'date_localization': fields.date('Geo Localization Date'),
    }
    def geo_localize(self, cr, uid, ids, context=None):
        """Resolve and store coordinates for each partner's address.

        Partners whose address cannot be geocoded are left untouched.
        Always returns True.
        """
        # Don't pass context to browse()! We need country names in english below
        for partner in self.browse(cr, uid, ids):
            if not partner:
                continue
            result = geo_find(geo_query_address(street=partner.street,
                                                zip=partner.zip,
                                                city=partner.city,
                                                state=partner.state_id.name,
                                                country=partner.country_id.name))
            if result:
                self.write(cr, uid, [partner.id], {
                    'partner_latitude': result[0],
                    'partner_longitude': result[1],
                    'date_localization': fields.date.context_today(self, cr, uid, context=context)
                }, context=context)
        return True
|
srounet/pystormlib | pystormlib/structure.py | Python | mit | 485 | 0 | import ctypes
class MPQFileData(ctypes.Structure):
    """ctypes record describing one file found in an MPQ archive.

    Field names mirror StormLib's file-enumeration structure (apparently
    SFILE_FIND_DATA -- confirm against the StormLib headers).
    (Reconstructed: stray dataset split markers corrupted two field tuples.)
    """
    _fields_ = [
        ('filename', ctypes.c_char * 1024),   # fixed-size name buffer
        ('plainpath', ctypes.c_char_p),
        # Remaining members are declared as 32-bit int bit fields.
        ('hashindex', ctypes.c_int, 32),
        ('blockindex', ctypes.c_int, 32),
        ('filesize', ctypes.c_int, 32),
        ('fileflags', ctypes.c_int, 32),
        ('compsize', ctypes.c_int, 32),
        ('filetimelo', ctypes.c_int, 32),
        ('filetimehi', ctypes.c_int, 32),
        ('locale', ctypes.c_int, 32)
    ]
ernitron/radio-server | radio-server/server.py | Python | mit | 26,943 | 0.008165 | #!/usr/bin/env python3
"""
My radio server application
For my eyes only
"""
#CREATE TABLE Radio(id integer primary key autoincrement, radio text, genre text, ur | l text);
uuid='56ty66ba-6kld-9opb-ak29-0t7f5d294686'
# Import CherryPy global namespace
import os
import sys
import time
import socket
import cherrypy
import sqlite3 as lite
import re
import subprocess
from random import shuffle
# Globals
version = "4.2.1"
database = "database.db"
player = 'omxplayer'
header = '''<!DOCTYPE html | >
<html lang="en">
<head>
<title>My Radio Web Server</title>
<meta name="generator" content="Vim">
<meta charset="UTF-8">
<link rel="icon" type="image/png" href="/static/css/icon.png" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<script src="/static/js/jquery-2.0.3.min.js"></script>
<script src="/static/js/bootstrap.min.js"></script>
<link rel="stylesheet" href="/static/css/bootstrap.min.css">
<!-- Custom styles for this template -->
<link href="/static/css/sticky-footer.css" rel="stylesheet">
<style media="screen" type="text/css">
#radio-playing { display: none; }
#radio-table { display: none; }
#radio-volume { display: none; }
.jumbotron { padding: 10px 10px; }
</style>
<script type="text/javascript">
function fmodradio(rid) {
$.post('/m/', {id: rid},
function(data){
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
function fdelradio(rid) {
var r = confirm("DELETING " + rid);
if (r != true) { return; }
$.post('/d/', {id: rid},
function(data){
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
function fplayradio(rid) {
$.post('/p/', {id: rid},
function(data){
$("#radio-playing").html(data);
$("#radio-playing").show();
$("#radio-volume").hide();
},
"html"
);
}
function faddfav(i, g) {
$.post('/haddfav/', {id: i},
function(data){
$("#radio-playing").html(data);
$("#radio-playing").show();
$("#radio-volume").hide();
},
"html"
);
}
function fvolradio(updown) {
$.post('/v/', {vol: updown},
function(data){
$("#radio-volume").html(data);
$("#radio-volume").show();
},
"html"
);
}
function fkilradio() {
$.post('/k/',
function(data){
$("#radio-volume").html(data);
$("#radio-volume").show();
},
"html"
);
}
function fsearch(nam, gen) {
$.post('/g/', {name: nam, genre: gen},
function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
function frandom(n, g) {
$.post('/g/', {name: n, genre: g, randomlist:'true'},
function(data){
$("#radio-table").html(data);
$("#radio-table").show();
},
"html"
);
}
// ----------------------------------------------------------
$(document).ready(function() {
$('body').on('click', '#button-modify', function(e) {
i = $("#idm").val()
n = $("#namem").val()
g = $("#genrem").val()
u = $("#urlm").val()
$.post("/f/", {id: i, name: n, genre: g, url: u})
.done(function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
});
e.preventDefault();
});
$('#namem').keyup(function(e){
if(e.keyCode == 13) {
$('#button-modify').click();
}
});
$('#genrem').keyup(function(e){
if(e.keyCode == 13) {
$('#button-modify').click();
}
});
$('#urlm').keyup(function(e){
if(e.keyCode == 13) {
$('#button-modify').click();
}
});
$('#button-search').click(function(e) {
n = $("#name").val()
g = $("#genre").val()
$.post("/g/", {name: n, genre: g})
.done(function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
});
e.preventDefault();
});
$('#name').keyup(function(e){
if(e.keyCode == 13) {
$('#button-search').click();
}
});
$('#genre').keyup(function(e){
if(e.keyCode == 13) {
$('#button-search').click();
}
});
$("#button-insert").click(function(e) {
n = $("#namei").val()
g = $("#genrei").val()
u = $("#urli").val()
$.post("/i/", {name: n, genre: g, url: u})
.done(function(data) {
$("#radio-table").html(data);
$("#radio-table").show();
});
e.preventDefault();
});
$("#play-radio").click(function(e) {
i = $("#idp").val()
$.post("/p/", {id: i})
.done(function(data) {
$("#radio-playing").html(data);
$("#radio-playing").show();
});
e.preventDefault();
});
});
</script>
</head>
<body>
<div class="container-fluid">
<div class='jumbotron'>
<h2><a href="/">Radio</a>
<a href="#" onClick="fvolradio('down')"><span class="glyphicon glyphicon-volume-down"></span></a>
<a href="#" onClick="fvolradio('up')"><span class="glyphicon glyphicon-volume-up"></span></a>
<a href="#" onClick="fkilradio('up')"> <span class="glyphicon glyphicon-record"></span></a>
</h2>
<p>
<div class="form-group">
<input type="text" id="name" name="name" placeholder="radio to search">
<input type="text" id="genre" name="genre" placeholder="genre" >
<button id="button-search">Search</button>
</div>
</p>
<p>
<div class="form-group">
<input type="text" id="namei" name="name" placeholder="Radio Name">
<input type="text" id="genrei" name="genre" placeholder="genre">
<input type="text" id="urli" name="url" placeholder="http://radio.com/stream.mp3">
<button id="button-insert">Insert</button>
<p>
[
<a href="#" onClick="fsearch('', 'rai')"> rai </a>|
<a href="#" onClick="fsearch('','fav')"> fav </a> |
<a href="#" onClick="fsearch('','rmc')"> rmc </a> |
<a href="#" onClick="fsearch('','class')"> class </a> |
<a href="#" onClick="fsearch('','jazz')"> jazz </a> |
<a href="#" onClick="fsearch('','chill')"> chill </a> |
<a href="#" onClick="fsearch('','nl')"> nl </a> |
<a href="#" onClick="fsearch('','bbc')"> bbc </a> |
<a href="#" onClick="fsearch('','uk')"> uk </a> |
<a href="#" onClick="fsearch('','italy')"> italy </a>
]
</p>
</div>
<small><div id="radio-playing"> </div></small>
</br>
</div> <!-- Jumbotron END -->
<div id="radio-volume"> </div>
<div id="radio-table"> </div>
'''
footer = '''<p></div></body></html>'''
def isplayfile(pathname):
    """Return True when *pathname* is an existing, playable audio file.

    Playable means the (case-insensitive) extension is .mp2, .mp3 or .ogg.
    """
    if not os.path.isfile(pathname):
        return False
    ext = os.path.splitext(pathname)[1].lower()
    return ext in ('.mp2', '.mp3', '.ogg')
# ------------------------ AUTHENTICATION --------------------------------
from cherrypy.lib import auth_basic
# Password is: webradio
users = {'admin':'29778a9bdb2253dd8650a13b8e685159'}
def validate_password(self, login, password):
if login in users :
if encrypt(password) == users[login] :
cherrypy.session['user |
databuild/databuild | databuild/operations/columns.py | Python | bsd-3-clause | 1,932 | 0.003623 | from decimal import Decimal
from dateutil import parser as date_parser
import six
def update_column(context, sheet, column, facets=None, values=None, expression=None):
    """Overwrite *column* on the named sheet with literal values or a row expression."""
    assert values or expression
    source = expression or values  # the expression wins when both are given
    target_sheet = context['workbook'].sheets[sheet]
    target_sheet.update_column(column, source, filter_fn=facets)
def add_column(context, sheet, name, values=None, expression=None):
    """Append a new column to the named sheet, filled from values or a row expression."""
    source = expression or values  # the expression wins when both are given
    context['workbook'].sheets[sheet].append_column(name, source)
def remove_column(context, sheet, name):
    """Drop the column *name* from the named sheet."""
    context['workbook'].sheets[sheet].remove_column(name)
def rename_column(context, sheet, old_name, new_name):
    """Rename a column by copying *old_name* into *new_name*, then dropping it.

    (Reconstructed: stray dataset split markers corrupted this function.)
    """
    # Pass the sheet *name* through: add_column resolves the name against
    # workbook.sheets itself.  The previous code rebound ``sheet`` to the
    # Sheet object and handed that to add_column, which would then look the
    # object up as a key in workbook.sheets.
    add_column(context, sheet, new_name, expression=lambda row: row[old_name])
    # Remove only after the copy so the expression can still read the data.
    context['workbook'].sheets[sheet].remove_column(old_name)
def to_float(context, sheet, column, facets=None):
    """Cast every (matching) cell of *column* to float."""
    update_column(context, sheet, column, facets,
                  expression=lambda row: float(row[column]))
def to_integer(context, sheet, column, facets=None):
    """Cast every (matching) cell of *column* to int."""
    update_column(context, sheet, column, facets,
                  expression=lambda row: int(row[column]))
def to_decimal(context, sheet, column, facets=None):
    """Cast every (matching) cell of *column* to Decimal."""
    update_column(context, sheet, column, facets,
                  expression=lambda row: Decimal(row[column]))
def to_text(context, sheet, column, facets=None):
    """Cast every (matching) cell of *column* to text."""
    update_column(context, sheet, column, facets,
                  expression=lambda row: six.text_type(row[column]))
def to_datetime(context, sheet, column, facets=None):
    """Parse every (matching) cell of *column* into a datetime."""
    update_column(context, sheet, column, facets,
                  expression=lambda row: date_parser.parse(row[column]))
|
jaswal72/hacker-rank | Python/Basic Data Types/List Comprehensions.py | Python | mit | 226 | 0.030973 | if __name__ == '__main__':
x = int(raw_input())
y = int(raw_input())
z = int(raw_input())
n = int(raw_inp | ut())
print ( [ [i,j,k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if i+j+k != n] | )
|
atomic83/youtube-dl | youtube_dl/extractor/npr.py | Python | unlicense | 2,966 | 0.001349 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
int_or_none,
qualities,
)
class NprIE(InfoExtractor):
    """Extract the audio entries of an NPR media-player playlist page.

    (Reconstructed: stray dataset split markers corrupted two lines of the
    test fixtures.)
    """
    _VALID_URL = r'http://(?:www\.)?npr\.org/player/v2/mediaPlayer\.html\?.*\bid=(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.npr.org/player/v2/mediaPlayer.html?id=449974205',
        'info_dict': {
            'id': '449974205',
            'title': 'New Music From Beach House, Chairlift, CMJ Discoveries And More'
        },
        'playlist_count': 7,
    }, {
        'url': 'http://www.npr.org/player/v2/mediaPlayer.html?action=1&t=1&islist=false&id=446928052&m=446929930&live=1',
        'info_dict': {
            'id': '446928052',
            'title': "Songs We Love: Tigran Hamasyan, 'Your Mercy is Boundless'"
        },
        'playlist': [{
            'md5': '12fa60cb2d3ed932f53609d4aeceabf1',
            'info_dict': {
                'id': '446929930',
                'ext': 'mp3',
                'title': 'Your Mercy is Boundless (Bazum en Qo gtutyunqd)',
                'duration': 402,
            },
        }],
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        config = self._download_json(
            'http://api.npr.org/query?%s' % compat_urllib_parse.urlencode({
                'id': playlist_id,
                'fields': 'titles,audio,show',
                'format': 'json',
                'apiKey': 'MDAzMzQ2MjAyMDEyMzk4MTU1MDg3ZmM3MQ010',
            }), playlist_id)
        story = config['list']['story'][0]
        KNOWN_FORMATS = ('threegp', 'mp4', 'mp3')
        quality = qualities(KNOWN_FORMATS)
        entries = []
        for audio in story.get('audio', []):
            title = audio.get('title', {}).get('$text')
            duration = int_or_none(audio.get('duration', {}).get('$text'))
            formats = []
            for format_id, formats_entry in audio.get('format', {}).items():
                if not formats_entry:
                    continue
                # Some formats come wrapped in a one-element list.
                if isinstance(formats_entry, list):
                    formats_entry = formats_entry[0]
                format_url = formats_entry.get('$text')
                if not format_url:
                    continue
                if format_id in KNOWN_FORMATS:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'ext': formats_entry.get('type'),
                        'quality': quality(format_id),
                    })
            self._sort_formats(formats)
            entries.append({
                'id': audio['id'],
                'title': title,
                'duration': duration,
                'formats': formats,
            })
        playlist_title = story.get('title', {}).get('$text')
        return self.playlist_result(entries, playlist_id, playlist_title)
|
Patola/Cura | plugins/VersionUpgrade/VersionUpgrade34to35/__init__.py | Python | lgpl-3.0 | 2,372 | 0.004637 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, TYPE_CHECKING
from . import VersionUpgrade34to35
if TYPE_CHECKING:
from UM.Application import Application
upgrade = VersionUpgrade34to35.VersionUpgrade34to35()
def getMetaData() -> Dict[str, Any]:
    """Describe the 3.4->3.5 upgrade routines and where each file type lives."""
    # From                              To                 Upgrade function
    version_upgrade = {
        ("preferences", 6000004): ("preferences", 6000005, upgrade.upgradePreferences),
        ("machine_stack", 4000004): ("machine_stack", 4000005, upgrade.upgradeStack),
        ("extruder_train", 4000004): ("extruder_train", 4000005, upgrade.upgradeStack),
    }
    # All instance-container types share the same upgrade function.
    for container_type in ("definition_changes", "quality_changes", "quality", "user"):
        version_upgrade[(container_type, 4000004)] = (
            container_type, 4000005, upgrade.upgradeInstanceContainer)

    # Every source uses the same version probe; only the location differs.
    locations = {
        "preferences": {"."},
        "machine_stack": {"./machine_instances"},
        "extruder_train": {"./extruders"},
        "definition_changes": {"./definition_changes"},
        "quality_changes": {"./quality_changes"},
        "quality": {"./quality"},
        "user": {"./user"},
    }
    sources = {
        name: {"get_version": upgrade.getCfgVersion, "location": location}
        for name, location in locations.items()
    }
    return {"version_upgrade": version_upgrade, "sources": sources}
def register(app: "Application") -> Dict[str, Any]:
    """Plugin entry point: expose the shared upgrade instance (*app* unused)."""
    return { "version_upgrade": upgrade }
|
mathLab/RBniCS | rbnics/reduction_methods/base/nonlinear_time_dependent_rb_reduction.py | Python | lgpl-3.0 | 786 | 0.005089 | # Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.reduction_methods.base.nonlinear_rb_reduction import NonlinearRBReduction
from rbnics.reduction_methods.base.time_dependent_rb_reduction import TimeDependen | tRBReduction
from rbnics.utils.decorators import PreserveClassName, RequiredBaseDecorators
@RequiredBaseDecorators(NonlinearRBReductio | n, TimeDependentRBReduction)
def NonlinearTimeDependentRBReduction(DifferentialProblemReductionMethod_DerivedClass):
@PreserveClassName
class NonlinearTimeDependentRBReduction_Class(DifferentialProblemReductionMethod_DerivedClass):
pass
# return value (a class) for the decorator
return NonlinearTimeDependentRBReduction_Class
|
pjdelport/feincms | feincms/module/medialibrary/forms.py | Python | bsd-3-clause | 2,901 | 0.005171 | # ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from __future__ import absolute_import
import os
from django import forms
from django.utils.translation import ugettext_lazy as _
from feincms import settings
from . import logger
from .models import Category, MediaFile
from .fields import AdminFileWithPreviewWidget
# ------------------------------------------------------------------------
class MediaCategoryAdminForm(forms.ModelForm):
    """Admin form for media categories; prevents cyclic parent relations.

    (Reconstructed: stray dataset split markers corrupted ``__init__``.)
    """
    class Meta:
        model = Category
    def clean_parent(self):
        """Reject a parent choice that would make the category its own ancestor."""
        data = self.cleaned_data['parent']
        if data is not None and self.instance in data.path_list():
            raise forms.ValidationError(_("This would create a loop in the hierarchy"))
        return data
    def __init__(self, *args, **kwargs):
        super(MediaCategoryAdminForm, self).__init__(*args, **kwargs)
        # A category can never be its own parent, so drop it from the choices.
        self.fields['parent'].queryset = self.fields['parent'].queryset.exclude(pk=self.instance.pk)
# ------------------------------------------------------------------------
class MediaFileAdminForm(forms.ModelForm):
    """Admin form for media files; can overwrite the stored file in place."""
    class Meta:
        model = MediaFile
        widgets = { 'file': AdminFileWithPreviewWidget }
    def __init__(self, *args, **kwargs):
        super(MediaFileAdminForm, self).__init__(*args, **kwargs)
        if settings.FEINCMS_MEDIAFILE_OVERWRITE and self.instance.id:
            # Patch the field's filename generator once (guarded by the
            # marker attribute) so replacement uploads delete and then reuse
            # the original file name instead of getting a new one.
            if not hasattr(self.instance.file.field, '_feincms_generate_filename_patched'):
                orig_generate_filename = self.instance.file.field.generate_filename
                def _gen_fname(instance, filename):
                    # 'original_name' is set by clean_file() below on updates.
                    if instance.id and hasattr(instance, 'original_name'):
                        logger.info("Overwriting file %s with new data" % instance.original_name)
                        instance.file.storage.delete(instance.original_name)
                        return instance.original_name
                    return orig_generate_filename(instance, filename)
                self.instance.file.field.generate_filename = _gen_fname
                self.instance.file.field._feincms_generate_filename_patched = True
    def clean_file(self):
        """Validate replacement uploads and remember the name to overwrite."""
        if settings.FEINCMS_MEDIAFILE_OVERWRITE and self.instance.id:
            new_base, new_ext = os.path.splitext(self.cleaned_data['file'].name)
            old_base, old_ext = os.path.splitext(self.instance.file.name)
            # Overwriting keeps the old name, so the extension must not change.
            if new_ext.lower() != old_ext.lower():
                raise forms.ValidationError(_("Cannot overwrite with different file type (attempt to overwrite a %(old_ext)s with a %(new_ext)s)") % { 'old_ext': old_ext, 'new_ext': new_ext })
            self.instance.original_name = self.instance.file.name
        return self.cleaned_data['file']
# ------------------------------------------------------------------------
|
amdouglas/OpenPNM | test/unit/Geometry/models/PoreSeedTest.py | Python | mit | 1,410 | 0 | import OpenPNM
import scipy as sp
class PoreSeedTest:
    """Unit tests for the pore_seed geometry models."""
    def setup_class(self):
        self.net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
        self.geo = OpenPNM.Geometry.GenericGeometry(network=self.net,
                                                    pores=self.net.Ps,
                                                    throats=self.net.Ts)
    def test_random(self):
        f = OpenPNM.Geometry.models.pore_seed.random
        self.geo.models.add(propname='pore.seed',
                            model=f,
                            seed=0,
                            num_range=[0.1, 2])
        assert sp.amax(self.geo['pore.seed']) > 1.9
        assert sp.amin(self.geo['pore.seed']) > 0.1
    def test_spatially_correlated(self):
        f = OpenPNM.Geometry.models.pore_seed.spatially_correlated
        self.geo.models.add(propname='pore.seed',
                            model=f,
                            weights=[2, 2, 2])
        # Seeds must lie strictly inside (0, 1).  The previous asserts put
        # the comparison inside amin()/amax() (e.g. sp.amax(x < 1)), which
        # reduces a boolean array -- amax(x < 1) is true as soon as *any*
        # element is < 1, not all of them.  Compare the extrema instead,
        # matching the pattern used in test_random above.
        assert sp.amin(self.geo['pore.seed']) > 0
        assert sp.amax(self.geo['pore.seed']) < 1
    def test_spatially_correlated_zero_weights(self):
        f = OpenPNM.Geometry.models.pore_seed.spatially_correlated
        self.geo.models.add(propname='pore.seed',
                            model=f,
                            weights=[0, 0, 0])
        assert sp.amin(self.geo['pore.seed']) > 0
        assert sp.amax(self.geo['pore.seed']) < 1
|
effigies/mne-python | examples/plot_read_forward.py | Python | bsd-3-clause | 2,405 | 0.004158 | """
====================================================
Read a forward operator and display sensitivity maps
====================================================
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
print(__doc__)
import mne
from mne.datasets import sample
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
fwd = mne.read_forward_solution(fname, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
import matplotlib.pyplot as plt
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', font | size=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
############################################################################### |
# Show sensitivity of each sensor type to dipoles in the source space
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.legend()
# Cautious smoothing to see actual dipoles
args = dict(fmin=-0.1, fmid=0.5, fmax=1.1, smoothing_steps=3)
grad_map.plot(subject='sample', time_label='Gradiometer sensitivity',
subjects_dir=subjects_dir, **args)
# Note. The source space uses min-dist and therefore discards most
# superficial dipoles. This is why parts of the gyri are not covered. |
freedesktop-unofficial-mirror/telepathy__telepathy-idle | tests/twisted/channels/join-muc-channel-bouncer.py | Python | lgpl-2.1 | 1,912 | 0.002615 |
"""
Test connecting to a IRC channel when prompted by a bouncer
"""
from idletest import exec_test
from servicetest import (
EventPattern, assertEquals, call_async, make_channel_proxy
)
from constants import *
def test_join_bouncer(q, conn, stream, room):
    """Simulate the bouncer forcing a JOIN; return the new channel's path.

    (Reconstructed: a stray dataset split marker corrupted the
    unforbid_events() call.)
    """
    stream.sendJoin(room)
    new_channels = EventPattern('dbus-signal', signal='NewChannels')
    event = q.expect_many(new_channels)[0]
    # Exactly one text channel must be announced, and only once.
    q.forbid_events([new_channels])
    channel_details = event.args[0]
    assertEquals(1, len(channel_details))
    path, props = channel_details[0]
    assertEquals(HT_ROOM, props[TARGET_HANDLE_TYPE])
    assertEquals(CHANNEL_TYPE_TEXT, props[CHANNEL_TYPE])
    q.expect('dbus-signal', signal='MembersChanged')
    q.unforbid_events([new_channels])
    return path
def test(q, bus, conn, stream):
    """Join a channel via the bouncer, PART, and verify a forced re-JOIN."""
    conn.Connect()
    q.expect_many(
        EventPattern('dbus-signal', signal='StatusChanged', args=[1, 1]),
        EventPattern('irc-connected'))
    q.expect('dbus-signal', signal='SelfHandleChanged',
             args=[1L])
    q.expect('dbus-signal', signal='StatusChanged', args=[0, 1])
    CHANNEL_NAME = "#idletest"
    self_handle = conn.Get(CONN, 'SelfHandle', dbus_interface=PROPERTIES_IFACE)
    # The bouncer initiates a JOIN.
    path = test_join_bouncer(q, conn, stream, CHANNEL_NAME)
    # We PART.
    chan = make_channel_proxy(conn, path, 'Channel')
    chan.RemoveMembers([self_handle], "bye bye cruel world",
                       dbus_interface=CHANNEL_IFACE_GROUP)
    q.expect('dbus-signal', signal='MembersChanged')
    # The bouncer initiates a JOIN to force the issue.
    test_join_bouncer(q, conn, stream, CHANNEL_NAME)
    call_async(q, conn, 'Disconnect')
    q.expect_many(
        EventPattern('dbus-return', method='Disconnect'),
        EventPattern('dbus-signal', signal='StatusChanged', args=[2, 1]))
    return True
if __name__ == '__main__':
exec_test(test)
|
MCLConsortium/mcl-site | src/jpl.mcl.site.knowledge/src/jpl/mcl/site/knowledge/publicationfolder.py | Python | apache-2.0 | 575 | 0.001745 | # encoding: utf-8
u'''MCL — Publication Folder'''
from ._base import IIngestableFolder, Ingestor, IngestableFolderView
from .interfaces import IPublication
from five import grok
class IPublicationFolder(IIngestableFolder):
    u'''Marker interface for a folder containing publications.'''
class PublicationIngestor(Ingestor):
    u'''RDF ingestor for publication folders.

    (Reconstructed: stray dataset split markers corrupted the docstring and
    the method definition line.)
    '''
    grok.context(IPublicationFolder)
    def getContainedObjectInterface(self):
        u'''Publication folders contain IPublication objects.'''
        return IPublication
class View(IngestableFolderView):
    u'''View for a publication folder'''
    grok.context(IPublicationFolder)
|
rfarley3/Kibana | kibana/mapping.py | Python | mit | 17,896 | 0.000056 | #!/usr/bin/env python
from __future__ import absolute_import, unicode_literals, print_function
import re
try:
from urllib2 import urlopen, HTTPError
except ImportError:
# Python 3
from urllib.request import urlopen
from urllib.error import HTTPError
import json
import requests
import time
import sys
PY3 = False
if sys.version_info[0] >= 3:
PY3 = True
def iteritems(d):
if PY3:
return d.items()
else:
return d.iteritems()
class KibanaMapping():
    def __init__(self, index, index_pattern, host, debug=False):
        """index: name of the Kibana index (e.g. '.kibana');
        index_pattern: ES index pattern to manage (e.g. 'aaa*');
        host: (hostname, port) pair of the endpoint (see update_urls);
        debug: enable pr_dbg() output."""
        self.index = index
        self._index_pattern = index_pattern
        self._host = host
        self.update_urls()
        # from the js possible mappings are:
        # { type, indexed, analyzed, doc_values }
        # but indexed and analyzed are .kibana specific,
        # determined by the value within ES's 'index', which could be:
        # { analyzed, no, not_analyzed }
        self.mappings = ['type', 'doc_values']
        # ignore system fields:
        self.sys_mappings = ['_source', '_index', '_type', '_id']
        # .kibana has some fields to ignore too:
        self.mappings_ignore = ['count']
        self.debug = debug
    def pr_dbg(self, msg):
        # Debug output; printed only when constructed with debug=True.
        if self.debug:
            print('[DBG] Mapping %s' % msg)
    def pr_inf(self, msg):
        # Informational output; always printed.
        print('[INF] Mapping %s' % msg)
    def pr_err(self, msg):
        # Error output; always printed.
        print('[ERR] Mapping %s' % msg)
    def update_urls(self):
        """Recompute the ES and .kibana endpoint URLs from the current
        host, index and index pattern.  Must be re-run whenever any of
        those change (the property setters below do so)."""
        # 'http://localhost:5601/elasticsearch/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
        # 'http://localhost:9200/aaa*/_mapping/field/*?ignore_unavailable=false&allow_no_indices=false&include_defaults=true'
        self.es_get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                           '%s/' % self._index_pattern +
                           '_mapping/field/' +
                           '*?ignore_unavailable=false&' +
                           'allow_no_indices=false&' +
                           'include_defaults=true')
        # 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/aaa*'
        self.post_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                         '%s/' % self.index +
                         'index-pattern/%s' % self._index_pattern)
        # 'http://localhost:5601/elasticsearch/.kibana/index-pattern/aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/_search/?id=aaa*'
        # 'http://localhost:9200/.kibana/index-pattern/aaa*/'
        self.get_url = ('http://%s:%s/' % (self._host[0], self._host[1]) +
                        '%s/' % self.index +
                        'index-pattern/%s/' % self._index_pattern)
@property
def index_pattern(self):
| return self._index_pattern
@index_pattern.setter
def index_pattern_setter(self, index_pattern):
self._index_pattern = index_pattern
self.update_urls()
@property
def host(self):
return self._host
@host.setter
def host_setter(self, host):
self._host = host
self.update_urls()
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'k | ibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None
def dedup_field_cache(self, field_cache):
deduped = []
fields_found = {}
for field in field_cache:
name = field['name']
if name not in fields_found:
deduped.append(field)
fields_found[name] = field
elif fields_found[name] != field:
self.pr_dbg("Dup field doesn't match")
self.pr_dbg("1st found: %s" % fields_found[name])
self.pr_dbg(" Dup one: %s" % field)
# else ignore, pass
return deduped
    def post_field_cache(self, field_cache):
        """Where field_cache is a list of fields' mappings"""
        # Serialize the field list into a .kibana index-pattern document
        # and POST it.  Always returns 0 for now (see TODO below).
        index_pattern = self.field_cache_to_index_pattern(field_cache)
        # self.pr_dbg("request/post: %s" % index_pattern)
        resp = requests.post(self.post_url, data=index_pattern).text
        # resp = {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":1,"created":true} # noqa
        resp = json.loads(resp)
        return 0
        # TODO detect failure (return 1)
def field_cache_to_index_pattern(self, field_cache):
"""Return a .kibana index-pattern doc_type"""
mapping_dict = {}
mapping_dict['customFormats'] = "{}"
mapping_dict['title'] = self.index_pattern
# now post the data into .kibana
mapping_dict['fields'] = json.dumps(field_cache, separators=(',', ':'))
# in order to post, we need to create the post string
mapping_str = json.dumps(mapping_dict, separators=(',', ':'))
return mapping_str
def check_mapping(self, m):
"""Assert minimum set of fields in cache, does not validate contents"""
if 'name' not in m:
self.pr_dbg("Missing %s" % "name")
return False
# self.pr_dbg("Checking %s" % m['name'])
for x in ['analyzed', 'indexed', 'type', 'scripted', 'count']:
if x not in m or m[x] == "":
self.pr_dbg("Missing %s" % x)
self.pr_dbg("Full %s" % m)
return False
if 'doc_values' not in m or m['doc_values'] == "":
if not m['name'].startswith('_'):
self.pr_dbg("Missing %s" % "doc_values")
return False
m['doc_values'] = False
return True
def get_index_mappings(self, index):
"""Converts all index's doc_types to .kibana"""
fields_arr = []
for (key, val) in iteritems(index):
# self.pr_dbg("\tdoc_type: %s" % key)
doc_mapping = self.get_doc_type_mappings(index[key])
# self.pr_dbg("\tdoc_mapping |
wettenhj/mytardis | tardis/urls/download.py | Python | gpl-3.0 | 1,724 | 0.00058 | '''
Download URLs
'''
from django.conf.urls import url
from tardis.tardis_portal.download import (
download_datafile,
streaming_download_datafiles,
streaming_download_experiment,
streaming_download_dataset,
download_api_key
)
# URL patterns for datafile/dataset/experiment downloads and API-key export.
# (Repairs corrupted tokens in the first pattern: view name and route name.)
download_urls = [
    url(r'^datafile/(?P<datafile_id>\d+)/$', download_datafile,
        name='tardis.tardis_portal.download.download_datafile'),
    url(r'^datafiles/$', streaming_download_datafiles,
        name='tardis.tardis_portal.download.streaming_download_datafiles'),
    url(r'^experiment/(?P<experiment_id>\d+)/$',
        streaming_download_experiment,
        name='tardis.tardis_portal.download.streaming_download_experiment'),
    url(r'^experiment/(?P<experiment_id>\d+)/'
        r'(?P<comptype>[a-z]{3})/$',  # tgz or tar
        streaming_download_experiment,
        name='tardis.tardis_portal.download.streaming_download_experiment'),
    url(r'^experiment/(?P<experiment_id>\d+)/'
        r'(?P<comptype>[a-z]{3})/(?P<organization>[^/]+)/$',
        streaming_download_experiment),
    url(r'^dataset/(?P<dataset_id>\d+)/$',
        streaming_download_dataset,
        name='tardis.tardis_portal.download.streaming_download_dataset'),
    url(r'^dataset/(?P<dataset_id>\d+)/'
        r'(?P<comptype>[a-z]{3})/$',  # tgz or tar
        streaming_download_dataset,
        name='tardis.tardis_portal.download.streaming_download_dataset'),
    url(r'^dataset/(?P<dataset_id>\d+)/'
        r'(?P<comptype>[a-z]{3})/(?P<organization>[^/]+)/$',
        streaming_download_dataset,
        name='tardis.tardis_portal.download.streaming_download_dataset'),
    url(r'^api_key/$', download_api_key,
        name='tardis.tardis_portal.download.download_api_key'),
]
|
pombredanne/python-cdb | setup.py | Python | gpl-2.0 | 1,203 | 0.029925 | #! /usr/bin/env python
SRCDIR = "src"

# C source files that make up the extension, relative to the project root.
# A list comprehension replaces map(lambda ...): under Python 3, map()
# returns an iterator, which distutils cannot use as a sources list.
SRCFILES = [SRCDIR + '/' + name + '.c'
            for name in ["cdbmodule", "cdb", "cdb_make", "cdb_hash",
                         "uint32_pack", "uint32_unpack"]]

from distutils.core import setup, Extension

setup (# Distribution meta-data
       name = "python-cdb",
       version = "0.34",
       description = "Interface to constant database files",
       author = "Mike Pomraning",
       author_email = "mjp@pilcrow.madison.wi.us",
       license = "GPL",
       long_description = \
'''The python-cdb extension module is an adaptation of D. J. Bernstein's
constant database package (see http://cr.yp.to/cdb.html).
cdb files are mappings of keys to values, designed for wickedly
fast lookups and atomic updates. This module mimics the normal
cdb utilities, cdb(get|dump|make), via convenient, high-level Python
objects.''',
       ext_modules = [ Extension(
                    "cdbmodule",
                    SRCFILES,
                    include_dirs=[ SRCDIR + '/' ],
                    extra_compile_args=['-fPIC'],
                    )
                  ],
       url = "http://pilcrow.madison.wi.us/",
       )
|
xhqu1981/pymatgen | dev_scripts/chemenv/view_environment.py | Python | mit | 5,095 | 0.002159 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Script to visualize the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import SEPARATION_PLANE
from pymatgen.analysis.chemenv.utils.scripts_utils import visualize
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import Plane
import numpy as np
if __name__ == '__main__':
    print('+-------------------------------------------------------+\n'
          '| Development script of the ChemEnv utility of pymatgen |\n'
          '| Visualization of the model coordination environments |\n'
          '+-------------------------------------------------------+\n')
    allcg = AllCoordinationGeometries()
    vis = None
    # Interactive loop: pick a geometry symbol, list them all, or quit.
    while True:
        cg_symbol = raw_input('Enter symbol of the geometry you want to see, "l" to see the list '
                              'of existing geometries or "q" to quit : ')
        if cg_symbol == 'q':
            break
        if cg_symbol == 'l':
            print(allcg.pretty_print(maxcn=13, additional_info={'nb_hints': True}))
            continue
        try:
            cg = allcg[cg_symbol]
        except LookupError:
            print('Wrong geometry, try again ...')
            continue
        print(cg.name)
        for ipoint, point in enumerate(cg.points):
            print('Point #{:d} : {} {} {}'.format(ipoint, repr(point[0]), repr(point[1]), repr(point[2])))
        print('Algorithms used :')
        for ialgo, algo in enumerate(cg.algorithms):
            print('Algorithm #{:d} :'.format(ialgo))
            print(algo)
            print('')
        # Visualize the separation plane of a given algorithm
        sepplane = False
        if any([algo.algorithm_type == SEPARATION_PLANE for algo in cg.algorithms]):
            test = raw_input('Enter index of the algorithm for which you want to visualize the plane : ')
            if test != '':
                try:
                    ialgo = int(test)
                    algo = cg.algorithms[ialgo]
                    sepplane = True
                except:
                    print('Unable to determine the algorithm/separation_plane you want '
                          'to visualize for this geometry. Continues without ...')
        # Scale model coordinates up for a readable rendering.
        myfactor = 3.0
        if vis is None:
            vis = visualize(cg=cg, zoom=1.0, myfactor=myfactor)
        else:
            vis = visualize(cg=cg, vis=vis, myfactor=myfactor)
        cg_points = [myfactor*np.array(pp) for pp in cg.points]
        cg_central_site = myfactor*np.array(cg.central_site)
        if sepplane:
            pts = [cg_points[ii] for ii in algo.plane_points]
            if algo.minimum_number_of_points == 2:
                pts.append(cg_central_site)
                centre = cg_central_site
            else:
                centre = np.sum(pts, axis=0) / len(pts)
            # Grow the drawn plane so it extends past the farthest point.
            factor = 1.5
            target_dist = max([np.dot(pp-centre, pp-centre) for pp in cg_points])
            current_dist = np.dot(pts[0] - centre, pts[0] - centre)
            factor = factor * target_dist / current_dist
            plane = Plane.from_npoints(points=pts)
            p1 = centre + factor * (pts[0] - centre)
            perp = factor * np.cross(pts[0] - centre, plane.normal_vector)
            p2 = centre + perp
            p3 = centre - factor * (pts[0] - centre)
            p4 = centre - perp
            vis.add_faces([[p1, p2, p3, p4]], [1.0, 0.0, 0.0], opacity=0.5)
            target_radius = 0.25
            radius = 1.5 * target_radius
            if algo.minimum_number_of_points == 2:
                vis.add_partial_sphere(coords=cg_central_site, radius=radius,
                                       color=[1.0, 0.0, 0.0], start=0, end=360,
                                       opacity=0.5)
            # Red: points defining the plane; green/blue: the two point groups.
            for pp in pts:
                vis.add_partial_sphere(coords=pp, radius=radius,
                                       color=[1.0, 0.0, 0.0], start=0, end=360,
                                       opacity=0.5)
            ps1 = [cg_points[ii] for ii in algo.point_groups[0]]
            ps2 = [cg_points[ii] for ii in algo.point_groups[1]]
            for pp in ps1:
                vis.add_partial_sphere(coords=pp, radius=radius,
                                       color=[0.0, 1.0, 0.0], start=0, end=360,
                                       opacity=0.5)
            for pp in ps2:
                vis.add_partial_sphere(coords=pp, radius=radius,
                                       color=[0.0, 0.0, 1.0], start=0, end=360,
                                       opacity=0.5)
        vis.show()
|
iansprice/wagtail | wagtail/tests/snippets/models.py | Python | bsd-3-clause | 2,344 | 0.000853 | from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import FieldPanel, InlinePanel
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailsearch import index
from wagtail.wagtailsnippets.models import register_snippet
from .forms import FancySnippetForm
# AlphaSnippet and ZuluSnippet are for testing ordering of
# snippets when registering. They are named as such to ensure
# thier ordering is clear. They are registered during testing
# to ensure specific [in]correct register ordering
# AlphaSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class AlphaSnippet(models.Model):
    # Registered during TestSnippetOrdering; named to sort first alphabetically.
    text = models.CharField(max_length=255)
    def __str__(self):
        return self.text
# ZuluSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class ZuluSnippet(models.Model):
    # Registered during TestSnippetOrdering; named to sort last alphabetically.
    text = models.CharField(max_length=255)
    def __str__(self):
        return self.text
# Register model as snippet using register_snippet as both a function and a decorator
class RegisterFunction(models.Model):
    pass
# Register by calling register_snippet as a plain function (vs. decorator below).
register_snippet(RegisterFunction)
@register_snippet
class RegisterDecorator(models.Model):
    # Verifies register_snippet also works when applied as a decorator.
    # (Class header repaired: the original line was corrupted.)
    pass
# A snippet model that inherits from index.Indexed can be searched on
@register_snippet
class SearchableSnippet(index.Indexed, models.Model):
    # Inherits index.Indexed so snippet search can match on `text`.
    text = models.CharField(max_length=255)
    search_fields = [
        index.SearchField('text'),
    ]
    def __str__(self):
        return self.text
@register_snippet
class StandardSnippet(models.Model):
    # Minimal snippet using the default admin form handling.
    text = models.CharField(max_length=255)
@register_snippet
class FancySnippet(models.Model):
    # Overrides the admin form class instead of using the default.
    base_form_class = FancySnippetForm
@register_snippet
class FileUploadSnippet(models.Model):
    # NOTE: the field name 'file' shadows the Python 2 builtin within the
    # class body; harmless here and kept for schema compatibility.
    file = models.FileField()
class RichTextSection(models.Model):
    # Child of MultiSectionRichTextSnippet, reachable via the
    # 'sections' related name used by its InlinePanel.
    snippet = ParentalKey('MultiSectionRichTextSnippet', related_name='sections', on_delete=models.CASCADE)
    body = RichTextField()
    panels = [
        FieldPanel('body'),
    ]
@register_snippet
class MultiSectionRichTextSnippet(ClusterableModel):
    # Edits its RichTextSection children inline.
    panels = [
        InlinePanel('sections'),
    ]
|
badjr/pysal | pysal/core/IOHandlers/tests/test_arcgis_swm.py | Python | bsd-3-clause | 1,219 | 0.00082 | import unittest
import pysal
from pysal.core.IOHandlers.arcgis_swm import ArcGISSwmIO
import tempfile
import os
class test_ArcGISSwmIO(unittest.TestCase):
def setUp(self):
self.test_file = test_file = pysal.examples.get_path('ohio.swm')
self.obj = ArcGISSwmIO(test_file, 'r')
def test_close(self):
f = self.obj
f.close()
self.failUnlessRaises(ValueError, f.read)
def test_read(self):
w = self.obj.read()
self.assertEqual( | 88, w.n)
self.assertEqual(5.25, w.mean_neighbors)
self.assertEqual([1.0, 1.0, 1.0, 1.0], w[1].values())
def test_seek(self):
self.test_read()
self.failUnlessRaises(StopIteration, self.obj.read)
self.obj.seek(0)
self.test_read()
def test_write(self):
| w = self.obj.read()
f = tempfile.NamedTemporaryFile(
suffix='.swm', dir=pysal.examples.get_path(''))
fname = f.name
f.close()
o = pysal.open(fname, 'w')
o.write(w)
o.close()
wnew = pysal.open(fname, 'r').read()
self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
os.remove(fname)
if __name__ == '__main__':
unittest.main()
|
Imperat/SSU-Courses | ssu-formal-languages/pda/pda_exceptions.py | Python | apache-2.0 | 91 | 0.043956 |
class UnknownSymbolException(Exception):
    """Raised when the automaton encounters a symbol it does not recognize."""
    pass
class PDACrashException(Exception):
    """Raised when the pushdown automaton reaches a crash (stuck) state."""
    pass
gtaylor/btmux_battlesnake | battlesnake/plugins/contrib/pg_db/dict_conn.py | Python | bsd-3-clause | 319 | 0 | import psycopg2
import psycopg2.extras
from txpostgres import txpostgres
def dict_connect(*args, **kwargs):
    """Open a psycopg2 connection whose cursors yield dict-like rows."""
    # Force (or override) the factory so the connection is a DictConnection.
    kwargs.update(connection_factory=psycopg2.extras.DictConnection)
    return psycopg2.connect(*args, **kwargs)
class DictConnection(txpostgres.Connection):
    """txpostgres Connection that produces dict-style rows.

    Repairs the corrupted attribute name: txpostgres looks up
    ``connectionFactory`` when establishing connections.
    """
    connectionFactory = staticmethod(dict_connect)
|
massmutual/py4jdbc | tests/test_Cursor.py | Python | bsd-3-clause | 3,513 | 0.000569 | import pytest
from py4jdbc.dbapi2 import connect, Connection
from py4jdbc.resultset import ResultSet
from py4jdbc.exceptions.dbapi2 import Error
def test_connect(gateway):
    """connect() against an in-memory Derby DB yields a working cursor."""
    url = "jdbc:derby:memory:testdb;create=true"
    conn = connect(url, gateway=gateway)
    cur = conn.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    assert isinstance(rs, ResultSet)
def test_execute(derby):
    """execute() on a cursor from the shared fixture returns a ResultSet."""
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    assert isinstance(rs, ResultSet)
def test_execute_with_params(derby):
    """Parameter binding works for execute() and executemany().

    Runs inside an explicit transaction and rolls back so the schema
    does not leak into other tests.
    """
    derby.autocommit = False
    cur = derby.cursor()
    cur.execute("create schema x_with_params")
    cur.execute("create table x_with_params.cowtest(a int, b char(1))")
    # Verify table is empty.
    rows = cur.execute("select * from x_with_params.cowtest as r").fetchall()
    assert len(rows) == 0
    # Insert one with parameter binding..
    sql = "insert into x_with_params.cowtest (a, b) values (?, ?)"
    cur.execute(sql, (12, "m"))
    # Verify there's 1 row.
    rows = cur.execute("select * from x_with_params.cowtest as r").fetchall()
    assert len(rows) == 1
    # Insert a bunch.
    params = list(enumerate("thecowsaremooing"))
    cur.executemany(sql, params)
    rows = cur.execute("select * from x_with_params.cowtest as r").fetchall()
    assert len(rows) == len("thecowsaremooing") + 1
    derby.rollback()
    derby.autocommit = True
def test_fetchone(derby):
    """fetchone() yields a single Row instance."""
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    assert isinstance(rs.fetchone(), rs.Row)
def test_fetchmany(derby):
    '''Assert all rows of result set have the correct class.
    '''
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    assert all({isinstance(row, rs.Row) for row in rs.fetchmany(5)})
def test_fetchManyCount(derby):
    """Repeated fetchmany(3) calls together return every inserted row."""
    derby.autocommit = False
    cur = derby.cursor()
    cur.execute("create schema x_with_params")
    cur.execute("create table x_with_params.cowtest(a int, b char(1))")
    sql = "insert into x_with_params.cowtest (a, b) values (?, ?)"
    params = list(enumerate("thecowsaremooing"))
    cur.executemany(sql, params)
    rs = cur.execute("select a from x_with_params.cowtest")
    ress = []
    # Drain in batches of 3; a short batch signals exhaustion.
    while True:
        x = rs.fetchmany(3)
        ress.append(x)
        if len(x) < 3:
            break
    derby.rollback()
    derby.autocommit = True
    assert sum(map(len, ress)) == len("thecowsaremooing")
def test_fetchall(derby):
    '''Assert all rows of result set have the correct class.
    '''
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    assert all({isinstance(row, rs.Row) for row in rs.fetchall()})
def test_Cursor__iter__(derby):
    """Iterating a result set yields Row instances."""
    # NOTE(review): this def is shadowed by the identically-named test
    # defined right below it, so pytest never collects this one.
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    assert all({isinstance(row, rs.Row) for row in rs})
def test_Cursor__iter__exhaustion(derby):
    """After iterating all rows, fetchone() returns None.

    Renamed from test_Cursor__iter__: the duplicate name shadowed the
    previous test so only one of the two was ever collected by pytest.
    """
    cur = derby.cursor()
    rs = cur.execute("select * from SYS.SYSTABLES")
    # Exhaust all rows.
    list(rs)
    assert rs.fetchone() is None
def test_close_and_execute(derby):
    """Executing on a closed cursor raises the dbapi2 Error."""
    cur = derby.cursor()
    cur.close()
    with pytest.raises(Error):
        cur.execute("select * from SYS.SYSTABLES")
cur = derby.cursor()
cur.execute("select * from SYS.SYSTABLES")
cur.close()
with pytest.raises(Error):
cur.fetchone()
def test_close_twice(derby):
    """Closing an already-closed cursor raises the dbapi2 Error."""
    cur = derby.cursor()
    cur.close()
    with pytest.raises(Error):
        cur.close()
|
francois-a/fastqtl | python/run_FastQTL_threaded.py | Python | gpl-3.0 | 5,608 | 0.005171 | #!/usr/bin/env python3
# Author: Francois Aguet
import argparse
import os
import numpy as np
import subprocess
import gzip
import multiprocessing as mp
import contextlib
from datetime import datetime
import tempfile
import glob
@contextlib.contextmanager
def cd(cd_path):
    """Context manager: chdir into *cd_path*, restoring the previous
    working directory on exit, even when the with-body raises."""
    saved_path = os.getcwd()
    os.chdir(cd_path)
    try:
        yield
    finally:
        # Without the finally, an exception in the body would leave the
        # process stranded inside cd_path.
        os.chdir(saved_path)
def get_cmd(args, chunk):
    """Assemble the fastQTL command line for one chunk.

    Optional arguments are appended only when set; output and log file
    names are derived from args.prefix and the zero-padded chunk index.
    """
    cmd = os.path.join(fastqtl_dir, 'bin', 'fastQTL')+' --vcf '+args.vcf+' --bed '+args.bed+' --window '+args.window \
        +' --maf-threshold '+args.maf_threshold \
        +' --ma-sample-threshold '+args.ma_sample_threshold \
        +' --interaction-maf-threshold '+args.interaction_maf_threshold
    if args.covariates:
        cmd += ' --cov '+args.covariates
    if args.phenotype_groups:
        cmd += ' --grp '+args.phenotype_groups
    if args.threshold:
        cmd += ' --threshold '+args.threshold
    if args.permute:
        cmd += ' --permute '+' '.join([str(p) for p in args.permute])
    if args.interaction:
        cmd += ' --interaction '+args.interaction
    if args.best_variant_only:
        cmd += ' --report-best-only'
    if args.seed:
        cmd += ' --seed '+args.seed
    if args.exclude_samples:
        cmd += ' --exclude-samples '+args.exclude_samples
    if args.exclude_sites:
        cmd += ' --exclude-sites '+args.exclude_sites
    cmd += ' --chunk '+str(chunk)+' '+args.chunks\
        + ' --out '+args.prefix+'_chunk{0:03d}.txt.gz'.format(chunk)\
        + ' --log '+args.prefix+'_chunk{0:03d}.log'.format(chunk)
    return cmd
def perm_worker(inputs):
    """Run fastQTL for one chunk; *inputs* is an (args, chunk) tuple.

    Returns the subprocess status (0 on success; check_call raises on
    non-zero exit). Output is suppressed to keep the pool log readable.
    """
    args = inputs[0]
    chunk = inputs[1]
    cmd = get_cmd(args, chunk)
    print('Processing chunk '+str(chunk), flush=True)
    s = subprocess.check_call(cmd, shell=True, executable='/bin/bash', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    print('Finished chunk '+str(chunk), flush=True)
    return s
parser = argparse.ArgumentParser(description='Run FastQTL')
parser.add_argument('vcf', help='Genotypes in VCF 4.1 format')
parser.add_argument('bed', help='Phenotypes in UCSC BED extended format')
parser.add_argument('prefix', help='Prefix for output file name')
parser.add_argument('--covariates', default='', help='Covariates')
parser.add_argument('--phenotype_groups', default='', help='File with mapping of phenotype_id to group_id (gene_id)')
parser.add_argument('--chunks', default='100', help='Number of chunks, minimum: #chromosomes')
parser.add_argument('--permute', default=None, type=str, nargs='+', help='Number of permutations, e.g. [1000, 10000] (adaptive). Default: None (run nominal pass)')
parser.add_argument('--interaction', default=None, type=str, help='Interaction term')
parser.add_argument('--best_variant_only', action='store_true')
parser.add_argument('--window', default='1e6', help='Cis-window size. Default values is 1Mb (1e6).')
parser.add_argument('--threshold', default='', help='Output only significant phenotype-variant pairs with a p-value below threshold (default 1)')
parser.add_argument('--maf_threshold', default='0.0', help='Include only genotypes with minor allele frequency >=maf_threshold (default 0)')
parser.add_argument('--ma_sample_threshold', default='0', help='Include only genotypes with >=ma_sample_threshold samples carrying the minor allele (default 0)')
parser.add_argument('--interaction_maf_threshold', default='0', help='MAF threshold for interactions, applied to lower and upper half of samples')
parser.add_argument('--fdr', default=0.05, type=np.double)
parser.add_argument('--seed', default=None, help='Random number generator seed')
parser.add_argument('--exclude_samples', default=None, help='')
parser.add_argument('--exclude_sites', default=None, help='')
parser.add_argument('--qvalue_lambda', default=None, help='lambda parameter for pi0est in qvalue.')
parser.add_argument('-t', '--threads', default=8, type=int, help='Number of threads')
parser.add_argument('-o', '--output_dir', default='.', help='Output directory')
args = parser.parse_args()
fastqtl_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)
print('['+datetime.now().strftime("%b %d %H:%M:%S")+'] Running FastQTL on {0:d} threads.'.format(args.threads), flush=True)
with cd(args.output_dir):
    # Fan one fastQTL invocation per chunk out over the worker pool.
    with mp.Pool(processes=args.threads) as pool:
        pdata_res = [pool.map_async(perm_worker, ((args,k),)) for k in np.arange(1,int(args.chunks)+1)]
        pool.close()
        pool.join()
    for res in pdata_res:  # check exit status
        assert res.get()[0]==0
    with tempfile.NamedTemporaryFile(mode='w+') as chunk_list_file, \
         tempfile.NamedTemporaryFile(mode='w+') as log_list_file:
        # write chunk and log paths to file
        chunk_files = sorted(glob.glob(args.prefix+'_chunk*.txt.gz'))
        chunk_list_file.write('\n'.join(chunk_files)+'\n')
        chunk_list_file.flush()
        log_files = sorted(glob.glob(args.prefix+'_chunk*.log'))
        log_list_file.write('\n'.join(log_files)+'\n')
        log_list_file.flush()
        # merge chunks
        cmd = 'python3 '+os.path.join(fastqtl_dir, 'python', 'merge_chunks.py') \
            +' {} {} {} --fdr {} -o .'.format(chunk_list_file.name, log_list_file.name, args.prefix, args.fdr)
        if args.qvalue_lambda:
            cmd += ' --qvalue_lambda {}'.format(args.qvalue_lambda)
        if args.permute:
            cmd += ' --permute'
        subprocess.check_call(cmd, shell=True)
        # remove chunk files
        for f in chunk_files + log_files:
            os.remove(f)
|
obi-two/Rebelion | data/scripts/templates/object/tangible/lair/structure/exterior/shared_lair_cave_giant_exterior_krayt_dragon.py | Python | mit | 504 | 0.043651 | #### NOTICE: THIS | FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build and return the Tangible template for this lair-cave exterior."""
	result = Tangible()
	result.template = "object/tangible/lair/structure/exterior/shared_lair_cave_giant_exterior_krayt_dragon.iff"
	result.attribute_template_id = -1
	result.stfName("lair_n","cave_giant_exterior_krayt_dragon")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
meejah/AutobahnPython | examples/twisted/websocket/echo_endpoints/client.py | Python | mit | 3,397 | 0.001178 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol
class EchoClientProtocol(WebSocketClientProtocol):
    """
    Example WebSocket client protocol. This is where you define your application
    specific protocol and logic.
    """

    def sendHello(self):
        # Payload must be bytes; sendMessage sends a text frame by default.
        self.sendMessage("Hello, world!".encode('utf8'))

    def onOpen(self):
        self.sendHello()

    def onMessage(self, payload, isBinary):
        if not isBinary:
            print("Text message received: {}".format(payload.decode('utf8')))
        # Schedule the next hello one second from now on the reactor
        # (repaired corrupted callLater token).
        self.factory.reactor.callLater(1, self.sendHello)
class EchoClientFactory(WebSocketClientFactory):
    """
    Example WebSocket client factory. This creates a new instance of our protocol
    when the client connects to the server.
    """
    # Twisted instantiates this protocol class once per connection.
    protocol = EchoClientProtocol
if __name__ == '__main__':
    import sys
    import argparse
    from twisted.python import log
    from twisted.internet.endpoints import clientFromString
    # parse command line arguments
    ##
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action="store_true",
                        help="Enable debug output.")
    parser.add_argument("--websocket", default="tcp:127.0.0.1:9000",
                        help='WebSocket client Twisted endpoint descriptor, e.g. "tcp:127.0.0.1:9000" or "unix:/tmp/mywebsocket".')
    parser.add_argument("--wsurl", default=u"ws://127.0.0.1:9000",
                        help='WebSocket URL (must suit the endpoint), e.g. ws://127.0.0.1:9000.')
    args = parser.parse_args()
    # start Twisted logging to stdout
    log.startLogging(sys.stdout)
    # we use an Autobahn utility to import the "best" available Twisted reactor
    from autobahn.choosereactor import install_reactor
    reactor = install_reactor()
    print("Running on reactor {}".format(reactor))
    # start a WebSocket client
    wsfactory = EchoClientFactory(args.wsurl)
    wsclient = clientFromString(reactor, args.websocket)
    wsclient.connect(wsfactory)
    # now enter the Twisted reactor loop
    reactor.run()
migueln/PySQM | config.py | Python | gpl-3.0 | 3,526 | 0.005672 | #!/usr/bin/env python
'''
PySQM configuration File.
____________________________
Copyright (c) Mireia Nievas <mnievas[at]ucm[dot]es>
This file is part of PySQM.
PySQM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PySQM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PySQM. If not, see <http://www.gnu.org/licenses/>.
____________________________
Notes:
You may need to change the following variables to match your
observatory coordinates, instrumental properties, etc.
Python (v2.7) syntax is mandatory.
____________________________
'''
'''
-------------
SITE location
-------------
'''
# Observatory site and device identification (repairs corrupted tokens
# in the altitude assignment and device comment; fixes "lame" -> "name").
_observatory_name = 'GURUGU'
_observatory_latitude = 40.447862
_observatory_longitude = -3.364992
_observatory_altitude = 680
_observatory_horizon = 10 # If Sun is below this altitude, the program will take data
_device_shorttype = 'SQM' # Device STR in the file
_device_type = 'SQM_LU' # Device type in the Header
_device_id = _device_type + '-' + _observatory_name # Long Device name
_device_locationname = 'Villalbilla/Spain - Observatorio GURUGU' # Device location in the world
_data_supplier = 'Mireia Nievas / Universidad Complutense de Madrid' # Data supplier (contact)
_device_addr = '/dev/ttyUSB0' # Default IP address of the ethernet device (if not automatically found)
_measures_to_promediate = 5 # Take the mean of N measures
_delay_between_measures = 20 # Delay between two measures. In seconds.
_cache_measures = 5 # Get X measures before writing on screen/file
_plot_each = 60 # Call the plot function each X measures.
_use_mysql = False # Set to True if you want to store data on a MySQL db.
_mysql_host = None # Host (ip:port / localhost) of the MySQL engine.
_mysql_user = None # User with write permission on the db.
_mysql_pass = None # Password for that user.
_mysql_database = None # Name of the database.
_mysql_dbtable = None # Name of the table
_mysql_port = None # Port of the MySQL server.
_local_timezone = +1 # UTC+1
_computer_timezone = +0 # UTC
_offset_calibration = -0.11 # magnitude = read_magnitude + offset
_reboot_on_connlost = False # Reboot if we loose connection
# Monthly (permanent) data
monthly_data_directory = "/tmp/sqm_gurugu/"
# Daily (permanent) data
daily_data_directory = monthly_data_directory+"/datos_diarios/"
# Daily (permanent) graph
daily_graph_directory = monthly_data_directory+"/graficos_diarios/"
# Current data, deleted each day.
current_data_directory = monthly_data_directory
# Current graph, deleted each day.
current_graph_directory = monthly_data_directory
# Summary with statistics for the night
summary_data_directory = monthly_data_directory
'''
----------------------------
PySQM data center (OPTIONAL)
----------------------------
'''
# Send the data to the data center
_send_to_datacenter = False
'''
Ploting options
'''
full_plot = False
limits_nsb = [16.5,20.0] # Limits in Y-axis
limits_time = [17,9] # Hours
limits_sunalt = [-80,5] # Degrees
'''
Email options
'''
_send_data_by_email = False
|
sebaxtian/raspberrypi3 | lighting_led/gpio_button.py | Python | mit | 478 | 0.002092 | '''
Ejemplo de imprimir un mensaje c | uando un boton es presionado.
Fuente: https://www.raspberrypi.org/learning/physical-computing-with-python/worksheet/
Fecha: mar dic 6 20:11:21 COT 2016
Version: 1.0
'''
from gpiozero import Button
# Usando el GP2 controlamos el boton
button | = Button(2)
# El programa espera hasta que el boton sea presionado
button.wait_for_press()
# El programa imprime un mensaje
print('Se Presiona El Boton')
'''
Ejecutar: python gpio_button.py
'''
|
green-latte/geopy.jp | info.py | Python | mit | 401 | 0.044888 | # package information.
# Package metadata consumed by setup() (repairs corrupted email and
# classifier strings).
INFO = dict(
    name = "geopy.jp",
    description = "Geocoding library for Python.",
    author = "Green Latte",
    author_email = "k.takeuchi@warrantee.co.jp",
    license = "MIT License",
    url = "https://github.com/green-latte/geopy.jp",
    classifiers = [
        "Programming Language :: Python :: 3.4",
        "License :: OSI Approved :: MIT License"
    ]
)
|
rleigh-dundee/openmicroscopy | components/tools/OmeroPy/src/omero/install/logs_library.py | Python | gpl-2.0 | 6,637 | 0.011903 | #!/usr/bin/env python
"""
Function for parsing OMERO log files.
The format expected is defined for Python in
omero.util.configure_logging.
Copyright 2010 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
:author: Josh Moore <josh@glencoesoftware.com>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
from time import mktime, strptime
import fileinput
import logging
import sys
import os
import re
def parse_time(value):
    """
    parse the time format used by log4j into seconds (float)
    since the epoch
    """
    # Timestamps look like "2009-04-09 15:11:58,029": the comma separates
    # whole seconds from milliseconds.
    parts = value.split(",")
    value = parts[0]
    millis = float(parts[1]) / 1000.0
    t = mktime(strptime(value, "%Y-%m-%d %H:%M:%S"))
    t = float(t)
    t += millis
    return t
class log_line(object):
    """One parsed line of an OMERO log, sliced at fixed column offsets.

    Example layout:
    2009-04-09 15:11:58,029 INFO [ ome.services.util.ServiceHandler] (l.Server-6) Meth: interface ome.api.IQuery.findByQuery
    """
    def __init__(self, line):
        self.line = line
        line.strip()  # NOTE(review): result discarded; kept to preserve original behavior
        self.date = line[0:23]
        self.level = line[24:28]
        self.thread = line[74:84]
        self.message = line[85:].strip()
        self.status = line[86:91]
        self.method = line[96:].strip()
    def contains(self, s):
        # str.find returns -1 when absent; any index >= 0 means present.
        return self.line.find(s) >= 0
    def contains_any(self, l):
        return any(self.contains(token) for token in l)
class log_watcher(object):
    """Pairs entry/exit log lines per thread while scanning files, and
    yields each entry line annotated with start/stop/took timing once
    its matching exit line is seen."""
    def __init__(self, files, entries, exits, storeonce = None, storeall = None):
        if storeonce is None: storeonce = []
        if storeall is None: storeall = []
        self.files = files
        self.entries = entries
        self.exits = exits
        self.storeonce = storeonce
        self.storeall = storeall
    def gen(self):
        # Maps thread name -> pending entry log_line awaiting its exit.
        self.m = {}
        try:
            for line in fileinput.input(self.files):
                ll = log_line(line)
                if ll.contains_any(self.entries):
                    self.m[ll.thread] = ll
                elif ll.contains_any(self.storeonce):
                    try:
                        value = self.m[ll.thread]
                        try:
                            value.once
                        except:
                            # First matching line wins; later ones are ignored.
                            value.once = ll
                    except KeyError:
                        logging.debug("Not found: " + line)
                elif ll.contains_any(self.storeall):
                    try:
                        value = self.m[ll.thread]
                        value.all.append(ll)
                    except AttributeError:
                        # First stored line creates the list lazily.
                        value.all = [ll]
                    except KeyError:
                        logging.debug("Not found: " + line)
                elif ll.contains_any(self.exits):
                    try:
                        value = self.m[ll.thread]
                        del self.m[ll.thread] # Free memory
                        value.start = parse_time(value.date)
                        value.stop = parse_time(ll.date)
                        value.took = value.stop - value.start
                        yield value
                    except KeyError:
                        logging.debug("Not found: " + line)
        finally:
            fileinput.close()
class allthreads_watcher(log_watcher):
    # Watches every service call: entry on "Meth:"/"Executor.doWork",
    # exit on "Rslt:"/"Excp:".
    def __init__(self, files):
        log_watcher.__init__(self, files, ["Meth:","Executor.doWork"],["Rslt:","Excp:"])
class saveAndReturnObject_watcher(log_watcher):
    # Tracks saveAndReturnObject calls, storing their "Args:" line once
    # and every "Adding log" line.
    def __init__(self, files):
        log_watcher.__init__(self, files, ["saveAndReturnObject"],["Rslt:","Excp:"],storeonce=["Args:"],storeall=["Adding log"])
# http://matplotlib.sourceforge.net/examples/api/line_with_text.html
class MyLine(lines.Line2D):
    """Line2D subclass that draws its label text at the end of the line."""
    def __init__(self, *args, **kwargs):
        # we'll update the position when the line data is set
        self.text = mtext.Text(0, 0, '')
        lines.Line2D.__init__(self, *args, **kwargs)
        # we can't access the label attr until *after* the line is
        # inited
        self.text.set_text(self.get_label())
    def set_figure(self, figure):
        self.text.set_figure(figure)
        lines.Line2D.set_figure(self, figure)
    def set_axes(self, axes):
        self.text.set_axes(axes)
        lines.Line2D.set_axes(self, axes)
    def set_transform(self, transform):
        # 2 pixel offset
        texttrans = transform + mtransforms.Affine2D().translate(2, 2)
        self.text.set_transform(texttrans)
        lines.Line2D.set_transform(self, transform)
    def set_data(self, x, y):
        if len(x):
            # Pin the label to the last data point of the line.
            self.text.set_position((x[-1], y[-1]))
        lines.Line2D.set_data(self, x, y)
    def draw(self, renderer):
        # draw my label at the end of the line with 2 pixel offset
        lines.Line2D.draw(self, renderer)
        self.text.draw(renderer)
def plot_threads(watcher, all_colors = ("blue","red","yellow","green","pink","purple")):
digit = re.compile(".*(\d+).*")
fig = plt.figure()
ax = fig.add_subplot(111)
first = None
last = None
colors = {}
for ll in watcher.gen():
last = ll.stop
if first is None:
first = ll.start
if ll.thread.strip() == "main":
t = -1
else:
try:
t = digit.match(ll.thread).group(1)
except:
print "Error parsing thread:", ll.thread
raise
y = np.array([int(t),int(t)])
x = np.array([ll.start-first, ll.stop-first])
c = colors.get(t,all_colors[0])
i = all_colors.index(c)
colors[t] = all_colors[ (i+1) % len(all_colors) ]
if True:
line = MyLine(x, y, c=c, lw=2, alpha=0.5)#, mfc='red')#, ms=12, label=str(len(ll.logs)))
#line.text.set_text('line label')
line.text.set_color('red')
#line.text.set_fontsize(16)
ax.add_line(line)
else:
# http://matplotlib.sourceforge.net/examples/pylab_examples/broken_barh.html
ax.broken_barh([ (110, 30), (150, 10) ] , (10, 9), facecolors='blue')
ax.set_ylim(-2,25)
ax.set_xlim(0, (last-first))
plt.show()
if __name__ == "__main__":
for g in allthreads_watcher(sys.argv).gen():
print "Date:%s\nElapsed:%s\nLevel:%s\nThread:%s\nMethod:%s\nStatus:%s\n\n" % (g.date, g.took, g.level, g.thread, g.message, g.status)
|
GoogleCloudPlatform/datacatalog-connectors-bi | google-datacatalog-tableau-connector/src/google/datacatalog_connectors/tableau/scrape/metadata_api_helper.py | Python | apache-2.0 | 5,757 | 0 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may o | btain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distribut | ed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from google.datacatalog_connectors.tableau.scrape import \
authenticator, constants, metadata_api_constants
class MetadataAPIHelper:
    """Thin client for the Tableau Metadata (GraphQL) API.

    Authenticates lazily on the first request, POSTs GraphQL queries to
    the metadata endpoint, and injects the site ``contentUrl`` (absent
    from Metadata API responses) into the returned site objects.
    """

    def __init__(self,
                 server_address,
                 api_version,
                 username,
                 password,
                 site_content_url=None):
        self.__server_address = server_address
        self.__api_version = api_version
        self.__username = username
        self.__password = password
        self.__site_content_url = site_content_url

        self.__api_endpoint = f'{server_address}' \
                              f'/relationship-service-war/graphql'

        # Set on first use by __set_up_auth_credentials().
        self.__auth_credentials = None

    def fetch_dashboards(self, query_filter=None):
        """
        Read dashboards metadata from a given server.

        Args:
            query_filter (dict): Filter fields and values
                (currently unused for this query)

        Returns:
            dashboards: A list of dashboards metadata
        """
        response = self.__run_query(
            metadata_api_constants.FETCH_DASHBOARDS_QUERY)
        dashboards = self.__get_data_list(response, 'dashboards')

        # Site contentUrl handling
        for dashboard in dashboards:
            if dashboard.get('workbook') and 'site' in dashboard['workbook']:
                self.__add_site_content_url_field(
                    dashboard['workbook']['site'])

        return dashboards

    def fetch_sites(self, query_filter=None):
        """
        Read sites metadata from a given server.

        Args:
            query_filter (dict): Filter fields and values
                (currently unused for this query)

        Returns:
            sites: A list of sites metadata
        """
        response = self.__run_query(metadata_api_constants.FETCH_SITES_QUERY)
        sites = self.__get_data_list(response, 'tableauSites')

        # Site contentUrl handling
        for site in sites:
            self.__add_site_content_url_field(site)
            workbooks = site.get('workbooks') or []
            for workbook in workbooks:
                self.__add_site_content_url_field(workbook['site'])

        return sites

    def fetch_workbooks(self, query_filter=None):
        """
        Read workbooks metadata from a given server.

        Args:
            query_filter (dict): Filter fields and values

        Returns:
            workbooks: A list of workbooks metadata
        """
        variables = None
        if query_filter:
            variables = metadata_api_constants.FETCH_WORKBOOKS_FILTER_TEMPLATE
            for key, value in query_filter.items():
                variables = variables.replace(f'${key}', value)

        response = self.__run_query(
            metadata_api_constants.FETCH_WORKBOOKS_QUERY, variables)
        workbooks = self.__get_data_list(response, 'workbooks')

        # Site contentUrl handling
        for workbook in workbooks:
            if 'site' in workbook:
                self.__add_site_content_url_field(workbook['site'])

        return workbooks

    def __run_query(self, query, variables=None):
        """Authenticate if needed, POST the GraphQL query and return the
        decoded JSON response.

        Shared by all fetch methods (previously duplicated in each).
        """
        self.__set_up_auth_credentials()

        body = {'query': query}
        if variables is not None:
            body['variables'] = variables

        headers = {
            constants.X_TABLEAU_AUTH_HEADER_NAME:
                self.__auth_credentials['token']
        }

        return requests.post(url=self.__api_endpoint,
                             headers=headers,
                             json=body).json()

    @staticmethod
    def __get_data_list(response, key):
        """Safely extract ``response['data'][key]``, defaulting to ``[]``."""
        if response and response.get('data') and key in response['data']:
            return response['data'][key]
        return []

    def __set_up_auth_credentials(self):
        # Authenticate only once; reuse the token for later requests.
        if self.__auth_credentials:
            return

        self.__auth_credentials = \
            authenticator.Authenticator.authenticate(
                self.__server_address,
                self.__api_version,
                self.__username,
                self.__password,
                self.__site_content_url)

    def __add_site_content_url_field(self, original_site_metadata):
        """The `contentUrl` field is not available in the original
        `TableauSite` objects returned by the Metadata API but it is required
        in the prepare stage. So, it is injected into the returned objects to
        make further processing more efficient.

        Args:
            original_site_metadata: The object returned by the Metadata API
        """
        original_site_metadata['contentUrl'] = self.__site_content_url
|
ruishihan/R7-with-notes | src/host/python/GSM/SB.py | Python | apache-2.0 | 1,203 | 0.099751 | from Burst import Burst
import numpy as np
class SB(Burst):
    """GSM Synchronisation Burst (SB) helper.

    Holds the burst's fixed 64-bit extended training sequence and
    correlates received samples against it to locate the burst.
    """

    def __init__(self):
        # Fixed 64-bit extended training sequence of the synchronisation
        # burst, one entry per bit.
        self.syncbits = [
            0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x01,
            0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
            0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, 0x01,
            0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
            0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x00,
            0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x01, 0x01 ]
        # NOTE(review): appears to be the same sequence in compact form;
        # it is not referenced elsewhere in this class -- confirm its use.
        self.bits = [
            1,0,1,1,1,0,0,1,
            0,1,1,0,0,0,1,0,
            0,0,0,0,0,1,0,0,
            0,0,0,0,1,1,1,1,
            0,0,1,0,1,1,0,1,
            1,0,1,0,0,1,0,1,
            0,1,1,1,0,1,1,0,
            0,0,0,1,1,0,1,1]
        # Map bits {0, 1} -> NRZ symbols {-1, +1}.
        s = np.array(self.syncbits)*2-1
        self.sync = []
        # Each symbol repeated 4 times -- presumably an oversampling
        # factor of 4; TODO confirm against the receiver's sample rate.
        for x in s:
            self.sync += [x,x,x,x]
        self.training_seq = self.gmsk_mapper(s,complex(0.,-1.))

    def demodu(self, s):
        """Correlate differentially-demodulated samples against the sync
        sequence and return the correlation magnitude (peak = burst start).
        """
        self.dem = self.diff(s)
        # Zero-padded copy of the sync sequence, same length as the input.
        s = np.zeros(len(self.dem),dtype=complex)
        s[:len(self.sync)]=self.sync[:]
        fs = np.fft.fft(s)
        fd = np.fft.fft(self.dem)
        # Cross-correlation computed in the frequency domain:
        # IFFT(FD * conj(FS)).
        tr = np.abs(np.fft.ifft(fd*np.conj(fs)))
        return tr

    def channelEst(self, frame, osr):
        # Delegate to the base-class estimator using this burst's
        # training sequence.
        return Burst.channelEst( self, frame, self.training_seq, osr )
|
mitodl/ccxcon | webhooks/migrations/0001_initial.py | Python | agpl-3.0 | 624 | 0.003205 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Initial migration: create the Webhook table."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Webhook',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.URLField()),
                # Per-hook shared secret; defaults to a fresh UUID4 per row.
                ('secret', models.CharField(default=uuid.uuid4, max_length=32)),
                ('enabled', models.BooleanField(default=True)),
            ],
        ),
    ]
|
orashi/PaintsPytorch | models/base_model.py | Python | mit | 9,627 | 0.002493 | import numpy as np
import torch
import os
import sys
import functools
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
import torch.nn.functional as F
import torchvision.models as M
class GANLoss(nn.Module):
    """LSGAN-style objective: MSE against constant real/fake label tensors.

    Label tensors are cached and only rebuilt when the element count of
    the discriminator output changes.
    """

    def __init__(self, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a cached constant label tensor shaped like ``input``."""
        if target_is_real:
            cached = self.real_label_var
            if cached is None or cached.numel() != input.numel():
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        cached = self.fake_label_var
        if cached is None or cached.numel() != input.numel():
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        target = self.get_target_tensor(input, target_is_real)
        return self.loss(input, target)
def U_weight_init(ms):
    """Initialize U-net weights in place.

    Conv2d weights get Kaiming-normal init with leaky-relu slope 0.2,
    ConvTranspose2d and Linear weights get plain Kaiming-normal init,
    and BatchNorm layers get N(1, 0.02) weights with zero bias.
    """
    for m in ms.modules():
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
        elif classname.find('ConvTranspose2d') != -1:
            # BUG FIX: removed the stray debug print that was already
            # marked "TODO: kill this".
            m.weight.data = init.kaiming_normal(m.weight.data)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
        elif classname.find('Linear') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
def LR_weight_init(ms):
    """Kaiming init (leaky-relu slope 0.2) for Conv/Linear layers;
    N(1, 0.02) weights and zero bias for BatchNorm layers."""
    for module in ms.modules():
        name = module.__class__.__name__
        if 'Conv' in name:
            module.weight.data = init.kaiming_normal(module.weight.data, a=0.2)
        elif 'BatchNorm' in name:
            module.weight.data.normal_(1.0, 0.02)
            module.bias.data.fill_(0)
        elif 'Linear' in name:
            module.weight.data = init.kaiming_normal(module.weight.data, a=0.2)
def R_weight_init(ms):
    """Plain Kaiming init for Conv/Linear layers; N(1, 0.02) weights and
    zero bias for BatchNorm layers."""
    for module in ms.modules():
        name = module.__class__.__name__
        if 'Conv' in name:
            module.weight.data = init.kaiming_normal(module.weight.data)
        elif 'BatchNorm' in name:
            module.weight.data.normal_(1.0, 0.02)
            module.bias.data.fill_(0)
        elif 'Linear' in name:
            module.weight.data = init.kaiming_normal(module.weight.data)
############################
# G network
###########################
# custom weights initialization called on netG
def get_norm_layer(norm_type='instance'):
    """Map a normalization name to a layer constructor.

    'batch' -> affine BatchNorm2d, 'instance' -> non-affine InstanceNorm2d;
    any other name raises NotImplementedError.
    """
    factories = {
        'batch': functools.partial(nn.BatchNorm2d, affine=True),
        'instance': functools.partial(nn.InstanceNorm2d, affine=False),
    }
    if norm_type not in factories:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return factories[norm_type]
def def_netG(ngf=64, norm='instance'):
    """Build the U-net generator with ``ngf`` base filters and the given
    normalization type ('instance' or 'batch')."""
    norm_layer = get_norm_layer(norm_type=norm)
    netG = UnetGenerator(ngf, norm_layer=norm_layer)
    return netG


# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """8-level U-net generator.

    Downsamples a 1-channel input through 8 stride-2 convolutions,
    concatenates a VGG feature vector at the bottleneck, then upsamples
    through 8 stride-2 transposed convolutions with skip connections
    from the down path, producing a 3-channel tanh output.
    """

    def __init__(self, ngf, norm_layer):
        super(UnetGenerator, self).__init__()
        ################ downS
        # Each down step halves spatial resolution (4x4 conv, stride 2).
        self.down1 = nn.Conv2d(1, ngf // 2, kernel_size=4, stride=2, padding=1)
        down = [nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=1), norm_layer(ngf)]
        self.down2 = nn.Sequential(*down)
        down = [nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 2)]
        self.down3 = nn.Sequential(*down)
        down = [nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
        self.down4 = nn.Sequential(*down)
        down = [nn.Conv2d(ngf * 4, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
        self.down5 = nn.Sequential(*down)
        down = [nn.Conv2d(ngf * 4, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
        self.down6 = nn.Sequential(*down)
        down = [nn.Conv2d(ngf * 4, ngf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ngf * 4)]
        self.down7 = nn.Sequential(*down)
        self.down8 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1)

        ################ down--up
        # Each up step doubles spatial resolution; input channel counts are
        # widened because the matching down activation is concatenated in.
        up = [nn.ConvTranspose2d(ngf * 8 + 2048, ngf * 8, kernel_size=4, stride=2, padding=1),
              norm_layer(ngf * 8)]
        self.up8 = nn.Sequential(*up)
        up = [nn.ConvTranspose2d(ngf * 12, ngf * 8, kernel_size=4, stride=2, padding=1),
              norm_layer(ngf * 8)]
        self.up7 = nn.Sequential(*up)
        up = [nn.ConvTranspose2d(ngf * 12, ngf * 8, kernel_size=4, stride=2, padding=1),
              norm_layer(ngf * 8)]
        self.up6 = nn.Sequential(*up)
        up = [nn.ConvTranspose2d(ngf * 12, ngf * 8, kernel_size=4, stride=2, padding=1),
              norm_layer(ngf * 8)]
        self.up5 = nn.Sequential(*up)
        up = [nn.ConvTranspose2d(ngf * 12, ngf * 4, kernel_size=4, stride=2, padding=1),
              norm_layer(ngf * 4)]
        self.up4 = nn.Sequential(*up)
        up = [nn.ConvTranspose2d(ngf * 6, ngf * 2, kernel_size=4, stride=2, padding=1),
              norm_layer(ngf * 2)]
        self.up3 = nn.Sequential(*up)
        up = [nn.ConvTranspose2d(ngf * 3, ngf, kernel_size=4, stride=2, padding=1), norm_layer(ngf)]
        self.up2 = nn.Sequential(*up)
        self.up1 = nn.ConvTranspose2d(int(ngf * 1.5), 3, kernel_size=4, stride=2, padding=1)
        # Projects the 4096-d VGG feature vector to 2048-d for the bottleneck.
        self.linear = nn.Linear(4096, 2048)
        U_weight_init(self)

    def forward(self, input, VGG):
        """Run the generator.

        Args:
            input: 1-channel image tensor.
            VGG: 4096-d feature vector injected at the bottleneck.
        """
        x1 = F.leaky_relu(self.down1(input), 0.2, True)
        x2 = F.leaky_relu(self.down2(x1), 0.2, True)
        x3 = F.leaky_relu(self.down3(x2), 0.2, True)
        x4 = F.leaky_relu(self.down4(x3), 0.2, True)
        x5 = F.leaky_relu(self.down5(x4), 0.2, True)
        x6 = F.leaky_relu(self.down6(x5), 0.2, True)
        x7 = F.leaky_relu(self.down7(x6), 0.2, True)
        x8 = F.relu(self.down8(x7), True)
        VGG = F.relu(self.linear(VGG), True)
        # Concatenate the projected VGG vector at the 1x1 bottleneck, then
        # decode with skip connections from the down path.
        x = F.relu(self.up8(torch.cat([x8, VGG.view(-1, 2048, 1, 1)], 1)), True)
        x = F.relu(self.up7(torch.cat([x, x7], 1)), True)
        x = F.relu(self.up6(torch.cat([x, x6], 1)), True)
        x = F.relu(self.up5(torch.cat([x, x5], 1)), True)
        x = F.relu(self.up4(torch.cat([x, x4], 1)), True)
        x = F.relu(self.up3(torch.cat([x, x3], 1)), True)
        x = F.relu(self.up2(torch.cat([x, x2], 1)), True)
        x = F.tanh(self.up1(torch.cat([x, x1], 1)))
        return x


############################
# D network
###########################
def def_netD(ndf=64, norm='batch'):
    """Build the PatchGAN-style discriminator with ``ndf`` base filters."""
    norm_layer = get_norm_layer(norm_type=norm)
    netD = NLayerDiscriminator(ndf, norm_layer=norm_layer)
    return netD
class NLayerDiscriminator(nn.Module):
def __init__(self, ndf, norm_layer=nn.BatchNorm2d):
super(NLayerDiscriminator, self).__init__()
kw = 4
padw = 1
self.ndf = ndf
sequence = [
nn.Conv2d(4, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * 1, ndf * 2,
kernel_size=kw, stride=2, padding=padw),
n |
dsiddharth/access-keys | keystone/contrib/kds/cli/manage.py | Python | apache-2.0 | 1,896 | 0.001582 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from keystone.openstack.common import gettextutils
# gettextutils.install() must run to set _ before importing any modules that
# contain static translated strings.
gettextutils.install('keystone')
from oslo.config import cfg
from keystone.contrib.kds.common import service
from keystone.contrib.kds.db import migration
CONF = cfg.CONF
def do_db_version():
    """Print database's current migration level."""
    print(migration.db_version())


def do_db_sync():
    """Place a database under migration control and upgrade,
    creating first if necessary.
    """
    return migration.db_sync(CONF.command.version)


def add_command_parsers(subparsers):
    """Register the db_version and db_sync sub-commands."""
    parser = subparsers.add_parser('db_version')
    parser.set_defaults(func=do_db_version)

    parser = subparsers.add_parser('db_sync')
    parser.set_defaults(func=do_db_sync)
    # Optional target migration version; latest when omitted.
    parser.add_argument('version', nargs='?')


# Sub-command option registered on the global config object; its handler
# wires up the parsers above.
command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)


def main():
    """Parse CLI options, prepare the service and run the sub-command."""
    CONF.register_cli_opt(command_opt)
    service.prepare_service(sys.argv)
    try:
        CONF.command.func()
    except Exception as e:
        # Top-level boundary: report any failure and exit non-zero.
        sys.exit("ERROR: %s" % e)
|
dodobas/osm-export-tool2 | core/wsgi.py | Python | bsd-3-clause | 402 | 0 | """
WSGI config for osm-export-tool2 project.
It exposes the WSGI callable as a module-level variable named ``appl | ication``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.s | ettings.prod")
application = get_wsgi_application()
|
tobes/munge | munge/cli.py | Python | agpl-3.0 | 6,778 | 0.000148 | import argparse
import os.path
import config
import definitions
import sa_util
import csv_util
def import_module(args):
    """Run the importer for each named module, then rebuild everything
    that depends on the imported tables (unless --noupdate)."""
    from dependencies import dependencies_manager
    tables = []
    for module in args.module:
        if not args.updateonly:
            definitions.get_importer(module)(verbose=args.verbose)
        tables += definitions.get_tables(module)
    deps = dependencies_manager.updates_for(tables, include=False)
    if not args.noupdate:
        sa_util.build_views_and_summaries(
            items=deps,
            verbose=args.verbose,
        )


def build_views_summaries(args):
    """Rebuild views/summaries for the given modules (or all of them)."""
    sa_util.build_views_and_summaries(
        items=args.module,
        all=args.all,
        verbose=args.verbose,
        force=args.force,
        dependencies=not args.no_dependants,
    )


def sql(args):
    """Print the table-substituted SQL for each named definition."""
    from definitions import get_definition
    for item in args.items:
        print '\n\nSQL for %s\n' % item
        info = get_definition(item)
        sql = info.get('sql')
        tables = info.get('tables')
        ts = {}
        # Expose the definition's tables as {t1}, {t2}, ... placeholders.
        for index, table in enumerate(tables):
            ts['t%s' % (index + 1)] = '"%s"' % table
        print sql.format(**ts)


def deps(args):
    """Print the needed updates for each named item."""
    from dependencies import dependencies_manager
    for item in args.items:
        print 'Dependencies for %s' % item
        print dependencies_manager.get_needed_updates(item)


def clear_views(args):
    """Drop every view currently in the database."""
    for view in sa_util.view_list():
        sa_util.drop_table_or_view(view, force=True)


def recreate_views(args):
    """Recreate defined views that are missing from the database, built
    in dependency order, then swap them in."""
    from dependencies import dependencies_manager
    views = definitions.defined_views()
    existing_views = sa_util.view_list()
    updates = dependencies_manager.updates_for(views)
    needed = []
    # Keep only defined views that do not exist yet.
    for update in updates:
        if update in views:
            if update in existing_views:
                continue
            needed.append(update)
    needed.reverse()
    print needed
    sa_util.build_views_and_summaries(
        items=needed,
        dependencies=False,
        verbose=args.verbose,
    )
    sa_util.swap_tables(verbose=args.verbose)


def clean_db(args):
    """Interactively drop tables that are no longer defined anywhere."""
    sa_util.clear_temp_objects(verbose=args.verbose)
    # Tables present in the database but neither defined nor depended on.
    tables = sorted(list(
        set(sa_util.table_view_list())
        - set(definitions.defined_tables())
        - set(sa_util.dependent_objects())
    ))
    print 'Unknown tables'
    for table in tables:
        print '\t%s' % table
    for table in tables:
        response = raw_input('Delete table `%s` [No/yes/quit]:' % table)
        if response and response.upper()[0] == 'Y':
            sa_util.drop_table_or_view(table, verbose=args.verbose)
        if response and response.upper()[0] == 'Q':
            return


def export_all(verbose=False):
    """Dump every table to CSV."""
    if verbose:
        print('Exporting all tables')
    csv_util.dump_all(verbose=verbose)


def export_custom(verbose=False):
    """Run the custom export (import has side effects)."""
    if verbose:
        print('Exporting custom tables')
    import custom_output


def db_functions(verbose=False):
    """Create the database functions (import has side effects)."""
    if verbose:
        print('Creating db functions')
    import postgres_functions


def import_csv(args):
    """Import one CSV file from the data import directory into a table
    (named after the file unless --tablename is given), then swap it in."""
    verbose = args.verbose
    filename = args.filename
    tablename = args.tablename
    delimiter = args.delimiter
    filename = os.path.join(config.DATA_PATH, 'import', filename)
    # The shell delivers a literal backslash-t; convert it to a real tab.
    if delimiter == '\\t':
        delimiter = '\t'
    if not tablename:
        tablename = os.path.splitext(os.path.basename(filename))[0]
    if verbose:
        print('Importing %s' % args.filename)
    csv_util.import_single(
        filename,
        tablename,
        encoding=args.encoding,
        delimiter=delimiter,
        verbose=verbose
    )
    sa_util.swap_tables(verbose=verbose)


def webserver(args):
    """Run the Flask development server."""
    from munge.app import app
    app.run(debug=True)
def main():
    """Entry point: build the argparse CLI and dispatch to the handler
    matching the chosen sub-command."""
    # Sub-commands that take no extra arguments.
    commands = [
        'export_all',
        'export_custom',
        'web',
        'clean_db',
        'db_functions',
        'clear_views',
        'recreate_views',
    ]

    parser = argparse.ArgumentParser(
        description='Command line interface for munge'
    )
    parser.add_argument('-v', '--verbose', action='count', default=0)
    subparsers = parser.add_subparsers(help='commands', dest='command')

    for command in commands:
        subparsers.add_parser(command)

    import_csv_parser = subparsers.add_parser('import_csv')
    import_csv_parser.add_argument("--encoding", default='utf-8')
    import_csv_parser.add_argument("--delimiter", default=',')
    import_csv_parser.add_argument('--tablename', default=None)
    import_csv_parser.add_argument('filename')

    swap_temp_parser = subparsers.add_parser('swap_temp')
    swap_temp_parser.add_argument('-f', '--force', action="store_true")

    # Sub-commands that operate on a list of modules.
    module_commands = [
        'import',
        'summaries',
    ]
    for command in module_commands:
        module_parser = subparsers.add_parser(command)
        module_parser.add_argument('-f', '--force', action="store_true")
        module_parser.add_argument('-d', '--no-dependants', action="store_true")
        module_parser.add_argument('-a', '--all', action="store_true")
        module_parser.add_argument('-t', '--test', action="store_true")
        module_parser.add_argument('-n', '--noupdate', action="store_true")
        module_parser.add_argument('-u', '--updateonly', action="store_true")
        module_parser.add_argument('-s', '--stage', default=0, type=int)
        module_parser.add_argument('module', nargs='*')

    dep_parser = subparsers.add_parser('deps')
    dep_parser.add_argument('items', nargs='*')

    dep_parser = subparsers.add_parser('sql')
    dep_parser.add_argument('items', nargs='*')

    args = parser.parse_args()

    if args.command == 'deps':
        deps(args)
    if args.command == 'sql':
        sql(args)
    # BUG FIX: this branch was duplicated, so `export_all` ran twice per
    # invocation; it now runs exactly once.
    if args.command == 'export_all':
        export_all(verbose=args.verbose)
    elif args.command == 'import':
        import_module(args)
        sa_util.swap_tables(verbose=args.verbose)
    elif args.command == 'swap_temp':
        sa_util.swap_tables(verbose=args.verbose, force=args.force)
    elif args.command == 'summaries':
        build_views_summaries(args)
        if not args.noupdate:
            sa_util.swap_tables(verbose=args.verbose, force=args.force)
    elif args.command == 'export_custom':
        export_custom(verbose=args.verbose)
    elif args.command == 'import_csv':
        import_csv(args)
    elif args.command == 'web':
        webserver(args)
    elif args.command == 'clean_db':
        clean_db(args)
    elif args.command == 'clear_views':
        clear_views(args)
    elif args.command == 'recreate_views':
        recreate_views(args)
    elif args.command == 'db_functions':
        db_functions(verbose=args.verbose)
|
rgommers/statsmodels | statsmodels/stats/multitest.py | Python | bsd-3-clause | 16,540 | 0.00266 | '''Multiple Testing and P-Value Correction
Author: Josef Perktold
License: BSD-3
'''
from statsmodels.compat.python import range
from statsmodels.compat.collections import OrderedDict
import numpy as np
#==============================================
#
# Part 1: Multiple Tests and P-Value Correction
#
#==============================================
def _ecdf(x):
'''no frills empirical cdf used in fdrcorrection
'''
nobs = len(x)
return np.arange(1,nobs+1)/float(nobs)
# Canonical short code -> human-readable method name.
multitest_methods_names = {'b': 'Bonferroni',
                           's': 'Sidak',
                           'h': 'Holm',
                           'hs': 'Holm-Sidak',
                           'sh': 'Simes-Hochberg',
                           'ho': 'Hommel',
                           'fdr_bh': 'FDR Benjamini-Hochberg',
                           'fdr_by': 'FDR Benjamini-Yekutieli',
                           'fdr_tsbh': 'FDR 2-stage Benjamini-Hochberg',
                           'fdr_tsbky': 'FDR 2-stage Benjamini-Krieger-Yekutieli',
                           'fdr_gbs': 'FDR adaptive Gavrilov-Benjamini-Sarkar'
                           }

# Each sub-list starts with the canonical code, followed by its aliases.
_alias_list = [['b', 'bonf', 'bonferroni'],
               ['s', 'sidak'],
               ['h', 'holm'],
               ['hs', 'holm-sidak'],
               ['sh', 'simes-hochberg'],
               ['ho', 'hommel'],
               ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp'],
               ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr'],
               ['fdr_tsbh', 'fdr_2sbh'],
               ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage'],
               ['fdr_gbs']
               ]

# Flattened alias -> canonical code map (insertion order preserved so the
# canonical code is listed first for each method).
multitest_alias = OrderedDict()
for m in _alias_list:
    multitest_alias[m[0]] = m[0]
    for a in m[1:]:
        multitest_alias[a] = m[0]
def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False,
returnsorted=False):
'''test results and p-value correction for multiple tests
Parameters
----------
pvals : array_like
uncorrected p-values
alpha : float
FWER, family-wise error rate, e.g. 0.1
method : string
Method used for testing and adjustment of pvalues. Can be either the
full name or initial letters. Available methods are ::
`bonferroni` : one-step correction
`sidak` : one-step correction
`holm-sidak` : step down method using Sidak adjustments
`holm` : step-down method using Bonferroni adjustments
`simes-hochberg` : step-up method (independent)
`hommel` : closed method based on Simes tests (non-negative)
`fdr_bh` : Benjamini/Hochberg (non-negative)
`fdr_by` : Benjamini/Yekutieli (negative)
`fdr_tsbh` : two stage fdr correction (non-negative)
`fdr_tsbky` : two stage fdr correction (non-negative)
is_sorted : bool
If False (default), the p_values will be sorted, but the corrected
pvalues are in the original order. If True, then it assumed that the
pvalues are already sorted in ascending order.
returnsorted : bool
not tested, return sorted p-values instead of original sequence
Returns
-------
reject : array, boolean
true for hypothesis that can be rejected for given alpha
pvals_corrected : array
p-values corrected for multiple tests
alphacSidak: float
corrected alpha for Sidak method
alphacBonf: float
corrected alpha for Bonferroni method
Notes
-----
Except for 'fdr_twostage', the p-value correction is independent of the
alpha specified as argument. In these cases the corrected p-values
can also be compared with a different alpha. In the case of 'fdr_twostage',
the corrected p-values are specific to the given alpha, see
``fdrcorrection_twostage``.
all corrected p-values now tested against R.
insufficient "cosmetic" tests yet
The 'fdr_gbs' procedure is not verified against another package, p-values
are derived from scratch and are not derived in the reference. In Monte
Carlo experiments the method worked correctly and maintained the false
discovery rate.
All procedures that are included, control FWER or FDR in the independent
case, and most are robust in the positively correlated case.
`fdr_gbs`: high power, fdr control for independent case and only small
violation in positively correlated case
**Timing**:
Most of the time with large arrays is spent in `argsort`. When
we want to calculate the p-value for several methods, then it is more
efficient to presort the pvalues, and put the results back into the
original order outside of the function.
Method='hommel' is very slow for large arrays, since it requires the
evaluation of n partitions, where n is the number of p-values.
there will be API changes.
References
----------
'''
import gc
pvals = np.asarray(pvals)
alphaf = alpha # Notation ?
if not is_sorted:
sortind = np.argsort(pvals)
pvals = np.take(pvals, sortind)
ntests = len(pvals)
alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
alphacBonf = alphaf / float(ntests)
if method.lower() in ['b', 'bonf', 'bonferroni']:
reject = pvals <= alphacBonf
pvals_corrected = pvals * float(ntests)
elif method.lower() in ['s', 'sidak']:
reject = pvals <= alphacSidak
pvals_corrected = 1 - np.power((1. - pvals), ntests)
elif method.lower() in ['hs', 'holm-sidak']:
alphacSidak_all = 1 - np.power((1. - alphaf),
1./np.arange(ntests, 0, -1))
notreject = pvals > alphacSidak_all
del alphacSidak_all
nr_index = np.nonzero(notreject)[0]
if nr_index.size == 0:
# nonreject is empty, all rejected
notrejectmin = len(pvals)
else:
notrejectmin = np.min(nr_index)
notreject[notrejectmin:] = True
reject = ~notreject
del notreject
pvals_corrected_raw = 1 - np.power((1. - pvals),
np.arange(ntests, 0, -1))
pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
del pvals_corrected_raw
elif method.lower() in ['h', 'holm']:
notreject = pvals > alphaf / np.arange(ntests, 0, -1)
nr_index = np.nonzero(notreject)[0]
if nr_index.size == 0:
# nonreject is empty, all rejected
notrejectmin = len(pvals)
else:
notrejectmin = np.min(nr_index)
notreject[notrejectmin:] = True
reject = ~notreject
pvals_corrected_raw = pvals * np.arange(ntests, 0, -1)
pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
del pvals_corrected_raw
gc.collect()
elif method.lower() in ['sh', 'simes-hochberg']:
alphash = alphaf / np.arange(ntests, 0, -1)
reject = pvals <= alphash
rejind = np.nonzero(reject)
if rejind[0].size > 0:
rejectmax = np.max(np.nonzero(reject))
reject[:rejectmax] = True
pvals_corrected_raw = np.arange(ntests, 0, -1) * pvals
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
elif method.lower() in ['ho', 'hommel']:
# we need a copy because we overwrite it in a loop
a = pvals.copy()
for m in range(ntests, 1, -1):
cim = np.min(m * pvals[-m:] / np.arange(1,m+1.))
a[-m:] = np.maximum(a[-m:], cim)
a[:-m] = np.maximum(a[:-m], np.minimum(m * pvals[:-m], cim))
pvals_corrected = a
reje | ct = a <= alphaf
elif method.lower() in ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp']:
# delegate, call with sorted pvals
reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
method='indep',
is_sorted=True)
elif method.lower() in ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr']:
# delegate, call with sorted pvals
reject, | pvals_cor |
zero-os/0-orchestrator | pyclient/zeroos/orchestrator/client/EnumClusterStatus.py | Python | apache-2.0 | 143 | 0 | from enum import Enum
class EnumClusterStatus(Enum):
    """Lifecycle states of a storage cluster."""
    empty = "empty"
    deploying = "deploying"
    ready = "ready"
    error = "error"
|
myles/django-issues | src/issues/models.py | Python | bsd-3-clause | 3,687 | 0.033903 | from django.db import models
from django.conf import settings
from django.db.models import permalink
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.generic import GenericRelation
class Version(models.Model):
name = models.CharField(_('name'), max_length=200)
slug = models.SlugField(_('slug'), max_length=50, unique=True)
description = models.TextField(_('description'), blank=True, null=True)
url = models.URLField(_('url'), blank=True, null=True)
date = models.DateField(_('date'), blank=True, null=True)
date_added = models.DateTimeField(_('date added'), auto_now_add=True)
date_modified = models.DateTimeField(_('date modified'), auto_now=True)
class Meta:
db_table = 'issue_versions'
ordering = ('date',)
verbose_name = _('version')
verbose_name_plural = _('versions')
def __unicode__(self):
return u"%s" % (self.name)
@permalink
def get_absolute_url(self):
return ('issues_version_detail', None, {
'slug': self.slug,
})
class Category(models.Model):
name = models.CharField(_('name'), max_length=200)
slug = models.SlugField(_('slug'), max_length=50, unique=True)
assigned = models.ForeignKey(User, blank=True, null=True,
related_name='category_assigned')
date_added = models.DateTimeField(_('date added'), auto_now_add=True)
date_modified = models.DateTimeField(_('date modified'), auto_now=True)
class Meta:
db_table = 'issue_categories'
ordering = ('name',)
verbose_name = _('category')
verbose_name_plural = _('categories')
def __unicode__(self):
return u"%s" % (self.name)
@permalink
def get_absolute_url(self):
return ('issue_category_detail', None, {
'slug': self.slug,
})
ISSUE_STATUSES = (
(1, _('New')),
(2, | _('Assigned')),
(3, _('Resolved') | ),
(4, _('Feedback')),
(5, _('Closed')),
(6, _('Rejected')),
)
ISSUE_PRIORITIES = (
(1, _('Low')),
(2, _('Normal')),
(3, _('High')),
(4, _('Urgent')),
(5, _('Immediate')),
)
# TODO I want this it first check the `settings.py` file for customization.
ISSUE_STATUS_CHOICES = ISSUE_STATUSES
ISSUE_PRIORITIY_CHOICES = ISSUE_PRIORITIES
class Issue(models.Model):
    """A tracked issue/ticket with workflow status, priority and watchers."""
    subject = models.CharField(_('subject'), max_length=200)
    description = models.TextField(_('description'), blank=True, null=True)
    version = models.ForeignKey(Version, blank=True, null=True)
    category = models.ForeignKey(Category, blank=True, null=True)
    # Reporter of the issue (required).
    created = models.ForeignKey(User, related_name='created')
    assigned = models.ForeignKey(User, blank=True, null=True,
        related_name='issue_assigned')
    # NOTE(review): null=True is a no-op on ManyToManyField -- Django ignores it.
    watcher = models.ManyToManyField(User, blank=True, null=True,
        related_name='watchers')
    start_date = models.DateField(_('start'), blank=True, null=True,
        help_text=_('The date to start working on the issue.'))
    due_date = models.DateField(_('due date'), blank=True, null=True,
        help_text=_('The date the issue is due.'))
    status = models.IntegerField(_('status'), choices=ISSUE_STATUS_CHOICES,
        default=1)
    priority = models.IntegerField(_('priority'),
        choices=ISSUE_PRIORITIY_CHOICES, default=2)
    # Generic relation to Django's comment framework (object_pk holds our pk).
    comment = GenericRelation(Comment, object_id_field='object_pk')
    date_added = models.DateTimeField(_('date added'), auto_now_add=True)
    date_modified = models.DateTimeField(_('date modified'), auto_now=True)
    class Meta:
        db_table = 'issues'
        ordering = ('due_date', 'priority',)
        verbose_name = _('issue')
        verbose_name_plural = _('issues')
    def __unicode__(self):
        return u"[%s] %s" % (self.pk, self.subject)
    @permalink
    def get_absolute_url(self):
        return ('issues_issue_detail', None, {
            'pk': self.pk,
        })
|
TamiaLab/PySkCode | skcode/utility/smileys.py | Python | agpl-3.0 | 9,460 | 0.001375 | """
SkCode smileys replacement utility code.
"""
import re
from html import escape as escape_html
from urllib.parse import urljoin
# Default emoticon -> image-filename map. The entries are joined into a single
# regex alternation by ``setup_smileys_replacement``, so each emoticon text
# must be unique (a duplicate ('O:-)', 'innocent.png') entry was removed, and
# the colon-terminated aliases ':sunglasses:' / ':shock:' were added for
# consistency with every other named code, keeping the old spellings).
DEFAULT_EMOTICONS_MAP = (
    ('<3', 'heart.png'),
    (':heart:', 'heart.png'),
    (":')", 'joy.png'),
    (':")', 'joy.png'),
    (":'-)", 'joy.png'),
    (':"-)', 'joy.png'),
    (":joy:", 'joy.png'),
    (':D', 'grin.png'),
    (':-D', 'grin.png'),
    ('=D', 'grin.png'),
    (':grin:', 'grin.png'),
    (':)', 'smile.png'),
    (':-)', 'smile.png'),
    ('=]', 'smile.png'),
    ('=)', 'smile.png'),
    (':]', 'smile.png'),
    ('^^', 'smile.png'),
    (':smile:', 'smile.png'),
    ("':)", 'sweat_smile.png'),
    ("':-)", 'sweat_smile.png'),
    ("'=)", 'sweat_smile.png'),
    ("':D", 'sweat_smile.png'),
    ("':-D", 'sweat_smile.png'),
    ("'=D", 'sweat_smile.png'),
    ("^^'", 'sweat_smile.png'),
    ('^^"', 'sweat_smile.png'),
    (':sweat_smile:', 'sweat_smile.png'),
    ('>:)', 'laughing.png'),
    ('>;)', 'laughing.png'),
    ('>:-)', 'laughing.png'),
    ('>;-)', 'laughing.png'),
    ('>=)', 'laughing.png'),
    (':laughing:', 'laughing.png'),
    (';)', 'wink.png'),
    (';-)', 'wink.png'),
    ('*-)', 'wink.png'),
    ('*)', 'wink.png'),
    (';-]', 'wink.png'),
    (';]', 'wink.png'),
    (';D', 'wink.png'),
    (';^)', 'wink.png'),
    (':wink:', 'wink.png'),
    ("':(", 'sweat.png'),
    ("':-(", 'sweat.png'),
    ("'=(", 'sweat.png'),
    (':sweat:', 'sweat.png'),
    (':*', 'kissing.png'),
    (':-*', 'kissing.png'),
    ('=*', 'kissing.png'),
    (':^*', 'kissing.png'),
    (':kissing:', 'kissing.png'),
    ('>:P', 'troll.png'),
    ('X-P', 'troll.png'),
    ('X-p', 'troll.png'),
    ('x-p', 'troll.png'),
    ('x-P', 'troll.png'),
    ('>:[', 'disappointed.png'),
    (':-(', 'disappointed.png'),
    (':(', 'disappointed.png'),
    (':-[', 'disappointed.png'),
    (':[', 'disappointed.png'),
    ('=(', 'disappointed.png'),
    (':disappointed:', 'disappointed.png'),
    ('>:(', 'angry.png'),
    ('>:-(', 'angry.png'),
    (':@', 'angry.png'),
    (':angry:', 'angry.png'),
    (":'(", 'cry.png'),
    (":'-(", 'cry.png'),
    (";(", 'cry.png'),
    (";-(", 'cry.png'),
    (':cry:', 'cry.png'),
    (':sad:', 'cry.png'),
    ('>.<', 'doh.png'),
    ('>_<', 'doh.png'),
    (':doh:', 'doh.png'),
    ('D:', 'fearful.png'),
    (':fearful:', 'fearful.png'),
    (':$', 'zip.png'),
    ('=$', 'zip.png'),
    (':zip:', 'zip.png'),
    ('x)', 'dizzy.png'),
    ('x-)', 'dizzy.png'),
    ('xD', 'dizzy.png'),
    ('X)', 'dizzy.png'),
    ('X-)', 'dizzy.png'),
    ('XD', 'dizzy.png'),
    (':dizzy:', 'dizzy.png'),
    ('*\\0/*', 'victory.png'),
    ('\\0/', 'victory.png'),
    ('*\\O/*', 'victory.png'),
    ('*\\o/*', 'victory.png'),
    ('\\O/', 'victory.png'),
    ('\\o/', 'victory.png'),
    ('O:-)', 'innocent.png'),
    ('0:-3', 'innocent.png'),
    ('0:3', 'innocent.png'),
    ('0:-)', 'innocent.png'),
    ('0:)', 'innocent.png'),
    ('0;^)', 'innocent.png'),
    ('O:)', 'innocent.png'),
    ('O;-)', 'innocent.png'),
    ('O=)', 'innocent.png'),
    ('0;-)', 'innocent.png'),
    ('O:-3', 'innocent.png'),
    ('O:3', 'innocent.png'),
    (':innocent:', 'innocent.png'),
    ('B-)', 'sunglasses.png'),
    ('B)', 'sunglasses.png'),
    ('8)', 'sunglasses.png'),
    ('8-)', 'sunglasses.png'),
    ('B-D', 'sunglasses.png'),
    ('8-D', 'sunglasses.png'),
    (':cool:', 'sunglasses.png'),
    (':sunglasses', 'sunglasses.png'),
    (':sunglasses:', 'sunglasses.png'),
    ('-_-', 'neutral.png'),
    ('-__-', 'neutral.png'),
    ('-___-', 'neutral.png'),
    (':|', 'neutral.png'),
    (':-|', 'neutral.png'),
    ('T_T', 'neutral.png'),
    (':neutral:', 'neutral.png'),
    (':?', 'confused.png'),
    (':-?', 'confused.png'),
    (':???', 'confused.png'),
    ('>:\\', 'confused.png'),
    ('>:/', 'confused.png'),
    (':-/', 'confused.png'),
    (':/', 'confused.png'),
    (':-\\', 'confused.png'),
    (':\\', 'confused.png'),
    ('=/', 'confused.png'),
    ('=\\', 'confused.png'),
    (':L', 'confused.png'),
    (':-L', 'confused.png'),
    ('=L', 'confused.png'),
    (':confused:', 'confused.png'),
    (':P', 'razz.png'),
    (':-P', 'razz.png'),
    ('=P', 'razz.png'),
    (':-p', 'razz.png'),
    (':p', 'razz.png'),
    ('=p', 'razz.png'),
    (':-Þ', 'razz.png'),
    (':Þ', 'razz.png'),
    (':þ', 'razz.png'),
    (':-þ', 'razz.png'),
    (':-b', 'razz.png'),
    (':b', 'razz.png'),
    ('d:', 'razz.png'),
    (':razz:', 'razz.png'),
    (':-O', 'shock.png'),
    (':O', 'shock.png'),
    (':-o', 'shock.png'),
    (':o', 'shock.png'),
    ('O_O', 'shock.png'),
    ('o_o', 'shock.png'),
    ('>:O', 'shock.png'),
    ('8o', 'shock.png'),
    ('8-o', 'shock.png'),
    ('8O', 'shock.png'),
    ('8-O', 'shock.png'),
    (':eek:', 'shock.png'),
    (':shock', 'shock.png'),
    (':shock:', 'shock.png'),
    (':-X', 'mad.png'),
    (':X', 'mad.png'),
    (':-#', 'mad.png'),
    (':#', 'mad.png'),
    ('=X', 'mad.png'),
    ('=x', 'mad.png'),
    (':x', 'mad.png'),
    (':-x', 'mad.png'),
    ('=#', 'mad.png'),
    (':mad:', 'mad.png'),
    (']:)', 'evil.png'),
    (']:-)', 'evil.png'),
    (']:D', 'evil.png'),
    (']:-D', 'evil.png'),
    (']=D', 'evil.png'),
    (':evil:', 'evil.png'),
    (':lol:', 'lol.png'),
    (':oops:', 'oops.png'),
    (':twisted:', 'twisted.png'),
    (':geek:', 'geek.png'),
    (':spy:', 'spy.png'),
    (':random:', 'random.png'),
    (':bomb:', 'bomb.png'),
    (':tamia:', 'tamia.png'),
    (':!:', 'warning.png'),
    (':?:', 'question.png'),
    (':idea:', 'idea.png'),
    (':mrgreen:', 'alien.png'),
    (':nuclear:', 'nuclear.png'),
    (':sleep:', 'sleep.png'),
    (':stop:', 'stop.png'),
    (':death:', 'death.png'),
)

# Document attribute names used to store the emoticon settings on the root node.
EMOTICONS_MAP_ATTR_NAME = 'EMOTICONS_MAP'
EMOTICONS_REGEX_ATTR_NAME = 'EMOTICONS_REGEX'
EMOTICONS_BASE_URL_ATTR_NAME = 'EMOTICONS_BASE_URL'
EMOTICONS_HTML_CLASS_ATTR_NAME = 'EMOTICONS_HTML_CLASS'


def setup_smileys_replacement(document_tree, base_url, emoticons_map=DEFAULT_EMOTICONS_MAP, html_class='emoticons'):
    """
    Setup the document for emoticons replacement.
    :param document_tree: The root document tree instance to be set up.
    :param base_url: The base URL for all emoticon images, or a callable taking
    an emoticon filename and returning its URL.
    :param emoticons_map: A tuple of ``(emoticon_text, emoticon_filename)`` pairs.
    :param html_class: The HTML class to be assigned to each emoticon img tag (optional).
    """
    assert document_tree, "Document tree is mandatory."
    assert document_tree.is_root, "Document tree must be a root tree node instance."
    assert base_url, "Base URL is mandatory."

    # Replacement runs on already HTML-encoded text, so both the detection
    # regex and the lookup dict are keyed on the HTML-escaped emoticon text.
    emoticon_rules = '|'.join(re.escape(escape_html(emoticon)) for emoticon, _ in emoticons_map)
    emoticons_regex = re.compile(r'(^|\s+)(?P<emoticon>{rules})(\s+|$)'.format(rules=emoticon_rules))

    emoticons_map = {escape_html(emoticon): filename for emoticon, filename in emoticons_map}

    # Helper turning a string ``base_url`` into a callable; a callable
    # ``base_url`` is stored as-is below.
    def build_url(filename):
        return urljoin(base_url, filename)

    # Store all emoticons related options on the root document node.
    document_tree.attrs[EMOTICONS_MAP_ATTR_NAME] = emoticons_map
    document_tree.attrs[EMOTICONS_REGEX_ATTR_NAME] = emoticons_regex
    document_tree.attrs[EMOTICONS_BASE_URL_ATTR_NAME] = build_url if isinstance(base_url, str) else base_url
    document_tree.attrs[EMOTICONS_HTML_CLASS_ATTR_NAME] = html_class
def do_smileys_replacement(root_tree_node, input_text):
"""
Do all smileys replacement.
:param root_tree_node: The root tree node.
:param input_text: The i |
vsilent/smarty-bot | alembic/versions/3734300868bc_add_account_id.py | Python | mit | 391 | 0.005115 | """add accoun | t id
Revision ID: 3734300868bc
Revises: 3772e5bcb34d
Create Date: 2013-09-30 18:07:21.729288
"""
# revision identifiers, used by Alembic.
revision = '3734300868bc'
down_revision = '3772e5bcb34d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the integer ``account_id`` column to ``account_profile``."""
    # ``sa.Integer`` accepts no arguments -- the previous ``sa.Integer(11)``
    # (a MySQL-style display width) raises TypeError under SQLAlchemy.
    op.add_column('account_profile', sa.Column('account_id', sa.Integer()))
d | ef downgrade():
pass
|
kdebrab/pandas | asv_bench/benchmarks/index_object.py | Python | bsd-3-clause | 5,080 | 0 | import numpy as np
import pandas.util.testing as tm
from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex,
Float64Index)
from .pandas_vb_common import setup # noqa
class SetOperations(object):
    """ASV benchmark: set operations between two almost-identical indexes."""
    goal_time = 0.2
    params = (['datetime', 'date_string', 'int', 'strings'],
              ['intersection', 'union', 'symmetric_difference'])
    param_names = ['dtype', 'method']
    def setup(self, dtype, method):
        # ``right`` is ``left`` minus its last element, so the operands
        # overlap almost completely.
        N = 10**5
        dates_left = date_range('1/1/2000', periods=N, freq='T')
        fmt = '%Y-%m-%d %H:%M:%S'
        date_str_left = Index(dates_left.strftime(fmt))
        int_left = Index(np.arange(N))
        str_left = tm.makeStringIndex(N)
        data = {'datetime': {'left': dates_left, 'right': dates_left[:-1]},
                'date_string': {'left': date_str_left,
                                'right': date_str_left[:-1]},
                'int': {'left': int_left, 'right': int_left[:-1]},
                'strings': {'left': str_left, 'right': str_left[:-1]}}
        self.left = data[dtype]['left']
        self.right = data[dtype]['right']
    def time_operation(self, dtype, method):
        getattr(self.left, method)(self.right)
class SetDisjoint(object):
    """ASV benchmark: ``difference`` of two fully disjoint datetime indexes."""
    goal_time = 0.2
    def setup(self):
        N = 10**5
        B = N + 20000
        self.datetime_left = DatetimeIndex(range(N))
        self.datetime_right = DatetimeIndex(range(N, B))
    def time_datetime_difference_disjoint(self):
        self.datetime_left.difference(self.datetime_right)
class Datetime(object):
    """ASV benchmark: the ``DatetimeIndex._is_dates_only`` check."""
    goal_time = 0.2
    def setup(self):
        self.dr = date_range('20000101', freq='D', periods=10000)
    def time_is_dates_only(self):
        self.dr._is_dates_only
class Ops(object):
    """ASV benchmark: scalar arithmetic on numeric (int/float) indexes."""
    sample_time = 0.2
    params = ['float', 'int']
    param_names = ['dtype']
    def setup(self, dtype):
        N = 10**6
        indexes = {'int': 'makeIntIndex', 'float': 'makeFloatIndex'}
        self.index = getattr(tm, indexes[dtype])(N)
    def time_add(self, dtype):
        self.index + 2
    def time_subtract(self, dtype):
        self.index - 2
    def time_multiply(self, dtype):
        self.index * 2
    def time_divide(self, dtype):
        self.index / 2
    def time_modulo(self, dtype):
        self.index % 2
class Range(object):
    """ASV benchmark: min/max on increasing and decreasing RangeIndex."""
    goal_time = 0.2
    def setup(self):
        self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
        self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
    def time_max(self):
        self.idx_inc.max()
    def time_max_trivial(self):
        # "trivial": the maximum of a decreasing range is its start element.
        self.idx_dec.max()
    def time_min(self):
        self.idx_dec.min()
    def time_min_trivial(self):
        # "trivial": the minimum of an increasing range is its start element.
        self.idx_inc.min()
class IndexAppend(object):
    """ASV benchmark: appending a long list of small indexes to one index."""
    goal_time = 0.2
    def setup(self):
        N = 10000
        self.range_idx = RangeIndex(0, 100)
        self.int_idx = self.range_idx.astype(int)
        self.obj_idx = self.int_idx.astype(str)
        self.range_idxs = []
        self.int_idxs = []
        self.object_idxs = []
        # Build N-1 consecutive, non-overlapping 100-element indexes per dtype.
        for i in range(1, N):
            r_idx = RangeIndex(i * 100, (i + 1) * 100)
            self.range_idxs.append(r_idx)
            i_idx = r_idx.astype(int)
            self.int_idxs.append(i_idx)
            o_idx = i_idx.astype(str)
            self.object_idxs.append(o_idx)
    def time_append_range_list(self):
        self.range_idx.append(self.range_idxs)
    def time_append_int_list(self):
        self.int_idx.append(self.int_idxs)
    def time_append_obj_list(self):
        self.obj_idx.append(self.object_idxs)
class Indexing(object):
    """ASV benchmark: masking, slicing and ``get_loc`` on several index types."""
    goal_time = 0.2
    params = ['String', 'Float', 'Int']
    param_names = ['dtype']
    def setup(self, dtype):
        N = 10**6
        self.idx = getattr(tm, 'make{}Index'.format(dtype))(N)
        self.array_mask = (np.arange(N) % 3) == 0
        self.series_mask = Series(self.array_mask)
        self.sorted = self.idx.sort_values()
        half = N // 2
        # Duplicate the first half of the index to get non-unique variants.
        self.non_unique = self.idx[:half].append(self.idx[:half])
        self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half])
        self.key = self.sorted[N // 4]
    def time_boolean_array(self, dtype):
        self.idx[self.array_mask]
    def time_boolean_series(self, dtype):
        self.idx[self.series_mask]
    def time_get(self, dtype):
        self.idx[1]
    def time_slice(self, dtype):
        self.idx[:-1]
    def time_slice_step(self, dtype):
        self.idx[::2]
    def time_get_loc(self, dtype):
        self.idx.get_loc(self.key)
    def time_get_loc_sorted(self, dtype):
        self.sorted.get_loc(self.key)
    def time_get_loc_non_unique(self, dtype):
        self.non_unique.get_loc(self.key)
    def time_get_loc_non_unique_sorted(self, dtype):
        self.non_unique_sorted.get_loc(self.key)
class Float64IndexMethod(object):
    """ASV benchmark: ``get_loc`` on a Float64Index (regression for GH 13166)."""
    # GH 13166
    goal_time = 0.2
    def setup(self):
        N = 100000
        a = np.arange(N)
        self.ind = Float64Index(a * 4.8000000418824129e-08)
    def time_get_loc(self):
        self.ind.get_loc(0)
|
fangxingli/hue | desktop/libs/liboozie/src/liboozie/submittion2_tests.py | Python | apache-2.0 | 12,471 | 0.010424 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by app | licable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either | express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal
from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from desktop.lib.test_utils import clear_sys_caches
from desktop.lib.django_test_util import make_logged_in_client
from oozie.models2 import Node
from oozie.tests import OozieMockBase
from liboozie.conf import USE_LIBPATH_FOR_JARS
from liboozie.submission2 import Submission
LOG = logging.getLogger(__name__)
@attr('requires_hadoop')
def test_copy_files():
  """Submission._copy_files() should deploy the workflow XML, job.properties
  and the referenced JARs to HDFS (requires a live pseudo-HDFS cluster)."""
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    c = make_logged_in_client()
    user = User.objects.get(username='test')

    prefix = '/tmp/test_copy_files'

    if cluster.fs.exists(prefix):
      cluster.fs.rmtree(prefix)

    # Jars in various locations
    deployment_dir = '%s/workspace' % prefix
    external_deployment_dir = '%s/deployment' % prefix
    jar_1 = '%s/udf1.jar' % prefix
    jar_2 = '%s/lib/udf2.jar' % prefix
    jar_3 = '%s/udf3.jar' % deployment_dir
    jar_4 = '%s/lib/udf4.jar' % deployment_dir # Doesn't move
    jar_5 = 'udf5.jar'
    jar_6 = 'lib/udf6.jar' # Doesn't move

    cluster.fs.mkdir(prefix)
    cluster.fs.create(jar_1)
    cluster.fs.create(jar_2)
    cluster.fs.create(jar_3)
    cluster.fs.create(jar_4)
    cluster.fs.create(deployment_dir + '/' + jar_5)
    cluster.fs.create(deployment_dir + '/' + jar_6)

    class MockJob():
      """Fake workflow exposing only what _copy_files() reads."""
      XML_FILE_NAME = 'workflow.xml'

      def __init__(self):
        self.deployment_dir = deployment_dir
        self.nodes = [
          Node({'id': '1', 'type': 'mapreduce', 'properties': {'jar_path': jar_1}}),
          Node({'id': '2', 'type': 'mapreduce', 'properties': {'jar_path': jar_2}}),
          Node({'id': '3', 'type': 'java', 'properties': {'jar_path': jar_3}}),
          Node({'id': '4', 'type': 'java', 'properties': {'jar_path': jar_4}}),
          # Workspace relative paths
          Node({'id': '5', 'type': 'java', 'properties': {'jar_path': jar_5}}),
          Node({'id': '6', 'type': 'java', 'properties': {'jar_path': jar_6}})
        ]

    submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)
    submission._copy_files(deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})
    submission._copy_files(external_deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})

    assert_true(cluster.fs.exists(deployment_dir + '/workflow.xml'), deployment_dir)
    assert_true(cluster.fs.exists(deployment_dir + '/job.properties'), deployment_dir)

    # All sources still there
    assert_true(cluster.fs.exists(jar_1))
    assert_true(cluster.fs.exists(jar_2))
    assert_true(cluster.fs.exists(jar_3))
    assert_true(cluster.fs.exists(jar_4))
    assert_true(cluster.fs.exists(deployment_dir + '/' + jar_5))
    assert_true(cluster.fs.exists(deployment_dir + '/' + jar_6))

    # Lib
    deployment_dir = deployment_dir + '/lib'
    external_deployment_dir = external_deployment_dir + '/lib'

    if USE_LIBPATH_FOR_JARS.get():
      # JARs are referenced through oozie.libpath instead of being copied.
      assert_true(jar_1 in submission.properties['oozie.libpath'])
      assert_true(jar_2 in submission.properties['oozie.libpath'])
      assert_true(jar_3 in submission.properties['oozie.libpath'])
      assert_true(jar_4 in submission.properties['oozie.libpath'])
      # Debug aid; parenthesized for Python 3 compatibility (was a py2 print).
      print(deployment_dir + '/' + jar_5)
      assert_true((deployment_dir + '/' + jar_5) in submission.properties['oozie.libpath'], submission.properties['oozie.libpath'])
      assert_true((deployment_dir + '/' + jar_6) in submission.properties['oozie.libpath'], submission.properties['oozie.libpath'])
    else:
      list_dir_workspace = cluster.fs.listdir(deployment_dir)
      list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

      # All destinations there
      assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
      assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
      assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
      assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
      assert_true(cluster.fs.exists(deployment_dir + '/udf5.jar'), list_dir_workspace)
      assert_true(cluster.fs.exists(deployment_dir + '/udf6.jar'), list_dir_workspace)

      assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
      assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
      assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
      assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)
      assert_true(cluster.fs.exists(external_deployment_dir + '/udf5.jar'), list_dir_deployement)
      assert_true(cluster.fs.exists(external_deployment_dir + '/udf6.jar'), list_dir_deployement)

      stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
      stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
      stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
      stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
      stats_udf5 = cluster.fs.stats(deployment_dir + '/udf5.jar')
      stats_udf6 = cluster.fs.stats(deployment_dir + '/udf6.jar')

      submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>", {'prop1': 'val1'})

      # Re-deploying must refresh every JAR except the "Doesn't move" ones.
      assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
      assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
      assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
      assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
      assert_not_equal(stats_udf5['fileId'], cluster.fs.stats(deployment_dir + '/udf5.jar')['fileId'])
      assert_equal(stats_udf6['fileId'], cluster.fs.stats(deployment_dir + '/udf6.jar')['fileId'])

    # Test _create_file()
    # NOTE: the previous message argument (list_dir_workspace) was only bound
    # on the `else` branch above and raised NameError on the libpath branch.
    submission._create_file(deployment_dir, 'test.txt', data='Test data')
    assert_true(cluster.fs.exists(deployment_dir + '/test.txt'))
  finally:
    try:
      cluster.fs.rmtree(prefix)
    except Exception:  # was a bare except:; keep cleanup best-effort
      LOG.exception('failed to remove %s' % prefix)
class MockFs():
  """Minimal stand-in for the HDFS filesystem object used by Submission."""

  def __init__(self, logical_name=None):
    # Fixed default FS URI matching the mocked cluster.
    self.fs_defaultfs = 'hdfs://curacao:8020'
    self.logical_name = logical_name or ''
class MockJt():
  """Minimal stand-in for the JobTracker object used by Submission."""

  def __init__(self, logical_name=None):
    self.logical_name = logical_name or ''
class TestSubmission(OozieMockBase):
  def test_get_properties(self):
    """_update_properties() should fill jobTracker/nameNode from the cluster."""
    submission = Submission(self.user, fs=MockFs())
    assert_equal({'security_enabled': False}, submission.properties)
    submission._update_properties('curacao:8032', '/deployment_dir')
    assert_equal({
        'jobTracker': 'curacao:8032',
        'nameNode': 'hdfs://curacao:8020',
        'security_enabled': False
    }, submission.properties)
def test_get_logical_properties(self):
submission = Submission(self.user, fs=MockFs(logical_name='fsname'), jt=MockJt(logical_name='jtname'))
assert_equal({'security_enabled': False}, submission.properties)
submission._update_properties('curacao:8032', '/deploym |
jhermann/cookiecutter | setup.py | Python | bsd-3-clause | 3,212 | 0.001868 | #!/usr/bin/env python
import os
import sys
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
# Distribution version; also used for the `tag` shortcut below.
version = "1.0.1"

if sys.argv[-1] == 'publish':
    # Shortcut: `setup.py publish` uploads sdist and wheel to PyPI.
    os.system('python setup.py sdist upload')
    os.system('python setup.py bdist_wheel upload')
    sys.exit()

if sys.argv[-1] == 'tag':
    # Shortcut: `setup.py tag` creates and pushes a git tag for `version`.
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

# Read the long-description parts with context managers so the file handles
# are closed promptly (the previous open().read() leaked them).
with open('README.rst') as f:
    readme = f.read()
with open('HISTORY.rst') as f:
    history = f.read().replace('.. :changelog:', '')

requirements = ['binaryornot>=0.2.0', 'jinja2>=2.7', 'PyYAML>=3.10', 'click<4.0']
test_requirements = ['pytest']

# Add Python 2.6-specific dependencies
if sys.version_info[:2] < (2, 7):
    requirements.append('ordereddict')
    requirements.append('simplejson')
    test_requirements.append('unittest2')

# Add Python 2.6 and 2.7-specific dependencies.
# NOTE(review): the string comparison only works because the major version is
# the first character; sys.version_info[0] < 3 is the robust form.
if sys.version < '3':
    requirements.append('mock')

# There are no Python 3-specific dependencies to add

long_description = readme + '\n\n' + history

if sys.argv[-1] == 'readme':
    # Shortcut: `setup.py readme` prints the combined long description.
    print(long_description)
    sys.exit()
class PyTest(Command):
    """``setup.py test`` command that delegates the test run to py.test."""
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
    def initialize_options(self):
        self.pytest_args = []
    def finalize_options(self):
        pass
    def run(self):
        # Imported here so pytest is only required when actually running tests.
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
# Packaging metadata, kept in a plain dict so the `readme`/`publish` shortcuts
# above can run without invoking setup().
project = {
    'name': 'cookiecutter',
    'version': version,
    'description': ('A command-line utility that creates projects from project '
                    'templates, e.g. creating a Python package project from a Python '
                    'package project template.'),
    'long_description': long_description,
    'author': 'Audrey Roy',
    'author_email': 'audreyr@gmail.com',
    'url': 'https://github.com/audreyr/cookiecutter',
    'packages': [
        'cookiecutter',
    ],
    'package_dir': {'cookiecutter': 'cookiecutter'},
    'entry_points': {
        'console_scripts': [
            'cookiecutter = cookiecutter.cli:main',
        ]
    },
    'include_package_data': True,
    'install_requires': requirements,
    'license': 'BSD',
    'zip_safe': False,
    'classifiers': [
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development',
    ],
    'keywords': 'cookiecutter, Python, projects, project templates, Jinja2, '
                'skeleton, scaffolding, project directory, setup.py, package, packaging',
    'cmdclass': {'test': PyTest},
    'test_suite': 'tests',
    'tests_require': test_requirements,
}

if __name__ == '__main__':
    setup(**project)
|
tombstone/models | official/recommendation/ncf_keras_main.py | Python | apache-2.0 | 19,975 | 0.00766 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
# pylint: disable=g-bad-import-order
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import ncf_common
from official.recommendation import ncf_input_pipeline
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def metric_fn(logits, dup_mask, match_mlperf):
  """Returns per-example top-k hit indicators and their metric weights.

  Args:
    logits: Model logits; the first column is dropped before ranking (it is
      the fixed zero column prepended in `_get_keras_model`).
    dup_mask: Mask flagging duplicate items, cast to float32 before use.
    match_mlperf: Whether to apply MLPerf-compatible evaluation rules.

  Returns:
    A tuple (in_top_k, metric_weights) as float32 tensors.
  """
  dup_mask = tf.cast(dup_mask, tf.float32)
  logits = tf.slice(logits, [0, 1], [-1, -1])
  in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg(
      logits,
      dup_mask,
      match_mlperf)
  metric_weights = tf.cast(metric_weights, tf.float32)
  return in_top_k, metric_weights
class MetricLayer | (tf.keras.layers.Layer):
"""Custom layer of metrics for NCF model."""
def __init__ | (self, match_mlperf):
super(MetricLayer, self).__init__()
self.match_mlperf = match_mlperf
def get_config(self):
return {"match_mlperf": self.match_mlperf}
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def call(self, inputs, training=False):
logits, dup_mask = inputs
if training:
hr_sum = 0.0
hr_count = 0.0
else:
metric, metric_weights = metric_fn(logits, dup_mask, self.match_mlperf)
hr_sum = tf.reduce_sum(metric * metric_weights)
hr_count = tf.reduce_sum(metric_weights)
self.add_metric(hr_sum, name="hr_sum", aggregation="mean")
self.add_metric(hr_count, name="hr_count", aggregation="mean")
return logits
class LossLayer(tf.keras.layers.Layer):
  """Pass-through loss layer for NCF model.

  Adds a sum-reduced sparse categorical cross-entropy loss, masked by the
  valid-point mask and normalized by ``loss_normalization_factor`` (the batch
  size at the call site in `_get_keras_model`); returns its logits unchanged.
  """
  def __init__(self, loss_normalization_factor):
    # The loss may overflow in float16, so we use float32 instead.
    super(LossLayer, self).__init__(dtype="float32")
    self.loss_normalization_factor = loss_normalization_factor
    self.loss = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction="sum")
  def get_config(self):
    return {"loss_normalization_factor": self.loss_normalization_factor}
  @classmethod
  def from_config(cls, config, custom_objects=None):
    return cls(**config)
  def call(self, inputs):
    logits, labels, valid_pt_mask_input = inputs
    loss = self.loss(
        y_true=labels, y_pred=logits, sample_weight=valid_pt_mask_input)
    loss = loss * (1.0 / self.loss_normalization_factor)
    self.add_loss(loss)
    return logits
class IncrementEpochCallback(tf.keras.callbacks.Callback):
  """A callback to increase the requested epoch for the data producer.

  The reason why we need this is because we can only buffer a limited amount of
  data. So we keep a moving window to represent the buffer. This is to move the
  one of the window's boundaries for each epoch.
  """
  def __init__(self, producer):
    self._producer = producer
  def on_epoch_begin(self, epoch, logs=None):
    # Advance the producer's buffered-data window by one epoch.
    self._producer.increment_request_epoch()
class CustomEarlyStopping(tf.keras.callbacks.Callback):
  """Stop training once the monitored metric reaches a desired value."""

  def __init__(self, monitor, desired_value):
    super(CustomEarlyStopping, self).__init__()
    self.monitor = monitor
    self.desired = desired_value
    self.stopped_epoch = 0

  def on_epoch_end(self, epoch, logs=None):
    # Stop as soon as the monitored value is truthy and meets the target.
    observed = self.get_monitor_value(logs)
    if not observed:
      return
    if observed >= self.desired:
      self.stopped_epoch = epoch
      self.model.stop_training = True

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0:
      print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))

  def get_monitor_value(self, logs):
    logs = logs or {}
    value = logs.get(self.monitor)
    if value is None:
      logging.warning("Early stopping conditioned on metric `%s` "
                      "which is not available. Available metrics are: %s",
                      self.monitor, ",".join(list(logs.keys())))
    return value
def _get_keras_model(params):
  """Constructs and returns the model.

  Args:
    params: Dict of hyperparameters; reads "batch_size", "match_mlperf" and
      "keras_use_ctl" here (plus whatever `neumf_model.construct_model` uses).

  Returns:
    A `tf.keras.Model` wrapping the NeuMF base model, with metric and loss
    layers attached unless a custom training loop is used.
  """
  batch_size = params["batch_size"]
  user_input = tf.keras.layers.Input(
      shape=(1,), name=movielens.USER_COLUMN, dtype=tf.int32)
  item_input = tf.keras.layers.Input(
      shape=(1,), name=movielens.ITEM_COLUMN, dtype=tf.int32)
  valid_pt_mask_input = tf.keras.layers.Input(
      shape=(1,), name=rconst.VALID_POINT_MASK, dtype=tf.bool)
  dup_mask_input = tf.keras.layers.Input(
      shape=(1,), name=rconst.DUPLICATE_MASK, dtype=tf.int32)
  label_input = tf.keras.layers.Input(
      shape=(1,), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool)
  base_model = neumf_model.construct_model(user_input, item_input, params)
  logits = base_model.output
  # Prepend a fixed zero-logit column so softmax_logits has two classes
  # (class 0 = negative) matching the boolean sparse labels.
  zeros = tf.keras.layers.Lambda(
      lambda x: x * 0)(logits)
  softmax_logits = tf.keras.layers.concatenate(
      [zeros, logits],
      axis=-1)
  # Custom training loop calculates loss and metric as a part of
  # training/evaluation step function.
  if not params["keras_use_ctl"]:
    softmax_logits = MetricLayer(
        params["match_mlperf"])([softmax_logits, dup_mask_input])
    # TODO(b/134744680): Use model.add_loss() instead once the API is well
    # supported.
    softmax_logits = LossLayer(batch_size)(
        [softmax_logits, label_input, valid_pt_mask_input])
  keras_model = tf.keras.Model(
      inputs={
          movielens.USER_COLUMN: user_input,
          movielens.ITEM_COLUMN: item_input,
          rconst.VALID_POINT_MASK: valid_pt_mask_input,
          rconst.DUPLICATE_MASK: dup_mask_input,
          rconst.TRAIN_LABEL_KEY: label_input},
      outputs=softmax_logits)
  keras_model.summary()
  return keras_model
def run_ncf(_):
"""Run NCF training and eval with Keras."""
keras_utils.set_session_config(enable_xla=FLAGS.enable_xla)
if FLAGS.seed is not None:
print("Setting tf seed")
tf.random.set_seed(FLAGS.seed)
model_helpers.apply_clean(FLAGS)
if FLAGS.dtype == "fp16" and FLAGS.fp16_implementation == "keras":
policy = tf.keras.mixed_precision.experimental.Policy(
"mixed_float16",
loss_scale=flags_core.get_loss_scale(FLAGS, default_for_fp16="dynamic"))
tf.keras.mixed_precision.experimental.set_policy(policy)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
params = ncf_common.parse_flags(FLAGS)
params["distribute_strategy"] = strategy
if params["use_tpu"] and not params["keras_use_ctl"]:
logging.error("Custom training loop must be used when using TPUStrategy.")
return
batch_size = params["batch_size"]
time_callback = keras_utils.TimeHistory(batch_size, FLAGS.log_steps)
callbacks = [time_callback]
producer, input_meta_data = None, None
generate_input_online = params["train_dataset_path"] is None
if generate_input_online:
# Start d |
dongsenfo/pymatgen | pymatgen/analysis/chemenv/coordination_environments/tests/test_structure_environments.py | Python | mit | 12,696 | 0.005986 | #!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
import os
import json
import numpy as np
import shutil
from monty.tempfile import ScratchDir
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import StructureEnvironments
from pymatgen.analysis.chemenv.coordination_environments.structure_environments import LightStructureEnvironments
from pymatgen.core.periodic_table import Specie
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import MultiWeightsChemenvStrategy
se_files_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..",
'test_files', "chemenv", "structure_environments_files")
class StructureEnvironmentsTest(PymatgenTest):
def test_structure_environments(self):
with ScratchDir("."):
f = open("{}/{}".format(se_files_dir, 'se_mp-7000.json'), 'r')
dd = json.load(f)
f.close()
se = StructureEnvironments.from_dict(dd)
isite = 6
csm_and_maps_fig, csm_and_maps_subplot = se.get_csm_and_maps(isite=isite)
np.testing.assert_array_almost_equal(csm_and_maps_subplot.lines[0].get_xydata().flatten(), [0.0, 0.53499332])
np.testing.assert_array_almost_equal(csm_and_maps_subplot.lines[1].get_xydata().flatten(), [1.0, 0.47026441])
np.testing.assert_array_almost_equal(csm_and_maps_subplot.lines[2].get_xydata().flatten(), [2.0, 0.00988778])
environments_figure, environments_subplot = se.get_environments_figure(isite=isite)
np.testing.assert_array_almost_equal(np.array(environments_subpl | ot.patches[0].get_xy()),
[[1., 1.],
[1., 0.99301365],
[1.00179228, 0.99301365],
[1.00179228, 1.],
[1., 1.]])
np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[1].get_xy()),
| [[1., 0.99301365],
[1., 0.],
[1.00179228, 0.],
[1.00179228, 0.99301365],
[1., 0.99301365]])
np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[2].get_xy()),
[[1.00179228, 1.],
[1.00179228, 0.99301365],
[2.25, 0.99301365],
[2.25, 1.],
[1.00179228, 1.]])
np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[3].get_xy()),
[[1.00179228, 0.99301365],
[1.00179228, 0.],
[2.22376156, 0.],
[2.22376156, 0.0060837],
[2.25, 0.0060837],
[2.25, 0.99301365],
[1.00179228, 0.99301365]])
np.testing.assert_array_almost_equal(np.array(environments_subplot.patches[4].get_xy()),
[[2.22376156, 0.0060837],
[2.22376156, 0.],
[2.25, 0.],
[2.25, 0.0060837],
[2.22376156, 0.0060837]])
se.save_environments_figure(isite=isite, imagename='image.png')
self.assertTrue(os.path.exists('image.png'))
self.assertEqual(len(se.differences_wrt(se)), 0)
self.assertFalse(se.__ne__(se))
ce = se.ce_list[isite][4][0]
self.assertTrue(ce.__len__(), 4)
symbol, mingeom = ce.minimum_geometry(symmetry_measure_type='csm_wocs_ctwocc')
self.assertEqual(symbol, 'T:4')
self.assertAlmostEqual(mingeom['symmetry_measure'], 0.00988778424054)
np.testing.assert_array_almost_equal(mingeom['other_symmetry_measures']['rotation_matrix_wcs_csc'],
[[-0.8433079817973094, -0.19705747216466898, 0.5000000005010193],
[0.4868840909509757, 0.11377118475194581, 0.8660254034951744],
[-0.22754236927612112, 0.9737681809261427, 1.3979531202869064e-13]])
self.assertEqual(mingeom['detailed_voronoi_index'], {'index': 0, 'cn': 4})
self.assertAlmostEqual(mingeom['other_symmetry_measures']['scaling_factor_wocs_ctwocc'], 1.6270605877934026)
ce_string = ce.__str__()
self.assertTrue('csm1 (with central site) : 0.00988' in ce_string)
self.assertTrue('csm2 (without central site) : 0.00981' in ce_string)
self.assertTrue('csm1 (with central site) : 12.987' in ce_string)
self.assertTrue('csm2 (without central site) : 11.827' in ce_string)
self.assertTrue('csm1 (with central site) : 32.466' in ce_string)
self.assertTrue('csm2 (without central site) : 32.466' in ce_string)
self.assertTrue('csm1 (with central site) : 34.644' in ce_string)
self.assertTrue('csm2 (without central site) : 32.466' in ce_string)
mingeoms = ce.minimum_geometries(symmetry_measure_type='csm_wocs_ctwocc', max_csm=12.0)
self.assertEqual(len(mingeoms), 2)
mingeoms = ce.minimum_geometries(symmetry_measure_type='csm_wocs_ctwcc', max_csm=12.0)
self.assertEqual(len(mingeoms), 1)
mingeoms = ce.minimum_geometries(n=3)
self.assertEqual(len(mingeoms), 3)
ce2 = se.ce_list[7][4][0]
self.assertTrue(ce.is_close_to(ce2, rtol=0.01, atol=1e-4))
self.assertFalse(ce.is_close_to(ce2, rtol=0.0, atol=1e-8))
self.assertFalse(ce.__eq__(ce2))
self.assertTrue(ce.__ne__(ce2))
def test_light_structure_environments(self):
with ScratchDir("."):
f = open("{}/{}".format(se_files_dir, 'se_mp-7000.json'), 'r')
dd = json.load(f)
f.close()
se = StructureEnvironments.from_dict(dd)
strategy = SimplestChemenvStrategy()
lse = LightStructureEnvironments.from_structure_environments(structure_environments=se, strategy=strategy,
valences='undefined')
isite = 6
nb_set = lse.neighbors_sets[isite][0]
neighb_coords = [np.array([0.2443798, 1.80409653, -1.13218359]),
np.array([1.44020353, 1.11368738, 1.13218359]),
np.array([2.75513098, 2.54465207, -0.70467298]),
np.array([0.82616785, 3.65833945, 0.70467298])]
neighb_indices = [0, 3, 5, 1]
neighb_images = [[0, 0, -1], [0, 0, 0], [0, 0, -1], [0, 0, 0]]
np.testing.assert_array_almost_equal(neighb_coords, nb_set.neighb_coords)
np.testing.assert_array_almost_equal(neighb_coords, [s.coords for s in nb_set.neighb_sites])
nb_sai = nb_set.neighb_sites_and_indices
np.testing.assert_array_almost_equal(neighb_coords, [sai['site'].coords for sai in nb_sai])
np.testing.assert_array_almost_equal(neighb_indices, [sai['index'] for sai in nb_sai])
nb_iai = nb_set.neig |
samsam2310/LINE | line/api.py | Python | bsd-3-clause | 11,264 | 0.003107 | # -*- coding: utf-8 -*-
"""
line.client
~~~~~~~~~~~
LineClient for sending and receiving message from LINE server.
:copyright: (c) 2014 by Taehoon Kim.
:license: BSD, see LICENSE for more details.
"""
import rsa
import requests
try:
import simplejson as json
except ImportError:
import json
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
import sys
reload(sys)
sys.setdefaulte | ncoding("utf-8")
#from curve import CurveThrift
from curve import CurveThrift
from curve.ttypes import TalkException
from curve.ttypes import ToType, ContentType
class LineAPI(object):
"""This class is a wrapper of LINE API
"""
LINE_DOMAIN = "http://gd2.line.naver.jp"
LINE_HTTP_URL = LINE_DOMAIN + "/api/v4/TalkService.do"
LINE_HTTP_IN_URL = | LINE_DOMAIN + "/P4"
LINE_CERTIFICATE_URL = LINE_DOMAIN + "/Q"
LINE_SESSION_LINE_URL = LINE_DOMAIN + "/authct/v1/keys/line"
LINE_SESSION_NAVER_URL = LINE_DOMAIN + "/authct/v1/keys/naver"
CERT_FILE = ".line.crt"
ip = "127.0.0.1"
version = "5.1.2"
com_name = ""
revision = 0
certificate = ""
_headers = {}
def __init__(self):
object.__init__(self)
self._session = requests.session()
def ready(self):
"""
After login, make `client` and `client_in` instance
to communicate with LINE server
"""
raise Exception("Code is removed because of the request of LINE corporation")
def updateAuthToken(self):
"""
After login, update authToken to avoid expiration of
authToken. This method skip the PinCode validation step.
"""
if self.certificate:
self.login()
self.tokenLogin()
return True
else:
self.raise_error("You need to login first. There is no valid certificate")
def tokenLogin(self):
self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
self.transport.setCustomHeaders(self._headers)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self._client = CurveThrift.Client(self.protocol)
    def login(self):
        """Login to LINE server.

        Flow: fetch a session key + RSA public key for the chosen provider
        (LINE or NAVER), RSA-encrypt "len-prefixed session_key + id +
        password", then call loginWithIdentityCredentialForCertificate over
        Thrift. The reply's ``msg.type`` selects the follow-up step:
        1 = success, 2 = QR code required (unsupported), 3 = PinCode
        verification required.
        """
        if self.provider == CurveThrift.Provider.LINE: # LINE
            # NOTE(review): ``_get_json`` is not defined in the code shown
            # here (the public helper below is named ``get_json``) -- it is
            # presumably defined further down in this class; confirm.
            j = self._get_json(self.LINE_SESSION_LINE_URL)
        else: # NAVER
            j = self._get_json(self.LINE_SESSION_NAVER_URL)
        session_key = j['session_key']
        # Credential blob: each field is prefixed with its length byte.
        message = (chr(len(session_key)) + session_key +
                   chr(len(self.id)) + self.id +
                   chr(len(self.password)) + self.password).encode('utf-8')
        keyname, n, e = j['rsa_key'].split(",")
        # n and e come as hex strings.
        pub_key = rsa.PublicKey(int(n,16), int(e,16))
        crypto = rsa.encrypt(message, pub_key).encode('hex')
        self.transport = THttpClient.THttpClient(self.LINE_HTTP_URL)
        self.transport.setCustomHeaders(self._headers)
        self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
        self._client = CurveThrift.Client(self.protocol)
        # Best effort: reuse a previously saved certificate if one exists.
        # (The explicit f.close() inside the ``with`` block is redundant.)
        try:
            with open(self.CERT_FILE,'r') as f:
                self.certificate = f.read()
                f.close()
        except:
            self.certificate = ""
        msg = self._client.loginWithIdentityCredentialForCertificate(
                self.id, self.password, keyname, crypto, True, self.ip,
                self.com_name, self.provider, self.certificate)
        if msg.type == 1:
            # Immediate success: store certificate and auth token.
            self.certificate = msg.certificate
            self.authToken = self._headers['X-Line-Access'] = msg.authToken
        elif msg.type == 2:
            msg = "require QR code"
            self.raise_error(msg)
        elif msg.type == 3:
            # PinCode flow: the verifier goes into the X-Line-Access header
            # and the user must type the pin on their phone.
            self._headers['X-Line-Access'] = msg.verifier
            self._pinCode = msg.pinCode
            print "Enter PinCode '%s' to your mobile phone in 2 minutes"\
                    % self._pinCode
            # Blocks until the pin is entered; returns the final verifier.
            j = self.get_json(self.LINE_CERTIFICATE_URL)
            self.verifier = j['result']['verifier']
            msg = self._client.loginWithVerifierForCertificate(self.verifier)
            if msg.type == 1:
                if msg.certificate is not None:
                    # Persist the certificate so future logins skip the pin.
                    with open(self.CERT_FILE,'w') as f:
                        f.write(msg.certificate)
                        self.certificate = msg.certificate
                if msg.authToken is not None:
                    self.authToken = self._headers['X-Line-Access'] = msg.authToken
                    return True
                else:
                    return False
            else:
                msg = "Require device confirm"
                self.raise_error(msg)
                #raise Exception("Code is removed because of the request of LINE corporation")
        else:
            # Unknown type: assume the reply still carries a token.
            self.authToken = self._headers['X-Line-Access'] = msg.authToken
        return True
def get_json(self, url):
"""Get josn from given url with saved session and headers"""
return json.loads(self._session.get(url, headers=self._headers).text)
def _getProfile(self):
"""Get profile information
:returns: Profile object
- picturePath
- displayName
- phone (base64 encoded?)
- allowSearchByUserid
- pictureStatus
- userid
- mid # used for unique id for account
- phoneticName
- regionCode
- allowSearchByEmail
- email
- statusMessage
"""
return self._client.getProfile()
def _getAllContactIds(self):
"""Get all contacts of your LINE account"""
return self._client.getAllContactIds()
def _getBlockedContactIds(self):
"""Get all blocked contacts of your LINE account"""
return self._client.getBlockedContactIds()
def _getContacts(self, ids):
"""Get contact information list from ids
:returns: List of Contact list
- status
- capableVideoCall
- dispalyName
- settings
- pictureStatus
- capableVoiceCall
- capableBuddy
- mid
- displayNameOverridden
- relation
- thumbnailUrl
- createdTime
- facoriteTime
- capableMyhome
- attributes
- type
- phoneticName
- statusMessage
"""
if type(ids) != list:
msg = "argument should be list of contact ids"
self.raise_error(msg)
return self._client.getContacts(ids)
def _findAndAddContactsByMid(self, mid, seq=0):
"""Find and add contacts by Mid"""
return self._client.findAndAddContactsByMid(seq, mid)
def _findContactByUserid(self, userid):
"""Find contacts by Userid"""
return self._client.findContactByUserid(userid)
def _findAndAddContactsByUserid(self, userid, seq=0):
"""Find and add contacts by Userid"""
return self._client.findAndAddContactsByUserid(seq, userid)
def _findContactsByPhone(self, phones):
"""Find contacts by phone"""
return self._client.findContactsByPhone(phones)
def _findAndAddContactsByPhone(self, phones, seq=0):
"""Find and add contacts by phone"""
return self._client.findAndAddContactsByPhone(seq, phones)
def _findContactsByEmail(self, emails):
"""Find contacts by email"""
return self._client.findContactsByEmail(emails)
def _findAndAddContactsByEmail(self, emails, seq=0):
"""Find and add contacts by email"""
return self._client.findAndAddContactsByEmail(seq, emails)
def _createRoom(self, ids, seq=0):
"""Create a ch |
Prototype-X/Zabbix-Network-Weathermap | mapping.py | Python | gpl-3.0 | 14,264 | 0.002384 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# __author__ = 'maximus'
from PIL import Image, ImageDraw, ImageFont
from datetime import datetime
import math
import os
import logging
log = logging.getLogger(__name__)
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first call to ``SomeClass()`` builds the instance; every later call
    returns that same cached object.
    """
    _instances = {}
    def __call__(cls, *args, **kwargs):
        cached = cls._instances.get(cls)
        if cached is None:
            cached = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = cached
        return cached
class Palette(metaclass=Singleton):  # noqa
    """Process-wide colour palette for traffic-load drawing.

    ``palette`` is the mutable working list; ``palette_default`` keeps the
    pristine colours so reset() can restore them.
    """
    def __init__(self):
        self.palette_default = ('#908C8C', '#FFFFFF', '#8000FF', '#0000FF', '#00EAEA',
                                '#00FF00', '#FFFF00', '#FF9933', '#FF0000')
        self.palette = list(self.palette_default)
        log.debug('Object singleton Palette created')
    def reset(self):
        """Restore the working palette to the default colours."""
        self.palette = list(self.palette_default)
class Table(object):
    """Legend table ("Traffic Load") drawn on the weathermap.

    Renders a column of colour swatches -- one per load bracket, colours
    index-aligned with the percentage captions -- plus an optional
    date/time box underneath.
    """
    def __init__(self, fontfile, x=0, y=0, palette=Palette().palette_default, fontsize=12, dt=True):
        # (x, y) is the top-left corner of the legend box.
        self.x = x
        self.y = y
        # Pixel size of one swatch cell.
        self.width_palet = 30
        self.height_palet = 20
        # Top of the swatch column: one cell height below the title row.
        self.yt = y + self.height_palet
        self.indent_x = 5
        self.indent_y = 3
        # NOTE: the default palette is evaluated once at import time;
        # Palette is a singleton, so all tables share that tuple.
        self.palette = palette
        self.text_label = 'Traffic Load'
        # Swatch rectangles, filled in by table_xy() below.
        self.rect_xy = []
        self.table_xy()
        # Load-bracket captions; index-aligned with ``palette``.
        self.text = ('0-0%', '0-1%', '1-10%', '10-25%', '25-40%', '40-55%', '55-70%', '70-85%', '85-100%')
        self.fontfile = fontfile
        self.fontcolor = 'black'
        self.fontsize = fontsize
        self.font = ImageFont.truetype(self.fontfile, size=self.fontsize)
        # When True, draw_table() also renders the date/time box.
        self.dt = dt
        self.dt_obj = None
        self.date_now = None
        self.time_now = None
        log.debug('Object Table created')
    def table_xy(self):
        """Precompute 11 stacked cell rectangles (9 colour rows + 2 date/time rows)."""
        for i in range(0, 11):
            x1 = self.x + self.indent_x
            y1 = self.yt + self.indent_y + self.height_palet * i
            x2 = self.x + self.indent_x + self.width_palet
            y2 = self.yt + self.height_palet * (i + 1)
            self.rect_xy.append((x1, y1, x2, y2))
    def draw_table(self, draw):
        """Draw the legend onto *draw* (a PIL ImageDraw)."""
        # White background sized to enclose the 9 colour rows (+60 px for text).
        draw.rectangle((self.x, self.y, self.rect_xy[8][2] + 60, self.rect_xy[8][3] + 5), outline='black', fill='white')
        draw.text((self.x + 5, self.y + 5), self.text_label, fill='black', font=self.font)
        for i in range(0, 9):
            draw.rectangle(self.rect_xy[i], fill=self.palette[i], outline=self.palette[i])
            draw.text((self.rect_xy[i][2] + 2, self.rect_xy[i][1] + 2), self.text[i], fill='black', font=self.font)
        if self.dt:
            self.draw_datetime(draw)
    def draw_datetime(self, draw):
        """Draw the current time and date in the two rows below the legend."""
        self.dt_obj = datetime.now()
        self.date_now = datetime.strftime(self.dt_obj, "%d.%m.%Y")
        self.time_now = datetime.strftime(self.dt_obj, "%H:%M:%S")
        draw.rectangle((self.x, self.rect_xy[9][1] + 5, self.rect_xy[10][2] + 60,
                        self.rect_xy[10][3] + 5), outline='black', fill='white')
        draw.text((self.rect_xy[9][0] + 14, self.rect_xy[9][1] + 8), self.time_now, fill='black', font=self.font)
        draw.text((self.rect_xy[10][0] + 8, self.rect_xy[10][1] + 4), self.date_now, fill='black', font=self.font)
        log.debug('Object Table draw')
class Label(object):
    """Text label with a background box, positioned around a centre point."""
    def __init__(self, fontfile, bgcolor='white', fontcolor='black', fontsize=10, outline='black', label=None,
                 point=None):
        self.outline = outline
        self.bgcolor = bgcolor
        self.fontcolor = fontcolor
        self.fontsize = fontsize
        self.fontfile = fontfile
        self.font = ImageFont.truetype(self.fontfile, size=self.fontsize)
        self.name = str(label)
        # Bounding box [x1, y1, x2, y2] of the label background.
        self.points = [0, 0, 0, 0]
        # Top-left point where the text itself is drawn.
        self.point_name = [0, 0]
        # Approximate per-character pixel width for each supported font size.
        self.font_width = {8: 6, 10: 7.4, 12: 8, 14: 9, 16: 11, 18: 12, 20: 13}
        try:
            self.font_width[self.fontsize]
        except KeyError:
            # Unsupported size: fall back to 10 for the width table.
            # NOTE(review): self.font was already created with the original
            # fontsize above, so the rendered glyphs and the computed box can
            # disagree in this branch -- confirm whether the font should be
            # rebuilt here.
            self.fontsize = 10
        if point:
            self.label_xy(point)
        log.debug('Object Label created')
    def label_xy(self, point):
        """Compute the background box and the text origin centred on *point*.

        font_dict = {fontsize:symbol width}
        symbol height = fontsize
        :param point: coordinates where label show"""
        count = len(self.name)
        if count:
            # Box spans count * char_width horizontally and fontsize
            # vertically, centred on *point* (+/-1 px trim on the sides).
            self.points[0] = int(point[0] - count * self.font_width[self.fontsize] / 2 + 1)
            self.points[1] = int(point[1] - self.fontsize/2)
            self.points[2] = int(point[0] + count * self.font_width[self.fontsize] / 2 - 1)
            self.points[3] = int(point[1] + self.fontsize/2 + 1)
            # Text starts 2 px right of the box's left edge.
            self.point_name[0] = self.points[0] + 2
            self.point_name[1] = self.points[1]
class Node(object):
    """ Node (device) on a map"""
    def __init__(self, fontfile, icon_path, x=0, y=0, label=None, icon=None, fontsize=10):
        # (x, y) is the node's centre on the map.
        self.x = x
        self.y = y
        self.icon = icon
        self.icon_path = icon_path
        # Top-left corner where the icon image is pasted (None if no icon).
        self.icon_point = None
        self.label_obj = None
        if label:
            # The label is centred on the node position.
            self.label_obj = Label(label=label, point=[x, y], fontfile=fontfile, fontsize=fontsize)
        if self.icon:
            self.icon_point = self.icon_xy()
        log.debug('Object Node created')
    def icon_xy(self):
        """Return the [x, y] top-left corner that centres the icon on the node.

        The icon name is first resolved relative to ``icon_path``; if no such
        file exists it is treated as a path in its own right.
        """
        if os.path.isfile(self.icon_path + '/' + self.icon):
            im = Image.open(self.icon_path + '/' + self.icon)
        else:
            im = Image.open(self.icon)
        width, height = im.size
        x = int(self.x - width/2)
        y = int(self.y - height/2)
        im.close()
        return [x, y]
class Link(object):
""" A line between two Nodes. The line contains two arrows: one for an input
value and one for an output value"""
    def __init__(self, fontfile, node_a, node_b, bandwidth=1000, width=5, palette=Palette().palette_default,
                 fontsize=10):
        # Endpoints of the link on the map.
        self.node_a = node_a
        self.node_b = node_b
        self.fontfile = fontfile
        self.fontsize = fontsize
        # Link capacity used to turn bps readings into load percentages.
        self.bandwidth = bandwidth
        self.width = float(width)
        # NOTE: default palette evaluated once at import time (Palette is a
        # singleton tuple), so all links share it unless overridden.
        self.palette = palette
        # Arrow polygons for the two directions; the helpers are defined
        # further down in this class.
        self.input_points = self._get_input_arrow_points()
        self.output_points = self._get_output_arrow_points()
        # Filled in later by data(): arrow colours and throughput labels.
        self.incolor = None
        self.outcolor = None
        self.in_label = None
        self.out_label = None
        log.debug('Object Link created')
@staticmethod
def _middle(x, y):
""" Return a middle point coordinate between 2 given points """
return int(x+(y-x)/2)
@staticmethod
def _new_x(a, b, x, y):
""" Calculate "x" coordinate """
return int(math.cos(math.atan2(y, x) + math.atan2(b, a))*math.sqrt(x*x+y*y))
@staticmethod
def _new_y(a, b, x, y):
""" Calculate "y" coordinate """
return int(math.sin(math.atan2(y, x) + math.atan2(b, a))*math.sqrt(x*x+y*y))
    def data(self, in_bps=0000, out_bps=749890567):
        """Update arrow colours and labels from input/output rates in bits/s.

        NOTE(review): the default ``out_bps=749890567`` looks like a leftover
        debug value -- confirm callers always pass real readings.
        """
        # Work in kilobits/s from here on.
        in_kps = in_bps/1000
        out_kps = out_bps/1000
        # Colour the two arrows according to load (helper defined below).
        self._fill_arrow(in_kps, out_kps)
        # Human-readable rate strings such as '1.2M' / '3.4G'.
        in_name, out_name = self._name(in_kps, out_kps)
        in_point = self._get_input_label_point()
        out_point = self._get_output_label_point()
        self.in_label = Label(self.fontfile, label=in_name, point=in_point, fontsize=self.fontsize)
        self.out_label = Label(self.fontfile, label=out_name, point=out_point, fontsize=self.fontsize)
@staticmethod
def _name(in_kps, out_kps):
if 0 <= in_kps <= 999:
in_label = str(round(in_kps, 2)) + 'K'
elif 999 < in_kps <= 999999:
in_mps = in_kps/1000
in_label = str(round(in_mps, 2)) + 'M'
elif in_kps > 999999:
in_gps = in_kps/1000000
in_label = str(round(in_gps, 2)) + 'G'
else:
in_label = 'ERR'
if 0 <= out_kps <= 999:
out_label = str(round(out_kps, 2)) + 'K'
elif 999 < out_kps <= 999999:
out_mps = out_kps/1000
out_label = str(round(out_mps, 2)) + 'M'
elif out_kps > 999999:
out_gps = out_kps/1000000
out_label = str(round(out_gps, 2)) + 'G'
else:
|
VadimMalykh/courses | deeplearning1/nbs/vgg16.py | Python | apache-2.0 | 4,286 | 0.008866 | from __future__ import division, print_function
import os, json
from glob import glob
import numpy as np
from scipy import misc, ndi | mage
from scipy.ndimage.interpolation import zoom
import keras
from | keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.preprocessing import image
# Per-channel ImageNet mean (RGB order), shaped to broadcast over
# (batch, 3, H, W) image tensors.
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))


def vgg_preprocess(x):
    """Subtract the ImageNet channel means and flip RGB -> BGR (axis 1)."""
    centered = x - vgg_mean
    return centered[:, ::-1]
class Vgg16():
    """The VGG 16 Imagenet model"""
    def __init__(self):
        # NOTE(review): platform.ai no longer serves these files -- confirm
        # the weights/classes URL is still reachable before relying on it.
        self.FILE_PATH = 'http://www.platform.ai/models/'
        self.create()
        self.get_classes()
    def get_classes(self):
        """Load (downloading if needed) the ImageNet index -> class-name list."""
        fname = 'imagenet_class_index.json'
        if not os.path.exists(fname):
            fname = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')
        with open(fname) as f:
            class_dict = json.load(f)
        # class_dict maps "i" -> [wnid, human-readable name]; keep the names.
        self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
    def predict(self, imgs, details=False):
        """Return (top probabilities, top class indices, top class names) for *imgs*."""
        all_preds = self.model.predict(imgs)
        idxs = np.argmax(all_preds, axis=1)
        # Probability of the argmax class for each image.
        preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]
        classes = [self.classes[idx] for idx in idxs]
        return np.array(preds), idxs, classes
    def ConvBlock(self, layers, filters):
        """Append ``layers`` padded 3x3 conv layers plus one 2x2 max-pool."""
        model = self.model
        for i in range(layers):
            model.add(ZeroPadding2D((1, 1)))
            model.add(Conv2D(filters, (3, 3), activation='relu'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    def FCBlock(self):
        """Append one 4096-unit fully-connected layer with 50% dropout."""
        model = self.model
        model.add(Dense(4096, activation='relu'))
        model.add(Dropout(0.5))
    def create(self):
        """Build the VGG16 architecture and load pretrained ImageNet weights."""
        model = self.model = Sequential()
        # Channels-first input; preprocessing (mean-subtract + RGB->BGR) is
        # baked into the graph as a Lambda layer.
        model.add(Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224)))
        self.ConvBlock(2, 64)
        self.ConvBlock(2, 128)
        self.ConvBlock(3, 256)
        self.ConvBlock(3, 512)
        self.ConvBlock(3, 512)
        model.add(Flatten())
        self.FCBlock()
        self.FCBlock()
        model.add(Dense(1000, activation='softmax'))
        fname = 'vgg16.h5'
        if not os.path.exists(fname):
            fname = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')
        model.load_weights(fname)
    def get_batches(self, path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):
        """Return a directory iterator over *path* yielding 224x224 batches.

        NOTE: the default ``gen`` is constructed once at import time and
        shared across calls (mutable-default pattern).
        """
        return gen.flow_from_directory(path, target_size=(224,224),
                class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
    def ft(self, num):
        """Replace the final softmax with a fresh ``num``-way layer; freeze the rest."""
        model = self.model
        model.pop()
        for layer in model.layers: layer.trainable=False
        model.add(Dense(num, activation='softmax'))
        self.compile()
    def finetune(self, batches):
        """Adapt the model's output layer and class names to *batches*.

        NOTE(review): ``batches.num_class`` -- newer Keras iterators expose
        ``num_classes``; confirm against the Keras version pinned here.
        """
        self.ft(batches.num_class)
        classes = list(iter(batches.class_indices))
        # Invert class_indices (name -> index) into an index-ordered name list.
        for c in batches.class_indices:
            classes[batches.class_indices[c]] = c
        self.classes = classes
    def compile(self, lr=0.001):
        """Compile with Adam and categorical cross-entropy."""
        self.model.compile(optimizer=Adam(lr=lr),
                loss='categorical_crossentropy', metrics=['accuracy'])
    def fit_data(self, trn, labels, val, val_labels, nb_epoch=1, batch_size=64):
        """Train on in-memory arrays with validation data."""
        self.model.fit(trn, labels, nb_epoch=nb_epoch,
                validation_data=(val, val_labels), batch_size=batch_size)
    def fit(self, batches, val_batches, nb_epoch=1):
        """Train from directory iterators, one pass per epoch over each."""
        self.model.fit_generator(batches, steps_per_epoch=batches.samples/batches.batch_size, epochs=nb_epoch,
                                 validation_data=val_batches, validation_steps=val_batches.samples/val_batches.batch_size)
    def test(self, path, batch_size=8):
        """Predict over a directory of images; returns (iterator, predictions)."""
        test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
        return test_batches, self.model.predict_generator(test_batches, test_batches.samples/batch_size)
|
kingctan/django-health-check | health_check/__init__.py | Python | mit | 1,966 | 0.002035 | # This file is heavily inspired from the django admin autodiscover
# Version components rendered by get_version() below; 'releaselevel' must be
# one of 'alpha', 'beta' or 'final' (get_version asserts this).
__version_info__ = {
    'major': 1,
    'minor': 0,
    'micro': 2,
    'releaselevel': 'final',
    'serial': 0
}
def autodiscover():
    """
    Auto-discover INSTALLED_APPS plugin_health_check modules and fail
    silently when not present. This forces an import on them to register
    any health-check plugins they may want.
    """
    import copy
    from django.conf import settings
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule

    from health_check.plugins import plugin_dir

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's plugin_health_check module.
        try:
            before_import_registry = copy.copy(plugin_dir._registry)
            import_module('%s.plugin_health_check' % app)
        except Exception:
            # FIX: was a bare ``except:``, which also swallowed SystemExit
            # and KeyboardInterrupt raised during the import.
            # Reset the plugin registry to the state before the last import
            # as this import will have to reoccur on the next request and
            # this could raise NotRegistered and AlreadyRegistered
            # exceptions (see #8245).
            plugin_dir._registry = before_import_registry

            # Decide whether to bubble up this error. If the app just
            # doesn't have a plugin_health_check module, we can ignore the
            # error attempting to import it, otherwise we want it to bubble
            # up.
            if module_has_submodule(mod, 'plugin_health_check'):
                raise
def get_ | version(short=False):
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0], __version_info__['serial']))
return ''.join(vers)
__version__ = get_version()
|
tensorflow/lingvo | lingvo/core/gshard_layers_test.py | Python | apache-2.0 | 8,710 | 0.007577 | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Test code for gshard_layers."""
from lingvo import compat as tf
from lingvo.core import gshard_builder
from lingvo.core import gshard_layers
from lingvo.core import test_utils
import numpy as np
FLAGS = tf.flags.FLAGS
class CausalDepthwiseConv1DLayerTest(test_utils.TestCase):
  """Checks CausalDepthwiseConv1DLayer against the MoEBuilder reference op."""

  def _GetRefParams(self, kernel_size, dim):
    # Reference implementation: DepthwiseConvAutoregressive from MoEBuilder.
    builder = gshard_builder.MoEBuilder.Params().Set(
        model_dim=dim).Instantiate()
    return builder.DepthwiseConvAutoregressive('conv', kernel_size)
  def _GetParams(self, kernel_size, dim):
    # Layer under test; compatible_with_mtf_ckpt matches the reference's
    # variable naming so both can load the same checkpoints.
    p = gshard_layers.CausalDepthwiseConv1DLayer.Params().Set(
        name='conv',
        kernel_size=kernel_size,
        model_dims=dim,
        compatible_with_mtf_ckpt=True)
    return p
  def _GetInputs(self, batch, seq_len, dim):
    # Fresh (unseeded) random float32 [batch, seq_len, dim] tensor.
    np.random.seed(None)
    return tf.convert_to_tensor(
        np.random.rand(batch, seq_len, dim).astype(np.float32))
  def testEqualToDepthwiseConvAutoregressive(self):
    """Both implementations must produce identical outputs on random input."""
    b, seq_len, d, k = 2, 8, 4, 3
    # Separate variable scopes so the two layers get distinct variables.
    with tf.variable_scope('ref'):
      ref_l = self._GetRefParams(k, d).Instantiate()
    with tf.variable_scope('act'):
      exp_l = self._GetParams(k, d).Instantiate()
    inputs = self._GetInputs(b, seq_len, d)
    # [b, t, d]
    ref_out = ref_l.FProp(ref_l.theta, inputs)
    # [b, t, d]
    act_out = exp_l.FProp(exp_l.theta, inputs)
    init_op = tf.global_variables_initializer()
    with self.session(use_gpu=False) as sess:
      sess.run(init_op)
      expected, actual = sess.run([ref_out, act_out])
      self.assertAllClose(expected, actual)
class Conv1DStateLayerTest(test_utils.TestCase):
  def _GetParams(self, kernel_size, dims):
    """Returns conv layer params with a Conv1DStateLayer attached for decoding."""
    p = gshard_layers.CausalDepthwiseConv1DLayer.Params().Set(
        name='conv', kernel_size=kernel_size, model_dims=dims)
    # The state layer buffers past inputs ([batch, time] + dims) so FProp can
    # be driven one step at a time during incremental decoding.
    p.state_layer = gshard_layers.Conv1DStateLayer.Params().Set(
        shape=[None, None] + dims)
    return p
def _GetInputs(self, batch, seq_len, dim):
np.random.seed(None)
np_inputs = np.random.rand(batch, seq_len, dim).astype(np.float32)
tf.logging.info(f'np_inputs: {np_inputs}')
return tf.convert_to_tensor(np_inputs)
  def testSingleStep(self):
    """Single-step decoding must match one full-sequence FProp exactly."""
    b, seq_len, dim, k, beam = 2, 8, 2, 3, 1
    inputs = self._GetInputs(b, seq_len * beam, dim)
    # Normal Fprop with a len=seqlen sequence.
    l = self._GetParams(k, [dim]).Instantiate()
    outputs = l.FProp(l.theta, inputs)
    # Decoding state for [batch, beam, kernel] buffered steps.
    state0 = gshard_layers.StateLayer.InitState(l, [b, beam, k])
    tf.logging.info(f'state0: {repr(state0)}')
    all_outputs = []
    state_t = state0
    theta_t = l.theta.DeepCopy()
    # Drive the layer one timestep at a time, threading state through theta.
    for i in range(seq_len):
      inputs_t = inputs[:, i:i + 1 * beam, :]
      # Copies state to theta.
      theta_t = gshard_layers.StateLayer.UpdateTheta(l, theta_t, state_t, t=i)
      tf.logging.info(f'theta_{i}: {repr(theta_t)}')
      # Updates theta inplace.
      out_t = l.FProp(theta_t, inputs_t)
      # Copies theta to state.
      state_t = gshard_layers.StateLayer.UpdateState(l, theta_t, state_t)
      tf.logging.info(f'state_{i}: {repr(state_t)}')
      all_outputs.append(out_t)
    # seq_len steps of FProp(), each with len=1.
    concat_step_outputs = tf.concat(all_outputs, axis=1)
    init_op = tf.global_variables_initializer()
    with self.session(use_gpu=False) as sess:
      sess.run(init_op)
      expected, actual = sess.run([outputs, concat_step_outputs])
      print(f'expected: {expected}')
      print(f'actual: {actual}')
      self.assertAllClose(expected, actual)
  def testSingleStepRank4(self):
    """Same as testSingleStep but with rank-4 [b, t, dim1, dim2] inputs."""
    b, seq_len, dim1, dim2, k, beam = 2, 8, 2, 7, 3, 1
    inputs = self._GetInputs(b, seq_len * beam, dim1 * dim2)
    # Fold the flat feature dim into two model dims.
    inputs = tf.reshape(inputs, (b, seq_len * beam, dim1, dim2))
    l = self._GetParams(k, [dim1, dim2]).Instantiate()
    # Normal Fprop with a len=seq_len sequence.
    outputs = l.FProp(l.theta, inputs)
    state0 = gshard_layers.StateLayer.InitState(l, [b, beam, k])
    tf.logging.info(f'state0: {repr(state0)}')
    all_outputs = []
    state_t = state0
    theta_t = l.theta.DeepCopy()
    # Drive the layer one timestep at a time, threading state through theta.
    for i in range(seq_len):
      inputs_t = inputs[:, i:i + 1 * beam, :]
      # Copies state to theta.
      theta_t = gshard_layers.StateLayer.UpdateTheta(l, theta_t, state_t, t=i)
      tf.logging.info(f'theta_{i}: {repr(theta_t)}')
      # Updates theta inplace.
      out_t = l.FProp(theta_t, inputs_t)
      # Copies theta to state.
      state_t = gshard_layers.StateLayer.UpdateState(l, theta_t, state_t)
      tf.logging.info(f'state_{i}: {repr(state_t)}')
      all_outputs.append(out_t)
    # seq_len steps of FProp(), each with len=1.
    concat_step_outputs = tf.concat(all_outputs, axis=1)
    init_op = tf.global_variables_initializer()
    with self.session(use_gpu=False) as sess:
      sess.run(init_op)
      expected, actual = sess.run([outputs, concat_step_outputs])
      print(f'expected: {expected}')
      print(f'actual: {actual}')
      self.assertAllClose(expected, actual)
def testPrefix(self):
b, prefix_len, seq_len, dim1, dim2, k, beam = 2, 5, 15, 2, 7, 3, 4
inputs = self._GetInputs(b, seq_len * beam, dim1 * dim2)
inputs = tf.reshape(inputs, (b, seq_len * beam, dim1, dim2))
prefix = self._GetInputs(b, prefix_len, dim1 * dim2)
prefix = tf.reshape(prefix, (b, prefix_len, dim1, dim2))
prefix_and_inputs = tf.reshape(prefix, (b, 1, prefix_len, dim1, dim2))
prefix_and_inputs = tf.tile(prefix_and_inputs, (1, beam, 1, 1, 1))
prefix_and_inputs = tf.concat(
[prefix_and_inputs,
tf.reshape(inputs, (b, beam, seq_len, dim1, dim2))],
axis=2)
prefix_and_inputs = tf.reshape(prefix_and_inputs,
(b * beam,
(prefix_len + seq_len), dim1, dim2))
with tf.variable_scope('model'):
l_no_prefix = self._GetParams(k, [dim1, dim2]).Instantiate()
with tf.variable_scope('model', reuse=True):
l = self._GetParams(k, [dim1, dim2]).Instantiate()
prefix_expected_outputs = l_no_prefix.FProp(l.theta, prefix)
decode_expected_outputs = tf.reshape(
l_no_prefix.FProp(l.theta, prefix_and_inputs)[:, prefix_len:],
(b, beam, seq_len, dim1, dim2))
state0 = gshard_layers.StateLayer.InitState(l, [b, beam, k])
tf.logging.info(f'state0: {repr(state0)}')
state_prefix = state0
theta_prefix = l.theta.DeepCopy()
theta_prefix = gshard_layers.StateLayer.UpdateTheta(
l, theta_prefix, state_prefix, t=0)
tf.logging.info(f'theta_{0}: {repr(theta_prefix)}')
prefix_actual_outputs = l.FProp(theta_prefix, prefix)
state_prefix = gshard_layers.StateLayer.UpdateState(l, theta_prefix,
state_prefix)
tf.logging.info(f'state_{0}: {repr(state_prefix)}')
decode_outputs = []
state_t = state0
theta_t = l.theta.DeepCopy()
for i in range(seq_len):
input | s_t = tf.reshape(inputs, (b, beam, seq_len, dim1, dim2))[:, :, i]
# Copies state to theta.
theta_t = gshard_layers.StateLayer.UpdateTheta(l, thet | a_t, state_t, t=i)
tf.logging.info(f'theta_{i}: {repr(theta_t)}')
# Updates theta inplace.
out_t = l.FProp(theta_t, inputs_t)
# Copies theta to state.
state_t = gshard_layers.StateLayer.UpdateState(l, theta_t, state_t)
tf.logging.info(f'state_{i}: {repr(state_t)}')
decode_outputs.append(tf.expand_dims(out_t, axis=2))
# seq_len steps of FProp(), each with len=1.
decode_actual_outputs = tf |
ganga-devs/ganga | ganga/GangaCore/test/GPI/TestExceptions.py | Python | gpl-3.0 | 2,047 | 0.00342 | from | GangaCore.testlib.decorators import add_config
@add_config([('TestingFramework', 'AutoCleanup', False)])
def test_all_exceptions(gpi):
    """Create all exceptions and make sure they behave correctly"""
    import GangaCore.Core.exceptions
    test_str = "My Test Error"
    def exception_test(err_name):
        """Run tests on the given exception"""
        # Look the class up by name, construct it with a message, and check
        # the message survives into str().
        err_type = getattr(GangaCore.Core.exceptions, err_name)
        err_obj = err_type(test_str)
        assert test_str in str(err_obj)
    # Exceptions whose constructor takes a single message argument.
    err_list = ["GangaException", "GangaFileError", "PluginError", "ApplicationConfigurationError",
                "ApplicationPrepareError", "IncompleteJobSubmissionError", "IncompleteKillError", "JobManagerError",
                "GangaAttributeError", "GangaValueError", "GangaIOError", "SplitterError", "ProtectedAttributeError",
                "ReadOnlyObjectError", "TypeMismatchError", "SchemaError", "SchemaVersionError", "CredentialsError",
                "CredentialRenewalError", "InvalidCredentialError", "ExpiredCredentialError"]
    for e in err_list:
        exception_test(e)
    # check the BackendError: takes (backend_name, message) and must render both.
    from GangaCore.Core.exceptions import BackendError
    err = BackendError("TestBackend", test_str)
    assert "TestBackend" in str(err)
    assert test_str in str(err)
    # check the InaccessibleObjectError: wraps a repository, an object id and
    # an underlying exception; all three must appear in the message.
    from GangaCore.Core.exceptions import InaccessibleObjectError, JobManagerError
    from GangaCore.Core.GangaRepository import getRegistry
    err = InaccessibleObjectError(getRegistry('jobs').repository, 0, JobManagerError("My JobManagerError"))
    assert "jobs" in str(err)
    assert "#0" in str(err)
    assert "My JobManagerError" in str(err)
    # check the RepositoryError
    from GangaCore.Core.exceptions import RepositoryError
    from GangaCore.Core.GangaRepository import getRegistry
    RepositoryError(getRegistry('jobs').repository, test_str)
    # Construct another to check the except clause in the exception is called
    RepositoryError(getRegistry('jobs').repository, test_str)
|
mrmrwat/pylsner | pylsner/gui.py | Python | mit | 2,624 | 0.001143 | import cairo
from gi.repository import Gtk
from gi.repository import Gdk
from pylsner import plugin
class Window(Gtk.Window):
def __init__(self):
super(Window, self).__init__(skip_pager_hint=True,
skip_taskbar_hint=True,
)
self.set_title('Pylsner')
screen = self.get_screen()
self.width = screen.get_width()
self.height = screen.get_height()
self.set_size_request(self.width, self.height)
self.set_position(Gtk.WindowPosition.CENTER)
rgba = screen.get_rgba_visual()
self.set_visual(rgba)
self.override_background_color(Gtk.StateFlags.NORMAL,
Gdk.RGBA(0, 0, 0, 0),
)
self.set_wmclass('pylsner', 'pylsner')
self.set_type_hint(Gdk.WindowTypeHint.DOCK)
self.stick()
self.set_keep_below(True)
drawing_a | rea = Gtk.DrawingArea()
drawing_area. | connect('draw', self.redraw)
self.refresh_cnt = 0
self.add(drawing_area)
self.connect('destroy', lambda q: Gtk.main_quit())
self.widgets = []
self.show_all()
def refresh(self, force=False):
self.refresh_cnt += 1
if self.refresh_cnt >= 60000:
self.refresh_cnt = 0
redraw_required = False
for wid in self.widgets:
if (self.refresh_cnt % wid.metric.refresh_rate == 0) or force:
wid.refresh()
redraw_required = True
if redraw_required:
self.queue_draw()
return True
def redraw(self, _, ctx):
ctx.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
for wid in self.widgets:
wid.redraw(ctx)
class Widget:
def __init__(self,
name='default',
metric={'plugin': 'time'},
indicator={'plugin': 'arc'},
fill={'plugin': 'rgba_255'},
):
self.name = name
MetricPlugin = plugin.load_plugin('metrics', metric['plugin'])
self.metric = MetricPlugin(**metric)
IndicatorPlugin = plugin.load_plugin('indicators', indicator['plugin'])
self.indicator = IndicatorPlugin(**indicator)
FillPlugin = plugin.load_plugin('fills', fill['plugin'])
self.fill = FillPlugin(**fill)
def refresh(self):
self.metric.refresh()
self.fill.refresh(self.metric.value)
def redraw(self, ctx):
ctx.set_source(self.fill.pattern)
self.indicator.redraw(ctx, self.metric.value)
|
apache/libcloud | libcloud/common/gandi_live.py | Python | apache-2.0 | 7,334 | 0 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gandi Live driver base classes
"""
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import ProviderError
from libcloud.utils.py3 import httplib
__all__ = [
"API_HOST",
"GandiLiveBaseError",
"JsonParseError",
"ResourceNotFoundError",
"InvalidRequestError",
"ResourceConflictError",
"GandiLiveResponse",
"GandiLiveConnection",
"BaseGandiLiveDriver",
]
API_HOST = "dns.api.gandi.net"
class GandiLiveBaseError(ProviderError):
"""
Exception class for Gandi Live driver
"""
pass
class JsonParseError(GandiLiveBaseError):
pass
# Example:
# {
# "code": 404,
# "message": "Unknown zone",
# "object": "LocalizedHTTPNotFound",
# "cause": "Not Found"
# }
class ResourceNotFoundError(GandiLiveBaseError):
pass
# Example:
# {
# "code": 400,
# "message": "zone or zone_uuid must be set",
# "object": "HTTPBadRequest",
# "cause": "No zone set.",
# "errors": [
# {
# "location": "body",
# "name": "zone_uuid",
# "description": "\"FAKEUUID\" is not a UUID"
# }
# ]
# }
class InvalidRequestError(GandiLiveBaseError):
pass
# Examples:
# {
# "code": 409,
# "message": "Zone Testing already exists",
# "object": "HTTPConflict",
# "cause": "Duplicate Entry"
# }
# {
# "code": 409,
# "message": "The domain example.org already exists",
# "object": "HTTPConflict",
# "cause": "Duplicate Entry"
# }
# {
# "code": 409,
# "message": "This zone is still used by 1 domains",
# "object": "HTTPConflict",
# "cause": "In use"
# }
class ResourceConflictError(GandiLiveBaseError):
pass
class GandiLiveResponse(JsonResponse):
"""
A Base Gandi Live Response class to derive from.
"""
def success(self):
"" | "
Determine if our request was successful.
For the Gandi Live response class, tag all responses as successful and
raise appropriate Exceptions from parse_body.
:return: C | {True}
"""
return True
def parse_body(self):
"""
Parse the JSON response body, or raise exceptions as appropriate.
:return: JSON dictionary
:rtype: ``dict``
"""
json_error = False
try:
body = json.loads(self.body)
except Exception:
# If there is both a JSON parsing error and an unsuccessful http
# response (like a 404), we want to raise the http error and not
# the JSON one, so don't raise JsonParseError here.
body = self.body
json_error = True
# Service does not appear to return HTTP 202 Accepted for anything.
valid_http_codes = [
httplib.OK,
httplib.CREATED,
]
if self.status in valid_http_codes:
if json_error:
raise JsonParseError(body, self.status)
else:
return body
elif self.status == httplib.NO_CONTENT:
# Parse error for empty body is acceptable, but a non-empty body
# is not.
if len(body) > 0:
msg = '"No Content" response contained content'
raise GandiLiveBaseError(msg, self.status)
else:
return {}
elif self.status == httplib.NOT_FOUND:
message = self._get_error(body, json_error)
raise ResourceNotFoundError(message, self.status)
elif self.status == httplib.BAD_REQUEST:
message = self._get_error(body, json_error)
raise InvalidRequestError(message, self.status)
elif self.status == httplib.CONFLICT:
message = self._get_error(body, json_error)
raise ResourceConflictError(message, self.status)
else:
message = self._get_error(body, json_error)
raise GandiLiveBaseError(message, self.status)
# Errors are not described at all in Gandi's official documentation.
# It appears when an error arises, a JSON object is returned along with
# an HTTP 4xx class code. The object is structured as:
# {
# code: <code>,
# object: <object>,
# message: <message>,
# cause: <cause>,
# errors: [
# {
# location: <error-location>,
# name: <error-name>,
# description: <error-description>
# }
# ]
# }
# where
# <code> is a number equal to the HTTP response status code
# <object> is a string with some internal name for the status code
# <message> is a string detailing what the problem is
# <cause> is a string that comes from a set of succinct problem summaries
# errors is optional; if present:
# <error-location> is a string for which part of the request to look in
# <error-name> is a string naming the parameter
# <error-description> is a string detailing what the problem is
# Here we ignore object and combine message and cause along with an error
# if one or more exists.
def _get_error(self, body, json_error):
"""
Get the error code and message from a JSON response.
Incorporate the first error if there are multiple errors.
:param body: The body of the JSON response dictionary
:type body: ``dict``
:return: String containing error message
:rtype: ``str``
"""
if not json_error and "cause" in body:
message = "%s: %s" % (body["cause"], body["message"])
if "errors" in body:
err = body["errors"][0]
message = "%s (%s in %s: %s)" % (
message,
err.get("location"),
err.get("name"),
err.get("description"),
)
else:
message = body
return message
class GandiLiveConnection(ConnectionKey):
"""
Connection class for the Gandi Live driver
"""
responseCls = GandiLiveResponse
host = API_HOST
def add_default_headers(self, headers):
"""
Returns default headers as a dictionary.
"""
headers["Content-Type"] = "application/json"
headers["X-Api-Key"] = self.key
return headers
def encode_data(self, data):
"""Encode data to JSON"""
return json.dumps(data)
class BaseGandiLiveDriver(object):
"""
Gandi Live base driver
"""
connectionCls = GandiLiveConnection
name = "GandiLive"
|
ChawalitK/odoo | addons/event_sale/models/sale_order.py | Python | gpl-3.0 | 3,419 | 0.004095 | # -*- coding: utf-8 -*-
from odoo import api, fields, models
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.multi
def action_confirm(self):
self.ensure_one()
res = super(SaleOrder, self).action_confirm()
self.order_line._update_registrations(confirm=True)
if any(self.order_line.filtered(lambda line: line.event_id)):
return self.env['ir.actions.act_window'].with_context(default_sale_order_id=self.id).for_xml_id('event_sale', 'action_sale_order_event_registration')
return res
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
event_id = fields.Many2one('event.event', string='Event',
help="Choose an event and it will automatically create a registration for this event.")
event_ticket_id = fields.Many2one('event.event.ticket', string='Event Ticket', help="Choose "
"an event ticket and it will automatically create a registration for this event ticket.")
# those 2 fields are used for dynamic domains and filled by onchange
# TDE: really necessary ? ..
event_type_id = fields.Many2one(related='product_id.event_type_id', string="Event Type")
event_ok = fields.Boolean(related='product_id.event_ok')
@api.multi
def _prepare_invoice_line(self, qty):
self.ensure_one()
res = super(SaleOrderLine, self)._prepare_invoice_line(qty)
if self.event_id:
res['name'] = '%s: %s' % (res.get('name', ''), self.event_id.name)
return res
@api.onchange('product_id')
def _onchange_product_id_event(self):
values = {'event_type_id': False, 'event_ok': False}
if self.product_id.event_ok:
values['event_type_id'] = self.product_id.event_type_id.id
values['event_ok'] = self.product_id.event_ok
self.update(values)
@api.multi
def _update_registrations(self, confirm=True, registration_data=None):
""" Create or update registrations linked to a sale order line. A sale
order line has a product_u | om_qty attribute that will be the number of
registrations linked to | this line. This method update existing registrations
and create new one for missing one. """
Registration = self.env['event.registration']
registrations = Registration.search([('sale_order_line_id', 'in', self.ids)])
for so_line in self.filtered('event_id'):
existing_registrations = registrations.filtered(lambda self: self.sale_order_line_id.id == so_line.id)
if confirm:
existing_registrations.filtered(lambda self: self.state != 'open').confirm_registration()
else:
existing_registrations.filtered(lambda self: self.state == 'cancel').do_draft()
for count in range(int(so_line.product_uom_qty) - len(existing_registrations)):
registration = {}
if registration_data:
registration = registration_data.pop()
# TDE CHECK: auto confirmation
registration['sale_order_line_id'] = so_line
Registration.with_context(registration_force_draft=True).create(
Registration._prepare_attendee_values(registration))
return True
@api.onchange('event_ticket_id')
def _onchange_event_ticket_id(self):
self.price_unit = self.event_ticket_id.price
|
tingletech/md5s3stash | md5s3stash.py | Python | bsd-3-clause | 13,467 | 0.001188 | #!/usr/bin/env python
""" md5s3stash
content addressable storage in AWS S3
"""
from __future__ import unicode_literals
import sys
import os
import argparse
import tempfile
import urllib2
import urllib
import urlparse
import base64
import logging
import hashlib
import basin
import boto
import magic
from PIL import Image
from collections import namedtuple
import re
regex_s3 = re.compile(r's3.*amazonaws.com')
def main(argv=None):
parser = argpar | se.ArgumentParser(
description='content addressable storage in AWS S3')
parser.add_argument('url | ', nargs='+',
help='URL or path of source file to stash')
parser.add_argument('-b', '--bucket_base', nargs="?",
help='this must be a unique name in all of AWS S3')
parser.add_argument('-s', '--bucket_scheme', nargs="?",
default="simple", choices=['simple', 'multivalue'],
help='this must be a unique name in all of AWS S3')
parser.add_argument(
'-t', '--tempdir',
required=False,
help="if your files might be large, make sure this is on a big disk"
)
parser.add_argument(
'-w', '--warnings',
default=False,
help='show python `DeprecationWarning`s supressed by default',
required=False,
action='store_true',
)
parser.add_argument('--loglevel', default='ERROR', required=False)
parser.add_argument('-u', '--username', required=False,
help='username for downloads requiring BasicAuth')
parser.add_argument('-p', '--password', required=False,
help='password for downloads requiring BasicAuth')
if argv is None:
argv = parser.parse_args()
if argv.bucket_base:
bucket_base = argv.bucket_base
else:
assert 'BUCKET_BASE' in os.environ, "`-b` or `BUCKET_BASE` must be set"
bucket_base = os.environ['BUCKET_BASE']
if not argv.warnings:
# supress warnings
# http://stackoverflow.com/a/2047600/1763984
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
if argv.tempdir:
tempfile.tempdir = argv.tempdir
auth = None
if argv.username:
auth = (argv.username, argv.password)
# set debugging level
numeric_level = getattr(logging, argv.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % argv.loglevel)
logging.basicConfig(level=numeric_level, )
# if being used in a library, probably want to be able to recycle
# connection?
conn = boto.connect_s3()
for url in argv.url:
print("{0}\t{1}\t{2}\t{3}".format(
*md5s3stash(url, bucket_base, conn, url_auth=auth, bucket_scheme=argv.bucket_scheme)
))
def md5s3stash(
url,
bucket_base,
conn=None,
url_auth=None,
url_cache={},
hash_cache={},
bucket_scheme='simple'
):
""" stash a file at `url` in the named `bucket_base` ,
`conn` is an optional boto.connect_s3()
`url_auth` is optional Basic auth ('<username>', '<password'>) tuple
to use if the url to download requires authentication.
`url_cache` is an object with a dict interface, keyed on url
url_cache[url] = { md5: ..., If-None-Match: etag, If-Modified-Since: date }
`hash_cache` is an obhect with dict interface, keyed on md5
hash_cache[md5] = ( s3_url, mime_type, dimensions )
`bucket_scheme` is text string 'simple' or 'multibucket'
"""
StashReport = namedtuple('StashReport', 'url, md5, s3_url, mime_type, dimensions')
(file_path, md5, mime_type) = checkChunks(url, url_auth, url_cache)
try:
return StashReport(url, md5, *hash_cache[md5])
except KeyError:
pass
s3_url = md5_to_s3_url(md5, bucket_base, bucket_scheme=bucket_scheme)
if conn is None:
conn = boto.connect_s3()
s3move(file_path, s3_url, mime_type, conn)
(mime, dimensions) = image_info(file_path)
os.remove(file_path) # safer than rmtree
hash_cache[md5] = (s3_url, mime, dimensions)
report = StashReport(url, md5, *hash_cache[md5])
logging.getLogger('MD5S3:stash').info(report)
return report
# think about refactoring the next two functions
def md5_to_s3_url(md5, bucket_base, bucket_scheme='multibucket'):
""" calculate the s3 URL given an md5 and an bucket_base """
if bucket_scheme == 'simple':
url = "s3://{0}/{1}".format(
bucket_base,
md5
)
elif bucket_scheme == 'multibucket':
url = "s3://{0}.{1}/{2}".format(
md5_to_bucket_shard(md5),
bucket_base,
md5
)
return url
def md5_to_http_url(md5, bucket_base, bucket_scheme='multibucket', s3_endpoint='s3.amazonaws.com'):
""" calculate the http URL given an md5 and an bucket_base """
if bucket_scheme == 'simple':
url = "http://{0}/{1}/{2}".format(
s3_endpoint,
bucket_base,
md5
)
elif bucket_scheme == 'multibucket':
url = "http://{1}.{2}.{0}/{3}".format(
s3_endpoint,
md5_to_bucket_shard(md5),
bucket_base,
md5
)
return url
def md5_to_bucket_shard(md5):
""" calculate the shard label of the bucket name from md5 """
# "Consider utilizing multiple buckets that start with different
# alphanumeric characters. This will ensure a degree of partitioning
# from the start. The higher your volume of concurrent PUT and
# GET requests, the more impact this will likely have."
# -- http://aws.amazon.com/articles/1904
# "Bucket names must be a series of one or more labels. Adjacent
# labels are separated by a single period (.). [...] Each label must
# start and end with a lowercase letter or a number. "
# -- http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
# see also: http://en.wikipedia.org/wiki/Base_36
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
# http://stats.stackexchange.com/a/70884/14900
# take the first two digits of the hash and turn that into an inteter
# this should be evenly distributed
int_value = int(md5[0], 16)+10*int(md5[1], 16)
# divide by the length of the alphabet and take the remainder
bucket = int_value % len(ALPHABET)
return basin.encode(ALPHABET, bucket)
def is_s3_url(url):
'''For s3 urls, if you send http authentication headers, S3 will
send a "400 Bad Request" in response.
Now look for s3*.amazonaws.com
'''
# moving to OR this will be s3-us-west-2.amazonaws.com
match = regex_s3.search(url)
return True if match else False
def urlopen_with_auth(url, auth=None, cache={}):
'''Use urllib2 to open url if the auth is specified.
auth is tuple of (username, password)
'''
opener = urllib2.build_opener(DefaultErrorHandler())
req = urllib2.Request(url)
p = urlparse.urlparse(url)
# try to set headers for conditional get request
try:
here = cache[url]
if 'If-None-Match' in here:
req.add_header('If-None-Match', cache[url]['If-None-Match'],)
if 'If-Modified-Since' in here:
req.add_header('If-Modified-Since', cache[url]['If-Modified-Since'],)
except KeyError:
pass
if not auth or is_s3_url(url):
if p.scheme not in ['http', 'https']:
return urllib.urlopen(url) # urllib works with normal file paths
else:
# make sure https
if p.scheme != 'https':
raise urllib2.URLError('Basic auth not over https is bad idea! \
scheme:{0}'.format(p.scheme))
# Need to add header so it gets sent with first request,
# else redirected to shib
b64authstr = base64.b64encode('{0}:{1}'.format(*auth))
req.add_header('Authorization', 'Basic {0}'.format(b64authstr))
# return urllib2.urlopen(req)
return opener.open(req)
def checkChunks(url, auth=None, cache={}):
"""
Helper to download |
jawilson/home-assistant | homeassistant/components/stream/__init__.py | Python | apache-2.0 | 13,852 | 0.001083 | """Provide functionality to stream video source.
Components use create_stream with a stream source (e.g. an rtsp url) to create
a new Stream object. Stream manages:
- Background work to fetch and decode a stream
- Desired output formats
- Home Assistant URLs for viewing a stream
- Access tokens for URLs for viewing a stream
A Stream consists of a background worker, and one or more output formats each
with their own idle timeout managed by the stream component. When an output
format is no longer in use, the stream component will expire it. When there
are no active output formats, the background worker is shut down and access
tokens are expired. Alternatively, a Stream can be configured with keepalive
to always keep workers active.
"""
from __future__ import annotations
from collections.a | bc import Mapping
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from typing import cast
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeass | istant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ENDPOINTS,
ATTR_SETTINGS,
ATTR_STREAMS,
CONF_LL_HLS,
CONF_PART_DURATION,
CONF_SEGMENT_DURATION,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
SEGMENT_DURATION_ADJUSTER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
from .core import PROVIDERS, IdleTimer, StreamOutput, StreamSettings
from .hls import HlsStreamOutput, async_setup_hls
_LOGGER = logging.getLogger(__name__)
STREAM_SOURCE_REDACT_PATTERN = [
(re.compile(r"//.*:.*@"), "//****:****@"),
(re.compile(r"\?auth=.*"), "?auth=****"),
]
def redact_credentials(data: str) -> str:
"""Redact credentials from string data."""
for (pattern, repl) in STREAM_SOURCE_REDACT_PATTERN:
data = pattern.sub(repl, data)
return data
def create_stream(
hass: HomeAssistant, stream_source: str, options: dict[str, str]
) -> Stream:
"""Create a stream with the specified identfier based on the source url.
The stream_source is typically an rtsp url and options are passed into
pyav / ffmpeg as options.
"""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
stream = Stream(hass, stream_source, options=options)
hass.data[DOMAIN][ATTR_STREAMS].append(stream)
return stream
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_LL_HLS, default=False): cv.boolean,
vol.Optional(CONF_SEGMENT_DURATION, default=6): vol.All(
cv.positive_float, vol.Range(min=2, max=10)
),
vol.Optional(CONF_PART_DURATION, default=1): vol.All(
cv.positive_float, vol.Range(min=0.2, max=1.5)
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def filter_libav_logging() -> None:
"""Filter libav logging to only log when the stream logger is at DEBUG."""
stream_debug_enabled = logging.getLogger(__name__).isEnabledFor(logging.DEBUG)
def libav_filter(record: logging.LogRecord) -> bool:
return stream_debug_enabled
for logging_namespace in (
"libav.mp4",
"libav.h264",
"libav.hevc",
"libav.rtsp",
"libav.tcp",
"libav.tls",
"libav.mpegts",
"libav.NULL",
):
logging.getLogger(logging_namespace).addFilter(libav_filter)
# Set log level to error for libav.mp4
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up stream."""
# Drop libav log messages if stream logging is above DEBUG
filter_libav_logging()
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = []
if (conf := config.get(DOMAIN)) and conf[CONF_LL_HLS]:
assert isinstance(conf[CONF_SEGMENT_DURATION], float)
assert isinstance(conf[CONF_PART_DURATION], float)
hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
ll_hls=True,
min_segment_duration=conf[CONF_SEGMENT_DURATION]
- SEGMENT_DURATION_ADJUSTER,
part_target_duration=conf[CONF_PART_DURATION],
hls_advance_part_limit=max(int(3 / conf[CONF_PART_DURATION]), 3),
hls_part_timeout=2 * conf[CONF_PART_DURATION],
)
else:
hass.data[DOMAIN][ATTR_SETTINGS] = StreamSettings(
ll_hls=False,
min_segment_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS
- SEGMENT_DURATION_ADJUSTER,
part_target_duration=TARGET_SEGMENT_DURATION_NON_LL_HLS,
hls_advance_part_limit=3,
hls_part_timeout=TARGET_SEGMENT_DURATION_NON_LL_HLS,
)
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event: Event) -> None:
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS]:
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
class Stream:
"""Represents a single stream."""
def __init__(
self, hass: HomeAssistant, source: str, options: dict[str, str]
) -> None:
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self.keepalive = False
self.access_token: str | None = None
self._thread: threading.Thread | None = None
self._thread_quit = threading.Event()
self._outputs: dict[str, StreamOutput] = {}
self._fast_restart_once = False
self._available = True
def endpoint_url(self, fmt: str) -> str:
"""Start the stream and returns a url for the output format."""
if fmt not in self._outputs:
raise ValueError(f"Stream is not configured for format '{fmt}'")
if not self.access_token:
self.access_token = secrets.token_hex()
endpoint_fmt: str = self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt]
return endpoint_fmt.format(self.access_token)
def outputs(self) -> Mapping[str, StreamOutput]:
"""Return a copy of the stream outputs."""
# A copy is returned so the caller can iterate through the outputs
# without concern about self._outputs being modified from another thread.
return MappingProxyType(self._outputs.copy())
def add_provider(
self, fmt: str, timeout: int = OUTPUT_IDLE_TIMEOUT
) -> StreamOutput:
"""Add provider output stream."""
if not self._outputs.get(fmt):
@callback
def idle_callback() -> None:
if (
not self.keepalive or fmt == RECORDER_PROVIDER
) and fmt in self._outputs:
self.remove_provider(self._outputs[fmt])
self.check_idle()
provider = PROVIDERS[fmt](
self.hass, IdleTimer(self.hass, timeout, idle_callback)
)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider: StreamOutput) -> None:
"""Remove provider outp |
diablo-rewriter/diablo | obfuscation/diversity_engine/iterative_diablo/compare_and_evaluate.py | Python | gpl-2.0 | 9,057 | 0.036215 | #!/usr/bin/python
# This research is supported by the European Union Seventh Framework Programme (FP7/2007-2013), project ASPIRE (Advanced Software Protection: Integration, Research, and Exploitation), under grant agreement no. 609734; on-line at https://aspire-fp7.eu/. */
# The development of portions of the code contained in this file was sponsored by Samsung Electronics UK. */
import logging
import os
import sys
import benchmarks
import diablo
import diablo_normalization
import experiments
import multiplatform
import ida
# Code to evaluate the framework for normalization
# Specific paths for these experiments:
# experiments.setGlobalBinaryPathBase("/home/bcoppens/private/software/diversity/experiments/diversity/normalization/diablodiota/test/")
base_path_proteus = "/media/1cf273de-b073-441c-9c4e-330c60320e4a/data/diversity/experiments/normalization/exploratory/bzip2/"
base_path_glaucus = "/media/1cf273de-b073-441c-9c4e-330c60320e4a/data/diversity/experiments/normalization/exploratory/soplex_iterative/7_soplex_1call_nocallee_yesbranchfun_newrules_improved/"
experiments.setGlobalBinaryPathBase(base_path_glaucus)
#experiments.setGlobalBinaryPathBase(base_path_proteus)
settings.set_settings(settings_forcelayout)
matchers = [ ida.BinDiff ]
analyzers = matchers
#tmp_benchmark = benchmarks.Benchmark("bzip2", "path_to_original_sources", { 1: "o22", 2: "p22" }, { } )
tmp_benchmark = benchmarks.Benchmark("bzip2", "path_to_original_sources", { 1: "soplex_iterated_binary_0", 2: "soplex_iterated_binary_20" }, { "test": [ [ "test.mps" ] ] } )
logging.basicConfig(filename='compare_files_extensive_iterative_inlining_soplex.log', format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
def binary_iteration_filename(experiment, iteration):
#return "%s_%i" % (experiment.benchmark.exe, iteration)
#return experiment.benchmark.versions[iteration]
return "%s_v%i" % (experiment.shortname, iteration)
def unnormalized_filename(experiment, iteration):
return experiment.benchmark.versions[iteration]
def match(experiment, v1, v2, matchers):
localDir = multiplatform.asLocal(experiment.path)
os.chdir(localDir)
for i in [v1, v2]:
exe = binary_iteration_filename( experiment, i )
ida.run_Ida(exe, extend_analysis=experiment.config.extend_IDA_analysis, path=experiment.path)
matchers[0].match(experiment, v1, v2)
def evaluate(experiment, v1, v2, matchers):
matches_v2 = matchers[0].get_instructions(experiment, v1, v2, which_version=v2)
exe_v2 = binary_iteration_filename( experiment, v2 )
instructions_v2 = diablo.all_insts(multiplatform.dir_entry(experiment.path, "%s.xml" % exe_v2))
#full_name = "%s, %s" % (setting, experiment.name)
full_name = experiment.name
logging.info("===> SIZE FOR EXPERIMENT %s: %i" % (full_name, len(matches_v2)))
logging.info("Instructions in binary: %i, matched: %i" % (len(instructions_v2), len(matches_v2)))
logging.info("===> PRUNING RATE FOR EXPERIMENT %s: %f (tot. abs. insts. count: %i)" % (full_name, (float(len(matches_v2)) / float(len(instructions_v2))), len(instructions_v2)))
options = [ "RemoveNotExecuted", "RemoveDataSections", "NormalizeJcc", "DynamicCalls",
"JumpFunctions", "JumpFunctionRemovePush", "Liveness", "OpaquePredicates", "SortDynamicTargets",
"Optimize", "MergeBBLs",
#"Inlining", "BranchElimintation", "DynamicJumps", "MultipleIfTarges" ] #, "NormalizeInstructions", "Peephole", "FixLayout" ]
"Inlining", "DynamicJumps", "MultipleIfTarges", "Peephole" ] #, "NormalizeInstructions", "Peephole", "FixLayout" ]
config_simple = diablo_normalization.NormalizationConf(trace_pr | efix="")
config_simple.extend_IDA_analysis = False
config_simple.experiment_dir = ""
config_extended = diablo_normalization.NormalizationConf(trace_prefix="")
config_extended.extend_IDA_analysis = True
config_extended.experiment_dir = ""
def get_traces(vs):
experiment = experiments.Experiment(config_simple, tmp_benchmark, binary_iteration_filename)
for v in vs:
experiment.config.trace_prefix = "trace%s" % unnormalized_filename( experiment, v )
| diablo_normalization.generate_dynamic_info(experiment, unnormalized_filename( experiment, v ))
# get_traces([1,2]) # For now, we just experiment with different normalizations on fixed 2 soplex binaries
name = ""
smallname = ""
binary_pair = 0
only_do = None
#only_do = [ 15 ]
#only_do = [ 11, 12, 13, 14, 15, 16, 17 ]
#only_do = [ 13, 14, 15, 16, 17, 18 ]
#only_do = range(26, 36)
for option in options:
name += option
smallname += diablo_normalization.diablo_transforms_mapper[option]
config_simple.enable(option)
config_extended.enable(option)
#for data_ok in [ True ]: # [ True, False ]:
data_ok = True
for doDyninst in [ False, True ]:
if data_ok:
currentname = name + "PlaceDataCorrectly"
currentsmallname = smallname + "-dataok"
config_simple.enable("PlaceDataCorrectly")
config_extended.enable("PlaceDataCorrectly")
else:
currentname = name + "PlaceDataInCorrectly"
config_simple.disable("PlaceDataCorrectly")
config_extended.disable("PlaceDataCorrectly")
currentsmallname = smallname + "-datanok"
if doDyninst:
currentname = name + "DYNINST"
currentsmallname = smallname + "-DYNINST"
else:
currentname = name + "NOdyninst"
currentsmallname = smallname + "-NOdyninst"
binary_pair += 1
logging.info("BINARYPAIR,%i,%s" % (binary_pair, currentname))
if only_do is not None and binary_pair not in only_do:
logging.info("Skipping actual code generation")
continue
experiment_simple = experiments.Experiment(config_simple, tmp_benchmark, binary_iteration_filename)
experiment_simple.shortname = "exp%i_basic" % binary_pair # currentsmallname + "_basic"
experiment_simple.name = currentname + "BasicDisassembly"
experiment_extended = experiments.Experiment(config_extended, tmp_benchmark, binary_iteration_filename)
experiment_extended.name = currentname + "ExtendedDisassembly"
experiment_extended.shortname = "exp%i_extended" % binary_pair # = currentsmallname + "_extended"
experimentslist = [ experiment_extended ]
for experiment in experimentslist:
for v in [ 1, 2 ]:
out_binary = binary_iteration_filename(experiment, v)
experiment.config.trace_prefix = "trace%s" % unnormalized_filename( experiment, v )
# TODO: run dyninst!
if doDyninst:
experiment.config.dyninst_prefix = "dyninst%s" % unnormalized_filename( experiment, v )
else:
experiment.config.dyninst_prefix = None
diablo_normalization.runNormalization(experiment, unnormalized_filename( experiment, v ), out_binary, out_binary)
match(experiment, 1, 2, matchers)
evaluate(experiment, 1, 2, matchers)
sys.exit(0)
#def binary_iteration_filename(experiment, iteration):
##return "%s_%i" % (experiment.benchmark.exe, iteration)
#return experiment.benchmark.versions[iteration]
for setting in [ "stringsfout", "stringsjuist" ]:
config_simple.experiment_dir = setting
config_extended.experiment_dir = config_simple.experiment_dir
experiment_simple = experiments.Experiment(config_simple, tmp_benchmark, binary_iteration_filename)
experiment_simple.name = "Basic Disassembly"
experiment_extended = experiments.Experiment(config_extended, tmp_benchmark, binary_iteration_filename)
experiment_extended.name = "Extended Disassembly"
experimentslist = [ experiment_simple, experiment_extended ]
for experiment in experimentslist:
localDir = multiplatform.asLocal(experiment.path)
os.chdir(localDir)
#for matcher in matchers:
#logging.info("Potentially doing analysis for matcher %s on iteration %i", str(matcher), current_iteration)
#matcher.analyze(experiment, current_iteration)
#for orig in feedbackRound.compareWith:
#logging.info("Matching %i with %i using %s", orig, current_iteration, str(matcher))
#matcher.match(experiment, orig, current_iteration)
#logging.debug("... Matched")
#found = ida.matcher.semantic_changes_found_count_filters(helper, matcher, experiment, orig, current_iteration, matcher.filters)
#logging.debug("... Done")
#for chain in found:
#log = "%s maps to: %i total, %i semantic changes" % (chain, found[chain].fou |
zxtstarry/src | book/cwp/geo2008IsotropicAngleDomainElasticRTM/marm2allA/marm2.py | Python | gpl-2.0 | 4,919 | 0.038219 | from rsf.proj import *
from math import *
import fdmod,pcsutil,wefd
def data(par):
    """Fetch the Marmousi-II SEGY models and derive vp/vs/density grids.

    par: parameter dict; must provide the window keys (nz, nx, oz, ox)
    and the axis label/unit keys (lz, uz, lx, ux) used below.

    Produces (among others): vp, vs, ro, wmask, rx and the vp/vs ratio
    cube 'vratio' (PP, PS, SP, SS stacked on axis 3).
    """
    # ------------------------------------------------------------
    Fetch('vp_marmousi-ii.segy', "marm2")
    Fetch('vs_marmousi-ii.segy', "marm2")
    Fetch('density_marmousi-ii.segy', "marm2")
    # ------------------------------------------------------------
    for file in ('vp', 'vs', 'ro'):
        if file == 'ro':
            ifile = 'density_marmousi-ii.segy'
        else:
            ifile = file + '_marmousi-ii.segy'

        # Unpack SEGY into data (z*), trace headers (t*), ASCII header (s*)
        # and binary header (b*).
        # FIX: the original text contained the corrupted literal "'./ | b'";
        # './b'+file is what bfile=${TARGETS[3]} expects.
        Flow(['z' + file, 't' + file, './s' + file, './b' + file], ifile,
             '''
             segyread tape=$SOURCE
             tfile=${TARGETS[1]}
             hfile=${TARGETS[2]}
             bfile=${TARGETS[3]}
             ''', stdin=0)

        # Attach physical axes, then decimate by 2 in both directions.
        Flow('_' + file, 'z' + file,
             '''
             put
             o1=0 d1=0.001249 label1=%(lz)s unit1=%(uz)s
             o2=0 d2=0.001249 label2=%(lx)s unit2=%(ux)s |
             window j1=2 j2=2
             ''' % par)

        # Window to the experiment grid; density is rescaled to SI-like units.
        if file == 'ro':
            Flow(file + 'raw', '_' + file,
                 'window n1=%(nz)d n2=%(nx)d min1=%(oz)g min2=%(ox)g | scale rscale=1000000' % par)
        else:
            Flow(file + 'raw', '_' + file,
                 'window n1=%(nz)d n2=%(nx)d min1=%(oz)g min2=%(ox)g' % par)

    # ------------------------------------------------------------
    # Water mask: 1 where vp <= 1.5 km/s (the water column).
    Flow('wmask', 'vpraw', 'mask max=1.5 | dd type=float')
    # Result('wmask',fdmod.cgrey('allpos=y',par))

    Flow('rx', 'vpraw', 'math output="1.0e6+1.5e6*(input-1.5)/3" ')
    Flow('ro', 'roraw', 'math output=1')

    # Smooth vp; vs is half of vp, zeroed in the water column.
    Flow('vp', 'vpraw', 'smooth rect1=35 rect2=35 repeat=5')
    Flow('vs', 'vp wmask', 'scale rscale=0.5 | math w=${SOURCES[1]} output="input*(1-w)"')

    # Velocity ratios (mode=d divides) for all wave-mode combinations.
    Flow('vratio1_1', 'vp vp', 'add mode=d ${SOURCES[1]}')
    Flow('vratio1_2', 'vp vs', 'add mode=d ${SOURCES[1]}')
    Flow('vratio2_1', 'vs vp', 'add mode=d ${SOURCES[1]}')
    Flow('vratio2_2', 'vs vs', 'add mode=d ${SOURCES[1]}')
    Flow('vratio', 'vratio1_1 vratio1_2 vratio2_1 vratio2_2',
         '''
         cat axis=3 space=n ${SOURCES[0:4]}
         ''', stdin=0)
def mask(mask,xsou,tmin,tmax,par):
    # Build a smoothed mute mask in the (x,t) plane: two dipping lines
    # meeting above the source position xsou, tapered laterally between
    # par['ltap'] and par['rtap'], then duplicated along a third axis
    # (two components).
    # Left flank: from (0.15+tmin, xmin) up to (0.15, xsou).
    dipline1(mask+'ml',
             0.15+tmin,par['xmin'],
             0.15,xsou,
             0,1,
             par['nt'],par['ot'],par['dt'],
             par['nx'],par['ox'],par['dx'])
    # Right flank: from (0.15, xsou) down to (0.15+tmax, xmax).
    dipline1(mask+'mr',
             0.15,xsou,
             0.15+tmax,par['xmax'],
             0,1,
             par['nt'],par['ot'],par['dt'],
             par['nx'],par['ox'],par['dx'])

    # Combine the two flanks with a tapered spike, smooth, and spray to
    # two components (axis 3).
    Flow(mask,[mask+'ml',mask+'mr'],
         '''
         spike nsp=1 mag=1.0
         n1=%(nx)d o1=%(ox)g d1=%(dx)g k1=%(ltap)d l1=%(rtap)d
         n2=%(nt)d o2=%(ot)g d2=%(dt)g |
         smooth rect1=100 repeat=1 |
         scale axis=123 |
         transp |
         add mode=p ${SOURCES[0]} |
         add mode=p ${SOURCES[1]} |
         transp |
         smooth rect2=100 repeat=3 |
         put label1=x label2=t unit1=km unit2=s |
         spray axis=3 n=2 o=0 d=1 |
         transp plane=23
         ''' % par)

    # Plot only the first component.
    Result(mask,
           'window n2=1 | transp|' + fdmod.dgrey('',par))
def dip(dip,img,par):
    # Estimate local structural dip of image 'img' (plane-wave destruction)
    # and plot it with a jet colormap.
    Flow( dip,img,'dip rect1=40 rect2=40 order=3 liter=100 verb=y ')
    Result(dip,fdmod.cgrey('color=j wantscalebar=n',par))
def psang(x,img,dip,vpvs,tag,par):
    # Extract angle-domain gathers at surface location x via wefd.elaps.
    # NOTE: the parameter 'dip' shadows the module-level dip() function.
    # dip angle at cig location x
    Flow( dip+'-one',dip,'window n2=1 min2=%g'%x)
    # vp/vs ratio traces at cig location x, one per wave-mode pair.
    # NOTE(review): these vratio* outputs are built but not passed to
    # wefd.elaps below -- presumably consumed elsewhere; confirm.
    Flow('vratioPP',vpvs,'window n3=1 f3=0 n2=1 min2=%g'%x)
    Flow('vratioPS',vpvs,'window n3=1 f3=1 n2=1 min2=%g'%x)
    Flow('vratioSP',vpvs,'window n3=1 f3=2 n2=1 min2=%g'%x)
    Flow('vratioSS',vpvs,'window n3=1 f3=3 n2=1 min2=%g'%x)

    # Extended-image lag counts: horizontal space lags only.
    nhx=200
    nhz=0
    nht=0

    wefd.elaps('S'+tag,
               img+tag+'_ds',
               img+tag+'_dr',
               nhx,nhz,nht,
               dip+'-one',x,par)
def dipline1(mod,s1,s2,e1,e2,vi,vt,n1,o1,d1,n2,o2,d2):
    # Build a two-layer model 'mod' on an (n1,o1,d1) x (n2,o2,d2) grid:
    # a dipping interface from point (s2,s1) to (e2,e1), filled with value
    # vi above and vt below (via unif2).
    min1=o1
    max1=o1+(n1-1)*d1
    min2=o2
    max2=o2+(n2-1)*d2

    # Slope of the interface (axis1 change per axis2 change).
    ra = (e1-s1)/(e2-s2)

    vels = "%s,%s,%s" %(vi,vt,vt)
    # End derivatives for the spline interpolation.
    # NOTE(review): tan(ra) of a slope ratio looks suspicious -- the slope
    # ra itself would normally be the derivative; confirm intent.
    drvs = "%s,%s" %(tan(ra),tan(ra))

    dim1 = 'd1=%g o1=%g n1=%d' % (d2,o2,n2)
    dim2 = 'd2=%g o2=%g n2=%d' % (d1,o1,n1)  # NOTE: currently unused

    # Second layer: flat line along the bottom of the grid.
    Flow(mod+'lay2',None,
         '''
         spike nsp=4 mag=%g,%g,%g,%g
         n1=4 n2=1 k1=1,2,3,4 |
         put n1=2 n2=2 |
         spline %s fp=%s
         '''%(min2,min1,max2,max1,dim1,drvs))

    # First layer: the dipping interface itself.
    Flow(mod+'lay1',None,
         '''
         spike nsp=4 mag=%g,%g,%g,%g
         n1=4 n2=1 k1=1,2,3,4 |
         put n1=2 n2=2 |
         spline %s fp=%s
         '''%(s2,s1,e2,e1,dim1,drvs))

    Flow( mod+'layers',[mod+'lay1',mod+'lay2'],'cat axis=2 ${SOURCES[1:2]}')

    # Fill between interfaces with the velocity profile.
    Flow(mod,mod+'layers',
         '''
         unif2 v00=%s n1=%d d1=%g o1=%g
         ''' % (vels,n1,d1,o1) )
|
edac-epscor/nmepscor-data-collection-form | application/builder/forms.py | Python | mit | 2,579 | 0.001163 |
import warnings
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import authenticate
# Number of leading characters left visible when masking a password.
UNMASKED_DIGITS_TO_SHOW = 6


def mask_password(p):
    """Return *p* with everything after the first UNMASKED_DIGITS_TO_SHOW
    characters replaced by '*' (PEP 8: a def, not an assigned lambda)."""
    return "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW],
                     "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
# Originally from django.contrib.auth.forms
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(max_length=30)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    error_messages = {
        'invalid_login': _("Please enter a correct username and password. "
                           "Note that both fields are case-sensitive."),
        'inactive': _("This account is inactive."),
    }

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)
        self.fields['username'].label = 'User Name'

    def clean(self):
        """Authenticate the submitted credentials.

        Raises forms.ValidationError on bad credentials or inactive
        accounts; otherwise returns the cleaned data unchanged.
        """
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username and password:
            # Delegates to the configured authentication backends.
            self.user_cache = authenticate(username=username,
                                           password=password)
            if self.user_cache is None:
                # BUG FIX: this trimmed-down copy never defines
                # `self.username_field` (upstream Django derives it from the
                # user model), so the original
                # `self.username_field.verbose_name` raised AttributeError
                # instead of reporting invalid credentials. Use the form
                # field's label instead.
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'username': self.fields['username'].label},
                )
            elif not self.user_cache.is_active:
                raise forms.ValidationError(
                    self.error_messages['inactive'],
                    code='inactive',
                )
        return self.cleaned_data

    def check_for_test_cookie(self):
        # Deprecated no-op kept for backward compatibility with old callers.
        warnings.warn("check_for_test_cookie is deprecated; ensure your login "
                      "view is CSRF-protected.", DeprecationWarning)

    def get_user_id(self):
        """Return the authenticated user's id, or None if not authenticated."""
        if self.user_cache:
            return self.user_cache.id
        return None

    def get_user(self):
        """Return the authenticated User instance, or None."""
        return self.user_cache
|
rynecarbone/Pokerly | Deck.py | Python | mit | 2,273 | 0.036516 | from random import shuffle
from operator import attrgetter
###
class Card:
    '''A single playing card with a numeric value (2-14) and a suit.'''

    # Suit letter -> unicode symbol.
    suits = {'H': '\u2661',
             'D': '\u2662',
             'S': '\u2660',
             'C': '\u2663'}
    # Face-card values -> display letters (11=J .. 14=A).
    faces = {11: 'J',
             12: 'Q',
             13: 'K',
             14: 'A'}

    def __init__(self, value, suit):
        self.value = value
        self.suit = suit
        # Pip cards display their number; face cards display a letter.
        self.name = value if value < 11 else self.faces[value]
        self.symbol = self.suits[suit]

    def __eq__(self, other):
        # Cards compare by rank only; suit is ignored.
        return self.value == other.value

    # BUG FIX: the special method for '!=' is __ne__, not __neq__ --
    # the original name was never invoked by the != operator.
    def __ne__(self, other):
        return not self.__eq__(other)

    __neq__ = __ne__  # backward-compat alias for any direct callers

    def __gt__(self, other):
        return self.value > other.value

    def __lt__(self, other):
        return self.value < other.value

    def __repr__(self):
        return '[{0:>2}{1} ]'.format(self.name, self.symbol)

    def dump_state(self):
        """Print a one-line debug description of this card."""
        print('{0}{1} (value={2}, suit={3})'.format(self.name, self.symbol, self.value, self.suit))
###
class Deck:
    '''A standard 52-card deck that can be shuffled and dealt from.'''

    suits = ['H', 'D', 'S', 'C']
    values = [x for x in range(2, 15)]

    def __init__(self):
        # NOTE(review): cards_left is initialized but never decremented by
        # deal_card; len(self.deck) is the authoritative count.
        self.cards_left = 52
        self.deck = []
        self.fill_deck()

    def __repr__(self):
        # One line per suit, cards sorted by value.
        # BUG FIX: compare strings with '==' (value equality), not 'is'
        # (identity) -- 'is' on str literals is implementation-dependent.
        s_deck = sorted(self.deck, key=attrgetter('suit', 'value'))
        h = " ".join(str(x) for x in s_deck if x.suit == 'H')
        d = " ".join(str(x) for x in s_deck if x.suit == 'D')
        s = " ".join(str(x) for x in s_deck if x.suit == 'S')
        c = " ".join(str(x) for x in s_deck if x.suit == 'C')
        return '{0}\n{1}\n{2}\n{3}'.format(h, d, s, c)

    def fill_deck(self):
        """Populate the deck with all 52 suit/value combinations."""
        for s in self.suits:
            for v in self.values:
                self.deck.append(Card(v, s))

    def shuffle(self):
        """Shuffle the deck in place."""
        shuffle(self.deck)

    def deal_card(self, player):
        """Deal the top card to *player* if the deck and hand allow it."""
        if len(self.deck) > 0:
            if len(player.hand) < player.max_cards:
                card_to_deal = self.deck.pop(0)
                player.add_card(card_to_deal)
            else:
                print('{0} has too many cards !!!'.format(player.name))
        else:
            print('No more cards left!!')
|
funkyeah/tiddlyweb | tiddlyweb/serializations/__init__.py | Python | bsd-3-clause | 3,732 | 0 | """
Turn entities to and fro various representations.
This is the base Class and interface Class used to
transform strings of various forms to model objects
and model objects to strings of various forms.
"""
from tiddlyweb.serializer import NoSerializationError
from tiddlyweb.model.tiddler import string_to_tags_list
class SerializationInterface(object):
    """
    A Serialization is a collection of methods that
    either turn an input string into the object named
    by the method, or turn the object into a string
    form.

    The interface is fairly simple: For the data
    entities that exist in the TiddlyWeb system there
    (optionally) exists <entity>_as and as_<entity> methods
    in each Serialization.

    *_as returns a string form of the entity, perhaps as
    HTML, Text, YAML, Atom, whatever the Serialization does.

    as_* takes a provided entity and string and updates
    the skeletal entity to represent the information
    contained in the string (in the Serialization format).

    There are also three supporting methods, list_tiddlers(),
    list_recipes() and list_bags(), that provide convenience
    methods for presenting a collection of either in the
    Serialization form. A string is returned.

    If a method doesn't exist a NoSerializationError is raised
    and the calling code is expected to do something intelligent
    when trapping it.
    """

    def __init__(self, environ=None):
        # environ: WSGI-style environment dict; defaults to empty.
        if environ is None:
            environ = {}
        self.environ = environ

    def recipe_as(self, recipe):
        """
        Serialize a Recipe into this serializer's form.
        """
        raise NoSerializationError

    def as_recipe(self, recipe, input_string):
        """
        Take input_string, which is a serialized recipe,
        and turn it into a Recipe (if possible).
        """
        raise NoSerializationError

    def bag_as(self, bag):
        """
        Serialize a Bag into this serializer's form.
        """
        raise NoSerializationError

    def as_bag(self, bag, input_string):
        """
        Take input_string, which is a serialized bag,
        and turn it into a Bag (if possible).
        """
        raise NoSerializationError

    def tiddler_as(self, tiddler):
        """
        Serialize a Tiddler into this serializer's form.
        """
        raise NoSerializationError

    def as_tiddler(self, tiddler, input_string):
        """
        Take input_string, which is a serialized tiddler,
        and turn it into a Tiddler (if possible).
        """
        raise NoSerializationError

    def list_tiddlers(self, bag):
        """
        Provided a bag, output the included tiddlers.
        """
        raise NoSerializationError

    def list_recipes(self, recipes):
        """
        Provided a list of RecipeS, make a serialized
        list of those recipes (e.g. a list of HTML
        links).
        """
        raise NoSerializationError

    def list_bags(self, bags):
        """
        Provided a list of BagS, make a serialized
        list of those bags (e.g. a list of HTML
        links).
        """
        raise NoSerializationError

    def as_tags(self, string):
        """
        Not called directly, but made public for future
        use. Turn a string into a list of tags.
        """
        return string_to_tags_list(string)

    def tags_as(self, tags):
        """
        Not called directly, but made public for future
        use. Turn a list of tags into a serialized list.
        Tags containing spaces are wrapped in [[...]].
        """
        tag_string_list = []
        for tag in tags:
            if ' ' in tag:
                tag = '[[%s]]' % tag
            tag_string_list.append(tag)
        return u' '.join(tag_string_list)
|
FederatedAI/FATE | python/fate_arch/storage/hdfs/_table.py | Python | apache-2.0 | 5,051 | 0.000594 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
from typing import Iterable
from pyarrow import fs
from fate_arch.common import hdfs_utils
from fate_arch.common.log import getLogger
from fate_arch.storage import StorageEngine, HDFSStoreType
from fate_arch.storage import StorageTableBase
LOGGER = getLogger()
class StorageTable(StorageTableBase):
    """HDFS-backed storage table.

    The table is a single HDFS file (or a directory of part files)
    containing newline-delimited serialized key/value pairs.
    """

    def __init__(
        self,
        address=None,
        name: str = None,
        namespace: str = None,
        partitions: int = 1,
        store_type: HDFSStoreType = HDFSStoreType.DISK,
        options=None,
    ):
        super(StorageTable, self).__init__(
            name=name,
            namespace=namespace,
            address=address,
            partitions=partitions,
            options=options,
            engine=StorageEngine.HDFS,
            store_type=store_type,
        )
        # Tricky way to load libhdfs: instantiating the legacy client first
        # forces the native library to load; a failure here is non-fatal.
        try:
            from pyarrow import HadoopFileSystem
            HadoopFileSystem(self.path)
        except Exception as e:
            LOGGER.warning(f"load libhdfs failed: {e}")
        self._hdfs_client = fs.HadoopFileSystem.from_uri(self.path)

    def check_address(self):
        """Return True if the backing HDFS file exists."""
        return self._exist()

    def _put_all(
        self, kv_list: Iterable, append=True, assume_file_exist=False, **kwargs
    ):
        """Serialize and write key/value pairs, one per line.

        Appends when append=True and the file exists (or is assumed to);
        otherwise (re)creates the file. Updates the count metadata.
        """
        LOGGER.info(f"put in hdfs file: {self.path}")
        if append and (assume_file_exist or self._exist()):
            stream = self._hdfs_client.open_append_stream(
                path=self.path, compression=None
            )
        else:
            stream = self._hdfs_client.open_output_stream(
                path=self.path, compression=None
            )

        counter = self._meta.get_count() if self._meta.get_count() else 0
        with io.TextIOWrapper(stream) as writer:
            for k, v in kv_list:
                writer.write(hdfs_utils.serialize(k, v))
                writer.write(hdfs_utils.NEWLINE)
                counter = counter + 1
        self._meta.update_metas(count=counter)

    def _collect(self, **kwargs) -> list:
        """Yield deserialized (key, value) pairs from the file."""
        for line in self._as_generator():
            yield hdfs_utils.deserialize(line.rstrip())

    def _read(self) -> list:
        """Yield raw text lines from the file."""
        for line in self._as_generator():
            yield line

    def _destroy(self):
        """Delete the backing HDFS file."""
        self._hdfs_client.delete_file(self.path)

    def _count(self):
        """Return the cached count from metadata, else count lines."""
        count = 0
        if self._meta.get_count():
            return self._meta.get_count()
        for _ in self._as_generator():
            count += 1
        return count

    def _save_as(
        self, address, partitions=None, name=None, namespace=None, **kwargs
    ):
        """Copy the backing file to *address* and return a table over it."""
        self._hdfs_client.copy_file(src=self.path, dst=address.path)
        table = StorageTable(
            address=address,
            partitions=partitions,
            name=name,
            namespace=namespace,
            **kwargs,
        )
        return table

    def close(self):
        # Nothing to release: pyarrow streams are managed per operation.
        pass

    @property
    def path(self) -> str:
        """Full HDFS URI of the table: <name_node>/<path>."""
        return f"{self._address.name_node}/{self._address.path}"

    def _exist(self):
        info = self._hdfs_client.get_file_info([self.path])[0]
        return info.type != fs.FileType.NotFound

    def _as_generator(self):
        """Yield lines from the file, or from every part file when the
        path is a directory (skipping the _SUCCESS marker)."""
        info = self._hdfs_client.get_file_info([self.path])[0]
        if info.type == fs.FileType.NotFound:
            raise FileNotFoundError(f"file {self.path} not found")
        elif info.type == fs.FileType.File:
            # todo:
            with io.TextIOWrapper(
                buffer=self._hdfs_client.open_input_stream(self.path), encoding="utf-8"
            ) as reader:
                for line in reader:
                    yield line
        else:
            selector = fs.FileSelector(os.path.join("/", self._address.path))
            file_infos = self._hdfs_client.get_file_info(selector)
            for file_info in file_infos:
                if file_info.base_name == "_SUCCESS":
                    continue
                assert (
                    file_info.is_file
                ), f"{self.path} is directory contains a subdirectory: {file_info.path}"
                with io.TextIOWrapper(
                    buffer=self._hdfs_client.open_input_stream(
                        f"{self._address.name_node}/{file_info.path}"
                    ),
                    encoding="utf-8",
                ) as reader:
                    for line in reader:
                        yield line
|
magistral-io/MagistralPython | src/magistral/client/util/JksHandler.py | Python | mit | 3,975 | 0.013082 | '''
Created on 16 Sep 2016
@author: rizarse
'''
import jks, textwrap, base64
from os.path import expanduser
import os.path
import atexit
import shutil
from os import makedirs
class JksHandler(object):
    """Helpers for exporting keys/certificates from a JKS keystore to PEM
    files under ~/magistral/<token>/, and for dumping a keystore to stdout."""

    def __init__(self, params):
        # params is currently unused; kept for interface compatibility.
        pass

    @staticmethod
    def writePkAndCerts(ks, token):
        """Write key.pem, certificate.pem and ca.pem for keystore *ks*.

        Returns the alias (uid) of the last private key seen. The export
        directory is removed automatically at interpreter exit.
        """
        uid = None
        home = expanduser("~")
        cert_dir = home + '/magistral/' + token

        # BUG FIX: the cleanup callback was declared as deleteCerts(self, path)
        # but registered with a single argument, so it raised TypeError at
        # interpreter exit instead of removing the directory.
        def deleteCerts(path):
            shutil.rmtree(path)

        atexit.register(deleteCerts, cert_dir)

        for alias, pk in ks.private_keys.items():
            uid = alias

            if pk.algorithm_oid == jks.util.RSA_ENCRYPTION_OID:
                if not os.path.exists(cert_dir):
                    makedirs(cert_dir)

                key = cert_dir + '/key.pem'
                if os.path.exists(key):
                    os.remove(key)

                with open(key, 'wb') as f:
                    f.seek(0)
                    f.write(bytearray(b"-----BEGIN RSA PRIVATE KEY-----\r\n"))
                    f.write(bytes("\r\n".join(textwrap.wrap(base64.b64encode(pk.pkey).decode('ascii'), 64)), 'utf-8'))
                    f.write(bytearray(b"\r\n-----END RSA PRIVATE KEY-----"))

                # Export at most the first two certificates of the chain.
                counter = 0
                cert = cert_dir + '/certificate.pem'
                if os.path.exists(cert):
                    os.remove(cert)

                with open(cert, 'wb') as f:
                    f.seek(0)
                    for c in pk.cert_chain:
                        f.write(bytearray(b"-----BEGIN CERTIFICATE-----\r\n"))
                        f.write(bytes("\r\n".join(textwrap.wrap(base64.b64encode(c[1]).decode('ascii'), 64)), 'utf-8'))
                        f.write(bytearray(b"\r\n-----END CERTIFICATE-----\r\n"))
                        counter = counter + 1
                        if counter == 2:
                            break

                # All trusted certificates go into ca.pem.
                ca = cert_dir + '/ca.pem'
                if os.path.exists(ca):
                    os.remove(ca)

                with open(ca, 'wb') as f:
                    for _, c in ks.certs.items():
                        f.write(bytearray(b"-----BEGIN CERTIFICATE-----\r\n"))
                        f.write(bytes("\r\n".join(textwrap.wrap(base64.b64encode(c.cert).decode('ascii'), 64)), 'utf-8'))
                        f.write(bytearray(b"\r\n-----END CERTIFICATE-----\r\n"))

        return uid

    @staticmethod
    def printJks(ks):
        """Print every private key, certificate and secret key in *ks*."""
        def print_pem(der_bytes, _type_):
            print("-----BEGIN %s-----" % _type_)
            print("\r\n".join(textwrap.wrap(base64.b64encode(der_bytes).decode('ascii'), 64)))
            print("-----END %s-----" % _type_)

        for _, pk in ks.private_keys.items():
            print("Private key: %s" % pk.alias)
            if pk.algorithm_oid == jks.util.RSA_ENCRYPTION_OID:
                print_pem(pk.pkey, "RSA PRIVATE KEY")
            else:
                print_pem(pk.pkey_pkcs8, "PRIVATE KEY")
            for c in pk.cert_chain:
                print_pem(c[1], "CERTIFICATE")
            print()

        for _, c in ks.certs.items():
            print("Certificate: %s" % c.alias)
            print_pem(c.cert, "CERTIFICATE")
            print()

        for _, sk in ks.secret_keys.items():
            print("Secret key: %s" % sk.alias)
            print(" Algorithm: %s" % sk.algorithm)
            print(" Key size: %d bits" % sk.key_size)
            print(" Key: %s" % "".join("{:02x}".format(b) for b in bytearray(sk.key)))
            print()
|
SophieBartmann/Faust-Bot | FaustBot/Communication/PingObservable.py | Python | gpl-3.0 | 831 | 0.003619 | import _thread
from FaustBot.Communication.Observable import Observable
class PingObservable(Observable):
    """Watches the raw IRC stream for server PING lines and dispatches the
    parsed ping to all registered observers, each on its own thread."""

    def input(self, raw_data, connection):
        # Only server PING lines (starting at position 0) are of interest;
        # everything else is silently ignored.
        if raw_data.find('PING') != 0:
            return
        # Whatever follows 'PING ' identifies the server to answer.
        # NOTE(review): no further validation is done here -- a client could
        # craft a fake PING line; confirm whether that matters upstream.
        data = {'raw': raw_data, 'server': raw_data.split('PING ')[1]}
        self.notify_observers(data, connection)

    def notify_observers(self, data, connection):
        # Fan out to every observer on a fresh thread via the unbound
        # update_on_ping so slow handlers cannot block the reader.
        for observer in self._observers:
            args = (observer, data, connection)
            _thread.start_new_thread(observer.__class__.update_on_ping, args)
|
jacyn/burst | webapp/userman/backends.py | Python | mit | 1,074 | 0.004655 | from django.contrib.auth.models import User
#import advantage.iam
import sys
class CustomUserBackend(object):
    """Custom Django auth backend: users stored without a local password are
    validated against an external IAM service (currently stubbed to accept)."""

    def authenticate(self, username=None, password=None):
        """Return the matching User on success, else None."""
        print >> sys.stderr, "starting to authenticate.. "
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # BUG FIX: the correct exception is User.DoesNotExist;
            # 'User.objects.DoesNotExists' does not exist and raised
            # AttributeError whenever the lookup failed.
            user = None

        # Only handle accounts that have no local password set.
        if user is not None and not user.password:
            print >> sys.stderr, "%s" % user.password
            login_valid = True
            #login_valid = advantage.iam.login(username, password)
            if login_valid:
                try:
                    user = User.objects.get(username=username)
                except User.DoesNotExist:
                    # First login: create a minimal local user record.
                    user = User(username=username, password='')
                    user.is_staff = False
                    user.is_superuser = False
                    user.save()
                return user
        return None

    def get_user(self, user_id):
        """Required backend hook: fetch a user by primary key, or None."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.