| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| Phrozyn/MozDef | tests/alerts/test_bruteforce_ssh.py | Python | mpl-2.0 | 7,170 | 0.00265 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertBruteforceSsh(AlertTestSuite):
alert_filename = "bruteforce_ssh"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_source": {
"summary": 'login invalid ldap_count_entries failed by 1.2.3.4',
"hostname": "exhostname",
"details": {
"program": "sshd",
"sourceipaddress": "1.2.3.4",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "bruteforce",
"severity": "NOTICE",
"summary": "10 ssh bruteforce attempts by 1.2.3.4 exho
|
stname (10 hits)",
"tags": ['ssh'],
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Po
|
sitive test with default event and default alert expected",
events=AlertTestSuite.create_events(default_event, 10),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 1})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 1})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'login failed'
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events with a summary of 'login failed'",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'invalid failed'
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events with a summary of 'invalid failed'",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'ldap_count_entries failed'
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events with a summary of 'ldap_count_entries failed'",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
events[8]['_source']['details']['sourceipaddress'] = "127.0.0.1"
events[9]['_source']['details']['sourceipaddress'] = "127.0.0.1"
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with 10 events however one has different sourceipaddress",
events=events,
)
)
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with not enough events",
events=AlertTestSuite.create_events(default_event, 9),
),
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'login good ldap_count_entries'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary without 'failed'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'failed'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'failed'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'login'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'login'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'invalid'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'invalid'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'ldap_count_entries'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'ldap_count_entries'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['details']['program'] = 'badprogram'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with bad program",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 3})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 3})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = event['_source']['summary'].replace('1.2.3.4', '11.22.33.44')
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with 11.22.33.44 as a whitelisted ip",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = event['_source']['summary'].replace('1.2.3.4', '55.66.77.88')
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with 55.66.77.88 as a whitelisted ip",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['details']['sourceipaddress'] = None
test_cases.append(
NegativeAlertTestCase(
description="Negative test case aggregation key excluded",
events=events,
)
)
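The cases above rely on two helpers that are not shown in this file, AlertTestSuite.create_events and AlertTestSuite.subtract_from_timestamp_lambda. As a rough sketch only, assuming create_events simply deep-copies the template event and stamps each copy with the current UTC time (the real MozDef helper may differ), it could look like:

```python
# Hypothetical sketch of the fan-out helper used by the tests above;
# the real AlertTestSuite.create_events in MozDef may behave differently.
import copy
from datetime import datetime

def create_events(default_event, num_events):
    """Return num_events deep copies of the template, each freshly timestamped."""
    events = []
    for _ in range(num_events):
        event = copy.deepcopy(default_event)
        now = datetime.utcnow().isoformat()
        event['_source']['utctimestamp'] = now
        event['_source']['receivedtimestamp'] = now
        events.append(event)
    return events
```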
| climapulse/dj-bgfiles | tests/test_http.py | Python | bsd-3-clause | 1,483 | 0.004752 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division, absolute_import
from bgfiles.http import create_content_disposition
from django.test import SimpleTestCase
class CreateContentDispositionTest(SimpleTestCase):
def test(self):
header = create_content_disposition('Fußball.pdf')
self.assertEqual(b'attachment; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf', header)
header = create_content_disposition('Fußball.pdf', attachment=False)
self.assertEqual(b'inline; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf', header)
header = create_content_disposition(b'Fussball.pdf')
self.assertEqual(b'attachment; filename="Fussball.pdf"', header)
header = create_content_disposition(b'Fussball.pdf', attachment=False)
self.assertEqual(b'inline; filename="Fussball.pdf"', header)
expected = (b'attachment; filename="Leery Jenkins My Man .pdf"; '
b'filename*=UTF-8\'\'L%C3%A9%C3%ABr%C5%93%C3%B8y%20%20Jenkins%20%20My%20Man%20.pdf')
self.assertEqual(create_content_disposition('Léërœøy \\Jenkins/"My Man".pdf'), expected)
expected = (b'inline; filename="Leery Jenkins My Man .pdf"; '
b'filename*=UTF-8\'\'L%C3%A9%C3%ABr%C5%93%C3%B8y%20%20Jenkins%20%20My%20Man%20.pdf')
self.assertEqual(create_content_disposition('Léërœøy \\Jenkins/"My Man".pdf', attachment=False), expected)
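The expected values above pair an ASCII-only filename fallback with an RFC 5987 filename* parameter carrying the percent-encoded UTF-8 name. A Python 3 sketch that reproduces these fixtures, though not necessarily the actual bgfiles implementation, is:

```python
# Hedged sketch of RFC 6266 / RFC 5987 header assembly consistent with the
# fixtures above; bgfiles.http.create_content_disposition may differ internally.
import unicodedata
from urllib.parse import quote

def content_disposition(filename, attachment=True):
    disposition = 'attachment' if attachment else 'inline'
    # Replace characters that are unsafe inside a quoted-string.
    safe = ''.join(' ' if c in '\\/"' else c for c in filename)
    # ASCII-only fallback for clients that ignore filename*.
    ascii_name = unicodedata.normalize('NFKD', safe).encode('ascii', 'ignore').decode('ascii')
    header = '%s; filename="%s"' % (disposition, ascii_name)
    extended = quote(safe.encode('utf-8'))
    if extended != ascii_name:
        # RFC 5987 extended parameter with the full UTF-8 name.
        header += "; filename*=UTF-8''%s" % extended
    return header.encode('ascii')

print(content_disposition('Fußball.pdf'))
# b'attachment; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf'
```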
| pgandev/RocketMap | pogom/webhook.py | Python | agpl-3.0 | 8,054 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import requests
from datetime import datetime
from cachetools import LFUCache
from requests_futures.sessions import FuturesSession
import threading
from .utils import get_args
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
log = logging.getLogger(__name__)
# How low do we want the queue size to stay?
wh_warning_threshold = 100
# How long can it be over the threshold, in seconds?
# Default: 5 seconds per 100 in threshold.
wh_threshold_lifetime = int(5 * (wh_warning_threshold / 100.0))
wh_lock = threading.Lock()
args = get_args()
def send_to_webhook(session, message_type, message):
if not args.webhooks:
# What are you even doing here...
log.warning('Called send_to_webhook() without webhooks.')
return
req_timeout = args.wh_timeout
data = {
'type': message_type,
'message': message
}
for w in args.webhooks:
try:
session.post(w, json=data, timeout=(None, req_timeout),
background_callback=__wh_completed)
except requests.exceptions.ReadTimeout:
log.exception('Response timeout on webhook endpoint %s.', w)
except requests.exceptions.RequestException as e:
log.exception(repr(e))
def wh_updater(args, queue, key_caches):
wh_threshold_timer = datetime.now()
wh_over_threshold = False
# Set up one session to use for all requests.
# Requests to the same host will reuse the underlying TCP
# connection, giving a performance increase.
session = __get_requests_session(args)
# Extract the proper identifier. This list also controls which message
# types are getting cached.
ident_fields = {
'pokestop': 'pokestop_id',
'pokemon': 'encounter_id',
'gym': 'gym_id',
'gym_details': 'gym_id'
}
# Instantiate WH LFU caches for all cached types. We separate the caches
# by ident_field types, because different ident_field (message) types can
# use the same name for their ident field.
for key in ident_fields:
key_caches[key] = LFUCache(maxsize=args.wh_lfu_size)
# The forever loop.
while True:
try:
# Loop the queue.
whtype, message = queue.get()
# Get the proper cache if this type has one.
key_cache = None
if whtype in key_caches:
key_cache = key_caches[whtype]
# Get the unique identifier to check our cache, if it has one.
ident = message.get(ident_fields.get(whtype), None)
# cachetools in Python2.7 isn't thread safe, so we add a lock.
with wh_lock:
# Only send if identifier isn't already in cache.
if ident is None or key_cache is None:
# We don't know what it is, or it doesn't have a cache,
# so let's just log and send as-is.
log.debug(
'Sending webhook item of uncached type: %s.', whtype)
send_to_webhook(session, whtype, message)
elif ident not in key_cache:
key_cache[ident] = message
log.debug('Sending %s to webhook: %s.', whtype, ident)
send_to_webhook(session, whtype, message)
else:
# Make sure to call key_cache[ident] in all branches so it
# updates the LFU usage count.
# If the object has changed in an important way, send new
# data to webhooks.
if __wh_object_changed(whtype, key_cache[ident], message):
key_cache[ident] = message
send_to_webhook(session, whtype, message)
log.debug('Sending updated %s to webhook: %s.',
whtype, ident)
else:
log.debug('Not resending %s to webhook: %s.',
whtype, ident)
# Helping out the GC.
del whtype
del message
del ident
# Webhook queue moving too slow.
if (not wh_over_threshold) and (
queue.qsize() > wh_warning_threshold):
wh_over_threshold = True
wh_threshold_timer = datetime.now()
elif wh_over_threshold:
if queue.qsize() < wh_warning_threshold:
wh_over_threshold = False
else:
timediff = datetime.now() - wh_threshold_timer
if timediff.total_seconds() > wh_threshold_lifetime:
log.warning('Webhook queue has been > %d (@%d);'
+ ' for over %d seconds,'
+ ' try increasing --wh-concurrency'
+ ' or --wh-threads.',
wh_warning_threshold,
queue.qsize(),
wh_threshold_lifetime)
queue.task_done()
except Exception as e:
log.exception('Exception in wh_updater: %s.', repr(e))
# Helpers
# Background handler for completed webhook requests.
# Currently doesn't do anything.
def __wh_completed():
pass
def __get_requests_session(args):
# Config / arg parser
num_retries = args.wh_retries
backoff_factor = args.wh_backoff_factor
pool_size = args.wh_concurrency
# Use requests & urllib3 to auto-retry.
# If the backoff_factor is 0.1, then sleep() will sleep for [0.1s, 0.2s,
# 0.4s, ...] between retries. It will also force a retry if the status
# code returned is 500, 502, 503 or 504.
session = FuturesSession(max_workers=pool_size)
# If any regular response is generated, no retry is done. Without using
# the status_forcelist, even a response with status 500 will not be
# retried.
retries = Retry(total=num_retries, backoff_factor=backoff_factor,
status_forcelist=[500, 502, 503, 504])
# Mount handler on both HTTP & HTTPS.
session.mount('http://', HTTPAdapter(max_retries=retries,
pool_connections=pool_size,
pool_maxsize=pool_size))
session.mount('https://', HTTPAdapter(max_retries=retries,
pool_connections=pool_size,
pool_maxsize=pool_size))
return session
def __get_key_fields(whtype):
key_fields = {
# lure_expiration is a UTC timestamp so it's good (Y).
'pokestop': ['enabled', 'latitude',
'longitude', 'lure_expiration', 'active_fort_modifier'],
'pokemon': ['spawnpoint_id', 'pokemon_id', 'latitude', 'longitude',
'disappear_time', 'move_1', 'move_2',
'individual_stamina', 'individual_defense',
'individual_attack', 'form', 'cp', 'pokemon_level'],
'gym': ['team_id', 'guard_pokemon_id',
'gym_points', 'enabled', 'latitude', 'longitude'],
'gym_details': ['latitude', 'longitude', 'team', 'pokemon']
}
return key_fields.get(whtype, [])
# Determine if a webhook object has changed in any important way (and
# requires a resend).
def __wh_object_changed(whtype, old, new):
# Only test for important fields: don't trust last_modified fields.
fields = __get_key_fields(whtype)
if not fields:
log.debug('Received an object of unknown type %s.', whtype)
return True
return not __dict_fields_equal(fields, old, new)
# Determine if two dicts have equal values for all keys in a list.
def __dict_fields_equal(keys, a, b):
for k in keys:
if a.get(k) != b.get(k):
return False
return True
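The comment inside wh_updater about calling key_cache[ident] in every branch matters because LFU eviction in cachetools is driven by access counts: an entry that is never read again looks unused and becomes the first eviction candidate. A small standalone illustration of the send / skip / resend pattern used above (identifiers and payloads are invented):

```python
# Standalone illustration of the LFU-backed dedup used in wh_updater above;
# the identifiers and payloads here are invented.
from cachetools import LFUCache

cache = LFUCache(maxsize=1000)

def should_send(ident, payload):
    """True if the payload is new or changed, False for an unchanged duplicate."""
    if ident not in cache:
        cache[ident] = payload        # first sighting: cache it and send
        return True
    if cache[ident] != payload:       # this lookup also bumps the LFU use count
        cache[ident] = payload        # changed in an important way: resend
        return True
    return False                      # unchanged: drop it

print(should_send('gym-1', {'team_id': 1}))   # True  (new)
print(should_send('gym-1', {'team_id': 1}))   # False (duplicate)
print(should_send('gym-1', {'team_id': 2}))   # True  (changed)
```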
| kingvuplus/boom2 | lib/python/Components/NetworkTime.py | Python | gpl-2.0 | 1,875 | 0.005867 |
# Embedded file name: /usr/lib/enigma2/python/Components/NetworkTime.py
from Components.Console import Console
from config import config
from enigma import eTimer, eDVBLocalTimeHandler, eEPGCache
from Tools.StbHardware import setRTCtime
from time import time, ctime
def AutoNTPSync(session = None, **kwargs):
global ntpsyncpoller
ntpsyncpoller = NTPSyncPoller()
ntpsyncpoller.start()
class NTPSyncPoller:
def __init__(self):
self.timer = eTimer()
self.Console = Console()
def start(self):
if self.timecheck not in self.timer.callback:
self.timer.callback.append(self.timecheck)
self.timer.startLongTimer(0)
def stop(self):
if self.timecheck in self.timer.callback:
self.timer.callback.remove(self.timecheck)
self.timer.stop()
def timecheck(self):
if config.misc.SyncTimeUsing.value == '1':
print '[NTP]: Updating'
self.Console.ePopen('/usr/bin/ntpdate-sync', self.update_schedule)
else:
self.update_schedule()
def update_schedule(self, result = None, retval = None, extra_args = None):
nowTime = time()
nowTimereal = ctime(nowTime)
if nowTime > 10000:
print '[NTP]: setting E2 unixtime:', nowTime
print '[NTP]: setting E2 realtime:', nowTimereal
setRTCtime(nowTime)
if config.misc.SyncTimeUsing.value == '1':
eDVBLocalTimeHandler.getInstance().setUseDVBTime(False)
else:
eDVBLocalTimeHandler.getInstance().setUseDVBTime(True)
eEPGCache.getInstance().timeUpdated()
self.timer.startLongTimer(int(config.misc.useNTPminutes.value) * 60)
else:
print 'NO TIME SET'
self.timer.startLongTimer(10)
| mahabuber/erpnext | erpnext/hr/report/monthly_salary_register/monthly_salary_register.py | Python | agpl-3.0 | 4,082 | 0.038707 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
salary_slips = get_salary_slips(filters)
columns, earning_types, ded_types = get_columns(salary_slips)
ss_earning_map = get_ss_earning_map(salary_slips)
ss_ded_map = get_ss_ded_map(salary_slips)
data = []
for ss in salary_slips:
row = [ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
ss.company, ss.month, ss.leave_without_pay, ss.payment_days]
for e in earning_types:
row.append(ss_earning_map.get(ss.name, {}).get(e))
row += [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
for d in ded_types:
row.append(ss_ded_map.get(ss.name, {}).get(d))
row += [ss.total_deduction, ss.net_pay]
data.append(row)
return columns, data
def get_columns(salary_slips):
columns = [
_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
_("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
_("Company") + ":Link/Company:120", _("M
|
onth") + "::80", _("Leave Without Pay") + ":Float:130",
_("Payment Days") + ":Float:120"
]
earning_types = frappe.db.sql_list("""select distinct e_type from `tabSalary Slip Earning`
where e_modified_amount != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
ded_types = frappe.db.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
where d_modified_amount != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
columns = columns + [(e + ":Currency:120") for e in earning_types] + \
["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
"Gross Pay:Currency:120"] + [(d + ":Currency:120") for d in ded_types] + \
["Total Deduction:Currency:120", "Net Pay:Currency:120"]
return columns, earning_types, ded_types
def get_salary_slips(filters):
conditions, filters = get_conditions(filters)
salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 1 %s
order by employee, month""" % conditions, filters, as_dict=1)
if not salary_slips:
msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
_(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)
return salary_slips
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
filters["month"] = month
conditions += " and month = %(month)s"
if filters.get("fiscal_year"): conditions += " and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
def get_ss_earning_map(salary_slips):
ss_earnings = frappe.db.sql("""select parent, e_type, e_modified_amount
from `tabSalary Slip Earning` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_earning_map = {}
for d in ss_earnings:
ss_earning_map.setdefault(d.parent, frappe._dict()).setdefault(d.e_type, [])
ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)
return ss_earning_map
def get_ss_ded_map(salary_slips):
ss_deductions = frappe.db.sql("""select parent, d_type, d_modified_amount
from `tabSalary Slip Deduction` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_ded_map = {}
for d in ss_deductions:
ss_ded_map.setdefault(d.parent, frappe._dict()).setdefault(d.d_type, [])
ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)
return ss_ded_map
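Both map builders return nested dicts keyed first by salary slip name and then by earning or deduction type, which is what lets execute() fill each component column with a plain .get() chain. An illustrative shape, with invented slip names and amounts:

```python
# Illustrative only: the shape of the maps built above, with invented values.
ss_earning_map = {
    "SAL/2015/0001": {"Basic": 25000.0, "House Rent Allowance": 10000.0},
    "SAL/2015/0002": {"Basic": 18000.0},
}

# A slip that lacks a component simply yields None for that column in execute():
earning_types = ["Basic", "House Rent Allowance"]
row = [ss_earning_map.get("SAL/2015/0002", {}).get(e) for e in earning_types]
print(row)  # [18000.0, None]
```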
| 163gal/Time-Line | libs/wx/tools/Editra/src/syntax/_vbscript.py | Python | gpl-3.0 | 3,982 | 0.004269 |
###############################################################################
# Name: vbscript.py #
# Purpose: Define VBScript syntax for highlighting and other features       #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: vbscript.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for VBScript.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _vbscript.py 63834 2010-04-03 06:04:33Z CJP $"
__revision__ = "$Revision: 63834 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
VBS_KW = ("addressof alias and as attribute base begin binary boolean byref "
"byte byval call case cdbl cint clng compare const csng cstr "
"currency date decimal declare defbool defbyte defcur defdate defdbl "
"defdec defint deflng defobj defsng defstr defvar dim do double each "
"else elseif empty end enum eqv erase error event exit explicit "
"false for friend function get global gosub goto if imp implements "
"in input integer is len let lib like load lock long loop lset me "
"mid midb mod new next not nothing null object on option optional "
"or paramarray preserve print private property public raiseevent "
"randomize redim rem resume return rset seek select set single "
"static step stop string sub text then time to true type typeof "
"unload until variant wend while with withevents xor")
# Syntax specifications
SYNTAX_ITEMS = [ (stc.STC_B_ASM, 'asm_style'),
(stc.STC_B_BINNUMBER, 'default_style'), # STYLE NEEDED
(stc.STC_B_COMMENT, 'comment_style'),
(stc.STC_B_CONSTANT, 'const_style'),
(stc.STC_B_DATE, 'default_style'), # STYLE NEEDED
(stc.STC_B_DEFAULT, 'default_style'),
(stc.STC_B_ERROR, 'error_style'),
(stc.STC_B_HEXNUMBER, 'number_style'),
(stc.STC_B_IDENTIFIER, 'default_style'),
(stc.STC_B_KEYWORD, 'keyword_style'),
(stc.STC_B_KEYWORD2, 'class_style'), # STYLE NEEDED
(stc.STC_B_KEYWORD3, 'funct_style'), # STYLE NEEDED
(stc.STC_B_KEYWORD4, 'scalar_style'), # STYLE NEEDED
(stc.STC_B_LABEL, 'directive_style'), # STYLE NEEDED
(stc.STC_B_NUMBER, 'number_style'),
(stc.STC_B_OPERATOR, 'operator_style'),
(stc.STC_B_PREPROCESSOR, 'pre_style'),
(stc.STC_B_STRING, 'string_style'),
(stc.STC_B_STRINGEOL, 'stringeol_style')
]
#---- Extra Properties ----#
FOLD = ("fold", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for VbScript"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_VBSCRIPT)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [(0, VBS_KW),]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u'\'']
| TheAltcoinBoard/XAB-withoutSecp256k1 | contrib/linearize/linearize-hashes.py | Python | mit | 2,868 | 0.034519 |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 15715
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
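The __main__ block reads a plain key=value configuration file and skips lines starting with #. A minimal CONFIG-FILE that sets every key the script consults could look like the following; the credentials are placeholders, while port and max_height mirror the script defaults:

```
# linearize-hashes example configuration (placeholder credentials)
host=127.0.0.1
port=15715
rpcuser=someuser
rpcpassword=somepassword
min_height=0
max_height=319000
```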
| ARCHER-CSE/parallel-io | benchmark/analysis/analyse_benchio_output.py | Python | gpl-3.0 | 4,610 | 0.014967 |
#!/usr/bin/env python
#
# Analyse benchio output files
#
# System modules for grabbing data
import sys
import os.path
import re
from glob import glob
import seaborn as sns
# Modules for analysing and visualising data
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
matplotlib.rcParams.update({'font.size': 9})
matplotlib.rcParams.update({'figure.autolayout': True})
def main(argv):
resdir = sys.argv[1]
usesize = int(sys.argv[2])
files = get_filelist(resdir, "benchio_")
csvdump = open('csvdump.csv', 'w')
csvdump.write('"Writers","Scheme","Write Bandwidth (MiB/s)"\n')
# Loop over files getting data
resframe_proto = []
for file in files:
infile = open(file, 'r')
resdict = {}
for line in infile:
if re.search('MPI-IO', line):
break
elif re.search('Starting job', line):
tokens = line.split()
resdict['JobID'] = tokens[2]
elif re.search('Running', line):
tokens = line.split()
resdict['Writers'] = int(tokens[2])
elif re.search('Array', line):
tokens = line.split()
x = int(tokens[4])
y = int(tokens[6])
z = int(tokens[8])
resdict['LocalSize'] = (x, y, z)
elif re.search('Global', line):
tokens = line.split()
x = int(tokens[4])
y = int(tokens[6])
z = int(tokens[8])
resdict['GlobalSize'] = (x, y, z)
elif re.search('Total', line):
tokens = line.split()
resdict['TotData'] = float(tokens[5])
infile.close()
infile = open(file, 'r')
timedict = resdict.copy()
for line in infile:
if re.search('HDF5', line):
break
elif re.search('Writing to', line):
tokens = line.split()
nstripe = 0
if re.match('striped', tokens[2]):
timedict['Striping'] = 'Stripe Count = -1'
nstripe = -1
elif re.match('defstriped', tokens[2]):
timedict['Striping'] = 'Stripe Count = 4'
nstripe = 4
elif re.match('unstriped', tokens[2]):
timedict['Striping'] = 'Stripe Count = 1'
nstripe = 1
elif re.match(' time', line):
tokens = line.split()
timedict['Write'] = float(tokens[6])
timedict['File'] = os.path.abspath(file)
timedict['Count'] = 1
resframe_proto.append(timedict)
csvstring = '{0},"SSF -c {1} -s 1m",{2}\n'.format(timedict['Writers'], nstripe, timedict['Write'])
csvdump.write(csvstring)
curstriping = timedict['Striping']
timedict = resdict.copy()
timedict['Striping'] = curstriping
infile.close()
csvdump.close()
resframe = pd.DataFrame(resframe_proto)
print 'Number of valid results files read = ', len(resframe.index)
resframe = resframe[resframe.LocalSize == (usesize, usesize, usesize) ]
print "Summary of all results found:"
print resframe
labels = map(int, resframe['Writers'].unique())
labels.sort()
# Get copy of dataframe with only numeric values
resframe_num = resframe.drop(['File', 'GlobalSize', 'TotData'], 1)
# What stats are we computing on which columns
groupf = {'Write':['min','median','max','mean'], 'Count':'sum'}
# Compute the maximum read and write bandwidths from the data
stats = resframe_num.sort('Writers').groupby(['Writers', 'Striping', 'LocalSize']).agg(groupf)
print "Useful statistics:"
print stats
print stats.to_csv(float_format='%.3f')
fig, ax = plt.subplots()
sns.pointplot(x='Writers', y='Write', data=resframe, hue='Striping', estimator=np.median, scale=0.5)
# sns.stripplot(x='Writers', y='Write', data=resframe, hue='Striping', jitter=True)
ax.set_ylim(ymin=0)
plt.ylabel('Bandwidth / MiB/s')
plt.xlabel('Writers')
plt.legend()
plt.savefig('bandwidth_stats.png')
plt.clf()
sys.exit(0)
def get_filelist(dir, stem):
"""
Get list of data files in the specified directory
"""
files = []
if os.path.exists(dir):
files = glob(os.path.join(dir, stem + '*' ))
files.sort()
else:
sys.stderr.write("Directory does not exist: {1}".format(dir))
sys.exit(1)
return files
if __name__ == "__main__":
main(sys.argv[1:])
| admiralspark/NetSpark-Scripts | Example_Scripts/TinyDB/dbInputData.py | Python | gpl-3.0 | 2,573 | 0.002721 |
'''
Usage:
dbInputData.py
dbInputData.py -h | --help
dbInputData.py [--debug] [-i INPUT] [-o FILE]
Options:
-h, --help Shows this menu.
-d, --debug Print debug information. This is the most verbose
option.
-i INPUT Input file [default: test.csv]
-o FILE TinyDB database to output to. [default: netspark.json]
'''
import logging
import csv
from tinydb import TinyDB, Query
from docopt import docopt
arguments = docopt(__doc__)
# Set logging level https://www.digitalocean.com/community/tutorials/how-to-use-logging-in-python-3
if arguments['--debug'] == True:
logging.basicConfig(level=logging.DEBUG)
print("Arguments: \n" + str(arguments))
else:
logging.basicConfig(level=logging.INFO)
# Define the Database
DB = TinyDB(arguments['-o'])
logging.info("Set TinyDB database to: " + str(arguments['-o']))
logging.debug("Loaded database: " + str(DB))
# The test file
CSVFILE = arguments['-i']
logging.info("Set CSV input file to: " + str(CSVFILE))
# The Magic (read CSV as dict, form dict with data we care about, dump it into db)
with open(CSVFILE, mode='r') as csvfile:
logging.debug("Attempting load of CSVFILE into dictionary...")
READER = csv.DictReader(csvfile)
logging.debug("csv.DictReader load success")
# Now iterate through every row in the CSVfile and make dictionaries
for row in READER:
logging.debug("Iterating through csv dictionary rows...")
dbdict = {
'hostname': row['SysName'],
'device_type': row['device_type'],
'ipaddr': row['IP_Address'],
'department': row['Department']
}
logging.debug("Made the following dbdict: " + str(dbdict))
Find = Query()
logging.debug("Begin searching for IP Address using dbdict['ipaddr']")
ifexists = DB.contains(Find.ipaddr == dbdict['ipaddr'])
if ifexists is True:
logging.debug("Found match for IP. Updating values...")
DB.update({'hostname': dbdict['hostname']}, Find.ipaddr == dbdict['ipaddr'])
DB.update({'device_type': dbdict['device_type']}, Find.ipaddr == dbdict['ipaddr'])
DB.update({'department': dbdict['department']}, Find.ipaddr == dbdict['ipaddr'])
logging.debug("Updated DB with values: " + str(dbdict))
else:
logging.debug("No match found for IP. Adding new DB entry...")
DB.insert(dbdict)
logging.debug("Added new values: " + str(dbdict))
| Clinical-Genomics/scout | tests/parse/test_parse_rank_score.py | Python | bsd-3-clause | 1,288 | 0.00854 |
from scout.parse.variant.rank_score import parse_rank_score
from scout.parse.variant.variant import parse_variant
def test_parse_rank_score():
## GIVEN a rank score string on genmod format
rank_scores_info = "123:10"
variant_score = 10.0
family_id = "123"
## WHEN parsing the rank score
parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
## THEN assert that the correct rank score is parsed
assert variant_score == parsed_rank_score
def test_parse_rank_score_no_score():
## GIVEN a empty rank score string
rank_scores_info = ""
family_id = "123"
## WHEN parsing the rank score
parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
## THEN assert that None is returned
assert parsed_rank_score == None
def test_parse_rank_score_variant(cyvcf2_variant, case_obj, scout_config):
## GIVEN a variant
rank_score = 15
case_id = case_obj["_id"]
## WHEN adding a rank score string to the INFO field
rank_score_str = f"{case_id}:{rank_score}"
cyvcf2_variant.INFO["RankScore"] = rank_score_str
## WHEN parsing the variant
var_info = parse_variant(cyvcf2_variant, case_obj)
## THEN assert that the correct score is parsed
assert var_info["rank_score"] == rank_score
| jumpstarter-io/nova | nova/tests/scheduler/test_weights.py | Python | apache-2.0 | 9,359 | 0.000427 |
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler weights.
"""
from nova import context
from nova import exception
from nova.openstack.common.fixture import mockpatch
from nova.scheduler import weights
from nova import test
from nova.tests import matchers
from nova.tests.scheduler import fakes
class TestWeighedHost(test.NoDBTestCase):
def test_dict_conversion(self):
host_state = fakes.FakeHostState('somehost', None, {})
host = weights.WeighedHost(host_state, 'someweight')
expected = {'weight': 'someweight',
'host': 'somehost'}
self.assertThat(host.to_dict(), matchers.DictMatches(expected))
def test_all_weighers(self):
classes = weights.all_weighers()
class_names = [cls.__name__ for cls in classes]
self.assertEqual(len(classes), 2)
self.assertIn('RAMWeigher', class_names)
self.assertIn('MetricsWeigher', class_names)
class RamWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(RamWeigherTestCase, self).setUp()
self.useFixture(mockpatch.Patch(
'nova.db.compute_node_get_all',
return_value=fakes.COMPUTE_NODES))
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
['nova.scheduler.weights.ram.RAMWeigher'])
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {}
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
return self.host_manager.get_all_host_states(ctxt)
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1.0)
self.assertEqual(weighed_host.obj.host, 'host4')
def test_ram_filter_multiplier1(self):
self.flags(ram_weight_multiplier=0.0)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# We do not know the host, all have same weight.
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 0.0)
def test_ram_filter_multiplier2(self):
self.flags(ram_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1.0 * 2)
self.assertEqual(weighed_host.obj.host, 'host4')
def test_ram_filter_negative(self):
self.flags(ram_weight_multiplier=1.0)
hostinfo_list = self._get_all_hosts()
host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
host_state = fakes.FakeHostState('negative', 'negative', host_attr)
hostinfo_list = list(hostinfo_list) + [host_state]
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# negativehost: free_ram_mb=-512
# so, host4 should win
weights = self.weight_handler.get_weighed_objects(self.weight_classes,
hostinfo_list, {})
weighed_host = weights[0]
self.assertEqual(weighed_host.weight, 1)
self.assertEqual(weighed_host.obj.host, "host4")
# and negativehost should lose
weighed_host = weights[-1]
self.assertEqual(weighed_host.weight, 0)
self.assertEqual(weighed_host.obj.host, "negative")
class MetricsWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(MetricsWeigherTestCase, self).setUp()
self.useFixture(mockpatch.Patch(
'nova.db.compute_node_get_all',
return_value=fakes.COMPUTE_NODES_METRICS))
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
['nova.scheduler.weights.metrics.MetricsWeigher'])
def _get_weighed_host(self, hosts, setting, weight_properties=None):
if not weight_properties:
weight_properties = {}
self.flags(weight_setting=setting, group='metrics')
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
return self.host_manager.get_all_host_states(ctxt)
def _do_test(self, settings, expected_weight, expected_host):
hostinfo_list = self._get_all_hosts()
weighed_host = self._get_weighed_host(hostinfo_list, settings)
self.assertEqual(weighed_host.weight, expected_weight)
self.assertEqual(weighed_host.obj.host, expected_host)
def test_single_resource(self):
# host1: foo=512
# host2: foo=1024
# host3: foo=3072
# host4: foo=8192
# so, host4 should win:
setting = ['foo=1']
self._do_test(setting, 1.0, 'host4')
def test_multiple_resource(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host2 should win:
setting = ['foo=0.0001', 'bar=1']
self._do_test(setting, 1.0, 'host2')
def test_single_resourcenegtive_ratio(self):
# host1: foo=512
# host2: foo=1024
# host3: foo=3072
# host4: foo=8192
# so, host1 should win:
setting = ['foo=-1']
self._do_test(setting, 1.0, 'host1')
def test_multiple_resource_missing_ratio(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host4 should win:
setting = ['foo=0.0001', 'bar']
self._do_test(setting, 1.0, 'host4')
def test_multiple_resource_wrong_ratio(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host4 should win:
setting = ['foo=0.0001', 'bar = 2.0t']
self._do_test(setting, 1.0, 'host4')
def _check_parsing_result(self, weigher, setting, results):
self.flags(weight_setting=setting, group='metrics')
weigher._parse_setting()
self.assertEqual(len(weigher.setting), len(results))
for item in results:
self.assertIn(item, weigher.setting)
def test_parse_setting(self):
weigher = self.weight_classes[0]()
self._check_parsing_result(weigher,
['foo=1'],
[('foo', 1.0)])
self._check_parsing_result(weigher,
['foo=1', 'bar=-2.1'],
[('foo', 1.0), ('bar', -2.1)])
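The expected weights in these tests are consistent with min-max normalisation of each host's metric before the configured multiplier is applied, which is why the best host lands on exactly 1.0 times the multiplier and the worst on 0.0. A worked example with the free_ram_mb values from the comments above:

```python
# Worked example of the min-max normalisation these expectations imply,
# using the free_ram_mb values quoted in the comments above.
free_ram = {'host1': 512, 'host2': 1024, 'host3': 3072, 'host4': 8192, 'negative': -512}
ram_weight_multiplier = 1.0

lo, hi = min(free_ram.values()), max(free_ram.values())
weights = {h: ram_weight_multiplier * (v - lo) / (hi - lo) for h, v in free_ram.items()}

print(weights['host4'])    # 1.0 -> the winner in test_ram_filter_negative
print(weights['negative']) # 0.0 -> the loser
```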
| firasbenmakhlouf/JobLookup | annonce/apps.py | Python | mit | 130 | 0 |
from __future__ import unicode_literals
from django.apps import AppConfig
class AnnonceConfig(AppConfig):
name = 'annonce'
| psi4/psi4 | psi4/driver/p4util/inpsight.py | Python | lgpl-3.0 | 23,279 | 0.016624 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import math
import os
from datetime import date
# yapf: disable
class InPsight:
# POV-Ray defines
defines = {}
defines['Shadows'] = 'false'
defines['Background_Color'] = '<0.6,0.6,0.6>'
defines['Output_File_Type'] = 'N'
defines['Output_Alpha'] = 'true'
defines['Light_Color'] = '<1,1,1>'
defines['Filename'] = 'inpsight'
defines['Filepath'] = os.getcwd()
defines['Antialias'] = 'true'
defines['Antialias_Threshold'] = '0.1'
# Molecule geometry
atoms = [] # (Z,x,y,z,R,r,g,b,t) in bohr
bonds = [] # (x1,y1,z1,R1,x2,y2,z2,R2,r,g,b,t)
# Molecular geometry defines
colors = []
radii = []
radial_scale = 0.25
bond_width = 0.2 # bohr
bohr_per_ang = 1.8897161646320724
bonding_alpha = 0.65 # Used to select/reject bonds via sum of vDW radii
# View defines (high-level)
azimuth = 0.0
elevation = 0.0
zoom = 0.5
height = 900
width = 1200
# Camera positions (low-level)
location = [1.0,0.0,0.0]
up = [0.0,0.75,0.0]
right = [1.0,0.0,0.0]
sky = [0.0,-1.0,0.0]
look_at = [0.0,0.0,0.0]
light = [1.0,0.0,0.0]
light_color = [0.6,0.6,0.6]
# Standard Jmol colors, 256-based
colors.append([0,0,0])
colors.append([255,255,255])
colors.append([217,255,255])
colors.append([204,128,255])
colors.append([194,255,0])
colors.append([255,181,181])
colors.append([144,144,144])
colors.append([48,80,248])
colors.append([255,13,13])
colors.append([144,224,80])
colors.append([179,227,245])
colors.append([171,92,242])
colors.append([138,255,0])
colors.append([191,166,166])
colors.append([240,200,160])
colors.append([255,128,0])
colors.append([255,255,48])
colors.append([31,240,31])
colors.append([128,209,227])
colors.append([143,64,212])
colors.append([61,255,0])
colors.append([230,230,230])
colors.append([191,194,199])
colors.append([166,166,171])
colors.append([138,153,199])
colors.append([156,122,199])
colors.append([224,102,51])
colors.append([240,144,160])
colors.append([80,208,80])
colors.append([200,128,51])
colors.append([125,128,176])
colors.append([194,143,143])
colors.append([102,143,143])
colors.append([189,128,227])
colors.append([255,161,0])
colors.append([166,41,41])
colors.append([92,184,209])
colors.append([112,46,176])
colors.append([0,255,0])
colors.append([148,255,255])
colors.append([148,224,224])
colors.append([115,194,201])
colors.append([84,181,181])
colors.append([59,158,158])
colors.append([36,143,143])
colors.append([10,125,140])
colors.append([0,105,133])
colors.append([192,192,192])
colors.append([255,217,143])
colors.append([166,117,115])
colors.append([102,128,128])
colors.append([158,99,181])
colors.append([212,122,0])
colors.append([148,0,148])
colors.append([66,158,176])
colors.append([87,23,143])
colors.append([0,201,0])
colors.append([112,212,255])
colors.append([255,255,199])
colors.append([217,255,199])
colors.append([199,255,199])
colors.append([163,255,199])
colors.append([143,255,199])
colors.append([97,255,199])
colors.append([69,255,199])
colors.append([48,255,199])
colors.append([31,255,199])
colors.append([0,255,156])
colors.append([0,230,117])
colors.append([0,212,82])
colors.append([0,191,56])
colors.append([0,171,36])
colors.append([77,194,255])
colors.append([77,166,255])
colors.append([33,148,214])
colors.append([38,125,171])
colors.append([38,102,150])
colors.append([23,84,135])
colors.append([208,208,224])
colors.append([255,209,35])
colors.append([184,184,208])
colors.append([166,84,77])
colors.append([87,89,97])
colors.append([158,79,181])
colors.append([171,92,0])
colors.append([117,79,69])
colors.append([66,130,150])
colors.append([66,0,102])
colors.append([0,125,0])
colors.append([112,171,250])
colors.append([0,186,255])
colors.append([0,161,255])
colors.append([0,143,255])
colors.append([0,128,255])
colors.append([0,107,255])
colors.append([84,92,242])
colors.append([120,92,227])
colors.append([138,79,227])
colors.append([161,54,212])
colors.append([179,31,212])
colors.append([179,31,186])
colors.append([179,13,166])
colors.append([189,13,135])
colors.append([199,0,102])
colors.append([204,0,89])
colors.append([209,0,79])
colors.append([217,0,69])
colors.append([224,0,56])
colors.append([230,0,46])
colors.append([235,0,38])
# Approximate vDW radii in angstrom
radii.append(2.0)
radii.append(1.001)
radii.append(1.012)
radii.append(0.825)
radii.append(1.408)
radii.append(1.485)
radii.append(1.452)
radii.append(1.397)
radii.append(1.342)
radii.append(1.287)
radii.append(1.243)
radii.append(1.144)
radii.append(1.364)
radii.append(1.639)
radii.append(1.716)
radii.append(1.705)
radii.append(1.683)
radii.append(1.639)
radii.append(1.595)
radii.append(1.485)
radii.append(1.474)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.650)
radii.append(1.727)
radii.append(1.760)
radii.append(1.771)
radii.append(1.749)
radii.append(1.727)
radii.append(1.628)
radii.append(1.606)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.672)
radii.append(1.804)
radii.append(1.881)
radii.append(1.892)
radii.append(1.892)
radii.append(1.881)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(
| carze/cutlass | cutlass/HostVariantCall.py | Python | mit | 23,915 | 0.000794 |
"""
This module models the host variant call object.
"""
import json
import logging
import os
import string
from cutlass.iHMPSession import iHMPSession
from cutlass.Base import Base
from cutlass.aspera import aspera
from cutlass.Util import *
# pylint: disable=W0703, C1801
# Create a module logger named after the module
module_logger = logging.getLogger(__name__)
# Add a NullHandler for the case if no logging is configured by the application
module_logger.addHandler(logging.NullHandler())
class HostVariantCall(Base):
"""
The class models host variant call data for the iHMP project. This class
contains all the fields required to save a HostVariantCall object to OSDF.
Attributes:
namespace (str): The namespace this class will use in the OSDF instance
"""
namespace = "ihmp"
aspera_server = "aspera2.ihmpdcc.org"
def __init__(self, *args, **kwargs):
"""
Constructor for the HostVariantCall class. This initializes
the fields specific to the class, and inherits from the Base class.
Args:
None
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.logger.addHandler(logging.NullHandler())
# These are common to all objects
self._id = None
self._version = None
self._links = {}
self._tags = []
# These are particular to HostVariantCall objects
self._checksums = None
self._comment = None
self._date = None
self._format = None
self._local_file = None
self._reference = None
self._size = None
self._study = None
self._subtype = None
self._urls = ['']
self._variant_calling_process = None
# Optional properties
self._format_doc = None
self._private_files = None
self._sop = None
super(HostVariantCall, self).__init__(*args, **kwargs)
def validate(self):
"""
Validates the current object's data/JSON against the current schema
in the OSDF instance for that specific object. All required fields
for that specific object must be present.
Args:
None
Returns:
A list of strings, where each string is the error that the
validation raised during OSDF validation
"""
self.logger.debug("In validate.")
document = self._get_raw_doc()
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
(valid, error_message) = session.get_osdf().validate_node(document)
problems = []
if not valid:
self.logger.info("Validation did not succeed for %s.", __name__)
problems.append(error_message)
if self._private_files:
self.logger.info("User specified the files are private.")
else:
self.logger.info("Data is NOT private, so check that local_file is set.")
if self._local_file is None:
problems.append("Local file is not yet set.")
elif not os.path.isfile(self._local_file):
problems.append("Local file does not point to an actual file.")
if 'computed_from' not in self._links.keys():
problems.append("Must add a 'computed_from' link to a host_wgs_raw_seq_set.")
self.logger.debug("Number of validation problems: %s.", len(problems))
return problems
def is_valid(self):
"""
Validates the current object's data/JSON against the current schema
in the OSDF instance for the specific object. However, unlike
validates(), this method does not provide exact error messages,
it states if the validation was successful or not.
Args:
None
Returns:
True if the data validates, False if the current state of
fields in the instance do not validate with the OSDF instance
"""
self.logger.debug("In is_valid.")
problems = self.validate()
valid = True
if len(problems):
self.logger.error("There were %s problems.", str(len(problems)))
valid = False
self.logger.debug("Valid? %s", str(valid))
return valid
@property
def checksums(self):
"""
str: One or more checksums used to ensure file integrity.
"""
self.logger.debug("In 'checksums' getter.")
return self._checksums
@checksums.setter
@enforce_dict
def checksums(self, checksums):
"""
The setter for the checksum data.
Args:
checksums (dict): The checksums for the data file.
Returns:
None
"""
self.logger.debug("In 'checksums' setter.")
self._checksums = checksums
@property
def comment(self):
"""
str: Free-text comment.
"""
self.logger.debug("In 'comment' getter.")
return self._comment
@comment.setter
@enforce_string
def comment(self, comment):
"""
The setter for the comment field. The comment must be a string,
and less than 512 characters.
Args:
comment (str): The new comment to add to the string.
Returns:
None
"""
self.logger.debug("In 'comment' setter.")
self._comment = comment
@property
def date(self):
"""
str: Date on which the output were generated.
"""
self.logger.debug("In 'date' getter.")
return self._date
@date.setter
@enforce_string
@enforce_past_date
def date(self, date):
"""
The date on which the output were generated. The date
must be in the past.
Args:
date (str): The date.
Returns:
None
"""
self.logger.debug("In 'date' setter.")
self._date = date
@property
def format(self):
"""
str: The file format of the sequence file.
"""
self.logger.debug("In 'format' getter.")
return self._format
@format.setter
@enforce_string
def format(self, format_str):
"""
The setter for the format. This must be either 'vcf' or 'txt'.
Args:
format_str (str): The new format string for the current object.
Returns:
None
"""
self.logger.debug("In 'format' setter.")
formats = ["vcf", "txt"]
if format_str in formats:
self._format = format_str
else:
raise Exception("Format must be either vcf or txt.")
@property
def format_doc(self):
"""
str: URL for documentation of file format.
"""
self.logger.debug("In 'format_doc' getter.")
return self._format_doc
@format_doc.setter
@enforce_string
def format_doc(self, format_doc):
"""
The setter for the file format documentation URL.
Args:
format_doc (str): The new format_doc for the current object.
Returns:
None
"""
self.logger.debug("In 'format_doc' setter.")
self._format_doc = format_doc
@property
def local_file(self):
"""
str: URL to the local file to upload to the server.
"""
self.logger.debug("In 'local_file' getter.")
return self._local_file
@local_file.setter
@enforce_string
def local_file(self, local_file):
"""
The setter for the local file.
Args:
local_file (str): The URL to the local file that should
be uploaded to the server.
Returns:
None
"""
self.logger.debug("In 'local_file' setter.")
self._local_file = local_file
@property
def private_files(self):
"""
bool: Whether this object describes private data that should not
be uploaded to the DCC. Defaults to false.
"""
self.logger.debug("In 'private_files' getter
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractSpiritGodShura.py | Python | bsd-3-clause | 484 | 0.024793 |
def extractSpiritGodShura(item):
"""
# Sousetsuka
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('Chapter') and item['tags'] == ['Chapters']:
if ':' in item['title'] and not postfix:
postfix = item['title'].split(':')[-1]
return buildReleaseMessageWithType(item, 'Spirit God Shura', vol, chp, postfix=postfix, tl_type='oel')
return False
| laenderoliveira/exerclivropy | cap09/exercicio-09-21.py | Python | mit | 2,962 | 0 |
agenda = []
def pedenome():
return input("Nome: ").replace("#", "$")
def pedetelefone():
return input("Telefone: ").replace("#", "$")
def pedearquivo():
return input("Nome do arquivo: ")
def mostra(nome, telefone):
print(f"Nome: {nome} Telefone: {telefone}")
def pesquisa(nome):
mnome = nome.lower()
for p, e in enumerate(agenda):
if e[0].lower() == mnome:
return p
return None
def novo():
nome = pedenome()
telefone = pedetelefone()
agenda.append([nome, telefone])
def apaga():
nome = pedenome()
p = pesquisa(nome)
if p is not None:
m = "Certeza que quer excluir? (1 - Para confirmar / 0 - para sair): "
valor = faixa(m, 0, 1)
if valor == 1:
del agenda[p]
else:
print("Não foi apagado!")
else:
print("Nome não encontrado.")
def altera():
p = pesquisa(pedenome())
if p is not None:
print("Encontrado!")
nome = agenda[p][0]
telefone = agenda[p][1]
mostra(nome, telefone)
nome = pedenome()
telefone = pedetelefone()
m = "Certeza que quer alterar? (1 - Para confirmar / 0 - para sair): "
valor = faixa(m, 0, 1)
if valor == 1:
agenda[p] = [nome, telefone]
else:
print("Não alterado!")
else:
print("Não encontrado")
def lista():
print("\nAgenda\n")
print("-"*6)
for n, d in enumerate(agenda):
nome, telefone = d
print(n+1, end=' ')
mostra(nome, telefone)
print("-"*6)
def grava():
nomearquivo = pedearquivo()
arquivo = open(nomearquivo, "w")
for nome, telefone in agenda:
arquivo.write(f"{nome}#{telefone}\n")
arquivo.close()
def le():
global agenda
agenda = []
nomearquivo = pedearquivo()
arquivo = open(nomearquivo, "r")
for linha in arquivo.readlines():
nome, telfone = linha.strip().split("#")
agenda.append([nome, telfone])
arquivo.close()
def faixa(pergunta, i, f):
while True:
try:
valor = int(input(pergunta))
if valor >= i and valor <= f:
return valor
except ValueError:
print(f"Valor inválido, favor digitar valor entre {i} e {f}")
def ordena():
global agenda
agenda.sort()
lista()
def menu():
print("""
0 - Sair
1 - Novo
2 - Alterar
3 - Excluir
4 - Lista
5 - Grava
6 - Lê
7 - Ordena por Nome
""")
la = len(agenda)
print(f"{la} contato(s) na agenda.")
return faixa("Escola uma opção: ", 0, 7)
while True:
opcao = menu()
if opcao == 0:
break
elif opcao == 1:
novo()
elif opcao == 2:
altera()
elif opcao == 3:
apaga()
elif opcao == 4:
lista()
elif opcao == 5:
grava()
elif opcao == 6:
le()
elif opcao == 7:
ordena()
| antoinecarme/pyaf | tests/artificial/transf_None/trend_Lag1Trend/cycle_12/ar_/test_artificial_32_None_Lag1Trend_12__20.py | Python | bsd-3-clause | 259 | 0.088803 |
import pyaf.Bench.TS
|
_datasets as tsds
import tests.artificial.process_artificial_dataset as art
|
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
|
google/TensorNetwork
|
tensornetwork/backends/jax/jax_backend.py
|
Python
|
apache-2.0
| 36,454
| 0.004197
|
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Tuple, Callable, List, Text, Type, Sequence
from typing import Union
from tensornetwork.backends import abstract_backend
from tensornetwork.backends.numpy import decompositions
import numpy as np
from tensornetwork.backends.jax import jitted_functions
from functools import partial
import warnings
Tensor = Any
# pylint: disable=abstract-method
_CACHED_MATVECS = {}
_CACHED_FUNCTIONS = {}
class JaxBackend(abstract_backend.AbstractBackend):
"""See abstract_backend.AbstractBackend for documentation."""
def __init__(self, dtype: Optional[np.dtype] = None,
precision: Optional[Text] = None) -> None:
# pylint: disable=global-variable-undefined
global libjax # Jax module
global jnp # jax.numpy module
global jsp # jax.scipy module
super().__init__()
try:
#pylint: disable=import-outside-toplevel
import jax
except ImportError as err:
raise ImportError("Jax not installed, please switch to a different "
"backend or install Jax.") from err
libjax = jax
jnp = libjax.numpy
jsp = libjax.scipy
self.name = "jax"
self._dtype = np.dtype(dtype) if dtype is not None else None
self.jax_precision = precision if precision is not None else libjax.lax.Precision.DEFAULT #pylint: disable=line-too-long
def tensordot(self, a: Tensor, b: Tensor,
axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
return jnp.tensordot(a, b, axes, precision=self.jax_precision)
def reshape(self, tensor: Tensor, shape: Tensor) -> Tensor:
return jnp.reshape(tensor, np.asarray(shape).astype(np.int32))
def transpose(self, tensor, perm=None) -> Tensor:
return jnp.transpose(tensor, perm)
def shape_concat(self, values: Tensor, axis: int) -> Tensor:
return np.concatenate(values, axis)
def slice(self, tensor: Tensor, start_indices: Tuple[int, ...],
slice_sizes: Tuple[int, ...]) -> Tensor:
if len(start_indices) != len(slice_sizes):
raise ValueError("Lengths of start_indices and slice_sizes must be"
"identical.")
return libjax.lax.dynamic_slice(tensor, start_indices, slice_sizes)
def svd(
self,
tensor: Tensor,
pivot_axis: int = -1,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
return
|
decompositions.svd(
jnp,
tensor,
pivot_axis,
max_singular_values,
max_truncation_error,
relative=relative)
def qr(
self,
tensor: Tensor,
|
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
return decompositions.qr(jnp, tensor, pivot_axis, non_negative_diagonal)
def rq(
self,
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
return decompositions.rq(jnp, tensor, pivot_axis, non_negative_diagonal)
def shape_tensor(self, tensor: Tensor) -> Tensor:
return tensor.shape
def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return tensor.shape
def sparse_shape(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return self.shape_tuple(tensor)
def shape_prod(self, values: Tensor) -> Tensor:
return np.prod(values)
def sqrt(self, tensor: Tensor) -> Tensor:
return jnp.sqrt(tensor)
def convert_to_tensor(self, tensor: Tensor) -> Tensor:
if (not isinstance(tensor, (np.ndarray, jnp.ndarray))
and not jnp.isscalar(tensor)):
raise TypeError(("Expected a `jnp.array`, `np.array` or scalar. "
f"Got {type(tensor)}"))
result = jnp.asarray(tensor)
return result
def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return jnp.tensordot(tensor1, tensor2, 0,
precision=self.jax_precision)
def einsum(self,
expression: str,
*tensors: Tensor,
optimize: bool = True) -> Tensor:
return jnp.einsum(expression, *tensors, optimize=optimize,
precision=self.jax_precision)
def norm(self, tensor: Tensor) -> Tensor:
return jnp.linalg.norm(tensor)
def eye(self,
N,
dtype: Optional[np.dtype] = None,
M: Optional[int] = None) -> Tensor:
dtype = dtype if dtype is not None else jnp.float64
return jnp.eye(N, M=M, dtype=dtype)
def ones(self,
shape: Tuple[int, ...],
dtype: Optional[np.dtype] = None) -> Tensor:
dtype = dtype if dtype is not None else jnp.float64
return jnp.ones(shape, dtype=dtype)
def zeros(self,
shape: Tuple[int, ...],
dtype: Optional[np.dtype] = None) -> Tensor:
dtype = dtype if dtype is not None else jnp.float64
return jnp.zeros(shape, dtype=dtype)
def randn(self,
shape: Tuple[int, ...],
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None) -> Tensor:
if not seed:
seed = np.random.randint(0, 2**63)
key = libjax.random.PRNGKey(seed)
dtype = dtype if dtype is not None else np.dtype(np.float64)
def cmplx_randn(complex_dtype, real_dtype):
real_dtype = np.dtype(real_dtype)
complex_dtype = np.dtype(complex_dtype)
key_2 = libjax.random.PRNGKey(seed + 1)
real_part = libjax.random.normal(key, shape, dtype=real_dtype)
complex_part = libjax.random.normal(key_2, shape, dtype=real_dtype)
unit = (
np.complex64(1j)
if complex_dtype == np.dtype(np.complex64) else np.complex128(1j))
return real_part + unit * complex_part
if np.dtype(dtype) is np.dtype(jnp.complex128):
return cmplx_randn(dtype, jnp.float64)
if np.dtype(dtype) is np.dtype(jnp.complex64):
return cmplx_randn(dtype, jnp.float32)
return libjax.random.normal(key, shape).astype(dtype)
def random_uniform(self,
shape: Tuple[int, ...],
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None) -> Tensor:
if not seed:
seed = np.random.randint(0, 2**63)
key = libjax.random.PRNGKey(seed)
dtype = dtype if dtype is not None else np.dtype(np.float64)
def cmplx_random_uniform(complex_dtype, real_dtype):
real_dtype = np.dtype(real_dtype)
complex_dtype = np.dtype(complex_dtype)
key_2 = libjax.random.PRNGKey(seed + 1)
real_part = libjax.random.uniform(
key,
shape,
dtype=real_dtype,
minval=boundaries[0],
maxval=boundaries[1])
complex_part = libjax.random.uniform(
key_2,
shape,
dtype=real_dtype,
minval=boundaries[0],
maxval=boundaries[1])
unit = (
np.complex64(1j)
if complex_dtype == np.dtype(np.complex64) else np.complex128(1j))
return real_part + unit * complex_part
if np.dtype(dtype) is np.dtype(jnp.complex128):
return cmplx_random_uniform(dtype, jnp.float64)
if np.dtype(dtype) is np.dtype(jnp.complex64):
return cmplx_random_uniform(dtype, jnp.float32)
return libjax.random.uniform(
key, shape, minval=boundaries[0], maxval=boundaries[1]).astype(dtype)
def eigs(self, #pylint: disable=arguments-differ
A: Callable,
args: Optional[List] = None,
initial_
|
egentry/stellarstructure
|
integrate.py
|
Python
|
mit
| 6,257
| 0.042033
|
import numpy as np
import scipy.integrate as integ
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
import derivs
import gasproperties
import opacity
import loadinitial
import modelparameters
from multiprocessing import Pool
def deriv_wrapper(y, x, X, Y, Z, mu):
"""
Creates a system of 1st order ode's to be solved
Assumes:
mass, m as independent variable
Inputs:
y 1x4 float - consists of:
- r(m), radius [cm]
- l(m), luminosity [erg s^-1]
- P(m), total pressure [dyne cm^-2]
- T(m), Temperature [K]
x 1x1 float - consists of:
- m, mass [g]
params 1x4 float - consists of:
- X, hydrogen mass fraction
- Y, helium mass fraction
- Z, metals mass fraction
- mu, mean molecular weight
Outputs:
dy_dx 1x4 float - consists of:
- dr(m)/dm, radius derivative [cm g^-1]
- dl(m)/dm, luminosity derivative [erg s^-1 g^-1]
- dP(m)/dm, total pressure derivative [dyne cm^-2 g^-1]
- dT(m)/dm, Temperature derivative [K g^-1]
Warnings:
"""
m = x
r, l, P, T = y
beta = gasproperties.calculate_beta(P, T)
rho = gasproperties.calculate_density(P * beta, T, mu)
kappa = opacity.calculate_opacity(T, rho)
dr_dm = derivs.calculate_dr_dm(r, rho)
dl_dm = derivs.calculate_dl_dm(T, rho, X, Y, Z)
dP_dm = derivs.calculate_dP_dm(m, r)
dT_dm = derivs.calculate_dT_dm(m, r, l, P, T, kappa)
dy_dx = [dr_dm, dl_dm, dP_dm, dT_dm]
return dy_dx
def integrate_outwards(M_star, m_fitting_point, P_c, T_c, mu, X, Y, Z,
n_steps=1e4,
logspacing=True,
file_suffix="",
write=False):
m0 = 1e-8 * M_star
beta = gasproperties.calculate_beta(P_c, T_c)
rho = gasproperties.calculate_density(P_c * beta, T_c, mu)
r0, l0, P0, T0 = loadinitial.load1(m0, P_c, T_c, mu, X, Y, Z)
y0 = [r0, l0, P0, T0]
mu = modelparameters.mu
params = (X, Y, Z, mu)
if logspacing is True:
m = np.logspace(np.log10(m0), np.log10(m_fitting_point), n_steps)
else:
m = np.linspace(m0, m_fitting_point, n_steps)
y, infodict = integ.odeint(deriv_wrapper, y0, m,
mxstep=500,
args=params, full_output=True)
r,l,P,T = y.transpose()
sol = np.column_stack((m, y))
if write is True:
np.savetxt('data/sol_outwards' + file_suffix + '.dat', sol,
header=" \t\t m [m]\t\t\t\t\t r [cm]\t\t\t\t\t\t l [erg s^-1]\t\t\t\t\t P [dyne cm^-2]\t\t\t\t\t\t T [K]")
plt.figure(1)
plt.subplot(221)
plt.plot(m / M_star, r)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$r(m)$")
plt.subplot(222)
plt.semilogy(m / M_star, l)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$\ell(m)$")
plt.subplot(223)
plt.semilogy(m / M_star, P)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$P(m)$")
plt.subplot(224)
plt.semilogy(m / M_star, T)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$T(m)$")
plt.savefig("plots/stellar_model_outwards" + file_suffix + ".eps")
plt.savefig("plots/stellar_model_outwards" + file_suffix + ".pdf")
# plt.show()
plt.close()
return m, y, infodict
def integrate_inwards(M_star, m_fitting_point, R_star, L_star, mu, X, Y, Z,
n_steps=1e4,
logspacing=False,
file_suffix="",
write=False):
r0, l0, P0, T0 = loadinitial.load2(M_star, R_star, L_star, mu)
y0 = [r0, l0, P0, T0]
mu = modelparameters.mu
params = (X, Y, Z, mu)
if logspacing is True:
m = np.logspace(np.log10(m_fitting_point), np.log10(M_star), n_steps)
else:
m = np.linspace(m_fitting_point, M_star, n_steps)
m = np.flipud(m) #reverse direction of integration
y, infodict = integ.odeint(deriv_wrapper, y0, m,
mxstep=5000,
args=params, full_output=True)
r,l,P,T = y.transpose()
sol = np.column_stack((m, y))
if write is True:
np.savetxt('data/sol_inwards' + file_suffix + '.dat', sol,
header=" \t\t m [m]\t\t\t\t\t r [cm]\t\t\t\t\t\t l [erg s^-1]\t\t\t\t\t P [dyne cm^-2]\t\t\t\t\t\t T [K]")
plt.figure(1)
plt.subplot(221)
plt.plot(m / M_star, r)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$r(m)$")
plt.subplot(222)
plt.semilogy(m / M_star, l)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$\ell(m)$")
plt.subplot(223)
plt.semilogy(m / M_star, P)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$P(m)$")
plt.subplot(224)
plt.semilogy(m / M_star, T)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$T(m)$")
plt.savefig("plots/stellar_model_inwards" + file_suffix + ".pdf")
# plt.show()
plt.close()
return m, y, infodict
def test():
X = modelparameters.X
Y = modelparameters.Y
Z = modelparameters.Z
mu = modelparameters.mu
params = (X, Y, Z, mu)
P_c = modelparameters.P_c # core pressure, [dyne cm^-2]
T_c = modelparameters.T_c # core temperature, [K]
M_star = modelparameters.M_star
R_star = modelparameters.R_star
L_star = modelparameters.L_star
m_fitting_point = modelparameters.m_fitting_point
m_outward, y_outward, infodict_outward = integrate_outwards(M_star,
m_fitting_point, P_c, T_c, mu, X, Y, Z, n_steps = 5e1)
m_inward, y_inward, infodict_inward = integrate_inwards(M_star,
m_fitting_point, R_star, L_star, mu, X, Y, Z, n_steps = 5e1)
r_inward, l_inward, P_inward, T_inward = y_inward.transpose()
r_outward, l_outward, P_outward, T_outward = y_outward.transpose()
m_tot = np.concatenate((m_outward, np.flipud(m_inward)))
r_tot = np.concatenate((r_outward, np.flipud(r_inward)))
l_tot = np.concatenate((l_outward, np.flipud(l_inward)))
P_tot = np.concatenate
|
((P_outward, np.flipud(P_inward)))
T_tot = np.concatenate((T_outward, np.flipud(T_inward)))
plt.figure(1)
plt.subplot(221)
plt.plot(m_tot / M_star, r_tot)
plt.xlabel(
|
r"$\frac{m}{M}$")
plt.ylabel(r"$r(m)$")
plt.subplot(222)
plt.semilogy(m_tot / M_star, l_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$\ell(m)$")
plt.subplot(223)
plt.semilogy(m_tot / M_star, P_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$P(m)$")
plt.subplot(224)
plt.semilogy(m_tot / M_star, T_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$T(m)$")
plt.savefig("plots/stellar_model_total.pdf")
# plt.show()
plt.close()
return (m_tot, r_tot, l_tot, P_tot, T_tot)
|
SubhasisDutta/NoteBook
|
settings.py
|
Python
|
mit
| 370
| 0.002703
|
'''
Created on Jun 18, 2015
@author: Subhasis
'''
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '540614338141-drb3g1kcetlp4sbgaj7dfkj
|
ci6n5ove5.apps.googleusercontent.com'
ANDROID_CLIEN
|
T_ID = 'replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
ANDROID_AUDIENCE = WEB_CLIENT_ID
|
sevein/archivematica
|
src/dashboard/src/components/helpers.py
|
Python
|
agpl-3.0
| 13,812
| 0.004634
|
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import calendar
import logging
import mimetypes
import os
import pprint
import requests
import urllib
from urlparse import urljoin
import json
from django.utils.dateformat import format
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger, InvalidPage
from django.core.urlresolvers import reverse
from django.db.models import Max
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import render
from contrib import utils
from main import models
from mcpserver import Client as MCPServerClient
logger = logging.getLogger('archivematica.dashboard')
class AtomError(Exception):
pass
# Used for debugging
def pr(object):
return pprint.pformat(object)
# Used for raw SQL queries to return data in dictionaries instead of lists
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
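# Illustrative note (not part of the original module; table and values are made up):
# after
#     cursor.execute("SELECT id, name FROM some_table")
# dictfetchall(cursor) returns e.g. [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
# instead of the default list of tuples [(1, 'a'), (2, 'b')].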
def keynat(string):
r'''A natural sort helper function for sort() and sorted()
without using regular expressions or exceptions.
>>> items = ('Z', 'a', '10th', '1st', '9')
>>> sorted(items)
['10th', '1st', '9', 'Z', 'a']
>>> sorted(items, key=keynat)
['1st', '9', '10th', 'a', 'Z']
'''
it = type(1)
r = []
for c in string:
if c.isdigit():
d = int(c)
if r and type( r[-1] ) == it:
r[-1] = r[-1] * 10 + d
else:
r.append(d)
else:
r.append(c.lower())
return r
def json_response(data, status_code=200):
return HttpResponse(
json.dumps(data),
content_type='application/json',
status=status_code,
)
def pager(objects, items_per_page, current_page_number):
"""
:param objects: Iterable of items to paginate
:param items_per_page: Number of items on each page
:param current_page_number: Page to return information for
:return: django.paginator.Page object (with additional attributes)
"""
if current_page_number is None:
current_page_number = 1
paginator = Paginator(objects, items_per_page)
try:
page = paginator.page(current_page_number)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
page = paginator.page(paginator.num_pages)
# For compatibility with old code, add the alternate names as attributes
# TODO replace all places that call this with the actual parameters
page.objects = page.object_list
page.current = page.number
try:
page.previous = page.previous_page_number()
except InvalidPage:
page.previous = None
try:
page.next = page.next_page_number()
except InvalidPage:
page.next = None
page.has_other = page.has_other_pages()
page.total_items = paginator.count
page.num_pages = paginator.num_pages
# Add lists of the (up to) 5 adjacent pages
num_neighbours = 5
if page.number > num_neighbours:
page.previous_pages = range(page.number - num_neighbours, page.number)
else:
page.previous_pages = range(1, page.number)
if page.number < (paginator.num_pages - num_neighbours):
page.next_pages = range(page.number + 1, page.number + num_neighbours + 1)
else:
page.next_pages = range(page.number + 1, paginator.num_pages + 1)
return page
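# Usage sketch (not part of the original module; values are illustrative):
# pager() works on any sliceable sequence, so a plain list is enough to see the extra
# attributes it layers on top of Django's Page object:
#     page = pager(list(range(100)), items_per_page=10, current_page_number=3)
#     page.objects                          # the items 20..29
#     page.previous, page.next              # 2 and 4
#     page.previous_pages, page.next_pages  # up to 5 adjacent page numbers per side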
def get_file_sip_uuid(fileuuid):
file = models.File.objects.get(uuid=fileuuid)
return file.sip.uuid
def task_duration_in_seconds(task):
if task.endtime != None:
duration = int(format(task.endtime, 'U')) - int(format(task.starttime, 'U'))
else:
duration = ''
if duration == 0:
duration = '< 1'
return duration
def get_jobs_by_sipuuid(uuid):
jobs = models.Job.objects.filter(sipuuid=uuid,subjobof='').order_by('-createdtime', 'subjobof')
priorities = {
'completedUnsuccessfully': 0,
'requiresAprroval': 1,
'requiresApproval': 1,
'exeCommand': 2,
'verificationCommand': 3,
'completedSuccessfully': 4,
'cleanupSuccessfulCommand': 5,
}
def get_priority(job):
try: return priorities[job.currentstep]
except Exception: return 0
return sorted(jobs, key = get_priority) # key = lambda job: priorities[job.currentstep]
def get_metadata_type_id_by_description(description):
return models.MetadataAppliesToType.objects.get(description=description)
def get_setting(setting, default=''):
try:
setting = models.DashboardSetting.objects.get(name=setting)
return setting.value
except:
return default
def get_boolean_setting(setting, default=''):
setting = get_setting(setting, default)
if setting == 'False':
return False
else:
return bool(setting)
def set_setting(setting, value=''):
try:
setting_data = models.DashboardSetting.objects.get(name=setting)
except:
setting_data = models.DashboardSetting.objects.create()
setting_data.name = setting
setting_da
|
ta.value = value
setting_data.save()
def get_client_config_value(field):
clientConfigFilePath = '/etc/archivematica/MCPClient/clientConfig.conf'
config = ConfigParser.SafeConfi
|
gParser()
config.read(clientConfigFilePath)
try:
return config.get('MCPClient', field)
except:
return ''
def get_server_config_value(field):
clientConfigFilePath = '/etc/archivematica/MCPServer/serverConfig.conf'
config = ConfigParser.SafeConfigParser()
config.read(clientConfigFilePath)
try:
return config.get('MCPServer', field)
except:
return ''
def get_atom_levels_of_description(clear=True):
"""
Fetch levels of description from an AtoM instance and store them in the database.
The URL and authentication details for the AtoM instance must already be stored in the settings.
Note that only English levels of description are fetched at this point in time.
:param bool clear: When True, deletes all existing levels of description from the Archivematica database before fetching; otherwise, the fetched levels of description will be appended to the already-stored values.
:raises AtomError: if no AtoM URL or authentication credentials are defined in the settings, or if the levels of description cannot be fetched for another reason
"""
url = get_setting('dip_upload_atom_url')
if not url:
raise AtomError("AtoM URL not defined!")
auth = (
get_setting('dip_upload_atom_email'),
get_setting('dip_upload_atom_password'),
)
if not auth:
raise AtomError("AtoM authentication settings not defined!")
# taxonomy 34 is "level of description"
dest = urljoin(url, 'api/taxonomies/34')
response = requests.get(dest, params={'culture': 'en'}, auth=auth)
if response.status_code == 200:
base = 1
if clear:
models.LevelOfDescription.objects.all().delete()
else:
# Add after existing LoD
base = models.LevelOfDescription.objects.aggregate(max=Max('sort
|
luci/luci-py
|
appengine/components/components/config/run_coverage.py
|
Python
|
apache-2.0
| 491
| 0.004073
|
#!/usr/bin/env python
# Copyright 2015 The LUCI
|
Authors. All rights re
|
served.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import os
import sys
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(THIS_DIR, '..', '..'))
from tools import run_coverage
if __name__ == '__main__':
sys.exit(run_coverage.main(
THIS_DIR,
[],
'PRESUBMIT.py,components,*test*,tool*'))
|
primepix/django-sentry
|
example_project/urls.py
|
Python
|
bsd-3-clause
| 148
| 0.006757
|
from django.conf.urls import *
|
urlpatterns = patterns('',
url(r'^debug/', include('tests.urls')),
url(r'^', include('sentry.web.urls'))
|
,
)
|
facebook/buck
|
test/com/facebook/buck/android/testdata/android_project/native/proguard_gen/generator.py
|
Python
|
apache-2.0
| 151
| 0.006623
|
#!/usr/bin/python
import sys
assert len(sys.argv) > 2
with open
|
(sys.argv[1], "w") as out:
for l in
|
sys.argv[2:]:
out.write("# %s\n" % l)
|
marchchad/GatorApp
|
gator_dev/urls.py
|
Python
|
gpl-2.0
| 146
| 0.006849
|
from django.conf.urls import
|
patterns, include, url
handler404 = 'jobs.views.FileNotFound'
urlpatterns = [
|
url(r'^', include('jobs.urls'))
]
|
alesdotio/Spirit
|
spirit/user/forms.py
|
Python
|
mit
| 4,294
| 0.002329
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.template import defaultfilters
from django.conf import settings
from ..core.utils.timezone import timezones
from .models import UserProfile
User = get_user_model()
username_max_length = User._meta.get_field('username').max_length
TIMEZONE_CHOICES = timezones()
class CleanEmailMixin(object):
de
|
f clean_email(self):
email = self.cleaned_data["email"]
if settings.ST_CASE_INSENSITIVE_EMAILS:
email = email.lower()
|
if not settings.ST_UNIQUE_EMAILS:
return email
is_taken = User.objects\
.filter(email=email)\
.exists()
if is_taken:
raise forms.ValidationError(_("The email is taken."))
return email
def get_email(self):
return self.cleaned_data["email"]
class EmailCheckForm(CleanEmailMixin, forms.Form):
email = forms.CharField(label=_("Email"), widget=forms.EmailInput, max_length=254)
class EmailChangeForm(CleanEmailMixin, forms.Form):
email = forms.CharField(label=_("Email"), widget=forms.EmailInput, max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(EmailChangeForm, self).__init__(*args, **kwargs)
if not self.user.has_usable_password():
self.fields.pop('password')
def clean_password(self):
password = self.cleaned_data["password"]
if not self.user.check_password(password):
raise forms.ValidationError(_("The provided password is incorrect."))
return password
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ("first_name", "last_name")
class UserProfileForm(forms.ModelForm):
timezone = forms.ChoiceField(label=_("Time zone"), choices=TIMEZONE_CHOICES)
class Meta:
model = UserProfile
fields = ("location", "timezone", "hide_last_seen")
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
now = timezone.localtime(timezone.now())
self.fields['timezone'].help_text = _('Current time is: %(date)s %(time)s') % {
'date': defaultfilters.date(now),
'time': defaultfilters.time(now)
}
class AvatarChangeForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ("avatar_chosen", "avatar")
widgets = {
'avatar_chosen': forms.RadioSelect
}
class UsernameChangeForm(forms.Form):
new_username = forms.CharField(label=_("New username"), max_length=username_max_length)
password = forms.CharField(label=_("Current password"), widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(UsernameChangeForm, self).__init__(*args, **kwargs)
def clean(self):
if not self.user.has_usable_password():
raise forms.ValidationError(_('You do not have a password set. Please use the set password form on your profile before trying to change your username.'))
if self.user.st.last_username_change_date:
raise forms.ValidationError(_('Sorry, you cannot change your username again!'))
def clean_new_username(self):
username = self.cleaned_data["new_username"]
if username.lower() in settings.ST_INVALID_USERNAMES:
raise forms.ValidationError(_("The username is invalid."))
if settings.ST_CASE_INSENSITIVE_EMAILS:
is_taken = User.objects.filter(username__iexact=username).exists()
else:
is_taken = User.objects.filter(username__exact=username).exists()
if is_taken:
raise forms.ValidationError(_("The username is taken."))
return username
def clean_password(self):
password = self.cleaned_data["password"]
if not self.user.check_password(password):
raise forms.ValidationError(_("The provided password is incorrect."))
return password
|
nickhand/nbodykit
|
nbodykit/source/catalog/tests/test_file.py
|
Python
|
gpl-3.0
| 4,337
| 0.007378
|
from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, da
|
taset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.
|
allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
|
pombredanne/HyperDex
|
bindings/__init__.py
|
Python
|
bsd-3-clause
| 22,251
| 0.008314
|
# Copyright (c) 2013-2014, Cornell University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of HyperDex nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MicrotransactionCall: pass
class AsyncCall: pass
class SyncCall: pass
class NoFailCall: pass
class Iterator: pass
class StructClient(object):
args = (('struct hyperdex_client*', 'client'),)
class StructAdmin(object):
args = (('struct hyperdex_admin*', 'admin'),)
class Microtransaction(object):
args = (('struct hyperdex_client_microtransaction*', 'microtransaction'),)
class SpaceName(object):
args = (('const char*', 'space'),)
class SpaceNameSource(object):
args = (('const char*', 'source'),)
class SpaceNameTarget(object):
args = (('const char*', 'target'),)
class Key(object):
args = (('const char*', 'key'), ('size_t', 'key_sz'))
class Predicates(object):
args = (('const struct hyperdex_client_attribute_check*', 'checks'),
('size_t', 'checks_sz'))
class Attributes(object):
args = (('const struct hyperdex_client_attribute*', 'attrs'),
('size_t', 'attrs_sz'))
class MapAttributes(object):
args = (('const struct hyperdex_client_map_attribute*', 'mapattrs'),
('size_t', 'mapattrs_sz'))
class AttributeNames(object):
args = (('const char**', 'attrnames'),
('size_t', 'attrnames_sz'))
class Status(object):
args = (('enum hyperdex_client_returncode', 'status'),)
class AdminStatus(object):
args = (('enum hyperdex_admin_returncode', 'status'),)
class Description(object):
args = (('const char*', 'description'),)
class SortBy(object):
args = (('const char*', 'sort_by'),)
class Limit(object):
args = (('uint64_t', 'limit'),)
class Count(object):
args = (('uint64_t', 'count'),)
class MaxMin(object):
args = (('int', 'maxmin'),)
class ReadOnly(object):
args = (('int', 'ro'),)
class FaultTolerance(object):
args = (('uint64_t', 'ft'),)
class SpaceDescription(object):
args = (('const char*', 'description'),)
class SpaceList(object):
args = (('const char*', 'spaces'),)
class IndexList(object):
args = (('const char*', 'indexes'),)
class SubspaceList(object):
args = (('const char*', 'subspaces'),)
class Token(object):
args = (('uint64_t', 'token'),)
class Address(object):
args = (('const char*', 'address'),)
class BackupName(object):
args = (('const char*', 'backup'),)
class BackupList(object):
args = (('const char*', 'backups'),)
class PerformanceCounters(object):
args = (('struct hyperdex_admin_perf_counter', 'pc'),)
class AttributeName(object):
args = (('const char*', 'attribute'),)
class IndexID(object):
args = (('uint64_t', 'idxid'),)
class Method(object):
def __init__(self, name, form, args_in, args_out):
self.name = name
self.form = form
self.args_in = args_in
self.args_out = args_out
# NOTE: The commas here aren't redundant, because the parser expects lists of arguments
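# Illustrative aside (not part of the original bindings): the trailing comma is what
# makes a one-element tuple, so (Status,) is a 1-tuple the generator can iterate over,
# whereas (Status) is just the Status class itself.
assert isinstance((Status,), tuple)
assert not isinstance((Status), tuple)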
Client = [
Method('get', AsyncCall, (SpaceName, Key), (Status, Attributes)),
Method('get_partial', AsyncCall, (SpaceName, Key, AttributeNames), (Status, Attributes)),
Method('put', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_put', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_put', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('cond_put_or_create', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_put', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('put_if_not_exist', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('del', AsyncCall, (SpaceName, Key), (Status,)),
Method('cond_del', AsyncCall, (SpaceName, Key, Predicates), (Status,)),
Method('group_del', AsyncCall, (SpaceName, Predicates), (Status, Count)),
Method('atomic_add', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_add', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_add', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_add', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_sub', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_sub', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_sub', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_sub', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_mul', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_mul', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_mul', AsyncCall, (SpaceName, Key, Predicates, Attributes), (S
|
tatus,)),
Method('group_atomic_mul', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_div', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_div', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_div', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_div', AsyncCall, (SpaceName, Predic
|
ates, Attributes), (Status, Count)),
Method('atomic_mod', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_mod', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_mod', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_and', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_and', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_and', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_and', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_or', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_or', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_or', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_or', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_xor', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_xor', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_xor', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_min', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_min', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_
|
supernathan23/dn_availability
|
dn_availability/cli.py
|
Python
|
mit
| 13,300
| 0.008722
|
""" Code used for running the package directly from the Command Line """
import os.path
import logging
import argparse
import sys
from .db import AvailabilityDB
from .core import AvailabilityInfo
from .reports import ALL_REPORTS
from .settings import get_settings
def run():
# -------------------------- Logging Settings ---------------------------
logger = logging.getLogger(__name__)
main_logger = logging.getLogger('dn_availability')
_handler = logging.StreamHandler()
_formatter = logging.Formatter('%(levelname)s(%(name)s): %(message)s')
_handler.setFormatter(_formatter)
main_logger.addHandler(_handler)
# -------------------------- Argument Parser -----------------------------
parser = argparse.ArgumentParser(
description='A utility for managing available numbers for a Cisco UCM system',
epilog='For more information, visit the project page at: https://github.com/supernathan23/dn_availability')
subparsers = parser.add_subparsers(title='Actions', dest='action',
metavar='<action>',
description='You can enter "<action> -h" for details '
'on that action',
help='Available actions: add_report backup example_settings export '
'gen_report import init_db list restore')
# global args
parser.add_argument('-f', dest='settings_file',
help='Settings File. See the example_settings.cfg file for details')
parser.add_argument('-c', '--confirm', action='store_true',
dest='confirm',
help='Prompt for confirmation before doing anything. '
'Default is to only prompt when deleting data')
parser.add_argument('-q', '--quiet', action='store_true',
help='Do not prompt for confirmations')
parser.add_argument('-v', '--verbose', action='count',
help='Display log messages. (Will override -q | --quiet)')
# Example Settings
parser_settings = subparsers.add_parser('example_settings')
parser_settings.add_argument('-o', '--output_file',
help='Output filename (will be overwritten if it exists!!)')
# list subcommand
parser_list = subparsers.add_parser('list')
parser_list.add_argument('-t', '--table',
help='Table to list data from, if not provided will display a list of '
'tables')
# init subcommand
parser_init = subparsers.add_parser('init_db')
parser_init.add_argument('-D', '--drop', action='store_true', default=False,
help='Drops existing tables, erasing existing data, before initializing')
# import subcommand
parser_import = subparsers.add_parser('import')
parser_import.add_argument('table',
help='Table to store the imported data. (use the list command to get a '
'list of the available tables)')
parser_import.add_argument('filename',
help='CSV filename to import')
# export subcommand
parser_export = subparsers.add_parser('export')
parser_export.add_argument('table',
help='Table to export. (use the list command to get a list of the '
'available tables)')
parser_export.add_argument('filename',
help='Destination filename (will be overwritten if it exists!!)')
# backup subcommand
parser_backup = subparsers.add_parser('backup')
parser_backup.add_argument('filename',
help='Destination filename (will be overwritten if it exists!!)')
# restore subcommand
parser_restore = subparsers.add_parser('restore')
parser_restore.add_argument('filename',
help='Source filename')
parser_restore.add_argument('-D', '--drop', action='store_true', default=False,
help='Drops existing tables, erasing existing data, before restoring backup')
# add_report subcommand
parser_add_report = subparsers.add_parser('add_report')
group_add_report = parser_add_report.add_mutually_exclusive_group(required=True)
group_add_report.add_argument('-t', '--timestamp',
help='Timestamp of when the report was generated.')
group_add_report.add_argument('-a', '--auto_timestamp',
action='store_true', default=False,
help='Obtain the timestamp from the file\'s creation date. Will prompt to '
'confirm that the timestamp is correct.')
parser_add_report.add_argument('-c', '--confirm_timestamp',
action='store_true', default=False,
help='Prompts to confirm the timestamp is correct. Timestamp is shown '
'in the systems standard format to make things easier. (Enabled by '
'default when -a (--auto_timestamp) is used')
parser_add_report.add_argument('system_id',
help='Phone System ID (can be obtained by using "list -t PhoneSystem" '
'subcommand)')
parser_add_report.add_argument('filename',
help='Device report filename to be added to the system')
# gen_report subcommand
parser_gen_report = subparsers.add_parser('gen_report')
parser_gen_report.add_argument('report_name',
choices=ALL_REPORTS.keys(), metavar='report_name',
help='Name of the report. Available Reports: {}'.format(
|
', '.join(ALL_REPORTS.keys()))
)
parser_gen_report.add_argument('-s', '--system_id', action='append',
help='System ID (use the "list -t PhoneSystem" subcommand for a list of'
' syste
|
ms)')
parser_gen_report.add_argument('-g', '--number_group', action='append',
help='Number Group ID (use the "list -t NumberGroup" subcommand for a '
'list of number groups)')
parser_gen_report.add_argument('-o', '--output_filename',
help='Destination filename (will be overwritten if it exists!!)')
# ---------------------------Setup----------------------------------------
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
if args.verbose > 1:
log_level = logging.DEBUG
elif args.quiet:
log_level = logging.ERROR
else:
log_level = logging.WARNING
main_logger.setLevel(log_level)
logger.info('Log verbosity set to %s', log_level)
app_settings = get_settings(args.settings_file)
db = AvailabilityDB(app_settings['DEFAULT']['db_url'])
info = AvailabilityInfo(db)
# -------------------------- Actions -------------------------------------
if args.action == 'list':
if not args.table:
logger.info('Listing tables')
print('Active Tables:')
print('\n'.join(info.db.metadata.tables.keys()))
sys.exit()
logger.info('Listing records for table %s', args.table)
conn = info.db.connect()
table = info.db.get_table(args.table)
results = conn.execute(table.select())
for row in results:
print(row)
if args.action == 'example_settings':
from pkg_resources import resource_string
settings_data = resource_string('dn_availability', 'example_settings.cfg').decode()
if args.output_file:
if args.confirm:
print('About to export an example settings file to "{}". (If file '
'exists it will be overwritten)'.format(args.output_file))
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
logger.info('Exporting example settings file to "%s"', args.output_file)
with open(args.output_file, 'w') as f:
f.write(settings_data)
else:
print(settings_data)
elif args.action == 'init_db':
if args.drop:
if not args.quiet:
print('You are about to re-initialize the DB, '
'ALL EXISTING DATA WILL BE ERASED!!!')
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
info.db.tear
|
tboyce021/home-assistant
|
homeassistant/components/zha/core/channels/base.py
|
Python
|
apache-2.0
| 12,609
| 0.001269
|
"""Base classes for channels."""
import asyncio
from enum import Enum
from functools import wraps
import logging
from typing import Any, Union
import zigpy.exceptions
from homeassistant.core import callback
from .. import typing as zha_typing
from ..const import (
ATTR_ARGS,
ATTR_ATTRIBUTE_ID,
ATTR_ATTRIBUTE_NAME,
ATTR_CLUSTER_ID,
ATTR_COMMAND,
ATTR_UNIQUE_ID,
ATTR_VALUE,
CHANNEL_ZDO,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import LogMixin, safe_read
_LOGGER = logging.getLogger(__name__)
def parse_and_log_command(channel, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = channel.cluster.server_commands.get(command_id, [command_id])[0]
channel.debug(
"received '%s' command with %s args on cluster_id '%s' tsn '%s'",
cmd,
args,
channel.cluster.cluster_id,
tsn,
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
try:
result = await command(*args, **kwds)
channel.debug(
"executed '%s' command with args: '%s' kwargs: '%s' result: %s",
command.__name__,
args,
kwds,
result,
)
return result
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
channel.debug(
"command failed: '%s' args: '%s' kwargs '%s' exception: '%s'",
command.__name__,
args,
kwds,
str(ex),
)
return ex
return wrapper
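# Illustrative sketch (not part of the original module; the call below is hypothetical):
# decorate_command() can wrap any awaitable cluster call so that ZigbeeException and
# asyncio.TimeoutError are logged through channel.debug() and returned rather than raised:
#     safe_read = decorate_command(channel, channel.cluster.read_attributes)
#     result = await safe_read(["on_off"])  # returns the exception object on failure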
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel(LogMixin):
"""Base channel for a Zigbee cluster."""
REPORT_CONFIG = ()
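    # Illustrative note (hypothetical values, not from the original): subclasses override
    # REPORT_CONFIG with entries shaped like
    #     {"attr": "on_off", "config": (0, 300, 1)}  # min/max report interval, reportable change
    # which configure_reporting() below unpacks attribute by attribute.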
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize ZigbeeChannel."""
self._generic_id = f"channel_0x{cluster.cluster_id:04x}"
self._channel_name = getattr(cluster, "ep_attribute", self._generic_id)
self._ch_pool = ch_pool
self._cluster = cluster
self._id = f"{ch_pool.id}:0x{cluster.cluster_id:04x}"
unique_id = ch_pool.unique_id.replace("-", ":")
self._unique_id = f"{unique_id}:0x{cluster.cluster_id:04x}"
self._report_config = self.REPORT_CONFIG
if not hasattr(self, "_value_attribute") and len(self._report_config) > 0:
attr = self._report_config[0].get("attr")
if isinstance(attr, str):
self.value_attribute = self.cluster.attridx.get(attr)
else:
self.value_attribute = attr
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
@property
def id(self) -> str:
"""Return channel id unique for this device only."""
return self._id
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def name(self) -> str:
"""Return friendly name."""
return self._channel_name
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
self._ch_pool.async_send_signal(signal, *args)
async def bind(self):
"""Bind a zigbee cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
try:
res = await self.cluster.bind()
self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0])
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex)
)
async def configure_reporting(self) -> None:
"""Configure attribute reporting for a cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
kwargs["manufacturer"] = self._ch_pool.manufacturer_code
for report in self._report_config:
attr = report["attr"]
attr_name = self.cluster.attributes.get(attr, [attr])[0]
min_report_int, max_report_int, reportable_change = report["config"]
try:
res = await self.cluster.configure_reporting(
attr, min_report_int, max_report_int, reportable_change, **kwargs
)
self.debug(
"reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'",
attr_name,
self.cluster.ep_attribute,
min_report_int,
max_report_int,
reportable_change,
res,
)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"failed to set reporting for '%s' attr on '%s' cluster: %s",
attr_name,
self.cluster.ep_attribute,
str(ex),
)
async def async_configure(self) -> None:
"""Set cluster binding and attribute reporting."""
if not self._ch_pool.skip_configuration:
await self.bind()
if self.cluster.is_server:
await self.configure_reporting()
ch_specific_cfg = getattr(self, "async_configure_channel_specific", None)
if ch_specific_cfg:
await ch_specific_cfg()
self.debug("finished channel configuration")
else:
self.debug("skipping channel configuration")
self._status = ChannelStatus.CONFIGURED
async def async_initialize(self, from_cache: bool) -> None:
"""Initialize channel."""
if not from_cache and self._ch_pool.skip_configuration:
self._status = ChannelStatus.INITIALIZED
return
self.debug("initializing channel: from_cache: %s", from_cache)
attributes = [cfg["attr"] for cfg in self._report_config]
if attributes:
await self.get_attributes(attributes, from_cache=from_cache)
ch_specific_init = getattr(self, "async_initialize_channel_specific", None)
if ch_specific_init:
await ch_specific_init(from_cache=from_cache)
self.debug("finished channel configuration")
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
self.async_se
|
nd_signal(
f"{self.unique_id}_{SI
|
GNAL_ATTR_UPDATED}",
attrid,
self.cluster.attributes.get(attrid, [attrid])[0],
value,
)
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
@callback
def zha_send_event(self, command: str, args: Union[int, dict]) -> None:
"""Relay events to hass."""
self._ch_pool.zha_send_event(
{
ATTR_UNIQUE_ID: self.unique_id,
ATTR_CLUSTER_ID: self.cluster.cluster_id,
ATTR_COMMAND: command,
ATTR_ARGS: args,
}
)
async def async_update(self):
"""Retrieve
|
rx2130/Leetcode
|
python/274 H-Index.py
|
Python
|
apache-2.0
| 993
| 0.001007
|
class Solution(object):
# Op1: time O(n*log(n)) space O(1)
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
citations.sort(reverse=True)
for i, x in enumerate(citations):
# print(i, x)
if i >= x:
return i
return len(citations)
# Op1.1
return sum(i < j for i, j in enumerate(sorted(citations, reverse=True)))
# Op2: time O(n) space O(n)
def hIndex2(self, citations):
n = len(citations)
citeCount = [0] * (n + 1)
for c in citations:
if c >= n:
|
citeCount[n] += 1
else:
citeCount[c] += 1
count = 0
f
|
or i in reversed(range(n + 1)):
count += citeCount[i]
if count >= i:
return i
return 0
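# Worked example for clarity: for citations = [3, 0, 6, 1, 5] the descending sort is
# [6, 5, 3, 1, 0]; the first index i with i >= citations[i] is i = 3 (where the value
# is 1), so the h-index is 3: three papers each have at least 3 citations.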
citations = [3, 0, 6, 1, 5]
test = Solution()
print(test.hIndex(citations))
print(test.hIndex2(citations))
|
ferris-wufei/toolbox
|
dw/api_ga/api_ga.py
|
Python
|
gpl-2.0
| 5,603
| 0.003926
|
# -*- coding: utf-8 -*-
"""
author: ferris
update: 2015-12-08
function: query from Google Analytics API, using loop to overcome quota of 10,000 records per query.
"""
import argparse
from googleapiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
# api_name = 'analytics'
# api_version = 'v3'
# scope = ['https://www.googleapis.com/auth/analytics.readonly']
# service_account_email = 'account-1@still-sensor-115306.iam.gserviceaccount.com'
# key_file_location = 'client_secrets.p12'
class GA:
def __init__(self, api_name='analytics', api_version='v3',
scope=['https://www.googleapis.com/auth/analytics.readonly'],
service_account_email='account-1@still-sensor-115306.iam.gserviceaccount.com',
key_file_location='client_secrets.p12'):
self.service = self.get_service(api_name, api_version, scope, key_file_location, service_account_email)
@staticmethod
def get_service(api_name, api_version, scope, key_file_location, service_account_email):
"""
:param api_name: The name of the api to connect to.
:param api_version: The api version to connect to.
:param scope: A list auth scopes to authorize for the application.
:param key_file_location: The path to a valid service account p12 key file.
:param service_account_email: The service account email address.
"""
f = open(key_file_location, 'rb')
key = f.read()
f.close()
credentials = SignedJwtAssertionCredentials(service_account_email, key,
scope=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
def get_list(self, conf):
"""
:param conf: Python dictionary containing these query parameters: profile_id,
start_date, end_date, dimensions, metrics, filters
"""
# get first 10,000 records
start_index = 1
max_results = 10000
api_query = self.service.data().ga().get(ids='ga:' + conf['profile_id'],
start_date=conf['start_date'],
end_date=conf['end_date'],
dimensions=conf['dimensions'],
metrics=conf['metrics'],
filters=conf['filters'],
start_index=start_index,
max_results=max_results,
samplingLevel='HIGHER_PRECISION' # minimize sampling effect
)
try:
temp_data = api_query.execute()
except TypeError:
print('There was an error in constructing your query')
# no results
num_results = temp_data.get('totalResults')
if num_results == 0:
print("no results from query")
return []
# print number of total results
print("total results from query: {0}".format(num_results))
# save results of the 1st query
result_list = []
result_list.extend(temp_data.get('rows'))
# save results of additional queries
if num_results > 10000:
rows_left = num_results - 10000
# loop queries
while rows_left > 0:
start_index += 10000
api_query = self.service.data().ga().get(ids='ga:' + conf['profile_id'],
start_date=conf['start_date'],
end_date=conf['end_date'],
dimensions=conf['dimensions'],
metrics=conf['metrics'],
filters=conf['filters'],
|
start_index=start_index,
max_results=max_results,
|
samplingLevel='HIGHER_PRECISION'
                                                     )
try:
temp_data = api_query.execute()
except TypeError:
print('There was an error in constructing your query')
result_list.extend(temp_data.get('rows'))
print('appended more records')
rows_left -= 10000
print("export to list success")
return result_list
# demo
if __name__ == "__main__":
# sample configuration
sample_conf = {'dimensions': 'ga:pagePath,ga:eventCategory,ga:eventAction,ga:eventLabel',
'metrics': 'ga:totalEvents,ga:users', 'filters': 'ga:hostname=~imike\.com',
'profile_id': '112805419', 'start_date': '2015-12-08', 'end_date': '2015-12-08'}
# query
G1 = GA()
results = G1.get_list(sample_conf)
# output
with open('test.out', 'w+') as f_out:
for r in results:
line = '\t'.join(r) + '\n'
f_out.write(line)
print("output to file success")
|
apache/dispatch
|
tests/system_tests_tcp_adaptor.py
|
Python
|
apache-2.0
| 50,963
| 0.002178
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import json
import os
import socket
import sys
import time
import traceback
from system_test import Logger
from system_test import main_module
from system_test import Process
from system_test import Qdrouterd
from system_test import TestCase
from system_test import TIMEOUT
from system_test import unittest
from subprocess import PIPE
from subprocess import STDOUT
# Tests in this file are organized by classes that inherit TestCase.
# The first instance is TcpAdaptor(TestCase).
# The tests emit files that are named starting with 'TcpAdaptor'. This includes
# logs and shell scripts.
# Subsequent TestCase subclasses must follow this pattern and emit files named
# with the test class name at the beginning of the emitted files.
try:
from TCP_echo_client import TcpEchoClient
from TCP_echo_server import TcpEchoServer
except ImportError:
class TCP_echo_client(object):
pass
class TCP_echo_server(object):
pass
DISABLE_SELECTOR_TESTS = False
DISABLE_SELECTOR_REASON = ''
try:
import selectors # noqa F401: imported but unused (echo server and echo client import this, they run as subprocesses)
except ImportError:
DISABLE_SELECTOR_TESTS = True
DISABLE_SELECTOR_REASON = "Python selectors module is not available on this platform."
# This code takes a wild guess how long an echo server must stall
# receiving input data before Q2 holdoff triggers in the host router
# on all the various CI systems out there.
Q2_DELAY_SECONDS = 1.0
# This code needs to know the size in bytes of the holdoff trigger threshold.
# Whitebox testing knows that the holdoff is specified in some number of
# buffers. What whitebox testing does not know is how big the buffers are,
# how many buffers there are, or how many bytes are actually in each buffer.
# Today the holdoff is probably 128K bytes so use something bigger than that
# in the test to get the trigger to kick in.
# On top of that the echo server is undermined by having TCP window or python
# read the server socket in advance of the echo server asking it to.
# In a test case the adaptor logged writing almost 3MBytes
# 2021-02-26 19:11:20.831826 PN_RAW_CONNECTION_WRITTEN Wrote 8192 bytes. Total written 2777007 bytes
# well before the server started reading from the socket.
# 2021-02-26 19:11:21.534246 J0#206 TCP_TEST [] [] ECHO_SERVER TcpAdaptor NS_EC2_CONN_STALL Connection from 127.0.0.1:54410 stall end
# 2021-02-26 19:11:21.534801 J0#207 TCP_TEST [] [] ECHO_SERVER TcpAdaptor NS_EC2_CONN_STALL read from: 127.0.0.1:54410 len:1024:
# Giving the stalled server 10Mbytes seems to run the TCP window out of capacity
# so that it stops reading from the TcpConnector and Q2 finally kicks in.
Q2_TEST_MESSAGE_SIZE = 10000000
# local timeout in seconds to wait for one echo client to finish
echo_timeout = 30
def ncat_available():
popen_args = ['ncat', '--version']
try:
process = Process(popen_args,
name='ncat_check',
stdout=PIPE,
expect=None,
universal_newlines=True)
out = process.communicate()[0]
return True
except:
return False
#
# Test concurrent clients
#
class EchoClientRunner():
"""
Launch an echo client upon construction.
Provide poll interface for checking done/error.
Provide wait/join to shut down.
"""
def __init__(self, test_name, client_n, logger, client, server, size,
count,
print_client_logs=True,
timeout=TIMEOUT,
port_override=None):
"""
Launch an echo client upon construction.
:param test_name: Unique name for log file prefix
:param client_n: Client number for differentiating otherwise identical clients
:param logger: parent logger for logging test activity vs. client activity
:param client: router name to which the client connects
:param server: name whose address the client is targeting
:param size: length of messages in bytes
:param count: number of messages to be sent/verified
:param print_client_logs: verbosity switch
:return Null if success else string describing error
"""
self.test_name = test_name
self.client_n = str(client_n)
self.logger = logger
self.client = client
self.server = server
self.size = size
self.count = count
self.timeout = timeout
self.print_client_logs = print_client_logs
self.client_final = False
# Each router has a listener for the echo server attached to every router
self.listener_port = TcpAdaptor.tcp_client_listener_ports[self.client][self.server] if port_override is None else port_override
self.name = "%s_%s_%s_%s" % \
(self.test_name, self.client_n, self.size, self.count)
self.client_prefix = "ECHO_CLIENT %s" % self.name
parent_path = os.path.dirname(os.getcwd())
self.client_logger = Logger(title=self.client_prefix,
print_to_console=self.print_client_logs,
save_for_dump=False,
ofilename=os.path.join(parent_path, "setUpClass/TcpAdaptor_echo_client_%s.log" % self.name))
try:
self.e_client = TcpEchoClient(prefix=self.client_prefix,
host='localhost',
port=self.listener_port,
size=self.size,
count=self.count,
timeout=self.timeout,
logger=self.client_logger)
except Exception as exc:
self.e_client.error = "TCP_TEST TcpAdaptor_runner_%s failed. Exception: %s" % \
(self.name, traceback.format_exc())
self.logger.log(self.e_client.error)
raise Exception(self.e_client.error)
def client_error(self):
return self.e_client.error
def client_exit_status(self):
return self.e_client.exit_status
def client_running(self):
return self.e_client.is_running
def wait(self):
# wait for client to exit
# Return None if successful wait/join/exit/close else error message
result = None
try:
self.e_client.wait()
except Exception as exc:
self.e_client.error = "TCP_TEST EchoClient %s failed. Exception: %s" % \
(self.name, traceback.format_exc())
self.logger.log(self.e_client.error)
result = self.e_client.error
return result
class TcpAdaptor(TestCase):
"""
6 edge routers connected via 3 interior routers.
9 echo servers are connected via tcpConnector, one to each router.
Each router has 10 listeners, one for each server and
another for which there is no server.
"""
# +-------+ +---------+ +---------+ +---------+ +-------+
# | EA1 |<-->| INTA |<==>| INTB |<==>| INTC |<-->| EC1 |
# +-------+ | | | | | | +-------+
# +-------+ | | | | | | +-------+
livio/DocDown-Python | docdown/template_adapters/string_format.py | Python | bsd-3-clause | 1,530 | 0.001961
# -*- coding: utf-8 -*-
"""
Adapter to use Python str.format() to render a template
"""
from __future__ import absolute_import, unicode_literals, print_function
from string import Formatter
# handle py2 and py3 strings without relying on six lib since we don't use it for anything else.
try:
basestring
except NameError:
# if it's good enough for Kenneth Reitz, it's good enough for me
# https://github.com/kennethreitz/requests/blob/5c4549493b35f5dbb084d029eaf12b6c7ce22579/requests/compat.py#L66
basestring = (str, bytes)
class DefaultValueFormatter(Formatter):
"""
String formatter which replaces keys found in the string but not in the replacement parameters
with a default value.
The default value for the default is the empty string `''`
"""
def __init__(self, default=''):
Formatter.__init__(self)
self.default = default
def get_value(self, key, args, kwds):
if isinstance(key, basestring):
try:
return kwds[key]
except KeyError:
return self.default
return Formatter.get_value(self, key, args, kwds)
class StringFormatAdapter(object):
"""
Adapter for NoteBlockPreprocessor to render templates using standard python string substitution
using named arguments.
"""
def render(self, template='', context=None, *args, **kwargs):
if context is None:
context = {}
formatter = DefaultValueFormatter()
return formatter.format(template, **context)
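# A hedged usage sketch (not part of the original module): keys missing from the context
# fall back to DefaultValueFormatter's default instead of raising KeyError the way
# str.format() would.
if __name__ == '__main__':
    adapter = StringFormatAdapter()
    print(adapter.render('Hello {name}, you have {count} messages', {'name': 'Ada'}))
    # -> 'Hello Ada, you have  messages' (the missing 'count' becomes the empty default)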
whosonfirst/py-mapzen-whosonfirst-pip-utils | setup.py | Python | bsd-3-clause | 1,676 | 0.004773
#!/usr/bin/env python
# Remove .egg-info directory if it exists, to avoid dependency problems with
# partially-installed packages (20160119/dphiffer)
import os, sys
from shutil import rmtree
cwd = os.path.dirname(os.path.realpath(sys.argv[0]))
egg_info = cwd + "/mapzen.whosonfirst.pip.utils.egg-info"
if os.path.exists(egg_info):
rmtree(egg_info)
from setuptools import setup, find_packages
packages = find_packages()
desc = open("README.md").read(),
version = open("VERSION").read()
setup(
name='mapzen.whosonfirst.pip.utils',
namespace_packages=['mapzen', 'mapzen.whosonfirst'],
version=version,
description='Python utility methods for making Who\'s On First documents play nicely with the go-whosonfirst-pip server',
author='Mapzen',
url='https://github.com/mapzen/py-mapzen-whosonfirst-pip-utils',
install_requires=[
'mapzen.whosonfirst.pip>=0.04',
'mapzen.whosonfirst.placetypes>=0.11',
'shapely',
],
dependency_links=[
'https://github.com/whosonfirst/py-mapzen-whosonfirst-pip/tarball/master#egg=mapzen.whosonfirst.pip-0.04',
'https://github.com/whosonfirst/py-mapzen-whosonfirst-placetypes/tarball/master#egg=mapzen.whosonfirst.placetypes-0.11',
],
packages=packages,
scripts=[
],
download_url='https://github.com/mapzen/py-mapzen-whosonfirst-pip-utils/releases/tag/' + version,
license='BSD')
light-swarm/lightswarm_render | scripts/display.py | Python | mit | 385 | 0.012987
#!/usr/bin/env python
import cv2
import cv2.cv as cv
class Display:
def setup(self, fullscreen):
cv2.namedWindow('proj_0', cv2.WINDOW_OPENGL)
if fullscreen:
cv2.setWindowProperty('proj_0', cv2.WND_PROP_FULLSCREEN, cv.CV_WINDOW_FULLSCREEN)
def draw(self, image):
cv2.imshow('proj_0', image)
cv2.waitKey(1)
hugoxia/Python | FluentPython/chapter_2/sequence.py | Python | mit | 919 | 0
from collections import namedtuple
def game():
# 元组拆包
a, b, *rest = range(5)
print(a, b, rest)
a, b, *rest = range(2)
print(a, b, rest)
a, *body, c, d = range(5)
print(a, body, c, d)
*head, b, c, d = range(5)
print(head, b, c, d)
# 具名元组 page_26
City = namedtuple('City', 'name country population coordinates')
tokyo = City('Tokyo', 'JP', '36.933', (35.689722, 139.691667))
print(tokyo)
print("population is %s" % tokyo.population)
print("coordinates is {}".format(tokyo.coordinates))
print("index one in tokyo is %s" % tokyo[1])
print("all fields is {}".format(City._fields))
LatLong = namedtuple('LatLong', 'lat long')
delhi_data = ('Delhi NCR', 'IN', 21.935, LatLong(28.613889, 77.208889))
delhi = City._make(delhi_data)
print(delhi._asdict()) # collections.OrderedDict
if __name__ == "__main__":
game()
joshbedo/phpsh | src/dbgp-phpsh.py | Python | bsd-3-clause | 34,783 | 0.004226
#!/usr/bin/env python
from select import poll, POLLIN, POLLHUP
from subprocess import Popen, PIPE
from phpsh import PhpshConfig
import xml.dom.minidom
import signal
import socket
import shlex
import time
import sys
import re
import os
"""This is a DBGp xdebug protocol proxy started by phpsh. It accepts a
connection from xdebug, connects to an IDE debug client and
communicates with its parent phpsh over a pair of pipes."""
__version__ = "1.0"
__author__ = "march@facebook.com"
__date__ = "Nov 05, 2008"
usage = "dbgp.py <4-pipe-fds>"
client_init_error_msg = """
Timed out while waiting for debug client for %ds. Make sure the client is
configured for PHP debugging and expects xdebug connections on port
%d. Client command was: %s"""
logfile = None
def debug_log(s):
global tracing_enabled
global logfile
if not tracing_enabled:
return
if not logfile:
logfile = open("dbgp.log", "a", 1)
logfile.write('\n>>>>>>>>>>>>>>>>>>>>>>>\n\n')
logfile.write(s+'\n\n')
logfile.flush()
def dbgp_get_filename(dbgp_response):
"""If dbgp_response is a dbgp <response> message with status='break' and
'filename' attribute set, return the value of filename. Otherwise
return None"""
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
if res and res[0].getAttribute('status') == "break":
msg = doc.getElementsByTagName("xdebug:message")
if msg and msg[0].hasAttribute('filename'):
return msg[0].getAttribute('filename')
def dbgp_get_txid(dbgp_response):
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
if res:
return res[0].getAttribute('transaction_id')
def dbgp_get_bpid(dbgp_response):
"""If dbgp_response is a response to 'breakpoint_set' with
transaction_id=txid, return the value of id attribute as a string.
Otherwise return None"""
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
if res and res[0].getAttribute('command') == 'breakpoint_set':
return res[0].getAttribute('id')
def xdebug_is_stopping(dbgp_response):
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
return res and res[0].getAttribute("status") == "stopping"
def parse_port(portstr):
if not portstr:
return None
try:
port = int(portstr)
if port < 0:
raise ValueError, "Invalid port: " + portstr
elif port == 0:
port = None
except ValueError:
raise ValueError, "Invalid port: " + portstr
return port
def parse_timeout(timeoutstr):
if not timeoutstr:
return None
try:
timeout = int(timeoutstr)
if timeout <= 0:
return None
except ValueError:
raise ValueError, "Invalid timeout: " + timeoutstr
return timeout
def get_emacs_version():
vline = Popen("emacs --version | head -n 1", shell=True,
stdout=PIPE, stderr=PIPE).communicate()[0]
if not vline:
raise OSError, "emacs not found. Make sure it's in your PATH."
m = re.compile("GNU Emacs ([0-9.]+)").match(vline)
if not m:
raise ValueError, "could not parse emacs version: " + vline +\
"\nexpected GNU Emacs [0-9.]+"
try:
return [int(s) for s in m.group(1).strip('.').split('.')]
except ValueError:
raise ValueError, "invalid Emacs version format: " + m.group(1)
def get_debugclient_version(debugclient_path):
vline = Popen(debugclient_path + " -v | head -n 1", shell=True,
stdout=PIPE, stderr=PIPE).communicate()[0]
if not vline:
raise OSError, "debugclient not found\nThis is a simple xdebug "\
"protocol client distributed with xdebug\n"\
"Make sure it's in your PATH."
m = re.compile("Xdebug Simple DBGp client \(([0-9.]+)\)").match(vline)
if not m:
raise ValueError, "could not parse debugclient version: " + vline +\
"\nexpected Xdebug Simple DBGp client ([0-9.]+)"
try:
return [int(s) for s in m.group(1).strip('.').split('.')]
except ValueError:
raise ValueError, "invalid debugclient version format: " + m.group(1)
class DebugClient:
"""Objects of this class are interfaces to debug IDE clients. A DebugClient object may exist even if the underlying IDE process is no longer running."""
def __init__(self, config, port):
self.p_client = None # Popen to client
self.conn = None # DBGpConn to client
self.lasttxid = None # last txid seen from this client
self.lastdbgpcmd = None # name of last command read from client
self.stopped = True # never sent anything to this client, or
# last message was "stopped"
self.config = config # RawConfigParser
self.port = port
self.host = config.get_option("Debugging", "ClientHost")
self.timeout = parse_timeout(config.get_option("Debugging",
"ClientTimeout"))
self.auto_close = False # client exits after each debugging session
# self.emacs_command() may set this to True
debug_log("creating DebugClient object")
if config.get_option("Debugging", "X11").startswith("require") \
and not os.getenv('DISPLAY'):
debug_log("X11 is required and DISPLAY is not set")
raise Exception, "X11 is required
|
and DISPLAY is not set"
cmd = config.get_option("Debugging", "DebugClient")
if cmd.startswith("emacs"):
emacs_version = get_emacs_version()
if emacs_version < [22, 1]:
raise Exception, "emacs version " + str(emacs_version) +\
" is too low, 22.1 or above required"
debugclient_path = config.get_option("Emacs", "XdebugClientPath")
debugclient_version = get_debugclient_version(debugclient_path)
if debugclient_version < [0, 10, 0]:
raise Exception, "debugclient (xdebug client) version " +\
str(debugclient_version) + " is too low. 0.10.0 or "\
"above required"
self.cmd = self.emacs_command(config)
else:
self.cmd = shlex.split(cmd)
def connect(self):
"""Try to connect to self.host:self.port (if host is an empty string,
connect to localhost). If can't connect and host is localhost,
execute cmd and try to connect again until timeout. Raises
socket.timeout if client is not up until timeout, OSError if
client could not be started"""
global config
if self.conn:
if self.conn.isconnected():
# check if the client is still connected by reading
# everything that the client sent us since the end of
# last session (if any) and checking for HUP
client_set = poll()
client_set.register(self.conn.get_sockfd(), POLLIN)
events = client_set.poll(0)
try:
while events:
fd, e = events[0]
if e&POLLHUP:
self.conn.close()
self.conn = None
raise EOFError
else:
self.recv_cmd()
events = client_set.poll(0)
return # still connected
except (socket.error, EOFError):
pass
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
except socket.error, msg:
if self.host != '' and self.host != 'localhost' \
and self.host != '127.0.0.1' or not self.cmd:
# could not connect and client is not local or no command
# to start a client. Prop
jstasiak/python-zeroconf | examples/self_test.py | Python | lgpl-2.1 | 1,785 | 0.00112
#!/usr/bin/env python3
import logging
import socket
import sys
from zeroconf import ServiceInfo, Zeroconf, __version__
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) > 1:
assert sys.argv[1:] == ['--debug']
logging.getLogger('zeroconf').setLevel(logging.DEBUG)
# Test a few module features, including service registration, service
# query (for Zoe), and service unregistration.
print(f"Multicast DNS Service Discovery for Python, version {__version__}")
r = Zeroconf()
print("1. Testing registration of a service...")
desc = {'version': '0.10', 'a': 'test value', 'b': 'another value'}
addresses = [socket.inet_aton("127.0.0.1")]
expected = {'127.0.0.1'}
if socket.has_ipv6:
addresses.append(socket.inet_pton(socket.AF_INET6, '::1'))
expected.add('::1')
info = ServiceInfo(
"_http._tcp.local.",
"My Service Name._http._tcp.local.",
addresses=addresses,
port=1234,
properties=desc,
)
print(" Registering service...")
r.register_service(info)
print(" Registration done.")
print("2. Testing query of service information...")
print(" Getting ZOE service: %s" % (r.get_service_info("_http._tcp.local.", "ZOE._http._tcp.local.")))
print(" Query done.")
print("3. Testing query of own service...")
queried_info = r.get_service_info("_http._tcp.local.", "My Service Name._http._tcp.local.")
assert queried_info
assert set(queried_info.parsed_addresses()) == expected
print(f" Getting self: {queried_info}")
print(" Query done.")
print("4. Testing unregister of service information...")
r.unregister_service(info)
print(" Unregister done.")
r.close()
bbsan2k/nzbToMedia | libs/beetsplug/lastgenre/__init__.py | Python | gpl-3.0 | 14,453 | 0
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
"""Gets genres for imported music based on Last.fm tags.
Uses a provided whitelist file to determine which tags are valid genres.
The included (default) genre list was originally produced by scraping Wikipedia
and has been edited to remove some questionable entries.
The scraper script used is available here:
https://gist.github.com/1241307
"""
import pylast
import os
import yaml
import traceback
from beets import plugins
from beets import ui
from beets import config
from beets.util import normpath, plurality
from beets import library
LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY)
PYLAST_EXCEPTIONS = (
pylast.WSError,
pylast.MalformedResponseError,
pylast.NetworkError,
)
REPLACE = {
u'\u2010': '-',
}
def deduplicate(seq):
"""Remove duplicates from sequence wile preserving order.
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
# Canonicalization tree processing.
def flatten_tree(elem, path, branches):
"""Flatten nested lists/dictionaries into lists of strings
(branches).
"""
if not path:
path = []
if isinstance(elem, dict):
for (k, v) in elem.items():
flatten_tree(v, path + [k], branches)
elif isinstance(elem, list):
for sub in elem:
flatten_tree(sub, path, branches)
else:
branches.append(path + [unicode(elem)])
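# For example (illustrative input, not taken from the shipped genres-tree.yaml), flattening
# {'rock': ['hard rock', {'metal': ['doom metal']}]} produces the branches
# [['rock', 'hard rock'], ['rock', 'metal', 'doom metal']].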
def find_parents(candidate, branches):
"""Find parents genre of a given genre, ordered from the closest to
the further parent.
"""
for branch in branches:
try:
idx = branch.index(candidate.lower())
return list(reversed(branch[:idx + 1]))
except ValueError:
continue
return [candidate]
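# For example, find_parents('doom metal', [['rock', 'metal', 'doom metal']]) returns
# ['doom metal', 'metal', 'rock']; a genre that appears in no branch is returned as-is.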
# Main plugin logic.
WHITELIST = os.path.join(os.path.dirname(__file__), 'genres.txt')
C14N_TREE = os.path.join(os.path.dirname(__file__), 'genres-tree.yaml')
class LastGenrePlugin(plugins.BeetsPlugin):
def __init__(self):
super(LastGenrePlugin, self).__init__()
self.config.add({
'whitelist': True,
'min_weight': 10,
'count': 1,
'fallback': None,
'canonical': False,
'source': 'album',
'force': True,
'auto': True,
'separator': u', ',
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
if self.config['auto']:
self.import_stages = [self.imported]
self._genre_cache = {}
# Read the whitelist file if enabled.
self.whitelist = set()
wl_filename = self.config['whitelist'].get()
if wl_filename in (True, ''):  # Indicates the default whitelist.
wl_filename = WHITELIST
if wl_filename:
wl_filename = normpath(wl_filename)
with open(wl_filename, 'r') as f:
for line in f:
line = line.decode('utf8').strip().lower()
if line and not line.startswith(u'#'):
self.whitelist.add(line)
# Read the genres tree for canonicalization if enabled.
self.c14n_branches = []
c14n_filename = self.config['canonical'].get()
if c14n_filename in (True, ''): # Default tree.
c14n_filename = C14N_TREE
if c14n_filename:
c14n_filename = normpath(c14n_filename)
genres_tree = yaml.load(open(c14n_filename, 'r'))
flatten_tree(genres_tree, [], self.c14n_branches)
@property
def sources(self):
"""A tuple of allowed genre sources. May contain 'track',
'album', or 'artist.'
"""
source = self.config['source'].as_choice(('track', 'album', 'artist'))
if source == 'track':
return 'track', 'album', 'artist'
elif source == 'album':
return 'album', 'artist'
elif source == 'artist':
return 'artist',
def _resolve_genres(self, tags):
"""Given a list of strings, return a genre by joining them into a
single string and (optionally) canonicalizing each.
"""
if not tags:
return None
count = self.config['count'].get(int)
if self.c14n_branches:
# Extend the list to consider tags parents in the c14n tree
tags_all = []
for tag in tags:
# Add parents that are in the whitelist, or add the oldest
# ancestor if no whitelist
if self.whitelist:
parents = [x for x in find_parents(tag, self.c14n_branches)
if self._is_allowed(x)]
else:
parents = [find_parents(tag, self.c14n_branches)[-1]]
tags_all += parents
if len(tags_all) >= count:
break
tags = tags_all
tags = deduplicate(tags)
# c14n only adds allowed genres but we may have had forbidden genres in
# the original tags list
tags = [x.title() for x in tags if self._is_allowed(x)]
return self.config['separator'].get(unicode).join(
tags[:self.config['count'].get(int)]
)
def fetch_genre(self, lastfm_obj):
"""Return the genre for a pylast entity or None if no suitable genre
can be found. Ex. 'Electronic, House, Dance'
"""
min_weight = self.config['min_weight'].get(int)
return self._resolve_genres(self._tags_for(lastfm_obj, min_weight))
def _is_allowed(self, genre):
"""Determine whether the genre is present in the whitelist,
returning a boolean.
"""
if genre is None:
return False
if not self.whitelist or genre in self.whitelist:
return True
return False
# Cached entity lookups.
def _last_lookup(self, entity, method, *args):
"""Get a genre based on the named entity using the callable `method`
whose arguments are given in the sequence `args`. The genre lookup
is cached based on the entity name and the arguments. Before the
lookup, each argument has some Unicode characters replaced with
rough ASCII equivalents in order to return better results from the
Last.fm database.
"""
# Shortcut if we're missing metadata.
if any(not s for s in args):
return None
key = u'{0}.{1}'.format(entity, u'-'.join(unicode(a) for a in args))
if key in self._genre_cache:
return self._genre_cache[key]
else:
args_replaced = []
for arg in args:
for k, v in REPLACE.items():
arg = arg.replace(k, v)
args_replaced.append(arg)
genre = self.fetch_genre(method(*args_replaced))
self._genre_cache[key] = genre
return genre
def fetch_album_genre(self, obj):
"""Return the album genre for this Item or Album.
"""
return self._last_lookup(
u'album', LASTFM.get_album, obj.albumartist, obj.album
)
def fetch_album_artist_genre(self, obj):
"""Return the album artist genre for this Item or Album.
"""
return self._last_lookup(
u'artist', LASTFM.get_artist, obj.albumartist
)
def fetch_
ipriver/0x71aBot-Web-Interface | webinter/urls.py | Python | mit | 262 | 0
from django.conf.urls import url
from . import views
from django.views.decorators.cache import cache_page
app_name = 'webinter'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^logout/$', views.logout_view, name='logout'),
]
pombredanne/pants | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen_integration.py | Python | apache-2.0 | 2,191 | 0.007303
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ScroogeGenTest(PantsRunIntegrationTest):
@classmethod
def hermetic(cls):
return True
def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
full_config = {
'GLOBAL': {
'pythonpath': ["%(buildroot)s/contrib/scrooge/src/python"],
'backend_packages': ["pants.backend.codegen", "pants.backend.jvm", "pants.contrib.scrooge"]
},
}
if config:
for scope, scoped_cfgs in config.items():
updated = full_config.get(scope, {})
updated.update(scoped_cfgs)
full_config[scope] = updated
return super(ScroogeGenTest, self).run_pants(command, full_config, stdin_data, extra_env,
**kwargs)
@staticmethod
def thrift_test_target(name):
return 'contrib/scrooge/tests/thrift/org/pantsbuild/contrib/scrooge/scrooge_gen:' + name
def test_good(self):
# scrooge_gen should pass with correct thrift files.
cmd = ['gen', self.thrift_test_target('good-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_namespace_map(self):
# scrooge_gen should pass with namespace_map specified
cmd = ['gen', self.thrift_test_target('namespace-map-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_default_java_namespace(self):
# scrooge_gen should pass with default_java_namespace specified
cmd = ['gen', self.thrift_test_target('default-java-namespace-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_include_paths(self):
# scrooge_gen should pass with include_paths specified
cmd = ['gen', self.thrift_test_target('include-paths-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
fizz-ml/policybandit | trainer.py | Python | mit | 3,859 | 0.011143
import torch as t
from torch.autograd import Variable as V
from torch import FloatTensor as FT
import numpy as np
from bayestorch.hmc import HMCSampler
class SimpleTrainer:
def __init__(self, env,critic,hallucinator,policy_buffer,policy_c, noise_dim):
self.env = env
self.hallucinator = hallucinator
self.critic = critic
self.policy_buffer = policy_buffer
self.policy_c = policy_c
self.noise_dim = noise_dim
def train(self, train_steps,sample_steps,opt_steps):
in_dim=self.env.obs_size
out_dim=self.env.action_size
cur_policy = self.policy_c(in_dim,out_dim)
for i in range(train_steps):
reward = self.sample_episode(cur_policy)
self.policy_buffer.put(cur_policy.state_dict(),reward)
self.train_critic_hallucinator(sample_steps)
self.train_policy(opt_steps)
def sample_episode(self, policy,n=1,skip = 3):
done = False
total_reward = 0
for i in range(n):
cur_obs = self.env.new_episode()
t = 0
while not done:
cur_obs = V(FT(cur_obs)).unsqueeze(0)
display = (t % skip == 0)
cur_action = policy.forward(cur_obs).data.cpu().numpy()
cur_obs,cur_reward,done = self.env.next_obs(cur_action.squeeze(0), render = display)
total_reward += cur_reward
t += 1
avg_episode_reward = total_reward / n
return avg_episode_reward
def train_critic_hallucinator(self,sample_steps):
def closure_gen():
yield (lambda: self.critic.get_prior_llh())
for state_dict,reward in self.policy_buffer:
policy = self.policy_c(self.env.obs_size, self.env.action_size)
policy.load_state_dict(state_dict)
def closure():
noise=V(FT(np.random.randn(self.noise_dim)))
states = self.hallucinator.forward(noise.unsqueeze(0))
# Concatenating dimensions of batch (which is currently 1) and dimensions of
states = states.view(states.size(0)*self.hallucinator.n, -1)
actions = policy.forward(states)
actions = actions.view(1,-1)
states = states.view(1,-1)
mean = self.critic(states,actions)[0]
lsd = self.critic(states,actions)[0]
llh = gaussian_llh(mean,lsd,reward)
return llh
yield closure
params = self.critic.parameter_list() \
+ self.hallucinator.parameter_list()
sampler = HMCSampler(params)
for i in range(sample_steps):
sampler.step(closure_gen)
def train_policy(self,opt_steps):
state_dict, _ = self.policy_buffer.peek()
policy = self.policy_c(self.env.obs_size, self.env.action_size)
policy.load_state_dict(state_dict)
opt = t.optim.SGD(policy.parameters(), lr=0.001)
# This is bad just have one goddamnit
def closure():
noise=V(FT(np.random.randn(self.noise_dim)))
states = self.hallucinator.forward(noise.unsqueeze(0))
# Concatenating dimensions of batch (which is currently 1) and dimensions of
states = states.view(states.size(0)*self.hallucinator.n, -1)
actions = policy.forward(states)
actions = actions.view(1,-1)
states = states.view(1,-1)
reward = self.critic(states,actions)[0]
return reward
for i in range(opt_steps):
opt.zero_grad()
opt.step(closure)
return policy.state_dict()
def gaussian_llh(mean,log_std_dev,reward):
llh = -(mean-reward)**2 - 2*log_std_dev
return llh
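# For reference, the full Gaussian log-density in terms of a log standard deviation is
# -0.5*(x - mean)**2 / sigma**2 - log_std_dev - 0.5*log(2*pi); gaussian_llh above keeps
# only an unscaled quadratic term and -2*log_std_dev. A hedged sketch of the standard
# form (not part of the original trainer):
import math
def gaussian_log_density(x, mean, log_std_dev):
    var = math.exp(2 * log_std_dev)
    return -0.5 * (x - mean) ** 2 / var - log_std_dev - 0.5 * math.log(2 * math.pi)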
thomasleveil/pydig | pydiglib/dnsparam.py | Python | gpl-2.0 | 3,623 | 0.008556
import hashlib
import struct
class DNSparam:
"""Class to encapsulate some DNS parameter types (type, class etc)"""
def __init__(self, prefix, name2val):
self.name2val = name2val
self.val2name = dict([(y,x) for (x,y) in name2val.items()])
self.prefix = prefix
self.prefix_offset = len(prefix)
def get_name(self, val):
"""given code (value), return text name of dns parameter"""
if self.prefix:
return self.val2name.get(val, "%s%d" % (self.prefix, val))
else:
return self.val2name[val]
def get_val(self, name):
"""given text name, return code (value) of a dns parameter"""
if self.prefix and name.startswith(self.prefix):
return int(name[self.prefix_offset:])
else:
return self.name2val[name]
# DNS Resource Record Types
DICT_RRTYPE = {
"A": 1,
"NS": 2,
"MD": 3,
"MF": 4,
"CNAME": 5,
"SOA": 6,
"MB": 7,
"MG": 8,
"MR": 9,
"NULL": 10,
"WKS": 11,
"PTR": 12,
"HINFO": 13,
"MINFO": 14,
"MX": 15,
"TXT": 16,
"RP": 17,
"AFSDB": 18,
"X25": 19,
"ISDN": 20,
"RT": 21,
"NSAP": 22,
"NSAP-PTR": 23,
"SIG": 24,
"KEY": 25,
"PX": 26,
"GPOS": 27,
"AAAA": 28,
"LOC": 29,
"NXT": 30,
"EID": 31,
"NIMLOC": 32,
"SRV": 33,
"ATMA": 34,
"NAPTR": 35,
"KX": 36,
"CERT": 37,
"A6": 38,
"DNAME": 39,
"SINK": 40,
"OPT": 41,
"APL": 42,
"DS": 43,
"SSHFP": 44,
"IPSECKEY": 45,
"RRSIG": 46,
"NSEC": 47,
"DNSKEY": 48,
"DHCID": 49,
"NSEC3": 50,
"NSEC3PARAM": 51,
"TLSA": 52,
"HIP": 55,
"NINFO": 56,
"RKEY": 57,
"TALINK": 58,
"CDS": 59,
"CDNSKEY": 60,
"OPENPGPKEY": 61,
"SPF": 99,
"UINFO": 100,
"UID": 101,
"GID": 102,
"UNSPEC": 103,
"NID": 104,
"L32": 105,
"L64": 106,
"LP": 107,
"EUI48": 108,
"EUI64": 109,
"TKEY": 249,
"TSIG": 250,
"IXFR": 251,
"AXFR": 252,
"MAILB": 253,
"MAILA": 254,
"ANY": 255,
"URI": 256,
"CAA": 257,
"TA": 32768,
"DLV": 32769,
}
DICT_RRCLASS = {
"IN": 1,
"CH": 3,
"HS": 4,
"ANY": 255,
}
# DNS Response Codes
DICT_RCODE = {
"NOERROR": 0,
"FORMERR": 1,
"SERVFAIL": 2,
"NXDOMAIN": 3,
"NOTIMPL": 4,
"REFUSED": 5,
"NOTAUTH": 9,
"BADVERS": 16,
"BADKEY": 17,
"BADTIME": 18,
"BADMODE": 19,
"BADNAME": 20,
"BADALG": 21,
"BADTRUNC": 22,
}
# Instantiate the DNS parameter classes at the module level, since they
# are used by a variety of module routines.
qt = DNSparam("TYPE", DICT_RRTYPE)
qc = DNSparam("CLASS", DICT_RRCLASS)
rc = DNSparam("RCODE", DICT_RCODE)
# DNSSEC Protocol Numbers
dnssec_proto = {
0: "Reserved",
1: "TLS",
2: "Email",
3: "DNSSEC",
4: "IPSEC",
}
# DNSSEC Algorithms
dnssec_alg = {
0: "Reserved",
1: "RSAMD5",
2: "DH",
3: "DSA",
4: "Reserved",
5: "RSASHA1",
6: "DSA-NSEC3-SHA1",
7: "RSASHA1-NSEC3-SHA1",
8: "RSASHA256",
10: "RSASHA512",
12:"ECC-GOST",
13:"ECDSAP256SHA256",
14:"ECDSAP384SHA384",
}
# DNSSEC Digest algorithms (see RFC 4509 and RFC 6605)
dnssec_digest = {
1: "SHA-1",
2: "SHA-256",
4: "SHA-384",
}
# SSH Fingerprint algorithms (see RFC 4255)
sshfp_alg = {
1: "RSA",
2: "DSA",
3: "ECDSA",
4: "ED25519",
}
# SSHFP fingerprint types (see RFC 4255)
sshfp_fptype = {
1: "SHA-1",
2: "SHA-256",
}
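# Illustrative usage of the DNSparam instances defined above (values come from the
# tables in this module):
#   qt.get_val("A")         -> 1
#   qt.get_name(28)         -> "AAAA"
#   qt.get_name(9999)       -> "TYPE9999"  (unknown codes fall back to the prefix form)
#   qt.get_val("TYPE9999")  -> 9999
#   rc.get_name(3)          -> "NXDOMAIN"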
AndreLamurias/IBEnt | src/classification/rext/ddi_kernels.py | Python | mit | 23,772 | 0.008287
#!/usr/bin/env python
#shallow linguistic kernel
import sys, os
import os.path
import xml.etree.ElementTree as ET
import logging
from optparse import OptionParser
import pickle
import operator
from subprocess import Popen, PIPE
from time import time
#from pandas import DataFrame
import numpy as np
from scipy.stats import mode
import platform
import re
import nltk
import nltk.data
from nltk.tree import Tree
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import KFold
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
import sklearn.metrics as skm
from sklearn.preprocessing import normalize
from sklearn.preprocessing import MinMaxScaler
import relations
basedir = "models/ddi_models/"
temp_dir = "temp/"
def reparse_tree(line):
ptree = Tree.fromstring(line)
leaves = ptree.leaves()
def get_pair_instances(pair, pairtext):
pairinstances = []
#if the first candidate has more than one mention, each one is an instance
if len(pair[relations.PAIR_E1TOKENS]) > 1:
#logging.debug("%s e1 tokens", len(pairdic[ddi.PAIR_E1TOKENS]))
#create to instances for this pair
#print "a", [pairtext[t] for t in pairs[pair][ddi.PAIR_E1TOKENS]]
#print "b", [pairtext[t] for t in pairs[pair][ddi.PAIR_E2TOKENS]]
for idx in pair[relations.PAIR_E1TOKENS]:
temptokens = pairtext[:]
#for tidx in range(len(pairtext)):
# if tidx != idx and pairtext[tidx] == "#drug-candidatea#":
# temptokens.append("#drug-entity#")
# else:
# temptokens.append(pairtext[tidx])
for index, item in enumerate(temptokens):
if index != idx and item == "#drug-candidatea#":
temptokens[index] = "#drug-entity#"
pairinstances.append(temptokens[:])
else: # otherwise, consider just one instance for now
pairinstances.append(pairtext[:])
# if the second candidate has more than one mention, for each one of candidate1 mention,
# add another instance for each candidate 2 mention
if len(pair[relations.PAIR_E2TOKENS]) > 1:
#logging.debug("%s e2 tokens", len(pairdic[ddi.PAIR_E2TOKENS]))
totalinstances = len(pairinstances)
#logging.debug("duplicating %s sentences", totalinstances)
for idx in pair[relations.PAIR_E2TOKENS]:
for isent in range(totalinstances):
#logging.debug(' '.join(sent))
temptokens = pairinstances[isent][:]
for index, item in enumerate(temptokens):
if index != idx and item == "#drug-candidateb#":
temptokens[index] = "#drug-entity#"
#for tidx in range(len(sent)):
# if tidx != idx and pairtext[tidx] == "#drug-candidateb#":
# temptokens.append("#drug-entity#")
# else:
# temptokens.append(pairtext[tidx])
pairinstances.append(temptokens[:])
#print pairinstances
#originallen = len(pairinstances)
#duplicate number of instances for this pair, switching roles
#for i in range(originallen):
# inverted = pairinstances[i][:]
# for index, t in enumerate(inverted):
# if t == "#drug-candidatea#":
# inverted[i] = "#drug-candidateb#"
# elif t == "#drug-candidateb#":
# inverted[i] = "#drug-candidatea#"
# pairinstances.append(inverted[:])
return pairinstances
def generatejSRE_line(pairtext, pos, lemmas, ner):
candidates = [0,0]
body = ''
for it in range(len(pairtext)):
#for it in range(len(pairtext)):
if pairtext[it] == "#drug-candidatea#":
#print pairtext[i],
tokentype = 'DRUG'
#tokentype = etypes[0]
tokenlabel = 'A'
candidates[0] += 1
tokentext = "#candidate#"
#tokentext = entitytext[0]
#tokentext = pairtext[it].lstrip()
lemma = tokentext
elif pairtext[it] == "#drug-candidateb#":
#print pairtext[i]
tokentype = 'DRUG'
#tokentype = etypes[0]
tokenlabel = 'T'
tokentext = "#candidate#"
#tokentext = pairtext[it].lstrip()
#tokentext = entitytext[1]
lemma = tokentext
candidates[1] += 1
elif pairtext[it] == "#drug-entity#":
tokentype = 'DRUG'
tokenlabel = 'O'
tokentext = pairtext[it].lstrip()
lemma = tokentext
else:
# logging.debug("{}".format(pairtext[it].lstrip()))
tokentype = ner[it]
tokenlabel = 'O'
tokentext = pairtext[it].lstrip()
lemma = lemmas[it]
if tokentext == '-RRB-':
tokentext = ')'
lemma = ')'
elif tokentext == '-LRB-':
tokentext = '('
lemma = '('
#if ' ' in pairtext[it][0].lstrip() or '\n' in pairtext[it][0].lstrip():
# print "token with spaces!"
# print pairs[pair][ddi.PAIR_TOKENS][it][0].lstrip()
# sys.exit()
body += "&&".join([str(it), tokentext,
lemma,
pos[it],
tokentype, tokenlabel])
body += ' '
#logging.debug("%s\t%s\t%s", str(trueddi), pair, body)
if candidates[0] == 0:
logging.debug("missing first candidate on pair ")
body = "0&&#candidate#&&#candidate#&&-None-&&drug&&T " + body
#print body
elif candidates[1] == 0:
logging.debug("missing second candidate on pair")
#print body
body += " " + str(it+1) + "&&#candidate#&&#candidate#&&-None-&&drug&&T "
return body
def generatejSREdata(pairs, sentence, basemodel, savefile, train=False):
examplelines = []
for pair in pairs:
#logging.debug(pair)
e1id = pair.eids[0]
e2id = pair.eids[1]
sid = sentence.sid
sentence_tokens = [t.text for t in sentence.tokens]
#print pairtext,
if not pair.relation:
trueddi = 0
else:
trueddi = 1
#print pairtext
pos = [t.pos for t in sentence.tokens]
lemmas = [t.lemma for t in sentence.tokens]
ner = [t.tag for t in sentence.tokens]
logging.debug("{} {} {} {}".format(len(sentence_tokens), len(pos), len(lemmas), len(ner)))
pair_text, pos, lemmas, ner = blind_all_entities(sentence_tokens, sentence.entities.elist[basemodel],
[e1id, e2id], pos, lemmas, ner)
logging.debug("{} {} {} {}".format(len(pair_text), len(pos), len(lemmas), len(ner)))
#logging.debug("generating jsre lines...")
#for i in range(len(pairinstances)):
#body = generatejSRE_line(pairinstances[i], pos, stems, ner)
body = generatejSRE_line(pair_text, pos, lemmas, ner)
examplelines.append(str(trueddi) + '\t' + pair.pid + '.i' + '0\t' + body + '\n')
#print body
#elif candidates[0] > 1 or candidates[1] > 1:
# print "multiple candidates!!", pairtext
# logging.debug("writing to file...")
with open(temp_dir + savefile, 'w') as trainfile:
for l in examplelines:
#print l
trainfile.write(l)
# logging.info("wrote " + temp_dir + savefile)
def compact_id(eid):
return eid.replace('.', '').replace('-', '')
def blind_all_entities(tokens, entities, eids, pos, lemmas, ner):
# logging.info(eids)
ogtokens = tokens[:]
found1 = 0
found2 = 0
for e in entities:
if e.eid == eids[0]:
first_token = e.tokens[0].order + found1 + found2
# logging.debug("{} {} {} {}".format(tokens[first_token], pos[first_token], lemmas[first_token], ner[first_token]))
tokens = tokens[:first_token] + ["#drug-candidatea#"] + tokens[first_token:
saurabh-hirani/icinga2_api | icinga2_api/cmdline.py | Python | isc | 3,123 | 0.012168
#!/usr/bin/env python
import click
import json
import re
from icinga2_api.api import Api
from icinga2_api import defaults
VALID_ACTIONS = ['create', 'read', 'update', 'delete']
def validate_uri(ctx, param, value):
if not value.startswith('/'):
raise click.BadParameter('should begin with single /')
return value
def validate_action(ctx, param, value):
if value not in VALID_ACTIONS:
raise click.BadParameter('should be in %s' % VALID_ACTIONS)
return value
def validate_data(ctx, param, value):
if value is None:
return value
try:
return json.loads(value)
except ValueError as e:
raise click.BadParameter('should be valid json')
@click.command()
@click.option('-c', '--configfile',
help='icinga2 API config file. Default: %s' % defaults.CONFIGFILE,
default=defaults.CONFIGFILE)
@click.option('-p', '--profile',
help='icinga2 profile to load. Default: %s' % defaults.PROFILE,
default=defaults.PROFILE)
@click.option('-a', '--action', help='|'.join(VALID_ACTIONS) + ' Default: read',
callback=validate_action,
default='read')
@click.option('-H', '--host', help='icinga2 api host - not required if profile specified',
default=None)
@click.option('--port', help='icinga2 api port - not required if profile specified',
default=None)
@click.option('-u', '--uri', help='icinga2 api uri. Default: ' + defaults.READ_ACTION_URI,
callback=validate_uri,
default=defaults.READ_ACTION_URI)
@click.option('-U', '--user', help='icinga2 api user - not required if profile specified',
default=None)
@click.option('--password', help='icinga2 api password - not required if profile specified',
default=None)
@click.option('-t', '--timeout', help='icinga2 api timeout - not required if profile specified',
default=None)
@click.option('-V', '--verify', help='verify certificate. Default: false',
default=False)
@click.option('-C', '--cert-path', help='verify certificate path - not required if profile specified',
default=None)
@click.option('-v', '--verbose/--no-verbose', help='verbose. Default: false',
default=False)
@click.option('-d', '--data', help='json data to pass',
callback=validate_data,
default=None)
@click.pass_context
def icinga2_api(ctx, **kwargs):
"""
https://github.com/saurabh-hirani/icinga2_api/blob/master/README.md
"""
if kwargs['verbose']:
print 'args: %s' % kwargs
obj = Api(**kwargs)
kwargs['uri'] = re.sub("/{2,}", "/", kwargs['uri'])
method_ref = getattr(obj, kwargs['action'])
output_ds = method_ref(kwargs['uri'], kwargs['data'])
exit_code = 0
if output_ds['status'] != 'success':
click.echo(click.style('CRITICAL: %s action failed' % kwargs['action'], fg='red'))
exit_code = 2
else:
click.echo(click.style('OK: %s action succeeded' % kwargs['action'], fg='green'))
click.echo(json.dumps(output_ds, indent=2))
ctx.exit(exit_code)
if __name__ == '__main__':
icinga2_api()
gina-alaska/emodis_ndvi_python-docker | emodis_ndvi_python/pycodes/oneyear_data_layer_subset_good.py | Python | mit | 5,054 | 0.033241
#This python script is modified from oneyear_data_layer_subset_good.pro
#This routine opens the one-year files defined in the file lists, stacks these files, subsets them, and fills bad data with -2000
#input arguments are flist_ndvi, flist_bq, ul_lon,ul_lat,lr_lon,lr_lat
#;inputs: yyyy_flist_ndvi----file list for one year *ndvi.tif,
#;        yyyy_flist_bq -----file list for one year *ndvi_bq.tif
#; ul-----upper left coordinate in unit of degree in geographic coordinates,WGS84
#; lr-----lower right cordinate in unit of degree in geographic coordinates,WGS84
#; data_ver_flg------, 0-old version data,1-new version data
import sys
import os
import platform
from read_ndvi import *
import raster_process as rp
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
if len(sys.argv) != 7:
print "input arguments are: flist_ndvi, flist_bq, ulx,uly,lrx,lry"
sys.exit(1)
flist_ndvi=sys.argv[1]
flist_bq=sys.argv[2]
ulx=float(sys.argv[3])
uly=float(sys.argv[4])
lrx=float(sys.argv[5])
lry=float(sys.argv[6])
#;test
#;ul in deg, minute, secons= 173d 0' 0.00"W, 72d 0' 0.00"N
#;lr in deg, minute, second= 127d59'56.82"W, 54d 0' 0.07"N
#;if do not want subsize the data, just input 0,0,0,0 for ul_lon,ul_lat,lr_lon,lr_lat, respectively.
#;wrkdir='/home/jiang/nps/cesu/modis_ndvi_250m/wrkdir/'
#;flist_ndvi='/mnt/jzhu_scratch/EMODIS-NDVI-DATA/wrk/ver_new_201107/2008/flist_ndvi'
#;flist_bq = '/mnt/jzhu_scratch/EMODIS-NDVI-DATA/wrk/ver_new_201107/2008/flist_bq'
#;flist_ndvi='/raid/scratch/cesu/eMODIS/ver_old/2008/flist_ndvi'
#;flist_bq='/raid/scratch/cesu/eMODIS/ver_old/2008/flist_bq'
#;flist_ndvi='/home/jiang/nps/cesu/modis_ndvi_250m/wrkdir/2010/2010_flist_ndvi'
#;flist_bq = '/home/jiang/nps/cesu/modis_ndvi_250m/wrkdir/2010/2010_flist_bq'
#;ul=[-173.0d,72.0d]
#;lr=[-127.999116667d,54.000019444d]
#;set path and start envi
#;ENVI, /RESTORE_BASE_SAVE_FILES
#;PREF_SET, 'IDL_PATH', '<IDL_DEFAULT>:+~/nps/cesu/modis_ndvi_250m/bin', /COMMIT
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
if platform.system() == 'Windows':
sign='\\'
else:
sign='/'
#---- read these two lists into flist and flist_bq
u1=open(flist_ndvi,'r')
u2=open(flist_bq ,'r')
#---- count the number of lines in the flist files
#total_line_count = sum(1 for line in open("filename.txt"))
#total_line_count = sum(1 for line in open("filename.txt"))
#---- get the file names into the list
flist=u1.readlines()
flist=[x.rstrip('\n') for x in flist]
flistbq=u2.readlines()
flistbq=[x.rstrip('\n') for x in flistbq]
num=len(flist)
#---- get workdir and year from mid-year file
#p =strpos(flist(1),sign,/reverse_search)
#len=strlen(flist(1))
wrkdir=os.path.dirname(flist[0])
filen =os.path.basename(flist[0])
#;-----use file header to determine the data version
if filen.find('MT3RG_') != -1:
data_ver_flg=0
else:
data_ver_flg=1
if data_ver_flg == 0:
year=filen[6:10] #MT3RG_2008_141-147_250m_composite_ndvi.tif
else:
year=filen[13:17] #AK_eMTH_NDVI.2008.036-042.QKM.VI_NDVI.005.2011202142526.tif
#;---- define a struc to save info of each file
#;p={flists,fn:'abc',sn:0,dims:lonarr(5),bn:0L}
#;x=create_struct(name=flist,fn,'abc',fid,0L,dims,lonarr(5),bn,0L)
#x={flist,fn:'abc',bname:'abc',fid:0L,dims:lonarr(5),pos:0L}
#flista=replicate(x,num) ;save ndvi data files
#flistq=replicate(x,num) ; save ndvi_bq data files
#;---- go through one year ndvi and ndvi_bq data files
First_Flag=True
for j in range(0L, num):
fn_ndvi = flist[j]
#;---- for old data name
if data_ver_flg == 0:
str1='composite_ndvi'
str2='composite_ndvi_bq'
p1=fn_ndvi.rfind(sign)
tmpbname=fn_ndvi[p1+7:p1+19] # for old data, its name looks like:MT3RG_2008_253-259_250m_composite_ndvi.tif
else:
#;---- for new data name
str1='.VI_NDVI.'
str2='.VI_QUAL.'
p1=fn_ndvi.rfind(sign)
tmpbname=fn_ndvi[p1+14:p1+26] #for new data, its name looks like:eMTH_NDVI.2008.029-035.QKM.VI_NDVI.005.2011202084157.tif
p=fn_ndvi.find(str1)
length=len(fn_ndvi)
file_hdr=fn_ndvi[0:p]
file_end =fn_ndvi[p+len(str1):length]
fn_bq=file_hdr+str2+file_end
idx = fn_bq in flistbq
if idx == True:
#---- read ndvi and bq to cut off no-sense points
print('process the '+ str(j+1) + ' th file: ' +fn_ndvi)
(rt_t, rt_d)=read_ndvi(fn_ndvi,fn_bq,ulx,uly,lrx,lry,tmpbname)
if First_Flag == True:
First_Flag=False
tot_t=wrkdir+'/'+year+'_stack_ndvi.tif'
tot_d=wrkdir+'/'+year+'_stack_bq.tif'
os.system('cp '+ rt_t +' '+ tot_t)
os.system('rm -f '+rt_t)
os.system('cp '+ rt_d +' '+ tot_d)
os.system('rm -f '+rt_d)
else:
tot_t=rp.raster_comb(tot_t,rt_t)
tot_d=rp.raster_comb(tot_d,rt_d)
koendeschacht/python-logstash-async | logstash_async/utils.py | Python | mit | 2,122 | 0.000943
# -*- coding: utf-8 -*-
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from __future__ import print_function
from datetime import datetime
from importlib import import_module
from itertools import chain, islice
import sys
import traceback
import six
# ----------------------------------------------------------------------
def ichunked(seq, chunksize):
"""Yiel
|
ds items from an iterator in iterable chunks.
http://stackoverflow.com/a/1335572
"""
itera
|
ble = iter(seq)
while True:
yield list(chain([next(iterable)], islice(iterable, chunksize - 1)))
# ----------------------------------------------------------------------
def safe_log_via_print(log_level, message, *args, **kwargs):
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
log_message = u'{}: {}: {}'.format(timestamp, log_level, message)
print(log_message % args, file=sys.stderr)
# print stack trace if available
exc_info = kwargs.get('exc_info', None)
if exc_info or log_level == 'exception':
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
stack_trace = ''.join(traceback.format_exception(*exc_info))
print(stack_trace, file=sys.stderr)
# ----------------------------------------------------------------------
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
(stolen from Django)
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "{} doesn't look like a module path".format(dotted_path)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "{}" does not define a "{}" attribute/class'.format(module_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
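# Illustrative usage of the helpers above (a sketch, not part of the library's public API):
#   import_string('math.sqrt')(9)  -> 3.0
#   list(ichunked(range(7), 3))    -> [[0, 1, 2], [3, 4, 5], [6]] under pre-PEP 479
#       semantics (Python < 3.7); on Python 3.7+ the bare StopIteration from next()
#       surfaces as RuntimeError once the source iterator is exhausted.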
google-research/football | gfootball/scenarios/tests/keeper_test.py | Python | apache-2.0 | 1,184 | 0.010135
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 30
builder.config().deterministic = False
if builder.EpisodeNumber() % 2 == 0:
builder.SetBallPosition(0.9, 0.3)
else:
builder.SetBallPosition(-0.9, -0.3)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.00, 0.00, e_PlayerRole_GK, True)
builder.AddPlayer(0.85, 0.30, e_PlayerRole_RM, True)
builder.AddPlayer(0.00, 0.00, e_PlayerRole_RM, True)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.00, 0.00, e_PlayerRole_GK, True)
builder.AddPlayer(0.85, 0.30, e_PlayerRole_RM, True)
jmgilman/neolib2 | neolib/plots/altador/steps/HealPetPet.py | Python | gpl-2.0 | 2,736 | 0.001827
from neolib.plots.Step import Step
from neolib.NST import NST
import time
class HealPetPet(Step):
_paths = {
'links': '//*[@id="content"]/table/tr/td[2]//a/@href',
'img': '//*[@id="content"]/table/tr/td[2]/div/img/@src',
'cert': '//area/@href',
}
_HEALS = {
'http://images.neopets.com/altador/misc/petpet_act_b_ffabe6bc57.gif': 0,
'http://images.neopets.com/altador/misc/petpet_act_a_2a605ae262.gif': 1,
'http://images.neopets.com/altador/misc/petpet_act_c_5f4438778c.gif': 2,
'http://images.neopets.com/altador/misc/petpet_act_d_42b934a33b.gif': 3,
}
def __init__(self, usr):
super().__init__(usr, '', '', False)
# Setup link
self.link = ['http://www.neopets.com/altador/petpet.phtml?ppheal=1',
'http://www.neopets.com/altador/petpet.phtml?ppheal=1&sthv=%s']
# Setup checks
self._checks = ['']
def execute(self, last_pg=None):
# Heal the PetPet 10 times to get the certificate
check = ''
for i in range(0, 11):
if check:
pg = self._usr.get_page(check)
else:
pg = self._usr.get_page(self.link[0])
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
if len(self._xpath('cert', pg)) > 0:
print('Found certificate!')
url = self._base_url + self._xpath('cert', pg)[0]
pg = self._usr.get_page(url)
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
print('Saved page')
exit()
links = self._xpath('links', pg)
action = self._HEALS[self._xpath('img', pg)[0]]
url = self._base_url + links[action]
print('URL: ' + url)
pg = self._usr.get_page(url)
links = self._xpath('links', pg)
check = self._base_url + links[4]
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
if len(self._xpath('cert', pg)) > 0:
print('Found certificate!')
url = self._base_url + self._xpath('cert', pg)[0]
pg = self._usr.get_page(url)
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
print('Saved page')
exit()
# Wait till the next minute to check on the petpet
wait = (60 - NST.sec) + 1
print('Waiting ' + str(wait) + ' seconds')
time.sleep(wait)
aniruddha-adhikary/bookit | bookit/providers/models/phone_number.py | Python | mit | 235 | 0
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class PhoneNumber(models.Model):
provider = models.ForeignKey(
to='providers.Provider'
)
phone_number = PhoneNumberField()
meisterluk/taptaptap | tests/proc_005.py | Python | bsd-3-clause | 472 | 0.019068
#!/usr/bin/env python2
from taptaptap.proc import plan, ok, not_ok, out
plan(first=1, last=13)
ok('Starting the program')
ok('Starting the engine')
ok('Find the object')
ok('Grab it', todo=True)
ok('Use it', todo=True)
2 * 2 == 4 and ok('2 * 2 == 4') or not_ok('2 * 2 != 4')
out()
## validity: -1
## ok testcases: 6 / 13
## bailout: no
## stderr: 2 * 2 == 4
## stderr: TODO
## stderr: ~TRUE
## stderr: ~True
## stderr: ~true
boundarydevices/android_external_chromium_org | tools/json_schema_compiler/cpp_bundle_generator.py | Python | bsd-3-clause | 11,513 | 0.006428
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
from model import Platforms
from schema_util import CapitalizeFirstLetter
from schema_util import JsFunctionNameToClassName
import json
import os
import re
def _RemoveDescriptions(node):
"""Returns a copy of |schema| with "description" fields removed.
"""
if isinstance(node, dict):
result = {}
for key, value in node.items():
# Some schemas actually have properties called "description", so only
# remove descriptions that have string values.
if key == 'description' and isinstance(value, basestring):
continue
result[key] = _RemoveDescriptions(value)
return result
if isinstance(node, list):
return [_RemoveDescriptions(v) for v in node]
return node
class CppBundleGenerator(object):
"""This class contains methods to generate code based on multiple schemas.
"""
def __init__(self,
root,
model,
api_defs,
cpp_type_generator,
cpp_namespace,
source_file_dir,
impl_dir):
self._root = root
self._model = model
self._api_defs = api_defs
self._cpp_type_generator = cpp_type_generator
self._cpp_namespace = cpp_namespace
self._source_file_dir = source_file_dir
self._impl_dir = impl_dir
self.api_cc_generator = _APICCGenerator(self)
self.api_h_generator = _APIHGenerator(self)
self.schemas_cc_generator = _SchemasCCGenerator(self)
self.schemas_h_generator = _SchemasHGenerator(self)
def _GenerateHeader(self, file_base, body_code):
"""Generates a code.Code object for a header file
Parameters:
- |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
- |body_code| - the code to put in between the multiple inclusion guards"""
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % self._source_file_dir)
ifndef_name = cpp_util.GenerateIfndefName(self._source_file_dir, file_base)
c.Append()
c.Append('#ifndef %s' % ifndef_name)
c.Append('#define %s' % ifndef_name)
c.Append()
c.Concat(body_code)
c.Append()
c.Append('#endif // %s' % ifndef_name)
c.Append()
return c
def _GetPlatformIfdefs(self, model_object):
"""Generates the "defined" conditional for an #if check if |model_object|
has platform restrictions. Returns None if there are no restrictions.
"""
if model_object.platforms is None:
return None
ifdefs = []
for platform in model_object.platforms:
if platform == Platforms.CHROMEOS:
ifdefs.append('defined(OS_CHROMEOS)')
elif platform == Platforms.LINUX:
ifdefs.append('defined(OS_LINUX)')
elif platform == Platforms.MAC:
ifdefs.append('defined(OS_MACOSX)')
elif platform == Platforms.WIN:
ifdefs.append('defined(OS_WIN)')
else:
raise ValueError("Unsupported platform ifdef: %s" % platform.name)
return ' || '.join(ifdefs)
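# For example, a model object restricted to [Platforms.CHROMEOS, Platforms.LINUX]
# yields the guard string 'defined(OS_CHROMEOS) || defined(OS_LINUX)'.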
def _GenerateRegisterFunctions(self, namespace_name, function):
c = code.Code()
function_ifdefs = self._GetPlatformIfdefs(function)
if function_ifdefs is not None:
c.Append("#if %s" % function_ifdefs, indent_level=0)
function_name = JsFunctionNameToClassName(namespace_name, function.name)
c.Append("registry->RegisterFunction<%sFunction>();" % (
function_name))
if function_ifdefs is not None:
c.Append("#endif // %s" % function_ifdefs, indent_level=0)
return c
def _GenerateFunctionRegistryRegisterAll(self):
c = code.Code()
c.Append('// static')
c.Sblock('void GeneratedFunctionRegistry::RegisterAll('
'ExtensionFunctionRegistry* registry) {')
for namespace in self._model.namespaces.values():
namespace_ifdefs = self._GetPlatformIfdefs(namespace)
if namespace_ifdefs is not None:
c.Append("#if %s" % namespace_ifdefs, indent_level=0)
namespace_name = CapitalizeFirstLetter(namespace.name.replace(
"experimental.", ""))
for function in namespace.functions.values():
if function.nocompile:
continue
c.Concat(self._GenerateRegisterFunctions(namespace.name, function))
for type_ in namespace.types.values():
for function in type_.functions.values():
if function.nocompile:
continue
namespace_types_name = JsFunctionNameToClassName(
namespace.name, type_.name)
c.Concat(self._GenerateRegisterFunctions(namespace_types_name,
function))
if namespace_ifdefs is not None:
c.Append("#endif // %s"
|
% namespace_ifdefs, indent_level=0)
c.Eblock("}")
return c
class _APIHGenerator(object):
"""Generates the header for API registration / declaration"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append('#include <string>')
c.Append()
|
c.Append('#include "base/basictypes.h"')
c.Append()
c.Append("class ExtensionFunctionRegistry;")
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('class GeneratedFunctionRegistry {')
c.Sblock(' public:')
c.Append('static void RegisterAll('
'ExtensionFunctionRegistry* registry);')
c.Eblock('};')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
return self._bundle._GenerateHeader('generated_api', c)
class _APICCGenerator(object):
"""Generates a code.Code object for the generated API .cc file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
'generated_api.h')))
c.Append()
for namespace in self._bundle._model.namespaces.values():
namespace_name = namespace.unix_name.replace("experimental_", "")
implementation_header = namespace.compiler_options.get(
"implemented_in",
"%s/%s/%s_api.h" % (self._bundle._impl_dir,
namespace_name,
namespace_name))
if not os.path.exists(
os.path.join(self._bundle._root,
os.path.normpath(implementation_header))):
if "implemented_in" in namespace.compiler_options:
raise ValueError('Header file for namespace "%s" specified in '
'compiler_options not found: %s' %
(namespace.unix_name, implementation_header))
continue
ifdefs = self._bundle._GetPlatformIfdefs(namespace)
if ifdefs is not None:
c.Append("#if %s" % ifdefs, indent_level=0)
c.Append('#include "%s"' % implementation_header)
if ifdefs is not None:
c.Append("#endif // %s" % ifdefs, indent_level=0)
c.Append()
c.Append('#include '
'"extensions/browser/extension_function_registry.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Concat(self._bundle._GenerateFunctionRegistryRegisterAll())
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
c.Append()
return c
class _SchemasHGenerator(object):
"""Generates a code.Code object for the generated schemas .h file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append('#include <map>')
c.Append('#include <string>')
c.Append()
c.Append('#include "base/strings/string_piece.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('class GeneratedSchemas {')
c.Sblock(' public:')
c.Appen
|
openfisca/LawToCode
|
lawtocode/scripts/harvest_ipp_prelevements_sociaux.py
|
Python
|
agpl-3.0
| 11,104
| 0.010558
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Law-to-Code -- Extract formulas & parameters from laws
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 OpenFisca Team
# https://github.com/openfisca/LawToCode
#
# This file is part of Law-to-Code.
#
# Law-to-Code is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Law-to-Code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Extract parameters from IPP's "Barèmes des prélèvements sociaux" and upload them to Law-to-Code.
IPP = Institut des politiques publiques
http://www.ipp.eu/fr/outils/baremes-prelevements-sociaux/
http://www.ipp.eu/fr/outils/taxipp-simulation/
"""
import argparse
import collections
import ConfigParser
import itertools
import json
import logging
import os
import sys
import urlparse
from biryani1 import baseconv, custom_conv, datetimeconv, states
import requests
import xlrd
app_name = os.path.splitext(os.path.basename(__file__))[0]
conv = custom_conv(baseconv, datetimeconv, states)
log = logging.getLogger(app_name)
N_ = lambda message: message
parameters = []
currency_converter = conv.first_match(
conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
conv.test_none(),
),
conv.pipe(
conv.test_isinstance(tuple),
conv.test(lambda couple: len(couple) == 2, error = N_(u"Invalid couple length")),
conv.struct(
(
conv.pipe(
conv.test_isinstance((float, int)),
conv.not_none,
),
conv.pipe(
conv.test_isinstance(basestring),
conv.test_in([
u'EUR',
u'FRF',
]),
),
),
),
),
)
pss_converters = collections.OrderedDict((
(u"Date d'effet", conv.pipe(
conv.test_isinstance(basestring),
conv.iso8601_input_to_date,
conv.date_to_iso8601_str,
conv.not_none,
)),
(u'Plafond de la Sécurité sociale (mensuel)', currency_converter),
(u'Plafond de la Sécurité sociale (annuel)', currency_converter),
(u'Référence législative', conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
)),
(u'Parution au JO', conv.pipe(
conv.test_isinstance(basestring),
conv.iso8601_input_to_date,
conv.date_to_iso8601_str,
)),
(u'Notes', conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
)),
(None, conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
conv.test_none(),
)),
))
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config', help = 'path of configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
config_parser = ConfigParser.SafeConfigParser(dict(
here = os.path.dirname(os.path.abspath(os.path.normpath(args.config))),
))
config_parser.read(args.config)
conf = conv.check(conv.pipe(
conv.test_isinstance(dict),
conv.struct(
{
'law_to_code.api_key': conv.pipe(
conv.cleanup_line,
conv.not_none,
),
'law_to_code.site_url': conv.pipe(
conv.make_input_to_url(error_if_fragment = True, error_if_path = True, error_if_query = True,
full = True),
conv.not_none,
),
'user_agent': conv.pipe(
conv.cleanup_line,
conv.not_none,
),
},
default = 'drop',
),
conv.not_none,
))(dict(config_parser.items('Law-to-Code-TAXIPP-Harvester')), conv.default_state)
response = requests.get('http://www.ipp.eu/wp-content/uploads/2012/01/IPP-prelevements-sociaux-avril2012.xls')
book = xlrd.open_workbook(file_contents = response.content, formatting_info = True)
sheet_names = book.sheet_names()
assert sheet_names == [
u'Sommaire',
u'PSS',
u'SMIG',
u'SMIC',
u'GMR',
u'CSG-1',
u'CSG-2',
u'CRDS',
u'SS',
u'MMID',
u'MMID-AM',
u'CNAV',
u'VEUVAGE',
u'CSA',
u'FAMILLE',
u'CSS_RED',
u'CHOMAGE',
u'ASF',
u'AGFF',
u'AGS',
u'ARRCO',
u'AGIRC',
u'APEC',
u'CET',
u'DECES_CADRES',
u'ASSIETTE PU',
u'MMID-Etat',
u'MMID-CL',
u'RP',
u'CI',
u'RAFP',
u'CNRACL',
u'IRCANTEC',
u'FDS',
u'TAXSAL',
u'CONSTRUCTION',
u'FNAL',
u'ACCIDENTS',
u'FORMATION',
u'APPRENTISSAGE',
u'VT',
u'PREVOYANCE',
u'AUBRY I',
u'ALLEG_GEN',
u'AUBRYII',
u'SFT',
u'INDICE_FP',
], str((sheet_names,))
sheet = book.sheet_b
|
y_name(u'PSS')
sheet_data = [
[
transform_xls_cell_to_json(book, cell_type, cell_value, she
|
et.cell_xf_index(row_index, column_index))
for column_index, (cell_type, cell_value) in enumerate(itertools.izip(sheet.row_types(row_index),
sheet.row_values(row_index)))
]
for row_index in range(sheet.nrows)
]
taxipp_names = sheet_data[0]
labels = sheet_data[1]
assert labels == pss_converters.keys(), str((labels,))
taxipp_name_by_label = dict(zip(labels, taxipp_names))
description_lines = []
entries = []
state = None
for row_index, row in enumerate(itertools.islice(sheet_data, 2, None)):
if all(cell in (None, u'') for cell in row):
state = 'description'
if state is None:
entry = conv.check(conv.struct(pss_converters))(dict(zip(labels, row)), state = conv.default_state)
entries.append(entry)
else:
description_line = u' '.join(
cell.strip()
for cell in row
if cell is not None
)
description_lines.append(description_line)
description = u'\n'.join(description_lines) or None
parameters = []
for entry in entries:
value_label = u'Plafond de la Sécurité sociale (mensuel)'
parameters.append(dict(
comment = entry[u"Notes"],
description = description,
format = u'float',
legislative_reference = entry[u'Référence législative'],
official_publication_date = entry[u'Parution au JO'],
start_date = entry[u"Date d'effet"],
taxipp_code = taxipp_name_by_label[value_label],
title = value_label,
unit = entry[value_label][1]
if entry[value_label] is not None
else None,
value = entry[value_label][0]
if entry[value_label] is not None
else None,
))
value_label = u'Plafond de la Sécurité sociale (annuel)'
parameters.append(dict(
comment = entry[u"Notes"],
description = description,
format = u'float',
legislative_reference = entry[u'Référence législative'],
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/layer3/net_l3_interface.py
|
Python
|
bsd-3-clause
| 2,074
| 0.000964
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_l3_interface
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L3 interfaces on network devices
description:
- This module provides declarative management of L3 interfaces
on network devices.
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
description: List of L3 interface definitions
purge:
description:
- Purge L3 interfaces not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Set eth0 IPv4 address
net_l3_interface:
name: eth0
ipv4: 192.168.0.1/24
- name: Remove eth0 IPv4 address
net_l3_interface:
name: eth0
state: absent
- name: Set IP addresses on aggregate
net_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.1
|
68.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
net_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode com
|
mands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces ethernet eth0 address '192.168.0.1/24'
"""
|
tomchor/pymicra
|
pymicra/algs/units.py
|
Python
|
gpl-3.0
| 7,214
| 0.016912
|
from __future__ import absolute_import, print_function, division
from .. import decorators
def multiply(elems, units, inplace_units=False, unitdict=None, key=None):
"""
Multiply elements considering their units
"""
return operate(elems, units, inplace_units=inplace_units, unitdict=unitdict, key=key, operation='*')
def add(elems, units, inplace_units=False, unitdict=None, key=None):
"""
Add elements considering their units
"""
return operate(elems, units, inplace_units=inplace_units, unitdict=unitdict, key=key, operation='+')
def divide(elems, units, inplace_units=False, unitdict=None, key=None):
"""
Divide elements considering their units
"""
return operate(elems, units, inplace_units=inplace_units, unitdict=unitdict, key=key, operation='/')
def operate(elems, units, inplace_units=False, unitdict=None, key=None, operation='+'):
"""
Operate on elements considering their units
Parameters
-----------
elems: list, tuple
list of pandas.Series
units: list, tuple
list of pint.units ordered as the elems list
inplace_units: bool
if True, the resulting unit is stored in unitdict under 'key' instead of being returned
unitdict: dict
dict to be set inplace
key: str
name of variables to be set inplace as dict key
"""
import pandas as pd
import numpy as np
idx = elems[0].index
if operation=='+':
result = elems[0].values*units[0]
for elem, unit in zip(elems[1:], units[1:]):
if type(elem) == pd.Series:
elem = elem.reindex(idx)
result += elem.values*unit
else:
result += elem*unit
if operation=='*':
result = elems[0].values*units[0]
for elem, unit in zip(elems[1:], units[1:]):
if type(elem) == pd.Series:
elem = elem.reindex(idx)
result *= elem.values*unit
else:
result *= elem*unit
if operation=='/':
result = elems[0].values*units[0]
for elem, unit in zip(elems[1:], units[1:]):
if type(elem) == pd.Series:
elem = elem.reindex(idx)
result /= elem.values*unit
else:
result /= elem*unit
out = pd.Series(result.magnitude, index=idx)
funit = result.units
if inplace_units==True:
unitdict.update({key : funit})
return out
else:
return out, funit
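# Illustrative sketch (not part of the original module), assuming pint-style
# units from pymicra's UnitRegistry: adding two Series with compatible units
# returns the plain Series plus the resulting pint unit.
#   import pandas as pd
#   from pymicra import ureg
#   a = pd.Series([1.0, 2.0]); b = pd.Series([3.0, 4.0])
#   summed, unit = add([a, b], [ureg('m').u, ureg('m').u])
#   # summed -> pd.Series([4.0, 6.0]); unit -> meter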
def parseUnits(unitstr):
"""
Gets unit from string, list of strings, or dict's values, using the UnitRegistry
defined in __init__.py
"""
from .. import ureg
if isinstance(unitstr, str):
return ureg(unitstr).u
elif isinstance(unitstr, list):
return [ ureg(el).u for el in unitstr ]
elif isinstance(unitstr, dict):
return { key: ureg(el).u for key, el in unitstr.items() }
def convert_to(data, inunit, outunit, inplace_units=False, key=None):
"""
Converts data from one unit to the other
Parameters
-----------
data: pandas.Series
to be changed from one unit to the other
inunit: pint.quantity or dict
unit(s) that the data is in
outunit: str
convert to this unit
inplace_units: bool
if inunit is a dict, the dict is updated in place. "key" keyword must be provided
key: str
if inunit is a dict, it is the name of the variable to be changed
"""
from .. import Q_
if key:
Q = inunit[key].to(outunit)
else:
Q = inunit.to(outunit)
coef = Q.magnitude
outunit = Q.units
if inplace_units:
inunit.update({key : outunit})
return data*coef
else:
return data*coef, outunit
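# Illustrative sketch (not part of the original module): converting a Series
# known to be in millimetres into metres. 'rain_mm' is a hypothetical Series;
# inunit is a pint quantity carrying the current unit.
#   data_m, new_unit = convert_to(rain_mm, ureg('mm'), 'm')
#   # equivalent to rain_mm * 0.001, with new_unit -> meter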
def convert_cols(data, guide, units, inplace_units=False):
"""
Converts data from one unit to the other
Parameters
-----------
data: pandas.DataFrame
to be changed from one unit to the other
guide: dict
{names of columns : units to converted to}
units: dict
units dictionary
inplace_units: bool
if inunit is a dict, the dict is updated in place. "key" keyword must be provided
"""
from .. import algs
from .. import Q_
data = data.copy()
#-------
# An attempt to make it work with Series
if len(data.columns)==1 and (type(guide) != dict):
guide = { data.columns[0] : guide }
guide = algs.parseUnits(guide)
#-------
#-------
# We first turn it into a numpy array to make the conversion using pint natively
for col, outunit in guide.items():
aux = Q_(data[ col ].values, units[ col ])
aux = aux.to(outunit)
data.loc[:, col] = aux
#-------
if inplace_units:
units.update(guide)
return data
else:
return data, guide
def convert_indexes(data, guide, units, inplace_units=False):
"""
Converts data from one unit to the other
Parameters
-----------
data: pandas.Series
to be changed from one unit to the other
guide: dict
{names of columns : units to converted to}
units: dict
units dictionary
inplace_units: bool
if inunit is a dict, the dict is updated in place. "key" keyword must be provided
"""
from .. import algs
data = data.copy()
guide = algs.parseUnits(guide)
#-------
# We first turn it into a numpy array to make the conversion using pint natively
for idx, outunit in guide.items():
aux = data[ idx ] * units[ idx ]
aux = aux.to(outunit)
data.loc[ idx ] = aux.magnitude
#-------
if inplace_units:
units.update(guide)
return data
else:
return data, guide
def with_units(data, units):
"""
Wrapper around toUnitsCsv to create a method to print the contents of
a dataframe plus its units into a unitsCsv file.
Parameters
-----------
data: pandas.DataFrame, pandas.Series
dataframe or series to which units belong
units: dict
dictionary with the names of each column and their unit
"""
import pandas as pd
data = data.copy()
if isinstance(data, pd.DataFrame):
cols = data.columns
#-------------
# A series can be a column of a main DataFrame, or separate elements
elif isinstance(data, pd.Series):
#---------
# We check if it's a column by the name of the Series
if data.name in units.keys() or isinstance(data.index, pd.DatetimeIndex):
data = pd.DataFrame(data, colum
|
ns=[ data.name ])
cols = data.columns
#---------
#---------
# If the name is None or it's not in the list of units, then it's different variables
else:
|
cols = data.index
#---------
#-------------
unts = [ '<{}>'.format(units[c]) if c in units.keys() else '<?>' for c in cols ]
columns = pd.MultiIndex.from_tuples(tuple(zip(cols, unts)))
if isinstance(data, pd.DataFrame):
data.columns = columns
elif isinstance(data, pd.Series):
data.index = columns
return data
|
urbansearchTUD/UrbanSearch
|
urbansearch/server/relations.py
|
Python
|
gpl-3.0
| 807
| 0.001239
|
from flask import Blueprint, jsonify, request
from urbansearch.utils import db_utils
relations_api = Blueprint('relations_api', __name__)
@relations_api.route('/document_info'
|
, methods=['GET'])
def document_info():
if 'city_a' not in request.args or 'city_b' not in request.args:
return jsonify(status=400, error='No city pair given')
city_a = request.args.get('city_a')
city_b = request.args.get('city_b')
documents = db_utils.get_related_documents(city_a, city_b, int(request.args.get('limit', 300)))
return jsonify(status=200, documents=documents)
@relations_api.route('/all', methods=['GET'])
def all():
threshold = int(reque
|
st.args.get('threshold', 125))
relations = db_utils.get_ic_rels(None, threshold)
return jsonify(status=200, relations=relations)
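# Illustrative sketch (not part of the original blueprint): how a client could
# call these endpoints once the blueprint is registered on a Flask app. Host,
# port and any URL prefix are assumptions that depend on how 'relations_api'
# is mounted.
#   requests.get('http://localhost:5000/document_info',
#                params={'city_a': 'Amsterdam', 'city_b': 'Utrecht', 'limit': 50})
#   requests.get('http://localhost:5000/all', params={'threshold': 125})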
|
yangjincai/Xq2EFT
|
testAndOutputGrids.py
|
Python
|
apache-2.0
| 6,139
| 0.012543
|
#!/usr/bin/env python2
import numpy as np
import pdb
from random import sample
from time import time
import heapq
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import sys, os
from eft_calculator import EFT_calculator, Water
import tools
def load_coordinates(name):
lines = open('test.dat/random/'+name).readlines()[-7:-1]
coors = [[float(item) for item in line.split()[2:5]] for line in lines]
return np.array(coors)
class Classical_calculator:
def __init__(self):
self.eps = [0.12, 0.046, 0.046]
self.sigma = [1.7, 0.2245, 0.2245]
self.charge = [-0.834, 0.417, 0.417]
def eval(self, coors):
mol = Water()
coor0 = coors[:3]
coor1 = coors[3:]
e = 0.
f = np.zeros(3)
t = np.zeros(3)
com1 = mol.getCOM(coor1)
eps, sigma, charge = self.eps, self.sigma, self.charge
for i in range(3):
for j in range(3):
ener, force = self.atomicEF(coor0[i], eps[i], sigma
|
[i], charge[i], coor1[j], eps[j], sigma[j], charge[j])
e += ener
f += force
t += np.cross(coor1[j]-com1, force)
#if e>100.0:
# e = 100.0
# f = f/np.linalg.norm(f) * 100.0
# t = t/np.linalg.norm(t) * 100.0
return np.array([e, f[0], f[1], f[2], t[0], t[1], t[2]])
def atomicEF(self, x0, e0, s0, q0, x1, e1, s1, q1):
k = 138.935456
e = np.sq
|
rt(e0 * e1)
s = s0 + s1
r = np.linalg.norm(x0 - x1)
if r <0.1 : return 100.0, np.array([100., 100.,100.,])
sor6 = (s/r) ** 6
evdw = e * (sor6**2 - 2 * sor6)
fvdw = e / r**2 * sor6 * (sor6 - 1) * (x1 - x0)
eelec = k * q0 * q1 / r
felec = k * q0 * q1 / r**3 * (x1 - x0)
ener = evdw + eelec
force = fvdw + felec
return ener, force
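# Illustrative sketch (not part of the original script): a quick sanity call of
# the classical pair term for two oxygen-like sites 3 Angstrom apart, reusing
# the eps/sigma/charge values set in __init__ above.
#   cc_demo = Classical_calculator()
#   ener, force = cc_demo.atomicEF(np.array([0., 0., 0.]), 0.12, 1.7, -0.834,
#                                  np.array([3., 0., 0.]), 0.12, 1.7, -0.834)
#   # ener is the Lennard-Jones + Coulomb pair energy; force is a length-3
#   # numpy array acting on the second site.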
def test_random_set():
e0 = []
e1 = []
fce0 = []
fce1 = []
trq0 = []
trq1 = []
all = []
t1 = time()
for i in range(1, 2000):
# load atomic coor
name = 'test.dat/random/test%04d.inp.log' % i
#if i == 1693: pdb.set_trace()
eft, coors = calculator._parseQMlog(name)
# evaluate with analytical function
eft = cc.eval(coors)
e0.append(eft[0])
fce0 += list(eft[1:4])
trq0 += list(eft[4:7])
# convert atomic coor to r, phi, theta...
X0, q0 = calculator.mol.atomic2Xq(coors[:3])
X1, q1 = calculator.mol.atomic2Xq(coors[3:])
# evaluate with calculator
eft = calculator.eval(X0, q0, X1, q1)
e1.append(eft[0])
#if eft[0] > 15:
# print(coors, name)
# print(np.dtype(q1[0]))
fce1 += list(eft[1:4])
trq1 += list(eft[4:7])
#all.append((-np.abs(e0[-1]-e1[-1]), name))
all.append((-np.linalg.norm(np.array(fce0) - np.array(fce1)), name))
t2 = time()
print('took %.1f s to evaluate the random set' % (t2 - t1))
heapq.heapify(all)
#for i in range(3):
# de, name = heapq.heappop(all)
# print -de, name
"""
for i in range(len(e0)):
if e1[i]> 100.0:
e0[i] = e1[i] = 0.0
for j in range(3):
fce0[i*3 +j ] = fce1[i*3+j] = trq0[i*3+j] = trq1[i*3+j] = 0.0
"""
# make a plot
_, axarr = plt.subplots(1, 3)
p = np.corrcoef(e0, e1)[0, 1]
print("Energy: p =", p)
axarr[0].scatter(e0, e1)
axarr[0].text(0, 0, 'p=%.4f'%p)
p = np.corrcoef(fce0, fce1)[0, 1]
print("Force: p =", p)
axarr[1].scatter(fce0, fce1)
axarr[1].text(0, 0, 'p=%.4f'%p)
p = np.corrcoef(trq0, trq1)[0, 1]
print("Torque: p =", p)
axarr[2].scatter(trq0, trq1)
axarr[2].text(0, 0, 'p=%.4f'%p)
plt.savefig(figname)
def randomSample():
root = 'golden.dat'
if not os.path.exists(root):os.mkdir(root)
def mol2mol_init(ele):
mol = [[i,0.0,0.0,0.0] for i in ele]
return mol
size = 200
folder_id = 0
file_count = 0
confs = calculator.grid._iter_conf()
confs = list(confs)
if len(confs) > 2000:
confs = sample(list(confs), 2000)
for idx, coors in calculator.gen_PDB(confs):
#for id, coors in calculator.gen_atomic_coors(0,10):
#print(idx, coors)
if file_count%size == 0:
folder = os.path.join(root,"EFT_%04d"%(folder_id))
if not os.path.exists(folder):os.mkdir(folder)
folder_id += 1
pdb = open("%s/eft.%s.pdb"%(folder,idx),"w")
pdb.write(coors)
pdb.close()
file_count += 1
def grids_conf():
root = 'grids.dat'
if not os.path.exists(root):os.mkdir(root)
def mol2mol_init(ele):
mol = [[i,0.0,0.0,0.0] for i in ele]
return mol
size = 200
folder_id = 0
file_count = 0
confs = calculator.grid._grid_conf()
for idx, coors in calculator.gen_PDB(confs):
#for id, coors in calculator.gen_atomic_coors(0,10):
#print(idx, coors)
if file_count%size == 0:
folder = os.path.join(root,"EFT_%04d"%(folder_id))
if not os.path.exists(folder):os.mkdir(folder)
folder_id += 1
pdb = open("%s/eft.%s.pdb"%(folder,idx),"w")
pdb.write(coors)
pdb.close()
file_count += 1
if __name__ == '__main__':
if len(sys.argv) < 2:
print("\n Usage:#0 figname.png [datfilename.dat err_cutoff]\n")
sys.exit()
figname = sys.argv[1] # a output fig name
databaseName = sys.argv[2]
t0 = time()
cc = Classical_calculator()
if os.path.exists(databaseName):
print("loaded a old database")
calculator = EFT_calculator(databaseName)
else:
print("created a new mesh")
calculator = EFT_calculator()
if len(sys.argv) == 4:
error_cutoff = float(sys.argv[3])
print("set cutoff as %f"%(error_cutoff))
calculator.fill_grid(cc, databaseName, error_cutoff)
t1 = time()
print('took %.1f s to fill the grid' % (t1 - t0))
test_random_set()
#randomSample()
grids_conf()
|
i02sopop/Kirinki
|
kirinki/mainviewer.py
|
Python
|
agpl-3.0
| 2,339
| 0.013254
|
# -*- coding: utf-8 -*-
__license__ = "GNU Affero General Public License, Ver.3"
__author__ = "Pablo Alvarez de Sotomayor Posadillo"
# This file is part of Kirinki.
#
# Kirinki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Kirinki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License al
|
ong with kirinki. If not, see <http://www.gnu.org/licenses/>.
from django.core.cache import cache
from django.contrib.sessions.models impo
|
rt Session
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from recaptcha.client import captcha
from datetime import datetime, timedelta
import logging
class MainViewer:
def __init__(self, req):
logging.basicConfig(filename='/var/log/kirinki.log',level=logging.DEBUG)
self.request = req
self.session_data = req.session
def getLeftCol(self, blocks = []):
return render_to_string('kirinki/left.html', {'blocks' : blocks})
def getCenterCol(self, blocks = []):
return render_to_string('kirinki/center.html', {'blocks' : blocks})
def getRightCol(self, blocks = []):
return render_to_string('kirinki/right.html', {'blocks' : blocks})
def render(self, leftBlocks, centerBlocks, rightBlocks):
self.page = render_to_response('kirinki/index.html', {'copy' : '© Pablo Alvarez de Sotomayor Posadillo',
'session' : self.session_data,
'leftCol' : self.getLeftCol(leftBlocks),
'centerCol' : self.getCenterCol(centerBlocks),
'rightCol' : self.getRightCol(rightBlocks)}, context_instance=RequestContext(self.request))
return self.page
|
mahim97/zulip
|
zerver/webhooks/homeassistant/tests.py
|
Python
|
apache-2.0
| 1,083
| 0.00554
|
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HomeAssistantHookTests(WebhookTestCase):
STREAM_NAME = 'homeassistant'
URL_TEMPLATE = "/api/v1/external/homeassistant?&api_key={api_key}"
FIXTURE_DIR_NAME = 'homeassistant'
def test_simplereq(self) -> None:
expected_subject = "homeas
|
sistan
|
t"
expected_message = "The sun will be shining today!"
self.send_and_test_stream_message('simplereq', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_req_with_title(self) -> None:
expected_subject = "Weather forecast"
expected_message = "It will be 30 degrees Celsius out there today!"
self.send_and_test_stream_message('reqwithtitle', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name: Text) -> Text:
return self.fixture_data("homeassistant", fixture_name, file_type="json")
|
meejah/AutobahnPython
|
autobahn/util.py
|
Python
|
mit
| 24,461
| 0.001431
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission
|
is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be inclu
|
ded in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import os
import time
import struct
import sys
import re
import base64
import math
import random
import binascii
from datetime import datetime, timedelta
from pprint import pformat
from array import array
import six
import txaio
__all__ = ("xor",
"utcnow",
"utcstr",
"id",
"rid",
"newid",
"rtime",
"Stopwatch",
"Tracker",
"EqualityMixin",
"ObservableMixin",
"IdGenerator",
"generate_token",
"generate_activation_code",
"generate_serial_number",
"generate_user_password")
def encode_truncate(text, limit, encoding='utf8', return_encoded=True):
"""
Given a string, return a truncated version of the string such that
the UTF8 encoding of the string is smaller than the given limit.
This function correctly truncates even in the presence of Unicode code
points that encode to multi-byte encodings which must not be truncated
in the middle.
:param text: The Unicode string to truncate.
:type text: unicode
:param limit: The number of bytes to limit the UTF8 encoding to.
:type limit: int
:returns: The truncated Unicode string.
:rtype: unicode
"""
assert(text is None or type(text) == six.text_type)
assert(type(limit) in six.integer_types)
assert(limit >= 0)
if text is None:
return
# encode the given string in the specified encoding
s = text.encode(encoding)
# when the resulting byte string is longer than the given limit ..
if len(s) > limit:
# .. truncate, and
s = s[:limit]
# decode back, ignoring errors that result from truncation
# in the middle of multi-byte encodings
text = s.decode(encoding, 'ignore')
if return_encoded:
s = text.encode(encoding)
if return_encoded:
return s
else:
return text
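# Illustrative sketch (not part of the original module): truncation never
# splits a multi-byte UTF8 sequence. u"déjà vu" encodes to 9 bytes; limiting
# it to 5 bytes drops the partially-encoded second accented character instead
# of returning invalid UTF8:
#   encode_truncate(u"déjà vu", 5) == b'd\xc3\xa9j'   # 4 bytes, valid UTF8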
def xor(d1, d2):
"""
XOR two binary strings of arbitrary (equal) length.
:param d1: The first binary string.
:type d1: binary
:param d2: The second binary string.
:type d2: binary
:returns: XOR(d1, d2)
:rtype: binary
"""
if type(d1) != six.binary_type:
raise Exception("invalid type {} for d1 - must be binary".format(type(d1)))
if type(d2) != six.binary_type:
raise Exception("invalid type {} for d2 - must be binary".format(type(d2)))
if len(d1) != len(d2):
raise Exception("cannot XOR binary string of differing length ({} != {})".format(len(d1), len(d2)))
d1 = array('B', d1)
d2 = array('B', d2)
for i in range(len(d1)):
d1[i] ^= d2[i]
if six.PY3:
return d1.tobytes()
else:
return d1.tostring()
def utcstr(ts=None):
"""
Format UTC timestamp in ISO 8601 format.
Note: to parse an ISO 8601 formatted string, use the **iso8601**
module instead (e.g. ``iso8601.parse_date("2014-05-23T13:03:44.123Z")``).
:param ts: The timestamp to format.
:type ts: instance of :py:class:`datetime.datetime` or None
:returns: Timestamp formatted in ISO 8601 format.
:rtype: unicode
"""
assert(ts is None or isinstance(ts, datetime))
if ts is None:
ts = datetime.utcnow()
return u"{0}Z".format(ts.strftime(u"%Y-%m-%dT%H:%M:%S.%f")[:-3])
def utcnow():
"""
Get current time in UTC as ISO 8601 string.
:returns: Current time as string in ISO 8601 format.
:rtype: unicode
"""
return utcstr()
class IdGenerator(object):
"""
ID generator for WAMP request IDs.
WAMP request IDs are sequential per WAMP session, starting at 1 and
wrapping around at 2**53 (both values are inclusive [1, 2**53]).
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as an IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or only) number type (JavaScript, Lua, etc).
See https://github.com/wamp-proto/wamp-proto/blob/master/spec/basic.md#ids
"""
def __init__(self):
self._next = 0 # starts at 1; next() pre-increments
def next(self):
"""
Returns next ID.
:returns: The next ID.
:rtype: int
"""
self._next += 1
if self._next > 9007199254740992:
self._next = 1
return self._next
# generator protocol
def __next__(self):
return self.next()
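# Illustrative sketch (not part of the original module): per-session usage of
# the sequential ID generator.
#   gen = IdGenerator()
#   gen.next()   # -> 1
#   gen.next()   # -> 2
#   next(gen)    # -> 3, the generator protocol delegates to next()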
#
# Performance comparison of IdGenerator.next(), id() and rid().
#
# All tests were performed on:
#
# - Ubuntu 14.04 LTS x86-64
# - Intel Core i7 920 @ 3.3GHz
#
# The tests generated 100 mio. IDs and run-time was measured
# as wallclock from Unix "time" command. In each run, a single CPU
# core was essentially at 100% load all the time (though the sys/usr
# ratio was different).
#
# PyPy 2.6.1:
#
# IdGenerator.next() 0.5s
# id() 29.4s
# rid() 106.1s
#
# CPython 2.7.10:
#
# IdGenerator.next() 49.0s
# id() 370.5s
# rid() 196.4s
#
#
# Note on the ID range [0, 2**53]. We once reduced the range to [0, 2**31].
# This led to extremely hard to track down issues due to ID collisions!
# Here: https://github.com/crossbario/autobahn-python/issues/419#issue-90483337
#
# 8 byte mask with 53 LSBs set (WAMP requires IDs from [0, 2**53]
_WAMP_ID_MASK = struct.unpack(">Q", b"\x00\x1f\xff\xff\xff\xff\xff\xff")[0]
def rid():
"""
Generate a new random integer ID from range **[0, 2**53]**.
The generated ID is uniformly distributed over the whole range, doesn't have
a period (no pseudo-random generator is used) and cryptographically strong.
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as an IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or only) number type (JavaScript, Lua, etc).
:returns: A random integer ID.
:rtype: int
"""
return struct.unpack("@Q", os.urandom(8))[0] & _WAMP_ID_MASK
# noinspection PyShadowingBuiltins
def id():
"""
Generate a new random integer ID from range **[0, 2**53]**.
The generated ID is based on a pseudo-random number generator (Mersenne Twister,
which has a period of 2**19937-1). It is NOT cryptographically strong, and
hence NOT suitable to generate e.g. secret keys or access tokens.
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as an IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or on
|
mfwarren/FreeCoding
|
2015/04/fc_2015_04_09.py
|
Python
|
mit
| 378
| 0.002646
|
#!/usr/bin/env python3
# imports go here
import threadin
|
g
#
# Free Coding session for 2015-04-09
# Written by Matt Warren
#
data = threading.local()
def message():
print(data.name)
class Foo(threading.Thread):
def run(self):
data.name = self.getName()
message()
if __name__ == '__main__':
f = Foo()
f2 = Foo()
f.start()
f2.s
|
tart()
|
sdague/home-assistant
|
tests/mock/zwave.py
|
Python
|
apache-2.0
| 6,380
| 0.00047
|
"""Mock helpers for Z-Wave component."""
from pydispatch import dispatcher
from tests.async_mock import MagicMock
def value_changed(value):
"""Fire a value changed."""
dispatcher.send(
MockNetwork.SIGNAL_VALUE_CHANGED,
value=value,
node=value.node,
network=value.node._network,
)
def node_changed(node):
"""Fire a node changed."""
dispatcher.send(MockNetwork.SIGNAL_NODE, node=node, network=node._network)
def notification(node_id, network=None):
"""Fire a notification."""
dispatcher.send(
MockNetwork.SIGNAL_NOTIFICATION, args={"nodeId": node_id}, network=network
)
class MockOption(MagicMock):
"""Mock Z-Wave options."""
def __init__(self, device=None, config_path=None, user_path=None, cmd_line=None):
"""Initialize a Z-Wave mock options."""
super().__init__()
self.device = device
self.config_path = config_path
self.user_path = user_path
self.cmd_line = cmd_line
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockNetwork(MagicMock):
"""Mock Z-Wave network."""
SIGNAL_NETWORK_FAILED = "mock_NetworkFailed"
SIGNAL_NETWORK_STARTED = "mock_NetworkStarted"
SIGNAL_NETWORK_READY = "mock_NetworkReady"
SIGNAL_NETWORK_STOPPED = "mock_NetworkStopped"
SIGNAL_NETWORK_RESETTED = "mock_DriverResetted"
SIGNAL_NETWORK_AWAKED = "mock_DriverAwaked"
SIGNAL_DRIVER_FAILED = "mock_DriverFailed"
SIGNAL_DRIVER_READY = "mock_DriverReady"
SIGNAL_DRIVER_RESET = "mock_DriverReset"
SIGNAL_DRIVER_REMOVED = "mock_DriverRemoved"
SIGNAL_GROUP = "mock_Group"
SIGNAL_NODE = "mock_Node"
SIGNAL_NODE_ADDED = "mock_NodeAdded"
SIGNAL_NODE_EVENT = "mock_NodeEvent"
SIGNAL_NODE_NAMING = "mock_NodeNaming"
SIGNAL_NODE_NEW = "mock_NodeNew"
SIGNAL_NODE_PROTOCOL_INFO = "mock_NodeProtocolInfo"
SIGNAL_NODE_READY = "mock_NodeReady"
SIGNAL_NODE_REMOVED = "mock_NodeRemoved"
SIGNAL_SCENE_EVENT = "mock_SceneEvent"
SIGNAL_VALUE = "mock_Value"
SIGNAL_VALUE_ADDED = "mock_ValueAdded"
SIGNAL_VALUE_CHANGED = "mock_ValueChanged"
SIGNAL_VALUE_REFRESHED = "mock_ValueRefreshed"
SIGNAL_VALUE_REMOVED = "mock_ValueRemoved"
SIGNAL
|
_POLLING_ENABLED = "mock_PollingEnabled"
SIGNAL_POLLING_DISABLED = "mock_PollingDisabled"
SIGNAL_CREATE_BUTTON = "mock_CreateButton"
SIGNAL_DELETE_BUTTON = "mock_DeleteButton"
SIGNAL_BUTTON_ON = "mock_ButtonOn"
SIGNAL_BUTTON_OFF = "moc
|
k_ButtonOff"
SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = "mock_EssentialNodeQueriesComplete"
SIGNAL_NODE_QUERIES_COMPLETE = "mock_NodeQueriesComplete"
SIGNAL_AWAKE_NODES_QUERIED = "mock_AwakeNodesQueried"
SIGNAL_ALL_NODES_QUERIED = "mock_AllNodesQueried"
SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = "mock_AllNodesQueriedSomeDead"
SIGNAL_MSG_COMPLETE = "mock_MsgComplete"
SIGNAL_NOTIFICATION = "mock_Notification"
SIGNAL_CONTROLLER_COMMAND = "mock_ControllerCommand"
SIGNAL_CONTROLLER_WAITING = "mock_ControllerWaiting"
STATE_STOPPED = 0
STATE_FAILED = 1
STATE_RESETTED = 3
STATE_STARTED = 5
STATE_AWAKED = 7
STATE_READY = 10
def __init__(self, options=None, *args, **kwargs):
"""Initialize a Z-Wave mock network."""
super().__init__()
self.options = options
self.state = MockNetwork.STATE_STOPPED
class MockNode(MagicMock):
"""Mock Z-Wave node."""
def __init__(
self,
*,
node_id=567,
name="Mock Node",
manufacturer_id="ABCD",
product_id="123",
product_type="678",
command_classes=None,
can_wake_up_value=True,
manufacturer_name="Test Manufacturer",
product_name="Test Product",
network=None,
**kwargs,
):
"""Initialize a Z-Wave mock node."""
super().__init__()
self.node_id = node_id
self.name = name
self.manufacturer_id = manufacturer_id
self.product_id = product_id
self.product_type = product_type
self.manufacturer_name = manufacturer_name
self.product_name = product_name
self.can_wake_up_value = can_wake_up_value
self._command_classes = command_classes or []
if network is not None:
self._network = network
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def has_command_class(self, command_class):
"""Test if mock has a command class."""
return command_class in self._command_classes
def get_battery_level(self):
"""Return mock battery level."""
return 42
def can_wake_up(self):
"""Return whether the node can wake up."""
return self.can_wake_up_value
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockValue(MagicMock):
"""Mock Z-Wave value."""
_mock_value_id = 1234
def __init__(
self,
*,
label="Mock Value",
node=None,
instance=0,
index=0,
value_id=None,
**kwargs,
):
"""Initialize a Z-Wave mock value."""
super().__init__()
self.label = label
self.node = node
self.instance = instance
self.index = index
if value_id is None:
MockValue._mock_value_id += 1
value_id = MockValue._mock_value_id
self.value_id = value_id
self.object_id = value_id
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
def refresh(self):
"""Mock refresh of node value."""
value_changed(self)
class MockEntityValues:
"""Mock Z-Wave entity values."""
def __init__(self, **kwargs):
"""Initialize the mock zwave values."""
self.primary = None
self.wakeup = None
self.battery = None
self.power = None
for name in kwargs:
setattr(self, name, kwargs[name])
def __iter__(self):
"""Allow iteration over all values."""
return iter(self.__dict__.values())
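# Illustrative sketch (not part of the original helpers): how a test might wire
# the mocks together and fire a value-changed signal. The extra 'data' keyword
# on MockValue is a hypothetical attribute passed through **kwargs.
#   network = MockNetwork()
#   node = MockNode(node_id=1, network=network, command_classes=[0x25])
#   value = MockValue(label="Switch", node=node, data=True)
#   value_changed(value)   # dispatches MockNetwork.SIGNAL_VALUE_CHANGED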
|
curaloucura/Enso-Ubuntu
|
ensocommands/random.py
|
Python
|
bsd-3-clause
| 2,893
| 0.023159
|
import re, os
def cmd_install(ensoapi):
seldict = ensoapi.get_selection()
text = seldict.get("text", "").strip()
lines = text.split("\n")
ensoapi.display_message(lines)
return
if len(lines) < 3:
msg = "There was no command to install!"
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
while lines[0].strip() == "":
lines.pop(0)
if lines[0].strip() != "# Enso command file":
msg = "There was no command to install!"
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
command_file_name = re.sub("^\s*#\s*","",lines[1].strip())
if not command_file_name.endswith(".py"):
msg = "Couldn't install this command %s" % command_file_name
|
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
cmd_folde
|
r = ensoapi.get_enso_commands_folder()
command_file_path = os.path.join(cmd_folder, command_file_name)
shortname = os.path.splitext(command_file_name)[0]
if os.path.exists(command_file_path):
msg = "You already have a command named %s" % shortname
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
installed_commands = [x['cmdName'] for x in ensoapi.get_commands_from_text(text)]
if len(installed_commands) == 1:
install_message = "%s is now a command" % installed_commands[0]
else:
install_message = "%s are now commands" % ", ".join(installed_commands)
fp = open(command_file_path, "w")
fp.write(text)
fp.close()
ensoapi.display_message(install_message)
ensoapi.set_selection({
"text":"Enso: %s" % install_message
})
def cmd_footnote(ensoapi):
"Wrap text in my in-HTML footnote style"
seldict = ensoapi.get_selection()
text = seldict.get("text", "")
html = seldict.get("html", text)
if not text:
ensoapi.display_message("No selection!")
else:
result = '<span style="color:red" title="%s">*</span>' % html
ensoapi.set_selection({
"text":result
})
def cmd_echo(ensoapi):
"Displays the current selection dictionary"
sel = ensoapi.get_selection()
ensoapi.display_message(str(sel))
def cmd_learn_as(ensoapi, new_command):
"Remember current selection as a command"
sel = ensoapi.get_selection().get("text", "")
if not sel:
ensoapi.display_message("No selection!")
return
cmd_folder = ensoapi.get_enso_commands_folder()
learned_commands = os.path.join(cmd_folder, "learned_commands.py")
write_os = False
if not os.path.exists(learned_commands): write_os = True
fp = open(learned_commands,"a")
if write_os: fp.write("import os\n")
fp.write("def cmd_%s(ensoapi): os.system('gnome-open %s')\n" % (new_command.replace(" ","_"),sel))
fp.close()
ensoapi.display_message("%s is now a command" % new_command)
|
yuzie007/ph_plotter
|
ph_plotter/total_dos_plotter.py
|
Python
|
mit
| 1,815
| 0.002204
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from .dos_plotter import DOSPlotter
__author__ = "Yuji Ikeda"
class TotalDOSPlotter(DOSPlotter):
def load_data(self, data_file='total_dos.dat'):
super(TotalDOSPlotter, self).load_data(data_file)
return self
def run(self):
variables = self._variables
primitive = self.create_primitive()
natoms = primitive.get_number_of_atoms()
symbols = primitive.get_chemical_symbols()
print("natoms:", natoms)
print("symbols:", symbols)
self.set_figure_name_prefix("total_dos")
self.set_plot_symbol(False)
self.set_plot_atom(False)
self.load_data(variables["data_file"])
variables.update({
"freq_unit": "THz",
"unit": 1.0,
"natoms": natoms,
"symbols": symbols,
})
self.update_variables(variables)
# self.set_is_horizontal(True)
# self.plot_dos()
self.set_is_horizontal(False)
self.create_figure()
return
from scipy.constants import eV, Planck
THz2meV = Planck / eV * 1e+15 # 4.135667662340164
# meV
variables.update({
"freq_unit": "meV",
"unit": THz2meV,
})
scale = 4.0
variabl
|
es["f_min"] *= scale
variables["f_max"] *= scale
variables["d_freq"] *= scale
variables["dos_min"] /= scale
variables["dos_max"] /= scale
variables["dos_ticks"] /= scale
self.up
|
date_variables(variables)
# self.set_is_horizontal(True)
# self.plot_dos()
self.set_is_horizontal(False)
self.create_figure()
|
fred3m/astro-toyz
|
astrotoyz/__init__.py
|
Python
|
bsd-3-clause
| 727
| 0.001376
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is an Astropy affiliated package.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from example_mod import *
from astrotoyz import tasks
from astrotoyz import viewer
from astrotoyz import detect_sources
from astrotoyz import io
fro
|
m astrotoyz import data_typ
|
es
from astrotoyz import config
|
Mlieou/leetcode_python
|
leetcode/python/ex_376.py
|
Python
|
mit
| 570
| 0.007018
|
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2: return len(nums)
prev_diff = nums[1] - nums[0]
if prev_diff != 0:
longest = 2
else:
lo
|
ngest = 1
for i in range(2, len(nums)):
curr_diff = (nums[i] - nums[i-1])
if (curr_diff > 0 and prev_diff <= 0) or (curr_diff < 0 and prev_diff >= 0):
longest += 1
|
prev_diff = curr_diff
return longest
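# Illustrative sketch (not part of the original solution): the greedy count of
# alternating differences on the classic examples from the problem statement.
#   Solution().wiggleMaxLength([1, 7, 4, 9, 2, 5])                    # -> 6
#   Solution().wiggleMaxLength([1, 17, 5, 10, 13, 15, 10, 5, 16, 8])  # -> 7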
|
djangophx/beer-tracker
|
tracker/migrations/0001_initial.py
|
Python
|
mit
| 2,278
| 0.003512
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-19 16:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Brewery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('location', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Style',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Venue',
fields=[
('id', models.AutoField(au
|
to_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('venue_type', models.CharField(choices=[('bar', 'Bar'), ('brew', 'Brewery'),
|
('truck', 'Food Truck')], max_length=5)),
('beers', models.ManyToManyField(related_name='venues', to='tracker.Beer')),
],
),
migrations.AddField(
model_name='beer',
name='brewery',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.Brewery'),
),
migrations.AddField(
model_name='beer',
name='style',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Style'),
),
]
|
tensorflow/tensorboard
|
tensorboard/compat/tensorflow_stub/tensor_shape.py
|
Python
|
apache-2.0
| 34,472
| 0.000754
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from . import compat, dtypes
from tensorboard.compat.proto import tensor_shape_pb2
# @tf_export("Dimension")
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
elif isinstance(value, dtypes.DType):
raise TypeError("Cannot convert %s to Dimension" % value)
else:
self._value = int(value)
if (
not isinstance(value, compat.bytes_or_text_types)
and self._value != value
):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this
Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
# This is needed for Windows.
# See https://github.com/tensorflow/tensorflow/pull/9780
def __long__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_convertible_with(self, other):
"""Returns true if `other` is convertible with this Dimension.
Two known Dimensions are convertible if they have the same value.
An unknown Dimension is convertible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are convertible.
"""
other = as_dimension(other)
return (
self._value is None
or other.value is None
or self._value == other.value
)
def assert_is_convertible_with(self, other):
"""Raises an exception if `other` is not convertible with this
Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not convertible (see
is_convertible_with).
"""
if not self.is_convertible_with(other):
raise ValueError(
"Dimensions %s and %s are not convertible" % (self, other)
)
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and
`other`.
Dimensions are combined as follows:
```python
tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)
tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible (see
is_convertible_with).
"""
other = as_dimension(other)
self.assert_is_convertible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
```python
tf.Dime
|
nsion(m) + tf.Dimension(n) == tf.Dimension(m + n)
tf.Dimension(m) + tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) + tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
|
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __radd__(self, other):
"""Returns the sum of `other` and `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
return self + other
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
```python
tf.Dimension(m) - tf.Dimension(n) == tf.Dimension(m - n)
tf.Dimension(m) - tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) - tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __rsub__(self, other):
"""Returns the subtraction of `self` from `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value - self._value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
Dimensions are summed as follows:
```python
tf.Dimension(m) * tf.Dimension(n) == tf.Dimension(m * n)
tf.Dimension(m) * tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) * tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
try:
other = as_
|
mfalesni/cfme_tests
|
cfme/tests/services/test_catalog_item.py
|
Python
|
gpl-2.0
| 5,757
| 0.001563
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from selenium.common.exceptions import NoSuchElementException
import cfme.tests.configure.test_access_control as tac
from cfme.base.login import BaseLoggedInPage
from cfme import test_requirements
from cfme.utils import error
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.update import update
pytestmark = [test_requirements.service, pytest.mark.tier(3), pytest.mark.ignore_stream("upstream")]
@pytest.yield_fixture(scope="function")
def catalog_item(appliance, dialog, catalog):
cat_item = appliance.collections.catalog_items.create(
appliance.collections.catalog_items.GENERIC,
name='test_item_{}'.format(fauxfactory.gen_alphanumeric()),
description="my catalog item", display_in=True,
catalog=catalo
|
g, dialog=dialog
)
yield cat_item
# fixture cleanup
try:
cat_item.delete()
except NoSuchElementException:
logger.warning(
|
'test_catalog_item: catalog_item yield fixture cleanup, catalog item "{}" '
'not found'.format(cat_item.name))
@pytest.yield_fixture(scope="function")
def catalog_bundle(appliance, catalog_item):
""" Create catalog bundle
Args:
catalog_item: as resource for bundle creation
"""
bundle_name = "bundle" + fauxfactory.gen_alphanumeric()
catalog_bundle = appliance.collections.catalog_bundles.create(
bundle_name, description="catalog_bundle",
display_in=True, catalog=catalog_item.catalog,
dialog=catalog_item.dialog,
catalog_items=[catalog_item.name])
yield catalog_bundle
# fixture cleanup
try:
catalog_bundle.delete()
except NoSuchElementException:
logger.warning('test_catalog_item: catalog_item yield fixture cleanup, catalog item "{}" '
'not found'.format(catalog_bundle.name))
@pytest.fixture(scope="function")
def check_catalog_visibility(request, user_restricted, tag):
def _check_catalog_visibility(test_item_object):
"""
Args:
test_item_object: object for visibility check
"""
category_name = ' '.join((tag.category.display_name, '*'))
test_item_object.add_tag(category_name, tag.display_name)
with user_restricted:
assert test_item_object.exists
test_item_object.remove_tag(category_name, tag.display_name)
with user_restricted:
assert not test_item_object.exists
return _check_catalog_visibility
@pytest.mark.skip('Catalog items are converted to collections. Refactoring is required')
def test_create_catalog_item(catalog_item):
catalog_item.create()
def test_update_catalog_item(catalog_item):
with update(catalog_item):
catalog_item.description = "my edited item description"
def test_add_button_group(catalog_item, appliance):
button_name = catalog_item.add_button_group()
view = appliance.browser.create_view(BaseLoggedInPage)
if appliance.version.is_in_series('5.8'):
message = 'Buttons Group "{}" was added'.format(button_name)
else:
message = 'Button Group "{}" was added'.format(button_name)
view.flash.assert_success_message(message)
def test_add_button(catalog_item, appliance):
button_name = catalog_item.add_button()
view = appliance.browser.create_view(BaseLoggedInPage)
if appliance.version.is_in_series('5.8'):
message = 'Button "{}" was added'.format(button_name)
else:
message = 'Custom Button "{}" was added'.format(button_name)
view.flash.assert_success_message(message)
def test_edit_tags(catalog_item):
catalog_item.add_tag("Cost Center *", "Cost Center 001")
catalog_item.remove_tag("Cost Center *", "Cost Center 001")
@pytest.mark.skip('Catalog items are converted to collections. Refactoring is required')
@pytest.mark.meta(blockers=[BZ(1531512, forced_streams=["5.8", "5.9", "upstream"])])
def test_catalog_item_duplicate_name(catalog_item):
catalog_item.create()
with error.expected("Name has already been taken"):
catalog_item.create()
@pytest.mark.skip('Catalog items are converted to collections. Refactoring is required')
@pytest.mark.meta(blockers=[BZ(1460891, forced_streams=["5.8", "upstream"])])
def test_permissions_catalog_item_add(catalog_item):
"""Test that a catalog can be added only with the right permissions."""
tac.single_task_permission_test([['Everything', 'Services', 'Catalogs Explorer',
'Catalog Items']],
{'Add Catalog Item': catalog_item.create})
def test_tagvis_catalog_items(check_catalog_visibility, catalog_item):
""" Checks catalog item tag visibility for restricted user
Prerequisites:
Catalog, tag, role, group and restricted user should be created
Steps:
1. As admin add tag to catalog item
2. Login as restricted user, catalog item is visible for user
3. As admin remove tag
4. Login as restricted user, catalog item is not visible for user
"""
check_catalog_visibility(catalog_item)
def test_tagvis_catalog_bundle(check_catalog_visibility, catalog_bundle):
""" Checks catalog bundle tag visibility for restricted user
Prerequisites:
Catalog, tag, role, group, catalog item and restricted user should be created
Steps:
1. As admin add tag to catalog bundle
2. Login as restricted user, catalog bundle is visible for user
3. As admin remove tag
4. Login as restricted user, catalog bundle is not visible for user
"""
check_catalog_visibility(catalog_bundle)
|
csilzen/whatdoyousee
|
python/label/label.py
|
Python
|
apache-2.0
| 2,624
| 0.001143
|
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script uses the Vision API's label detection capabilities to find a label
based on an image's content.
To run the example, install the necessary libraries by running:
pip install -r requirements.txt
Run the script on an image to get a label, E.g.:
./label.py <path-to-image>
"""
# [START import_libraries]
import argparse
import base64
import httplib2
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# The url template to retrieve the discovery document for trusted testers.
DISCOVERY_URL='https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
# [END import_libraries]
def main(photo_file):
"""Run a label request on a single image"""
# [START authenticate]
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
# [END authenticate]
# [START construct_request]
with open(photo_file, 'rb') as image:
image_content = base64.b64encode(image.read())
service_request = service.images().annotate(body={
'requests': [{
'image': {
|
'content': image_content.decode('UTF-8')
},
'features': [{
'type': 'LABEL_DETECTION',
'maxResults': 1
}]
}]
})
# [END construct_request]
# [START parse_response]
response = service_request.execute()
label = response['responses'][0]['labelAnnotations'][0]['description']
print('F
|
ound label: %s for %s' % (label, photo_file))
return 0
# [END parse_response]
# [START run_application]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('image_file', help='The image you\'d like to label.')
args = parser.parse_args()
main(args.image_file)
# [END run_application]
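# Illustrative invocation (the printed label depends on the image and on the
# Vision API response; the values below are hypothetical):
#   $ ./label.py cat.jpg
#   Found label: cat for cat.jpg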
|
cygnushan/measurement
|
ST_spectrum/Ui_ST_2400.py
|
Python
|
mit
| 22,827
| 0.002075
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'G:\WorkDir\gas-sensing_resistors\ST_spectrum\ST_2400.ui'
#
# Created: Tue Apr 12 22:50:19 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_UI_sens2400(object):
def setupUi(self, UI_sens2400):
UI_sens2400.setObjectName(_fromUtf8("UI_sens2400"))
UI_sens2400.resize(486, 360)
UI_sens2400.setMinimumSize(QtCore.QSize(480, 360))
UI_sens2400.setMaximumSize(QtCore.QSize(486, 360))
font = QtGui.QFont()
font.setPointSize(12)
UI_sens2400.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/yb.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
UI_sens2400.setWindowIcon(icon)
self.verticalLayout_8 = QtGui.QVBoxLayout(UI_sens2400)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setSpacing(20)
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setSpacing(20)
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setSpacing(2)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.label_13 = QtGui.QLabel(UI_sens2400)
self.label_13.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.horizontalLayout_12.addWidget(self.label_13)
self.res_range = QtGui.QComboBox(UI_sens2400)
self.res_range.setMaximumSize(QtCore.QSize(16777215, 22))
self.res_range.setObjectName(_fromUtf8("res_range"))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.horizontalLayout_12.addWidget(self.res_range)
self.horizontalLayout_14.addLayout(self.horizontalLayout_12)
self.res_detect = QtGui.QPushButton(UI_sens2400)
self.res_detect.setMaximumSize(QtCore.QSize(16777215, 22))
self.res_detect.setObjectName(_fromUtf8("res_detect"))
self.horizontalLayout_14.addWidget(self.res_detect)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem)
self.verticalLayout_7.addLayout(self.horizontalLayout_14)
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setSpacing(10)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setSpacing(2)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.label_3 = QtGui.QLabel(UI_sens2400)
self.label_3.setMaximumSize(QtCore.QSize(32, 22))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_9.addWidget(self.label_3)
self.detV = QtGui.QLineEdit(UI_sens2400)
self.detV.setEnabled(False)
self.detV.setMinimumSize(QtCore.QSize(60, 22))
self.detV.setMaximumSize(QtCore.QSize(65535, 22))
self.detV.setObjectName(_fromUtf8("detV"))
self.horizontalLayout_9.addWidget(self.detV)
self.label_7 = QtGui.QLabel(UI_sens2400)
self.label_7.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout_9.addWidget(self.label_7)
self.horizontalLayout_13.addLayout(self.horizontalLayout_9)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setSpacing(2)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.label_6 = QtGui.QLabel(UI_sens2400)
self.label_6.setMaximumSize(QtCore.QSize(32, 22))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout_10.addWidget(self.label_6)
self.detI = QtGui.QLineEdit(UI_sens2400)
self.detI.setEnabled(False)
self.detI.setMinimumSize(QtCore.QSize(60, 22))
self.detI.setMaximumSize(QtCore.QSize(65535, 22))
self.detI.setText(_fromUtf8(""))
self.detI.setObjectName(_fromUtf8("detI"))
self.horizontalLayout_10.addWidget(self.detI)
self.label_11 = QtGui.QLabel(UI_sens2400)
self.label_11.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.horizontalLayout_10.addWidget(self.label_11)
self.horizontalLayout_13.addLayout(self.horizontalLayout_10)
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setSpacing(2)
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.label_2 = QtGui.QLabel(UI_sens2400)
self.label_2.setMaximumSize(QtCore.QSize(32, 22))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_11.addWidget(self.label_2)
self.detR = QtGui.QLineEdit(UI_sens2400)
self.detR.setEnabled(False)
self.detR.setMinimumSize(QtCo
|
re.QSize(60, 22))
self.detR.setMaximumSize(QtCore.QSize(65535, 22))
self.detR.setObjectName(_fromUtf8("detR"))
self.horizontalLayout_11.addWi
|
dget(self.detR)
self.label_12 = QtGui.QLabel(UI_sens2400)
self.label_12.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.horizontalLayout_11.addWidget(self.label_12)
self.horizontalLayout_13.addLayout(self.horizontalLayout_11)
self.verticalLayout_7.addLayout(self.horizontalLayout_13)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.mode2or4 = QtGui.QComboBox(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.mode2or4.setFont(font)
self.mode2or4.setObjectName(_fromUtf8("mode2or4"))
self.mode2or4.addItem(_fromUtf8(""))
self.mode2or4.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.mode2or4)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.horizontalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_8 = QtGui.QLabel(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.horizontalLayout_3.addWidget(self.label_8)
self.output_mode = QtG
|
AyoubZahid/odoo
|
openerp/tools/translate.py
|
Python
|
gpl-3.0
| 48,927
| 0.003352
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
from xml.sax.saxutils import escape
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ'
|
: 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands'
|
,
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all english small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Helper functions for translating fields
#
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
# which elements are translated inline
TRANSLATED_ELEMENTS = {
'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'del', 'dfn', 'em',
'font', 'i', 'ins', 'kbd', 'keygen', 'mark', 'math', 'meter', 'output',
'progress', 'q', 'ruby', 's', 'samp', 'small', 'span', 'strong', 'sub',
'sup', 'time', 'u', 'var', 'wbr', 'text',
}
# which attributes must be translated
TRANSLATED_ATTRS = {
'string', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title',
}
avoid_pattern = re.compile(r"[\s\n]*<!DOCTYPE", re.IGNORECASE)
class XMLTranslator(object):
""" A sequence of serialized XML/HTML items, with some of them to translate
(todo) and others already translated (done). The purpose of this object
is to simplify the handling of phrasing elements (like <b>) that must be
translated together with their surrounding text.
For instance, the content of the "div" element below will be translated
as a whole (without surrounding spaces):
<div>
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
<b>sed</b> do eiusmod tempor incididunt ut labore et dolore
magna aliqua. <span class="more">Ut enim ad minim veniam,
<em>quis nostrud exercitation</em> ullamco laboris nisi ut
aliquip ex ea commodo consequat.</span>
</div>
"""
def __init__(self, callback, method, parser=None):
self.callback = callback # callback function to translate terms
self.method = method # serialization method ('xml' or 'html')
self.parser = parser # parser for validating translations
self._done = [] # translated strings
self._todo = [] # todo strings that come after _done
self.needs_trans = False # whether todo needs translation
def todo(self, text, needs_trans=True):
self._todo.append(text)
if needs_trans and text.strip():
self.needs_trans = True
def all_todo(self):
return not self._done
def get_todo(self):
return "".join(self._todo)
def flush(self):
if self._todo:
todo = "".join(self._todo)
done = self.process_text(todo) if self.needs_trans else todo
self._done.append(done)
del self._todo[:]
self.needs_trans = False
def done(self, text):
self.flush()
self._done.append(text)
def get_done(self):
""" Complete the translations and return the result. """
self.flush()
return "".join(self._done)
def process_text(self, text):
""" Translate text.strip(), but keep the surrounding spaces from text. """
term = text.strip()
trans = term and self.callback(term)
if trans:
try:
# parse the translation to validate it
etree.fromstring("<div>%s</div>" % encode(trans), parser=self.parser)
except etree.ParseError:
# fallback: escape the translation
trans = escape(trans)
text = text.replace(term, trans)
return text
def process_attr(self, attr):
""" Translate the given node attribute value. """
term = attr.strip()
trans = term and self.callback(term)
return attr.replace(term, trans) if trans else attr
def process(self, node):
""" Process the given xml `node`: collect `todo` and `done` items. """
if (
isinstance(node, SKIPPED_ELEMENT_TYPES) or
node.tag in SKIPPED_ELEMENTS or
node.get("t-translation", "").strip() == "off" or
node.tag == "attribute" and node.get("name") not in TRANSLATED_ATTRS
):
# do not translate the contents of the node
tail, node.tail = node.tail, None
self.done(etree.tostring(node, method=self.method))
self.todo(escape(tail or ""))
return
# process children nodes locally in child_trans
child_trans = XMLTranslator(self.callback, self.method, parser=self.parser)
if node.text:
if avoid_pattern.match(node.text):
child_trans.done(e
|
sredmond/acmpy
|
tests/datastructures/test_basicgraph.py
|
Python
|
mit
| 67
| 0
|
"""Tests for the :
|
mod:`campy.datastructures.basicgraph` module."""
|
|
Murillo/Hackerrank-Algorithms
|
Algorithms/Implementation/extra-long-factorials.py
|
Python
|
mit
| 437
| 0.006865
|
# Extra Long Factorials
# Developer: Murillo Grubler
# https://www.hackerrank.com/challenges/extra-long-factorials/problem
# Time Comple
|
xity = O(n)
def factorial(n):
if n == 1:
return 1
total = n
while (n > 0):
if n == total:
total = total * (n - 1)
n -= 2
else:
|
total = total * n
n -= 1
return total
n = int(input().strip())
print (factorial(n))
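# Worked example of the loop above for n = 5 (illustrative trace):
#   total = 5
#   n == total -> total = 5 * 4 = 20, n = 3
#   total = 20 * 3 = 60, n = 2
#   total = 60 * 2 = 120, n = 1
#   total = 120 * 1 = 120, n = 0  -> factorial(5) == 120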
|
GaretJax/i18n-utils
|
i18n_utils/utils.py
|
Python
|
mit
| 291
| 0
|
import functools
def memoize(obj):
ca
|
che = obj.c
|
ache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = (args, tuple(kwargs.items()))
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
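# Usage sketch (hypothetical function, illustrative only): repeated calls with
# the same arguments are served from the cache attached to the wrapped object.
#
#   @memoize
#   def add(a, b):
#       return a + b
#
#   add(1, 2)   # computed and stored under the key ((1, 2), ())
#   add(1, 2)   # returned from add.cache without recomputation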
|
theikkila/lopputili
|
app/models/setting.py
|
Python
|
mit
| 685
| 0.020438
|
from orm import model
from .user import User
from orm import fields
class Setting(model.Model):
owner = fields.ForeignKeyField(User)
company_name = fields.CharField(max_length=140, blank=True)
address = fields.CharField(max_length=240, blank=True)
zip_code = fields.CharField(max_length=140, blank=True)
city = fields.CharField(max_length=140, blank=True)
phone = fields.CharField(max_length=140, blank=True)
email = fields.CharFiel
|
d(max_length=140, blank=True)
vat_code = fields.CharField(max_length=140, blank=True)
iban = fie
|
lds.CharField(max_length=140, blank=True)
bic = fields.CharField(max_length=140, blank=True)
def __repr__(self):
return str(self.company_name)
|
mcldev/geonode
|
geonode/security/views.py
|
Python
|
gpl-3.0
| 9,784
| 0.001124
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.contrib.auth import get_user_model
from geonode.utils import resolve_object
from geonode.base.models import ResourceBase
from geonode.layers.models import Layer
from geonode.people.models import Profile
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
def _perms_info(obj):
info = obj.get_all_level_info()
return info
def _perms_info_json(obj):
info = _perms_info(obj)
info['users'] = dict([(u.username, perms)
for u, perms in info['users'].items()])
info['groups'] = dict([(g.name, perms)
for g, perms in info['groups'].items()])
return json.dumps(info)
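# Illustrative shape of the JSON produced by _perms_info_json (user, group and
# permission names below are hypothetical):
#   {"users": {"admin": ["view_resourcebase", "change_resourcebase"]},
#    "groups": {"registered-members": ["view_resourcebase"]}}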
def resource_permissions(request, resource_id):
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id}, 'base.change_resourcebase_permissions')
except PermissionDenied:
# we are handling this in a non-standard way
return HttpResponse(
'You are not allowed to change permissions for this resource',
status=401,
content_type='text/plain')
if request.method == 'POST':
success = True
message = "Permissions successfully updated!"
try:
permission_spec = json.loads(request.body)
resource.set_permissions(permission_spec)
# Check Users Permissions Consistency
view_any = False
info = _perms_info(resource)
info_users = dict([(u.username, perms) for u, perms in info['users'].items()])
for user, perms in info_users.items():
if user == 'AnonymousUser':
view_any = ('view_resourcebase' in perms)
break
for user, perms in info_users.items():
if 'download_resourcebase' in perms and 'view_resourcebase' not in perms and not view_any:
success = False
message = 'User ' + str(user) + ' has Download permissions but ' \
|
'cannot access the resource. ' \
'Please update permissions consistently!'
return HttpResponse(
json.dumps({'success': success, 'message': message}),
|
status=200,
content_type='text/plain'
)
except BaseException:
success = False
message = "Error updating permissions :("
return HttpResponse(
json.dumps({'success': success, 'message': message}),
status=500,
content_type='text/plain'
)
elif request.method == 'GET':
permission_spec = _perms_info_json(resource)
return HttpResponse(
json.dumps({'success': True, 'permissions': permission_spec}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
'No methods other than get and post are allowed',
status=401,
content_type='text/plain')
@require_POST
def invalidate_permissions_cache(request):
from .utils import sync_resources_with_guardian
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_permissions = request.user.has_perm(
'change_resourcebase_permissions',
resource)
if can_change_permissions:
# Push Security Rules
sync_resources_with_guardian(resource)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'Security Rules Cache Refreshed!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def attributes_sats_refresh(request):
from geonode.geoserver.helpers import set_attributes_from_geoserver
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_data = request.user.has_perm(
'change_resourcebase',
resource)
layer = Layer.objects.get(id=resource.id)
if layer and can_change_data:
# recalculate the layer statistics
set_attributes_from_geoserver(layer, overwrite=True)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'Attributes/Stats Refreshed Successfully!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def invalidate_tiledlayer_cache(request):
from .utils import set_geowebcache_invalidate_cache
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_data = request.user.has_perm(
'change_resourcebase',
resource)
layer = Layer.objects.get(id=resource.id)
if layer and can_change_data:
set_geowebcache_invalidate_cache(layer.alternate)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'GeoWebCache Tiled Layer Emptied!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def set_bulk_permissions(request):
permission_spec = json.loads(request.POST.get('permissions', None))
resource_ids = request.POST.getlist('resources', [])
if permission_spec is not None:
not_permitted = []
for resource_id in resource_ids:
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id
},
'base.change_resourcebase_permissions')
resource.set_permissions(permission_spec)
except PermissionDenied:
not_permitted.append(ResourceBase.objects.get(id=resource_id).title)
return HttpResponse(
json.dumps({'success': 'ok', 'not_changed': not_permitted}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'error': 'Wrong permissions specification'}),
status=400,
content_type='text/plain')
@require_POST
def request_permissions(request):
""" Request permission to download a resource.
"""
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
try:
notification.send(
[resource.owner],
'request_download_resourcebase',
{'fr
|
robertjacobs/zuros
|
zuros_deliberator/zuros_command_to_robot_sender/src/zuros_command_to_robot_sender.py
|
Python
|
mit
| 6,086
| 0.006408
|
#!/usr/bin/env python
# Copyright (c) 2013-2014 ZUYD Research
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author Robert Jacobs/info@rjpjacobs.nl
"""
This will make sure that the commands, as instructed by the user via the command GUI, are sent to the robot
"""
import roslib; roslib.load_manifest('zuros_command_to_robot_sender')
import rospy
import tf
import math
import actionlib
import thread
#move_base_msgs
from move_base_msgs.msg import *
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import *
## Command to robot sender class
class CommandToRobotSender(object):
## Constructor
def __init__(self):
self.action_client = actionlib.SimpleActionClient("/move_base", MoveBaseAction)
#self.pub_move_base_simple = rospy.Publisher("/move_base_simple/goal", PoseStamped)
## Sorts a dictionary alphabetically
def sort_dict(self,dictionary):
keys = sorted(dictionary.iterkeys())
k=[]
return [[key,dictionary[key]] for key in keys]
    ## The move method. Currently only the base is implemented; add handling for further hardware components here
def move(self, component_name, parameter, blocking):
# Is this a base command?
if component_name == "base":
# Stop command?
if parameter == "stop":
return self.base_stop(component_name)
# Not a stop command, so it should be a move base command
else:
return self.move_base(component_name, parameter, blocking)
# Add your own component here
# if component_name == "my_component":
# No valid component (not implemented? Typo?)
else:
            rospy.logerr(rospy.get_name() + ": the component requested is not yet implemented")
## Base stop function - gets called if the component name is "base" and the parameter is "stop" in the move function above
def base_stop(self, component_name):
#base_client = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Stop <<%s>>", component_name)
self.action_client.cancel_all_goals()
    ## Move base function
def move_base(self, component_name, position, blocking):
#ah = action_handle("move_base", component_name, position, blocking, self.parse)
# Look up position in parameter server
nav_prefix = "~nav_positions"
# Not on parameter server?
if not rospy.has_param(nav_prefix):
rospy.logerr("parameter %s does not exist on ROS Parameter Server, aborting...",param_prefix)
return False
# Get parameters
navigation_positions_params = rospy.get_param(nav_prefix)
nav_param
|
= self.sort_dict(navigation_positions_params)
nav_pos = None
# Check if this position is known
for nav in nav_param:
if(nav[0] == position):
nav_pos = nav[1]
# Position is known
if(nav_pos != None):
rospy.loginfo("Move <<%s>> to <<[x,y,yaw] %d, %d, %d>>", component_name, nav_pos[0], nav_pos[1], nav_pos[2])
# Position is not known
|
else:
            rospy.logerr("No valid position found, cancelling move command. Are you sure your position is added to the parameter server?")
return
# Convert to pose message
pose = PoseStamped()
pose.header.stamp = rospy.Time.now()
pose.header.frame_id = "/map"
pose.pose.position.x = nav_pos[0]
pose.pose.position.y = nav_pos[1]
pose.pose.position.z = 0
quat = tf.transformations.quaternion_from_euler(0, 0, nav_pos[2])
pose.pose.orientation.x = quat[0]
pose.pose.orientation.y = quat[1]
pose.pose.orientation.z = quat[2]
pose.pose.orientation.w = quat[3]
rospy.logdebug("waiting for move_base action server to start")
# Error: server did not respond within given time
if not self.action_client.wait_for_server(rospy.Duration(5)):
rospy.logerr("move_base action server not ready within timeout, aborting...")
return
else:
rospy.logdebug("move_base action server ready")
# sending goal
client_goal = MoveBaseGoal()
client_goal.target_pose = pose
thread.start_new_thread( self.handle, (client_goal,))
#self.pub_move_base_simple.publish(pose)
## Handle function which sends the command to the action server
def handle(self, goal):
self.action_client.send_goal(goal)
self.action_client.wait_for_result()
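# Illustrative (hypothetical) layout of the private ~nav_positions parameter
# read by move_base above, e.g. loaded from a YAML file; each entry is
# [x, y, yaw]:
#
#   nav_positions:
#     kitchen: [1.0, 2.5, 0.0]
#     charging_dock: [0.0, 0.0, 3.14]
#
# CommandToRobotSender().move("base", "kitchen", False) would then look up the
# "kitchen" entry and send the corresponding MoveBaseGoal.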
|
ployground/ploy_openvz
|
setup.py
|
Python
|
bsd-3-clause
| 1,066
| 0
|
from setuptools impo
|
rt setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
version = "1.0b3"
setup(
version=version,
description="A plugin for ploy providing support for OpenVZ containers."
|
,
long_description=README + "\n\n",
name="ploy_openvz",
author='Florian Schulze',
author_email='florian.schulze@gmx.net',
license="BSD 3-Clause License",
url='http://github.com/ployground/ploy_openvz',
classifiers=[
'Environment :: Console',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration'],
include_package_data=True,
zip_safe=False,
packages=['ploy_openvz'],
install_requires=[
'setuptools',
'ploy >= 1.0.0, < 2dev',
'lazy'],
entry_points="""
[ploy.plugins]
vz = ploy_openvz:plugin
""")
|
iBluemind/armatis
|
armatis/models.py
|
Python
|
bsd-2-clause
| 2,596
| 0
|
# -*- coding: utf-8 -*-
class Company(object):
def __init__(self, name=None, code=None, phone=None, digit=None):
# Company's name
self.name = name
# Codename
self.code = code
# The digit of the invoice number
if digit is None:
digit = []
self.digit = digit
# Phone number of the service center
self.phone = phone
def __repr__(self):
return '[%s] %s (%s)' % (
self.code,
self.name,
self.phone
)
class Track(object):
def __init__(self, time=None, location=None, status=None,
phone1=None, ph
|
one2=None):
# Time
s
|
elf.time = time
# Location
self.location = location
# Status
self.status = status
# Phone number 1
self.phone1 = phone1
# Phone number 2
self.phone2 = phone2
def __repr__(self):
return '[%s] %s - %s / %s / %s' % (
self.time,
self.status,
self.location,
self.phone1,
self.phone2
)
class Tracker(object):
def __init__(self):
self._tracks = []
@property
def tracks(self):
return self._tracks
def add_track(self, new_track):
if not isinstance(new_track, Track):
raise TypeError('The new_track must be Track!')
self._tracks.append(new_track)
def track_by_status(self, status):
"""
Find the tracking information matching the status
:param str status: The status to find the tracking information
:return: The tracking information matching the status
"""
tracks = list(filter(lambda x: x.status == status, self._tracks))
if len(tracks) > 0:
return tracks[-1]
raise LookupError("Can't find the track by status %s" % status)
def __iter__(self):
return iter(self._tracks)
class Parcel(object):
def __init__(self, sender=None, receiver=None, invoice_number=None,
address=None, note=None):
# The sender's name
self.sender = sender
# The receiver's name
self.receiver = receiver
# Invoice number
self.invoice_number = invoice_number
# The receiver's address
self.address = address
# Note for the parcel
self.note = note
def __repr__(self):
return '[%s] From: %s, To: %s, %s' % (
self.invoice_number,
self.sender,
self.receiver,
self.note
)
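# Usage sketch for Tracker.track_by_status (illustrative values only):
#
#   tracker = Tracker()
#   tracker.add_track(Track(time='10:00', location='Seoul', status='In transit'))
#   tracker.add_track(Track(time='18:30', location='Busan', status='Delivered'))
#   tracker.track_by_status('Delivered')   # -> the 'Delivered' Track
#   tracker.track_by_status('Lost')        # raises LookupError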
|
chrislit/abydos
|
abydos/distance/_ssk.py
|
Python
|
gpl-3.0
| 4,880
| 0
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._ssk.
String subsequence kernel (SSK) similarity
"""
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import QSkipgrams, _Tokenizer
__all__ = ['SSK']
class SSK(_TokenDistance):
r"""String subsequence kernel (SSK) similarity.
This is based on :cite:`Lodhi:2002`.
.. versionadded:: 0.4.1
"""
def __init__(
self,
tokenizer: Optional[_Tokenizer] = None,
ssk_lambda: float = 0.9,
**kwargs: Any
) -> None:
"""Initialize SSK instance.
Parameters
----------
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
ssk_lambda : float or Iterable
            A value in the range (0.0, 1.0) used for discounting gaps between
characters according to the method described in :cite:`Lodhi:2002`.
To supply multiple values of lambda, provide an Iterable of numeric
values, such as (0.5, 0.05) or np.arange(0.05, 0.5, 0.05)
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-skipgram. Using this parameter and
tokenizer=None will cause the instance to use th
|
            e QSkipgrams
tokenizer with this q value.
.. versionadded:: 0.4.1
"""
supe
|
r(SSK, self).__init__(
tokenizer=tokenizer, ssk_lambda=ssk_lambda, **kwargs
)
qval = 2 if 'qval' not in self.params else self.params['qval']
self.params['tokenizer'] = (
tokenizer
if tokenizer is not None
else QSkipgrams(
qval=qval, start_stop='', scaler='SSK', ssk_lambda=ssk_lambda
)
)
def sim_score(self, src: str, tar: str) -> float:
"""Return the SSK similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
String subsequence kernel similarity
Examples
--------
>>> cmp = SSK()
>>> cmp.dist_abs('cat', 'hat')
0.6441281138790036
>>> cmp.dist_abs('Niall', 'Neil')
0.5290992177869402
>>> cmp.dist_abs('aluminum', 'Catalan')
0.862398428061774
>>> cmp.dist_abs('ATCG', 'TAGC')
0.38591004719395017
.. versionadded:: 0.4.1
"""
self._tokenize(src, tar)
src_wts = self._src_tokens
tar_wts = self._tar_tokens
score = sum(
src_wts[token] * tar_wts[token] for token in src_wts & tar_wts
)
return score
def sim(self, src: str, tar: str) -> float:
"""Return the normalized SSK similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Normalized string subsequence kernel similarity
Examples
--------
>>> cmp = SSK()
>>> cmp.sim('cat', 'hat')
0.3558718861209964
>>> cmp.sim('Niall', 'Neil')
0.4709007822130597
>>> cmp.sim('aluminum', 'Catalan')
0.13760157193822603
>>> cmp.sim('ATCG', 'TAGC')
0.6140899528060498
.. versionadded:: 0.4.1
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
src_wts = self._src_tokens
tar_wts = self._tar_tokens
score = sum(
src_wts[token] * tar_wts[token] for token in src_wts & tar_wts
)
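        # Normalize by the geometric mean of the two self-similarities
        # (a cosine-style normalization), so identical weight vectors give 1.0.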
norm = (
sum(src_wts[token] * src_wts[token] for token in src_wts)
* sum(tar_wts[token] * tar_wts[token] for token in tar_wts)
) ** 0.5
if not score:
return 0.0
return score / norm
if __name__ == '__main__':
import doctest
doctest.testmod()
|
getsentry/zeus
|
zeus/api/resources/revision_tests.py
|
Python
|
apache-2.0
| 1,984
| 0.002016
|
from flask import request
from sqlalchemy.dialects.postgresql import array_agg
from zeus.config import db
from zeus.constants import Result
from zeus.db.func import array_agg_row
from zeus.models import Job, TestCase, Revision
from zeus.utils.builds import fetch_build_for_revision
from .base_revision import BaseRevisionResource
from ..schemas import AggregateTestCaseSummarySchema
class RevisionTestsResource(BaseRevisionResource):
def get(self, revision: Revision):
"""
Return a list of test cases for a given revision.
"""
build = fetch_build_for_revision(revision)
if not build:
return self.respond(status=404)
build_ids = [original.id for original in build.original]
job_query = db.session.query(Job.id).filter(Job.build_id.in_(build_ids))
result = request.args.get("allowed_failures")
if result == "false":
job_query = job_query.filter(Job.allow_failure == False) # NOQA
job_ids = job_query.subquery()
query = (
db.session.query(
TestCase.hash,
TestCase.name,
array_agg_row(
TestCase.id, TestCase.job_id, TestCase.duration, TestCase.result
).label("runs"),
)
.filter(TestCase.job_id.in_(job_ids))
.group_by(TestCase.hash, TestCase.name)
)
result = request.args.get("result")
if result:
try:
query = query.filter(TestCase.result == getattr(Result, result))
|
except AttributeError:
raise NotImplementedError
query = query.order_by(
(
|
array_agg(TestCase.result).label("results").contains([Result.failed])
).desc(),
TestCase.name.asc(),
)
schema = AggregateTestCaseSummarySchema(many=True, exclude=("build",))
return self.paginate_with_schema(schema, query)
|
endlessm/chromium-browser
|
third_party/llvm/debuginfo-tests/dexter/dex/dextIR/ProgramState.py
|
Python
|
bsd-3-clause
| 3,820
| 0.000785
|
# DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Set of data classes for representing the complete debug program state at a
fixed point in execution.
"""
import os
from collections import OrderedDict
from typing import List
class SourceLocation:
def __init__(self, path: str = None, lineno: int = None, column: int = None):
if path:
path = os.path.normcase(path)
self.path = path
self.lineno = lineno
self.column = column
def __str__(self):
return '{}({}:{})'.format(self.path, self.lineno, self.column)
def match(self, other) -> bool:
"""Returns true iff all the properties that appear in `self` have the
same value in `other`, but not necessarily vice versa.
"""
if not other or not isinstance(other, SourceLocation):
return False
if self.path and (self.path != other.path):
return False
if self.lineno and (self.lineno != other.lineno):
return False
if self.column and (self.column != other.column):
return False
return True
class StackFrame:
def __init__(self,
function: str = None,
is_inlined: bool = None,
location: SourceLocation = Non
|
e,
watches: OrderedDict = None):
if watches is None:
watches = {}
self.function = function
self.is_inlined = is_inlined
self.location = location
self.
|
watches = watches
def __str__(self):
return '{}{}: {} | {}'.format(
self.function,
' (inlined)' if self.is_inlined else '',
self.location,
{k: str(self.watches[k]) for k in self.watches})
def match(self, other) -> bool:
"""Returns true iff all the properties that appear in `self` have the
same value in `other`, but not necessarily vice versa.
"""
if not other or not isinstance(other, StackFrame):
return False
if self.location and not self.location.match(other.location):
return False
if self.watches:
for name in iter(self.watches):
try:
if isinstance(self.watches[name], dict):
for attr in iter(self.watches[name]):
if (getattr(other.watches[name], attr, None) !=
self.watches[name][attr]):
return False
else:
if other.watches[name].value != self.watches[name]:
return False
except KeyError:
return False
return True
class ProgramState:
def __init__(self, frames: List[StackFrame] = None):
self.frames = frames
def __str__(self):
return '\n'.join(map(
lambda enum: 'Frame {}: {}'.format(enum[0], enum[1]),
enumerate(self.frames)))
def match(self, other) -> bool:
"""Returns true iff all the properties that appear in `self` have the
same value in `other`, but not necessarily vice versa.
"""
if not other or not isinstance(other, ProgramState):
return False
if self.frames:
for idx, frame in enumerate(self.frames):
try:
if not frame.match(other.frames[idx]):
return False
except (IndexError, KeyError):
return False
return True
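# Illustrative example of the one-way matching described in the docstrings
# above (paths and numbers are hypothetical):
#
#   expected = SourceLocation(lineno=3)                        # path/column unset
#   actual = SourceLocation(path='main.c', lineno=3, column=7)
#   expected.match(actual)   # True  - only the set property (lineno) is checked
#   actual.match(expected)   # False - actual.path has no counterpart in expected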
|
harshkothari410/ocportal
|
ojp/migrations/0005_auto_20161117_0303.py
|
Python
|
mit
| 493
| 0
|
# -*- coding: utf-8 -*-
# Gene
|
rated by Django 1.10.2 on 2016-11-17 03:03
from __future__ import unicode_literals
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('ojp', '0004_problem_num_of_correct_tries'),
]
operations = [
migrations.AlterField(
model_name='problem',
name='descri
|
ption',
field=tinymce.models.HTMLField(blank=True, null=True),
),
]
|
pantheon-systems/kombu
|
kombu/tests/test_virtual_exchange.py
|
Python
|
bsd-3-clause
| 3,747
| 0.000801
|
from kombu.tests.utils import unittest
from kombu.transport.virtual import exchange
from kombu.tests.mocks import Channel
class ExchangeCase(unittest.TestCase):
type = None
def setUp(self):
if self.type:
self.e = self.type(Channel())
class test_Direct(ExchangeCase):
type = exchange.DirectExchange
table = [("rFoo", None, "qFoo"),
("rFoo", None, "qFox"),
("rBar", None, "qBar"),
("rBaz", None, "qBaz")]
def test_lookup(self):
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "rFoo", None),
["qFoo", "qFox"])
self.assertListEqual(self.e.lookup(
self.table, "eMoz", "rMoz", "DEFAULT"),
["DEFAULT"])
self.assertListEqual(self.e.lookup(
self.table, "eBar", "rBar", None),
["qBar"])
class test_Fanout(ExchangeCase):
type = exchange.FanoutExchange
table = [(No
|
ne, None, "qFoo"),
(None, None, "qFox"),
(None, None, "qBar")]
def test_lookup(self):
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "rFoo", None),
["qFoo", "qFox", "qBar"])
class test_Topic(ExchangeCase):
type = exchange.TopicExchange
table = [("stock.#", None, "rFoo"),
("stock.us.*", None, "rBar")]
def setU
|
p(self):
super(test_Topic, self).setUp()
self.table = [(rkey, self.e.key_to_pattern(rkey), queue)
for rkey, _, queue in self.table]
def test_prepare_bind(self):
x = self.e.prepare_bind("qFoo", "eFoo", "stock.#", {})
self.assertTupleEqual(x, ("stock.#", r'^stock\..*?$', "qFoo"))
def test_lookup(self):
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "stock.us.nasdaq", None),
["rFoo", "rBar"])
self.assertTrue(self.e._compiled)
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "stock.europe.OSE", None),
["rFoo"])
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "stockxeuropexOSE", None),
[None])
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "candy.schleckpulver.snap_crackle", None),
[None])
class test_ExchangeType(ExchangeCase):
type = exchange.ExchangeType
def test_lookup(self):
self.assertRaises(NotImplementedError, self.e.lookup,
[], "eFoo", "rFoo", None)
def test_prepare_bind(self):
self.assertTupleEqual(self.e.prepare_bind("qFoo", "eFoo", "rFoo", {}),
("rFoo", None, "qFoo"))
def test_equivalent(self):
e1 = dict(type="direct",
durable=True,
auto_delete=True,
arguments={})
self.assertTrue(
self.e.equivalent(e1, "eFoo", "direct", True, True, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "topic", True, True, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "direct", False, True, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "direct", True, False, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "direct", True, True, {
"expires": 3000}))
e2 = dict(e1, arguments={"expires": 3000})
self.assertTrue(
self.e.equivalent(e2, "eFoo", "direct", True, True, {
"expires": 3000}))
self.assertFalse(
self.e.equivalent(e2, "eFoo", "direct", True, True, {
"expires": 6000}))
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/comparison/test_textbox15.py
|
Python
|
bsd-2-clause
| 900
| 0
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('textbox15.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
works
|
heet.insert_textbox('E9', 'This is some text',
|
{'align': {'horizontal': 'center'}})
workbook.close()
self.assertExcelEqual()
|
chrmoritz/zoxel
|
src/plugins/__init__.py
|
Python
|
gpl-3.0
| 547
| 0.003656
|
from os.path import dirname
from os import listdir
path = dirname(__file__)
|
i = path.find(".zip")
if i == -1: # OS X app or unpacked python files
__all__ = [p[:-3] for p in listdir(path) if p.endswith(".py") and p != "__init__.py"]
del p
else: # Windows binary zipped .pyc
|
files
import zipfile
__all__ = [f[8:-4] for f in zipfile.ZipFile(path[:i+4]).namelist() if f.find('plugins/') == 0 and
f.endswith(".pyc") and not f.endswith("__init__.pyc")]
del f
del zipfile
del i
del path
del dirname
del listdir
|
wrouesnel/ansible
|
lib/ansible/modules/network/aci/aci_epg_monitoring_policy.py
|
Python
|
gpl-3.0
| 6,703
| 0.00179
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_monitoring_policy
short_description: Manage monitoring policies on Cisco ACI fabrics (mon:EPGPol)
description:
- Manage monitoring policies on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information from the internal APIC class I(mon:EPGPol) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
monitoring_policy:
description:
- The name of the monitoring policy.
required: yes
aliases: [ name ]
description:
description:
- Description for the monitoring policy.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_epg_monitoring_policy:
host: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
monitoring_policy: '{{ monitoring_policy }}'
description: '{{ description }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
monitoring_policy=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
|
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
mo
|
dule = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['monitoring_policy', 'tenant']],
['state', 'present', ['monitoring_policy', 'tenant']],
],
)
monitoring_policy = module.params['monitoring_policy']
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='monEPGPol',
aci_rn='monepg-{0}'.format(monitoring_policy),
filter_target='eq(monEPGPol.name, "{0}")'.format(monitoring_policy),
module_object=monitoring_policy,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='monEPGPol',
class_config=dict(
name=monitoring_policy,
descr=description,
),
)
aci.get_diff(aci_class='monEPGPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
tqchen/tvm
|
python/tvm/relay/frontend/tflite.py
|
Python
|
apache-2.0
| 137,911
| 0.001581
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
"""Tensorflow lite frontend."""
import math
import itertools
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm import relay
from .. import an
|
alysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn as _qnn
from ... import nd as _nd
from .common import ExprTable
from .common import infer_shape as _infer_shape
from
|
.tflite_flexbuffer import FlexBufferDecoder
__all__ = ["from_tflite"]
class TensorWrapper(object):
"""Tensor wrapper for TFLite Tensor"""
def __init__(self, tensor_idx, tensor, buffer, qnn_params=None):
self.tensor_idx = tensor_idx
self.tensor = tensor
self.buffer = buffer
self.qnn_params = qnn_params
class OperatorConverter(object):
"""Operator Converted for converting TFLite ops to Relay ops"""
def __init__(self, model, subgraph, exp_tab):
try:
from tflite.BuiltinOperator import BuiltinOperator
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
self.model = model
self.subgraph = subgraph
self.exp_tab = exp_tab
self.builtin_op_code = build_str_map(BuiltinOperator())
self.activation_fn_type = build_str_map(ActivationFunctionType())
self.builtin_options = build_str_map(BuiltinOptions())
# Add more operators
self.convert_map = {
"ABS": self.convert_abs,
"ADD": self.convert_add,
"ADD_N": self.convert_add_n,
"ARG_MAX": self.convert_arg_max,
"ARG_MIN": self.convert_arg_min,
"AVERAGE_POOL_2D": self.convert_average_pool2d,
"BATCH_TO_SPACE_ND": self.convert_batch_to_space_nd,
"CAST": self.convert_cast,
"CEIL": self.convert_ceil,
"CONCATENATION": self.convert_concatenation,
"CONV_2D": self.convert_conv2d,
"COS": self.convert_cos,
"DEPTH_TO_SPACE": self.convert_depth_to_space,
"DEPTHWISE_CONV_2D": self.convert_depthwise_conv2d,
"DEQUANTIZE": self.convert_dequantize,
"DETECTION_POSTPROCESS": self.convert_detection_postprocess,
"DIV": self.convert_div,
"ELU": self.convert_elu,
"EQUAL": self.convert_equal,
"EXP": self.convert_exp,
"EXPAND_DIMS": self.convert_expand_dims,
"FILL": self.convert_fill,
"FLOOR_DIV": self.convert_floor_div,
"FLOOR_MOD": self.convert_floor_mod,
"FLOOR": self.convert_floor,
"FULLY_CONNECTED": self.convert_fully_connected,
"GATHER": self.convert_gather,
"GATHER_ND": self.convert_gather_nd,
"GREATER_EQUAL": self.convert_greater_equal,
"GREATER": self.convert_greater,
"HARD_SWISH": self.convert_hard_swish,
"L2_NORMALIZATION": self.convert_l2_normalization,
"L2_POOL_2D": self.convert_l2_pool2d,
"LEAKY_RELU": self.convert_leaky_relu,
"LESS_EQUAL": self.convert_less_equal,
"LESS": self.convert_less,
"LOCAL_RESPONSE_NORMALIZATION": self.convert_lrn,
"LOG": self.convert_log,
"LOG_SOFTMAX": self.convert_log_softmax,
"LOGICAL_AND": self.convert_logical_and,
"LOGICAL_NOT": self.convert_logical_not,
"LOGICAL_OR": self.convert_logical_or,
"LOGISTIC": self.convert_logistic,
"MATRIX_DIAG": self.convert_matrix_diag,
"MATRIX_SET_DIAG": self.convert_matrix_set_diag,
"MAX_POOL_2D": self.convert_max_pool2d,
"MAXIMUM": self.convert_maximum,
"MEAN": self.convert_reduce_mean,
"MINIMUM": self.convert_minimum,
"MIRROR_PAD": self.convert_mirror_pad,
"MUL": self.convert_mul,
"NEG": self.convert_neg,
"NOT_EQUAL": self.convert_not_equal,
"ONE_HOT": self.convert_one_hot,
"PACK": self.convert_pack,
"PAD": self.convert_pad,
"PADV2": self.convert_pad,
"POW": self.convert_pow,
"PRELU": self.convert_prelu,
"RANGE": self.convert_range,
"QUANTIZE": self.convert_quantize,
"REDUCE_ANY": self.convert_reduce_any,
"REDUCE_MAX": self.convert_reduce_max,
"REDUCE_MIN": self.convert_reduce_min,
"REDUCE_PROD": self.convert_reduce_prod,
"RELU": self.convert_relu,
"RELU6": self.convert_relu6,
"RELU_N1_TO_1": self.convert_relu_n1_to_1,
"RESHAPE": self.convert_reshape,
"RESIZE_BILINEAR": self.convert_resize_bilinear,
"RESIZE_NEAREST_NEIGHBOR": self.convert_resize_nearest_neighbor,
"ROUND": self.convert_round,
"RSQRT": self.convert_rsqrt,
"REVERSE_SEQUENCE": self.convert_reverse_sequence,
"REVERSE_V2": self.convert_reverse_v2,
"SELECT": self.convert_select,
"SHAPE": self.convert_shape,
"SIN": self.convert_sin,
"SLICE": self.convert_slice,
"SOFTMAX": self.convert_softmax,
"SPACE_TO_BATCH_ND": self.convert_space_to_batch_nd,
"SPACE_TO_DEPTH": self.convert_space_to_depth,
"SPARSE_TO_DENSE": self.convert_sparse_to_dense,
"SPLIT": self.convert_split,
"SPLIT_V": self.convert_split_v,
"SQRT": self.convert_sqrt,
"SQUARE": self.convert_square,
"SQUARED_DIFFERENCE": self.convert_squared_difference,
"SQUEEZE": self.convert_squeeze,
"STRIDED_SLICE": self.convert_strided_slice,
"SUB": self.convert_sub,
"SUM": self.convert_reduce_sum,
"TAN": self.convert_tan,
"TANH": self.convert_tanh,
"TILE": self.convert_tile,
"TOPK_V2": self.convert_topk_v2,
"TRANSPOSE_CONV": self.convert_transpose_conv,
"TRANSPOSE": self.convert_transpose,
"UNPACK": self.convert_unpack,
"WHERE": self.convert_select,
"ZEROS_LIKE": self.convert_zeros_like,
}
def check_unsupported_ops(self):
"""Check unsupported TFLite ops in our converter."""
unsupported_ops_set = set()
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
if op_code_str not in self.convert_map:
unsupported_ops_set.add(op_code_str)
if unsupported_ops_set:
msg = "The following operators are not supported in frontend " "TFLite: {}"
ops = str(list(unsupported_ops_set)).strip("[,]")
raise tvm.error.OpNotImplemented(msg.format(ops))
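    # Dispatch sketch (assumption, simplified from convert_op_to_relay below):
    # each TFLite operator is resolved to its registered converter through
    # self.convert_map and applied to produce the corresponding Relay expression.
    #
    #   op_code_str = self.get_op_code_str(op)        # e.g. "CONV_2D"
    #   relay_out = self.convert_map[op_code_str](op)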
def convert_op_to_relay(self):
"""Convert TFLite ops to relay ops"""
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
outp
|
SUSE/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/virtual_network_gateways_operations.py
|
Python
|
mit
| 36,131
| 0.002353
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-03-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-01"
self.config = config
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, c
|
ustom_headers=None, raw=False, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to crea
|
te or update virtual
network gateway operation.
:type parameters: :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
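    # Illustrative usage sketch (assumption, not part of the generated client):
    # create_or_update returns an AzureOperationPoller for the long-running
    # operation; callers typically block on it to obtain the deserialized
    # VirtualNetworkGateway. The client/variable names below are hypothetical.
    #
    #   poller = network_client.virtual_network_gateways.create_or_update(
    #       'my-rg', 'my-gateway', parameters)
    #   gateway = poller.result()   # waits for the operation to finish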
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.a
|
AleksNeStu/ggrc-core
|
src/ggrc/models/mixins/__init__.py
|
Python
|
apache-2.0
| 26,526
| 0.010631
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Mixins to add common attributes and relationships. Note, all model classes
must also inherit from ``db.Model``. For example:
..
class Market(BusinessObject, db.Model):
__tablename__ = 'markets'
"""
# pylint: disable=no-self-argument
# All declared_attr properties that are class level as per sqlalchemy
# documentation, are reported as false positives by pylint.
from logging import getLogger
from uuid import uuid1
import datetime
from sqlalchemy import event
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.orm.session import Session
from ggrc import builder
from ggrc import db
from ggrc.models import reflection
from ggrc.models.deferred import deferred
from ggrc.models.inflector import ModelInflectorDescriptor
from ggrc.models.reflection import AttributeInfo
from ggrc.models.mixins.customattributable import CustomAttributable
from ggrc.models.mixins.notifiable import Notifiable
from ggrc.utils import create_stub
from ggrc.fulltext import attributes
# pylint: disable=invalid-name
logger = getLogger(__name__)
class Identifiable(object):
"""A model with an ``id`` property that is the primary key."""
id = db.Column(db.Integer, primary_key=True) # noqa
# REST properties
_publish_attrs = ['id', 'type']
_update_attrs = []
_inflector = ModelInflectorDescriptor()
@builder.simple_property
def type(self):
return self.__class__.__name__
@classmethod
def eager_query(cls):
mapper_class = cls._sa_class_manager.mapper.base_mapper.class_
return db.session.query(cls).options(
db.Load(mapper_class).undefer_group(
mapper_class.__name__ + '_complete'),
)
@classmethod
def eager_inclusions(cls, query, include_links):
"""Load related items listed in include_links eagerly."""
options = []
for include_link in include_links:
inclusion_class = getattr(cls, include_link).property.mapper.class_
options.append(
orm.subqueryload(include_link)
.undefer_group(inclusion_class.__name__ + '_complete'))
return query.options(*options)
@declared_attr
def __table_args__(cls):
extra_table_args = AttributeInfo.gather_attrs(cls, '_extra_table_args')
table_args = []
table_dict = {}
for table_arg in extra_table_args:
if callable(table_arg):
table_arg = table_arg()
if isinstance(table_arg, (list, tuple, set)):
if isinstance(table_arg[-1], (dict,)):
table_dict.update(table_arg[-1])
table_args.extend(table_arg[:-1])
else:
table_args.extend(table_arg)
elif isinstance(table_arg, (dict,)):
table_dict.update(table_arg)
else:
table_args.append(table_arg)
if len(table_dict) > 0:
table_args.append(table_dict)
return tuple(table_args,)
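# Illustrative sketch (assumption, mirroring the Market example in the module
# docstring): eager_query() returns a SQLAlchemy query with the model's
# deferred "<Model>_complete" column group loaded up front.
#
#   markets = Market.eager_query().filter(Market.id.in_(some_ids)).all()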
class ChangeTracked(object):
"""A model with fields to tracked the last user to modify the model, the
creation time of the model, and the last time the model was updated.
"""
@declared_attr
def modified_by_id(cls):
"""Id of user who did the last modification of the object."""
return deferred(db.Column(db.Integer), cls.__name__)
@declared_attr
def created_at(cls):
"""Date of creation. Set to current time on object creation."""
column = db.Column(
db.DateTime,
nullable=False,
default=db.text('current_timestamp'),
)
return deferred(column, cls.__name__)
@declared_attr
def updated_at(cls):
"""Date of last update. Set to current time on object creation/update."""
column = db.Column(
db.DateTime,
nullable=False,
default=db.text('current_timestamp'),
onupdate=db.text('current_timestamp'),
)
return deferred(column, cls.__name__)
@declared_attr
def modified_by(cls):
"""Relationship to user referenced by modified_by_id."""
return db.relationship(
'Person',
p
|
rimaryjoin='{0}.modified_by_id == Person.id'.format(cls.__name__),
foreign_keys='{0}.modified_by_id'.format(cls.__name__),
uselist=False,
)
@staticmethod
def _extra_table_args(model):
"""Apply extra table args (like indexes) to model definition."""
return (
db.Inde
|
x('ix_{}_updated_at'.format(model.__tablename__), 'updated_at'),
)
# TODO Add a transaction id, this will be handy for generating etags
# and for tracking the changes made to several resources together.
# transaction_id = db.Column(db.Integer)
# REST properties
_publish_attrs = [
'modified_by',
'created_at',
'updated_at',
]
_fulltext_attrs = [
attributes.DatetimeFullTextAttr('created_at', 'created_at'),
attributes.DatetimeFullTextAttr('updated_at', 'updated_at'),
attributes.FullTextAttr("modified_by", "modified_by", ["name", "email"]),
]
_update_attrs = []
_aliases = {
"updated_at": {
"display_name": "Last Updated",
"filter_only": True,
},
"created_at": {
"display_name": "Created Date",
"filter_only": True,
},
}
@classmethod
def indexed_query(cls):
return super(ChangeTracked, cls).indexed_query().options(
orm.Load(cls).load_only("created_at", "updated_at"),
orm.Load(cls).joinedload(
"modified_by"
).load_only(
"name", "email", "id"
),
)
class Titled(object):
"""Mixin that defines `title` field.
Strips title on update and defines optional UNIQUE constraint on it.
"""
@validates('title')
def validate_title(self, key, value):
"""Validates and cleans Title that has leading/trailing spaces"""
# pylint: disable=unused-argument,no-self-use
return value if value is None else value.strip()
@declared_attr
def title(cls):
return deferred(db.Column(db.String, nullable=False), cls.__name__)
@classmethod
def indexed_query(cls):
return super(Titled, cls).indexed_query().options(
orm.Load(cls).load_only("title"),
)
@staticmethod
def _extra_table_args(model):
"""If model._title_uniqueness is set, apply UNIQUE constraint to title."""
if getattr(model, '_title_uniqueness', True):
return (
db.UniqueConstraint(
'title', name='uq_t_{}'.format(model.__tablename__)),
)
return ()
# REST properties
_publish_attrs = ['title']
_fulltext_attrs = ['title']
_sanitize_html = ['title']
_aliases = {"title": "Title"}
class Described(object):
"""Mixin that defines `description` field."""
@declared_attr
def description(cls):
return deferred(db.Column(db.Text), cls.__name__)
# REST properties
_publish_attrs = ['description']
_fulltext_attrs = ['description']
_sanitize_html = ['description']
_aliases = {"description": "Description"}
@classmethod
def indexed_query(cls):
return super(Described, cls).indexed_query().options(
orm.Load(cls).load_only("description"),
)
class Noted(object):
"""Mixin that defines `notes` field."""
@declared_attr
def notes(cls):
return deferred(db.Column(db.Text), cls.__name__)
# REST properties
_publish_attrs = ['notes']
_fulltext_attrs = ['notes']
_sanitize_html = ['notes']
_aliases = {"notes": "Notes"}
@classmethod
def indexed_query(cls):
return super(Noted, cls).indexed_query().options(
orm.Load(cls).load_only("notes"),
)
class Hyperlinked(object):
"""Mixin that defines `url` and `reference_url` fields."""
@declared_attr
def url(cls):
return deferred(db.Column(db.String), cls.__name__)
@declared_attr
def reference_url(cls):
return deferred(db.Column(db.String), cls.__name__)
# REST properties
_publish_attrs = ['url', 'reference_url']
_aliases = {
"url": "Url",
"reference_url": "Reference URL",
}
_fulltext_attrs = [
'url',
'reference_url',
]
@classmethod
def indexed_query(cls):
return sup
|
biothings/biothings_explorer
|
biothings_explorer/_deprecated_schema_parser.py
|
Python
|
apache-2.0
| 1,071
| 0.003735
|
"""Parse the biothings schema"""
from .config import BIOTHINGS_SCHEMA_URL, PREFIX_TO_REMOVE
from .utils.dataload import load_json_or_yaml
from .utils.common import remove_prefix
class SchemaParser():
def __init__(self):
self.schema_json = remove_prefix(load_json_or_yaml(BIOTHINGS_SCHEMA_URL),
PREFIX_TO_REMOVE)
self.properties = {}
self.ids = []
self.clses = []
self.process_schema()
def process_schema(self):
for rec in self.schema_j
|
son['@graph']:
if "rdfs:subPropertyOf" in rec and
|
rec["rdfs:subPropertyOf"]["@id"] == "http://schema.org/identifier":
self.ids.append(rec["@id"])
elif rec["@type"] == "rdf:Property":
self.properties[rec["@id"]] = {"inverse_property": None}
if "schema:inverseOf" in rec:
self.properties[rec["@id"]]["inverse_property"] = rec["schema:inverseOf"]["@id"]
elif rec["@type"] == "rdfs:Class":
self.clses.append(rec["@id"])
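# Illustrative usage sketch (assumption, not part of the module):
# process_schema() is already invoked in __init__, so after construction the
# parsed graph is available on the instance attributes.
#
#   parser = SchemaParser()
#   parser.ids         # property ids that subclass schema:identifier
#   parser.properties  # {property id: {"inverse_property": ...}}
#   parser.clses       # rdfs:Class ids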
|
ta2xeo/python3-kii
|
tests/test_data/application/test_application_scope_data.py
|
Python
|
mit
| 13,511
| 0.00037
|
'''
Precondition
    the users test suite must pass successfully.
'''
from datetime import datetime, timedelta
import time
import pytest
import requests
from kii import AccountType, exceptions as exc, results as rs
from kii.data import BucketType, clauses as cl
from tests.conf import (
get_env,
get_api_with_test_user,
cleanup,
)
GROUP_NAME = 'test_group'
BUCKET_ID = 'test_bucket'
class TestApplicationScopeData:
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
cleanup()
self.api = get_api_with_test_user()
self.scope = self.api.data.application
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
try:
self.scope.delete_a_bucket(BUCKET_ID)
except exc.KiiBucketNotFoundError:
pass
cleanup()
def test_retrieve_bucket(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert obj
bucket = self.scope.retrieve_a_bucket(BUCKET_ID)
assert isinstance(bucket, rs.BucketResult)
assert bucket.bucket_type is BucketType.READ_WRITE
assert bucket.size > 0
def test_delete_bucket(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert obj
self.scope.delete_a_bucket(BUCKET_ID)
with pytest.raises(exc.KiiBucketNotFoundError):
self.scope.delete_a_bucket(BUCKET_ID)
def test_create_an_object(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert isinstance(obj, rs.CreateResult)
assert obj.object_id
assert obj.created_at
assert isinstance(obj.created_at, datetime)
assert obj.data_type
assert obj.data_type == 'application/json'
def test_retrieve_an_object(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
result = self.scope(BUCKET_ID).retrieve_an_object(obj.object_id)
assert isinstance(result, rs.ObjectResult)
assert result._id
assert isinstance(result._id, str)
assert result._created
assert result._modified
def test_fully_update_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
updated = bucket.fully_update_an_object(obj.object_id, {
'str key': 'updated string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
'list key': [4, 5, 6],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
updated = bucket.retrieve_an_object(obj.object_id)
assert 'int key' not in updated
assert updated['str key'] == 'updated string'
assert updated['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert updated['list key'] == [4, 5, 6]
assert created._created == updated._created
assert created._modified != updated._modified
assert created._version != updated._version
def test_create_a_new_object_with_an_id(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created2 = bucket.create_a_new_object_with_an_id('new-object-id', {
'str key': 'created2 string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
'list key': [4, 5, 6],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 2
created2 = bucket.retrieve_an_object('new-object-id')
assert 'int key' not in created2
assert created2['str key'] == 'created2 string'
assert created2['dict key'] == {
'nest': {
'nest2': 'nest and nest',
|
}
}
assert
|
created2['list key'] == [4, 5, 6]
assert created._created != created2._created
assert created._modified != created2._modified
assert created._version == 1
assert created2._version == 1
def test_partially_update_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
updated = bucket.partially_update_an_object(obj.object_id, {
'str key': 'updated string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
updated = bucket.retrieve_an_object(obj.object_id)
assert 'int key' in updated
assert updated['int key'] == 1
assert updated['str key'] == 'updated string'
assert updated['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert 'list key' in updated
assert updated['list key'] == [1, 2, 3]
assert created._created == updated._created
assert created._modified != updated._modified
assert created._version == 1
assert updated._version == 2
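    # Behavioural sketch (assumption, distilled from the two tests above):
    #
    #   bucket.fully_update_an_object(oid, {'a': 1})      # replaces the object; omitted keys are dropped
    #   bucket.partially_update_an_object(oid, {'b': 2})  # merges keys and bumps _version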
def test_delete_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
asser
|
eggplantbren/DNest4
|
code/Examples/RJObject_1DMixture/display.py
|
Python
|
mit
| 1,414
| 0.004243
|
import dnest4.classic as dn4
from pylab import *
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = 16
plt.rc("text", usetex=True)
data = loadtxt('galaxies.txt')
posterior_sample = atleast_2d(dn4.my_loadtxt('posterior_sample.txt'))
x = linspace(0., 50.0, 10001)
def mixture(x, params):
N = int(params[7])
centers = params[8:108][0:N]
widths = exp(params[108:208][0:N]) + 1.0
weights = exp(params[208:308][0:N])
weights /= weights.sum()
y = zeros(x.shape)
for i in range(0, N):
# Don't plot flukey narrow things (which ought to eventually average
# out, but won't in a finite sample)
# if widths[i] >= 0.02:
y += weights[i]/widths[i]/sqrt(2.*pi)*exp(-0.5*(x - centers[i])**2/widths[i]**2)
return y
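# The loop above evaluates (assumption: a standard Gaussian mixture density)
#   p(x) = sum_i w_i * exp(-(x - c_i)^2 / (2 s_i^2)) / (s_i * sqrt(2 pi))
# with the weights w_i normalised to sum to one.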
clf()
hist(data, 100, alpha=0.2, color="k", density=True)
y_tot = zeros(len(x))
for i in range(0, posterior_sample.shape[0]):
y = mixture(x, posterior_sample[i, :])
y_tot += y
plot(x, y_tot/posterior_sample.shape[0], 'g', linewidth=2)
xlabel("Velocity (1000 km/s)")
ylabel("Density")
savefig("galaxies.pdf", bbox_inches="tight")
show()
width = 0.3
bins = arange(0, 101) - 0.5*width
hist(posterior_sample[:,7]
|
, bins, width=width, density=True, color="k", alpha=0.2)
xlim([0, 100.5])
ylim([0, 0.05])
xlabel("Number of gaussians, $N$")
ylabel("Posterior Probability")
savefig("galaxies_N.pdf", bbox_inches="tight
|
")
show()
|
juantascon/flickr_mass_upload
|
flickr_mass_upload.py
|
Python
|
gpl-3.0
| 1,220
| 0.009836
|
#! /u
|
sr/bin/env python2
import sys, os
import flickrapi
import xml.etree.ElementTree
if len(sys.argv) < 2:
sys.stderr.write("usage: %s <filename> ..." % sys.argv[0])
sys.exit(1)
def auth(
|
):
api_key = "87af34fe62dafd3c5d6d4959ca92c193"
api_secret = "18ecfc909af569af"
flickr = flickrapi.FlickrAPI(api_key, api_secret)
(token, frob) = flickr.get_token_part_one(perms='write')
if not token: raw_input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))
return flickr
def tags(filename):
dirname = os.path.dirname(filename)
res = ""
while len(dirname) > 0:
res = "%s %s" % (res, os.path.basename(dirname))
dirname = os.path.dirname(dirname)
return res
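# Illustrative sketch (assumption): tags() joins the directory components of a
# path, innermost first, into a space-separated tag string, e.g.
#   tags("2016/holiday/img_001.jpg")  ->  " holiday 2016"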
def upload(flickr, filename, tags):
response = flickr.upload(filename=filename, tags=tags, is_public=0)
if response.attrib["stat"] == "ok":
photoid = response.find("photoid").text
print("%s: stat:OK id:%s"% (filename, photoid))
else:
print("%s: stat:FAIL\n%s" % (filename, xml.etree.ElementTree.tostring(response)))
flickr = auth()
for filename in sys.argv[1:len(sys.argv)]:
upload(flickr, filename, tags(filename))
|
jgmanzanas/CMNT_004_15
|
project-addons/scheduled_shipment/__openerp__.py
|
Python
|
agpl-3.0
| 566
| 0
|
# -*- coding: utf-8 -*-
# © 2016 Comunitea
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Order scheduled shipment",
"summary": "Sale order with sc
|
heduled shipment",
"version": "8.0.1.0.0",
"category": "Connector",
"author": "Nadia Ferreyra",
"license": "AGPL-3",
"installable": True,
"depends": [
"base",
|
"sale",
"connector",
"sale_stock",
"picking_invoice_pending"
],
"data": [
'data/job_channel_data.xml',
'views/sale_view.xml',
],
}
|
team-vigir/flexbe_behavior_engine
|
flexbe_onboard/test/test_onboard.py
|
Python
|
bsd-3-clause
| 4,765
| 0.001259
|
#!/usr/bin/env python
import sys
import os
import unittest
import zlib
import rospy
from flexbe_onboard.flexbe_onboard import FlexbeOnboard
from flexbe_core.proxy import ProxySubscriberCached
from flexbe_msgs.msg import BehaviorSelection, BEStatus, BehaviorLog, BehaviorModification
class TestOnboard(unittest.TestCase):
def __init__(self, name):
super(TestOnboard, self).__init__(name)
self.sub = ProxySubscriberCached({
'flexbe/status': BEStatus,
'flexbe/log': BehaviorLog
})
self.rate = rospy.Rate(100)
# make sure that behaviors can be imported
data_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, data_folder)
# run onboard and add custom test behaviors to onboard lib
self.onboard = FlexbeOnboard()
self.lib = self.onboard._behavior_lib
self.lib._add_behavior_manifests(data_folder)
def assertStatus(self, expected, timeout):
""" Assert that the expected onboard status is received before the timeout. """
for i in range(int(timeout*100)):
self.rate.sleep()
if self.sub.has_msg('flexbe/status'):
break
else:
raise AssertionError('Did not receive a status as required.')
msg = self.sub.get_last_msg('flexbe/status')
self.sub.remove_last_msg('flexbe/status')
self.assertEqual(msg.code, expected)
return msg
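    # Checksum sketch (assumption, matching what the tests below compute): the
    # onboard engine only accepts a BehaviorSelection whose checksum matches
    # the adler32 of the behavior source, masked to a positive 31-bit integer.
    #
    #   with open(source_path) as f:
    #       checksum = zlib.adler32(f.read().encode()) & 0x7fffffff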
def test_onboard_behaviors(self):
behavior_pub = rospy.Publisher('flexbe/start_behavior', BehaviorSelection, queue_size=1)
rospy.sleep(0.5) # wait for publisher
# wait for the initial status message
self.assertStatus(BEStatus.READY, 1)
# send simple behavior request without checksum
be_id, _ = self.lib.find_behavior("Test Behavior Log")
request = BehaviorSelection()
request.behavior_id = be_id
request.autonomy_level = 255
behavior_pub.publish(request)
self.assertStatus(BEStatus.ERROR, 2)
# send valid simple behavior request
with open(self.lib.get_sourcecode_filepath(be_id)) as f:
request.behavior_checksum = zlib.adler32(f.read().encode()) & 0x7fffffff
self.sub.enable_buffer('flexbe/log')
behavior_pub.publish(request)
self.assertStatus(BEStatus.STARTED, 1)
self.assertStatus(BEStatus.FIN
|
ISHED, 3)
behavior_logs = []
while self.sub.has_buffered('flexbe/log'):
behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text)
self.assertIn('Test data', behavior_logs)
# send valid complex behavior request
|
be_id, _ = self.lib.find_behavior("Test Behavior Complex")
request = BehaviorSelection()
request.behavior_id = be_id
request.autonomy_level = 255
request.arg_keys = ['param']
request.arg_values = ['value_2']
request.input_keys = ['data']
request.input_values = ['2']
with open(self.lib.get_sourcecode_filepath(be_id)) as f:
content = f.read()
modifications = [('flexbe_INVALID', 'flexbe_core'), ('raise ValueError("TODO: Remove!")', '')]
for replace, by in modifications:
index = content.index(replace)
request.modifications.append(BehaviorModification(index, index + len(replace), by))
for replace, by in modifications:
content = content.replace(replace, by)
request.behavior_checksum = zlib.adler32(content.encode()) & 0x7fffffff
behavior_pub.publish(request)
self.assertStatus(BEStatus.STARTED, 1)
result = self.assertStatus(BEStatus.FINISHED, 3)
self.assertEqual(result.args[0], 'finished')
behavior_logs = []
while self.sub.has_buffered('flexbe/log'):
behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text)
self.assertIn('value_2', behavior_logs)
# send the same behavior with different parameters
request.arg_keys = ['param', 'invalid']
request.arg_values = ['value_1', 'should be ignored']
request.input_keys = []
request.input_values = []
behavior_pub.publish(request)
self.assertStatus(BEStatus.STARTED, 1)
result = self.assertStatus(BEStatus.FINISHED, 3)
self.assertEqual(result.args[0], 'failed')
behavior_logs = []
while self.sub.has_buffered('flexbe/log'):
behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text)
self.assertIn('value_1', behavior_logs)
if __name__ == '__main__':
rospy.init_node('test_flexbe_onboard')
import rostest
rostest.rosrun('flexbe_onboard', 'test_flexbe_onboard', TestOnboard)
|
hds-lab/dsechatweb
|
dsechat/apps/accounts/forms.py
|
Python
|
mit
| 4,148
| 0.002652
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from registration.models import UserModel
from models import User as AccountsUser
class UserRegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
label=_("Create a username"),
error_messages={'invalid': _(
"Your username may contain only letters, numbers and @/./+/-/_ characters.")})
email = forms.EmailField(label=_("Your email address"))
first_name = forms.CharField(label=_("First
|
Name"))
last_name = forms.CharField(label=_("Last Name"))
password1 = forms.CharField(widget=forms.PasswordInput,
label=_("Create a password"))
password2 = forms.CharField(widget=forms.PasswordInput,
label=_("Your password again"))
def clean_username(self):
"""
Validate that the username is alphanumeric and i
|
s not already
in use.
"""
existing = UserModel().objects.filter(username__iexact=self.cleaned_data['username'])
if existing.exists():
raise forms.ValidationError(_("A user with that username already exists."))
else:
return self.cleaned_data['username']
def clean_email(self):
email = self.cleaned_data.get('email')
if email and UserModel().objects.filter(email__iexact=email).exists():
raise forms.ValidationError(_('A user with that email address already exists.'))
else:
return email
def clean(self):
"""
        Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
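    # Illustrative usage sketch (assumption, standard Django form handling):
    #
    #   form = UserRegistrationForm(request.POST)
    #   if form.is_valid():
    #       data = form.cleaned_data  # username/email verified unique, passwords match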
class UserProfileUpdateForm(forms.ModelForm):
class Meta:
model = UserModel()
fields = ['email', 'first_name', 'last_name']
required_css_class = 'required'
email = forms.EmailField(label=_("Your email address"))
first_name = forms.CharField(label=_("First Name"))
last_name = forms.CharField(label=_("Last Name"))
def clean_email(self):
email = self.cleaned_data.get('email')
# You cannot change your email to another user's email
if email and UserModel().objects.filter(email__iexact=email).exclude(pk=self.instance.pk).exists():
raise forms.ValidationError(_('A user with that email address already exists.'))
else:
return email
class ConsentForm(forms.ModelForm):
class Meta:
model = AccountsUser
fields = ['gives_consent', 'over18']
required_css_class = 'required'
gives_consent = forms.BooleanField(label=_("I agree to participate in the research"),
required=False)
over18 = forms.BooleanField(label=_("I am 18 years of age or older"),
required=False)
def clean(self):
over18 = self.cleaned_data.get('over18')
gives_consent = self.cleaned_data.get('gives_consent')
if gives_consent and not over18:
raise forms.ValidationError(_('You must be at least 18 years old to participate in the research.'))
return self.cleaned_data
|
anthonyfok/frescobaldi
|
frescobaldi_app/fileinfo.py
|
Python
|
gpl-2.0
| 6,388
| 0.004383
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
C
|
omputes and caches various information about files.
"""
import itertools
import re
import os
import atexit
import ly.document
import lydocinfo
import ly.lex
import filecache
import util
import variables
_document_cache = filecache.FileCache()
_suffix_chars_re = re.compile(r'[^-\w]', re.UNICODE)
### XXX otherwise I get a segfault on shutdown when very large music trees
### are ma
|
de (and every node references the document).
### (The segfault is preceded by a "corrupted double-linked list" message.)
atexit.register(_document_cache.clear)
class _CachedDocument(object):
"""Contains a document and related items."""
filename = None
document = None
variables = None
docinfo = None
music = None
def _cached(filename):
"""Return a _CachedDocument instance for the filename, else creates one."""
filename = os.path.realpath(filename)
try:
c = _document_cache[filename]
except KeyError:
with open(filename, 'rb') as f:
text = util.decode(f.read())
c = _document_cache[filename] = _CachedDocument()
c.variables = v = variables.variables(text)
c.document = ly.document.Document(text, v.get("mode"))
c.filename = c.document.filename = filename
return c
def document(filename):
"""Return a (cached) ly.document.Document for the filename."""
return _cached(filename).document
def docinfo(filename):
"""Return a (cached) LyDocInfo instance for the specified file."""
c = _cached(filename)
if c.docinfo is None:
c.docinfo = lydocinfo.DocInfo(c.document, c.variables)
return c.docinfo
def music(filename):
"""Return a (cached) music.Document instance for the specified file."""
c = _cached(filename)
if c.music is None:
import music
c.music = music.Document(c.document)
return c.music
def textmode(text, guess=True):
"""Returns the type of the given text ('lilypond, 'html', etc.).
Checks the mode variable and guesses otherwise if guess is True.
"""
mode = variables.variables(text).get("mode")
if mode in ly.lex.modes:
return mode
if guess:
return ly.lex.guessMode(text)
def includefiles(dinfo, include_path=()):
"""Returns a set of filenames that are included by the DocInfo's document.
The specified include path is used to find files. The own filename
is NOT added to the set. Included files are checked recursively,
relative to our file, relative to the including file, and if that
still yields no file, relative to the directories in the include_path.
If the document has no local filename, only the include_path is
searched for files.
"""
filename = dinfo.document.filename
basedir = os.path.dirname(filename) if filename else None
files = set()
def tryarg(directory, arg):
path = os.path.realpath(os.path.join(directory, arg))
if path not in files and os.path.isfile(path):
files.add(path)
args = docinfo(path).include_args()
find(args, os.path.dirname(path))
return True
def find(incl_args, directory):
for arg in incl_args:
# new, recursive, relative include
if not (directory and tryarg(directory, arg)):
# old include (relative to master file)
if not (basedir and tryarg(basedir, arg)):
# if path is given, also search there:
for p in include_path:
if tryarg(p, arg):
break
find(dinfo.include_args(), basedir)
return files
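# Illustrative usage sketch (assumption; the include_path value is hypothetical):
#
#   info = docinfo('/path/to/score.ly')
#   deps = includefiles(info, include_path=['/usr/share/lilypond/ly'])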
def basenames(dinfo, includefiles=(), filename=None, replace_suffix=True):
"""Returns the list of basenames a document is expected to create.
The list is created based on includefiles and the define output-suffix and
\bookOutputName and \bookOutputSuffix commands.
You should add '.ext' and/or '-[0-9]+.ext' to find created files.
If filename is given, it is regarded as the filename LilyPond is run on.
Otherwise, the filename of the info's document is read.
If replace_suffix is True (the default), special characters and spaces
in the suffix are replaced with underscores (in the same way as LilyPond
does it), using the replace_suffix_chars() function.
"""
basenames = []
basepath = os.path.splitext(filename or dinfo.document.filename)[0]
dirname, basename = os.path.split(basepath)
if basepath:
basenames.append(basepath)
def args():
yield dinfo.output_args()
for filename in includefiles:
yield docinfo(filename).output_args()
for type, arg in itertools.chain.from_iterable(args()):
if type == "suffix":
if replace_suffix:
# LilyPond (lily-library.scm:223) does this, too
arg = replace_suffix_chars(arg)
arg = basename + '-' + arg
path = os.path.normpath(os.path.join(dirname, arg))
if path not in basenames:
basenames.append(path)
return basenames
def replace_suffix_chars(s):
"""Replace spaces and most non-alphanumeric characters with underscores.
This is used to mimic the behaviour of LilyPond, which also does this,
for the output-suffix. (See scm/lily-library.scm:223.)
"""
return _suffix_chars_re.sub('_', s)
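# Example (assumption): replace_suffix_chars('my suffix!') -> 'my_suffix_'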
|