Dataset columns (dtype and observed value/length ranges; ⌀ marks nullable columns):

| column | dtype | values / lengths |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (⌀) |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (⌀) |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (⌀) |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 061bd88deb4206ce5331e0081dcdb2863e470f98 | size: 1,570 | ext: py | lang: Python
repo_path: venv/Lib/site-packages/bootstrap_py/pypi.py | repo_name: prats1997/Euphorum | repo_head_hexsha: 16bfee9c71ea5b1332c6263233c79a633ddfdd83 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 (2020-03-01T17:39:04.000Z to 2020-03-01T17:39:04.000Z) | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
"""bootstrap_py.pypi."""
import sys
import socket
from bootstrap_py.exceptions import BackendFailure, Conflict
if sys.version_info < (3, 0):
    import xmlrpclib as xmlrpc_client
else:
    from xmlrpc import client as xmlrpc_client

#: PyPI XML-RPC API URL
PYPI_URL = 'https://pypi.python.org/pypi'


def package_existent(name):
    """search package.

    * :class:`bootstrap_py.exceptions.Conflict` exception occurs
      when the user-specified name already exists.
    * :class:`bootstrap_py.exceptions.BackendFailure` exception occurs
      when the PyPI service is down.

    :param str name: package name
    """
    if sys.version_info < (3, 0):
        try:
            result = search_package(name)
        except (socket.error,
                xmlrpc_client.ProtocolError) as exc:
            raise BackendFailure(exc)
    else:
        try:
            result = search_package(name)
        except (socket.gaierror,
                TimeoutError,
                ConnectionRefusedError,
                xmlrpc_client.ProtocolError) as exc:
            raise BackendFailure(exc)
    if result:
        msg = ('[error] "{0}" is registered already in PyPI.\n'
               '\tSpecify another package name.').format(name)
        raise Conflict(msg)


def search_package(name):
    """search package.

    :param str name: package name
    :rtype: list
    :return: package name list
    """
    client = xmlrpc_client.ServerProxy(PYPI_URL)
    return [pkg for pkg in client.search({'name': name})
            if pkg.get('name') == name]
avg_line_length: 27.54386 | max_line_length: 70 | alphanum_fraction: 0.629299
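Illustrative only: a minimal sketch of how the two helpers in the file above are meant to be called, assuming `bootstrap_py` and its `exceptions` module are importable; the package name is made up.

```python
from bootstrap_py import pypi
from bootstrap_py.exceptions import BackendFailure, Conflict

try:
    # Raises Conflict if the name is already registered, BackendFailure if PyPI is unreachable.
    pypi.package_existent("my-new-package")
    print("name appears to be free")
except Conflict as error:
    print(error)
except BackendFailure as error:
    print("could not reach PyPI:", error)
```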
hexsha: 45ab49a6475d6852478897b0a41080d2aa12e9fb | size: 3,102 | ext: py | lang: Python
repo_path: setup.py | repo_name: jrdzha/lux-widget | repo_head_hexsha: 91f53a29bba47df84bc953b441cda211d119ab1d | repo_licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from glob import glob
from os.path import join as pjoin
from setupbase import (
create_cmdclass, install_npm, ensure_targets,
find_packages, combine_commands, ensure_python,
get_version, HERE
)
from setuptools import setup
# The name of the project
name = 'luxWidget'
# Ensure a valid python version
ensure_python('>=3.4')
# Get our version
version = get_version(pjoin(name, '_version.py'))
nb_path = pjoin(HERE, name, 'nbextension', 'static')
lab_path = pjoin(HERE, name, 'labextension')
# Representative files that should exist after a successful build
jstargets = [
pjoin(nb_path, 'index.js'),
pjoin(HERE, 'lib', 'plugin.js'),
]
package_data_spec = {
name: [
'nbextension/static/*.*js*',
'labextension/*.tgz'
]
}
data_files_spec = [
('share/jupyter/nbextensions/luxWidget',
nb_path, '*.js*'),
('share/jupyter/lab/extensions', lab_path, '*.tgz'),
('etc/jupyter/nbconfig/notebook.d' , HERE, 'luxWidget.json')
]
cmdclass = create_cmdclass('jsdeps', package_data_spec=package_data_spec,
data_files_spec=data_files_spec)
cmdclass['jsdeps'] = combine_commands(
install_npm(HERE, build_cmd='build:all'),
ensure_targets(jstargets),
)
setup_args = dict(
name = name,
description = 'A Custom Jupyter Widget Library',
version = version,
scripts = glob(pjoin('scripts', '*')),
# cmdclass = cmdclass,
packages = find_packages(),
author = 'Doris Lee',
author_email = 'dorisjunglinlee@gmail.com',
url = 'https://github.com/lux-org/lux-widget',
license = 'BSD',
platforms = "Linux, Mac OS X, Windows",
keywords = ['Jupyter', 'Widgets', 'IPython'],
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Jupyter',
],
include_package_data = True,
install_requires = [
'ipywidgets>=7.0.0',
],
extras_require = {
'test': [
'pytest>=3.6',
'pytest-cov',
'nbval',
],
'examples': [
# Any requirements for the examples to run
],
'docs': [
'sphinx>=1.5',
'recommonmark',
'sphinx_rtd_theme',
'nbsphinx>=0.2.13,<0.4.0',
'jupyter_sphinx',
'nbsphinx-link',
'pytest_check_links',
'pypandoc',
],
},
entry_points = {
},
)
if __name__ == '__main__':
    setup(**setup_args)
avg_line_length: 26.512821 | max_line_length: 73 | alphanum_fraction: 0.589297
hexsha: 6fad667f61d4bcf126aedd5fd5f0ada639fc3c54 | size: 270 | ext: py | lang: Python
repo_path: api/config.py | repo_name: disfear86/Restaurant-Menus | repo_head_hexsha: dbccd0a42f8ca5413f079a5aacc57df9bfbf2f5d | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'mysql://<user>:<password>@localhost/database_name'
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = 'dev-key'
avg_line_length: 30 | max_line_length: 77 | alphanum_fraction: 0.796296
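A hedged sketch of how a module-level Flask config file like the one above is typically consumed. The `api.config` import path follows this row's repo_path; the app wiring and the use of Flask-SQLAlchemy are assumptions, not taken from the file.

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
# Pulls in SQLALCHEMY_DATABASE_URI, SQLALCHEMY_TRACK_MODIFICATIONS, SECRET_KEY, ...
app.config.from_object('api.config')
db = SQLAlchemy(app)
```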
hexsha: b6600e4efb42a53e11229567ffde13b656748f02 | size: 690 | ext: py | lang: Python
repo_path: heufybot/utils/__init__.py | repo_name: HubbeKing/PyHeufyBot | repo_head_hexsha: 61f6dc9c64dc3a0cc421ce9881c2539ced22c915 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Taken from txircd:
# https://github.com/ElementalAlchemist/txircd/blob/8832098149b7c5f9b0708efe5c836c8160b0c7e6/txircd/utils.py#L9
def _enum(**enums):
    return type('Enum', (), enums)


ModeType = _enum(LIST=0, PARAM_SET=1, PARAM_UNSET=2, NO_PARAM=3)


def isNumber(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def parseUserPrefix(prefix):
    if "!" in prefix:
        nick = prefix[:prefix.find("!")]
        ident = prefix[prefix.find("!") + 1:prefix.find("@")]
        host = prefix[prefix.find("@") + 1:]
        return nick, ident, host
    # Not all "users" have idents and hostnames
    nick = prefix
    return nick, None, None
avg_line_length: 27.6 | max_line_length: 111 | alphanum_fraction: 0.634783
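A quick illustrative check of the helpers above, using made-up IRC prefixes; the import path follows this row's repo_path.

```python
from heufybot.utils import ModeType, isNumber, parseUserPrefix

assert parseUserPrefix("alice!ident@example.org") == ("alice", "ident", "example.org")
assert parseUserPrefix("irc.example.org") == ("irc.example.org", None, None)  # servers carry no ident/host split
assert isNumber("3.14") and not isNumber("pi")
assert ModeType.PARAM_SET == 1
```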
hexsha: 80f418eba7a63445e35b02573ee9e1b2fb15131d | size: 2,212 | ext: py | lang: Python
repo_path: exemplos/chatbot/Chatbot.py | repo_name: cirino/python | repo_head_hexsha: 6c45b5305aebeeeebb7ffef335700e41cc0b6b3b | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 (2018-05-06T01:25:28.000Z to 2018-05-06T01:25:28.000Z) | max_issues_count: 1 (2019-02-10T18:46:37.000Z to 2019-02-12T21:17:50.000Z) | max_forks_count: null
import json
import subprocess as s
class Chatbot():
    def __init__(self, nome):
        try:
            memoria = open(nome+'.json','r')
        except FileNotFoundError:
            memoria = open(nome+'.json','w')
            memoria.write('["Will","Alfredo"]')
            memoria.close()
            memoria = open(nome+'.json','r')
        self.nome = nome
        self.conhecidos = json.load(memoria)
        memoria.close()
        self.historico = []
        self.frases = {'oi': 'Olá, qual o seu nome?','tchau':'tchau'}  # 'hi' -> 'Hello, what is your name?'; 'bye' -> 'bye'

    def escuta(self,frase=None):
        # "listen": read a phrase and normalise it
        if frase == None:
            frase = input('>: ')
        frase = str(frase)
        frase = frase.lower()
        frase = frase.replace('é','eh')
        return frase

    def pensa(self,frase):
        # "think": choose a reply for the phrase
        if frase in self.frases:
            return self.frases[frase]
        if frase == 'aprende':
            chave = input('Digite a frase: ')        # 'Type the phrase: '
            resp = input('Digite a resposta: ')      # 'Type the answer: '
            self.frases[chave] = resp
            return 'Aprendido'                       # 'Learned'
        if self.historico:
            if self.historico[-1] == 'Olá, qual o seu nome?':
                nome = self.pegaNome(frase)
                frase = self.respondeNome(nome)
                return frase
        try:
            resp = str(eval(frase))
            return resp
        except:
            pass
        return 'Não entendi'                         # 'I did not understand'

    def pegaNome(self,nome):
        # "get name" from "o meu nome eh <name>" ("my name is <name>")
        if 'o meu nome eh ' in nome:
            nome = nome[14:]
        nome = nome.title()
        return nome

    def respondeNome(self,nome):
        # "answer name": greet known names, remember new ones
        if nome in self.conhecidos:
            frase = 'Eaew '                          # informal greeting
        else:
            frase = 'Muito prazer '                  # 'nice to meet you'
            self.conhecidos.append(nome)
            memoria = open(self.nome+'.json','w')
            json.dump(self.conhecidos,memoria)
            memoria.close()
        return frase+nome

    def fala(self,frase):
        # "speak": run "executa <command>" requests, otherwise print the reply
        if 'executa ' in frase:
            comando = frase.replace('executa ','')
            try:
                s.Popen(comando)
            except FileNotFoundError:
                s.Popen(['xdg-open',comando])
        else:
            print(frase)
        self.historico.append(frase)
avg_line_length: 29.105263 | max_line_length: 69 | alphanum_fraction: 0.496383
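A minimal interaction sketch with the `Chatbot` class above, run in the same module; the bot name `'Will'` is an assumption, and the class persists known names to `Will.json` next to the script.

```python
bot = Chatbot('Will')

frase = bot.escuta('oi')        # normalises the input: lower case, 'é' -> 'eh'
resposta = bot.pensa(frase)     # canned reply: 'Olá, qual o seu nome?'
bot.fala(resposta)              # prints the reply and appends it to the history

# Because the last reply asked for a name, pensa() now routes through pegaNome/respondeNome.
bot.fala(bot.pensa(bot.escuta('o meu nome eh Maria')))   # 'Muito prazer Maria', and Maria is saved
```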
hexsha: 8fa1f33308eb057a72992c71b0217d117da4ec5b | size: 1,645 | ext: py | lang: Python
repo_path: DQM/SiPixelPhase1Config/python/SiPixelPhase1OfflineDQM_harvesting_cff.py | repo_name: Purva-Chaudhari/cmssw | repo_head_hexsha: 32e5cbfe54c4d809d60022586cf200b7c3020bcf | repo_licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 13 (2015-11-30T15:49:45.000Z to 2022-02-08T16:11:30.000Z) | max_issues_count: 640 (2015-02-11T18:55:47.000Z to 2022-03-31T14:12:23.000Z) | max_forks_count: 51 (2015-08-11T21:01:40.000Z to 2022-03-30T07:31:34.000Z)
import FWCore.ParameterSet.Config as cms
from DQM.SiPixelPhase1Config.SiPixelPhase1OfflineDQM_source_cff import *
siPixelPhase1OfflineDQM_harvesting = cms.Sequence(SiPixelPhase1RawDataHarvester
+ SiPixelPhase1DigisHarvester
+ SiPixelPhase1DeadFEDChannelsHarvester
+ SiPixelPhase1ClustersHarvester
+ SiPixelPhase1RecHitsHarvester
+ SiPixelPhase1TrackResidualsHarvester
+ SiPixelPhase1TrackClustersHarvester
+ SiPixelPhase1TrackEfficiencyHarvester
+ SiPixelPhase1RawDataHarvester
+ RunQTests_offline
+ SiPixelPhase1SummaryOffline
+ SiPixelBarycenterOffline
+ SiPixelPhase1ResidualsExtra
)
siPixelPhase1OfflineDQM_harvesting_cosmics = siPixelPhase1OfflineDQM_harvesting.copyAndExclude([
SiPixelPhase1TrackEfficiencyHarvester,
])
siPixelPhase1OfflineDQM_harvesting_cosmics.replace(RunQTests_offline, RunQTests_cosmics)
siPixelPhase1OfflineDQM_harvesting_cosmics.replace(SiPixelPhase1SummaryOffline, SiPixelPhase1SummaryCosmics)
siPixelPhase1OfflineDQM_harvesting_hi = siPixelPhase1OfflineDQM_harvesting.copy()
avg_line_length: 54.833333 | max_line_length: 108 | alphanum_fraction: 0.558055
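A hedged sketch of how a harvesting sequence such as `siPixelPhase1OfflineDQM_harvesting` is usually scheduled in a CMSSW configuration; the process name and path wiring are assumptions, not taken from the file above.

```python
import FWCore.ParameterSet.Config as cms

process = cms.Process("HARVESTING")
process.load("DQM.SiPixelPhase1Config.SiPixelPhase1OfflineDQM_harvesting_cff")

# Put the imported cms.Sequence into a Path and schedule it.
process.harvesting_step = cms.Path(process.siPixelPhase1OfflineDQM_harvesting)
process.schedule = cms.Schedule(process.harvesting_step)
```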
hexsha: f1600c45a2c60ea0252592bab7642f22482d4330 | size: 8,104 | ext: py | lang: Python
repo_path: test/functional/wallet_listreceivedby.py | repo_name: joynicoferna/carpinchocoin | repo_head_hexsha: 987284642d94e26c2b3b884c14846068d124a24a | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listreceivedbyaddress RPC."""
from decimal import Decimal
from test_framework.test_framework import CARPINCHOTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import test_address
class ReceivedByTest(CARPINCHOTestFramework):
    def set_test_params(self):
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
        self.skip_if_no_cli()

    def run_test(self):
        # Generate block to get out of IBD
        self.nodes[0].generate(1)
        self.sync_blocks()

        # save the number of coinbase reward addresses so far
        num_cb_reward_addresses = len(self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True))

        self.log.info("listreceivedbyaddress Test")

        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # Check not listed in listreceivedbyaddress because has 0 confirmations
        assert_array_result(self.nodes[1].listreceivedbyaddress(),
                            {"address": addr},
                            {},
                            True)
        # Bury Tx under 10 block so it will be returned by listreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        assert_array_result(self.nodes[1].listreceivedbyaddress(),
                            {"address": addr},
                            {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With min confidence < 10
        assert_array_result(self.nodes[1].listreceivedbyaddress(5),
                            {"address": addr},
                            {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With min confidence > 10, should not find Tx
        assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
        # Empty Tx
        empty_addr = self.nodes[1].getnewaddress()
        assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
                            {"address": empty_addr},
                            {"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []})

        # Test Address filtering
        # Only on addr
        expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}
        res = self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True, address_filter=addr)
        assert_array_result(res, {"address": addr}, expected)
        assert_equal(len(res), 1)
        # Test for regression on CLI calls with address string (#14173)
        cli_res = self.nodes[1].cli.listreceivedbyaddress(0, True, True, addr)
        assert_array_result(cli_res, {"address": addr}, expected)
        assert_equal(len(cli_res), 1)
        # Error on invalid address
        assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling")
        # Another address receive money
        res = self.nodes[1].listreceivedbyaddress(0, True, True)
        assert_equal(len(res), 2 + num_cb_reward_addresses)  # Right now 2 entries
        other_addr = self.nodes[1].getnewaddress()
        txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1)
        self.nodes[0].generate(1)
        self.sync_all()
        # Same test as above should still pass
        expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 11, "txids": [txid, ]}
        res = self.nodes[1].listreceivedbyaddress(0, True, True, addr)
        assert_array_result(res, {"address": addr}, expected)
        assert_equal(len(res), 1)
        # Same test as above but with other_addr should still pass
        expected = {"address": other_addr, "label": "", "amount": Decimal("0.1"), "confirmations": 1, "txids": [txid2, ]}
        res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
        assert_array_result(res, {"address": other_addr}, expected)
        assert_equal(len(res), 1)
        # Should be two entries though without filter
        res = self.nodes[1].listreceivedbyaddress(0, True, True)
        assert_equal(len(res), 3 + num_cb_reward_addresses)  # Became 3 entries
        # Not on random addr
        other_addr = self.nodes[0].getnewaddress()  # note on node[0]! just a random addr
        res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
        assert_equal(len(res), 0)

        self.log.info("getreceivedbyaddress Test")

        # Send from node 0 to 1
        addr = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # Check balance is 0 because of 0 confirmations
        balance = self.nodes[1].getreceivedbyaddress(addr)
        assert_equal(balance, Decimal("0.0"))

        # Check balance is 0.1
        balance = self.nodes[1].getreceivedbyaddress(addr, 0)
        assert_equal(balance, Decimal("0.1"))

        # Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress
        self.nodes[1].generate(10)
        self.sync_all()
        balance = self.nodes[1].getreceivedbyaddress(addr)
        assert_equal(balance, Decimal("0.1"))

        # Trying to getreceivedby for an address the wallet doesn't own should return an error
        assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)

        self.log.info("listreceivedbylabel + getreceivedbylabel Test")

        # set pre-state
        label = ''
        address = self.nodes[1].getnewaddress()
        test_address(self.nodes[1], address, labels=[label])
        received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
        balance_by_label = self.nodes[1].getreceivedbylabel(label)

        txid = self.nodes[0].sendtoaddress(addr, 0.1)
        self.sync_all()

        # listreceivedbylabel should return received_by_label_json because of 0 confirmations
        assert_array_result(self.nodes[1].listreceivedbylabel(),
                            {"label": label},
                            received_by_label_json)
        # getreceivedbyaddress should return same balance because of 0 confirmations
        balance = self.nodes[1].getreceivedbylabel(label)
        assert_equal(balance, balance_by_label)

        self.nodes[1].generate(10)
        self.sync_all()
        # listreceivedbylabel should return updated received list
        assert_array_result(self.nodes[1].listreceivedbylabel(),
                            {"label": label},
                            {"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))})
        # getreceivedbylabel should return updated receive total
        balance = self.nodes[1].getreceivedbylabel(label)
        assert_equal(balance, balance_by_label + Decimal("0.1"))

        # Create a new label named "mynewlabel" that has a 0 balance
        address = self.nodes[1].getnewaddress()
        self.nodes[1].setlabel(address, "mynewlabel")
        received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel(0, True) if r["label"] == "mynewlabel"][0]

        # Test includeempty of listreceivedbylabel
        assert_equal(received_by_label_json["amount"], Decimal("0.0"))

        # Test getreceivedbylabel for 0 amount labels
        balance = self.nodes[1].getreceivedbylabel("mynewlabel")
        assert_equal(balance, Decimal("0.0"))


if __name__ == '__main__':
    ReceivedByTest().main()
avg_line_length: 47.116279 | max_line_length: 197 | alphanum_fraction: 0.641288
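The test above leans on `assert_array_result` from `test_framework.util`; the following is a simplified, self-contained restatement of that matching idiom (not the actual implementation) so the assertions are easier to read, with made-up data.

```python
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """Find entries matching to_match, then require the expected key/values on each hit."""
    matched = [obj for obj in object_array
               if all(obj.get(key) == value for key, value in to_match.items())]
    if should_not_find:
        assert matched == [], "unexpectedly found a matching entry"
        return
    assert matched, "no matching entry found"
    for obj in matched:
        for key, value in expected.items():
            assert obj[key] == value, "%s: %r != %r" % (key, obj[key], value)

listreceived = [{"address": "addr1", "amount": 0.1, "confirmations": 10}]
assert_array_result(listreceived, {"address": "addr1"}, {"confirmations": 10})
assert_array_result(listreceived, {"address": "missing"}, {}, should_not_find=True)
```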
hexsha: d1c137be6fff73ba3b474343cc221326bda37473 | size: 207 | ext: py | lang: Python
repo_path: beerbar/beerbar/doctype/release_to_loose/test_release_to_loose.py | repo_name: reddymeghraj/beerbar | repo_head_hexsha: ac082b11e8535e5ea5014e3a49598571ae200471 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Copyright (c) 2013, wayzon and Contributors
# See license.txt
import frappe
import unittest
test_records = frappe.get_test_records('Release To Loose')
class TestReleaseToLoose(unittest.TestCase):
    pass
avg_line_length: 18.818182 | max_line_length: 58 | alphanum_fraction: 0.797101
hexsha: c8c6e1062ec7aaad40724d9db7a22b3f80b2da4b | size: 7,286 | ext: py | lang: Python
repo_path: shadowsocks/restapi.py | repo_name: lyrl/ssmgr-ssrest | repo_head_hexsha: 33c60190189dea9d948008385b31ea843f49c63e | repo_licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import json
import sys
import gevent
from flask import Flask, Response, request
from flask_cors import CORS
import threading
from shadowsocks.manager import Manager
from shadowsocks.cryptor import Cryptor
from flask import abort
import logging
from shadowsocks import cryptor
from shadowsocks.queue import add_task
from shadowsocks.queue import loop
logging.basicConfig(level=20,
format='%(asctime)s [%(module)s] %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
manager = Manager()
app = Flask(__name__)
config = None
CORS(app)
@app.route('/api/ping')
def ping():
    return 'pong'


@app.route('/api/state')
def stat():
    _check_security_key()
    return Response(json.dumps({'alive': threading.activeCount()}), mimetype='application/json')


@app.route('/api/sync', methods=['POST'])
def sync():
    # Clean up zombie ports.
    # Fix ports whose passwords do not match the database.
    # Clean up ports that do not exist in the database.
    # "user" fields:
    # user = {
    #     username: user.username,
    #     password: user.userNodes.password,
    #     method: user.userNodes.method,
    #     port: user.userNodes.port
    # };
    _check_security_key()
    if request.method == 'POST':
        users = json.loads(request.data)['users']
        logging.info("Received user sync request! Data: %s" % json.dumps(users))
        # dict(u, u) for u in users
        req_data = {u['username']: u for u in users}
        node_data = manager.get_all_ports()
        node_data_map = {n['username']: n for n in node_data}
        # 1. Users that are not on this node yet: sync them over directly.
        for u in req_data.keys():
            if not node_data_map.has_key(u):
                logging.info("User %s does not exist on this node, syncing!" % u)
                cmp_data = req_data[u]
                cmp_data['server_port'] = cmp_data['port']
                cmp_data['password'] = cmp_data['password'].encode('utf-8')
                cmp_data['method'] = cmp_data['method'].encode('utf-8')
                manager.add_port(cmp_data)
                logging.info("Sync succeeded!")
        # [{'port': k, 'username': self._relays[k][2], 'password': self._relays[k][3], 'method': self._relays[k][4]} for k in self._relays.keys()]
        # 2. Users that already exist on this node: check whether their config matches the database.
        for data in node_data:
            # Remove users that are not in the database.
            if not req_data.has_key(data['username']):
                logging.info("User %s is not in the database, removing!" % data['username'])
                manager.remove_port({'server_port': data['port']})
                logging.info("Removal succeeded!")
            else:  # users present in the database: check that port, password and cipher still match
                cmp_data = req_data[data['username']]
                cmp_data['server_port'] = cmp_data['port']
                cmp_data['password'] = cmp_data['password'].encode('utf-8')
                cmp_data['method'] = cmp_data['method'].encode('utf-8')
                if cmp_data['port'] and cmp_data['port'] != data['port']:
                    logging.info("User %s port differs from the database, forcing a re-sync!" % data['username'])
                    manager.remove_port({'server_port': data['port']})
                    manager.add_port(cmp_data)
                    logging.info("Sync succeeded!")
                if cmp_data['password'] != data['password']:
                    logging.info("User %s password differs from the database, forcing a re-sync!" % data['username'])
                    manager.remove_port({'server_port': data['port']})
                    manager.add_port(cmp_data)
                    logging.info("Sync succeeded!")
                if cmp_data['method'] != data['method']:
                    logging.info("User %s encryption method differs from the database, forcing a re-sync!" % data['username'])
                    manager.remove_port({'server_port': data['port']})
                    manager.add_port(cmp_data)
                    logging.info("Sync succeeded!")
    return Response(json.dumps({'users': manager.get_all_ports()}), mimetype='application/json')


@app.route('/api/users', methods=['GET', 'POST'])
def users():
    _check_security_key()
    if request.method == 'GET':
        return Response(json.dumps({'users': manager.get_all_ports()}), mimetype='application/json')
    elif request.method == 'POST':
        data = json.loads(request.data)['user']
        if data.has_key('port') and data['port'] and data['port'] != 'null':
            data['server_port'] = data['port']
        else:
            data['server_port'] = manager.gen_port_num()
        method_info = Cryptor.get_method_info(data['method'].lower())
        data['password'] = data['password'].encode('utf-8')
        data['method'] = data['method'].encode('utf-8')
        if not method_info:
            logging.error(u"Unsupported encryption algorithm %s!" % data['method'])
            return Response(json.dumps({'errors': {'message': u'Unsupported encryption algorithm %s!' % data['method']}}), mimetype='application/json')
        if manager.is_has_port(data['server_port']):
            logging.error(u"Port already exists!")
            return Response(json.dumps({'errors': {'message': 'Port already exists!'}}), mimetype='application/json')
        if manager.add_port(data):
            logging.error(u"Port %s added successfully!" % data['server_port'])
            return Response(json.dumps({'user': data}), mimetype='application/json')


@app.route('/api/users/<string:username>', methods=['DELETE'])
def delete_user(username):
    _check_security_key()
    if request.method == 'DELETE':
        port = manager.get_port_by_username(username)
        if not port:
            return Response(json.dumps({'errors': {'message': 'User does not exist!'}}), mimetype='application/json')
        if manager.remove_port({'server_port': port}):
            return Response(json.dumps({'server_port': port}), mimetype='application/json')


@app.route('/api/ports/<int:port>', methods=['DELETE'])
def delete_port(port):
    _check_security_key()
    if request.method == 'DELETE':
        if not manager.is_has_port(port):
            return Response(json.dumps({'errors': {'message': 'Port does not exist!'}}), mimetype='application/json')
        if manager.remove_port({'server_port': port}):
            return Response(json.dumps({'server_port': port}), mimetype='application/json')


def _check_security_key():
    security_key = request.headers.get('Authorization')
    if security_key != config['security_key']:
        abort(403)


if __name__ == "__main__":
    try:
        file = open('config.json', 'r')
    except IOError as e:
        logging.error(u'Could not find config.json in the current directory!')
        sys.exit(0)
    config = json.loads(file.read())
    manager.set_config(config)
    manager.sync_users()
    # new thread to run loop
    threading._start_new_thread(manager.run, ())
    threading._start_new_thread(loop, ())
    app.run(port=config['rest_api_port'], host='0.0.0.0')
avg_line_length: 34.206573 | max_line_length: 146 | alphanum_fraction: 0.60829
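A hedged client-side sketch for the REST endpoints defined above; the host, port, security key and cipher name all come from the server's config.json and are assumptions here.

```python
import json
import requests

BASE = "http://127.0.0.1:6001/api"               # rest_api_port from config.json
HEADERS = {"Authorization": "my-security-key"}   # must equal config['security_key']

print(requests.get(BASE + "/ping").text)                       # "pong" (no auth required)
print(requests.get(BASE + "/state", headers=HEADERS).json())   # {"alive": <thread count>}

new_user = {"user": {"username": "alice", "password": "secret",
                     "method": "aes-256-cfb", "port": 9001}}
resp = requests.post(BASE + "/users", headers=HEADERS, data=json.dumps(new_user))
print(resp.json())
```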
hexsha: c6594f79ecdf0796a5379cac7b65f786dd74be44 | size: 14,024 | ext: py | lang: Python
repo_path: Starscape_Module.py | repo_name: SayanChaki/Starscape-Module | repo_head_hexsha: dde56b686d4ecd1882ff170e304f2d2debe55091 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 (2021-01-26T19:20:52.000Z to 2021-01-26T19:20:52.000Z) | max_issues_count: 8 (2021-01-26T15:13:40.000Z to 2021-01-26T18:14:45.000Z) | max_forks_count: null
"""
@author: SAYAN CHAKI
"""
import cv2
from matplotlib import pyplot as plt
import numpy as np
import math
from PIL import Image
import PIL
def onbrightness():
img =cv2.imread("ESO.jpg")
gray =cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh=cv2.threshold(gray,0,255 ,cv2.THRESH_BINARY)
f=1
while f :
fix=int(input("enter integer to fix threshold: "))
if 0<fix<255:
ret, thresh2=cv2.threshold(gray, fix, 255,cv2.THRESH_BINARY)
f=0
else:
print("Wrong threshold value")
print(ret)
plt.figure("BINARY")
plt.imshow(thresh2, cmap="gray")
plt.show()
contours, hierarchy = cv2.findContours(thresh2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maxc = -1
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if area>maxc:
maxc = area
minc=maxc
print(maxc)
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if area<minc:
minc = area
print(minc)
c=int(input("Enter upper parameter to fix range: "))
d=int(input("Enter lower parameter to fix range: "))
up=(maxc+minc)/c
low=(maxc+minc)/d
print(up,low)
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if low<area<=up:
img=cv2.drawContours(img,contours[i],-1,(0,225,0),5)
plt.imshow(img)
plt.show()
cv2.imwrite('Eso_bright.jpg',img)
def shiftbased():
img =cv2.imread("ESO.jpg")
img1=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
shift=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
red_lower = np.array([136, 87, 111], np.uint8)
red_upper = np.array([180, 255, 255], np.uint8)
red_mask = cv2.inRange(shift, red_lower, red_upper)
kernal = np.ones((5, 5), "uint8")
print(red_mask)
# For red color
red_mask = cv2.dilate(red_mask, kernal)
res_red = cv2.bitwise_and(img, img,
mask = red_mask)
contours, hierarchy = cv2.findContours(red_mask,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
c=0
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if(area > 10):
img1=cv2.drawContours(img1,contours[i],-1,(0,225,0),5)
c=c+1
print("The Count of number of red shifted stars is: ")
print(c)
plt.imshow(img1)
plt.show()
cv2.imwrite('Eso_shift.jpg',img1)
def temperaturedatabase():
kelvin_table = {
1000: (255, 56, 0),
1100: (255, 71, 0),
1200: (255, 83, 0),
1300: (255, 93, 0),
1400: (255, 101, 0),
1500: (255, 109, 0),
1600: (255, 115, 0),
1700: (255, 121, 0),
1800: (255, 126, 0),
1900: (255, 131, 0),
2000: (255, 138, 18),
2100: (255, 142, 33),
2200: (255, 147, 44),
2300: (255, 152, 54),
2400: (255, 157, 63),
2500: (255, 161, 72),
2600: (255, 165, 79),
2700: (255, 169, 87),
2800: (255, 173, 94),
2900: (255, 177, 101),
3000: (255, 180, 107),
3100: (255, 184, 114),
3200: (255, 187, 120),
3300: (255, 190, 126),
3400: (255, 193, 132),
3500: (255, 196, 137),
3600: (255, 199, 143),
3700: (255, 201, 148),
3800: (255, 204, 153),
3900: (255, 206, 159),
4000: (255, 209, 163),
4100: (255, 211, 168),
4200: (255, 213, 173),
4300: (255, 215, 177),
4400: (255, 217, 182),
4500: (255, 219, 186),
4600: (255, 221, 190),
4700: (255, 223, 194),
4800: (255, 225, 198),
4900: (255, 227, 202),
5000: (255, 228, 206),
5100: (255, 230, 210),
5200: (255, 232, 213),
5300: (255, 233, 217),
5400: (255, 235, 220),
5500: (255, 236, 224),
5600: (255, 238, 227),
5700: (255, 239, 230),
5800: (255, 240, 233),
5900: (255, 242, 236),
6000: (255, 243, 239),
6100: (255, 244, 242),
6200: (255, 245, 245),
6300: (255, 246, 247),
6400: (255, 248, 251),
6500: (255, 249, 253),
6600: (254, 249, 255),
6700: (252, 247, 255),
6800: (249, 246, 255),
6900: (247, 245, 255),
7000: (245, 243, 255),
7100: (243, 242, 255),
7200: (240, 241, 255),
7300: (239, 240, 255),
7400: (237, 239, 255),
7500: (235, 238, 255),
7600: (233, 237, 255),
7700: (231, 236, 255),
7800: (230, 235, 255),
7900: (228, 234, 255),
8000: (227, 233, 255),
8100: (225, 232, 255),
8200: (224, 231, 255),
8300: (222, 230, 255),
8400: (221, 230, 255),
8500: (220, 229, 255),
8600: (218, 229, 255),
8700: (217, 227, 255),
8800: (216, 227, 255),
8900: (215, 226, 255),
9000: (214, 225, 255),
9100: (212, 225, 255),
9200: (211, 224, 255),
9300: (210, 223, 255),
9400: (209, 223, 255),
9500: (208, 222, 255),
9600: (207, 221, 255),
9700: (207, 221, 255),
9800: (206, 220, 255),
9900: (205, 220, 255),
10000: (207, 218, 255),
10100: (207, 218, 255),
10200: (206, 217, 255),
10300: (205, 217, 255),
10400: (204, 216, 255),
10500: (204, 216, 255),
10600: (203, 215, 255),
10700: (202, 215, 255),
10800: (202, 214, 255),
10900: (201, 214, 255),
11000: (200, 213, 255),
11100: (200, 213, 255),
11200: (199, 212, 255),
11300: (198, 212, 255),
11400: (198, 212, 255),
11500: (197, 211, 255),
11600: (197, 211, 255),
11700: (197, 210, 255),
11800: (196, 210, 255),
11900: (195, 210, 255),
12000: (195, 209, 255)}
kelvin_list = [1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900,
2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900,
3000, 3100, 3200, 3300, 3400, 3500, 3600, 3700, 3800, 3900,
4000, 4100, 4200, 4300, 4400, 4500, 4600, 4700, 4800, 4900,
5000, 5100, 5200, 5300, 5400, 5500, 5600, 5700, 5800, 5900,
6000, 6100, 6200, 6300, 6400, 6500, 6600, 6700, 6800, 6900,
7000, 7100, 7200, 7300, 7400, 7500, 7600, 7700, 7800, 7900,
8000, 8100, 8200, 8300, 8400, 8500, 8600, 8700, 8800, 8900,
9000, 9100, 9200, 9300, 9400, 9500, 9600, 9700, 9800, 9900,
10000, 10100, 10200, 10300, 10400, 10500, 10600, 10700, 10800, 10900,
11000, 11100, 11200, 11300, 11400, 11500, 11600, 11700, 11800, 11900,
12000]
upper_star_temp=int(input("Enter the upper temperature"))
lower_star_temp=int(input("Enter the lower temperature"))
temp1=kelvin_table[lower_star_temp]
lower_red,lower_green,lower_blue=temp1
temp2=kelvin_table[upper_star_temp]
upper_red,upper_green,upper_blue=temp2
print(upper_red)
print(upper_blue)
print(upper_green)
ut=np.array([upper_red,upper_green,upper_blue],np.uint8)
lt=np.array([lower_red,lower_green,lower_blue],np.uint8)
print("The RGB range for the corresponding temperature range is: ")
print(lt)
print(ut)
def hubble():
img=cv2.imread("ESO.jpg")
img1=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
gray =cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
ret,bina=cv2.threshold(gray,250,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(bina, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maxc = -1
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if area>maxc:
maxc = area
minc=maxc
print(maxc)
for i in range(len(contours)):
area = cv2.contourArea(contours[i])
if area<minc:
minc = area
print(minc)
c=0
up=(maxc+minc)/1
low=(maxc+minc)/2
print(up,low)
for i in range(len(contours)):
carea = cv2.contourArea(contours[i])
if low<carea<=up:
img=cv2.drawContours(img,contours[i],-1,(0,225,0),5)
c=c+1
M=cv2.moments(c)
print(M)
r,g,b=(img1[200,400])
print(r)
print(g)
print(b)
plt.imshow(img1)
plt.show()
def bv2rgb(bv):
if bv < -0.40: bv = -0.40
if bv > 2.00: bv = 2.00
r = 0.0
g = 0.0
b = 0.0
if -0.40 <= bv<0.00:
t=(bv+0.40)/(0.00+0.40)
r=0.61+(0.11*t)+(0.1*t*t)
elif 0.00 <= bv<0.40:
t=(bv-0.00)/(0.40-0.00)
r=0.83+(0.17*t)
elif 0.40 <= bv<2.10:
t=(bv-0.40)/(2.10-0.40)
r=1.00
if -0.40 <= bv<0.00:
t=(bv+0.40)/(0.00+0.40)
g=0.70+(0.07*t)+(0.1*t*t)
elif 0.00 <= bv<0.40:
t=(bv-0.00)/(0.40-0.00)
g=0.87+(0.11*t)
elif 0.40 <= bv<1.60:
t=(bv-0.40)/(1.60-0.40)
g=0.98-(0.16*t)
elif 1.60 <= bv<2.00:
t=(bv-1.60)/(2.00-1.60)
g=0.82-(0.5*t*t)
if -0.40 <= bv<0.40:
t=(bv+0.40)/(0.40+0.40)
b=1.00
elif 0.40 <= bv<1.50:
t=(bv-0.40)/(1.50-0.40)
b=1.00-(0.47*t)+(0.1*t*t)
elif 1.50 <= bv<1.94:
t=(bv-1.50)/(1.94-1.50)
b=0.63-(0.6*t*t)
return (r*255, g*255, b*255)
def color():
img=cv2.imread("ESO.jpg")
img1=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img
plt.imshow(img1)
plt.show()
rx,gx,bx=img1[75,80]
up_temp=int(input("Enter the upper temperature limit"))
up_temp=up_temp-1500
low_temp=int(input("Enter the lower temperature limit"))
low_temp=low_temp-1500
BVU=((math.sqrt((math.pow(2.13506*up_temp-(1.84*4600), 2))-3.3856*up_temp*(1.054*up_temp-2.32*4600)))-(2.13506*up_temp-8464))/(1.6928*up_temp)
BVL=((math.sqrt((math.pow(2.13506*low_temp-(1.84*4600), 2))-3.3856*low_temp*(1.054*low_temp-2.32*4600)))-(2.13506*low_temp-8464))/(1.6928*low_temp)
rl,gl,bl=bv2rgb(BVL)
r2,g2,b2=bv2rgb(BVU)
up=np.array([r2,g2,b2],np.uint8)
low=np.array([rl,gl,bl],np.uint8)
rows,cols=img.shape[:2]
c=0
print(up)
print(low)
maxr=max(rl,r2)
maxb=max(bl,b2)
maxg=max(gl,g2)
minr=min(rl,r2)
ming=min(gl,g2)
minb=min(bl,b2)
print("max r= ")
print(maxr)
print("min r= ")
print(minr)
k=int(input("Enter 1 if you want to plot data with calibrated system and 0 otherwise"))
if(k):
print("Calibrating System corresponding to obtained image")
minr,ming,minb=calibrate(minr,ming,minb)
maxr,maxg,maxb=calibrate(maxr, maxg, maxb)
print("max r= ")
print(maxr)
print("min r= ")
print(minr)
maxr=int(max(maxr,minr))
maxb=int(max(maxb,minb))
maxg=int(max(maxg,ming))
minr=int(min(maxr,minr))
minb=int(min(maxb,minb))
ming=int(min(maxg,ming))
else:
print("You have chosen to obtain data without calibrating the system")
for i in range(rows):
for j in range(cols):
x,y,z=img1[i,j]
if minr<=x<=maxr and ming<=y<=maxg and minb<=z<=maxb:
img1[i:i+25,j:j+25]=(0,255,0)
c=c+1
print(c)
plt.imshow(img1)
plt.show()
cv2.imwrite('Eso_temperature.jpg',img1)
def interpolate(x1,y1,x2,y2,z):
newz=((z-x1)*y1)/(x2-x1) + ((z-x2)*y2)/(x1-x2)
return newz
def calibrate(r,g,b):
img=cv2.imread("ZOOM.jpg")
img1=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
plt.imshow(img1)
plt.show()
rows,cols=img.shape[:2]
img2=cv2.imread("m54.jpg")
img2=cv2.cvtColor(img2,cv2.COLOR_BGR2RGB)
plt.imshow(img2)
plt.show()
BV1=float(input("Enter the BV index of the star to calibrate the software"))
rc1,gc1,bc1=bv2rgb(BV1)
ri1,gi1,bi1=img1[58,60]
BV2=float(input("Enter the BV index of the second star to calibrate the software"))
rc2,gc2,bc2=bv2rgb(BV2)
ri2,gi2,bi2=img2[18,18]
plt.imshow(img1)
plt.show()
print(rc1)
print(ri1)
n_r=interpolate(rc1,ri1,rc2,ri2,r)
n_g=interpolate(gc1,gi1,gc2,gi2,g)
n_b=interpolate(bc1,bi1,bc2,bi2,b)
print(r)
print(n_r)
return(n_r,n_g,n_b)
def main():
f=1
while(f):
print("Welcome to the Starscape Module!")
print("Based on your image data we'll help you perform three operations: ")
print("1. We'll help you track stars based on apparent brightness based on your range.")
print("2. We'll help you track stars based on their redshift.")
print("3. You may access our temperature database corresponding to RGB gradient values.")
print("4. We'll allow you to plot stars based on your image within specific temperature range")
print("5. You may exit the module.")
c=int(input("Enter your choice"))
if c==1:
onbrightness()
elif c==2:
shiftbased()
elif c==3:
temperaturedatabase()
elif c==4:
print("You shall be asked to calibrate your system based on your image")
print("Choose to calibrate the system if you know the BV index of at least two stars in the system")
print("Else proceed without Calibration.")
color()
elif c==5:
f=0
print("Exiting the module")
break
else:
print("Wrong Input, Try again!")
if f==0:
print("Thank You for using the starscape module!")
return 0
if __name__ == "__main__":
main()
avg_line_length: 30.029979 | max_line_length: 156 | alphanum_fraction: 0.524672
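A quick illustrative call of the `bv2rgb()` helper defined above, assuming the module's own dependencies (cv2, matplotlib, numpy, PIL) are installed so it imports cleanly: the Sun's B-V colour index is roughly 0.65, which the piecewise fit maps to a slightly warm white (rounded, approximate values).

```python
from Starscape_Module import bv2rgb

r, g, b = bv2rgb(0.65)
print(round(r), round(g), round(b))   # ~ 255 241 229
```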
hexsha: 6707e7cc993cfcd8fbe4318878d3e7c3d80cabd9 | size: 2,475 | ext: py | lang: Python
repo_path: tests/cli/helpers/xlsx_output.py | repo_name: nflexfo/plaso | repo_head_hexsha: 5da7aa51c39b593773687fdf20a93ba35fc492b4 | repo_licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 27 (2019-04-05T12:01:49.000Z to 2022-02-08T02:26:25.000Z) | max_issues_count: null | max_forks_count: 8 (2019-11-28T08:06:34.000Z to 2020-08-29T13:53:30.000Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the XLSX output module CLI arguments helper."""
from __future__ import unicode_literals
import argparse
import unittest
from plaso.cli.helpers import xlsx_output
from plaso.lib import errors
from plaso.output import xlsx
from tests.cli import test_lib as cli_test_lib
from tests.cli.helpers import test_lib
class XLSXOutputArgumentsHelperTest(test_lib.OutputModuleArgumentsHelperTest):
  """Tests the XLSX output module CLI arguments helper."""

  # pylint: disable=no-member,protected-access

  _EXPECTED_OUTPUT = """\
usage: cli_helper.py [--fields FIELDS] [--additional_fields ADDITIONAL_FIELDS]
                     [--timestamp_format TIMESTAMP_FORMAT]

Test argument parser.

optional arguments:
  --additional_fields ADDITIONAL_FIELDS
                        Defines extra fields to be included in the output, in
                        addition to the default fields, which are datetime,tim
                        estamp_desc,source,source_long,message,parser,display_
                        name,tag.
  --fields FIELDS       Defines which fields should be included in the output.
  --timestamp_format TIMESTAMP_FORMAT
                        Set the timestamp format that will be used in the
                        datetimecolumn of the XLSX spreadsheet.
"""

  def testAddArguments(self):
    """Tests the AddArguments function."""
    argument_parser = argparse.ArgumentParser(
        prog='cli_helper.py',
        description='Test argument parser.', add_help=False,
        formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)

    xlsx_output.XLSXOutputArgumentsHelper.AddArguments(argument_parser)

    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_OUTPUT)

  def testParseOptions(self):
    """Tests the ParseOptions function."""
    options = cli_test_lib.TestOptions()

    output_mediator = self._CreateOutputMediator()
    output_module = xlsx.XLSXOutputModule(output_mediator)

    with self.assertRaises(errors.BadConfigOption):
      xlsx_output.XLSXOutputArgumentsHelper.ParseOptions(
          options, output_module)

    options.write = 'plaso.xlsx'

    xlsx_output.XLSXOutputArgumentsHelper.ParseOptions(
        options, output_module)

    with self.assertRaises(errors.BadConfigObject):
      xlsx_output.XLSXOutputArgumentsHelper.ParseOptions(
          options, None)


if __name__ == '__main__':
  unittest.main()
avg_line_length: 33.445946 | max_line_length: 78 | alphanum_fraction: 0.719192
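A minimal sketch of what the helper under test does when attached to a plain argparse parser; it mirrors `testAddArguments` above, and the argument values and resulting attribute names are assumptions.

```python
import argparse

from plaso.cli.helpers import xlsx_output

parser = argparse.ArgumentParser(prog='cli_helper.py', add_help=False)
xlsx_output.XLSXOutputArgumentsHelper.AddArguments(parser)

options = parser.parse_args(
    ['--fields', 'datetime,message', '--timestamp_format', '%Y-%m-%d %H:%M:%S'])
print(options.fields, options.timestamp_format)
```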
hexsha: 188e04189f8c14d7e3a5531d77eee6e4cf664ad3 | size: 3,799 | ext: py | lang: Python
repo_path: examples/arm_example.py | repo_head_hexsha: 1f2b32ac2b2974bc3e751dd114716847c8650242 | repo_licenses: ["BSD-3-Clause"]
max_stars/max_issues repo_name: Gepetto/supaero2021 | max_forks repo_name: nmansard/supaero2021
max_stars_count: 9 (2021-01-08T18:13:19.000Z to 2021-12-29T22:22:19.000Z) | max_issues_count: 1 (2021-09-08T07:22:31.000Z to 2021-09-08T07:22:31.000Z) | max_forks_count: 2 (2021-01-07T20:36:37.000Z to 2021-04-16T15:22:53.000Z)
'''
# In this example test, we will solve the reaching-goal task with the Talos arm.
# For that, we use the forward dynamics (with its analytical derivatives)
# developed inside crocoddyl; it is described inside the DifferentialActionModelFullyActuated class.
# Finally, we use a symplectic Euler integration scheme.
'''
import sys
WITHDISPLAY = 'display' in sys.argv
WITHPLOT = 'plot' in sys.argv
import crocoddyl
import pinocchio
import numpy as np
import example_robot_data
# First, let's load the Pinocchio model for the Talos arm.
robot = example_robot_data.load('talos_arm')
robot_model = robot.model
robot_model.armature =np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.])*5
robot_model.q0 = robot_model.referenceConfigurations['half_sitting']
robot_model.x0 = np.concatenate([robot_model.q0, pinocchio.utils.zero(robot_model.nv)])
# Configure task
FRAME_TIP = robot_model.getFrameId("gripper_left_fingertip_3_link")
GOAL = np.array([.2,0.5,.5])
DT = 1e-2
T = 100
# Configure viewer
from utils.meshcat_viewer_wrapper import MeshcatVisualizer
viz = MeshcatVisualizer(robot,'classical')
viz.display(robot_model.q0)
viz.addBox('world/box',[.1,.1,.1], [1.,0,0,1])
viz.addBox('world/goal',[.1,.1,.1],[0,1,0,1])
viz.applyConfiguration('world/goal',[0.2,0.5,.5,0,0,0,1])
# Create a cost model per the running and terminal action model.
state = crocoddyl.StateMultibody(robot_model)
runningCostModel = crocoddyl.CostModelSum(state)
terminalCostModel = crocoddyl.CostModelSum(state)
# Reaching cost term
pref = crocoddyl.FrameTranslation(FRAME_TIP,GOAL)
goalTrackingCost = crocoddyl.CostModelFrameTranslation(state, pref)
#Mref = crocoddyl.FramePlacement(FRAME_TIP,pinocchio.SE3(np.eye(3), GOAL))
#goalTrackingCost = crocoddyl.CostModelFramePlacement(state, Mref)
runningCostModel.addCost("gripperPose", goalTrackingCost, .001)
terminalCostModel.addCost("gripperPose", goalTrackingCost, 10)
# Regularization cost term
weights=crocoddyl.ActivationModelWeightedQuad(np.array([1,1,1,1,1,1,1, 1,1,1,1,2,2,2.]))
xRegCost = crocoddyl.CostModelState(state,weights,robot_model.x0)
uRegCost = crocoddyl.CostModelControl(state)
runningCostModel.addCost("xReg", xRegCost, 1e-3)
runningCostModel.addCost("uReg", uRegCost, 1e-6)
# Next, we need to create an action model for running and terminal knots. The
# forward dynamics (computed using ABA) are implemented
# inside DifferentialActionModelFullyActuated.
actuationModel = crocoddyl.ActuationModelFull(state)
runningModel = crocoddyl.IntegratedActionModelEuler(
crocoddyl.DifferentialActionModelFreeFwdDynamics(state, actuationModel, runningCostModel), DT)
runningModel.differential.armature = robot_model.armature
terminalModel = crocoddyl.IntegratedActionModelEuler(
crocoddyl.DifferentialActionModelFreeFwdDynamics(state, actuationModel, terminalCostModel), 0.)
terminalModel.differential.armature = robot_model.armature
# For this optimal control problem, we define 250 knots (or running action
# models) plus a terminal knot
T = 100
problem = crocoddyl.ShootingProblem(robot_model.x0, [runningModel] * T, terminalModel)
# Creating the DDP solver for this OC problem, defining a logger
ddp = crocoddyl.SolverDDP(problem)
ddp.setCallbacks([
crocoddyl.CallbackLogger(),
crocoddyl.CallbackVerbose(),
])
# Solving it with the DDP algorithm
ddp.solve([],[],1000) # xs_init,us_init,maxiter
# Plotting the solution and the DDP convergence
if WITHPLOT:
    log = ddp.getCallbacks()[0]
    crocoddyl.plotOCSolution(log.xs, log.us, figIndex=1, show=False)
    crocoddyl.plotConvergence(log.costs, log.u_regs, log.x_regs, log.grads, log.stops, log.steps, figIndex=2)

# Visualizing the solution in gepetto-viewer
if WITHDISPLAY:
    import utils.croco_utils as crocutils
    crocutils.displayTrajectory(viz,ddp.xs,ddp.problem.runningModels[0].dt,12)
avg_line_length: 40.414894 | max_line_length: 109 | alphanum_fraction: 0.785996
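A hedged post-processing sketch for the DDP solve above: pull the optimised state and control trajectories out of the solver. It would run after `ddp.solve(...)` in the same script; `ddp.xs` and `ddp.us` follow what the script itself uses, while `ddp.cost` is assumed from the crocoddyl solver API.

```python
import numpy as np

xs = np.array(ddp.xs)   # (T+1) x nx array of states [q, v], one per knot
us = np.array(ddp.us)   # T x nu array of joint torques
print(xs.shape, us.shape)
print("cost of the returned trajectory:", ddp.cost)
```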
hexsha: dba131c9d95f87099c334912924b79b465d3e5cd | size: 186 | ext: py | lang: Python
repo_path: forms/blog.py | repo_name: anthill-gaming/anthill-admin | repo_head_hexsha: e3c29a9bd7c04d2c6ce29528578a93395adf59e0 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 (2018-11-30T21:56:14.000Z to 2018-11-30T21:56:14.000Z) | max_issues_count: null | max_forks_count: null
from anthill.framework.forms import Form
from anthill.framework.utils.translation import translate as _
class BlogPostForm(Form):
    pass


class BlogPostCategoryForm(Form):
    pass
avg_line_length: 16.909091 | max_line_length: 62 | alphanum_fraction: 0.790323
hexsha: dec2452cbdf3a25e5699df3e05d3eff36720cf1a | size: 3,675 | ext: py | lang: Python
repo_path: challenges/HighFrequencyTradingAlgo/poller/for-testing/machine.py | repo_name: vaibhavbsharma/cb-multios | repo_head_hexsha: 02accd8338714fb57f1b78cac30e1034df042e25 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 (2019-11-23T21:53:46.000Z to 2019-11-23T21:53:46.000Z) | max_issues_count: null | max_forks_count: 1 (2019-12-02T20:53:55.000Z to 2019-12-02T20:53:55.000Z)
#!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions
import string
import random
import itertools
import math
from struct import *
class TemplateGenerator(Actions):

    def start(self):
        starting_balance = 1000
        record_str = pack('HH', 0, starting_balance)
        self.write(record_str)
        dataset = {'sampleSize' : 0.0, 'mean' : 0.0, 'Q' : 0.0, 'variance' : 0.0, 'stdDev' : 0.0}
        for i in range(0, 500):
            while True:
                record = [random.normalvariate(50.0, 10), random.normalvariate(50.0, 10)]
                if (record[0] < 65000 and record[0] >= 1 and record[1] < 65000 and record[1] >= 1):
                    break
            dataset['sampleSize'] += 1
            priceRelative = record[0]/record[1]
            oldMean = dataset['mean']
            dataset['mean'] = oldMean + (priceRelative - oldMean) / dataset['sampleSize']
            dataset['Q'] = dataset['Q'] + (priceRelative - oldMean) * (priceRelative - dataset['mean'])
            dataset['variance'] = dataset['Q'] / dataset['sampleSize']
            dataset['stdDev'] = math.cgc_sqrt(dataset['variance'])
            record_str = pack('HH', int(record[0]), int(record[1]))
            self.write(record_str)
        for i in range(0,500):
            minRange = dataset['mean'] + dataset['stdDev']*2
            maxRange = dataset['mean'] + dataset['stdDev']*3
            priceRelative = random.uniform(minRange, maxRange)
            firstStock = random.uniform(40.0, 50.0)
            secondStock = firstStock/priceRelative - .1
            record = [firstStock, secondStock]
            dataset['sampleSize'] += 1
            oldMean = dataset['mean']
            dataset['Q'] = dataset['Q'] + (priceRelative - oldMean) * (priceRelative - dataset['mean'])
            dataset['variance'] = dataset['Q'] / dataset['sampleSize']
            dataset['stdDev'] = math.cgc_sqrt(dataset['variance'])
            record_str = pack('HH', record[0], record[1])
            self.write(record_str)
        for i in range(0,500):
            minRange = dataset['mean'] + dataset['stdDev']
            maxRange = dataset['mean'] + dataset['stdDev']*2
            priceRelative = random.uniform(minRange, maxRange)
            firstStock = random.uniform(40.0, 50.0)
            secondStock = firstStock/priceRelative - .1
            record = [secondStock, firstStock]
            dataset['sampleSize'] += 1
            oldMean = dataset['mean']
            dataset['Q'] = dataset['Q'] + (priceRelative - oldMean) * (priceRelative - dataset['mean'])
            dataset['variance'] = dataset['Q'] / dataset['sampleSize']
            dataset['stdDev'] = math.cgc_sqrt(dataset['variance'])
            record_str = pack('HH', record[0], record[1])
            self.write(record_str)
        record_str = pack('hh', -1, -1)
        self.write(record_str)
        self.read(delim="\n", expect="You doubled your money!")

    def quit(self):
        pass
avg_line_length: 39.945652 | max_line_length: 95 | alphanum_fraction: 0.697143
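The poller above maintains a running mean and variance of the price relative with Welford's update; this is a stand-alone restatement of that update with made-up price pairs, using plain `math.sqrt` instead of the challenge's patched `math.cgc_sqrt`.

```python
import math

n = mean = Q = 0.0
for first, second in [(50.0, 48.0), (51.0, 47.5), (49.5, 50.5)]:
    price_relative = first / second
    n += 1
    old_mean = mean
    mean += (price_relative - old_mean) / n
    Q += (price_relative - old_mean) * (price_relative - mean)

variance = Q / n
print(mean, variance, math.sqrt(variance))
```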
hexsha: 32042a73644a8de3fa96585d853cd6785414890d | size: 2,371 | ext: py | lang: Python
repo_path: setup.py | repo_name: qaprosoft/zafira-pytest | repo_head_hexsha: 711fd8574cf35c95ad1c56ae057d4351c2aaa32c | repo_licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 (2021-03-29T03:45:42.000Z to 2021-03-29T03:45:42.000Z) | max_issues_count: 2 (2021-06-01T23:58:30.000Z to 2021-11-15T17:49:02.000Z) | max_forks_count: 1 (2019-07-25T11:53:34.000Z to 2019-07-25T11:53:34.000Z)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup, find_packages
def read(fname):
    file_path = os.path.join(os.path.dirname(__file__), fname)
    return codecs.open(file_path, encoding='utf-8').read()
setup(
name='pytest-zafira',
version='1.0.2',
author='Vadim Delendik',
author_email='vdelendik@qaprosoft.com',
maintainer='Vadim Delendik',
maintainer_email='vdelendik@qaprosoft.com',
license='Apache Software License 2.0',
url='https://github.com/qaprosoft/zafira-pytest',
description='A Zafira plugin for pytest',
long_description=read('README.rst'),
packages=find_packages(),
py_modules=['pytest_zafira'],
python_requires='!=2.*.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
install_requires=[
'allure-python-commons==2.5.4',
'atomicwrites==1.2.1',
'attrs==18.2.0',
'boto3==1.9.106',
'botocore==1.12.106',
'certifi==2018.11.29',
'chardet==3.0.4',
'configparser==3.5.0',
'docutils==0.14',
'idna==2.8',
'jmespath==0.9.4',
'more-itertools==4.3.0',
'pika==1.0.1',
'pluggy==0.7.1',
'py==1.6.0',
'pytest==4.1.1',
'python-dateutil==2.8.0',
'PyYAML==3.13',
'requests==2.21.0',
's3transfer==0.2.0',
'selenium==3.14.0',
'six==1.11.0',
'urllib3==1.23',
],
keywords=['pytest', 'zafira'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
],
entry_points={
'pytest11': [
'zafira = pytest_zafira',
],
},
)
avg_line_length: 30.792208 | max_line_length: 75 | alphanum_fraction: 0.557149
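A hedged check, using the Python 3.10+ `importlib.metadata` API, that the `pytest11` entry point declared above is visible once the package is installed; this entry point group is the hook pytest uses to auto-load the plugin.

```python
from importlib.metadata import entry_points

for ep in entry_points(group='pytest11'):
    if ep.name == 'zafira':
        print(ep.name, '->', ep.value)   # zafira -> pytest_zafira
```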
hexsha: 392e02f6ebc560c4ec7600b23d66d62ed24055fa | size: 1,135 | ext: py | lang: Python
repo_path: setup.py | repo_name: adir-intsights/sergeant | repo_head_hexsha: 76229b045309a3d795ac760d9f08da04b5e0a750 | repo_licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
import setuptools
setuptools.setup(
name='sergeant',
version='0.17.1',
author='Gal Ben David',
author_email='gal@intsights.com',
url='https://github.com/Intsights/sergeant',
project_urls={
'Source': 'https://github.com/Intsights/sergeant',
},
license='MIT',
description='Fast, Safe & Simple Asynchronous Task Queues Written In Pure Python',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='tasks worker queue redis async',
python_requires='>=3.7',
zip_safe=False,
install_requires=[
'hiredis==1.*',
'msgpack==1.*',
'orjson==3.*',
'psutil==5.*',
'pymongo==3.*',
'redis==3.*',
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
],
package_data={},
packages=setuptools.find_packages(),
)
avg_line_length: 26.395349 | max_line_length: 86 | alphanum_fraction: 0.58326
hexsha: 448643dd67771edeea6aa75054f66c6c806ca18c | size: 1,408 | ext: py | lang: Python
repo_path: launch/launch/substitutions/__init__.py | repo_name: bedieber/launch | repo_head_hexsha: 4dfe69763379e405df7a21bde536aad7e39fdd93 | repo_licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-03-06T09:31:38.000Z to 2020-03-06T09:31:38.000Z)
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for substitutions."""
from .environment_variable import EnvironmentVariable
from .find_executable import FindExecutable
from .launch_configuration import LaunchConfiguration
from .local_substitution import LocalSubstitution
from .path_join_substitution import PathJoinSubstitution
from .python_expression import PythonExpression
from .substitution_failure import SubstitutionFailure
from .text_substitution import TextSubstitution
from .this_launch_file import ThisLaunchFile
from .this_launch_file_dir import ThisLaunchFileDir
__all__ = [
'EnvironmentVariable',
'FindExecutable',
'LaunchConfiguration',
'LocalSubstitution',
'PathJoinSubstitution',
'PythonExpression',
'SubstitutionFailure',
'TextSubstitution',
'ThisLaunchFile',
'ThisLaunchFileDir',
]
avg_line_length: 35.2 | max_line_length: 74 | alphanum_fraction: 0.793324
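A tiny illustrative composition of the substitutions re-exported above (ROS 2 launch). Substitutions are only evaluated inside a running launch context, so this just shows construction; the path pieces are made up.

```python
from launch.substitutions import EnvironmentVariable, PathJoinSubstitution, TextSubstitution

config_path = PathJoinSubstitution([
    EnvironmentVariable('HOME'),
    TextSubstitution(text='.config'),
    TextSubstitution(text='app.yaml'),
])
```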
hexsha: ce7818e80d5a56c6aa48046ad8ef5fb808f6012e | size: 7,864 | ext: py | lang: Python
repo_path: source/rttov_test/profile-datasets-py/varying101lev_o3/001.py | repo_name: bucricket/projectMAScorrection | repo_head_hexsha: 89489026c8e247ec7c364e537798e766331fe569 | repo_licenses: ["BSD-3-Clause"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: 1 (2022-03-12T12:19:59.000Z to 2022-03-12T12:19:59.000Z) | max_forks_count: null
"""
Profile ../profile-datasets-py/varying101lev_o3/001.py
file automatically created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/varying101lev_o3/001.py"
self["Q"] = numpy.array([ 1.34824700e+00, 2.45593100e+00, 3.65560800e+00,
4.63755400e+00, 5.32968500e+00, 5.77722700e+00,
5.99996400e+00, 5.99996400e+00, 5.99996400e+00,
5.99996400e+00, 5.93161200e+00, 5.79571900e+00,
5.64363400e+00, 5.50439800e+00, 5.31213100e+00,
5.13088800e+00, 4.96225500e+00, 4.80505400e+00,
4.65733100e+00, 4.51875100e+00, 4.38790900e+00,
4.26377900e+00, 4.14693900e+00, 4.03492200e+00,
3.90555900e+00, 3.77107300e+00, 3.64157400e+00,
3.52881300e+00, 3.42580000e+00, 3.32625600e+00,
3.24302100e+00, 3.21057400e+00, 3.07437300e+00,
2.89693200e+00, 2.83812700e+00, 2.77226800e+00,
2.69119600e+00, 2.63766700e+00, 2.61254200e+00,
2.59999300e+00, 2.59999300e+00, 2.62480900e+00,
2.69030200e+00, 2.75417300e+00, 2.81572400e+00,
2.87586400e+00, 2.92343800e+00, 2.96232700e+00,
3.00372200e+00, 3.36782800e+00, 3.72431300e+00,
4.16917700e+00, 4.95686400e+00, 5.72886300e+00,
6.69739100e+00, 8.03079500e+00, 9.33885100e+00,
1.35847500e+01, 2.00101200e+01, 2.63185300e+01,
3.75052900e+01, 5.23583000e+01, 6.69516100e+01,
9.53947600e+01, 1.33620600e+02, 1.71201800e+02,
2.24074700e+02, 2.94464500e+02, 3.63705600e+02,
4.45886900e+02, 5.55371500e+02, 6.63121600e+02,
7.72479600e+02, 9.33617900e+02, 1.09226900e+03,
1.24849700e+03, 1.46988400e+03, 1.71038400e+03,
1.94728000e+03, 2.22789300e+03, 2.58697200e+03,
2.94073800e+03, 3.28932000e+03, 3.60400700e+03,
3.90984700e+03, 4.21134700e+03, 4.75947100e+03,
5.89418200e+03, 7.01133700e+03, 8.11136500e+03,
9.63843600e+03, 1.14121800e+04, 1.31569700e+04,
1.48735400e+04, 1.60240900e+04, 1.70720700e+04,
1.81048300e+04, 1.91258100e+04, 2.07111900e+04,
2.22722500e+04, 2.38095700e+04])
self["P"] = numpy.array([ 4.65000000e-03, 1.41971800e-02, 3.29790600e-02,
6.49001300e-02, 1.14247000e-01, 1.85633800e-01,
2.83960300e-01, 4.14377000e-01, 5.82255900e-01,
7.93166000e-01, 1.05285200e+00, 1.36721200e+00,
1.74228700e+00, 2.18423800e+00, 2.69933600e+00,
3.29394600e+00, 3.97452200e+00, 4.74758700e+00,
5.61972900e+00, 6.59758800e+00, 7.68785100e+00,
8.89723800e+00, 1.02325000e+01, 1.17004100e+01,
1.33077600e+01, 1.50613300e+01, 1.69679200e+01,
1.90343300e+01, 2.12673300e+01, 2.36736900e+01,
2.62601600e+01, 2.90334500e+01, 3.20002600e+01,
3.51672300e+01, 3.85410000e+01, 4.21281400e+01,
4.59351800e+01, 4.99685900e+01, 5.42348100e+01,
5.87402200e+01, 6.34911100e+01, 6.84937500e+01,
7.37543200e+01, 7.92789400e+01, 8.50736500e+01,
9.11444300e+01, 9.74971800e+01, 1.04137700e+02,
1.11071800e+02, 1.18305000e+02, 1.25843100e+02,
1.33691300e+02, 1.41855300e+02, 1.50340200e+02,
1.59151300e+02, 1.68293700e+02, 1.77772500e+02,
1.87592500e+02, 1.97758800e+02, 2.08275800e+02,
2.19148500e+02, 2.30381200e+02, 2.41978400e+02,
2.53944400e+02, 2.66283600e+02, 2.79000000e+02,
2.92097700e+02, 3.05580500e+02, 3.19452300e+02,
3.33716800e+02, 3.48377700e+02, 3.63438300e+02,
3.78902100e+02, 3.94772300e+02, 4.11052100e+02,
4.27744600e+02, 4.44852700e+02, 4.62379200e+02,
4.80326900e+02, 4.98698300e+02, 5.17495900e+02,
5.36722200e+02, 5.56379300e+02, 5.76469500e+02,
5.96994700e+02, 6.17957000e+02, 6.39358100e+02,
6.61199700e+02, 6.83483500e+02, 7.06210800e+02,
7.29383100e+02, 7.53001700e+02, 7.77067600e+02,
8.01581900e+02, 8.26545500e+02, 8.51959200e+02,
8.77823700e+02, 9.04139700e+02, 9.30907500e+02,
9.58127500e+02, 9.85800000e+02])
self["T"] = numpy.array([ 177.5644, 189.8424, 206.8676, 221.5139, 234.6645, 246.7519,
255.8482, 261.8739, 266.0489, 269.3991, 269.7899, 267.0981,
263.279 , 259.5196, 256.0189, 252.7334, 249.6418, 246.7914,
244.1324, 241.6379, 239.2827, 237.0484, 234.9452, 232.9289,
231.0489, 229.2669, 227.551 , 225.8612, 224.213 , 222.6203,
221.0934, 219.6657, 218.2788, 216.9266, 215.5152, 213.8792,
211.7713, 209.7139, 207.7039, 205.7543, 203.8627, 202.0548,
200.352 , 198.6885, 197.0472, 195.4434, 195.3158, 196.1714,
197.025 , 199.4645, 201.853 , 204.203 , 206.5448, 208.84 ,
211.1692, 213.5969, 215.9785, 218.27 , 220.4846, 222.6588,
224.849 , 227.0429, 229.1985, 231.405 , 233.6381, 235.8337,
237.9941, 240.1204, 242.2123, 244.287 , 246.3618, 248.4042,
250.4196, 252.4784, 254.506 , 256.5033, 258.4878, 260.4489,
262.3814, 264.3096, 266.2514, 268.1657, 270.0534, 271.9584,
273.8439, 275.7038, 277.5497, 279.3973, 281.2205, 283.0197,
284.372 , 285.4471, 286.5085, 287.5564, 289.0663, 290.6329,
292.18 , 293.7081, 295.2459, 296.7648, 298.2655])
self["O3"] = numpy.array([ 0.4897464 , 0.2855071 , 0.2155613 , 0.3535 , 0.6226628 ,
0.9329402 , 1.286764 , 1.696277 , 2.189535 , 2.682206 ,
3.244274 , 3.997278 , 4.89431 , 5.868968 , 6.901779 ,
7.799331 , 8.530079 , 9.084725 , 9.527897 , 9.74057 ,
9.805993 , 9.783587 , 9.569379 , 9.364016 , 8.945867 ,
8.441546 , 7.955922 , 7.311922 , 6.605545 , 5.922956 ,
5.246672 , 4.532837 , 3.923137 , 3.369391 , 2.781341 ,
2.289096 , 1.964808 , 1.701385 , 1.500383 , 1.293604 ,
1.080798 , 0.8755503 , 0.6790714 , 0.4930312 , 0.3904458 ,
0.2902115 , 0.2252402 , 0.1841733 , 0.1443294 , 0.1374842 ,
0.1307822 , 0.1240295 , 0.1167202 , 0.1095565 , 0.1034282 ,
0.0990401 , 0.09473543, 0.09005244, 0.0851061 , 0.0802498 ,
0.07583747, 0.07177954, 0.06779256, 0.06419853, 0.06090188,
0.05766083, 0.05504149, 0.05312127, 0.05123239, 0.04943556,
0.04779287, 0.0461762 , 0.04463123, 0.04386269, 0.04310602,
0.0423609 , 0.0416412 , 0.04093708, 0.04024352, 0.03956652,
0.03891056, 0.03826431, 0.03762753, 0.03702617, 0.03643735,
0.03585689, 0.03539397, 0.03519736, 0.03500379, 0.03481319,
0.03443275, 0.0339406 , 0.03345649, 0.0329802 , 0.03245426,
0.03192695, 0.03140729, 0.0308938 , 0.03013862, 0.02939502,
0.02866274])
self["CTP"] = 949.0
self["CFRACTION"] = 0.6
self["IDG"] = 1
self["ISH"] = 1
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 298.7
self["S2M"]["Q"] = 24323.6123443
self["S2M"]["O"] = 0.0279921555618
self["S2M"]["P"] = 950.0
self["S2M"]["U"] = 5.0
self["S2M"]["V"] = 2.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 0
self["SKIN"]["T"] = 302.0
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 45.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 40.0
self["SUNAZANGLE"] = 179.0
self["LATITUDE"] = 15.0
self["GAS_UNITS"] = 2
self["BE"] = 0.2
self["COSBK"] = 0.0
self["DATE"] = numpy.array([1949, 1, 1])
self["TIME"] = numpy.array([0, 0, 0])
| 53.135135
| 90
| 0.578586
|
374cfc040c347e887e0e47d0c755d389adc0b48f
| 3,110
|
py
|
Python
|
src/OTLMOW/PostenMapping/Model/Post060311414.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/PostenMapping/Model/Post060311414.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/PostenMapping/Model/Post060311414.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060311414(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.11414',
beschrijving='Bestrating van in rijen te leggen kasseien volgens 6-3.2, vierkante kasseien 14 x 14 cm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcAfmetingBxlInCm.breedte',
dotnotatie='afmetingVanBestratingselementBxl.breedte',
defaultWaarde='14',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/implementatieelement#DtcAfmetingBxlInCm.lengte',
dotnotatie='afmetingVanBestratingselementBxl.lengte',
defaultWaarde='14',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotatie='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanKassei',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotatie='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.11414')])
| 47.846154
| 124
| 0.585209
|
05a23266b0ed311e8c6b4fd0f0941c039839383e
| 2,065
|
py
|
Python
|
mmaction/models/losses/binary_logistic_regression_loss.py
|
hellock/mmaction2
|
def3b651ab7818ece637d8637dddacbca027910c
|
[
"Apache-2.0"
] | 1
|
2021-11-02T15:21:42.000Z
|
2021-11-02T15:21:42.000Z
|
mmaction/models/losses/binary_logistic_regression_loss.py
|
hellock/mmaction2
|
def3b651ab7818ece637d8637dddacbca027910c
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/losses/binary_logistic_regression_loss.py
|
hellock/mmaction2
|
def3b651ab7818ece637d8637dddacbca027910c
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from ..registry import LOSSES
def binary_logistic_regression_loss(reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Binary Logistic Regression Loss."""
label = label.view(-1).to(reg_score.device)
reg_score = reg_score.contiguous().view(-1)
pmask = (label > threshold).float().to(reg_score.device)
num_positive = max(torch.sum(pmask), 1)
num_entries = len(label)
ratio = num_entries / num_positive
# clip ratio value between ratio_range
ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
coef_0 = 0.5 * ratio / (ratio - 1)
coef_1 = 0.5 * ratio
loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (
1.0 - pmask) * torch.log(1.0 - reg_score + eps)
loss = -torch.mean(loss)
return loss
@LOSSES.register_module()
class BinaryLogisticRegressionLoss(nn.Module):
"""Binary Logistic Regression Loss.
It will calculate binary logistic regression loss given reg_score
and label.
"""
def forward(self,
reg_score,
label,
threshold=0.5,
ratio_range=(1.05, 21),
eps=1e-5):
"""Calculate Binary Logistic Regression Loss.
Args:
reg_score (torch.Tensor): Predicted score by model.
            label (torch.Tensor): Groundtruth labels.
threshold (float): Threshold for positive instances.
Default: 0.5.
ratio_range (tuple): Lower bound and upper bound for ratio.
Default: (1.05, 21)
eps (float): Epsilon for small value. Default: 1e-5.
Returns:
torch.Tensor: Returned binary logistic loss.
"""
return binary_logistic_regression_loss(reg_score, label, threshold,
ratio_range, eps)
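
# --- Hedged usage sketch (illustration only, not part of mmaction) -----------
# Exercises the loss above on random data: predictions are squashed to (0, 1)
# with a sigmoid so the log terms stay finite, and labels are binary. The batch
# size and values below are made up for demonstration.
def _demo_binary_logistic_regression_loss():
    scores = torch.sigmoid(torch.randn(16))   # fake confidence scores in (0, 1)
    labels = (torch.rand(16) > 0.5).float()   # fake binary ground truth
    loss = BinaryLogisticRegressionLoss()(scores, labels)
    return loss  # scalar tensor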
| 33.306452
| 75
| 0.554479
|
bd89cdc63995ff8e991e00d173762496c726efcd
| 44,671
|
py
|
Python
|
testing/test_doctest.py
|
blueyed/pytest
|
2b52e24a9fe013a043c36e3df3d62b4b4f6348f1
|
[
"MIT"
] | 3
|
2019-11-26T02:30:12.000Z
|
2020-04-15T17:49:07.000Z
|
testing/test_doctest.py
|
blueyed/pytest
|
2b52e24a9fe013a043c36e3df3d62b4b4f6348f1
|
[
"MIT"
] | 59
|
2019-10-22T04:34:22.000Z
|
2021-11-27T18:23:11.000Z
|
testing/test_doctest.py
|
blueyed/pytest
|
2b52e24a9fe013a043c36e3df3d62b4b4f6348f1
|
[
"MIT"
] | 1
|
2019-11-14T16:47:19.000Z
|
2019-11-14T16:47:19.000Z
|
import inspect
import textwrap
import pytest
from _pytest.compat import MODULE_NOT_FOUND_ERROR
from _pytest.compat import TYPE_CHECKING
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_mocked
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
if TYPE_CHECKING:
from _pytest.pytester import Testdir
class TestDoctests:
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(
test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
"""
)
for x in (testdir.tmpdir, checkfile):
# print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestTextfile)
# Empty file has no items.
items, reprec = testdir.inline_genitems(w)
assert len(items) == 0
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(
whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
"""
)
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(
whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
"""
)
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(
test_doc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
p = testdir.maketxtfile(
xdoc="""
>>> x = 1
>>> x == 1
False
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, testdir):
"""Test support for multiple --doctest-glob arguments (#1255).
"""
testdir.maketxtfile(
xdoc="""
>>> 1
1
"""
)
testdir.makefile(
".foo",
test="""
>>> 1
1
""",
)
testdir.maketxtfile(
test_normal="""
>>> 1
1
"""
)
expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
assert {x.basename for x in testdir.tmpdir.listdir()} == expected
args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])
@pytest.mark.parametrize(
" test_string, encoding",
[("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
)
def test_encoding(self, testdir, test_string, encoding):
"""Test support for doctest_encoding ini option.
"""
testdir.makeini(
"""
[pytest]
doctest_encoding={}
""".format(
encoding
)
)
doctest = """
>>> "{}"
{}
""".format(
test_string, repr(test_string)
)
testdir._makefile(".txt", [doctest], {}, encoding=encoding)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_doctest_unexpected_exception(self, testdir):
testdir.maketxtfile(
"""
>>> i = 0
>>> 0 / i
2
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"test_doctest_unexpected_exception.txt F *",
"",
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_unexpected_exception.txt _*",
"001 >>> i = 0",
"002 >>> 0 / i",
"UNEXPECTED EXCEPTION: ZeroDivisionError*",
"Traceback (most recent call last):",
' File "*/doctest.py", line *, in __run',
" *",
' File "<doctest test_doctest_unexpected_exception.txt[1]>", line 1, in <module>',
"ZeroDivisionError: division by zero",
"*/test_doctest_unexpected_exception.txt:2: UnexpectedException",
],
consecutive=True,
)
def test_doctest_outcomes(self, testdir):
testdir.maketxtfile(
test_skip="""
>>> 1
1
>>> import pytest
>>> pytest.skip("")
>>> 2
3
""",
test_xfail="""
>>> import pytest
>>> pytest.xfail("xfail_reason")
>>> foo
bar
""",
test_importorskip="""
>>> import pytest
>>> pytest.importorskip("doesnotexist")
>>> foo
bar
""",
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"collected 3 items",
"",
"test_importorskip.txt s *",
"test_skip.txt s *",
"test_xfail.txt x *",
"",
"*= 2 skipped, 1 xfailed in *",
]
)
def test_docstring_partial_context_around_error(self, testdir):
"""Test that we show some context before the actual line of a failing
doctest.
"""
testdir.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
text-line-3
text-line-4
text-line-5
text-line-6
text-line-7
text-line-8
text-line-9
text-line-10
text-line-11
>>> 1 + 1
3
text-line-after
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_partial_context_around_error*",
"005*text-line-3",
"006*text-line-4",
"013*text-line-11",
"014*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
# lines below should be trimmed out
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, testdir):
"""Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
"""
testdir.makepyfile(
'''
def foo():
"""
text-line-1
text-line-2
>>> 1 + 1
3
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*docstring_full_context_around_error*",
"003*text-line-1",
"004*text-line-2",
"006*>>> 1 + 1",
"Expected:",
" 3",
"Got:",
" 2",
]
)
def test_doctest_linedata_missing(self, testdir):
testdir.tmpdir.join("hello.py").write(
textwrap.dedent(
"""\
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""
)
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
["*hello*", "006*>>> 1/0*", "*UNEXPECTED*ZeroDivision*", "*1 failed*"]
)
def test_doctest_linedata_on_property(self, testdir):
testdir.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_linedata_on_property.Sample.some_property _*",
"004 ",
"005 >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_linedata_on_property.py:5: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_no_linedata_on_overriden_property(self, testdir: "Testdir") -> None:
testdir.makepyfile(
"""
class Sample(object):
@property
def some_property(self):
'''
>>> Sample().some_property
'another thing'
'''
return 'something'
some_property = property(some_property.__get__, None, None, some_property.__doc__)
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(
[
"*= FAILURES =*",
"*_ [[]doctest[]] test_doctest_no_linedata_on_overriden_property.Sample.some_property _*",
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example",
"[?][?][?] >>> Sample().some_property",
"Expected:",
" 'another thing'",
"Got:",
" 'something'",
"",
"*/test_doctest_no_linedata_on_overriden_property.py: DocTestFailure",
"*= 1 failed in *",
]
)
def test_doctest_unex_importerror_only_txt(self, testdir):
testdir.maketxtfile(
"""
>>> import asdalsdkjaslkdjasd
>>>
"""
)
result = testdir.runpytest()
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*>>> import asdals*",
"*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR),
"{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR),
]
)
def test_doctest_unex_importerror_with_module(self, testdir):
testdir.tmpdir.join("hello.py").write(
textwrap.dedent(
"""\
import asdalsdkjaslkdjasd
"""
)
)
testdir.maketxtfile(
"""
>>> import hello
>>>
"""
)
result = testdir.runpytest("--doctest-modules")
# doctest is never executed because of error during hello.py collection
result.stdout.fnmatch_lines(
[
"*ERROR collecting hello.py*",
"*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
"*Interrupted: 1 error during collection*",
]
)
def test_doctestmodule(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> x = 1
>>> x == 1
False
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
p = testdir.mkpydir("hello")
p.join("__init__.py").write(
textwrap.dedent(
"""\
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""
)
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(
[
"003 *>>> i = 0",
"004 *>>> i + 1",
"*Expected:",
"* 2",
"*Got:",
"* 1",
"*:4: DocTestFailure",
]
)
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile(
"""
>>> i = 0
>>> i + 1
2
"""
)
result = testdir.runpytest(p, "-s")
result.stdout.fnmatch_lines(
[
"001 >>> i = 0",
"002 >>> i + 1",
"Expected:",
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure",
]
)
def test_txtfile_with_fixtures(self, testdir):
p = testdir.maketxtfile(
"""
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
testdir.makeini(
"""
[pytest]
usefixtures = myfixture
"""
)
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
"""
)
p = testdir.maketxtfile(
"""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
p = testdir.makepyfile(
"""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def unuseful():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
p = testdir.makepyfile(
"""
class MyClass(object):
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = testdir.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = testdir.makepyfile(
"""
class MyClass(object):
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
"""
)
p = testdir.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
testdir.makeini(
"""
[pytest]
doctest_optionflags = ELLIPSIS
"""
)
p = testdir.maketxtfile(
xdoc="""
>>> a = "foo "
>>> print(a)
foo
"""
)
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, testdir):
"""Fix internal error with docstrings containing non-ascii characters.
"""
testdir.makepyfile(
'''\
def foo():
"""
>>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
'anything'
"""
'''
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])
def test_ignore_import_errors_on_doctest(self, testdir):
p = testdir.makepyfile(
"""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
"""
)
reprec = testdir.inline_run(
p, "--doctest-modules", "--doctest-ignore-import-errors"
)
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
"""
#713: Fix --junit-xml option when used with --doctest-modules.
"""
p = testdir.makepyfile(
"""
def foo():
'''
>>> 1 + 1
3
'''
pass
"""
)
reprec = testdir.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_unicode_doctest(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
"""
p = testdir.maketxtfile(
test_unicode_doctest="""
.. doctest::
>>> print(
... "Hi\\n\\nByé")
Hi
...
Byé
>>> 1/0 # Byé
1
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
)
def test_unicode_doctest_module(self, testdir):
"""
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
"""
p = testdir.makepyfile(
test_unicode_doctest_module="""
def fix_bad_unicode(text):
'''
>>> print(fix_bad_unicode('único'))
único
'''
return "único"
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_print_unicode_value(self, testdir):
"""
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
"""
p = testdir.maketxtfile(
test_print_unicode_value=r"""
Here is a doctest::
>>> print('\xE5\xE9\xEE\xF8\xFC')
åéîøü
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_reportinfo(self, testdir):
"""
Test case to make sure that DoctestItem.reportinfo() returns lineno.
"""
p = testdir.makepyfile(
test_reportinfo="""
def foo(x):
'''
>>> foo('a')
'b'
'''
return 'c'
"""
)
items, reprec = testdir.inline_genitems(p, "--doctest-modules")
reportinfo = items[0].reportinfo()
assert reportinfo[1] == 1
def test_valid_setup_py(self, testdir):
"""
        Test to make sure that pytest ignores valid setup.py files when run
        with --doctest-modules
"""
p = testdir.makepyfile(
setup="""
from setuptools import setup, find_packages
setup(name='sample',
version='0.0',
description='description',
packages=find_packages()
)
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 0 items*"])
def test_invalid_setup_py(self, testdir):
"""
        Test to make sure that pytest reads setup.py files that are not used
        for python packages when run with --doctest-modules
"""
p = testdir.makepyfile(
setup="""
def test_foo():
return 'bar'
"""
)
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines(["*collected 1 item*"])
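
# --- Hedged illustration (not collected by pytest: name does not start with "test_") ---
# The behaviour exercised above can also be driven outside this suite. The sketch
# below writes a tiny text doctest and lets pytest collect it via --doctest-glob,
# mirroring the command-line flags used throughout TestDoctests. The file name and
# directory are arbitrary example values; pass any pathlib.Path when calling it.
def _demo_doctest_glob(tmp_path):
    doc = tmp_path / "xdoc_demo.txt"
    doc.write_text(">>> 1 + 1\n2\n")
    # returns pytest's exit code (0 means the collected doctest passed)
    return pytest.main(["--doctest-glob=xdoc_*.txt", str(tmp_path)])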
class TestLiterals:
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_unicode(self, testdir, config_mode):
"""Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
"""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_UNICODE
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_UNICODE"
testdir.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii') {comment}
'12'
""".format(
comment=comment
)
)
testdir.makepyfile(
foo="""
def foo():
'''
>>> b'12'.decode('ascii') {comment}
'12'
'''
""".format(
comment=comment
)
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_allow_bytes(self, testdir, config_mode):
"""Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
"""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = ALLOW_BYTES
"""
)
comment = ""
else:
comment = "#doctest: +ALLOW_BYTES"
testdir.maketxtfile(
test_doc="""
>>> b'foo' {comment}
'foo'
""".format(
comment=comment
)
)
testdir.makepyfile(
foo="""
def foo():
'''
>>> b'foo' {comment}
'foo'
'''
""".format(
comment=comment
)
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=2)
def test_unicode_string(self, testdir):
"""Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
"""
testdir.maketxtfile(
test_doc="""
>>> b'12'.decode('ascii')
'12'
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_bytes_literal(self, testdir):
"""Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
"""
testdir.maketxtfile(
test_doc="""
>>> b'foo'
'foo'
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
def test_number_re(self) -> None:
_number_re = _get_checker()._number_re # type: ignore
for s in [
"1.",
"+1.",
"-1.",
".1",
"+.1",
"-.1",
"0.1",
"+0.1",
"-0.1",
"1e5",
"+1e5",
"1e+5",
"+1e+5",
"1e-5",
"+1e-5",
"-1e-5",
"1.2e3",
"-1.2e-3",
]:
print(s)
m = _number_re.match(s)
assert m is not None
assert float(m.group()) == pytest.approx(float(s))
for s in ["1", "abc"]:
print(s)
assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_number_precision(self, testdir, config_mode):
"""Test the NUMBER option."""
if config_mode == "ini":
testdir.makeini(
"""
[pytest]
doctest_optionflags = NUMBER
"""
)
comment = ""
else:
comment = "#doctest: +NUMBER"
testdir.maketxtfile(
test_doc="""
Scalars:
>>> import math
>>> math.pi {comment}
3.141592653589793
>>> math.pi {comment}
3.1416
>>> math.pi {comment}
3.14
>>> -math.pi {comment}
-3.14
>>> math.pi {comment}
3.
>>> 3. {comment}
3.0
>>> 3. {comment}
3.
>>> 3. {comment}
3.01
>>> 3. {comment}
2.99
>>> .299 {comment}
.3
>>> .301 {comment}
.3
>>> 951. {comment}
1e3
>>> 1049. {comment}
1e3
>>> -1049. {comment}
-1e3
>>> 1e3 {comment}
1e3
>>> 1e3 {comment}
1000.
Lists:
>>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
[3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
>>> [[0.333, 0.667], [0.999, 1.333]] {comment}
[[0.33, 0.667], [0.999, 1.333]]
>>> [[[0.101]]] {comment}
[[[0.1]]]
Doesn't barf on non-numbers:
>>> 'abc' {comment}
'abc'
>>> None {comment}
""".format(
comment=comment
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"expression,output",
[
# ints shouldn't match floats:
("3.0", "3"),
("3e0", "3"),
("1e3", "1000"),
("3", "3.0"),
# Rounding:
("3.1", "3.0"),
("3.1", "3.2"),
("3.1", "4.0"),
("8.22e5", "810000.0"),
# Only the actual output is rounded up, not the expected output:
("3.0", "2.98"),
("1e3", "999"),
# The current implementation doesn't understand that numbers inside
# strings shouldn't be treated as numbers:
pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
],
)
def test_number_non_matches(self, testdir, expression, output):
testdir.maketxtfile(
test_doc="""
>>> {expression} #doctest: +NUMBER
{output}
""".format(
expression=expression, output=output
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=0, failed=1)
def test_number_and_allow_unicode(self, testdir):
testdir.maketxtfile(
test_doc="""
>>> from collections import namedtuple
>>> T = namedtuple('T', 'a b c')
>>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
T(a=0.233, b=u'str', c='bytes')
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
class TestDoctestSkips:
"""
If all examples in a doctest are skipped due to the SKIP option, then
the tests should be SKIPPED rather than PASSED. (#957)
"""
@pytest.fixture(params=["text", "module"])
def makedoctest(self, testdir, request):
def makeit(doctest):
mode = request.param
if mode == "text":
testdir.maketxtfile(doctest)
else:
assert mode == "module"
testdir.makepyfile('"""\n%s"""' % doctest)
return makeit
def test_one_skipped(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
4
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=1)
def test_one_skipped_failed(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
200
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(failed=1)
def test_all_skipped(self, testdir, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2 # doctest: +SKIP
200
"""
)
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(skipped=1)
def test_vacuous_all_skipped(self, testdir, makedoctest):
makedoctest("")
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
def test_continue_on_failure(self, testdir):
testdir.maketxtfile(
test_something="""
>>> i = 5
>>> def foo():
... raise ValueError('error1')
>>> foo()
>>> i
>>> i + 2
7
>>> i + 1
"""
)
result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure")
result.assert_outcomes(passed=0, failed=1)
        # The lines that contain the failure are 4, 5, and 8. The first one
# is a stack trace and the other two are mismatches.
result.stdout.fnmatch_lines(
["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
)
class TestDoctestAutoUseFixtures:
SCOPES = ["module", "session", "class", "function"]
def test_doctest_module_session_fixture(self, testdir):
"""Test that session fixtures are initialized for doctest modules (#768)
"""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
testdir.makeconftest(
"""
import pytest
import sys
@pytest.yield_fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
"""
)
testdir.makepyfile(
foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*2 passed*"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("enable_doctest", [True, False])
def test_fixture_scopes(self, testdir, scope, enable_doctest):
"""Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope
)
)
testdir.makepyfile(
test_1='''
def test_foo():
"""
>>> getfixture('auto') + 1
100
"""
def test_bar():
assert 1
'''
)
params = ("--doctest-modules",) if enable_doctest else ()
passes = 3 if enable_doctest else 2
result = testdir.runpytest(*params)
result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("autouse", [True, False])
@pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
def test_fixture_module_doctest_scopes(
self, testdir, scope, autouse, use_fixture_in_doctest
):
"""Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse={autouse}, scope="{scope}")
def auto(request):
return 99
""".format(
scope=scope, autouse=autouse
)
)
if use_fixture_in_doctest:
testdir.maketxtfile(
test_doc="""
>>> getfixture('auto')
99
"""
)
else:
testdir.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = testdir.runpytest("--doctest-modules")
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
def test_auto_use_request_attributes(self, testdir, scope):
"""Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
if "{scope}" == 'module':
assert request.module is None
if "{scope}" == 'class':
assert request.cls is None
if "{scope}" == 'function':
assert request.function is None
return 99
""".format(
scope=scope
)
)
testdir.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = testdir.runpytest("--doctest-modules")
str(result.stdout.no_fnmatch_line("*FAILURES*"))
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
class TestDoctestNamespaceFixture:
SCOPES = ["module", "session", "class", "function"]
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_doctestfile(self, testdir, scope):
"""
Check that inserting something into the namespace works in a
simple text file doctest
"""
testdir.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = testdir.maketxtfile(
"""
>>> print(cl.__name__)
contextlib
"""
)
reprec = testdir.inline_run(p)
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize("scope", SCOPES)
def test_namespace_pyfile(self, testdir, scope):
"""
Check that inserting something into the namespace works in a
simple Python file docstring doctest
"""
testdir.makeconftest(
"""
import pytest
import contextlib
@pytest.fixture(autouse=True, scope="{scope}")
def add_contextlib(doctest_namespace):
doctest_namespace['cl'] = contextlib
""".format(
scope=scope
)
)
p = testdir.makepyfile(
"""
def foo():
'''
>>> print(cl.__name__)
contextlib
'''
"""
)
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
def _run_doctest_report(self, testdir, format):
testdir.makepyfile(
"""
def foo():
'''
>>> foo()
a b
0 1 4
1 2 4
2 3 6
'''
print(' a b\\n'
'0 1 4\\n'
'1 2 5\\n'
'2 3 6')
"""
)
return testdir.runpytest("--doctest-modules", "--doctest-report", format)
@pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
def test_doctest_report_udiff(self, testdir, format):
result = self._run_doctest_report(testdir, format)
result.stdout.fnmatch_lines(
[" 0 1 4", " -1 2 4", " +1 2 5", " 2 3 6"]
)
def test_doctest_report_cdiff(self, testdir):
result = self._run_doctest_report(testdir, "cdiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" ! 1 2 4",
" 2 3 6",
" --- 1,4 ----",
" a b",
" 0 1 4",
" ! 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_ndiff(self, testdir):
result = self._run_doctest_report(testdir, "ndiff")
result.stdout.fnmatch_lines(
[
" a b",
" 0 1 4",
" - 1 2 4",
" ? ^",
" + 1 2 5",
" ? ^",
" 2 3 6",
]
)
@pytest.mark.parametrize("format", ["none", "only_first_failure"])
def test_doctest_report_none_or_only_first_failure(self, testdir, format):
result = self._run_doctest_report(testdir, format)
result.stdout.fnmatch_lines(
[
"Expected:",
" a b",
" 0 1 4",
" 1 2 4",
" 2 3 6",
"Got:",
" a b",
" 0 1 4",
" 1 2 5",
" 2 3 6",
]
)
def test_doctest_report_invalid(self, testdir):
result = self._run_doctest_report(testdir, "obviously_invalid_format")
result.stderr.fnmatch_lines(
[
"*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
]
)
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, testdir):
pytest.importorskip(mock_module)
testdir.makepyfile(
"""
from {mock_module} import call
class Example(object):
'''
>>> 1 + 1
2
'''
""".format(
mock_module=mock_module
)
)
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["* 1 passed *"])
class Broken:
def __getattr__(self, _):
raise KeyError("This should be an AttributeError")
@pytest.mark.parametrize( # pragma: no branch (lambdas are not called)
"stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True]
)
def test_warning_on_unwrap_of_broken_object(stop):
bad_instance = Broken()
assert inspect.unwrap.__module__ == "inspect"
with _patch_unwrap_mock_aware():
assert inspect.unwrap.__module__ != "inspect"
with pytest.warns(
pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
):
with pytest.raises(KeyError):
inspect.unwrap(bad_instance, stop=stop)
assert inspect.unwrap.__module__ == "inspect"
| 29.900268
| 109
| 0.459403
|
4318b6573d5e641831509c74088e7c40c9018af3
| 6,443
|
py
|
Python
|
evd_ros_backend/evd_ros_core/src/evd_script/environment_nodes/reach_sphere.py
|
Wisc-HCI/CoFrame
|
7a54344248d80cb316d36aabd40bbd3cdbbc07eb
|
[
"MIT"
] | null | null | null |
evd_ros_backend/evd_ros_core/src/evd_script/environment_nodes/reach_sphere.py
|
Wisc-HCI/CoFrame
|
7a54344248d80cb316d36aabd40bbd3cdbbc07eb
|
[
"MIT"
] | null | null | null |
evd_ros_backend/evd_ros_core/src/evd_script/environment_nodes/reach_sphere.py
|
Wisc-HCI/CoFrame
|
7a54344248d80cb316d36aabd40bbd3cdbbc07eb
|
[
"MIT"
] | null | null | null |
'''
Simplification of the joint-configuration space that a robot can reach.
We can think of the robot's max reach as being bounded by a sphere. Tuning of
this sphere can further restrict the reachability region.
'''
from .environment_node import EnvironmentNode
from ..data_nodes.geometry import Position
from ..visualizable import VisualizeMarker, ColorTable
from ..node_parser import NodeParser
from ..type_defs import NUMBER_TYPE, STRING_TYPE
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Vector3
class ReachSphere(EnvironmentNode, VisualizeMarker):
'''
Constants
'''
GOOD_STATE = "good"
WARN_STATE = "warn"
ERROR_STATE = "error"
'''
Data structure methods
'''
@classmethod
def display_name(cls):
return 'Reach Sphere'
@classmethod
def type_string(cls, trailing_delim=True):
return 'reach-sphere' + ('.' if trailing_delim else '')
@classmethod
def full_type_string(cls):
return EnvironmentNode.full_type_string() + cls.type_string()
@classmethod
def template(cls):
template = EnvironmentNode.template()
template['fields'].append({
'type': NUMBER_TYPE,
'key': 'radius',
'is_uuid': False,
'is_list': False
})
template['fields'].append({
'type': Position.full_type_string(),
'key': 'offset',
'is_uuid': False,
'is_list': False
})
template['fields'].append({
'type': STRING_TYPE,
'key': 'link',
'is_uuid': False,
'is_list': False
})
return template
def __init__(self, radius=1, link=None, offset=None, type='', name='', parent=None,
uuid=None, append_type=True, editable=True, deleteable=True,
description=''):
self._radius = None
self._offset = None
self._link = None
super(ReachSphere,self).__init__(
type=ReachSphere.type_string() + type if append_type else type,
name=name,
uuid=uuid,
parent=parent,
append_type=append_type,
editable=editable,
deleteable=deleteable,
description=description)
self.radius = radius
        self.offset = offset if offset != None else Position(0,0,0, editable=editable, deleteable=False)
self.link = link
def to_dct(self):
msg = super(ReachSphere,self).to_dct()
msg.update({
'radius': self.radius,
'offset': self.offset.to_dct(),
'link': self.link
})
return msg
@classmethod
def from_dct(cls, dct):
return cls(radius=dct['radius'],
offset=NodeParser(dct['offset'], enforce_types=[Position.type_string(trailing_delim=False)]),
type=dct['type'] if 'type' in dct.keys() else '',
link=dct['link'],
append_type=not 'type' in dct.keys(),
editable=dct['editable'],
deleteable=dct['deleteable'],
description=dct['description'],
uuid=dct['uuid'] if 'uuid' in dct.keys() else None,
name=dct['name'] if 'name' in dct.keys() else '')
def to_ros_marker(self, frame_id='app', id=0, state='good'):
# The frame_id should be app
if state == self.GOOD_STATE:
color = ColorTable.GOOD_COLOR
elif state == self.WARN_STATE:
color = ColorTable.WARN_COLOR
elif state == self.ERROR_STATE:
color = ColorTable.ERROR_COLOR
else:
raise Exception('State {} is not a valid state'.format(state))
marker = Marker()
marker.header.frame_id = frame_id if self.link == None else self.link
marker.type = Marker.SPHERE
marker.ns = 'reach_sphere'
marker.id = id
marker.pose.position = self.offset.to_ros()
marker.pose.orientation.w = 1
marker.scale = Vector3(self.radius*2,self.radius*2,self.radius*2)
marker.color = color
return marker
'''
Data accessor/modifier methods
'''
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
if self._radius != value:
if value < 0:
                raise Exception('Radius must be a positive number')
self._radius = value
self.updated_attribute('radius','set')
@property
def link(self):
return self._link
@link.setter
def link(self, value):
if self._link != value:
self._link = value
self.updated_attribute('link','set')
@property
def offset(self):
return self._offset
@offset.setter
def offset(self, value):
if self._offset != value:
if self._offset != None:
self._offset.remove_from_cache()
self._offset = value
self._offset.parent = self
self.updated_attribute('offset','set')
def set(self, dct):
if 'radius' in dct.keys():
self.radius = dct['radius']
if 'offset' in dct.keys():
self.offset = NodeParser(dct['offset'], enforce_types=[Position.type_string(trailing_delim=False)])
if 'link' in dct.keys():
self.link = dct['link']
super(ReachSphere,self).set(dct)
'''
Cache Methods
'''
def remove_from_cache(self):
self.offset.remove_from_cache()
super(ReachSphere,self).remove_from_cache()
def add_to_cache(self):
self.offset.add_to_cache()
super(ReachSphere,self).add_to_cache()
'''
Update Methods
'''
def late_construct_update(self):
self.offset.late_construct_update()
super(ReachSphere,self).late_construct_update()
def deep_update(self):
self.offset.deep_update()
super(ReachSphere,self).deep_update()
self.updated_attribute('radius','update')
self.updated_attribute('offset','update')
self.updated_attribute('link','update')
def shallow_update(self):
super(ReachSphere,self).shallow_update()
self.updated_attribute('radius','update')
self.updated_attribute('offset','update')
self.updated_attribute('link','update')
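
# --- Hedged usage sketch (illustration only, not part of the EvD runtime) ----
# Shows the intended round trip for the node above: construct a sphere bounding
# the robot's reach, serialize it, and render it as an RViz marker. The radius,
# link name, and marker state are made-up example values, and construction
# assumes the surrounding EvD node/cache machinery is importable as in this file.
def _example_reach_sphere():
    sphere = ReachSphere(radius=0.85, link='base_link')  # ~0.85 m reach, attached to base_link
    as_dict = sphere.to_dct()                            # JSON-serializable representation
    marker = sphere.to_ros_marker(id=1, state=ReachSphere.WARN_STATE)
    return as_dict, marker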
| 28.135371
| 112
| 0.586838
|
fcfd0db7450f539811fa087104b9bc5c3f354b03
| 2,069
|
py
|
Python
|
tests/test_presenter.py
|
WqyJh/auto-changelog
|
884fa133bb13013b694646472b2b113d6be2abc4
|
[
"MIT"
] | 1
|
2019-08-21T10:41:17.000Z
|
2019-08-21T10:41:17.000Z
|
tests/test_presenter.py
|
WqyJh/auto-changelog
|
884fa133bb13013b694646472b2b113d6be2abc4
|
[
"MIT"
] | null | null | null |
tests/test_presenter.py
|
WqyJh/auto-changelog
|
884fa133bb13013b694646472b2b113d6be2abc4
|
[
"MIT"
] | null | null | null |
import pytest
from textwrap import dedent
from auto_changelog.domain_model import Changelog
from auto_changelog.presenter import MarkdownPresenter
@pytest.fixture(params=['', 'Title'])
def title(request):
return request.param
@pytest.fixture(params=['', 'Description'])
def description(request):
return request.param
@pytest.fixture
def empty_changelog(title, description):
return Changelog(title, description)
@pytest.fixture
def changelog(title, description):
return Changelog(title, description)
@pytest.fixture
def markdown_presenter():
return MarkdownPresenter()
def test_markdown_presenter_empty_changelog(empty_changelog, markdown_presenter):
markdown = markdown_presenter.present(empty_changelog)
assert '# {}\n\n{}'.format(empty_changelog.title, empty_changelog.description) in markdown
def test_markdown_presenter_changelog_with_features(changelog, markdown_presenter):
changelog.add_release('Unreleased', None, None)
changelog.add_note('', 'feat', 'description')
changelog.add_note('', 'feat', 'description', scope='scope')
description = '{}\n'.format(changelog.description) if changelog.description else ''
assert_markdown = dedent('''\
# {}
{}
## Unreleased
#### Features
* description
* (scope): description
'''.format(changelog.title, description))
markdown = markdown_presenter.present(changelog)
assert assert_markdown in markdown
def test_markdown_presenter_changelog_with_fixes(changelog, markdown_presenter):
changelog.add_release('Unreleased', None, None)
changelog.add_note('', 'fix', 'description')
changelog.add_note('', 'fix', 'description', scope='scope')
description = '{}\n'.format(changelog.description) if changelog.description else ''
assert_markdown = dedent('''\
# {}
{}
## Unreleased
#### Fixes
* description
* (scope): description
'''.format(changelog.title, description))
markdown = markdown_presenter.present(changelog)
assert assert_markdown in markdown
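
# --- Hedged usage sketch (not collected by pytest) ----------------------------
# A direct, fixture-free illustration of the API exercised above: build a
# Changelog, add one release with a couple of notes, and render it to Markdown.
# The title, release name, and note texts are arbitrary example values.
def _demo_markdown_presentation():
    changelog = Changelog('Demo', 'Example changelog')
    changelog.add_release('Unreleased', None, None)
    changelog.add_note('', 'feat', 'add demo command')
    changelog.add_note('', 'fix', 'handle empty input', scope='cli')
    return MarkdownPresenter().present(changelog)  # Markdown string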
| 27.586667
| 94
| 0.724021
|
7b7a56bfbd16c14a0355f53d31879a7403f88f52
| 82,466
|
py
|
Python
|
src/transformers/pipelines.py
|
amoux/transformers
|
fa5423b1695cd24856bcff47214172e0f540d924
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/pipelines.py
|
amoux/transformers
|
fa5423b1695cd24856bcff47214172e0f540d924
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/pipelines.py
|
amoux/transformers
|
fa5423b1695cd24856bcff47214172e0f540d924
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import logging
import os
import pickle
import sys
from abc import ABC, abstractmethod
from contextlib import contextmanager
from itertools import chain
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import numpy as np
from .configuration_auto import AutoConfig
from .configuration_utils import PretrainedConfig
from .data import SquadExample, squad_convert_examples_to_features
from .file_utils import is_tf_available, is_torch_available
from .modelcard import ModelCard
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
if is_tf_available():
import tensorflow as tf
from .modeling_tf_auto import (
TFAutoModel,
TFAutoModelForSequenceClassification,
TFAutoModelForQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
if is_torch_available():
import torch
from .modeling_auto import (
AutoModel,
AutoModelForSequenceClassification,
AutoModelForQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoModelForSeq2SeqLM,
)
if TYPE_CHECKING:
from .modeling_utils import PreTrainedModel
from .modeling_tf_utils import TFPreTrainedModel
logger = logging.getLogger(__name__)
def get_framework(model=None):
""" Select framework (TensorFlow/PyTorch) to use.
If both frameworks are installed and no specific model is provided, defaults to using PyTorch.
"""
if is_tf_available() and is_torch_available() and model is not None and not isinstance(model, str):
# Both framework are available but the user supplied a model class instance.
# Try to guess which framework to use from the model classname
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
elif not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
else:
# framework = 'tf' if is_tf_available() else 'pt'
framework = "pt" if is_torch_available() else "tf"
return framework
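
# --- Hedged illustration (not part of the library) ---------------------------
# With both backends installed, get_framework() guesses from the class-name prefix
# of a supplied model instance; with only one backend, that backend is returned
# regardless. The dummy class below is a made-up example.
def _demo_get_framework():
    class TFDummy:  # the "TF" name prefix is all the heuristic inspects
        pass
    return get_framework(), get_framework(TFDummy())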
class PipelineException(Exception):
"""
Raised by pipelines when handling __call__
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling varargs for each Pipeline
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class DefaultArgumentHandler(ArgumentHandler):
"""
Default varargs argument parser handling parameters for each Pipeline
"""
@staticmethod
def handle_kwargs(kwargs: Dict) -> List:
if len(kwargs) == 1:
output = list(kwargs.values())
else:
output = list(chain(kwargs.values()))
return DefaultArgumentHandler.handle_args(output)
@staticmethod
def handle_args(args: Sequence[Any]) -> List[str]:
# Only one argument, let's do case by case
if len(args) == 1:
if isinstance(args[0], str):
return [args[0]]
elif not isinstance(args[0], list):
return list(args)
else:
return args[0]
# Multiple arguments (x1, x2, ...)
elif len(args) > 1:
if all([isinstance(arg, str) for arg in args]):
return list(args)
            # If not an instance of list, then it should be an instance of Iterable
elif isinstance(args, Iterable):
return list(chain.from_iterable(chain(args)))
else:
raise ValueError(
"Invalid input type {}. Pipeline supports Union[str, Iterable[str]]".format(type(args))
)
else:
return []
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ValueError("Pipeline cannot handle mixed args and kwargs")
if len(kwargs) > 0:
return DefaultArgumentHandler.handle_kwargs(kwargs)
else:
return DefaultArgumentHandler.handle_args(args)
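
# --- Hedged illustration (not part of the library) ---------------------------
# DefaultArgumentHandler normalizes whatever a pipeline's __call__ receives into a
# flat list of inputs. The sample inputs below are arbitrary; mixing positional and
# keyword arguments raises, as enforced in __call__ above.
def _demo_default_argument_handler():
    handler = DefaultArgumentHandler()
    single = handler("one input")          # -> ["one input"]
    several = handler("first", "second")   # -> ["first", "second"]
    from_list = handler(["a", "b", "c"])   # -> ["a", "b", "c"]
    from_kwargs = handler(data=["x", "y"]) # -> ["x", "y"]
    return single, several, from_list, from_kwargs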
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing.
Supported data formats currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
    PipelineDataFormat also includes some utilities to work with multi-column data, like mapping from
    dataset columns to pipeline keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError("{} doesnt exist on disk".format(self.input_path))
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: dict):
"""
Save the provided data object with the representation for the current `DataFormat`.
:param data: data to store
:return:
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
:param data: data to store
:return: (str) Path where the data has been saved
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
class CsvPipelineDataFormat(PipelineDataFormat):
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
def __init__(
self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
Read data from piped input to the python process.
For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
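# Hedged sketch (illustrative file names, not from the original code): building a reader through
# PipelineDataFormat.from_str. With the kwarg=column syntax, each CSV row is yielded as a dict of
# pipeline keyword arguments; save() then writes predictions back out in the same format.
def _data_format_demo():
    reader = PipelineDataFormat.from_str(
        format="csv",
        output_path="predictions.csv",
        input_path="inputs.csv",
        column="question=question_text,context=context_text",
        overwrite=True,
    )
    rows = list(reader)  # each item: {"question": ..., "context": ...}
    return rows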
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations.
Pipeline workflow is defined as a sequence of the following operations:
Input -> Tokenization -> Model Inference -> Post-Processing (Task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument. Users can specify
the device argument as an integer, -1 meaning "CPU", >= 0 referring to the CUDA device ordinal.
Some pipelines, for instance FeatureExtractionPipeline ('feature-extraction'), output large
tensor objects as nested lists. In order to avoid dumping such large structures as textual data we
provide the binary_output constructor argument. If set to True, the output will be stored in the
pickle format.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag indicating if the output of the pipeline should happen in a binary format (i.e., pickle) or as raw text.
Return:
:obj:`List` or :obj:`Dict`:
Pipeline returns list or dictionary depending on:
- Whether the user supplied multiple samples
- Whether the pipeline exposes multiple fields in the output object
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
):
if framework is None:
framework = get_framework()
self.model = model
self.tokenizer = tokenizer
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
self.binary_output = binary_output
self._args_parser = args_parser or DefaultArgumentHandler()
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
def save_pretrained(self, save_directory):
"""
Save the pipeline's model and tokenizer to the specified save_directory
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
self.model.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.
example:
# Explicitly ask for tensor allocation on CUDA device :0
nlp = pipeline(..., device=0)
with nlp.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = nlp(...)
Returns:
Context manager
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
:param inputs:
:return:
"""
return {name: tensor.to(self.device) for name, tensor in inputs.items()}
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs, add_special_tokens=add_special_tokens, return_tensors=self.framework, padding=padding,
)
return inputs
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
return self._forward(inputs)
def _forward(self, inputs, return_tensors=False):
"""
Internal framework specific forward dispatching.
Args:
inputs: dict holding all the keyworded arguments for required by the model forward method.
return_tensors: Whether to return native framework (pt/tf) tensors rather than numpy array.
Returns:
Numpy array
"""
# Encode for forward
with self.device_placement():
if self.framework == "tf":
# TODO trace model
predictions = self.model(inputs.data, training=False)[0]
else:
with torch.no_grad():
inputs = self.ensure_tensor_on_device(**inputs)
predictions = self.model(**inputs)[0].cpu()
if return_tensors:
return predictions
else:
return predictions.numpy()
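# Hedged sketch: the _ScikitCompat surface implemented above. transform() and predict() both
# forward to __call__, so any concrete pipeline instance (here `nlp`, an assumption standing in
# for a subclass built with the pipeline() factory below) can be used in scikit-learn-style code.
def _scikit_compat_demo(nlp: Pipeline, texts):
    # All three calls go through the same tokenize -> forward -> post-process path.
    as_call = nlp(X=texts)
    as_transform = nlp.transform(X=texts)
    as_predict = nlp.predict(X=texts)
    return as_call, as_transform, as_predict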
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using Model head. This pipeline extracts the hidden states from the base transformer,
which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "feature-extraction", for extracting features of a sequence.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
`huggingface.co/models <https://huggingface.co/models>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs).tolist()
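# Hedged usage sketch for feature extraction; "distilbert-base-cased" is the task default
# registered in SUPPORTED_TASKS at the bottom of this file. The pipeline returns plain nested
# lists shaped [batch][token][hidden_size] (see the .tolist() call in __call__ above).
def _feature_extraction_demo():
    extractor = pipeline("feature-extraction", model="distilbert-base-cased")
    features = extractor("Pipelines turn text into hidden-state features.")
    return len(features[0]), len(features[0][0])  # (number of tokens, hidden size)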
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any ModelWithLMHead head. This pipeline predicts the words that will follow a specified text prompt.
This language generation pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "text-generation", for generating text from a specified prompt.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling objective,
which includes the uni-directional models in the library (e.g. gpt2).
See the list of available community models on
`huggingface.co/models <https://huggingface.co/models?search=&filter=lm-head>`__.
"""
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. """
ALLOWED_MODELS = [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"ReformerModelWithLMHead",
"GPT2LMHeadModel",
"OpenAIGPTLMHeadModel",
"CTRLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
"TFGPT2LMHeadModel",
"TFOpenAIGPTLMHeadModel",
"TFCTRLLMHeadModel",
]
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
tokenizer_kwargs = {"add_space_before_punct_symbol": True}
else:
tokenizer_kwargs = {}
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
**tokenizer_kwargs,
)
return inputs
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
if self.model.__class__.__name__ not in self.ALLOWED_MODELS:
raise NotImplementedError(
"Generation is currently not supported for {}. Please select a model from {} for generation.".format(
self.model.__class__.__name__, self.ALLOWED_MODELS
)
)
text_inputs = self._args_parser(*args)
results = []
for prompt_text in text_inputs:
# Manage correct placement of the tensors
with self.device_placement():
if self.model.__class__.__name__ in ["XLNetLMHeadModel", "TransfoXLLMHeadModel"]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
padding_text = self.PADDING_TEXT + self.tokenizer.eos_token
padding = self._parse_and_tokenize(padding_text, padding=False, add_special_tokens=False)
# This impacts the max_length and min_length arguments, which need adjusting.
padding_length = padding["input_ids"].shape[-1]
if "max_length" in generate_kwargs and generate_kwargs["max_length"] is not None:
generate_kwargs["max_length"] += padding_length
if "min_length" in generate_kwargs and generate_kwargs["min_length"] is not None:
generate_kwargs["min_length"] += padding_length
inputs = self._parse_and_tokenize(
padding_text + prompt_text, padding=False, add_special_tokens=False
)
else:
inputs = self._parse_and_tokenize(prompt_text, padding=False, add_special_tokens=False)
# set input_ids to None to allow empty prompt
if inputs["input_ids"].shape[-1] == 0:
inputs["input_ids"] = None
inputs["attention_mask"] = None
if self.framework == "pt" and inputs["input_ids"] is not None:
inputs = self.ensure_tensor_on_device(**inputs)
input_ids = inputs["input_ids"]
# Ensure that batch size = 1 (batch generation not allowed for now)
assert (
input_ids is None or input_ids.shape[0] == 1
), "Batch generation is currently not supported. See https://github.com/huggingface/transformers/issues/3021 for more information."
output_sequences = self.model.generate(input_ids=input_ids, **generate_kwargs) # BS x SL
result = []
for generated_sequence in output_sequences:
generated_sequence = generated_sequence.numpy().tolist()
record = {}
if return_tensors:
record["generated_token_ids"] = generated_sequence
if return_text:
# Decode text
text = self.tokenizer.decode(
generated_sequence,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
record["generated_text"] = prompt_text + text[prompt_length:]
result.append(record)
results += [result]
if len(results) == 1:
return results[0]
return results
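# Hedged usage sketch for text generation; "gpt2" is the default checkpoint registered in
# SUPPORTED_TASKS below, and any extra keyword arguments are forwarded to self.model.generate.
def _text_generation_demo():
    generator = pipeline("text-generation", model="gpt2")
    outputs = generator("Once upon a time,", max_length=30)
    return outputs[0]["generated_text"]  # one record per generated sequence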
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using ModelForSequenceClassification head. See the
`sequence classification usage <../usage.html#sequence-classification>`__ examples for more information.
This text classification pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "sentiment-analysis", for classifying sequences according to positive or negative sentiments.
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=text-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, return_all_scores: bool = False, **kwargs):
super().__init__(**kwargs)
self.return_all_scores = return_all_scores
def __call__(self, *args, **kwargs):
outputs = super().__call__(*args, **kwargs)
scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
if self.return_all_scores:
return [
[{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
for item in scores
]
else:
return [
{"label": self.model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores
]
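# Hedged usage sketch for the sentiment-analysis task; with no model argument, pipeline() falls
# back to the default checkpoint registered in SUPPORTED_TASKS below. Scores are the softmax
# values computed in __call__ above.
def _sentiment_demo():
    classifier = pipeline("sentiment-analysis")
    return classifier("I really enjoyed this movie!")  # e.g. [{"label": "POSITIVE", "score": 0.99...}]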
class FillMaskPipeline(Pipeline):
"""
Masked language modeling prediction pipeline using ModelWithLMHead head. See the
`masked language modeling usage <../usage.html#masked-language-modeling>`__ examples for more information.
This mask filling pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "fill-mask", for predicting masked tokens in a sequence.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=lm-head>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
topk=5,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
self.topk = topk
def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
numel = np.prod(masked_index.shape)
if numel > 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
)
elif numel < 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
)
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
outputs = self._forward(inputs, return_tensors=True)
results = []
batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
for i in range(batch_size):
input_ids = inputs["input_ids"][i]
result = []
if self.framework == "tf":
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index)
logits = outputs[i, masked_index.item(), :]
probs = tf.nn.softmax(logits)
topk = tf.math.top_k(probs, k=self.topk)
values, predictions = topk.values.numpy(), topk.indices.numpy()
else:
masked_index = (input_ids == self.tokenizer.mask_token_id).nonzero()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index.numpy())
logits = outputs[i, masked_index.item(), :]
probs = logits.softmax(dim=0)
values, predictions = probs.topk(self.topk)
for v, p in zip(values.tolist(), predictions.tolist()):
tokens = input_ids.numpy()
tokens[masked_index] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
result.append(
{
"sequence": self.tokenizer.decode(tokens),
"score": v,
"token": p,
"token_str": self.tokenizer.convert_ids_to_tokens(p),
}
)
# Append
results += [result]
if len(results) == 1:
return results[0]
return results
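# Hedged usage sketch for fill-mask; the prompt uses the tokenizer's own mask token so the same
# snippet works for BERT-style ([MASK]) and RoBERTa-style (<mask>) vocabularies. Exactly one mask
# per input is required, as enforced by ensure_exactly_one_mask_token above.
def _fill_mask_demo():
    unmasker = pipeline("fill-mask", model="distilroberta-base")
    prompt = "The goal of life is {}.".format(unmasker.tokenizer.mask_token)
    return unmasker(prompt)  # topk candidates, each with sequence/score/token/token_str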
class TokenClassificationPipeline(Pipeline):
"""
Named Entity Recognition pipeline using ModelForTokenClassification head. See the
`named entity recognition usage <../usage.html#named-entity-recognition>`__ examples for more information.
This token recognition pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "ner", for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous.
The models that this pipeline can use are models that have been fine-tuned on a token classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=token-classification>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "sequences"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
ignore_labels=["O"],
task: str = "",
grouped_entities: bool = False,
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=binary_output,
task=task,
)
self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
self.ignore_labels = ignore_labels
self.grouped_entities = grouped_entities
def __call__(self, *args, **kwargs):
inputs = self._args_parser(*args, **kwargs)
answers = []
for sentence in inputs:
# Manage correct placement of the tensors
with self.device_placement():
tokens = self.tokenizer(
sentence, return_attention_mask=False, return_tensors=self.framework, truncation=True,
)
# Forward
if self.framework == "tf":
entities = self.model(tokens.data)[0][0].numpy()
input_ids = tokens["input_ids"].numpy()[0]
else:
with torch.no_grad():
tokens = self.ensure_tensor_on_device(**tokens)
entities = self.model(**tokens)[0][0].cpu().numpy()
input_ids = tokens["input_ids"].cpu().numpy()[0]
score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
labels_idx = score.argmax(axis=-1)
entities = []
# Filter to labels not in `self.ignore_labels`
filtered_labels_idx = [
(idx, label_idx)
for idx, label_idx in enumerate(labels_idx)
if self.model.config.id2label[label_idx] not in self.ignore_labels
]
for idx, label_idx in filtered_labels_idx:
entity = {
"word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
"score": score[idx][label_idx].item(),
"entity": self.model.config.id2label[label_idx],
"index": idx,
}
entities += [entity]
# Append grouped entities
if self.grouped_entities:
answers += [self.group_entities(entities)]
# Append ungrouped entities
else:
answers += [entities]
if len(answers) == 1:
return answers[0]
return answers
def group_sub_entities(self, entities: List[dict]) -> dict:
"""
Returns grouped sub entities
"""
# Get the first entity in the entity group
entity = entities[0]["entity"]
scores = np.mean([entity["score"] for entity in entities])
tokens = [entity["word"] for entity in entities]
entity_group = {
"entity_group": entity,
"score": np.mean(scores),
"word": self.tokenizer.convert_tokens_to_string(tokens),
}
return entity_group
def group_entities(self, entities: List[dict]) -> List[dict]:
"""
Returns grouped entities
"""
entity_groups = []
entity_group_disagg = []
if entities:
last_idx = entities[-1]["index"]
for entity in entities:
is_last_idx = entity["index"] == last_idx
if not entity_group_disagg:
entity_group_disagg += [entity]
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
continue
# If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
# The split is meant to account for the "B" and "I" prefixes
if (
entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
and entity["index"] == entity_group_disagg[-1]["index"] + 1
):
entity_group_disagg += [entity]
# Group the entities at the last entity
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
# If the current entity is different from the previous entity, aggregate the disaggregated entity group
else:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
entity_group_disagg = [entity]
# If it's the last entity, add it to the entity groups
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
return entity_groups
NerPipeline = TokenClassificationPipeline
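# Hedged usage sketch for token classification (ner); grouped_entities=True merges adjacent
# B-/I- tagged tokens into entity spans through group_entities above. NerPipeline is the same
# class under its historical name.
def _ner_demo():
    ner = pipeline("ner", grouped_entities=True)
    return ner("Hugging Face is based in New York City.")  # entity_group / score / word per span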
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped
to internal SquadExample / SquadFeature structures.
QuestionAnsweringArgumentHandler manages all the possible ways to create a SquadExample from the command-line supplied
arguments.
"""
def __call__(self, *args, **kwargs):
# Position args, handling is sensibly the same as X and data, so forwarding to avoid duplicating
if args is not None and len(args) > 0:
if len(args) == 1:
kwargs["X"] = args[0]
else:
kwargs["X"] = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
if "X" in kwargs or "data" in kwargs:
inputs = kwargs["X"] if "X" in kwargs else kwargs["data"]
if isinstance(inputs, dict):
inputs = [inputs]
else:
# Copy to avoid overriding arguments
inputs = [i for i in inputs]
for i, item in enumerate(inputs):
if isinstance(item, dict):
if any(k not in item for k in ["question", "context"]):
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
inputs[i] = QuestionAnsweringPipeline.create_sample(**item)
elif not isinstance(item, SquadExample):
raise ValueError(
"{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(
"X" if "X" in kwargs else "data"
)
)
# Tabular input
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], str):
kwargs["question"] = [kwargs["question"]]
if isinstance(kwargs["context"], str):
kwargs["context"] = [kwargs["context"]]
inputs = [
QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(kwargs["question"], kwargs["context"])
]
else:
raise ValueError("Unknown arguments {}".format(kwargs))
if not isinstance(inputs, list):
inputs = [inputs]
return inputs
class QuestionAnsweringPipeline(Pipeline):
"""
Question Answering pipeline using ModelForQuestionAnswering head. See the
`question answering usage <../usage.html#question-answering>`__ examples for more information.
This question answering pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
the following task identifier(s):
- "question-answering", for answering questions given a context.
The models that this pipeline can use are models that have been fine-tuned on a question answering task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=question-answering>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
default_input_names = "question,context"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
device: int = -1,
task: str = "",
**kwargs
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
task=task,
**kwargs,
)
@staticmethod
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the SquadExample/SquadFeatures internally.
This helper method encapsulates all the logic for converting question(s) and context(s) to SquadExample(s).
We currently support extractive question answering.
Arguments:
question: (str, List[str]) The question(s) to be asked for the associated context
context: (str, List[str]) The context in which we will look for the answer.
Returns:
SquadExample initialized with the corresponding question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
def __call__(self, *args, **kwargs):
"""
Args:
We support multiple use-cases, the following are exclusive:
X: sequence of SquadExample
data: sequence of SquadExample
question: (str, List[str]), batch of question(s) to map along with context
context: (str, List[str]), batch of context(s) associated with the provided question keyword argument
Returns:
dict: {'answer': str, 'score': float, 'start': int, 'end': int}
answer: the textual answer in the initial context
score: the score the current answer scored for the model
start: the character index in the original string corresponding to the beginning of the answer's span
end: the character index in the original string corresponding to the end of the answer's span
"""
# Set defaults values
kwargs.setdefault("topk", 1)
kwargs.setdefault("doc_stride", 128)
kwargs.setdefault("max_answer_len", 15)
kwargs.setdefault("max_seq_len", 384)
kwargs.setdefault("max_question_len", 64)
kwargs.setdefault("handle_impossible_answer", False)
if kwargs["topk"] < 1:
raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))
if kwargs["max_answer_len"] < 1:
raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))
# Convert inputs to features
examples = self._args_parser(*args, **kwargs)
features_list = [
squad_convert_examples_to_features(
examples=[example],
tokenizer=self.tokenizer,
max_seq_length=kwargs["max_seq_len"],
doc_stride=kwargs["doc_stride"],
max_query_length=kwargs["max_question_len"],
is_training=False,
tqdm_enabled=False,
)
for example in examples
]
all_answers = []
for features, example in zip(features_list, examples):
model_input_names = self.tokenizer.model_input_names + ["input_ids"]
fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}
# Manage tensor allocation on correct device
with self.device_placement():
if self.framework == "tf":
fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
start, end = self.model(fw_args)[:2]
start, end = start.numpy(), end.numpy()
else:
with torch.no_grad():
# Retrieve the score for the context tokens only (removing question tokens)
fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
start, end = self.model(**fw_args)[:2]
start, end = start.cpu().numpy(), end.cpu().numpy()
min_null_score = 1000000 # large and positive
answers = []
for (feature, start_, end_) in zip(features, start, end):
# Mask padding and question
start_, end_ = (
start_ * np.abs(np.array(feature.p_mask) - 1),
end_ * np.abs(np.array(feature.p_mask) - 1),
)
# Mask CLS
start_[0] = end_[0] = 0
# Normalize logits and spans to retrieve the answer
start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))
if kwargs["handle_impossible_answer"]:
min_null_score = min(min_null_score, (start_[0] * end_[0]).item())
starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
answers += [
{
"score": score.item(),
"start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(
example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
),
}
for s, e, score in zip(starts, ends, scores)
]
if kwargs["handle_impossible_answer"]:
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
all_answers += answers
if len(all_answers) == 1:
return all_answers[0]
return all_answers
def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
"""
Take the output of any QuestionAnswering head and generate probabilities for each span to be
the actual answer.
In addition, it filters out some unwanted/impossible cases, like an answer length greater than
max_answer_len or an answer end position that comes before the start position.
The method supports outputting the k-best answers through the topk argument.
Args:
start: numpy array, holding individual start probabilities for each token
end: numpy array, holding individual end probabilities for each token
topk: int, indicates how many possible answer span(s) to extract from the model's output
max_answer_len: int, maximum size of the answer to extract from the model's output
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidates with end < start or end - start > max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
return start, end, candidates[0, start, end]
def span_to_answer(self, text: str, start: int, end: int):
"""
When decoding from token probabilities, this method maps token indexes to the actual words in
the initial context.
Args:
text: str, the actual context to extract the answer from
start: int, starting answer token index
end: int, ending answer token index
Returns:
dict: {'answer': str, 'start': int, 'end': int}
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
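# Hedged usage sketch for question answering; the question/context keywords follow the
# QuestionAnsweringArgumentHandler contract above, and topk/max_answer_len mirror the defaults
# set in __call__.
def _question_answering_demo():
    qa = pipeline("question-answering")
    return qa(
        question="Where can pipelines run?",
        context="Pipelines support running on CPU or GPU through the device argument.",
        topk=1,
    )  # {'answer': ..., 'score': ..., 'start': ..., 'end': ...}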
class SummarizationPipeline(Pipeline):
"""
Summarize news articles and other documents
Usage::
# use bart in pytorch
summarizer = pipeline("summarization")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
# use t5 in tf
summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
The models that this pipeline can use are models that have been fine-tuned on a summarization task,
which currently include '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=summarization>`__.
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __init__(self, **kwargs):
kwargs.update(task="summarization")
super().__init__(**kwargs)
def __call__(
self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Args:
*documents: (list of strings) articles to be summarized
return_text: (bool, default=True) whether to add a decoded "summary_text" to each result
return_tensors: (bool, default=False) whether to add the raw "summary_token_ids" to each result
clean_up_tokenization_spaces: (`optional`) bool whether to clean up potential extra spaces in the decoded output
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of dicts with 'summary_text' and/or 'summary_token_ids' for each document_to_summarize
.. _`self.model.generate`:
https://huggingface.co/transformers/model_doc/bart.html#transformers.BartForConditionalGeneration.generate
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
assert len(documents) > 0, "Please provide a document to summarize"
if self.framework == "tf" and "BartForConditionalGeneration" in self.model.__class__.__name__:
raise NotImplementedError(
"Tensorflow is not yet supported for Bart. Please consider using T5, e.g. `t5-base`"
)
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(documents[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
documents = ([prefix + document for document in documents[0]],)
padding = True
elif isinstance(documents[0], str):
documents = (prefix + documents[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
documents[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*documents, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
min_length = generate_kwargs.get("min_length", self.model.config.min_length)
if input_length < min_length // 2:
logger.warning(
"Your min_length is set to {}, but you input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
min_length, input_length
)
)
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length < max_length:
logger.warning(
"Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
max_length, input_length
)
)
summaries = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
results = []
for summary in summaries:
record = {}
if return_tensors:
record["summary_token_ids"] = summary
if return_text:
record["summary_text"] = self.tokenizer.decode(
summary, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
class TranslationPipeline(Pipeline):
"""
Translates from one language to another.
Usage::
en_fr_translator = pipeline("translation_en_to_fr")
en_fr_translator("How old are you?")
The models that this pipeline can use are models that have been fine-tuned on a translation task,
currently: "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b"
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=translation>`__.
Arguments:
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to :obj:`-1`):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
on the associated CUDA device id.
"""
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Args:
*args: (list of strings) texts to be translated
return_text: (bool, default=True) whether to add a decoded "translation_text" to each result
return_tensors: (bool, default=False) whether to add the raw "translation_token_ids" to each result
**generate_kwargs: extra kwargs passed to `self.model.generate`_
Returns:
list of dicts with 'translation_text' and/or 'translation_token_ids' for each text_to_translate
.. _`self.model.generate`:
https://huggingface.co/transformers/model_doc/bart.html#transformers.BartForConditionalGeneration.generate
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
args = ([prefix + text for text in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
args[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*args, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length > 0.9 * max_length:
logger.warning(
"Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
input_length, max_length
)
)
translations = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
)
results = []
for translation in translations:
record = {}
if return_tensors:
record["translation_token_ids"] = translation
if return_text:
record["translation_text"] = self.tokenizer.decode(
translation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
# Register all the supported tasks here
SUPPORTED_TASKS = {
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": TFAutoModel if is_tf_available() else None,
"pt": AutoModel if is_torch_available() else None,
"default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
},
"sentiment-analysis": {
"impl": TextClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "distilbert-base-uncased-finetuned-sst-2-english",
"tf": "distilbert-base-uncased-finetuned-sst-2-english",
},
},
},
"ner": {
"impl": TokenClassificationPipeline,
"tf": TFAutoModelForTokenClassification if is_tf_available() else None,
"pt": AutoModelForTokenClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
"tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
},
},
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
"pt": AutoModelForQuestionAnswering if is_torch_available() else None,
"default": {
"model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
},
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"}},
},
"summarization": {
"impl": SummarizationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "sshleifer/distilbart-cnn-12-6", "tf": "t5-small"}},
},
"translation_en_to_fr": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_de": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_ro": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"text-generation": {
"impl": TextGenerationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelWithLMHead if is_torch_available() else None,
"default": {"model": {"pt": "gpt2", "tf": "gpt2"}},
},
}
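# Hedged sketch of how the registry above is consumed: each task maps to an implementation
# class, a framework-specific auto-model class and a default checkpoint, which is exactly what
# the pipeline() factory below looks up.
def _registry_demo(task="sentiment-analysis", framework="pt"):
    targeted_task = SUPPORTED_TASKS[task]
    impl, model_class = targeted_task["impl"], targeted_task[framework]
    default_checkpoint = targeted_task["default"]["model"][framework]
    return impl, model_class, default_checkpoint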
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a pipeline.
Pipeline are made of:
- A Tokenizer instance in charge of mapping raw textual input to token
- A Model instance
- Some (optional) post processing for enhancing model's output
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- "feature-extraction": will return a :class:`~transformers.FeatureExtractionPipeline`
- "sentiment-analysis": will return a :class:`~transformers.TextClassificationPipeline`
- "ner": will return a :class:`~transformers.TokenClassificationPipeline`
- "question-answering": will return a :class:`~transformers.QuestionAnsweringPipeline`
- "fill-mask": will return a :class:`~transformers.FillMaskPipeline`
- "summarization": will return a :class:`~transformers.SummarizationPipeline`
- "translation_xx_to_yy": will return a :class:`~transformers.TranslationPipeline`
- "text-generation": will return a :class:`~transformers.TextGenerationPipeline`
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`,
a model identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default for this pipeline will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`, defaults to :obj:`None`):
The configuration that will be used by the pipeline to instantiate the model. This can be :obj:`None`,
a model identifier or an actual pre-trained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If :obj:`None`, the default for this pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a model identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default for this pipeline will be loaded.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
Returns:
:class:`~transformers.Pipeline`: Class inheriting from :class:`~transformers.Pipeline`, according to
the task.
Examples::
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
# Sentiment analysis pipeline
pipeline('sentiment-analysis')
# Question answering pipeline, specifying the checkpoint identifier
pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
# Named entity recognition pipeline, passing in a specific model and tokenizer
model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
pipeline('ner', model=model, tokenizer=tokenizer)
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
framework = framework or get_framework(model)
targeted_task = SUPPORTED_TASKS[task]
task_class, model_class = targeted_task["impl"], targeted_task[framework]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"][framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
elif isinstance(config, str):
tokenizer = config
else:
            # Impossible to guess which tokenizer is the right one here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
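# Illustrative usage of the factory above (not part of the original module); the
# default checkpoints resolved per task come from SUPPORTED_TASKS and are assumed
# here rather than guaranteed:
#
#     nlp = pipeline("sentiment-analysis")                  # task default model/tokenizer
#     nlp("This library is easy to use.")                   # -> [{'label': ..., 'score': ...}]
#
#     qa = pipeline("question-answering", framework="pt")   # force PyTorch explicitly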
| 43.403158
| 183
| 0.621298
|
4ada4f42196a47a71cf4e69c28f8a55c47848baf
| 4,231
|
py
|
Python
|
Python/Flask/Flask.py
|
xlui/real-rest
|
907948adbefd90dfd3349ce2542320b2c76e811c
|
[
"MIT"
] | null | null | null |
Python/Flask/Flask.py
|
xlui/real-rest
|
907948adbefd90dfd3349ce2542320b2c76e811c
|
[
"MIT"
] | null | null | null |
Python/Flask/Flask.py
|
xlui/real-rest
|
907948adbefd90dfd3349ce2542320b2c76e811c
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify, current_app
from flask_script import Manager, Shell
from app import db
from app.models import User
from app.my_exception import MyException
from conf.config import Config
app = Flask(__name__)
app.config.from_object(Config)
manager = Manager(app)
db.init_app(app)
@app.route('/', methods=['GET'])
def index():
users = User.query.all()
json = [user.get_json() for user in users]
return jsonify(json)
@app.route('/<user_id>', methods=['GET'])
def get_one(user_id):
user = User.query.filter_by(id=user_id).first()
if user:
return jsonify(user.get_json())
else:
raise MyException('user id is invalid!')
@app.route('/', methods=['POST'])
def post():
if not request.json or not ('username' in request.json and 'password' in request.json):
        raise MyException('request payload must be in JSON format and ALL fields of entity `user` should be included!')
user = User.query.get(request.json.get('username'))
if user:
raise MyException('username already exist!')
else:
user = User(username=request.json.get('username'),
password=request.json.get('password'))
db.session.add(user)
db.session.commit()
return jsonify(user.get_json())
@app.route('/<user_id>', methods=['PUT'])
def put(user_id):
if not request.json or 'username' not in request.json or 'password' not in request.json:
        raise MyException('request payload must be in JSON format and all fields of entity `user` should be included!')
user = User.query.get(user_id)
if user:
user.username = request.json.get('username')
user.password = request.json.get('password')
return jsonify(user.get_json())
else:
raise MyException('user id is invalid!')
@app.route('/<user_id>', methods=['PATCH'])
def patch(user_id):
if not request.json:
raise MyException('request payload must be JSON format!')
user = User.query.get(user_id)
if user:
        # check whether a username or password is provided
username = request.json.get('username')
password = request.json.get('password')
if not username and not password:
            raise MyException('Include at least one field of entity `user`!')
if username:
user.username = username
if password:
user.password = password
return jsonify(user.get_json())
else:
print('User id is invalid!')
raise MyException('user id is invalid!', 400)
@app.route('/<user_id>', methods=['DELETE'])
def delete(user_id):
user = User.query.get(user_id)
if user:
db.session.delete(user)
return jsonify({
'message': 'Successfully delete user {id}'.format(id=user_id)
})
else:
raise MyException('user id is invalid!')
@app.route('/login', methods=['POST'])
def login():
if not request.json or 'username' not in request.json:
raise MyException('request body must be in JSON format!')
user = User.query.get(request.json.get('username')) # type: User
if not user:
raise MyException('Username is invalid!')
if user.password == request.json.get('password'):
return jsonify({
'login': 'Success!',
'token': user.generate_token().decode('utf-8')
})
else:
raise MyException('Password is incorrect!')
@app.route('/verify', methods=['GET'])
def verify():
token = request.headers.get('authorization')
if not token:
raise MyException('Must include token in request header!')
user = User.verify_token(token) # type: User
if user:
return jsonify({
'verify': 'Success',
'user': user.username
})
else:
raise MyException('Token is invalid!')
@app.errorhandler(MyException)
def handle_my_exception(error: MyException):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
if __name__ == '__main__':
def make_shell_context():
return dict(User=User, app=current_app, db=db)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.run()
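# Illustrative requests against this API (host and port assume the Flask
# development server defaults; the credentials are placeholders):
#
#   curl http://127.0.0.1:5000/                      # list all users
#   curl -X POST http://127.0.0.1:5000/ \
#        -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "secret"}'
#   curl -X POST http://127.0.0.1:5000/login \
#        -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "secret"}'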
| 31.110294
| 116
| 0.643347
|
209f10f744900ce41741d69f8015192e2072a8d3
| 16,134
|
py
|
Python
|
kgtk/cli/calc.py
|
mann-brinson/kgtk
|
269e3b5c155e03acacbf48ccbdcc7b56a4f807aa
|
[
"MIT"
] | null | null | null |
kgtk/cli/calc.py
|
mann-brinson/kgtk
|
269e3b5c155e03acacbf48ccbdcc7b56a4f807aa
|
[
"MIT"
] | null | null | null |
kgtk/cli/calc.py
|
mann-brinson/kgtk
|
269e3b5c155e03acacbf48ccbdcc7b56a4f807aa
|
[
"MIT"
] | null | null | null |
"""
Perform calculations on KGTK file columns (while copying).
TODO: Need KgtkWriterOptions
"""
from argparse import Namespace, SUPPRESS
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
return {
'help': 'Perform calculations on KGTK file columns.',
'description': 'This command performs calculations on one or more columns in a KGTK file. ' +
'\nIf no input filename is provided, the default is to read standard input. ' +
                       '\n\nAdditional options are shown in expert help.\nkgtk --expert calc --help'
}
AVERAGE_OP: str = "average"
COPY_OP: str = "copy"
JOIN_OP: str = "join"
PERCENTAGE_OP: str = "percentage"
SET_OP: str = "set"
SUM_OP: str = "sum"
OPERATIONS: typing.List[str] = [ AVERAGE_OP,
COPY_OP,
JOIN_OP,
PERCENTAGE_OP,
SET_OP,
SUM_OP,
]
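# Example invocations of this command (illustrative only; the file and column
# names below are placeholders, not taken from this module):
#   kgtk calc -i input.tsv --do sum --columns c1 c2 --into total
#   kgtk calc -i input.tsv --do join --columns node1 node2 --into pair --values "|"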
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
# import modules locally
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
_expert: bool = parsed_shared_args._expert
# This helper function makes it easy to suppress options from
# The help message. The options are still there, and initialize
# what they need to initialize.
def h(msg: str)->str:
if _expert:
return msg
else:
return SUPPRESS
parser.add_input_file()
parser.add_output_file()
parser.add_argument( "--output-format", dest="output_format", help=h("The file format (default=kgtk)"), type=str)
parser.add_argument('-c', "--columns", dest="column_names", nargs='*',
metavar="COLUMN_NAME",
help="The list of source column names, optionally containing '..' for column ranges " +
"and '...' for column names not explicitly mentioned.")
parser.add_argument( "--into", dest="into_column_names",
help="The name of the column to receive the result of the calculation.",
required=True, nargs="+")
parser.add_argument( "--do", dest="operation", help="The name of the operation.", required=True,
choices=OPERATIONS)
parser.add_argument( "--values", dest="values", nargs='*',
metavar="VALUES",
help="An optional list of values")
parser.add_argument( "--format", dest="format_string", help="The format string for the calculation.")
KgtkReader.add_debug_arguments(parser, expert=_expert)
KgtkReaderOptions.add_arguments(parser, mode_options=True, expert=_expert)
KgtkValueOptions.add_arguments(parser, expert=_expert)
def run(input_file: KGTKFiles,
output_file: KGTKFiles,
output_format: typing.Optional[str],
column_names: typing.Optional[typing.List[str]],
into_column_names: typing.List[str],
operation: str,
values: typing.Optional[typing.List[str]],
format_string: typing.Optional[str],
errors_to_stdout: bool = False,
errors_to_stderr: bool = True,
show_options: bool = False,
verbose: bool = False,
very_verbose: bool = False,
**kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.
)->int:
# import modules locally
from pathlib import Path
import sys
from kgtk.exceptions import KGTKException
from kgtk.io.kgtkreader import KgtkReader, KgtkReaderOptions
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
input_kgtk_file: Path = KGTKArgumentParser.get_input_file(input_file)
output_kgtk_file: Path = KGTKArgumentParser.get_output_file(output_file)
# Select where to send error messages, defaulting to stderr.
error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr
# Build the option structures.
reader_options: KgtkReaderOptions = KgtkReaderOptions.from_dict(kwargs)
value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
# Show the final option structures for debugging and documentation.
if show_options:
print("--input-file=%s" % str(input_kgtk_file), file=error_file, flush=True)
print("--output-file=%s" % str(output_kgtk_file), file=error_file, flush=True)
if output_format is not None:
print("--output-format=%s" % output_format, file=error_file, flush=True)
if column_names is not None:
print("--columns %s" % " ".join(column_names), file=error_file, flush=True)
if into_column_names is not None:
print("--into %s" % " ".join(into_column_names), file=error_file, flush=True)
print("--operation=%s" % str(operation), file=error_file, flush=True)
if values is not None:
print("--values %s" % " ".join(values), file=error_file, flush=True)
if format_string is not None:
print("--format=%s" % format_string, file=error_file, flush=True)
reader_options.show(out=error_file)
value_options.show(out=error_file)
print("=======", file=error_file, flush=True)
try:
if verbose:
print("Opening the input file %s" % str(input_kgtk_file), file=error_file, flush=True)
kr = KgtkReader.open(input_kgtk_file,
options=reader_options,
value_options = value_options,
error_file=error_file,
verbose=verbose,
very_verbose=very_verbose,
)
remaining_names: typing.List[str] = kr.column_names.copy()
selected_names: typing.List[str] = [ ]
save_selected_names: typing.Optional[typing.List[str]] = None
ellipses: str = "..." # All unmentioned columns
ranger: str = ".." # All columns between two columns.
idx: int
if column_names is None:
column_names = [ ]
saw_ranger: bool = False
column_name: str
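        # Expand the requested column list: "..." stands in for every column not
        # named explicitly, and "a .. d" expands to the columns lying between two
        # named columns; duplicates are rejected so each source appears only once.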
for column_name in column_names:
if column_name == ellipses:
if save_selected_names is not None:
raise KGTKException("Elipses may appear only once")
if saw_ranger:
raise KGTKException("Elipses may not appear directly after a range operator ('..').")
save_selected_names = selected_names
selected_names = [ ]
continue
if column_name == ranger:
if len(selected_names) == 0:
raise KGTKException("The column range operator ('..') may not appear without a preceeding column name.")
saw_ranger = True
continue
if column_name not in kr.column_names:
raise KGTKException("Unknown column name '%s'." % column_name)
if column_name not in remaining_names:
raise KGTKException("Column name '%s' was duplicated in the list." % column_name)
if saw_ranger:
saw_ranger = False
prior_column_name: str = selected_names[-1]
prior_column_idx: int = kr.column_name_map[prior_column_name]
column_name_idx: int = kr.column_name_map[column_name]
start_idx: int
end_idx: int
idx_inc: int
if column_name_idx > prior_column_idx:
start_idx = prior_column_idx + 1
end_idx = column_name_idx - 1
idx_inc = 1
else:
start_idx = prior_column_idx - 1
end_idx = column_name_idx + 1
idx_inc = -1
idx = start_idx
                # Walk from just after the prior column toward the named column,
                # in whichever direction idx_inc points (+1 or -1).
                while (end_idx - idx) * idx_inc >= 0:
idx_column_name: str = kr.column_names[idx]
if idx_column_name not in remaining_names:
raise KGTKException("Column name '%s' (%s .. %s) was duplicated in the list." % (column_name, prior_column_name, column_name))
selected_names.append(idx_column_name)
remaining_names.remove(idx_column_name)
idx += idx_inc
selected_names.append(column_name)
remaining_names.remove(column_name)
if saw_ranger:
raise KGTKException("The column ranger operator ('..') may not end the list of column names.")
if len(remaining_names) > 0 and save_selected_names is None:
if verbose:
print("Omitting the following columns: %s" % " ".join(remaining_names), file=error_file, flush=True)
if save_selected_names is not None:
if len(remaining_names) > 0:
save_selected_names.extend(remaining_names)
if len(selected_names) > 0:
save_selected_names.extend(selected_names)
selected_names = save_selected_names
sources: typing.List[int] = [ ]
name: str
for name in selected_names:
sources.append(kr.column_name_map[name])
new_column_count: int = 0
into_column_idxs: typing.List[int] = [ ]
into_column_idx: int
output_column_names: typing.List[str] = kr.column_names.copy()
into_column_name: str
for idx, into_column_name in enumerate(into_column_names):
if into_column_name in kr.column_name_map:
into_column_idx = kr.column_name_map[into_column_name]
into_column_idxs.append(into_column_idx)
if verbose:
print("Putting result %d of the calculation into old column %d (%s)." % (idx + 1, into_column_idx, into_column_name), file=error_file, flush=True)
else:
new_column_count += 1
into_column_idx = len(output_column_names)
into_column_idxs.append(into_column_idx)
output_column_names.append(into_column_name)
if verbose:
print("Putting result %d of the calculation into new column %d (%s)." % (idx + 1, into_column_idx, into_column_name), file=error_file, flush=True)
if verbose:
print("Opening the output file %s" % str(output_kgtk_file), file=error_file, flush=True)
kw: KgtkWriter = KgtkWriter.open(output_column_names,
output_kgtk_file,
require_all_columns=True,
prohibit_extra_columns=True,
fill_missing_columns=False,
gzip_in_parallel=False,
mode=KgtkWriter.Mode[kr.mode.name],
output_format=output_format,
verbose=verbose,
very_verbose=very_verbose,
)
if values is None:
values = [ ]
if operation == AVERAGE_OP:
if len(sources) == 0:
raise KGTKException("Average needs at least one source, got %d" % len(sources))
if len(into_column_idxs) != 1:
raise KGTKException("Average needs 1 destination columns, got %d" % len(into_column_idxs))
elif operation == COPY_OP:
if len(sources) == 0:
raise KGTKException("Copy needs at least one source, got %d" % len(sources))
if len(selected_names) != len(into_column_idxs):
raise KGTKException("Copy needs the same number of input columns and into columns, got %d and %d" % (len(selected_names), len(into_column_idxs)))
elif operation == JOIN_OP:
if len(sources) == 0:
raise KGTKException("Join needs at least one source, got %d" % len(sources))
if len(into_column_idxs) != 1:
raise KGTKException("Join needs 1 destination columns, got %d" % len(into_column_idxs))
if len(values) != 1:
raise KGTKException("Join needs 1 value, got %d" % len(values))
elif operation == PERCENTAGE_OP:
if len(into_column_idxs) != 1:
raise KGTKException("Percent needs 1 destination columns, got %d" % len(into_column_idxs))
if len(selected_names) != 2:
raise KGTKException("Percent needs 2 input columns, got %d" % len(selected_names))
elif operation == SET_OP:
if len(sources) != 0:
raise KGTKException("Set needs no sources, got %d" % len(sources))
if len(into_column_idxs) == 0:
raise KGTKException("Set needs at least one destination column, got %d" % len(into_column_idxs))
if len(values) == 0:
raise KGTKException("Set needs at least one value, got %d" % len(values))
if len(into_column_idxs) != len(values):
raise KGTKException("Set needs the same number of destination columns and values, got %d and %d" % (len(into_column_idxs), len(values)))
elif operation == SUM_OP:
if len(sources) == 0:
raise KGTKException("Sum needs at least one source, got %d" % len(sources))
if len(into_column_idxs) != 1:
raise KGTKException("Sum needs 1 destination columns, got %d" % len(into_column_idxs))
fs: str = format_string if format_string is not None else "%5.2f"
item: str
into_column_idx = into_column_idxs[0] # for convenience
input_data_lines: int = 0
row: typing.List[str]
for row in kr:
input_data_lines += 1
output_row: typing.List[str] = row.copy()
for idx in range(new_column_count):
output_row.append("") # Easiest way to add a new column.
if operation == AVERAGE_OP:
atotal: float = 0
acount: int = 0
for idx in sources:
item = row[idx]
if len(item) > 0:
atotal += float(item)
acount += 1
output_row[into_column_idx] = (fs % (atotal / float(acount))) if acount > 0 else ""
elif operation == COPY_OP:
for idx in range(len(sources)):
output_row[into_column_idxs[idx]] = row[sources[idx]]
elif operation == JOIN_OP:
output_row[into_column_idx] = values[0].join((row[sources[idx]] for idx in range(len(sources))))
elif operation == PERCENTAGE_OP:
output_row[into_column_idx] = fs % (float(row[sources[0]]) * 100 / float(row[sources[1]]))
elif operation == SET_OP:
for idx in range(len(values)):
output_row[into_column_idxs[idx]] = values[idx]
elif operation == SUM_OP:
total: float = 0
for idx in sources:
item = row[idx]
if len(item) > 0:
total += float(item)
output_row[into_column_idx] = fs % total
kw.write(output_row)
# Flush the output file so far:
kw.flush()
if verbose:
print("Read %d data lines from file %s" % (input_data_lines, input_kgtk_file), file=error_file, flush=True)
kw.close()
return 0
except SystemExit as e:
raise KGTKException("Exit requested")
except Exception as e:
raise KGTKException(str(e))
| 42.569921
| 166
| 0.579645
|
52bb2621848ce1e626c45be8913e399823585078
| 119
|
py
|
Python
|
bloom/editor/properties/__init__.py
|
thomasrogers03/bloom
|
5d49c18a241216aca354aa79971940691e6f33b4
|
[
"Apache-2.0"
] | 9
|
2020-11-22T03:04:52.000Z
|
2022-01-17T15:36:25.000Z
|
bloom/editor/properties/__init__.py
|
thomasrogers03/bloom
|
5d49c18a241216aca354aa79971940691e6f33b4
|
[
"Apache-2.0"
] | null | null | null |
bloom/editor/properties/__init__.py
|
thomasrogers03/bloom
|
5d49c18a241216aca354aa79971940691e6f33b4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Thomas Rogers
# SPDX-License-Identifier: Apache-2.0
from . import sprite_properties, wall_properties
| 23.8
| 48
| 0.806723
|
2804cd18a50b302835dc512d7397879e3c2c9685
| 1,035
|
py
|
Python
|
anyway/widgets/suburban_widgets/sub_urban_widget.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | 1
|
2022-01-19T18:23:03.000Z
|
2022-01-19T18:23:03.000Z
|
anyway/widgets/suburban_widgets/sub_urban_widget.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | 2
|
2021-11-02T13:37:23.000Z
|
2021-11-23T15:51:06.000Z
|
anyway/widgets/suburban_widgets/sub_urban_widget.py
|
shaniwein/anyway
|
dcd13bf7dc4a120f4d697ab0c08b906f43eea52e
|
[
"MIT"
] | null | null | null |
import logging
from anyway.request_params import RequestParams
from anyway.widgets.widget import Widget
class SubUrbanWidget(Widget):
def __init__(self, request_params: RequestParams, name: str):
if not SubUrbanWidget.is_sub_urban(request_params):
logging.error(
f"SubUrbanWidget initialized with missing location fields:{request_params}"
)
raise ValueError("SubUrban fields missing")
super().__init__(request_params, name)
@staticmethod
def is_sub_urban(request_params: RequestParams) -> bool:
return (
request_params is not None
and "road1" in request_params.location_info
and (
"road_segment_name" in request_params.location_info
or "road_segment_id" in request_params.location_info
)
)
@staticmethod
def is_relevant(request_params: RequestParams) -> bool:
return SubUrbanWidget.is_sub_urban(request_params)
| 35.689655
| 92
| 0.65314
|
0a847fb12850ea4db0d064ccaf1a75c34c328636
| 263
|
py
|
Python
|
ske_customization/customizations_for_ske/doctype/finance_charges/finance_charges.py
|
akshay83/ske_customization
|
910e8ca88ffc83554ebb23f7480901dba9f08221
|
[
"MIT"
] | null | null | null |
ske_customization/customizations_for_ske/doctype/finance_charges/finance_charges.py
|
akshay83/ske_customization
|
910e8ca88ffc83554ebb23f7480901dba9f08221
|
[
"MIT"
] | null | null | null |
ske_customization/customizations_for_ske/doctype/finance_charges/finance_charges.py
|
akshay83/ske_customization
|
910e8ca88ffc83554ebb23f7480901dba9f08221
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Akshay Mehta and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class FinanceCharges(Document):
pass
| 23.909091
| 51
| 0.78327
|
a95bbc76b86be34f9334ea6ccfc0fd3fd0467808
| 1,095
|
py
|
Python
|
pioemu/state.py
|
NathanY3G/raspberrypi-pio-poc
|
97a19174666bd8cb820ca825390b10d3dfeacd75
|
[
"Apache-2.0"
] | 6
|
2021-05-24T08:08:37.000Z
|
2022-02-16T05:28:06.000Z
|
pioemu/state.py
|
NathanY3G/rp2040-pio-emulator
|
97a19174666bd8cb820ca825390b10d3dfeacd75
|
[
"Apache-2.0"
] | null | null | null |
pioemu/state.py
|
NathanY3G/rp2040-pio-emulator
|
97a19174666bd8cb820ca825390b10d3dfeacd75
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Nathan Young
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from dataclasses import dataclass
from typing import Deque
@dataclass(frozen=True)
class ShiftRegister:
contents: int
counter: int
@dataclass(frozen=True)
class State:
clock: int = 0
program_counter: int = 0
pin_directions: int = 0
pin_values: int = 0
transmit_fifo: Deque = deque()
input_shift_register: ShiftRegister = ShiftRegister(0, 0)
output_shift_register: ShiftRegister = ShiftRegister(0, 32)
x_register: int = 0
y_register: int = 0
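# Illustrative construction of an emulator state (an added sketch; the field
# values are arbitrary and only show how the frozen dataclasses above compose):
#
#     state = State(x_register=5, output_shift_register=ShiftRegister(0xFFFF, 16))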
| 30.416667
| 74
| 0.743379
|
f1bffff4502324a3039c123fb4465d331e7aae79
| 11,555
|
py
|
Python
|
priorities/seak/migrations/0004_auto__add_field_conservationfeature_desc.py
|
Ecotrust/cogs-priorities
|
07dac509f85cfdddbbd5145ee8ea1efaea76a2aa
|
[
"BSD-3-Clause"
] | 3
|
2015-06-23T21:43:47.000Z
|
2021-09-10T18:22:26.000Z
|
priorities/seak/migrations/0004_auto__add_field_conservationfeature_desc.py
|
Ecotrust/cogs-priorities
|
07dac509f85cfdddbbd5145ee8ea1efaea76a2aa
|
[
"BSD-3-Clause"
] | 19
|
2015-04-09T19:27:30.000Z
|
2015-05-12T20:52:50.000Z
|
priorities/seak/migrations/0004_auto__add_field_conservationfeature_desc.py
|
Ecotrust/juniper-priorities
|
16c8c0c96adef40e1f262c53d79215960cec7b4c
|
[
"BSD-3-Clause"
] | 1
|
2021-09-10T18:22:28.000Z
|
2021-09-10T18:22:28.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ConservationFeature.desc'
db.add_column('seak_conservationfeature', 'desc',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ConservationFeature.desc'
db.delete_column('seak_conservationfeature', 'desc')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'seak.conservationfeature': {
'Meta': {'object_name': 'ConservationFeature'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'level1': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'level2': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level3': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level4': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level5': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '90', 'null': 'True', 'blank': 'True'})
},
'seak.cost': {
'Meta': {'object_name': 'Cost'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'seak.definedgeography': {
'Meta': {'object_name': 'DefinedGeography'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'planning_units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['seak.PlanningUnit']", 'symmetrical': 'False'})
},
'seak.folder': {
'Meta': {'object_name': 'Folder'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_folder_related'", 'to': "orm['auth.User']"})
},
'seak.planningunit': {
'Meta': {'object_name': 'PlanningUnit'},
'calculated_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'fid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'})
},
'seak.planningunitshapes': {
'Meta': {'object_name': 'PlanningUnitShapes'},
'bests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'fid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"}),
'stamp': ('django.db.models.fields.FloatField', [], {})
},
'seak.puvscf': {
'Meta': {'unique_together': "(('pu', 'cf'),)", 'object_name': 'PuVsCf'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.ConservationFeature']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.puvscost': {
'Meta': {'unique_together': "(('pu', 'cost'),)", 'object_name': 'PuVsCost'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.Cost']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.scenario': {
'Meta': {'object_name': 'Scenario'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_geography': ('seak.models.JSONField', [], {}),
'input_penalties': ('seak.models.JSONField', [], {}),
'input_relativecosts': ('seak.models.JSONField', [], {}),
'input_scalefactor': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'input_targets': ('seak.models.JSONField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'output_best': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'output_pu_count': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_scenario_related'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['seak']
| 76.019737
| 215
| 0.556036
|
91900c3a7b07192afab89c7369455f9c0224b2e9
| 1,141
|
py
|
Python
|
backend/core/pages/pageBuilder.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | null | null | null |
backend/core/pages/pageBuilder.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 50
|
2021-03-28T03:06:19.000Z
|
2021-10-18T12:36:16.000Z
|
backend/core/pages/pageBuilder.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 1
|
2021-07-30T07:12:46.000Z
|
2021-07-30T07:12:46.000Z
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
class PageBuilder(object):
""" The PageBuilder ...
"""
    def __init__(self):
# preparation to instanciate
pass
def init_builder(self, app, db, userManager, workspaceManager):
self.app = app
self.db = db
self.workspaceManager = workspaceManager
self.userManager = userManager
| 31.694444
| 78
| 0.730061
|
740aa05d2bc13e96f642cf6245dbbc1838ff9e16
| 469
|
py
|
Python
|
dvol/__main__.py
|
Flare576/dvol
|
208e3ea3572415f0232c953d6c166a3aef915042
|
[
"MIT"
] | null | null | null |
dvol/__main__.py
|
Flare576/dvol
|
208e3ea3572415f0232c953d6c166a3aef915042
|
[
"MIT"
] | null | null | null |
dvol/__main__.py
|
Flare576/dvol
|
208e3ea3572415f0232c953d6c166a3aef915042
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
# TODO: when `get` is called, always show override, but mark as disabled if container isn't curently using it
# TODO: when config set with -p but no other params, clear it?
# TODO: when removing, should loop through existing mappings, delete those folders, then nuke the root/project folder
# TODO: after removing files, recursively delete empty folders upward
import cli
def main():
cli.dispatch()
if __name__ == '__main__':
main()
| 33.5
| 117
| 0.739872
|
2208ec6a9c1832d4f07ae34c260758f2af888ac7
| 2,809
|
py
|
Python
|
blogofile/server.py
|
zsoldosp/blogofile
|
48b8e71b5ed9a35cbc9ee60fead367e7ff8b1a9e
|
[
"MIT"
] | null | null | null |
blogofile/server.py
|
zsoldosp/blogofile
|
48b8e71b5ed9a35cbc9ee60fead367e7ff8b1a9e
|
[
"MIT"
] | null | null | null |
blogofile/server.py
|
zsoldosp/blogofile
|
48b8e71b5ed9a35cbc9ee60fead367e7ff8b1a9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import os
import sys
import threading
try:
    from urllib.parse import urlparse  # For Python 3
except ImportError:
    from urlparse import urlparse  # For Python 2; flake8 ignore # NOQA
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from blogofile import config
from blogofile import util
from .cache import bf
bf.server = sys.modules['blogofile.server']
logger = logging.getLogger("blogofile.server")
class Server(threading.Thread):
def __init__(self, port, address="127.0.0.1"):
self.port = int(port)
self.address = address
if self.address == "0.0.0.0":
# Bind to all addresses available
address = ""
threading.Thread.__init__(self)
self.is_shutdown = False
server_address = (address, self.port)
HandlerClass = BlogofileRequestHandler
HandlerClass.protocol_version = "HTTP/1.0"
ServerClass = socketserver.TCPServer
self.httpd = ServerClass(server_address, HandlerClass)
self.sa = self.httpd.socket.getsockname()
def run(self):
print("Blogofile server started on {0}:{1} ..."
.format(self.sa[0], self.sa[1]))
self.httpd.serve_forever()
def shutdown(self):
print("\nshutting down webserver...")
self.httpd.shutdown()
self.httpd.socket.close()
self.is_shutdown = True
class BlogofileRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
error_template = """
<head>
<title>Error response</title>
</head>
<body>
<h1>404 Error</h1>
Your Blogofile site is configured for a subdirectory, maybe you were looking
for the root page? : <a href="{0}">{1}</a>
</body>"""
def __init__(self, *args, **kwargs):
path = urlparse(config.site.url).path
self.BLOGOFILE_SUBDIR_ERROR = self.error_template.format(path, path)
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
self, *args, **kwargs)
def translate_path(self, path):
site_path = urlparse(config.site.url).path
if(len(site_path.strip("/")) > 0 and
not path.startswith(site_path)):
self.error_message_format = self.BLOGOFILE_SUBDIR_ERROR
# Results in a 404
return ""
p = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(
self, path)
if len(site_path.strip("/")) > 0:
build_path = os.path.join(
os.getcwd(),
util.path_join(site_path.strip("/")))
else:
build_path = os.getcwd()
build_path = p.replace(build_path, os.path.join(os.getcwd(), "_site"))
return build_path
def log_message(self, format, *args):
pass
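# Illustrative usage sketch (not part of the original module; it assumes
# blogofile's config has already been initialized, e.g. by the `blogofile serve`
# command that normally drives this class):
#
#     server = Server(port=8080, address="127.0.0.1")
#     server.start()      # threading.Thread API
#     ...
#     server.shutdown()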
| 31.920455
| 78
| 0.642933
|
be26c46b2e91533c2e5a7e5f1df8617e1219ec85
| 2,630
|
py
|
Python
|
tests/integration_tests/tests/agentless_tests/test_deployment_logs.py
|
cloudify-cosmo/cloudify-manager
|
4a3f44ceb49d449bc5ebc8766b1c7b9c174ff972
|
[
"Apache-2.0"
] | 124
|
2015-01-22T22:28:37.000Z
|
2022-02-26T23:12:06.000Z
|
tests/integration_tests/tests/agentless_tests/test_deployment_logs.py
|
cloudify-cosmo/cloudify-manager
|
4a3f44ceb49d449bc5ebc8766b1c7b9c174ff972
|
[
"Apache-2.0"
] | 345
|
2015-01-08T15:49:40.000Z
|
2022-03-29T08:33:00.000Z
|
tests/integration_tests/tests/agentless_tests/test_deployment_logs.py
|
cloudify-cosmo/cloudify-manager
|
4a3f44ceb49d449bc5ebc8766b1c7b9c174ff972
|
[
"Apache-2.0"
] | 77
|
2015-01-07T14:04:35.000Z
|
2022-03-07T22:46:00.000Z
|
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import pytest
import retrying
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource as resource
pytestmark = pytest.mark.group_deployments
@pytest.mark.usefixtures('testmockoperations_plugin')
class TestDeploymentLogs(AgentlessTestCase):
# retrying is needed as the delete_deployment_environment workflow
# which truncates the deployment log file is async.
@retrying.retry(wait_fixed=5000, stop_max_attempt_number=10)
def _assert_log_file_truncated(self,
read_deployment_logs_func,
previous_log_file_size):
self.assertLess(len(read_deployment_logs_func()),
previous_log_file_size)
def test_deployment_logs(self):
message = 'TEST MESSAGE'
inputs = {'message': message}
dsl_path = resource("dsl/deployment_logs.yaml")
deployment, _ = self.deploy_application(dsl_path, inputs=inputs)
deployment_log_path = ('/var/log/cloudify/mgmtworker/logs/{0}.log'
.format(deployment.id))
def read_deployment_logs():
return self.read_manager_file(deployment_log_path, no_strip=True)
def verify_logs_exist_with_content():
deployment_logs = read_deployment_logs()
self.assertIn(message, deployment_logs)
return len(deployment_logs)
log_file_size = verify_logs_exist_with_content()
self.undeploy_application(deployment.id, is_delete_deployment=True)
        # Verify the log file is truncated on deployment delete
self._assert_log_file_truncated(read_deployment_logs, log_file_size)
deployment, _ = self.deploy_application(
dsl_path, inputs=inputs,
deployment_id=deployment.id)
# Verify new deployment with the same deployment id
# can write to the previous location.
verify_logs_exist_with_content()
| 38.115942
| 79
| 0.698859
|
8c19bd1d4ac1e38c3c1c3fa26d433ea7e4303249
| 1,688
|
py
|
Python
|
test/test_schedule_api.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
test/test_schedule_api.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
test/test_schedule_api.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | 1
|
2020-11-07T11:27:13.000Z
|
2020-11-07T11:27:13.000Z
|
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opsgenie_swagger
from opsgenie_swagger.api.schedule_api import ScheduleApi # noqa: E501
from opsgenie_swagger.rest import ApiException
class TestScheduleApi(unittest.TestCase):
"""ScheduleApi unit test stubs"""
def setUp(self):
self.api = opsgenie_swagger.api.schedule_api.ScheduleApi() # noqa: E501
def tearDown(self):
pass
def test_create_schedule(self):
"""Test case for create_schedule
Create Schedule # noqa: E501
"""
pass
def test_delete_schedule(self):
"""Test case for delete_schedule
Delete Schedule # noqa: E501
"""
pass
def test_export_schedule(self):
"""Test case for export_schedule
Export Schedule # noqa: E501
"""
pass
def test_get_schedule(self):
"""Test case for get_schedule
Get Schedule # noqa: E501
"""
pass
def test_get_schedule_timeline(self):
"""Test case for get_schedule_timeline
Get Schedule Timeline # noqa: E501
"""
pass
def test_list_schedules(self):
"""Test case for list_schedules
List Schedules # noqa: E501
"""
pass
def test_update_schedule(self):
"""Test case for update_schedule
Update Schedule (Partial) # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 20.095238
| 80
| 0.622038
|
e6758fad9429f6ec3117fcdd60fbc50ba24660f0
| 1,449
|
py
|
Python
|
lib/galaxy/web/framework/middleware/statsd.py
|
lawrence14701/galaxy
|
7eb2fcb708e7b63e17800c87613ddfa5497c0654
|
[
"CC-BY-3.0"
] | 2
|
2017-03-28T12:11:41.000Z
|
2017-04-22T02:58:25.000Z
|
lib/galaxy/web/framework/middleware/statsd.py
|
lawrence14701/galaxy
|
7eb2fcb708e7b63e17800c87613ddfa5497c0654
|
[
"CC-BY-3.0"
] | 12
|
2020-07-24T23:55:19.000Z
|
2021-12-19T11:40:06.000Z
|
lib/galaxy/web/framework/middleware/statsd.py
|
lawrence14701/galaxy
|
7eb2fcb708e7b63e17800c87613ddfa5497c0654
|
[
"CC-BY-3.0"
] | 1
|
2019-01-16T22:21:54.000Z
|
2019-01-16T22:21:54.000Z
|
"""
Middleware for sending request statistics to statsd.
"""
from __future__ import absolute_import
import time
from galaxy.model.orm.engine_factory import QUERY_COUNT_LOCAL
from galaxy.web.statsd_client import GalaxyStatsdClient
class StatsdMiddleware(object):
"""
This middleware will log request durations to the configured statsd
instance.
"""
def __init__(self,
application,
statsd_host,
statsd_port,
statsd_prefix,
statsd_influxdb):
self.application = application
self.galaxy_stasd_client = GalaxyStatsdClient(
statsd_host,
statsd_port,
statsd_prefix,
statsd_influxdb
)
def __call__(self, environ, start_response):
start_time = time.time()
req = self.application(environ, start_response)
dt = int((time.time() - start_time) * 1000)
page = environ.get('controller_action_key', None) or environ.get('PATH_INFO', "NOPATH").strip('/').replace('/', '.')
self.galaxy_stasd_client.timing(page, dt)
try:
times = QUERY_COUNT_LOCAL.times
self.galaxy_stasd_client.timing("sql." + page, sum(times) * 1000.)
self.galaxy_stasd_client.incr("sqlqueries." + page, len(times))
except AttributeError:
# Not logging query counts, skip
pass
return req
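# Hypothetical wiring example (the statsd host, port and prefix values below are
# placeholders, and `wsgi_app` stands for any WSGI application):
#
#     wsgi_app = StatsdMiddleware(wsgi_app, "localhost", 8125, "galaxy", statsd_influxdb=False)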
| 30.829787
| 124
| 0.620428
|
d7f52b1f54a59ea315b4eab32fc9de49b2b87cc1
| 18,843
|
py
|
Python
|
tests/tensorflow_autolog/test_tensorflow2_autolog.py
|
garciparedes/mlflow
|
b8e108351b6cc7aa449d4b06bf717930d8615f68
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow_autolog/test_tensorflow2_autolog.py
|
garciparedes/mlflow
|
b8e108351b6cc7aa449d4b06bf717930d8615f68
|
[
"Apache-2.0"
] | null | null | null |
tests/tensorflow_autolog/test_tensorflow2_autolog.py
|
garciparedes/mlflow
|
b8e108351b6cc7aa449d4b06bf717930d8615f68
|
[
"Apache-2.0"
] | null | null | null |
# pep8: disable=E501
import collections
import pytest
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.keras import layers # pylint: disable=import-error
import mlflow
import mlflow.tensorflow
import mlflow.keras
import os
np.random.seed(1337)
SavedModelInfo = collections.namedtuple(
"SavedModelInfo",
["path", "meta_graph_tags", "signature_def_key", "inference_df", "expected_results_df"],
)
@pytest.fixture
def random_train_data():
return np.random.random((1000, 32))
@pytest.fixture
def random_one_hot_labels():
n, n_class = (1000, 10)
classes = np.random.randint(0, n_class, n)
labels = np.zeros((n, n_class))
labels[np.arange(n), classes] = 1
return labels
@pytest.fixture(params=[True, False])
def manual_run(request):
if request.param:
mlflow.start_run()
yield
mlflow.end_run()
def create_tf_keras_model():
model = tf.keras.Sequential()
model.add(layers.Dense(64, activation="relu", input_shape=(32,)))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
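# Note: the tests below exercise mlflow.tensorflow.autolog(), which patches
# tf.keras fit()/fit_generator() so that parameters, metrics and (optionally)
# the model itself are logged to the active MLflow run.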
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_ends_auto_created_run(
random_train_data, random_one_hot_labels, fit_variant
):
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
else:
model.fit(data, labels, epochs=10)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_tf_keras_autolog_log_models_configuration(
random_train_data, random_one_hot_labels, log_models
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(log_models=log_models)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
model.fit(data, labels, epochs=10)
client = mlflow.tracking.MlflowClient()
run_id = client.list_run_infos(experiment_id="0")[0].run_id
artifacts = client.list_artifacts(run_id)
artifacts = map(lambda x: x.path, artifacts)
assert ("model" in artifacts) == log_models
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_persists_manually_created_run(
random_train_data, random_one_hot_labels, fit_variant
):
mlflow.tensorflow.autolog()
with mlflow.start_run() as run:
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
else:
model.fit(data, labels, epochs=10)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_keras_random_data_run(
random_train_data, random_one_hot_labels, manual_run, fit_variant, initial_epoch
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(every_n_iter=5)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
history = model.fit_generator(
generator(), epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
else:
history = model.fit(
data, labels, epochs=initial_epoch + 10, steps_per_epoch=1, initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_logs_expected_data(tf_keras_random_data_run):
run, history = tf_keras_random_data_run
data = run.data
assert "accuracy" in data.metrics
assert "loss" in data.metrics
# Testing explicitly passed parameters are logged correctly
assert "epochs" in data.params
assert data.params["epochs"] == str(history.epoch[-1] + 1)
assert "steps_per_epoch" in data.params
assert data.params["steps_per_epoch"] == "1"
# Testing default parameters are logged correctly
assert "initial_epoch" in data.params
assert data.params["initial_epoch"] == str(history.epoch[0])
# Testing unwanted parameters are not logged
assert "callbacks" not in data.params
assert "validation_data" not in data.params
# Testing optimizer parameters are logged
assert "opt_name" in data.params
assert data.params["opt_name"] == "Adam"
assert "opt_learning_rate" in data.params
assert "opt_decay" in data.params
assert "opt_beta_1" in data.params
assert "opt_beta_2" in data.params
assert "opt_epsilon" in data.params
assert "opt_amsgrad" in data.params
assert data.params["opt_amsgrad"] == "False"
client = mlflow.tracking.MlflowClient()
all_epoch_acc = client.get_metric_history(run.info.run_id, "accuracy")
assert all((x.step - 1) % 5 == 0 for x in all_epoch_acc)
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model_summary.txt" in artifacts
@pytest.mark.large
def test_tf_keras_autolog_names_positional_parameters_correctly(
random_train_data, random_one_hot_labels
):
mlflow.tensorflow.autolog(every_n_iter=5)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
with mlflow.start_run():
# Pass `batch_size` as a positional argument for testing purposes
model.fit(data, labels, 8, epochs=10, steps_per_epoch=1)
run_id = mlflow.active_run().info.run_id
client = mlflow.tracking.MlflowClient()
run_info = client.get_run(run_id)
assert run_info.data.params.get("batch_size") == "8"
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_model_can_load_from_artifact(tf_keras_random_data_run, random_train_data):
run, _ = tf_keras_random_data_run
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
assert "tensorboard_logs" in artifacts
model = mlflow.keras.load_model("runs:/" + run.info.run_id + "/model")
model.predict(random_train_data)
@pytest.fixture
def tf_keras_random_data_run_with_callback(
random_train_data,
random_one_hot_labels,
manual_run,
callback,
restore_weights,
patience,
initial_epoch,
):
# pylint: disable=unused-argument
mlflow.tensorflow.autolog(every_n_iter=1)
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if callback == "early":
# min_delta is set as such to guarantee early stopping
callback = tf.keras.callbacks.EarlyStopping(
monitor="loss",
patience=patience,
min_delta=99999999,
restore_best_weights=restore_weights,
)
else:
class CustomCallback(tf.keras.callbacks.Callback):
def on_train_end(self, logs=None):
print("Training completed")
callback = CustomCallback()
history = model.fit(
data, labels, epochs=initial_epoch + 10, callbacks=[callback], initial_epoch=initial_epoch
)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id), history, callback
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [0, 1, 5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_logs(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" in metrics
restored_epoch = int(metrics["restored_epoch"])
assert int(metrics["stopped_epoch"]) - max(1, callback.patience) == restored_epoch
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == max(1, callback.patience) + 1
# Check that MLflow has logged the metrics of the "best" model
assert len(metric_history) == num_of_epochs + 1
# Check that MLflow has logged the correct data
assert history.history["loss"][history.epoch.index(restored_epoch)] == metric_history[-1].value
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [True])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [11])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_stop_does_not_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert metrics["stopped_epoch"] == 0
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_early_stop_no_restore_doesnt_log(tf_keras_random_data_run_with_callback):
run, history, callback = tf_keras_random_data_run_with_callback
metrics = run.data.metrics
params = run.data.params
assert "patience" in params
assert params["patience"] == str(callback.patience)
assert "monitor" in params
assert params["monitor"] == "loss"
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == callback.patience + 1
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("restore_weights", [False])
@pytest.mark.parametrize("callback", ["not-early"])
@pytest.mark.parametrize("patience", [5])
@pytest.mark.parametrize("initial_epoch", [0, 10])
def test_tf_keras_autolog_non_early_stop_callback_no_log(tf_keras_random_data_run_with_callback):
run, history = tf_keras_random_data_run_with_callback[:-1]
metrics = run.data.metrics
params = run.data.params
assert "patience" not in params
assert "monitor" not in params
assert "verbose" not in params
assert "mode" not in params
assert "stopped_epoch" not in metrics
assert "restored_epoch" not in metrics
assert "loss" in history.history
num_of_epochs = len(history.history["loss"])
client = mlflow.tracking.MlflowClient()
metric_history = client.get_metric_history(run.info.run_id, "loss")
# Check the test epoch numbers are correct
assert num_of_epochs == 10
assert len(metric_history) == num_of_epochs
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_does_not_delete_logging_directory_for_tensorboard_callback(
tmpdir, random_train_data, random_one_hot_labels, fit_variant
):
tensorboard_callback_logging_dir_path = str(tmpdir.mkdir("tb_logs"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(
tensorboard_callback_logging_dir_path, histogram_freq=0
)
mlflow.tensorflow.autolog()
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(
generator(), epochs=10, steps_per_epoch=1, callbacks=[tensorboard_callback]
)
else:
model.fit(data, labels, epochs=10, callbacks=[tensorboard_callback])
assert os.path.exists(tensorboard_callback_logging_dir_path)
@pytest.mark.large
@pytest.mark.parametrize("fit_variant", ["fit", "fit_generator"])
def test_tf_keras_autolog_logs_to_and_deletes_temporary_directory_when_tensorboard_callback_absent(
tmpdir, random_train_data, random_one_hot_labels, fit_variant
):
from unittest import mock
from mlflow.tensorflow import _TensorBoardLogDir
mlflow.tensorflow.autolog()
mock_log_dir_inst = _TensorBoardLogDir(location=str(tmpdir.mkdir("tb_logging")), is_temp=True)
with mock.patch("mlflow.tensorflow._TensorBoardLogDir", autospec=True) as mock_log_dir_class:
mock_log_dir_class.return_value = mock_log_dir_inst
data = random_train_data
labels = random_one_hot_labels
model = create_tf_keras_model()
if fit_variant == "fit_generator":
def generator():
while True:
yield data, labels
model.fit_generator(generator(), epochs=10, steps_per_epoch=1)
else:
model.fit(data, labels, epochs=10)
assert not os.path.exists(mock_log_dir_inst.location)
def create_tf_estimator_model(directory, export):
CSV_COLUMN_NAMES = ["SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "Species"]
train = pd.read_csv(
os.path.join(os.path.dirname(__file__), "iris_training.csv"),
names=CSV_COLUMN_NAMES,
header=0,
)
train_y = train.pop("Species")
def input_fn(features, labels, training=True, batch_size=256):
"""An input function for training or evaluating"""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle and repeat if you are in training mode.
if training:
dataset = dataset.shuffle(1000).repeat()
return dataset.batch(batch_size)
my_feature_columns = []
for key in train.keys():
my_feature_columns.append(tf.feature_column.numeric_column(key=key))
feature_spec = {}
for feature in CSV_COLUMN_NAMES:
feature_spec[feature] = tf.Variable([], dtype=tf.float64, name=feature)
receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_spec)
classifier = tf.estimator.DNNClassifier(
feature_columns=my_feature_columns,
# Two hidden layers of 10 nodes each.
hidden_units=[30, 10],
# The model must choose between 3 classes.
n_classes=3,
model_dir=directory,
)
classifier.train(input_fn=lambda: input_fn(train, train_y, training=True), steps=500)
if export:
classifier.export_saved_model(directory, receiver_fn)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_ends_auto_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run() is None
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_persists_manually_created_run(tmpdir, export):
directory = tmpdir.mkdir("test")
with mlflow.start_run() as run:
create_tf_estimator_model(str(directory), export)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.fixture
def tf_estimator_random_data_run(tmpdir, manual_run, export):
# pylint: disable=unused-argument
directory = tmpdir.mkdir("test")
mlflow.tensorflow.autolog()
create_tf_estimator_model(str(directory), export)
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_tf_estimator_autolog_logs_metrics(tf_estimator_random_data_run):
assert "loss" in tf_estimator_random_data_run.data.metrics
assert "steps" in tf_estimator_random_data_run.data.params
client = mlflow.tracking.MlflowClient()
metrics = client.get_metric_history(tf_estimator_random_data_run.info.run_id, "loss")
assert all((x.step - 1) % 100 == 0 for x in metrics)
@pytest.mark.large
@pytest.mark.parametrize("export", [True])
def test_tf_estimator_autolog_model_can_load_from_artifact(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
artifacts = client.list_artifacts(tf_estimator_random_data_run.info.run_id)
artifacts = map(lambda x: x.path, artifacts)
assert "model" in artifacts
mlflow.tensorflow.load_model("runs:/" + tf_estimator_random_data_run.info.run_id + "/model")
@pytest.mark.large
@pytest.mark.parametrize("export", [True, False])
def test_duplicate_autolog_second_overrides(tf_estimator_random_data_run):
client = mlflow.tracking.MlflowClient()
metrics = client.get_metric_history(tf_estimator_random_data_run.info.run_id, "loss")
assert all((x.step - 1) % 4 == 0 for x in metrics)
| 34.385036
| 100
| 0.718941
|
2ddfc48bb6870ad18c9be2ae98ddac709c828f49
| 401
|
py
|
Python
|
fuse/asgi.py
|
elvo194/microfuse_comp_tech
|
300d9b4e1e3f064bf37390e10e013b22d39bf4c5
|
[
"MIT"
] | null | null | null |
fuse/asgi.py
|
elvo194/microfuse_comp_tech
|
300d9b4e1e3f064bf37390e10e013b22d39bf4c5
|
[
"MIT"
] | null | null | null |
fuse/asgi.py
|
elvo194/microfuse_comp_tech
|
300d9b4e1e3f064bf37390e10e013b22d39bf4c5
|
[
"MIT"
] | null | null | null |
"""
ASGI config for fuse project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fuse.settings')
application = get_asgi_application()
| 23.588235
| 79
| 0.750623
|
a2db43f9131f8a608b7632151cfbf7bf1899cb78
| 233
|
py
|
Python
|
os/pdf/convert_image_to_text.py
|
pydeveloper510/Python
|
2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d
|
[
"MIT"
] | 3
|
2021-04-23T08:04:14.000Z
|
2021-05-08T01:24:08.000Z
|
os/pdf/convert_image_to_text.py
|
pydeveloper510/Python
|
2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d
|
[
"MIT"
] | null | null | null |
os/pdf/convert_image_to_text.py
|
pydeveloper510/Python
|
2e3cf5f9d132fbc6dd8c41a96166b6e879d86e0d
|
[
"MIT"
] | 1
|
2021-05-08T01:24:46.000Z
|
2021-05-08T01:24:46.000Z
|
from PIL import Image
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files (x86)\Tesseract-OCR\tesseract.exe"
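# Note (descriptive, not from the original script): the tesseract_cmd path above points to a
# Windows install location; on Linux/macOS the `tesseract` binary is usually on PATH and this
# override can typically be omitted.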
im = Image.open('images/P_5.png')
text = pytesseract.image_to_string(im, lang='eng')
print(text)
| 29.125
| 93
| 0.781116
|
afaa83f829fb9a658cad1d97e2b336bf3dc4dc92
| 716
|
py
|
Python
|
tests/test_utils.py
|
tantikristanti/delft
|
620ddf9e55e13213d2fc9af25b9d01331256d698
|
[
"Apache-2.0"
] | 333
|
2018-05-16T07:02:05.000Z
|
2022-03-31T11:30:32.000Z
|
tests/test_utils.py
|
tantikristanti/delft
|
620ddf9e55e13213d2fc9af25b9d01331256d698
|
[
"Apache-2.0"
] | 126
|
2018-06-26T18:47:18.000Z
|
2022-03-30T05:59:28.000Z
|
tests/test_utils.py
|
tantikristanti/delft
|
620ddf9e55e13213d2fc9af25b9d01331256d698
|
[
"Apache-2.0"
] | 67
|
2018-05-15T21:28:59.000Z
|
2022-03-20T19:10:29.000Z
|
import logging
from functools import wraps
# derived from https://github.com/elifesciences/sciencebeam-trainer-delft/tree/develop/tests
LOGGER = logging.getLogger(__name__)
def log_on_exception(f: callable) -> callable:
"""
Wraps function to log error on exception.
That is useful for tests that log a lot of things,
and pytest displaying the test failure at the top of the method.
(there doesn't seem to be an option to change that)
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
LOGGER.exception('failed due to %s', repr(e))
raise
return wrapper
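# Illustrative usage sketch (the test and helper names below are hypothetical, not part of this
# module): decorating a test ensures the full traceback is logged before pytest reports the failure.
#
# @log_on_exception
# def test_training_pipeline():
#     assert some_pipeline_step() is not None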
| 29.833333
| 92
| 0.664804
|
51ad8afcb6f18a931fb8b3a83255e891eef6ecc5
| 7,238
|
py
|
Python
|
tests/unit/baskerville_tests/features_tests/test_feature_response4xx_to_request_ratio.py
|
equalitie/baskerville
|
433551d03aee85d5c983ff6b25b388155b54190d
|
[
"CC-BY-4.0"
] | 25
|
2020-05-19T11:20:47.000Z
|
2021-09-20T03:15:28.000Z
|
tests/unit/baskerville_tests/features_tests/test_feature_response4xx_to_request_ratio.py
|
mkaranasou/baskerville
|
433551d03aee85d5c983ff6b25b388155b54190d
|
[
"CC-BY-4.0"
] | 29
|
2020-05-26T13:21:48.000Z
|
2021-09-21T06:52:28.000Z
|
tests/unit/baskerville_tests/features_tests/test_feature_response4xx_to_request_ratio.py
|
deflect-ca/baskerville
|
9659f4b39ab66fcf5329a4eccff15e97245b04f0
|
[
"CC-BY-4.0"
] | 4
|
2020-06-11T07:00:16.000Z
|
2021-05-07T09:10:36.000Z
|
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from pyspark.sql import functions as F, types as T
from baskerville.util.enums import FeatureComputeType
from baskerville.features.feature_response4xx_to_request_ratio import \
FeatureResponse4xxToRequestRatio, FeatureResponse4xxTotal, FeatureRequestTotal
from tests.unit.baskerville_tests.helpers.spark_testing_base import \
FeatureSparkTestCase
class TestSparkResponse4xxToRequestRatio(FeatureSparkTestCase):
def setUp(self):
super(TestSparkResponse4xxToRequestRatio, self).setUp()
self.feature = FeatureResponse4xxToRequestRatio()
def test_instance(self):
self.assertTrue(hasattr(self.feature, 'feature_name'))
self.assertTrue(hasattr(self.feature, 'COLUMNS'))
self.assertTrue(hasattr(self.feature, 'DEPENDENCIES'))
self.assertTrue(hasattr(self.feature, 'DEFAULT_VALUE'))
self.assertTrue(hasattr(self.feature, 'compute_type'))
self.assertTrue(self.feature.feature_name ==
'response4xx_to_request_ratio')
self.assertTrue(
self.feature.columns == ['http_response_code', '@timestamp'])
self.assertTrue(self.feature.dependencies == [FeatureRequestTotal,
FeatureResponse4xxTotal])
self.assertTrue(self.feature.DEFAULT_VALUE == 0.)
self.assertTrue(self.feature.compute_type == FeatureComputeType.ratio)
self.assertIsNotNone(self.feature.feature_name)
self.assertIsNotNone(self.feature.feature_default)
self.assertTrue(isinstance(self.feature.feature_name, str))
self.assertTrue(isinstance(self.feature.feature_default, float))
def test_compute_single_record(self):
ats_record = {
"client_ip": '55.555.55.55',
"@timestamp": '2018-01-17T08:30:00.000Z',
"content_type": 'html',
"client_url": 'page1/page2/page3?query',
"http_response_code": 201
}
sub_df = self.get_sub_df_for_feature(self.feature, [ats_record])
result = self.feature.compute(sub_df)
expected_df = sub_df.withColumn(
self.feature.feature_name,
F.lit(0).cast('float')
)
expected_df = self.schema_helper(
expected_df, result.schema, [self.feature.feature_name]
)
result.show()
expected_df.show()
self.assertDataFrameEqual(
result,
expected_df
)
def test_compute_multiple_records_200_and_400(self):
first_ats_record = {
"client_ip": '55.555.55.55',
"@timestamp": '2018-01-17T08:30:00.000Z',
"content_type": 'html',
"client_url": 'page1/page2/page3',
"http_response_code": 201,
}
second_ats_record = {
"client_ip": '55.555.55.55',
"@timestamp": '2018-01-17T08:30:00.000Z',
"content_type": 'html',
"client_url": 'page1/page2',
"http_response_code": 499,
}
sub_df = self.get_sub_df_for_feature(
self.feature,
[
first_ats_record,
second_ats_record,
]
)
result = self.feature.compute(sub_df)
expected_df = sub_df.withColumn(
self.feature.feature_name,
F.lit(0.5).cast('float')
)
expected_df = self.schema_helper(
expected_df, result.schema, [self.feature.feature_name]
)
result.show()
expected_df.show()
self.assertDataFrameEqual(
result,
expected_df
)
def test_compute_multiple_records_200_400_and_500(self):
first_ats_record = {
"client_ip": '55.555.55.55',
"@timestamp": '2018-01-17T08:30:00.000Z',
"content_type": 'html',
"client_url": 'page1/page2/page3',
"http_response_code": 201,
}
second_ats_record = {
"client_ip": '55.555.55.55',
"@timestamp": '2018-01-17T08:30:00.000Z',
"content_type": 'html',
"client_url": 'page1/page2',
"http_response_code": 401,
}
third_ats_record = {
"client_ip": '55.555.55.55',
"@timestamp": '2018-01-17T08:30:00.000Z',
"content_type": 'html',
"client_url": 'page1/page2',
"http_response_code": 501,
}
sub_df = self.get_sub_df_for_feature(
self.feature,
[
first_ats_record,
second_ats_record,
third_ats_record
]
)
result = self.feature.compute(sub_df)
expected_df = sub_df.withColumn(
self.feature.feature_name,
F.lit(1. / 3.).cast('float')
)
expected_df = self.schema_helper(
expected_df, result.schema, [self.feature.feature_name]
)
result.show()
expected_df.show()
self.assertDataFrameEqual(
result,
expected_df
)
def test_update_row(self):
denominator = FeatureRequestTotal()
numerator = FeatureResponse4xxTotal()
test_current = {self.feature.feature_name: 1.,
denominator.feature_name: 1.,
numerator.feature_name: 2.}
test_past = {self.feature.feature_name: 1.,
denominator.feature_name: 2.,
numerator.feature_name: 4.}
value = self.feature.update_row(
test_current, test_past
)
self.assertAlmostEqual(value, 2., places=2)
def test_update(self):
denominator = FeatureRequestTotal.feature_name_from_class()
numerator = FeatureResponse4xxTotal.feature_name_from_class()
schema = T.StructType([
T.StructField(
self.feature.current_features_column,
T.MapType(T.StringType(), T.FloatType())
),
T.StructField(
self.feature.past_features_column,
T.MapType(T.StringType(), T.FloatType())
),
])
sub_df = self.session.createDataFrame(
[{
self.feature.current_features_column: {
self.feature.feature_name: 1.,
numerator: 2.,
denominator: 1.,
},
self.feature.past_features_column: {
self.feature.feature_name: 1.,
numerator: 4.,
denominator: 2.,
}
}],
schema=schema
)
result_df = self.feature.update(
sub_df
)
result_df.show()
value = result_df.select(
self.feature.updated_feature_col_name
).collect()[0][self.feature.updated_feature_col_name]
expected_value = 2.
self.assertAlmostEqual(value, expected_value, places=2)
| 33.981221
| 82
| 0.574054
|
25e64faf43d18714eb692972324183f5fd854bb0
| 7,610
|
py
|
Python
|
test/functional/wallet_hd.py
|
barrystyle/nyc3
|
43a15d192e23602d2d5d97d458efbc1cb7a4da7d
|
[
"MIT"
] | 1
|
2019-06-06T22:44:39.000Z
|
2019-06-06T22:44:39.000Z
|
test/functional/wallet_hd.py
|
barrystyle/nyc3
|
43a15d192e23602d2d5d97d458efbc1cb7a4da7d
|
[
"MIT"
] | null | null | null |
test/functional/wallet_hd.py
|
barrystyle/nyc3
|
43a15d192e23602d2d5d97d458efbc1cb7a4da7d
|
[
"MIT"
] | 3
|
2019-06-05T22:50:07.000Z
|
2021-04-19T22:59:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The NYC3 Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
import os
import shutil
from test_framework.test_framework import NYC3TestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
assert_raises_rpc_error
)
class WalletHDTest(NYC3TestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ['-keypool=0']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert_equal(len(masterkeyid), 40)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].getaddressinfo(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'")  # first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, "hd.bak"))
#self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, "hd.dump"))
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
NUM_HD_ADDS = 10
for i in range(NUM_HD_ADDS):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].getaddressinfo(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'")
assert_equal(hd_info["hdseedid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].getaddressinfo(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'")  # second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
# we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "chainstate"))
shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
self.start_node(1)
# Assert that derivation is deterministic
hd_add_2 = None
for i in range(NUM_HD_ADDS):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].getaddressinfo(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(i)+"'")
assert_equal(hd_info_2["hdseedid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Needs rescan
self.stop_node(1)
self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
# Try a RPC based rescan
self.stop_node(1)
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "blocks"))
shutil.rmtree(os.path.join(self.nodes[1].datadir, "regtest", "chainstate"))
shutil.copyfile(os.path.join(self.nodes[1].datadir, "hd.bak"), os.path.join(self.nodes[1].datadir, "regtest", "wallets", "wallet.dat"))
self.start_node(1, extra_args=self.extra_args[1])
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# Wallet automatically scans blocks older than key on startup
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
out = self.nodes[1].rescanblockchain(0, 1)
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], 1)
out = self.nodes[1].rescanblockchain()
assert_equal(out['start_height'], 0)
assert_equal(out['stop_height'], self.nodes[1].getblockcount())
assert_equal(self.nodes[1].getbalance(), NUM_HD_ADDS + 1)
        # send a tx and make sure it's using the internal chain for the change output
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].getaddressinfo(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:7], "m/0'/1'")
# Generate a new HD seed on node 1 and make sure it is set
orig_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
self.nodes[1].sethdseed()
new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert orig_masterkeyid != new_masterkeyid
addr = self.nodes[1].getnewaddress()
assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/0\'') # Make sure the new address is the first from the keypool
self.nodes[1].keypoolrefill(1) # Fill keypool with 1 key
# Set a new HD seed on node 1 without flushing the keypool
new_seed = self.nodes[0].dumpprivkey(self.nodes[0].getnewaddress())
orig_masterkeyid = new_masterkeyid
self.nodes[1].sethdseed(False, new_seed)
new_masterkeyid = self.nodes[1].getwalletinfo()['hdseedid']
assert orig_masterkeyid != new_masterkeyid
addr = self.nodes[1].getnewaddress()
assert_equal(orig_masterkeyid, self.nodes[1].getaddressinfo(addr)['hdseedid'])
assert_equal(self.nodes[1].getaddressinfo(addr)['hdkeypath'], 'm/0\'/0\'/1\'') # Make sure the new address continues previous keypool
# Check that the next address is from the new seed
self.nodes[1].keypoolrefill(1)
next_addr = self.nodes[1].getnewaddress()
assert_equal(new_masterkeyid, self.nodes[1].getaddressinfo(next_addr)['hdseedid'])
assert_equal(self.nodes[1].getaddressinfo(next_addr)['hdkeypath'], 'm/0\'/0\'/0\'') # Make sure the new address is not from previous keypool
assert next_addr != addr
# Sethdseed parameter validity
assert_raises_rpc_error(-1, 'sethdseed', self.nodes[0].sethdseed, False, new_seed, 0)
assert_raises_rpc_error(-5, "Invalid private key", self.nodes[1].sethdseed, False, "not_wif")
assert_raises_rpc_error(-1, "JSON value is not a boolean as expected", self.nodes[1].sethdseed, "Not_bool")
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[1].sethdseed, False, True)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, new_seed)
assert_raises_rpc_error(-5, "Already have this key", self.nodes[1].sethdseed, False, self.nodes[1].dumpprivkey(self.nodes[1].getnewaddress()))
if __name__ == '__main__':
    WalletHDTest().main()
| 48.471338
| 150
| 0.659001
|
b4384ef5545885e078880b594bec3752858f282d
| 4,747
|
py
|
Python
|
utils.py
|
new2scala/graph-cnn.pytorch
|
8bee0c2ed687dcfdb277c71b70c8ea747b6ca9c7
|
[
"MIT"
] | null | null | null |
utils.py
|
new2scala/graph-cnn.pytorch
|
8bee0c2ed687dcfdb277c71b70c8ea747b6ca9c7
|
[
"MIT"
] | null | null | null |
utils.py
|
new2scala/graph-cnn.pytorch
|
8bee0c2ed687dcfdb277c71b70c8ea747b6ca9c7
|
[
"MIT"
] | null | null | null |
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
import torch
from scipy.sparse import csgraph
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def normalize_adj(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo()
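# Quick sanity check (illustrative, using the np/sp imports above): for the 2-node graph
# A = [[0, 1], [1, 0]] with self-loops added, the symmetric normalization
# D^-1/2 (A + I) D^-1/2 equals [[0.5, 0.5], [0.5, 0.5]]:
#   normalize_adj(sp.coo_matrix(np.array([[0., 1.], [1., 0.]])) + sp.eye(2)).todense()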
def laplacian(mx, norm):
"""Laplacian-normalize sparse matrix"""
assert (all (len(row) == len(mx) for row in mx)), "Input should be a square matrix"
    return csgraph.laplacian(mx, normed=norm)
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def load_data(path="./data", dataset="cora"):
"""
ind.[:dataset].x => the feature vectors of the training instances (scipy.sparse.csr.csr_matrix)
ind.[:dataset].y => the one-hot labels of the labeled training instances (numpy.ndarray)
ind.[:dataset].allx => the feature vectors of both labeled and unlabeled training instances (csr_matrix)
ind.[:dataset].ally => the labels for instances in ind.dataset_str.allx (numpy.ndarray)
ind.[:dataset].graph => the dict in the format {index: [index of neighbor nodes]} (collections.defaultdict)
ind.[:dataset].tx => the feature vectors of the test instances (scipy.sparse.csr.csr_matrix)
ind.[:dataset].ty => the one-hot labels of the test instances (numpy.ndarray)
ind.[:dataset].test.index => indices of test instances in graph, for the inductive setting
"""
print("\n[STEP 1]: Upload {} dataset.".format(dataset))
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("{}/ind.{}.{}".format(path, dataset, names[i]), 'rb') as f:
objects.append(pkl.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("{}/ind.{}.test.index".format(path, dataset))
test_idx_range = np.sort(test_idx_reorder)
if dataset == 'citeseer':
#Citeseer dataset contains some isolated nodes in the graph
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
print("| # of nodes : {}".format(adj.shape[0]))
print("| # of edges : {}".format(adj.sum().sum()/2))
features = normalize(features)
adj = normalize_adj(adj + sp.eye(adj.shape[0]))
print("| # of features : {}".format(features.shape[1]))
print("| # of clases : {}".format(ally.shape[1]))
features = torch.FloatTensor(np.array(features.todense()))
sparse_mx = adj.tocoo().astype(np.float32)
adj = torch.FloatTensor(np.array(adj.todense()))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
if dataset == 'citeseer':
save_label = np.where(labels)[1]
labels = torch.LongTensor(np.where(labels)[1])
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
idx_test = test_idx_range.tolist()
print("| # of train set : {}".format(len(idx_train)))
print("| # of val set : {}".format(len(idx_val)))
print("| # of test set : {}".format(len(idx_test)))
idx_train, idx_val, idx_test = list(map(lambda x: torch.LongTensor(x), [idx_train, idx_val, idx_test]))
def missing_elements(L):
start, end = L[0], L[-1]
return sorted(set(range(start, end+1)).difference(L))
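    # e.g. missing_elements([2, 3, 5, 7]) -> [4, 6]; used just below to pad the label
    # array with zeros for Citeseer's isolated test nodes that are absent from the test index.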
if dataset == 'citeseer':
L = np.sort(idx_test)
missing = missing_elements(L)
for element in missing:
save_label = np.insert(save_label, element, 0)
labels = torch.LongTensor(save_label)
return adj, features, labels, idx_train, idx_val, idx_test
| 35.962121
| 111
| 0.650516
|
adb4a3e59b78ea172fba507f225dccbb52f72cef
| 3,365
|
py
|
Python
|
src/nn/sketch_encoder.py
|
VIVelev/sketchy-code
|
351ba3c770cccdf4189a99ae765fc6ef36742912
|
[
"MIT"
] | null | null | null |
src/nn/sketch_encoder.py
|
VIVelev/sketchy-code
|
351ba3c770cccdf4189a99ae765fc6ef36742912
|
[
"MIT"
] | null | null | null |
src/nn/sketch_encoder.py
|
VIVelev/sketchy-code
|
351ba3c770cccdf4189a99ae765fc6ef36742912
|
[
"MIT"
] | null | null | null |
from keras import Model
from keras.layers import (Conv2D, Dense, Dropout, Flatten, Input, MaxPool2D,
Reshape)
from keras.optimizers import RMSprop
from ..utils.config import IMAGE_SIZE
__all__ = [
'SketchEncoder',
]
class SketchEncoder:
"""Sketch Encoder
    Sketch (Image) Embedding (Encoder) Model.
Parameters:
-----------
embedding_dim : integer, the dimension in which to embed the sketch image and the tokens
name : string, the name of the model, optional
"""
def __init__(self, embedding_dim, name='sketch_encoder'):
self.embedding_dim = embedding_dim
self.name = name
# Inputs
self.image_input = Input(IMAGE_SIZE, name='image_input')
# Conv 32
self.conv_32_1 = Conv2D(32, (3, 3), activation='relu', padding='valid', name='conv_32_1')
self.conv_32_2 = Conv2D(32, (3, 3), activation='relu', padding='valid', name='conv_32_2')
self.maxpool_1 = MaxPool2D(pool_size=(2, 2), name='maxpool_1')
self.conv_dropout_1 = Dropout(0.3, name='conv_dropout_1')
# Conv 64
self.conv_64_1 = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv_64_1')
self.conv_64_2 = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv_64_2')
self.maxpool_2 = MaxPool2D(pool_size=(2, 2), name='maxpool_2')
self.conv_dropout_2 = Dropout(0.3, name='conv_dropout_2')
# Conv 128
self.conv_128_1 = Conv2D(128, (3, 3), activation='relu', padding='valid', name='conv_128_1')
self.conv_128_2 = Conv2D(128, (3, 3), activation='relu', padding='valid', name='conv_128_2')
self.maxpool_3 = MaxPool2D(pool_size=(2, 2), name='maxpool_3')
self.conv_dropout_3 = Dropout(0.3, name='conv_dropout_3')
# Flatten
self.flatten = Flatten(name='flatten')
# Dense -> ReLU 1
self.dense_relu_1 = Dense(1024, activation='relu', name='dense_relu_1')
self.dense_dropout_1 = Dropout(0.3, name='dense_dropout_1')
# Dense -> ReLU 2
self.dense_relu_2 = Dense(1024, activation='relu', name='dense_relu_2')
self.dense_dropout_2 = Dropout(0.3, name='dense_dropout_2')
# Dense -> ReLU encoder
self.dense_relu_encoder = Dense(embedding_dim, activation='relu', name='dense_relu_encoder')
self.embedding_reshapor = Reshape((1, embedding_dim), name='embedding_reshapor')
self.model = None
def build_model(self):
"""Builds a Keras Model to train/predict"""
x = self.conv_32_1(self.image_input)
x = self.conv_32_2(x)
x = self.maxpool_1(x)
x = self.conv_dropout_1(x)
x = self.conv_64_1(x)
x = self.conv_64_2(x)
x = self.maxpool_2(x)
x = self.conv_dropout_2(x)
x = self.conv_128_1(x)
x = self.conv_128_2(x)
x = self.maxpool_3(x)
x = self.conv_dropout_3(x)
x = self.flatten(x)
x = self.dense_relu_1(x)
x = self.dense_dropout_1(x)
x = self.dense_relu_2(x)
x = self.dense_dropout_2(x)
x = self.dense_relu_encoder(x)
x = self.embedding_reshapor(x)
self.model = Model(self.image_input, x, name=self.name)
self.model.compile(RMSprop(1e-4), loss='categorical_crossentropy')
return self
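# Usage sketch (the embedding size below is an arbitrary example value, not a project default):
#   encoder = SketchEncoder(embedding_dim=256).build_model()
#   encoder.model.summary()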
| 33.989899
| 100
| 0.622585
|
4cb7ca82217bd3d6308b067273694171c6693d8e
| 1,188
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/vod/apis/GetHttpSslRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/vod/apis/GetHttpSslRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/vod/apis/GetHttpSslRequest.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class GetHttpSslRequest(JDCloudRequest):
"""
    Query the SSL configuration of a CDN domain.
"""
def __init__(self, parameters, header=None, version="v1"):
super(GetHttpSslRequest, self).__init__(
'/domains/{domainId}:getHttpSsl', 'GET', header, version)
self.parameters = parameters
class GetHttpSslParameters(object):
def __init__(self, domainId, ):
"""
        :param domainId: domain ID
"""
self.domainId = domainId
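# Usage sketch (the domain ID below is a placeholder; client construction and request signing are
# handled elsewhere in the SDK):
#   params = GetHttpSslParameters(domainId='your-domain-id')
#   request = GetHttpSslRequest(params)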
| 28.285714
| 75
| 0.710438
|
72eed61456852315e3b93887ea7717a292a6154e
| 995
|
py
|
Python
|
zerver/management/commands/print_email_delivery_backlog.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/print_email_delivery_backlog.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | 1
|
2019-11-02T09:06:05.000Z
|
2019-11-02T09:06:05.000Z
|
zerver/management/commands/print_email_delivery_backlog.py
|
erinis-eligro/zulip-outcasts
|
51153a6ce219370aee79bfe462f6e4fb956993d9
|
[
"Apache-2.0"
] | 1
|
2021-06-10T15:12:52.000Z
|
2021-06-10T15:12:52.000Z
|
#!/usr/bin/env python
"""
Shows backlog count of ScheduledJobs of type Email
"""
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from django.conf import settings
from django.core.management.base import BaseCommand
from zerver.models import ScheduledJob
from datetime import datetime, timedelta
class Command(BaseCommand):
help = """Shows backlog count of ScheduledJobs of type Email
(The number of currently overdue (by at least a minute) email jobs)
This is run as part of the nagios health check for the deliver_email command.
Please note that this is only relevant to the SMTP-based email delivery (no Mandrill).
Usage: ./manage.py print_email_delivery_backlog
"""
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
print(len(ScheduledJob.objects.filter(type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=datetime.utcnow()-timedelta(minutes=1))))
| 31.09375
| 112
| 0.733668
|
c4bbc544f0a46772593f92d614ff89de0ce9fa1a
| 4,488
|
py
|
Python
|
tests/test_work_create_reply.py
|
messense/wechatpy
|
46fdd873a0a04b8539a759a90ee81645405feb22
|
[
"MIT"
] | 140
|
2015-01-12T09:18:59.000Z
|
2022-03-24T09:17:18.000Z
|
tests/test_work_create_reply.py
|
messense/wechatpy
|
46fdd873a0a04b8539a759a90ee81645405feb22
|
[
"MIT"
] | 41
|
2015-01-05T11:56:30.000Z
|
2016-05-10T03:12:23.000Z
|
tests/test_work_create_reply.py
|
messense/wechatpy
|
46fdd873a0a04b8539a759a90ee81645405feb22
|
[
"MIT"
] | 56
|
2015-01-12T04:14:24.000Z
|
2020-03-10T12:02:42.000Z
|
# -*- coding: utf-8 -*-
import unittest
from wechatpy.work.replies import TextReply, create_reply
class CreateReplyTestCase(unittest.TestCase):
def test_create_reply_with_text_not_render(self):
text = 'test'
reply = create_reply(text, render=False)
self.assertEqual('text', reply.type)
self.assertEqual(text, reply.content)
self.assertEqual(0, reply.agent)
def test_create_reply_with_text_render(self):
text = 'test'
reply = create_reply(text, render=True)
self.assertTrue(isinstance(reply, str))
def test_create_reply_should_return_none(self):
reply = create_reply(None)
self.assertTrue(reply is None)
def test_create_reply_with_message(self):
from wechatpy.work.messages import TextMessage
msg = TextMessage({
'FromUserName': 'user1',
'ToUserName': 'user2',
'AgentID': 1,
})
reply = create_reply('test', msg, render=False)
self.assertEqual('user1', reply.target)
self.assertEqual('user2', reply.source)
self.assertEqual(1, reply.agent)
def test_create_reply_with_reply(self):
_reply = TextReply(content='test')
reply = create_reply(_reply, render=False)
self.assertEqual(_reply, reply)
def test_create_reply_with_articles(self):
articles = [
{
'title': 'test 1',
'description': 'test 1',
'image': 'http://www.qq.com/1.png',
'url': 'http://www.qq.com/1'
},
{
'title': 'test 2',
'description': 'test 2',
'image': 'http://www.qq.com/2.png',
'url': 'http://www.qq.com/2'
},
{
'title': 'test 3',
'description': 'test 3',
'image': 'http://www.qq.com/3.png',
'url': 'http://www.qq.com/3'
},
]
reply = create_reply(articles, render=False)
self.assertEqual('news', reply.type)
def test_create_reply_with_more_than_ten_articles(self):
articles = [
{
'title': 'test 1',
'description': 'test 1',
'image': 'http://www.qq.com/1.png',
'url': 'http://www.qq.com/1'
},
{
'title': 'test 2',
'description': 'test 2',
'image': 'http://www.qq.com/2.png',
'url': 'http://www.qq.com/2'
},
{
'title': 'test 3',
'description': 'test 3',
'image': 'http://www.qq.com/3.png',
'url': 'http://www.qq.com/3'
},
{
'title': 'test 4',
'description': 'test 4',
'image': 'http://www.qq.com/4.png',
'url': 'http://www.qq.com/4'
},
{
'title': 'test 5',
'description': 'test 5',
'image': 'http://www.qq.com/5.png',
'url': 'http://www.qq.com/5'
},
{
'title': 'test 6',
'description': 'test 6',
'image': 'http://www.qq.com/6.png',
'url': 'http://www.qq.com/6'
},
{
'title': 'test 7',
'description': 'test 7',
'image': 'http://www.qq.com/7.png',
'url': 'http://www.qq.com/7'
},
{
'title': 'test 8',
'description': 'test 8',
'image': 'http://www.qq.com/8.png',
'url': 'http://www.qq.com/8'
},
{
'title': 'test 9',
'description': 'test 9',
'image': 'http://www.qq.com/9.png',
'url': 'http://www.qq.com/9'
},
{
'title': 'test 10',
'description': 'test 10',
'image': 'http://www.qq.com/10.png',
'url': 'http://www.qq.com/10'
},
{
'title': 'test 11',
'description': 'test 11',
'image': 'http://www.qq.com/11.png',
'url': 'http://www.qq.com/11'
},
]
self.assertRaises(AttributeError, create_reply, articles)
| 32.057143
| 65
| 0.439171
|
529e2308f788d5f4bb028370ef5bea362327475d
| 4,066
|
py
|
Python
|
reskit/wind/core/design_turbine.py
|
OfficialCodexplosive/RESKit
|
e006e8c9923ddb044dab6951c95a15fa43489398
|
[
"MIT"
] | 1
|
2021-01-10T13:29:33.000Z
|
2021-01-10T13:29:33.000Z
|
reskit/wind/core/design_turbine.py
|
OfficialCodexplosive/RESKit
|
e006e8c9923ddb044dab6951c95a15fa43489398
|
[
"MIT"
] | 1
|
2021-01-12T10:07:49.000Z
|
2021-01-12T10:23:06.000Z
|
reskit/wind/core/design_turbine.py
|
OfficialCodexplosive/RESKit
|
e006e8c9923ddb044dab6951c95a15fa43489398
|
[
"MIT"
] | 2
|
2021-01-05T10:50:29.000Z
|
2021-01-15T10:55:54.000Z
|
# from ._util import *
# from ._costModel import *
# from scipy.optimize import differential_evolution
# from scipy.stats import exponweib
import numpy as np
import pandas as pd
from .power_curve import compute_specific_power
def onshore_turbine_from_avg_wind_speed(wind_speed, constant_rotor_diam=True, base_capacity=4200, base_hub_height=120, base_rotor_diam=136, reference_wind_speed=6.7, min_tip_height=20, min_specific_power=180):
"""
Suggest onshore turbine design characteristics (capacity, hub height, rotor diameter, specific power) for a 2050 European context based on an average wind speed value.
The default values and the function's normalization correspond to the baseline turbine design considered by Ryberg et al. [1] for a wind speed equal to 6.7 m/s. See notes.
Parameters
----------
wind_speed : numeric or array_like
Local average wind speed close to or at the hub height.
constant_rotor_diam : bool, optional
Whether the rotor diameter is mantained constant or not, by default True
base_capacity : numeric or array_like, optional
Baseline turbine capacity in kW, by default 4200.
base_hub_height : numeric or array_like, optional
Baseline turbine hub height in m, by default 120.
base_rotor_diam : numeric or array_like, optional
Baseline turbine rotor diameter in m, by default 136.
reference_wind_speed : numeric, optional
Average wind speed corresponding to the baseline turbine design, by default 6.7.
min_tip_height : numeric, optional.
Minimum distance in m between the lower tip of the blades and the ground, by default 20.
min_specific_power : numeric, optional
Minimum specific power allowed in kw/m2, by default 180.
Returns
-------
dict or pandas DataFrame
        Returns the suggested values of hub height in m, rotor diameter in m, specific power in W/m2, and capacity in kW, as a dictionary when numeric values are input or as a pandas DataFrame when array-like objects are input.
Notes
-------
The default baseline onshore turbine has 4200 kW capacity, 120m hub height, and 136m rotor diameter [1]
References
-------
[1] David S. Ryberg, Dilara C. Caglayan, Sabrina Schmitt, Jochen Linssen, Detlef Stolten, Martin Robinius - The Future of European Onshore Wind Energy Potential:
    Detailed Distribution and Simulation of Advanced Turbine Designs, Energy, 2019, available at https://www.sciencedirect.com/science/article/abs/pii/S0360544219311818
"""
wind_speed = np.array(wind_speed)
multi = wind_speed.size > 1
# Design Specific Power
scaling = compute_specific_power(base_capacity, base_rotor_diam) / (np.exp(0.53769024 * np.log(reference_wind_speed) + 4.74917728))
specific_power = scaling * np.exp(0.53769024 * np.log(wind_speed) + 4.74917728)
if multi:
lt180 = specific_power < min_specific_power
if lt180.any():
specific_power[lt180] = min_specific_power
else:
if specific_power < min_specific_power:
specific_power = min_specific_power
if constant_rotor_diam:
rotor_diam = base_rotor_diam
capacity = specific_power * np.pi * np.power((rotor_diam / 2), 2) / 1000
else:
capacity = base_capacity
rotor_diam = 2 * np.sqrt(capacity * 1000 / specific_power / np.pi)
# Design Hub Height
scaling = base_hub_height / (np.exp(-0.84976623 * np.log(reference_wind_speed) + 6.1879937))
hub_height = scaling * np.exp(-0.84976623 * np.log(wind_speed) + 6.1879937)
if multi:
lt20 = hub_height < (rotor_diam / 2 + min_tip_height)
if lt20.any():
hub_height[lt20] = rotor_diam[lt20] / 2 + min_tip_height
else:
if hub_height < (rotor_diam / 2 + min_tip_height):
hub_height = rotor_diam / 2 + min_tip_height
output = dict(capacity=capacity, hub_height=hub_height, rotor_diam=rotor_diam, specific_power=specific_power)
if multi:
return pd.DataFrame(output)
else:
return output
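# Usage sketch: a scalar wind speed returns a dict, an array returns a pandas DataFrame.
# 6.7 m/s reproduces the baseline turbine design by construction; 8.0 m/s is just an
# illustrative value.
#   onshore_turbine_from_avg_wind_speed(6.7)          # -> dict with the baseline design
#   onshore_turbine_from_avg_wind_speed([6.7, 8.0])   # -> DataFrame, one row per wind speed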
| 43.72043
| 209
| 0.712494
|
d718d6d48fdb5c5d4bfb2a7ba8eebd82e00e9586
| 375
|
py
|
Python
|
deepleasy/migrations/0002_progress_task_id.py
|
Bechma/deepleasy-backend
|
d536aa79a45af673bd53137b041c60bd33d7130f
|
[
"Apache-2.0"
] | 1
|
2020-12-12T14:26:52.000Z
|
2020-12-12T14:26:52.000Z
|
deepleasy/migrations/0002_progress_task_id.py
|
Bechma/deepleasy-backend
|
d536aa79a45af673bd53137b041c60bd33d7130f
|
[
"Apache-2.0"
] | 7
|
2019-12-04T23:38:56.000Z
|
2022-02-10T00:16:17.000Z
|
deepleasy/migrations/0002_progress_task_id.py
|
Bechma/deepleasy-backend
|
d536aa79a45af673bd53137b041c60bd33d7130f
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-23 14:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('deepleasy', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='progress',
name='task_id',
field=models.TextField(default=''),
),
]
| 19.736842
| 47
| 0.581333
|
4f382bc07143a55fe0cbd4abc73e8c1acd9e0861
| 8,785
|
py
|
Python
|
pysit/util/derivatives/fd_tools.py
|
zfang-slim/pysit
|
8fca42b9749841abc302d1f8195a1437fad7ae4d
|
[
"BSD-3-Clause"
] | 64
|
2015-09-08T06:23:27.000Z
|
2022-03-09T23:35:24.000Z
|
pysit/util/derivatives/fd_tools.py
|
zfang-slim/pysit
|
8fca42b9749841abc302d1f8195a1437fad7ae4d
|
[
"BSD-3-Clause"
] | 23
|
2015-10-08T01:14:24.000Z
|
2021-07-15T11:37:05.000Z
|
pysit/util/derivatives/fd_tools.py
|
zfang-slim/pysit
|
8fca42b9749841abc302d1f8195a1437fad7ae4d
|
[
"BSD-3-Clause"
] | 48
|
2015-06-25T14:48:22.000Z
|
2021-12-06T19:50:25.000Z
|
import warnings
import math
import numpy as np
from pyamg.gallery import stencil_grid
__all__ = ['cd_coeffs', 'fd_stencil', 'stencil_grid', 'fd_coeffs', 'build_1D_fd']
cd_coeffs = {
1 : { 1 : None,
2 : [-0.5, 0, 0.5],
3 : None,
4 : [1./12, -2./3, 0., 2./3, -1./12],
5 : None,
6 : [-1.0/60.0, 3.0/20.0, -3.0/4.0, 0.0, 3.0/4.0, -3.0/20.0, 1.0/60.0],
7 : None,
8 : [1.0/280.0, -4.0/105.0, 1.0/5.0, -4.0/5.0, 0.0, 4.0/5.0, -1.0/5.0, 4.0/105.0, -1.0/280.0],
},
2 : { 1 : None,
2 : [1.0, -2.0, 1.0],
3 : None,
4 : [-1.0/12.0, 4.0/3.0, -5.0/2.0, 4.0/3.0, -1.0/12.0],
5 : None,
6 : [1.0/90.0, -3.0/20.0, 3.0/2.0, -49.0/18.0, 3.0/2.0, -3.0/20.0, 1.0/90.0],
7 : None,
8 : [-1.0/560.0, 8.0/315.0, -1.0/5.0, 8.0/5.0, -205.0/72.0, 8.0/5.0, -1.0/5.0, 8.0/315.0, -1.0/560.0],
}
}
def fd_stencil(base_stencil, dim, axis='all'):
if axis == 'all':
axes = list(range(dim))
else:
if axis >= dim: raise ValueError()
axes = [axis]
ln = len(base_stencil)
mid = int(np.floor(ln/2))
stencil = np.zeros([ln for x in range(dim)])
for axis in axes:
# shift the axes around so that they match our coordinate system (see
# domain.py for more details)
if dim == 3:
warnings.warn('Behavior for 3D problems is not confirmed to be proper!!!')
# ax = np.mod(axis+(dim-1),dim)
ax = axis
stencil[[mid if x!=ax else slice(None) for x in range(dim)]] += base_stencil
return stencil
def fd_coeffs(derivative, params):
"""
%---------------------------------
% finite-difference weights
% (Fornberg algorithm)
%
% z: expansion point
% x: vector of evaluation points
% m: order of derivative
%
% Example: cwei = FDweights(0,[0 1 2],1);
% gives cwei = [-3/2 2 -1/2]
%
% h f'_0 = -3/2 f_0 + 2 f_1 - 1/2 f_2
%
%---------------------------------
"""
if np.iterable(params[0]):
x = params[0]
z = params[1]
else:
x = np.arange(params[0])
z = params[1]
m = derivative
x = np.asarray(x)
z = float(z)
n = len(x)-1
c1 = 1.
c4 = x[0]-z
C = np.zeros((len(x),m+1))
C[0,0] = 1.
for i in range(1,n+1):
mn = min(i,m)
c2 = 1.
c5 = c4
c4 = x[i]-z
for j in range(0,i):
c3 = x[i]-x[j]
c2 *= c3
if j == i-1:
for k in range(mn,0,-1):
C[i,k] = c1*(k*C[i-1,k-1]-c5*C[i-1,k])/c2
C[i,0] = -c1*c5*C[i-1,0]/c2
for k in range(mn,0,-1):
C[j,k] = (c4*C[j,k]-k*C[j,k-1])/c3
C[j,0] = c4*C[j,0]/c3
c1 = c2
C[np.abs(C) < 1e-16] = 0.0
return C[:,-1].flatten()
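# Consistent with the FDweights example in the docstring above: first-derivative weights
# at z = 0 on the points [0, 1, 2] are [-3/2, 2, -1/2]:
#   fd_coeffs(1, ([0, 1, 2], 0))   # -> array([-1.5,  2. , -0.5])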
def build_1D_fd(deriv, order, length, delta, lbc=None, rbc=None, limit_boundary=True):
""" Builds the finite difference stencil matrix in 1D that can be kroncker producted to build higher dimensional operators.
None in the BC slot leaves the purest form of the operator.
"""
bulk_npoints = deriv + order - (1 if not deriv%2 else 0)
bulk_center = int(math.floor(bulk_npoints/2))
boundary_npoints = deriv + order
stencil = fd_coeffs(deriv, (bulk_npoints, bulk_center))
stencil[np.abs(stencil) < 1e-12] = 0.0
L = stencil_grid(stencil, (length,), format='lil')
if not limit_boundary:
L /= (delta**deriv)
return L.tocsr()
# left side
for i in range(bulk_center):
boundary_center = i
if i == 0:
if lbc != 'dirichlet':
warnings.warn('Only Dirichlet boundaries are supported in this matrix construction.')
L[i,:] = 0.0
L[0,0]=1.0
# else: #lbc == 'neumann'
# # Not sure that this is correct...neumann likely need to be patched after the time step...
# L[i,:] = 0.0
# coeffs = -fd_coeffs(1, (1+order,boundary_center))
# coeffs /= coeffs[0]
# coeffs[0] = 0.0
# L[i,0:(1+order)] = coeffs
else:
L[i,:] = 0
stencil = fd_coeffs(deriv, (boundary_npoints,boundary_center))
stencil[np.abs(stencil) < 1e-12] = 0.0
L[i,0:boundary_npoints] = stencil
# right side
print(boundary_npoints-bulk_center-1)
for i in range(-1, -(boundary_npoints-bulk_center-deriv+1), -1):
boundary_center = boundary_npoints + i
idx = i
print(i, boundary_center, idx)
if idx == -1:
            if rbc != 'dirichlet':
warnings.warn('Only Dirichlet boundaries are supported in this matrix construction.')
L[idx,:] = 0.0
L[-1,-1] = 1.0
# else: #lbc == 'neumann'
# # Not sure that this is correct...neumann likely need to be patched after the time step...
# L[i,:] = 0.0
# coeffs = -fd_coeffs(1, (1+order,boundary_center))
# coeffs /= coeffs[0]
# coeffs[0] = 0.0
# L[i,0:(1+order)] = coeffs
else:
L[idx,:] = 0
stencil = fd_coeffs(deriv, (boundary_npoints,boundary_center))
stencil[np.abs(stencil)<1e-12] = 0.0
L[idx,-boundary_npoints::] = stencil
L /= (delta**deriv)
return L.tocsr()
#
#
#if __name__=='__main__':
#
# from pysit import Domain, PML
#
# pml = PML(0.0, 100,ftype='polynomial')
#
# x_config = (0.0, 3.0, 3, pml, pml)
# y_config = (0.0, 3.0, 3, pml, pml)
# z_config = (0.0, 3.0, 3, pml, pml)
#
# d = Domain( (x_config, y_config, z_config) )
#
# sten = cd_coeffs[2][2]
#
# sx = fd_stencil(sten, 3, 0)
# sy = fd_stencil(sten, 3, 1)
# sz = fd_stencil(sten, 3, 2)
#
# gx = stencil_grid(sx, (3,3,3)).todense()
# gy = stencil_grid(sy, (3,3,3)).todense()
# gz = stencil_grid(sz, (3,3,3)).todense()
#
# print gx
# print gy
# print gz
def test_1st():
L = build_1D_fd(1, 4, 7, 1.0).todense()
correct = np.array([[ 1. , 0. , 0. , 0. , 0. , 0. , 0. ],
[-0.25 , -0.8333333333333334, 1.5 , -0.5 , 0.0833333333333333, 0. , 0. ],
[ 0.0833333333333333, -0.6666666666666666, 0. , 0.6666666666666666, -0.0833333333333333, 0. , 0. ],
[ 0. , 0.0833333333333333, -0.6666666666666666, 0. , 0.6666666666666666, -0.0833333333333333, 0. ],
[ 0. , 0. , 0.0833333333333333, -0.6666666666666666, 0. , 0.6666666666666666, -0.0833333333333333],
[ 0. , 0. , -0.0833333333333333, 0.5 , -1.5 , 0.8333333333333333, 0.25 ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. ]])
assert np.linalg.norm(L-correct) < 1e-14
def test_2nd():
L = build_1D_fd(2, 4, 7, 1.0).todense()
correct = np.array([[ 1. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0.8333333333333333, -1.25 , -0.3333333333333333, 1.1666666666666665, -0.5 , 0.0833333333333333, 0. ],
[-0.0833333333333333, 1.3333333333333333, -2.5 , 1.3333333333333335, -0.0833333333333333, 0. , 0. ],
[ 0. , -0.0833333333333333, 1.3333333333333333, -2.5 , 1.3333333333333335, -0.0833333333333333, 0. ],
[ 0. , 0. , -0.0833333333333333, 1.3333333333333333, -2.5 , 1.3333333333333333, -0.0833333333333333],
[ 0. , 0.0833333333333333, -0.5000000000000001, 1.1666666666666667, -0.333333333333333 , -1.2499999999999996, 0.8333333333333333],
[ 0. , 0. , 0. , 0. , 0. , 0. , 1. ]])
assert np.linalg.norm(L-correct) < 1e-14
if __name__ == '__main__':
pass
# test_1st()
# test_2nd()
| 34.586614
| 173
| 0.448264
|
eb10083e8e3d04b83e5dc90903b16d654ca99781
| 776
|
py
|
Python
|
test_project/config/urls.py
|
wishmaestro/drf-fat-models
|
09b8c8a15140044e570db4e9af3354c42768ec5c
|
[
"MIT"
] | null | null | null |
test_project/config/urls.py
|
wishmaestro/drf-fat-models
|
09b8c8a15140044e570db4e9af3354c42768ec5c
|
[
"MIT"
] | null | null | null |
test_project/config/urls.py
|
wishmaestro/drf-fat-models
|
09b8c8a15140044e570db4e9af3354c42768ec5c
|
[
"MIT"
] | null | null | null |
"""test_project URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path("admin/", admin.site.urls),
]
| 35.272727
| 78
| 0.690722
|
7c0426a02ef9c364c643278ae8e36e4a98685821
| 5,722
|
py
|
Python
|
scripts/mycobot_topics.py
|
lowpair/mycobot_ros
|
344bbf7392a0bdc2a4fdadbd2ff46e4327117c70
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/mycobot_topics.py
|
lowpair/mycobot_ros
|
344bbf7392a0bdc2a4fdadbd2ff46e4327117c70
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/mycobot_topics.py
|
lowpair/mycobot_ros
|
344bbf7392a0bdc2a4fdadbd2ff46e4327117c70
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python2
import time
import os
import sys
import signal
import threading
import rospy
from mycobot_ros.msg import (MycobotAngles, MycobotCoords, MycobotSetAngles, MycobotSetCoords, MycobotGripperStatus, MycobotPumpStatus)
from sensor_msgs.msg import JointState
from pymycobot.mycobot import MyCobot
class Watcher:
"""this class solves two problems with multithreaded
programs in Python, (1) a signal might be delivered
to any thread (which is just a malfeature) and (2) if
the thread that gets the signal is waiting, the signal
is ignored (which is a bug).
The watcher is a concurrent process (not thread) that
waits for a signal and the process that contains the
threads. See Appendix A of The Little Book of Semaphores.
http://greenteapress.com/semaphores/
I have only tested this on Linux. I would expect it to
work on the Macintosh and not work on Windows.
"""
def __init__(self):
""" Creates a child thread, which returns. The parent
thread waits for a KeyboardInterrupt and then kills
the child thread.
"""
self.child = os.fork()
if self.child == 0:
return
else:
self.watch()
def watch(self):
try:
os.wait()
except KeyboardInterrupt:
# I put the capital B in KeyBoardInterrupt so I can
# tell when the Watcher gets the SIGINT
print 'KeyBoardInterrupt'
self.kill()
sys.exit()
def kill(self):
try:
os.kill(self.child, signal.SIGKILL)
except OSError: pass
class MycobotTopics(object):
def __init__(self):
super(MycobotTopics, self).__init__()
rospy.init_node('mycobot_topics')
rospy.loginfo('start ...')
port = rospy.get_param('~port', '/dev/ttyUSB0')
baud = rospy.get_param('~baud', 115200)
rospy.loginfo("%s,%s" % (port, baud))
self.mc = MyCobot(port, baud)
self.lock = threading.Lock()
def start(self):
pa = threading.Thread(target=self.pub_real_angles)
pb = threading.Thread(target=self.pub_real_coords)
sa = threading.Thread(target=self.sub_set_angles)
sb = threading.Thread(target=self.sub_set_coords)
sg = threading.Thread(target=self.sub_gripper_status)
sp = threading.Thread(target=self.sub_pump_status)
pa.setDaemon(True)
pa.start()
pb.setDaemon(True)
pb.start()
sa.setDaemon(True)
sa.start()
sb.setDaemon(True)
sb.start()
sg.setDaemon(True)
sg.start()
sp.setDaemon(True)
sp.start()
pa.join()
pb.join()
sa.join()
sb.join()
sg.join()
sp.join()
def pub_real_angles(self):
pub = rospy.Publisher('mycobot/angles_real', MycobotAngles, queue_size=5)
ma = MycobotAngles()
while not rospy.is_shutdown():
self.lock.acquire()
angles = self.mc.get_angles()
self.lock.release()
if angles:
ma.joint_1 = angles[0]
ma.joint_2 = angles[1]
ma.joint_3 = angles[2]
ma.joint_4 = angles[3]
ma.joint_5 = angles[4]
ma.joint_6 = angles[5]
pub.publish(ma)
time.sleep(.25)
def pub_real_coords(self):
pub = rospy.Publisher('mycobot/coords_real', MycobotCoords, queue_size=5)
ma = MycobotCoords()
while not rospy.is_shutdown():
self.lock.acquire()
coords = self.mc.get_coords()
self.lock.release()
if coords:
ma.x = coords[0]
ma.y = coords[1]
ma.z = coords[2]
ma.rx = coords[3]
ma.ry = coords[4]
ma.rz = coords[5]
pub.publish(ma)
time.sleep(.25)
def sub_set_angles(self):
def callback(data):
angles = [data.joint_1, data.joint_2, data.joint_3, data.joint_4, data.joint_5, data.joint_6]
sp = int(data.speed)
self.mc.send_angles(angles, sp)
sub = rospy.Subscriber('mycobot/angles_goal', MycobotSetAngles, callback=callback)
rospy.spin()
def sub_set_coords(self):
def callback(data):
angles = [data.x, data.y, data.z, data.rx, data.ry, data.rz]
sp = int(data.speed)
model = int(data.model)
self.mc.send_coords(angles, sp, model)
sub = rospy.Subscriber('mycobot/coords_goal', MycobotSetCoords, callback=callback)
rospy.spin()
def sub_gripper_status(self):
def callback(data):
if data.Status:
self.mc.set_gripper_state(0, 80)
else:
self.mc.set_gripper_state(1, 80)
sub = rospy.Subscriber('mycobot/gripper_status', MycobotGripperStatus, callback=callback)
rospy.spin()
def sub_pump_status(self):
def callback(data):
if data.Status:
self.mc.set_basic_output(2, 0)
self.mc.set_basic_output(5, 0)
else:
self.mc.set_basic_output(2, 1)
self.mc.set_basic_output(5, 1)
sub = rospy.Subscriber('mycobot/pump_status', MycobotPumpStatus, callback=callback)
rospy.spin()
if __name__ == '__main__':
Watcher()
mc_topics = MycobotTopics()
mc_topics.start()
# while True:
# mc_topics.pub_real_coords()
# mc_topics.sub_set_angles()
pass
| 31.097826
| 135
| 0.574799
|
d8d8de4f540ab11477217b9ddd173cb4ef0737e2
| 4,804
|
py
|
Python
|
SCSCons/Tool/lex.py
|
Relintai/pandemonium_engine
|
3de05db75a396b497f145411f71eb363572b38ae
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
SCSCons/Tool/lex.py
|
Relintai/pandemonium_engine
|
3de05db75a396b497f145411f71eb363572b38ae
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
SCSCons/Tool/lex.py
|
Relintai/pandemonium_engine
|
3de05db75a396b497f145411f71eb363572b38ae
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Tool-specific initialization for lex.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
import os.path
import sys
import SCons.Action
import SCons.Tool
import SCons.Util
import SCons.Warnings
from SCons.Platform.mingw import MINGW_DEFAULT_PATHS
from SCons.Platform.cygwin import CYGWIN_DEFAULT_PATHS
from SCons.Platform.win32 import CHOCO_DEFAULT_PATH
LexAction = SCons.Action.Action("$LEXCOM", "$LEXCOMSTR")
if sys.platform == 'win32':
BINS = ['flex', 'lex', 'win_flex']
else:
BINS = ["flex", "lex"]
def lexEmitter(target, source, env):
sourceBase, sourceExt = os.path.splitext(SCons.Util.to_String(source[0]))
if sourceExt == ".lm": # If using Objective-C
target = [sourceBase + ".m"] # the extension is ".m".
# This emitter essentially tries to add to the target all extra
# files generated by flex.
# Different options that are used to trigger the creation of extra files.
fileGenOptions = ["--header-file=", "--tables-file="]
lexflags = env.subst("$LEXFLAGS", target=target, source=source)
for option in SCons.Util.CLVar(lexflags):
for fileGenOption in fileGenOptions:
l = len(fileGenOption)
if option[:l] == fileGenOption:
# A file generating option is present, so add the
# file name to the target list.
fileName = option[l:].strip()
target.append(fileName)
return (target, source)
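# Hedged illustration (an assumption, not shipped with the tool): in a project
# SConstruct, a file-generating flag placed in $LEXFLAGS makes lexEmitter add
# the generated file to the target list, e.g.:
#
#   env = Environment(tools=['default', 'lex'])
#   env.Append(LEXFLAGS=['--header-file=scanner.h'])
#   # The emitter recognises --header-file= and appends scanner.h to the
#   # targets produced for scanner.l alongside scanner.c.
#   env.CFile(target='scanner.c', source='scanner.l')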
def get_lex_path(env, append_paths=False):
"""
Find the path to the lex tool, searching several possible names
Only called in the Windows case, so the default_path
can be Windows-specific
:param env: current construction environment
:param append_paths: if set, add the path to the tool to PATH
:return: path to lex tool, if found
"""
for prog in BINS:
bin_path = SCons.Tool.find_program_path(
env,
prog,
default_paths=CHOCO_DEFAULT_PATH + MINGW_DEFAULT_PATHS + CYGWIN_DEFAULT_PATHS )
if bin_path:
if append_paths:
env.AppendENVPath('PATH', os.path.dirname(bin_path))
return bin_path
SCons.Warnings.warn(
SCons.Warnings.SConsWarning,
'lex tool requested, but lex or flex binary not found in ENV PATH'
)
def generate(env):
"""Add Builders and construction variables for lex to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
# C
c_file.add_action(".l", LexAction)
c_file.add_emitter(".l", lexEmitter)
c_file.add_action(".lex", LexAction)
c_file.add_emitter(".lex", lexEmitter)
# Objective-C
cxx_file.add_action(".lm", LexAction)
cxx_file.add_emitter(".lm", lexEmitter)
# C++
cxx_file.add_action(".ll", LexAction)
cxx_file.add_emitter(".ll", lexEmitter)
env["LEXFLAGS"] = SCons.Util.CLVar("")
if sys.platform == 'win32':
# ignore the return - we do not need the full path here
_ = get_lex_path(env, append_paths=True)
env["LEX"] = env.Detect(BINS)
if not env.get("LEXUNISTD"):
env["LEXUNISTD"] = SCons.Util.CLVar("")
env["LEXCOM"] = "$LEX $LEXUNISTD $LEXFLAGS -t $SOURCES > $TARGET"
else:
env["LEX"] = env.Detect(BINS)
env["LEXCOM"] = "$LEX $LEXFLAGS -t $SOURCES > $TARGET"
def exists(env):
if sys.platform == 'win32':
return get_lex_path(env)
else:
return env.Detect(BINS)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.830986
| 91
| 0.681099
|
35b527a31ca1fe2a3196889ee3d2ab21e2242e79
| 4,956
|
py
|
Python
|
EvoAlgs/BreakersEvo/GenotypeEncoders/AngularEncoder.py
|
ITMO-NSS-team/breakwaters-evolutionary-optimisation
|
28cd184e5348659adf0da8eb9b6612269aaca4b6
|
[
"MIT"
] | 1
|
2020-10-09T13:59:15.000Z
|
2020-10-09T13:59:15.000Z
|
EvoAlgs/BreakersEvo/GenotypeEncoders/AngularEncoder.py
|
ITMO-NSS-team/breakwaters-evolutionary-optimisation
|
28cd184e5348659adf0da8eb9b6612269aaca4b6
|
[
"MIT"
] | null | null | null |
EvoAlgs/BreakersEvo/GenotypeEncoders/AngularEncoder.py
|
ITMO-NSS-team/breakwaters-evolutionary-optimisation
|
28cd184e5348659adf0da8eb9b6612269aaca4b6
|
[
"MIT"
] | null | null | null |
import copy
import random
import numpy as np
from EvoAlgs.BreakersEvo.GenotypeEncoders.GenotypeEncoder import DirectGenotypeEncoder
class AngularGenotypeEncoder(DirectGenotypeEncoder):
def __init__(self):
self.min_for_init = [0, -75]
self.max_for_init = [5, 75]
def parameterized_genotype_to_breakers(self, genotype, task, grid):
gen_id = 0
new_modifications = []
for modification in task.possible_modifications:
converted_modification = copy.deepcopy(modification)
if converted_modification.breaker_id in task.mod_points_to_optimise:
point_ids_to_optimise_in_modification = task.mod_points_to_optimise[converted_modification.breaker_id]
anchor_point = converted_modification.points[max(point_ids_to_optimise_in_modification) + 1]
prev_anchor = converted_modification.points[max(point_ids_to_optimise_in_modification) + 2]
for point_ind in point_ids_to_optimise_in_modification:
anchor_angle = anchor_point.get_relative_polar_coordinates(prev_anchor)["angle"]
length = genotype[gen_id]
direction = (genotype[gen_id + 1] + anchor_angle + 360) % 360
converted_modification.points[point_ind] = converted_modification.points[point_ind].from_polar(
length,
direction,
anchor_point, grid)
gen_id += 2
prev_anchor = anchor_point
anchor_point = converted_modification.points[point_ind]
new_modifications.append(converted_modification)
return new_modifications
def breakers_to_parameterized_genotype(self, breakers, task, grid):
chromosome = []
for modification in task.possible_modifications:
if modification.breaker_id in task.mod_points_to_optimise:
breaker = [b for b in breakers if b.breaker_id == modification.breaker_id][0]
point_ids_to_optimise_in_modification = task.mod_points_to_optimise[modification.breaker_id]
anchor_point = modification.points[max(point_ids_to_optimise_in_modification) + 1]
prev_anchor = modification.points[max(point_ids_to_optimise_in_modification) + 2]
for point_ind in point_ids_to_optimise_in_modification:
anchor_angle = anchor_point.get_relative_polar_coordinates(prev_anchor)["angle"]
if breaker.points[max(point_ids_to_optimise_in_modification)].x == -1:
length = 0
direction = anchor_angle
prev_anchor = anchor_point
anchor_point = modification.points[point_ind]
else:
last_point = breaker.points[max(point_ids_to_optimise_in_modification)]
length = last_point.get_relative_polar_coordinates(anchor_point)["length"]
direction = last_point.get_relative_polar_coordinates(anchor_point)["angle"]
prev_anchor = anchor_point
anchor_point = last_point
chromosome.append(length)
chromosome.append(direction)
return chromosome
def mutate_components(self, comp_values):
mutation_params_len = [2, 1.5, 1]
mutation_params_dir = [35, 5, 1]
mutation_ratio_len = abs(
np.random.normal(mutation_params_len[0], mutation_params_len[1],
mutation_params_len[2])[0])
mutation_ratio_dir = abs(
np.random.normal(mutation_params_dir[0], mutation_params_dir[1],
mutation_params_dir[2])[0])
sign = 1 if random.random() < 0.5 else -1
comp_value1 = comp_values[0]
comp_value1 += sign * mutation_ratio_len
comp_value1 = round(abs(comp_value1))
comp_value2 = comp_values[1]
comp_value2 += sign * mutation_ratio_dir
comp_value2 = max(comp_value2, self.min_for_init[1])
comp_value2 = min(comp_value2, self.max_for_init[1])
return comp_value1, comp_value2
def crossover_components(self, comp_values1, comp_values2):
part1_rate = abs(random.random())
part2_rate = 1 - part1_rate
new_value1 = round(comp_values1[0] * part1_rate +
comp_values2[0] * part2_rate)
rate = abs(random.random())
if rate < 0.5:
new_value2 = comp_values1[1]
else:
new_value2 = comp_values2[1]
return new_value1, new_value2
def mutate(self, ancestor_genotype):
return super(AngularGenotypeEncoder, self).mutate(ancestor_genotype)
def crossover(self, ancestor_genotype1, ancestor_genotype2):
raise NotImplementedError
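# Hedged usage sketch (demo values only, not part of the encoder): mutate a
# single (length, direction) gene pair with the encoder above.
if __name__ == "__main__":
    _encoder = AngularGenotypeEncoder()
    _new_length, _new_direction = _encoder.mutate_components([3, 20.0])
    print(_new_length, _new_direction)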
| 40.958678
| 118
| 0.635593
|
6781454ef0877b6e751ac3545da893f79647223a
| 8,345
|
py
|
Python
|
var/spack/repos/builtin/packages/curl/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-10-04T20:05:45.000Z
|
2021-10-04T20:05:45.000Z
|
var/spack/repos/builtin/packages/curl/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14
|
2021-05-12T05:45:58.000Z
|
2022-03-04T17:04:12.000Z
|
var/spack/repos/builtin/packages/curl/package.py
|
mtmiller/spack
|
c97c135f1dbe24955048fcc4f0f98281ef0c9300
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2020-10-27T19:25:49.000Z
|
2020-10-27T19:25:49.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack import *
class Curl(AutotoolsPackage):
"""cURL is an open source command line tool and library for
transferring data with URL syntax"""
homepage = "https://curl.se/"
# URL must remain http:// so Spack can bootstrap curl
url = "http://curl.haxx.se/download/curl-7.78.0.tar.bz2"
version('7.79.0', sha256='d607a677f473f79f96c964100327125a6204a39d835dc00dab7fc0129b959f42')
version('7.78.0', sha256='98530b317dc95ccb324bbe4f834f07bb642fbc393b794ddf3434f246a71ea44a')
version('7.77.0', sha256='6c0c28868cb82593859fc43b9c8fdb769314c855c05cf1b56b023acf855df8ea')
version('7.76.1', sha256='7a8e184d7d31312c4ebf6a8cb59cd757e61b2b2833a9ed4f9bf708066e7695e9')
version('7.76.0', sha256='e29bfe3633701590d75b0071bbb649ee5ca4ca73f00649268bd389639531c49a')
version('7.75.0', sha256='50552d4501c178e4cc68baaecc487f466a3d6d19bbf4e50a01869effb316d026')
version('7.74.0', sha256='0f4d63e6681636539dc88fa8e929f934cd3a840c46e0bf28c73be11e521b77a5')
version('7.73.0', sha256='cf34fe0b07b800f1c01a499a6e8b2af548f6d0e044dca4a29d88a4bee146d131')
version('7.72.0', sha256='ad91970864102a59765e20ce16216efc9d6ad381471f7accceceab7d905703ef')
version('7.71.0', sha256='600f00ac2481a89548a4141ddf983fd9386165e1960bac91d0a1c81dca5dd341')
version('7.68.0', sha256='207f54917dd6a2dc733065ccf18d61bb5bebeaceb5df49cd9445483e8623eeb9')
version('7.64.0', sha256='d573ba1c2d1cf9d8533fadcce480d778417964e8d04ccddcc76e591d544cf2eb')
version('7.63.0', sha256='9bab7ed4ecff77020a312d84cc5fb7eb02d58419d218f267477a724a17fd8dd8')
version('7.60.0', sha256='897dfb2204bd99be328279f88f55b7c61592216b0542fcbe995c60aa92871e9b')
version('7.59.0', sha256='b5920ffd6a8c95585fb95070e0ced38322790cb335c39d0dab852d12e157b5a0')
version('7.56.0', sha256='de60a4725a3d461c70aa571d7d69c788f1816d9d1a8a2ef05f864ce8f01279df')
version('7.54.0', sha256='f50ebaf43c507fa7cc32be4b8108fa8bbd0f5022e90794388f3c7694a302ff06')
version('7.53.1', sha256='1c7207c06d75e9136a944a2e0528337ce76f15b9ec9ae4bb30d703b59bf530e8')
version('7.52.1', sha256='d16185a767cb2c1ba3d5b9096ec54e5ec198b213f45864a38b3bda4bbf87389b')
version('7.50.3', sha256='7b7347d976661d02c84a1f4d6daf40dee377efdc45b9e2c77dedb8acf140d8ec')
version('7.50.2', sha256='0c72105df4e9575d68bcf43aea1751056c1d29b1040df6194a49c5ac08f8e233')
version('7.50.1', sha256='3c12c5f54ccaa1d40abc65d672107dcc75d3e1fcb38c267484334280096e5156')
version('7.49.1', sha256='eb63cec4bef692eab9db459033f409533e6d10e20942f4b060b32819e81885f1')
version('7.47.1', sha256='ddc643ab9382e24bbe4747d43df189a0a6ce38fcb33df041b9cb0b3cd47ae98f')
version('7.46.0', sha256='b7d726cdd8ed4b6db0fa1b474a3c59ebbbe4dcd4c61ac5e7ade0e0270d3195ad')
version('7.45.0', sha256='65154e66b9f8a442b57c436904639507b4ac37ec13d6f8a48248f1b4012b98ea')
version('7.44.0', sha256='1e2541bae6582bb697c0fbae49e1d3e6fad5d05d5aa80dbd6f072e0a44341814')
version('7.43.0', sha256='baa654a1122530483ccc1c58cc112fec3724a82c11c6a389f1e6a37dc8858df9')
version('7.42.1', sha256='e2905973391ec2dfd7743a8034ad10eeb58dab8b3a297e7892a41a7999cac887')
default_tls = 'openssl'
if sys.platform == 'darwin':
default_tls = 'secure_transport'
# TODO: add dependencies for other possible TLS backends
values_tls = [
# 'amissl',
# 'bearssl',
'gnutls',
'mbedtls',
# 'mesalink',
'nss',
'openssl',
# 'rustls',
# 'schannel',
'secure_transport',
# 'wolfssl',
]
variant('tls', default=default_tls, description='TLS backend', values=values_tls, multi=True)
variant('nghttp2', default=False, description='build nghttp2 library (requires C++11)')
variant('libssh2', default=False, description='enable libssh2 support')
variant('libssh', default=False, description='enable libssh support') # , when='7.58:')
variant('gssapi', default=False, description='enable Kerberos support')
variant('librtmp', default=False, description='enable Rtmp support')
variant('ldap', default=False, description='enable ldap support')
variant('libidn2', default=False, description='enable libidn2 support')
conflicts('+libssh', when='@:7.57')
    # on OSX and --with-ssh the configure step fails with
# one or more libs available at link-time are not available run-time
# unless the libssh are installed externally (e.g. via homebrew), even
# though spack isn't supposed to know about such a libssh installation.
# C.f. https://github.com/spack/spack/issues/7777
conflicts('platform=darwin', when='+libssh2')
conflicts('platform=darwin', when='+libssh')
conflicts('platform=cray', when='tls=secure_transport', msg='Only supported on macOS')
conflicts('platform=linux', when='tls=secure_transport', msg='Only supported on macOS')
conflicts('tls=mbedtls', when='@:7.45')
depends_on('gnutls', when='tls=gnutls')
depends_on('mbedtls', when='tls=mbedtls')
depends_on('nss', when='tls=nss')
depends_on('openssl', when='tls=openssl')
depends_on('libidn2', when='+libidn2')
depends_on('zlib')
depends_on('nghttp2', when='+nghttp2')
depends_on('libssh2', when='+libssh2')
depends_on('libssh', when='+libssh')
depends_on('krb5', when='+gssapi')
# curl queries pkgconfig for openssl compilation flags
depends_on('pkgconfig', type='build')
def configure_args(self):
spec = self.spec
args = [
'--with-zlib=' + spec['zlib'].prefix,
# Prevent unintentional linking against system libraries: we could
# add variants for these in the future
'--without-brotli',
'--without-libgsasl',
'--without-libpsl',
'--without-zstd',
'--without-ca-bundle',
'--without-ca-path',
'--with-ca-fallback',
]
# https://daniel.haxx.se/blog/2021/06/07/bye-bye-metalink-in-curl/
# We always disable it explicitly, but the flag is gone in newer
# versions.
if spec.satisfies('@:7.77'):
args.append('--without-libmetalink')
if spec.satisfies('+gssapi'):
args.append('--with-gssapi=' + spec['krb5'].prefix)
else:
args.append('--without-gssapi')
args += self.with_or_without('tls')
args += self.with_or_without('libidn2', 'prefix')
args += self.with_or_without('librtmp')
args += self.with_or_without('nghttp2')
args += self.with_or_without('libssh2')
args += self.with_or_without('libssh')
args += self.enable_or_disable('ldap')
return args
def with_or_without_gnutls(self, activated):
if activated:
return '--with-gnutls=' + self.spec['gnutls'].prefix
else:
return '--without-gnutls'
def with_or_without_mbedtls(self, activated):
if self.spec.satisfies('@7.46:'):
if activated:
return '--with-mbedtls=' + self.spec['mbedtls'].prefix
else:
return '--without-mbedtls'
def with_or_without_nss(self, activated):
if activated:
return '--with-nss=' + self.spec['nss'].prefix
else:
return '--without-nss'
def with_or_without_openssl(self, activated):
if self.spec.satisfies('@7.77:'):
if activated:
return '--with-openssl=' + self.spec['openssl'].prefix
else:
return '--without-openssl'
else:
if activated:
return '--with-ssl=' + self.spec['openssl'].prefix
else:
return '--without-ssl'
def with_or_without_secure_transport(self, activated):
if self.spec.satisfies('@7.65:'):
if activated:
return '--with-secure-transport'
else:
return '--without-secure-transport'
else:
if activated:
return '--with-darwinssl'
else:
return '--without-darwinssl'
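    # Hedged illustration (not part of the recipe): for a spec such as
    # `curl@7.78.0 tls=openssl +nghttp2 ~ldap`, configure_args() above resolves,
    # among others, to '--with-openssl=<openssl prefix>' (via
    # with_or_without_openssl on 7.77 and newer), '--with-nghttp2' and
    # '--disable-ldap'.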
| 46.104972
| 97
| 0.678969
|
6830ad9eb17470175cc81002eafc43c9fb113b4d
| 1,538
|
py
|
Python
|
setup.py
|
OaklandPeters/pathological
|
c561eb30df8cdcc0f277a17cd08a03cf173e312f
|
[
"MIT"
] | null | null | null |
setup.py
|
OaklandPeters/pathological
|
c561eb30df8cdcc0f277a17cd08a03cf173e312f
|
[
"MIT"
] | null | null | null |
setup.py
|
OaklandPeters/pathological
|
c561eb30df8cdcc0f277a17cd08a03cf173e312f
|
[
"MIT"
] | null | null | null |
from setuptools import setup
long_description = '''
Pathological
===========================
Unit-testing data-sets on the edge of sanity.
A suite of examples of poorly behaving data, ready for unit-testing your
libraries to death, along with unit-testing tools to simplify using them.
'''
classifiers = [
# Select one 'Development Status'
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
'Development Status :: 1 - Planning',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Topic :: Utilities' # only if appropriate
]
version = open('VERSION').read().strip()
setup(
name='pathological',
version=version,
author='Oakland John Peters',
author_email='oakland.peters@gmail.com',
description="Unit-testing data-sets on the edge of sanity.",
long_description=long_description,
url='http://bitbucket.org/OPeters/pathological',
license='MIT',
packages=['pathological'],
include_package_data=True,
classifiers=classifiers,
)
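# Hedged note (not part of the original file): building artifacts from this
# configuration is typically done from the repository root, e.g.
#   pip install .            # local install; version is read from VERSION
#   python setup.py sdist    # source distribution written under dist/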
| 30.156863
| 78
| 0.664499
|
6c366349d9e2aad51179702923c5f4b65325af7f
| 12,635
|
py
|
Python
|
metadata-ingestion/src/datahub/ingestion/source/ge_data_profiler.py
|
l0ginp/datahub
|
5e79b7a65bee8dc41a7fd6042f709a281f59eb85
|
[
"Apache-2.0"
] | 1
|
2021-09-08T06:07:30.000Z
|
2021-09-08T06:07:30.000Z
|
metadata-ingestion/src/datahub/ingestion/source/ge_data_profiler.py
|
l0ginp/datahub
|
5e79b7a65bee8dc41a7fd6042f709a281f59eb85
|
[
"Apache-2.0"
] | null | null | null |
metadata-ingestion/src/datahub/ingestion/source/ge_data_profiler.py
|
l0ginp/datahub
|
5e79b7a65bee8dc41a7fd6042f709a281f59eb85
|
[
"Apache-2.0"
] | 1
|
2021-07-13T16:56:13.000Z
|
2021-07-13T16:56:13.000Z
|
import contextlib
import dataclasses
import unittest.mock
from typing import Any, Iterable, Optional
from great_expectations.core.expectation_validation_result import (
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
DataContextConfig,
DatasourceConfig,
InMemoryStoreBackendDefaults,
)
from great_expectations.datasource.sqlalchemy_datasource import SqlAlchemyDatasource
from datahub.emitter.mce_builder import get_sys_time
from datahub.ingestion.api.source import SourceReport
from datahub.metadata.schema_classes import (
DatasetFieldProfileClass,
DatasetProfileClass,
HistogramClass,
QuantileClass,
ValueFrequencyClass,
)
from datahub.utilities.groupby import groupby_unsorted
# The reason for this wacky structure is quite fun. GE basically assumes that
# the config structures were generated directly from YML and further assumes that
# they can be `deepcopy`'d without issue. The SQLAlchemy engine and connection
# objects, however, cannot be copied. Despite the fact that the SqlAlchemyDatasource
# class accepts an `engine` argument (which can actually be an Engine or Connection
# object), we cannot use it because of the config loading system. As such, we instead
# pass a "dummy" config into the DatasourceConfig, but then dynamically add the
# engine parameter when the SqlAlchemyDatasource is actually set up, and then remove
# it from the cached config object to avoid those same copying mechanisms. While
# you might expect that this is sufficient because GE caches the Datasource objects
# that it constructs, it actually occasionally bypasses this cache (likely a bug
# in GE), and so we need to wrap every call to GE with the below context manager.
@contextlib.contextmanager
def _properly_init_datasource(conn):
underlying_datasource_init = SqlAlchemyDatasource.__init__
def sqlalchemy_datasource_init(
self: SqlAlchemyDatasource, *args: Any, **kwargs: Any
) -> None:
underlying_datasource_init(self, *args, **kwargs, engine=conn)
self.drivername = conn.dialect.name
del self._datasource_config["engine"]
with unittest.mock.patch(
"great_expectations.datasource.sqlalchemy_datasource.SqlAlchemyDatasource.__init__",
sqlalchemy_datasource_init,
), unittest.mock.patch(
"great_expectations.data_context.store.validations_store.ValidationsStore.set"
):
yield
@dataclasses.dataclass
class DatahubGEProfiler:
data_context: BaseDataContext
report: SourceReport
# The actual value doesn't matter, it just matters that we use it consistently throughout.
datasource_name: str = "my_sqlalchemy_datasource"
def __init__(self, conn, report):
self.conn = conn
self.report = report
data_context_config = DataContextConfig(
datasources={
self.datasource_name: DatasourceConfig(
class_name="SqlAlchemyDatasource",
credentials={
# This isn't actually used since we pass the connection directly,
# but GE parses it to change some of its behavior so it's useful
# to emulate that here.
"url": self.conn.engine.url,
},
)
},
store_backend_defaults=InMemoryStoreBackendDefaults(),
anonymous_usage_statistics={
"enabled": False,
# "data_context_id": <not set>,
},
)
with _properly_init_datasource(self.conn):
self.data_context = BaseDataContext(project_config=data_context_config)
def generate_profile(
self,
pretty_name: str,
schema: str = None,
table: str = None,
limit: int = None,
offset: int = None,
send_sample_values: bool = True,
**kwargs: Any,
) -> DatasetProfileClass:
with _properly_init_datasource(self.conn):
evrs = self._profile_data_asset(
{
"schema": schema,
"table": table,
"limit": limit,
"offset": offset,
**kwargs,
},
pretty_name=pretty_name,
)
profile = self._convert_evrs_to_profile(
evrs, pretty_name=pretty_name, send_sample_values=send_sample_values
)
return profile
def _profile_data_asset(
self,
batch_kwargs: dict,
pretty_name: str,
) -> ExpectationSuiteValidationResult:
# Internally, this uses the GE dataset profiler:
# great_expectations.profile.basic_dataset_profiler.BasicDatasetProfiler
profile_results = self.data_context.profile_data_asset(
self.datasource_name,
batch_kwargs={
"datasource": self.datasource_name,
**batch_kwargs,
},
)
assert profile_results["success"]
assert len(profile_results["results"]) == 1
_suite, evrs = profile_results["results"][0]
return evrs
@staticmethod
def _get_column_from_evr(evr: ExpectationValidationResult) -> Optional[str]:
return evr.expectation_config.kwargs.get("column")
# The list of handled expectations has been created by referencing these files:
# - https://github.com/great-expectations/great_expectations/blob/71e9c1eae433a31416a38de1688e2793e9778299/great_expectations/render/renderer/profiling_results_overview_section_renderer.py
# - https://github.com/great-expectations/great_expectations/blob/71e9c1eae433a31416a38de1688e2793e9778299/great_expectations/render/renderer/column_section_renderer.py
# - https://github.com/great-expectations/great_expectations/blob/71e9c1eae433a31416a38de1688e2793e9778299/great_expectations/profile/basic_dataset_profiler.py
def _convert_evrs_to_profile(
self,
evrs: ExpectationSuiteValidationResult,
pretty_name: str,
send_sample_values: bool,
) -> DatasetProfileClass:
profile = DatasetProfileClass(timestampMillis=get_sys_time())
for col, evrs_for_col in groupby_unsorted(
evrs.results, key=self._get_column_from_evr
):
if col is None:
self._handle_convert_table_evrs(
profile, evrs_for_col, pretty_name=pretty_name
)
else:
self._handle_convert_column_evrs(
profile,
col,
evrs_for_col,
pretty_name=pretty_name,
send_sample_values=send_sample_values,
)
return profile
def _handle_convert_table_evrs(
self,
profile: DatasetProfileClass,
table_evrs: Iterable[ExpectationValidationResult],
pretty_name: str,
) -> None:
# TRICKY: This method mutates the profile directly.
for evr in table_evrs:
exp: str = evr.expectation_config.expectation_type
res: dict = evr.result
if exp == "expect_table_row_count_to_be_between":
profile.rowCount = res["observed_value"]
elif exp == "expect_table_columns_to_match_ordered_list":
profile.columnCount = len(res["observed_value"])
else:
self.report.report_warning(
f"profile of {pretty_name}", f"unknown table mapper {exp}"
)
def _handle_convert_column_evrs( # noqa: C901 (complexity)
self,
profile: DatasetProfileClass,
column: str,
col_evrs: Iterable[ExpectationValidationResult],
pretty_name: str,
send_sample_values: bool,
) -> None:
# TRICKY: This method mutates the profile directly.
column_profile = DatasetFieldProfileClass(fieldPath=column)
profile.fieldProfiles = profile.fieldProfiles or []
profile.fieldProfiles.append(column_profile)
for evr in col_evrs:
exp: str = evr.expectation_config.expectation_type
res: dict = evr.result
if not res:
self.report.report_warning(
f"profile of {pretty_name}", f"{exp} did not yield any results"
)
continue
if exp == "expect_column_unique_value_count_to_be_between":
column_profile.uniqueCount = res["observed_value"]
elif exp == "expect_column_proportion_of_unique_values_to_be_between":
column_profile.uniqueProportion = res["observed_value"]
elif exp == "expect_column_values_to_not_be_null":
column_profile.nullCount = res["unexpected_count"]
if (
"unexpected_percent" in res
and res["unexpected_percent"] is not None
):
column_profile.nullProportion = res["unexpected_percent"] / 100
elif exp == "expect_column_values_to_not_match_regex":
# ignore; generally used for whitespace checks using regex r"^\s+|\s+$"
pass
elif exp == "expect_column_mean_to_be_between":
column_profile.mean = str(res["observed_value"])
elif exp == "expect_column_min_to_be_between":
column_profile.min = str(res["observed_value"])
elif exp == "expect_column_max_to_be_between":
column_profile.max = str(res["observed_value"])
elif exp == "expect_column_median_to_be_between":
column_profile.median = str(res["observed_value"])
elif exp == "expect_column_stdev_to_be_between":
column_profile.stdev = str(res["observed_value"])
elif exp == "expect_column_quantile_values_to_be_between":
if "observed_value" in res:
column_profile.quantiles = [
QuantileClass(quantile=str(quantile), value=str(value))
for quantile, value in zip(
res["observed_value"]["quantiles"],
res["observed_value"]["values"],
)
]
elif exp == "expect_column_values_to_be_in_set":
column_profile.sampleValues = [
str(v) for v in res["partial_unexpected_list"]
]
if not send_sample_values:
column_profile.sampleValues = []
elif exp == "expect_column_kl_divergence_to_be_less_than":
if "details" in res and "observed_partition" in res["details"]:
partition = res["details"]["observed_partition"]
column_profile.histogram = HistogramClass(
[str(v) for v in partition["bins"]],
[
partition["tail_weights"][0],
*partition["weights"],
partition["tail_weights"][1],
],
)
elif exp == "expect_column_distinct_values_to_be_in_set":
if "details" in res and "value_counts" in res["details"]:
# This can be used to produce a bar chart since it includes values and frequencies.
# As such, it is handled differently from expect_column_values_to_be_in_set, which
# is nonexhaustive.
column_profile.distinctValueFrequencies = [
ValueFrequencyClass(value=str(value), frequency=count)
for value, count in res["details"]["value_counts"].items()
]
if not send_sample_values:
column_profile.distinctValueFrequencies = []
elif exp == "expect_column_values_to_be_in_type_list":
# ignore; we already know the types for each column via ingestion
pass
elif exp == "expect_column_values_to_be_unique":
# ignore; this is generally covered by the unique value count test
pass
else:
self.report.report_warning(
f"profile of {pretty_name}",
f"warning: unknown column mapper {exp} in col {column}",
)
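# Hedged usage sketch (not part of the module): profiling a single table through
# the wrapper above. The SQLite URL and table name are placeholders; any
# SQLAlchemy connection works because the engine is injected by
# _properly_init_datasource rather than parsed from the config.
#
#   from sqlalchemy import create_engine
#
#   conn = create_engine("sqlite:///example.db").connect()
#   profiler = DatahubGEProfiler(conn=conn, report=SourceReport())
#   profile = profiler.generate_profile(pretty_name="example.my_table",
#                                       table="my_table")
#   print(profile.rowCount, profile.columnCount)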
| 42.399329
| 192
| 0.620657
|
e44c096a455d97dd848cee1ec77a1cd870d232f0
| 1,146
|
py
|
Python
|
GITcourse/main.py
|
CristianTeodorNita/GITcourse
|
0aa418b5f8700e243bff61ad030350a39a31568c
|
[
"MIT"
] | null | null | null |
GITcourse/main.py
|
CristianTeodorNita/GITcourse
|
0aa418b5f8700e243bff61ad030350a39a31568c
|
[
"MIT"
] | null | null | null |
GITcourse/main.py
|
CristianTeodorNita/GITcourse
|
0aa418b5f8700e243bff61ad030350a39a31568c
|
[
"MIT"
] | null | null | null |
# import c
# import a.a2.hello as hello
# from b.random_number_generator import generate_number_between, generate_until_drawn
# from a import try_again
# from a.a1.number_generator import *
from a import try_again  # if you want to import from a package's __init__.py
from a.a1 import number_generator  # if you want to import a .py module from a package
from a.a2 import hello
from b import random_number_generator
from c.c2 import reward
from c.c3 import result_info
from a.a1.number_generator import generate_until_drawn  # if you want a direct reference to something specific from a module (the whole module is still loaded, but you make explicit that only the imported name is used in this file)
def try_again():
return False
def thank_you():
print("Thank you")
def lotto():
"""
:return:
"""
playing = True
    hello.hello()  # assumes a/a2/hello.py exposes a hello() function
# while playing:
# number = c.retrieve_number_from_user()
# times = generate_until_drawn(number, 1, 100)
# c.inform_about_the_result(times)
# c.get_reward(times)
# playing = try_again()
thank_you()
if __name__ == "__main__":
lotto()
| 27.285714
| 228
| 0.713787
|
42b5bafd2e30fa20130a3fa726bca422f0756fbd
| 1,500
|
py
|
Python
|
jdcloud_sdk/services/vod/apis/CreateTranscodeTemplateGroupRequest.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
jdcloud_sdk/services/vod/apis/CreateTranscodeTemplateGroupRequest.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
jdcloud_sdk/services/vod/apis/CreateTranscodeTemplateGroupRequest.py
|
Tanc009/jdcloud-sdk-python
|
8b045c99bc5b73ca7348e950b6f01e03a27982f5
|
[
"Apache-2.0"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class CreateTranscodeTemplateGroupRequest(JDCloudRequest):
"""
    Create a transcode template group
"""
def __init__(self, parameters, header=None, version="v1"):
super(CreateTranscodeTemplateGroupRequest, self).__init__(
'/transcodeTemplateGroups', 'POST', header, version)
self.parameters = parameters
class CreateTranscodeTemplateGroupParameters(object):
def __init__(self, ):
"""
"""
self.groupName = None
self.templates = None
def setGroupName(self, groupName):
"""
        :param groupName: (Optional) name of the transcode template group
"""
self.groupName = groupName
def setTemplates(self, templates):
"""
:param templates: (Optional)
"""
self.templates = templates
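# Hedged usage sketch (values are illustrative): assembling the request object
# defined above before handing it to the SDK client.
#
#   params = CreateTranscodeTemplateGroupParameters()
#   params.setGroupName('my-template-group')
#   request = CreateTranscodeTemplateGroupRequest(params)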
| 27.777778
| 75
| 0.688
|
3e9f1497bf03e87547cba07b2878e37fa803896a
| 3,883
|
py
|
Python
|
covidxpert/utils/peaks_and_valleys.py
|
LucaCappelletti94/covidxpert
|
8adda25f3d6fb648607c0f8af7d3ff54b42c59fb
|
[
"MIT"
] | 2
|
2020-05-22T12:50:11.000Z
|
2021-03-12T01:00:17.000Z
|
covidxpert/utils/peaks_and_valleys.py
|
LucaCappelletti94/covidxpert
|
8adda25f3d6fb648607c0f8af7d3ff54b42c59fb
|
[
"MIT"
] | 6
|
2020-05-27T19:03:15.000Z
|
2021-03-02T11:12:06.000Z
|
covidxpert/utils/peaks_and_valleys.py
|
LucaCappelletti94/covidxpert
|
8adda25f3d6fb648607c0f8af7d3ff54b42c59fb
|
[
"MIT"
] | 1
|
2020-05-27T07:21:02.000Z
|
2020-05-27T07:21:02.000Z
|
from typing import Tuple
import numpy as np
import cv2
def central_peak(image: np.ndarray, use_left_padding: bool = True, use_right_padding: bool = True) -> int:
"""Return central peak of given image.
The central peak is detected best in blurred images.
Parameters
------------------------
image: np.ndarray,
Image from which to detect the central peak.
use_left_padding: bool = True,
        Whether to add a left padding mask.
    use_right_padding: bool = True,
        Whether to add a right padding mask.
Returns
------------------------
X abscissa of the central peak.
"""
best_x = np.mean(image, axis=0)
if use_left_padding:
best_x[:image.shape[1]//3] = 0
if use_right_padding:
best_x[-image.shape[1]//3:] = 0
return best_x.argmax()
def main_peaks(image: np.ndarray) -> Tuple[int, int, int]:
"""return main peaks of a given image.
In a chest x-ray, these peaks represent the left chest, the spine cord
and the right chest peaks.
These peaks are detected best on a blurred image.
Parameters
------------------
image: np.ndarray,
Image from which we need to detect the central peaks.
Returns
------------------
Triple with left, middle and central peak.
"""
central = central_peak(image)
left_padding = central-image.shape[1]//5
left_peak = central_peak(image[:, :left_padding])
right_padding = central+image.shape[1]//5
right_peak = right_padding + central_peak(image[:, right_padding:])
return left_peak, central, right_peak
def main_valleys(image: np.ndarray, left_factor=0.25, right_factor=0.4) -> Tuple[int, int]:
"""Return the image two main valleys.
The valleys in a chest xray are meant to represent the left and right lungs.
These valleys are detected best on a blurred image.
Parameters
----------------------
image: np.ndarray,
The image to apply the valleys cut on.
    left_factor: float = 0.25,
Percentage to interpolate from left valley minima to center peak.
right_factor: float = 0.4,
Percentage to interpolate from right valley minima to center peak.
Returns
----------------------
Tuple with left and right valley (the lungs in a chest xray).
"""
left_peak, central, right_peak = main_peaks(image)
inverted_image = image.max() - image
left_padding = int(central*left_factor+(1-left_factor)*left_peak)
left_valley = left_padding + central_peak(
inverted_image[:, left_padding:central],
use_right_padding=False
)
# The right is more towards the center because of the heart
right_valley = central + central_peak(
inverted_image[:, central: int(
right_factor*central+(1-right_factor)*right_peak)],
use_left_padding=False
)
return left_valley, right_valley
def valleys_cut(image: np.ndarray, left_factor: float = 0.25, right_factor: float = 0.4) -> np.ndarray:
"""Return the image with black before and after left and right valleys.
These valleys are detected best on a blurred image.
Used in get_spinal_cord_mask.py.
Parameters
----------------------
image: np.ndarray,
The image to apply the valleys cut on.
    left_factor: float = 0.25,
Percentage to interpolate from left valley minima to center peak.
right_factor: float = 0.4,
Percentage to interpolate from right valley minima to center peak.
Returns
----------------------
Image with areas before and after left and right valleys in black.
"""
left_valley, right_valley = main_valleys(
cv2.blur(image, (33, 33)), # pylint: disable=no-member
left_factor,
right_factor
)
copy = image.copy()
copy[:, :left_valley] = 0
copy[:, right_valley:] = 0
return copy
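# Hedged usage sketch ('chest.png' is a placeholder path): load a grayscale
# chest x-ray and keep only the region between the two lung valleys.
#
#   xray = cv2.imread('chest.png', cv2.IMREAD_GRAYSCALE)
#   lungs_only = valleys_cut(xray)
#   left_valley, right_valley = main_valleys(cv2.blur(xray, (33, 33)))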
| 32.090909
| 106
| 0.641772
|
c427c84b38dc40b6b11550a344f48353d0eaeedd
| 9,666
|
py
|
Python
|
google/ads/google_ads/v6/proto/enums/distance_bucket_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2021-04-09T04:28:47.000Z
|
2021-04-09T04:28:47.000Z
|
google/ads/google_ads/v6/proto/enums/distance_bucket_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/enums/distance_bucket_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v6/proto/enums/distance_bucket.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v6/proto/enums/distance_bucket.proto',
package='google.ads.googleads.v6.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v6.enumsB\023DistanceBucketProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V6.Enums\312\002\035Google\\Ads\\GoogleAds\\V6\\Enums\352\002!Google::Ads::GoogleAds::V6::Enums',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n9google/ads/googleads_v6/proto/enums/distance_bucket.proto\x12\x1dgoogle.ads.googleads.v6.enums\x1a\x1cgoogle/api/annotations.proto\"\xad\x04\n\x12\x44istanceBucketEnum\"\x96\x04\n\x0e\x44istanceBucket\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0f\n\x0bWITHIN_700M\x10\x02\x12\x0e\n\nWITHIN_1KM\x10\x03\x12\x0e\n\nWITHIN_5KM\x10\x04\x12\x0f\n\x0bWITHIN_10KM\x10\x05\x12\x0f\n\x0bWITHIN_15KM\x10\x06\x12\x0f\n\x0bWITHIN_20KM\x10\x07\x12\x0f\n\x0bWITHIN_25KM\x10\x08\x12\x0f\n\x0bWITHIN_30KM\x10\t\x12\x0f\n\x0bWITHIN_35KM\x10\n\x12\x0f\n\x0bWITHIN_40KM\x10\x0b\x12\x0f\n\x0bWITHIN_45KM\x10\x0c\x12\x0f\n\x0bWITHIN_50KM\x10\r\x12\x0f\n\x0bWITHIN_55KM\x10\x0e\x12\x0f\n\x0bWITHIN_60KM\x10\x0f\x12\x0f\n\x0bWITHIN_65KM\x10\x10\x12\x0f\n\x0b\x42\x45YOND_65KM\x10\x11\x12\x13\n\x0fWITHIN_0_7MILES\x10\x12\x12\x10\n\x0cWITHIN_1MILE\x10\x13\x12\x11\n\rWITHIN_5MILES\x10\x14\x12\x12\n\x0eWITHIN_10MILES\x10\x15\x12\x12\n\x0eWITHIN_15MILES\x10\x16\x12\x12\n\x0eWITHIN_20MILES\x10\x17\x12\x12\n\x0eWITHIN_25MILES\x10\x18\x12\x12\n\x0eWITHIN_30MILES\x10\x19\x12\x12\n\x0eWITHIN_35MILES\x10\x1a\x12\x12\n\x0eWITHIN_40MILES\x10\x1b\x12\x12\n\x0e\x42\x45YOND_40MILES\x10\x1c\x42\xe8\x01\n!com.google.ads.googleads.v6.enumsB\x13\x44istanceBucketProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V6.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V6\\Enums\xea\x02!Google::Ads::GoogleAds::V6::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_DISTANCEBUCKETENUM_DISTANCEBUCKET = _descriptor.EnumDescriptor(
name='DistanceBucket',
full_name='google.ads.googleads.v6.enums.DistanceBucketEnum.DistanceBucket',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_700M', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_1KM', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_5KM', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_10KM', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_15KM', index=6, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_20KM', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_25KM', index=8, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_30KM', index=9, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_35KM', index=10, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_40KM', index=11, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_45KM', index=12, number=12,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_50KM', index=13, number=13,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_55KM', index=14, number=14,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_60KM', index=15, number=15,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_65KM', index=16, number=16,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BEYOND_65KM', index=17, number=17,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_0_7MILES', index=18, number=18,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_1MILE', index=19, number=19,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_5MILES', index=20, number=20,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_10MILES', index=21, number=21,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_15MILES', index=22, number=22,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_20MILES', index=23, number=23,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_25MILES', index=24, number=24,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_30MILES', index=25, number=25,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_35MILES', index=26, number=26,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WITHIN_40MILES', index=27, number=27,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BEYOND_40MILES', index=28, number=28,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=146,
serialized_end=680,
)
_sym_db.RegisterEnumDescriptor(_DISTANCEBUCKETENUM_DISTANCEBUCKET)
_DISTANCEBUCKETENUM = _descriptor.Descriptor(
name='DistanceBucketEnum',
full_name='google.ads.googleads.v6.enums.DistanceBucketEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_DISTANCEBUCKETENUM_DISTANCEBUCKET,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=680,
)
_DISTANCEBUCKETENUM_DISTANCEBUCKET.containing_type = _DISTANCEBUCKETENUM
DESCRIPTOR.message_types_by_name['DistanceBucketEnum'] = _DISTANCEBUCKETENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DistanceBucketEnum = _reflection.GeneratedProtocolMessageType('DistanceBucketEnum', (_message.Message,), {
'DESCRIPTOR' : _DISTANCEBUCKETENUM,
'__module__' : 'google.ads.googleads_v6.proto.enums.distance_bucket_pb2'
,
'__doc__': """Container for distance buckets of a user’s distance from an
advertiser’s location extension.""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.enums.DistanceBucketEnum)
})
_sym_db.RegisterMessage(DistanceBucketEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
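# Hedged usage note (generated modules must not be edited; this only illustrates
# the standard protobuf enum-wrapper API on the class registered above):
#
#   DistanceBucketEnum.DistanceBucket.Value('WITHIN_1KM')  # -> 3
#   DistanceBucketEnum.DistanceBucket.Name(5)              # -> 'WITHIN_10KM'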
| 41.663793
| 1,506
| 0.755121
|
b722f2801741dcdc881dd6633de26fa8b305e994
| 574
|
py
|
Python
|
0077 Anagram Partitioning.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | null | null | null |
0077 Anagram Partitioning.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | null | null | null |
0077 Anagram Partitioning.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | null | null | null |
from collections import Counter, defaultdict
class Solution:
def solve(self, a, b):
if Counter(a) != Counter(b): return []
counts = defaultdict(int)
diffs = 0
ans = [0]
for i in range(len(a)):
counts[a[i]] += 1
if counts[a[i]] == 1: diffs += 1
counts[b[i]] -= 1
if b[i] != a[i] and counts[b[i]] == -1: diffs += 1
if counts[a[i]] == 0: diffs -= 1
if b[i] != a[i] and counts[b[i]] == 0: diffs -= 1
if diffs == 0: ans.append(i+1)
ans.pop()
return ans
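# Hedged usage example (inputs are illustrative): the returned list holds the
# start indices of a valid anagram partition.
#
#   Solution().solve("catdog", "tacgod")  # -> [0, 3]: "cat"/"tac" and
#                                         #    "dog"/"god" are anagram pairs.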
| 23.916667
| 62
| 0.400697
|
88dfedfed22c8b2f36a2d8edd282ce494ff072c9
| 408
|
py
|
Python
|
fixture/application.py
|
vladTsaparin/python_for_qa
|
de819d4041080daf70ea069acf569effd9702baf
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
vladTsaparin/python_for_qa
|
de819d4041080daf70ea069acf569effd9702baf
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
vladTsaparin/python_for_qa
|
de819d4041080daf70ea069acf569effd9702baf
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from session import SessionHelper
class Application:
def __init__(self):
self.wd = webdriver.Firefox()
self.wd.implicitly_wait(60)
self.session = SessionHelper(self)
def open_login_page(self):
wd = self.wd
wd.get("https://netfanz:1qaz2wsx0@netfanz.inprogress.rocks/auth/login")
def destroy(self):
self.wd.quit()
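# Hedged usage sketch (not part of the fixture): how a test might drive it; the
# exact login helper exposed by SessionHelper is an assumption here.
#
#   app = Application()
#   app.open_login_page()
#   # app.session.<login helper>(...)  # depends on SessionHelper's API
#   app.destroy()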
| 22.666667
| 79
| 0.666667
|
292f5f527aa15960a696850cc45a9bc18562ad8c
| 16,006
|
py
|
Python
|
openquake.hazardlib/openquake/hazardlib/tests/geo/surface/_simple_fault_test_data.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
openquake.hazardlib/openquake/hazardlib/tests/geo/surface/_simple_fault_test_data.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
openquake.hazardlib/openquake/hazardlib/tests/geo/surface/_simple_fault_test_data.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
# The Hazard Library
# Copyright (C) 2012-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TEST_1_MESH = [
[(0.0, 0.0, 0.0), (0.0, 0.00899322029396, 6.12323399574e-17),
(0.0, 0.0179864405879, 1.22464679915e-16),
(0.0, 0.0269796608819, 1.83697019872e-16),
(0.0, 0.0359728811759, 2.44929359829e-16),
(0.00635916826875, 0.0423320485043, 1.83697019872e-16),
(0.0127183370981, 0.0486912157934, 1.22464679915e-16),
(0.0190775065665, 0.0550503830434, 6.12323399574e-17),
(0.0259667166036, 0.0608311135245, -2.46519032882e-32),
(0.0328559274138, 0.0666118439639, -6.12323399574e-17),
(0.0397451390672, 0.0723925743614, -1.22464679915e-16)],
[(0.00557430374083, -0.00306041556938, 0.70710678123),
(0.00557430376276, 0.00593280468202, 0.70710678123),
(0.00557430392202, 0.0149260249334, 0.70710678123),
(0.00557430421862, 0.0239192451849, 0.70710678123),
(0.00557430465255, 0.0329124654363, 0.70710678123),
(0.011933473311, 0.0392716327345, 0.70710678123),
(0.0182926425988, 0.0456307999936, 0.70710678123),
(0.0246518125942, 0.0519899672135, 0.70710678123),
(0.0315410231701, 0.0577706976673, 0.70710678123),
(0.0384302345757, 0.0635514280792, 0.70710678123),
(0.0453194468812, 0.0693321584494, 0.70710678123)],
[(0.0111486075055, -0.00612083112428, 1.41421356246),
(0.0111486075026, 0.00287238908456, 1.41421356246),
(0.0111486077744, 0.0118656092934, 1.41421356246),
(0.0111486083209, 0.0208588295023, 1.41421356246),
(0.011148609142, 0.0298520497111, 1.41421356246),
(0.0175077781572, 0.0362112169793, 1.41421356246),
(0.0238669478703, 0.0425703842083, 1.41421356246),
(0.0302261183597, 0.0489295513981, 1.41421356246),
(0.0371153294442, 0.0547102818245, 1.41421356246),
(0.0440045414153, 0.0604910122091, 1.41421356246),
(0.0508937543429, 0.0662717425519, 1.41421356246)],
[(0.0167229113099, -0.00918124666469, 2.12132034369),
(0.0167229112355, -0.000188026498413, 2.12132034369),
(0.0167229115731, 0.00880519366788, 2.12132034369),
(0.0167229123227, 0.0177984138342, 2.12132034369),
(0.0167229134843, 0.0267916340005, 2.12132034369),
(0.0230820828231, 0.0331508012385, 2.12132034369),
(0.0294412529285, 0.0395099684374, 2.12132034369),
(0.0358004238789, 0.0458691355971, 2.12132034369),
(0.042689635442, 0.0516498659962, 2.12132034369),
(0.0495788479484, 0.0574305963534, 2.12132034369),
(0.0564680614682, 0.0632113266689, 2.12132034369)],
[(0.0222972151701, -0.0122416621906, 2.82842712492),
(0.0222972149774, -0.0032484420669, 2.82842712492),
(0.022297215334, 0.00574477805683, 2.82842712492),
(0.02229721624, 0.0147379981806, 2.82842712492),
(0.0222972176953, 0.0237312183043, 2.82842712492),
(0.0286563873247, 0.0300903855123, 2.82842712492),
(0.0350155577894, 0.0364495526811, 2.82842712492),
(0.0413747291676, 0.0428087198107, 2.82842712492),
(0.0482639411794, 0.0485894501823, 2.82842712492),
(0.0551531541911, 0.0543701805122, 2.82842712492),
(0.0620423682729, 0.0601509108003, 2.82842712492)],
[(0.0278715191017, -0.0153020777021, 3.53553390615),
(0.027871518744, -0.00630885762091, 3.53553390615),
(0.027871519073, 0.00268436246026, 3.53553390615),
(0.0278715200887, 0.0116775825414, 3.53553390615),
(0.027871521791, 0.0206708026226, 3.53553390615),
(0.0342306916779, 0.0270299698005, 3.53553390615),
(0.0405898624687, 0.0333891369392, 3.53553390615),
(0.0469490342419, 0.0397483040387, 3.53553390615),
(0.0538382466722, 0.045529034383, 3.53553390615),
(0.0607274601592, 0.0513097646855, 3.53553390615),
(0.067616674773, 0.0570904949463, 3.53553390615)],
[(0.0334458231209, -0.018362493199, 4.24264068738),
(0.0334458225514, -0.00936927316043, 4.24264068738),
(0.033445822806, -0.000376053121823, 4.24264068738),
(0.0334458238846, 0.00861716691679, 4.24264068738),
(0.0334458257871, 0.0176103869554, 4.24264068738),
(0.0398049958985, 0.0239695541032, 4.24264068738),
(0.0461641669826, 0.0303287212118, 4.24264068738),
(0.0525233391176, 0.0366878882812, 4.24264068738),
(0.0594125519363, 0.0424686185982, 4.24264068738),
(0.0663017658685, 0.0482493488733, 4.24264068738),
(0.0731909809844, 0.0540300791067, 4.24264068738)]]
TEST_2_MESH = [
[(0.0167229113507, -0.00918124659284, 2.12132034356),
(0.0167229117541, -0.000188026301819, 2.12132034356),
(0.0167229121575, 0.00880519391224, 2.12132034356),
(0.0167229125609, 0.0177984143111, 2.12132034356),
(0.0167229129643, 0.0267916346333, 2.12132034356),
(0.023082082083, 0.0331508004971, 2.12132034356),
(0.0294412516492, 0.0395099663216, 2.12132034356),
(0.0358004217413, 0.045869132107, 2.12132034356),
(0.0426896319712, 0.0516498611333, 2.12132034356),
(0.0495788428628, 0.0574305901179, 2.12132034356),
(0.0564680544862, 0.0632113190606, 2.12132034356)],
[(0.0222972152113, -0.012241662119, 2.82842712473),
(0.0222972154964, -0.00324844187056, 2.82842712473),
(0.0222972159189, 0.00574477830094, 2.82842712473),
(0.0222972164786, 0.0147379986572, 2.82842712473),
(0.0222972171758, 0.0237312189369, 2.82842712473),
(0.028656386585, 0.0300903847705, 2.82842712473),
(0.0350155565105, 0.036449550565, 2.82842712473),
(0.0413747270305, 0.0428087163203, 2.82842712473),
(0.048263937709, 0.0485894453193, 2.82842712473),
(0.0551531491059, 0.0543701742765, 2.82842712473),
(0.0620423612913, 0.0601509031919, 2.82842712473)],
[(0.0278715191434, -0.0153020776307, 3.53553390591),
(0.0278715192635, -0.00630885742481, 3.53553390591),
(0.0278715196583, 0.00268436270412, 3.53553390591),
(0.0278715203277, 0.0116775830178, 3.53553390591),
(0.0278715212718, 0.020670803255, 3.53553390591),
(0.0342306909386, 0.0270299690585, 3.53553390591),
(0.0405898611903, 0.0333891348229, 3.53553390591),
(0.0469490321052, 0.0397483005481, 3.53553390591),
(0.0538382432022, 0.0455290295197, 3.53553390591),
(0.0607274550744, 0.0513097584495, 3.53553390591),
(0.0676166677919, 0.0570904873375, 3.53553390591)],
[(0.033445823163, -0.0183624931279, 4.24264068708),
(0.0334458230714, -0.00936927296458, 4.24264068708),
(0.0334458233917, -0.000376052878211, 4.24264068708),
(0.0334458241241, 0.00861716739293, 4.24264068708),
(0.0334458252685, 0.0176103875875, 4.24264068708),
(0.0398049951598, 0.023969553361, 4.24264068708),
(0.0461641657046, 0.0303287190952, 4.24264068708),
(0.0525233369813, 0.0366878847903, 4.24264068708),
(0.0594125484668, 0.0424686137346, 4.24264068708),
(0.0663017607843, 0.0482493426371, 4.24264068708),
(0.0731909740037, 0.0540300714977, 4.24264068708)]]
TEST_4_MESH = [
[(0.0, 0.0, 0.0), (0.0, 0.00899322029396, 6.12323399574e-17),
(0.0, 0.0179864405879, 1.22464679915e-16),
(0.0, 0.0269796608819, 1.83697019872e-16),
(0.0, 0.0359728811759, 2.44929359829e-16),
(0.00635916826875, 0.0423320485043, 1.83697019872e-16),
(0.0127183370981, 0.0486912157934, 1.22464679915e-16),
(0.0190775065665, 0.0550503830434, 6.12323399574e-17),
(0.0259667166036, 0.0608311135245, -2.46519032882e-32),
(0.0328559274138, 0.0666118439639, -6.12323399574e-17),
(0.0397451390672, 0.0723925743614, -1.22464679915e-16)],
[(0.0, 7.01670929853e-15, 1.0),
(0.0, 0.00899322029399, 1.0),
(0.0, 0.017986440588, 1.0),
(0.0, 0.026979660882, 1.0),
(0.0, 0.0359728811759, 1.0),
(0.00635916826875, 0.0423320485043, 1.0),
(0.0127183370981, 0.0486912157935, 1.0),
(0.0190775065665, 0.0550503830434, 1.0),
(0.0259667166036, 0.0608311135245, 1.0),
(0.0328559274138, 0.0666118439639, 1.0),
(0.0397451390672, 0.0723925743614, 1.0)],
[(0.0, 7.01670929853e-15, 2.0),
(0.0, 0.00899322029399, 2.0),
(0.0, 0.017986440588, 2.0),
(0.0, 0.026979660882, 2.0),
(0.0, 0.0359728811759, 2.0),
(0.00635916826875, 0.0423320485043, 2.0),
(0.0127183370981, 0.0486912157935, 2.0),
(0.0190775065665, 0.0550503830434, 2.0),
(0.0259667166036, 0.0608311135245, 2.0),
(0.0328559274138, 0.0666118439639, 2.0),
(0.0397451390672, 0.0723925743614, 2.0)],
[(0.0, 7.01670929853e-15, 3.0),
(0.0, 0.00899322029399, 3.0),
(0.0, 0.017986440588, 3.0),
(0.0, 0.026979660882, 3.0),
(0.0, 0.0359728811759, 3.0),
(0.00635916826875, 0.0423320485043, 3.0),
(0.0127183370981, 0.0486912157935, 3.0),
(0.0190775065665, 0.0550503830434, 3.0),
(0.0259667166036, 0.0608311135245, 3.0),
(0.0328559274138, 0.0666118439639, 3.0),
(0.0397451390672, 0.0723925743614, 3.0)],
[(0.0, 7.01670929853e-15, 4.0),
(0.0, 0.00899322029399, 4.0),
(0.0, 0.017986440588, 4.0),
(0.0, 0.026979660882, 4.0),
(0.0, 0.0359728811759, 4.0),
(0.00635916826875, 0.0423320485043, 4.0),
(0.0127183370981, 0.0486912157935, 4.0),
(0.0190775065665, 0.0550503830434, 4.0),
(0.0259667166036, 0.0608311135245, 4.0),
(0.0328559274138, 0.0666118439639, 4.0),
(0.0397451390672, 0.0723925743614, 4.0)]]
TEST_5_MESH = [
[(179.9, 7.01670929853e-15, 1.0),
(179.90899322, 7.01725988802e-15, 1.0),
(179.917986441, 7.0178104775e-15, 1.0),
(179.926979661, 7.01836106697e-15, 1.0),
(179.935972881, 7.01891165644e-15, 1.0),
(179.944966101, 7.01946224589e-15, 1.0),
(179.953959322, 7.02001283534e-15, 1.0),
(179.962952542, 7.02056342479e-15, 1.0),
(179.971945762, 7.02111401423e-15, 1.0),
(179.980938983, 7.02166460366e-15, 1.0),
(179.989932203, 7.02221519308e-15, 1.0),
(179.998925423, 7.0227657825e-15, 1.0),
(-179.992081356, 7.02331637191e-15, 1.0),
(-179.983088136, 7.02386696131e-15, 1.0),
(-179.974094916, 7.02441755071e-15, 1.0),
(-179.965101696, 7.0249681401e-15, 1.0),
(-179.956108475, 7.02551872949e-15, 1.0),
(-179.947115255, 7.02606931886e-15, 1.0),
(-179.938122035, 7.02661990823e-15, 1.0),
(-179.929128814, 7.0271704976e-15, 1.0),
(-179.920135594, 7.02772108695e-15, 1.0),
(-179.911142374, 7.0282716763e-15, 1.0),
(-179.902149154, 7.02882226564e-15, 1.0)],
[(179.9, 7.01670929853e-15, 2.0),
(179.90899322, 7.01670929853e-15, 2.0),
(179.917986441, 7.01670929853e-15, 2.0),
(179.926979661, 7.01670929853e-15, 2.0),
(179.935972881, 7.01670929853e-15, 2.0),
(179.944966101, 7.01670929853e-15, 2.0),
(179.953959322, 7.01670929853e-15, 2.0),
(179.962952542, 7.01670929853e-15, 2.0),
(179.971945762, 7.01670929853e-15, 2.0),
(179.980938983, 7.01670929853e-15, 2.0),
(179.989932203, 7.01670929853e-15, 2.0),
(179.998925423, 7.01670929853e-15, 2.0),
(-179.992081356, 7.01670929853e-15, 2.0),
(-179.983088136, 7.01670929853e-15, 2.0),
(-179.974094916, 7.01670929853e-15, 2.0),
(-179.965101696, 7.01670929853e-15, 2.0),
(-179.956108475, 7.01670929853e-15, 2.0),
(-179.947115255, 7.01670929853e-15, 2.0),
(-179.938122035, 7.01670929853e-15, 2.0),
(-179.929128814, 7.01670929853e-15, 2.0),
(-179.920135594, 7.01670929853e-15, 2.0),
(-179.911142374, 7.01670929853e-15, 2.0),
(-179.902149154, 7.01670929853e-15, 2.0)],
[(179.9, 7.01670929853e-15, 3.0),
(179.90899322, 7.01670929853e-15, 3.0),
(179.917986441, 7.01670929853e-15, 3.0),
(179.926979661, 7.01670929853e-15, 3.0),
(179.935972881, 7.01670929853e-15, 3.0),
(179.944966101, 7.01670929853e-15, 3.0),
(179.953959322, 7.01670929853e-15, 3.0),
(179.962952542, 7.01670929853e-15, 3.0),
(179.971945762, 7.01670929853e-15, 3.0),
(179.980938983, 7.01670929853e-15, 3.0),
(179.989932203, 7.01670929853e-15, 3.0),
(179.998925423, 7.01670929853e-15, 3.0),
(-179.992081356, 7.01670929853e-15, 3.0),
(-179.983088136, 7.01670929853e-15, 3.0),
(-179.974094916, 7.01670929853e-15, 3.0),
(-179.965101696, 7.01670929853e-15, 3.0),
(-179.956108475, 7.01670929853e-15, 3.0),
(-179.947115255, 7.01670929853e-15, 3.0),
(-179.938122035, 7.01670929853e-15, 3.0),
(-179.929128814, 7.01670929853e-15, 3.0),
(-179.920135594, 7.01670929853e-15, 3.0),
(-179.911142374, 7.01670929853e-15, 3.0),
(-179.902149154, 7.01670929853e-15, 3.0)],
[(179.9, 7.01670929853e-15, 4.0),
(179.90899322, 7.01670929853e-15, 4.0),
(179.917986441, 7.01670929853e-15, 4.0),
(179.926979661, 7.01670929853e-15, 4.0),
(179.935972881, 7.01670929853e-15, 4.0),
(179.944966101, 7.01670929853e-15, 4.0),
(179.953959322, 7.01670929853e-15, 4.0),
(179.962952542, 7.01670929853e-15, 4.0),
(179.971945762, 7.01670929853e-15, 4.0),
(179.980938983, 7.01670929853e-15, 4.0),
(179.989932203, 7.01670929853e-15, 4.0),
(179.998925423, 7.01670929853e-15, 4.0),
(-179.992081356, 7.01670929853e-15, 4.0),
(-179.983088136, 7.01670929853e-15, 4.0),
(-179.974094916, 7.01670929853e-15, 4.0),
(-179.965101696, 7.01670929853e-15, 4.0),
(-179.956108475, 7.01670929853e-15, 4.0),
(-179.947115255, 7.01670929853e-15, 4.0),
(-179.938122035, 7.01670929853e-15, 4.0),
(-179.929128814, 7.01670929853e-15, 4.0),
(-179.920135594, 7.01670929853e-15, 4.0),
(-179.911142374, 7.01670929853e-15, 4.0),
(-179.902149154, 7.01670929853e-15, 4.0)],
[(179.9, 7.01670929853e-15, 5.0),
(179.90899322, 7.01670929853e-15, 5.0),
(179.917986441, 7.01670929853e-15, 5.0),
(179.926979661, 7.01670929853e-15, 5.0),
(179.935972881, 7.01670929853e-15, 5.0),
(179.944966101, 7.01670929853e-15, 5.0),
(179.953959322, 7.01670929853e-15, 5.0),
(179.962952542, 7.01670929853e-15, 5.0),
(179.971945762, 7.01670929853e-15, 5.0),
(179.980938983, 7.01670929853e-15, 5.0),
(179.989932203, 7.01670929853e-15, 5.0),
(179.998925423, 7.01670929853e-15, 5.0),
(-179.992081356, 7.01670929853e-15, 5.0),
(-179.983088136, 7.01670929853e-15, 5.0),
(-179.974094916, 7.01670929853e-15, 5.0),
(-179.965101696, 7.01670929853e-15, 5.0),
(-179.956108475, 7.01670929853e-15, 5.0),
(-179.947115255, 7.01670929853e-15, 5.0),
(-179.938122035, 7.01670929853e-15, 5.0),
(-179.929128814, 7.01670929853e-15, 5.0),
(-179.920135594, 7.01670929853e-15, 5.0),
(-179.911142374, 7.01670929853e-15, 5.0),
(-179.902149154, 7.01670929853e-15, 5.0)],
[(179.9, 7.01670929853e-15, 6.0),
(179.90899322, 7.01670929853e-15, 6.0),
(179.917986441, 7.01670929853e-15, 6.0),
(179.926979661, 7.01670929853e-15, 6.0),
(179.935972881, 7.01670929853e-15, 6.0),
(179.944966101, 7.01670929853e-15, 6.0),
(179.953959322, 7.01670929853e-15, 6.0),
(179.962952542, 7.01670929853e-15, 6.0),
(179.971945762, 7.01670929853e-15, 6.0),
(179.980938983, 7.01670929853e-15, 6.0),
(179.989932203, 7.01670929853e-15, 6.0),
(179.998925423, 7.01670929853e-15, 6.0),
(-179.992081356, 7.01670929853e-15, 6.0),
(-179.983088136, 7.01670929853e-15, 6.0),
(-179.974094916, 7.01670929853e-15, 6.0),
(-179.965101696, 7.01670929853e-15, 6.0),
(-179.956108475, 7.01670929853e-15, 6.0),
(-179.947115255, 7.01670929853e-15, 6.0),
(-179.938122035, 7.01670929853e-15, 6.0),
(-179.929128814, 7.01670929853e-15, 6.0),
(-179.920135594, 7.01670929853e-15, 6.0),
(-179.911142374, 7.01670929853e-15, 6.0),
(-179.902149154, 7.01670929853e-15, 6.0)]]
| 47.35503
| 74
| 0.675497
|
7500ab8aeb3af5829cc69af1a73cb89fd816c328
| 12,003
|
py
|
Python
|
models.py
|
Johnson-yue/Implicit-Competitive-Regularization
|
b2ef9e41e083e9733cdc218b296a486f2e470275
|
[
"Apache-2.0"
] | 1
|
2019-12-28T15:32:08.000Z
|
2019-12-28T15:32:08.000Z
|
models.py
|
ibrahim85/Implicit-Competitive-Regularization
|
3b4f1bab2b3a9d944b4d4a91f88a0c88af0c647b
|
[
"Apache-2.0"
] | null | null | null |
models.py
|
ibrahim85/Implicit-Competitive-Regularization
|
3b4f1bab2b3a9d944b4d4a91f88a0c88af0c647b
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
DIM = 64
class GoodGenerator(nn.Module):
def __init__(self):
super(GoodGenerator, self).__init__()
self.preprocess = nn.Sequential(
nn.Linear(128, 4 * 4 * 4 * DIM),
nn.BatchNorm1d(4 * 4 * 4 * DIM),
nn.ReLU(True),
)
self.main_module = nn.Sequential(
nn.ConvTranspose2d(4 * DIM, 2 * DIM, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(2 * DIM),
nn.ReLU(True),
# nn.Softplus(),
nn.ConvTranspose2d(2 * DIM, DIM, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DIM),
nn.ReLU(True),
# nn.Softplus(),
nn.ConvTranspose2d(DIM, 3, kernel_size=4, stride=2, padding=1),
nn.Tanh(),
)
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * DIM, 4, 4)
output = self.main_module(output)
return output.view(-1, 3, 32, 32)
class GoodDiscriminator(nn.Module):
def __init__(self):
super(GoodDiscriminator, self).__init__()
self.main_module = nn.Sequential(
nn.Conv2d(3, DIM, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
# nn.Softplus(),
# nn.Dropout2d(),
# 16x16
nn.Conv2d(DIM, 2 * DIM, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
# nn.Softplus(),
# nn.Dropout2d(),
# 8x8
nn.Conv2d(2 * DIM, 4 * DIM, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
# nn.Softplus(),
# nn.Dropout2d(),
# 4 x 4
)
self.linear = nn.Linear(4 * 4 * 4 * DIM, 1)
def forward(self, input):
output = self.main_module(input)
output = output.view(-1, 4 * 4 * 4 * DIM)
# print(output.shape)
output = self.linear(output)
# print(output.shape)
return output
class GoodDiscriminatord(nn.Module):
def __init__(self, dropout=0.5):
super(GoodDiscriminatord, self).__init__()
self.main_module = nn.Sequential(
nn.Conv2d(3, DIM, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Dropout2d(dropout),
# 16x16
nn.Conv2d(DIM, 2 * DIM, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Dropout2d(dropout),
# 8x8
nn.Conv2d(2 * DIM, 4 * DIM, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Dropout2d(dropout),
# 4 x 4
)
self.linear = nn.Linear(4 * 4 * 4 * DIM, 1)
def forward(self, input):
output = self.main_module(input)
output = output.view(-1, 4 * 4 * 4 * DIM)
# print(output.shape)
output = self.linear(output)
# print(output.shape)
return output
class dc_d(nn.Module):
def __init__(self):
super(dc_d, self).__init__()
self.conv = nn.Sequential(
# 3 * 32x32
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1),
nn.LeakyReLU(0.01),
nn.MaxPool2d(2, 2),
# 32 * 14x14
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1),
nn.LeakyReLU(0.01),
nn.MaxPool2d(2, 2)
# 64 * 5x5
)
self.fc = nn.Sequential(
nn.Linear(1600, 1024),
nn.LeakyReLU(0.01),
nn.Linear(1024, 1)
)
def forward(self, x):
x = self.conv(x)
x = x.view(x.shape[0], -1)
return self.fc(x)
class dc_g(nn.Module):
def __init__(self, z_dim=96):
super(dc_g, self).__init__()
self.fc = nn.Sequential(
nn.Linear(z_dim, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Linear(1024, 8 * 8 * 128),
nn.ReLU(),
nn.BatchNorm1d(8 * 8 * 128),
)
self.convt = nn.Sequential(
nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=4, stride=2,
padding=1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(in_channels=64, out_channels=3, kernel_size=4, stride=2, padding=1),
nn.Tanh()
)
def forward(self, x):
x = self.fc(x)
x = x.view(x.shape[0], 128, 8, 8)
return self.convt(x)
class DC_g(nn.Module):
def __init__(self, z_dim=100, channel_num=3):
super(DC_g, self).__init__()
self.main_module = nn.Sequential(
nn.ConvTranspose2d(z_dim, 1024, kernel_size=4, stride=1, padding=0),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True),
# 1024 * 4x4
nn.ConvTranspose2d(1024, 512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
# 512 * 8x8
nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
# 256 * 16x16
nn.ConvTranspose2d(256, channel_num, kernel_size=4, stride=2, padding=1),
nn.Tanh()
# 3 * 32x32
)
def forward(self, input):
return self.main_module(input)
class DC_d(nn.Module):
def __init__(self, channel_num=3):
super(DC_d, self).__init__()
self.main_module = nn.Sequential(
nn.Conv2d(channel_num, 256, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True),
# 16x16
nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2, inplace=True),
# 8x8
nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(1024),
nn.LeakyReLU(0.2, inplace=True),
# 1024 * 4x4
nn.Conv2d(1024, 1, kernel_size=4, stride=2, padding=0),
)
def forward(self, input):
return self.main_module(input)
class DC_generator(nn.Module):
def __init__(self, z_dim=100, channel_num=3, feature_num=64):
super(DC_generator, self).__init__()
self.main_module = nn.Sequential(
nn.ConvTranspose2d(z_dim, feature_num * 8, kernel_size=4, stride=1, padding=0,
bias=False),
nn.BatchNorm2d(feature_num * 8),
nn.ReLU(inplace=True),
# (feature_num * 8) * 4x4
nn.ConvTranspose2d(feature_num * 8, feature_num * 4, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num * 4),
nn.ReLU(inplace=True),
# (feature_num * 4) * 8x8
nn.ConvTranspose2d(feature_num * 4, feature_num * 2, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num * 2),
nn.ReLU(inplace=True),
# (feature_num * 2) * 16x16
nn.ConvTranspose2d(feature_num * 2, feature_num, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num),
nn.ReLU(inplace=True),
# (feature_num * 2) * 32x32
nn.ConvTranspose2d(feature_num, channel_num, kernel_size=4, stride=2, padding=1,
bias=False),
# channel_num * 64x64
nn.Tanh()
)
def forward(self, input):
return self.main_module(input)
class DC_discriminator(nn.Module):
def __init__(self, channel_num=3, feature_num=64):
super(DC_discriminator, self).__init__()
self.main_module = nn.Sequential(
# channel_num * 64x64
nn.Conv2d(channel_num, feature_num, kernel_size=4, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# (feature_num) * 32x32
nn.Conv2d(feature_num, feature_num * 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(feature_num * 2),
nn.LeakyReLU(0.2, inplace=True),
# (feature_num * 2) * 16x16
nn.Conv2d(feature_num * 2, feature_num * 4, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num * 4),
nn.LeakyReLU(0.2, inplace=True),
# (feature_num * 4) * 8x8
nn.Conv2d(feature_num * 4, feature_num * 8, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num * 8),
nn.LeakyReLU(0.2, inplace=True),
# (feature_num * 8) * 4x4
nn.Conv2d(feature_num * 8, 1, kernel_size=4, stride=1, padding=0, bias=False),
# feature_num * 16x16
)
def forward(self, input):
return self.main_module(input)
class DC_discriminatord(nn.Module):
def __init__(self, channel_num=3, feature_num=64):
super(DC_discriminatord, self).__init__()
self.main_module = nn.Sequential(
# channel_num * 64x64
nn.Conv2d(channel_num, feature_num, kernel_size=4, stride=2, padding=1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(),
# (feature_num) * 32x32
nn.Conv2d(feature_num, feature_num * 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(feature_num * 2),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(),
# (feature_num * 2) * 16x16
nn.Conv2d(feature_num * 2, feature_num * 4, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num * 4),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(),
# (feature_num * 4) * 8x8
nn.Conv2d(feature_num * 4, feature_num * 8, kernel_size=4, stride=2, padding=1,
bias=False),
nn.BatchNorm2d(feature_num * 8),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(),
# (feature_num * 8) * 4x4
nn.Conv2d(feature_num * 8, 1, kernel_size=4, stride=1, padding=0, bias=False),
# feature_num * 16x16
)
def forward(self, input):
return self.main_module(input)
class dc_D(nn.Module):
def __init__(self):
super(dc_D, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=1),
nn.LeakyReLU(0.01),
# nn.BatchNorm2d(32),
nn.MaxPool2d(2, 2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1),
nn.LeakyReLU(0.01),
# nn.BatchNorm2d(64),
nn.MaxPool2d(2, 2)
)
self.fc = nn.Sequential(
nn.Linear(1024, 1024),
nn.LeakyReLU(0.01),
nn.Linear(1024, 1)
)
def forward(self, x):
x = self.conv(x)
x = x.view(x.shape[0], -1)
return self.fc(x)
class dc_G(nn.Module):
def __init__(self, z_dim=96):
super(dc_G, self).__init__()
self.fc = nn.Sequential(
nn.Linear(z_dim, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Linear(1024, 7 * 7 * 128),
nn.ReLU(),
nn.BatchNorm1d(7 * 7 * 128),
)
self.convt = nn.Sequential(
nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=4, stride=2,
padding=1),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(in_channels=64, out_channels=1, kernel_size=4, stride=2, padding=1),
nn.Tanh()
)
def forward(self, x):
x = self.fc(x)
x = x.view(x.shape[0], 128, 7, 7)
return self.convt(x)
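# A minimal, optional shape-check sketch for the modules above (illustrative
# only; it assumes torch is installed and runs on CPU with random inputs).
if __name__ == '__main__':
    import torch
    g, d = GoodGenerator(), GoodDiscriminator()
    fake = g(torch.randn(4, 128))            # latent batch   -> (4, 3, 32, 32) images
    score = d(fake)                          # critic scores  -> (4, 1)
    assert fake.shape == (4, 3, 32, 32) and score.shape == (4, 1)
    mg, md = dc_G(z_dim=96), dc_D()
    digits = mg(torch.randn(4, 96))          # MNIST-sized    -> (4, 1, 28, 28)
    assert md(digits).shape == (4, 1)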
| 35.302941
| 100
| 0.53195
|
62244b93f3effa54f32d6e52d497ae614a6d9de2
| 40,459
|
py
|
Python
|
tests/integration/standard/test_cluster.py
|
richardARPANET/python-driver
|
19d64f50d708bbdc8dc723befb73f3c17bf7ef53
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/standard/test_cluster.py
|
richardARPANET/python-driver
|
19d64f50d708bbdc8dc723befb73f3c17bf7ef53
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/standard/test_cluster.py
|
richardARPANET/python-driver
|
19d64f50d708bbdc8dc723befb73f3c17bf7ef53
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from collections import deque
from copy import copy
from mock import patch
import time
from uuid import uuid4
import logging
import cassandra
from cassandra.cluster import Cluster, NoHostAvailable, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.concurrent import execute_concurrent
from cassandra.policies import (RoundRobinPolicy, ExponentialReconnectionPolicy,
RetryPolicy, SimpleConvictionPolicy, HostDistance,
WhiteListRoundRobinPolicy, AddressTranslator)
from cassandra.protocol import MAX_SUPPORTED_VERSION
from cassandra.query import SimpleStatement, TraceUnavailable, tuple_factory
from tests.integration import use_singledc, PROTOCOL_VERSION, get_server_versions, get_node, CASSANDRA_VERSION, execute_until_pass, execute_with_long_wait_retry, get_node, MockLoggingHandler
from tests.integration.util import assert_quiescent_pool_state
def setup_module():
use_singledc()
class ClusterTests(unittest.TestCase):
def test_host_resolution(self):
"""
        Test to ensure A records are resolved appropriately.
@since 3.3
@jira_ticket PYTHON-415
@expected_result hostname will be transformed into IP
@test_category connection
"""
cluster = Cluster(contact_points=["localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1)
self.assertTrue('127.0.0.1' in cluster.contact_points_resolved)
def test_host_duplication(self):
"""
        Ensure that duplicate hosts in the contact points are not duplicated in the cluster metadata
@since 3.3
@jira_ticket PYTHON-103
@expected_result duplicate hosts aren't surfaced in cluster.metadata
@test_category connection
"""
cluster = Cluster(contact_points=["localhost", "127.0.0.1", "localhost", "localhost", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1)
cluster.connect()
self.assertEqual(len(cluster.metadata.all_hosts()), 3)
cluster.shutdown()
cluster = Cluster(contact_points=["127.0.0.1", "localhost"], protocol_version=PROTOCOL_VERSION, connect_timeout=1)
cluster.connect()
self.assertEqual(len(cluster.metadata.all_hosts()), 3)
cluster.shutdown()
def test_raise_error_on_control_connection_timeout(self):
"""
Test for initial control connection timeout
test_raise_error_on_control_connection_timeout tests that the driver times out after the set initial connection
timeout. It first pauses node1, essentially making it unreachable. It then attempts to create a Cluster object
via connecting to node1 with a timeout of 1 second, and ensures that a NoHostAvailable is raised, along with
an OperationTimedOut for 1 second.
@expected_errors NoHostAvailable When node1 is paused, and a connection attempt is made.
@since 2.6.0
@jira_ticket PYTHON-206
@expected_result NoHostAvailable exception should be raised after 1 second.
@test_category connection
"""
get_node(1).pause()
cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1)
with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\('errors=Timed out creating connection \(1 seconds\)"):
cluster.connect()
get_node(1).resume()
def test_basic(self):
"""
Test basic connection and usage
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
result = execute_until_pass(session,
"""
CREATE KEYSPACE clustertests
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
self.assertFalse(result)
result = execute_with_long_wait_retry(session,
"""
CREATE TABLE clustertests.cf0 (
a text,
b text,
c text,
PRIMARY KEY (a, b)
)
""")
self.assertFalse(result)
result = session.execute(
"""
INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c')
""")
self.assertFalse(result)
result = session.execute("SELECT * FROM clustertests.cf0")
self.assertEqual([('a', 'b', 'c')], result)
execute_with_long_wait_retry(session, "DROP KEYSPACE clustertests")
cluster.shutdown()
def test_protocol_negotiation(self):
"""
Test for protocol negotiation
test_protocol_negotiation tests that the driver will select the correct protocol version to match
        the correct Cassandra version. Please note that 2.1.5 has a
        bug (https://issues.apache.org/jira/browse/CASSANDRA-9451) that will cause this test to fail;
        it was rectified in 2.1.6.
@since 2.6.0
@jira_ticket PYTHON-240
@expected_result the correct protocol version should be selected
@test_category connection
"""
cluster = Cluster()
self.assertEqual(cluster.protocol_version, MAX_SUPPORTED_VERSION)
session = cluster.connect()
updated_protocol_version = session._protocol_version
updated_cluster_version = cluster.protocol_version
# Make sure the correct protocol was selected by default
if CASSANDRA_VERSION >= '2.2':
self.assertEqual(updated_protocol_version, 4)
self.assertEqual(updated_cluster_version, 4)
elif CASSANDRA_VERSION >= '2.1':
self.assertEqual(updated_protocol_version, 3)
self.assertEqual(updated_cluster_version, 3)
elif CASSANDRA_VERSION >= '2.0':
self.assertEqual(updated_protocol_version, 2)
self.assertEqual(updated_cluster_version, 2)
else:
self.assertEqual(updated_protocol_version, 1)
self.assertEqual(updated_cluster_version, 1)
cluster.shutdown()
def test_connect_on_keyspace(self):
"""
        Ensure clusters that connect on a keyspace do so.
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
result = session.execute(
"""
INSERT INTO test3rf.test (k, v) VALUES (8889, 8889)
""")
self.assertFalse(result)
result = session.execute("SELECT * FROM test3rf.test")
self.assertEqual([(8889, 8889)], result)
# test_connect_on_keyspace
session2 = cluster.connect('test3rf')
result2 = session2.execute("SELECT * FROM test")
self.assertEqual(result, result2)
cluster.shutdown()
def test_set_keyspace_twice(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
session.execute("USE system")
session.execute("USE system")
cluster.shutdown()
def test_default_connections(self):
"""
Ensure errors are not thrown when using non-default policies
"""
Cluster(
load_balancing_policy=RoundRobinPolicy(),
reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0),
default_retry_policy=RetryPolicy(),
conviction_policy_factory=SimpleConvictionPolicy,
protocol_version=PROTOCOL_VERSION
)
def test_connect_to_already_shutdown_cluster(self):
"""
Ensure you cannot connect to a cluster that's been shutdown
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.shutdown()
self.assertRaises(Exception, cluster.connect)
def test_auth_provider_is_callable(self):
"""
Ensure that auth_providers are always callable
"""
self.assertRaises(TypeError, Cluster, auth_provider=1, protocol_version=1)
c = Cluster(protocol_version=1)
self.assertRaises(TypeError, setattr, c, 'auth_provider', 1)
def test_v2_auth_provider(self):
"""
Check for v2 auth_provider compliance
"""
bad_auth_provider = lambda x: {'username': 'foo', 'password': 'bar'}
self.assertRaises(TypeError, Cluster, auth_provider=bad_auth_provider, protocol_version=2)
c = Cluster(protocol_version=2)
self.assertRaises(TypeError, setattr, c, 'auth_provider', bad_auth_provider)
def test_conviction_policy_factory_is_callable(self):
"""
Ensure that conviction_policy_factory are always callable
"""
self.assertRaises(ValueError, Cluster, conviction_policy_factory=1)
def test_connect_to_bad_hosts(self):
"""
Ensure that a NoHostAvailable Exception is thrown
when a cluster cannot connect to given hosts
"""
cluster = Cluster(['127.1.2.9', '127.1.2.10'],
protocol_version=PROTOCOL_VERSION)
self.assertRaises(NoHostAvailable, cluster.connect)
def test_cluster_settings(self):
"""
Test connection setting getters and setters
"""
if PROTOCOL_VERSION >= 3:
raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol")
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
min_requests_per_connection = cluster.get_min_requests_per_connection(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MIN_REQUESTS, min_requests_per_connection)
cluster.set_min_requests_per_connection(HostDistance.LOCAL, min_requests_per_connection + 1)
self.assertEqual(cluster.get_min_requests_per_connection(HostDistance.LOCAL), min_requests_per_connection + 1)
max_requests_per_connection = cluster.get_max_requests_per_connection(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MAX_REQUESTS, max_requests_per_connection)
cluster.set_max_requests_per_connection(HostDistance.LOCAL, max_requests_per_connection + 1)
self.assertEqual(cluster.get_max_requests_per_connection(HostDistance.LOCAL), max_requests_per_connection + 1)
core_connections_per_host = cluster.get_core_connections_per_host(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, core_connections_per_host)
cluster.set_core_connections_per_host(HostDistance.LOCAL, core_connections_per_host + 1)
self.assertEqual(cluster.get_core_connections_per_host(HostDistance.LOCAL), core_connections_per_host + 1)
max_connections_per_host = cluster.get_max_connections_per_host(HostDistance.LOCAL)
self.assertEqual(cassandra.cluster.DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, max_connections_per_host)
cluster.set_max_connections_per_host(HostDistance.LOCAL, max_connections_per_host + 1)
self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1)
def test_refresh_schema(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
original_meta = cluster.metadata.keyspaces
# full schema refresh, with wait
cluster.refresh_schema_metadata()
self.assertIsNot(original_meta, cluster.metadata.keyspaces)
self.assertEqual(original_meta, cluster.metadata.keyspaces)
cluster.shutdown()
def test_refresh_schema_keyspace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
original_meta = cluster.metadata.keyspaces
original_system_meta = original_meta['system']
# only refresh one keyspace
cluster.refresh_keyspace_metadata('system')
current_meta = cluster.metadata.keyspaces
self.assertIs(original_meta, current_meta)
current_system_meta = current_meta['system']
self.assertIsNot(original_system_meta, current_system_meta)
self.assertEqual(original_system_meta.as_cql_query(), current_system_meta.as_cql_query())
cluster.shutdown()
def test_refresh_schema_table(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
original_meta = cluster.metadata.keyspaces
original_system_meta = original_meta['system']
original_system_schema_meta = original_system_meta.tables['local']
# only refresh one table
cluster.refresh_table_metadata('system', 'local')
current_meta = cluster.metadata.keyspaces
current_system_meta = current_meta['system']
current_system_schema_meta = current_system_meta.tables['local']
self.assertIs(original_meta, current_meta)
self.assertIs(original_system_meta, current_system_meta)
self.assertIsNot(original_system_schema_meta, current_system_schema_meta)
self.assertEqual(original_system_schema_meta.as_cql_query(), current_system_schema_meta.as_cql_query())
cluster.shutdown()
def test_refresh_schema_type(self):
if get_server_versions()[0] < (2, 1, 0):
raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1')
if PROTOCOL_VERSION < 3:
raise unittest.SkipTest('UDTs are not specified in change events for protocol v2')
# We may want to refresh types on keyspace change events in that case(?)
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
keyspace_name = 'test1rf'
type_name = self._testMethodName
execute_until_pass(session, 'CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' % (keyspace_name, type_name))
original_meta = cluster.metadata.keyspaces
original_test1rf_meta = original_meta[keyspace_name]
original_type_meta = original_test1rf_meta.user_types[type_name]
# only refresh one type
cluster.refresh_user_type_metadata('test1rf', type_name)
current_meta = cluster.metadata.keyspaces
current_test1rf_meta = current_meta[keyspace_name]
current_type_meta = current_test1rf_meta.user_types[type_name]
self.assertIs(original_meta, current_meta)
self.assertEqual(original_test1rf_meta.export_as_string(), current_test1rf_meta.export_as_string())
self.assertIsNot(original_type_meta, current_type_meta)
self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query())
session.shutdown()
def test_refresh_schema_no_wait(self):
contact_points = ['127.0.0.1']
cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=10,
contact_points=contact_points, load_balancing_policy=WhiteListRoundRobinPolicy(contact_points))
session = cluster.connect()
schema_ver = session.execute("SELECT schema_version FROM system.local WHERE key='local'")[0][0]
new_schema_ver = uuid4()
session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (new_schema_ver,))
try:
agreement_timeout = 1
# cluster agreement wait exceeded
c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=agreement_timeout)
c.connect()
self.assertTrue(c.metadata.keyspaces)
# cluster agreement wait used for refresh
original_meta = c.metadata.keyspaces
start_time = time.time()
self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata)
end_time = time.time()
self.assertGreaterEqual(end_time - start_time, agreement_timeout)
self.assertIs(original_meta, c.metadata.keyspaces)
# refresh wait overrides cluster value
original_meta = c.metadata.keyspaces
start_time = time.time()
c.refresh_schema_metadata(max_schema_agreement_wait=0)
end_time = time.time()
self.assertLess(end_time - start_time, agreement_timeout)
self.assertIsNot(original_meta, c.metadata.keyspaces)
self.assertEqual(original_meta, c.metadata.keyspaces)
c.shutdown()
refresh_threshold = 0.5
# cluster agreement bypass
c = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0)
start_time = time.time()
s = c.connect()
end_time = time.time()
self.assertLess(end_time - start_time, refresh_threshold)
self.assertTrue(c.metadata.keyspaces)
# cluster agreement wait used for refresh
original_meta = c.metadata.keyspaces
start_time = time.time()
c.refresh_schema_metadata()
end_time = time.time()
self.assertLess(end_time - start_time, refresh_threshold)
self.assertIsNot(original_meta, c.metadata.keyspaces)
self.assertEqual(original_meta, c.metadata.keyspaces)
# refresh wait overrides cluster value
original_meta = c.metadata.keyspaces
start_time = time.time()
self.assertRaisesRegexp(Exception, r"Schema metadata was not refreshed.*", c.refresh_schema_metadata,
max_schema_agreement_wait=agreement_timeout)
end_time = time.time()
self.assertGreaterEqual(end_time - start_time, agreement_timeout)
self.assertIs(original_meta, c.metadata.keyspaces)
c.shutdown()
finally:
# TODO once fixed this connect call
session = cluster.connect()
session.execute("UPDATE system.local SET schema_version=%s WHERE key='local'", (schema_ver,))
cluster.shutdown()
def test_trace(self):
"""
Ensure trace can be requested for async and non-async queries
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
def check_trace(trace):
self.assertIsNotNone(trace.request_type)
self.assertIsNotNone(trace.duration)
self.assertIsNotNone(trace.started_at)
self.assertIsNotNone(trace.coordinator)
self.assertIsNotNone(trace.events)
result = session.execute( "SELECT * FROM system.local", trace=True)
check_trace(result.get_query_trace())
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
result = session.execute(statement, trace=True)
check_trace(result.get_query_trace())
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
result = session.execute(statement)
self.assertIsNone(result.get_query_trace())
statement2 = SimpleStatement(query)
future = session.execute_async(statement2, trace=True)
future.result()
check_trace(future.get_query_trace())
statement2 = SimpleStatement(query)
future = session.execute_async(statement2)
future.result()
self.assertIsNone(future.get_query_trace())
prepared = session.prepare("SELECT * FROM system.local")
future = session.execute_async(prepared, parameters=(), trace=True)
future.result()
check_trace(future.get_query_trace())
cluster.shutdown()
def test_trace_timeout(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
future = session.execute_async(statement, trace=True)
future.result()
self.assertRaises(TraceUnavailable, future.get_query_trace, -1.0)
cluster.shutdown()
def test_string_coverage(self):
"""
Ensure str(future) returns without error
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
future = session.execute_async(statement)
self.assertIn(query, str(future))
future.result()
self.assertIn(query, str(future))
self.assertIn('result', str(future))
cluster.shutdown()
def test_idle_heartbeat(self):
interval = 2
cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=interval)
if PROTOCOL_VERSION < 3:
cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
session = cluster.connect()
# This test relies on impl details of connection req id management to see if heartbeats
# are being sent. May need update if impl is changed
connection_request_ids = {}
for h in cluster.get_connection_holders():
for c in h.get_connections():
# make sure none are idle (should have startup messages)
self.assertFalse(c.is_idle)
with c.lock:
connection_request_ids[id(c)] = deque(c.request_ids) # copy of request ids
        # let two heartbeat intervals pass (first one had startup messages in it)
time.sleep(2 * interval + interval/2)
connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()]
# make sure requests were sent on all connections
for c in connections:
expected_ids = connection_request_ids[id(c)]
expected_ids.rotate(-1)
with c.lock:
self.assertListEqual(list(c.request_ids), list(expected_ids))
# assert idle status
self.assertTrue(all(c.is_idle for c in connections))
# send messages on all connections
statements_and_params = [("SELECT release_version FROM system.local", ())] * len(cluster.metadata.all_hosts())
results = execute_concurrent(session, statements_and_params)
for success, result in results:
self.assertTrue(success)
# assert not idle status
self.assertFalse(any(c.is_idle if not c.is_control_connection else False for c in connections))
# holders include session pools and cc
holders = cluster.get_connection_holders()
self.assertIn(cluster.control_connection, holders)
self.assertEqual(len(holders), len(cluster.metadata.all_hosts()) + 1) # hosts pools, 1 for cc
# include additional sessions
session2 = cluster.connect()
holders = cluster.get_connection_holders()
self.assertIn(cluster.control_connection, holders)
self.assertEqual(len(holders), 2 * len(cluster.metadata.all_hosts()) + 1) # 2 sessions' hosts pools, 1 for cc
cluster._idle_heartbeat.stop()
cluster._idle_heartbeat.join()
assert_quiescent_pool_state(self, cluster)
cluster.shutdown()
@patch('cassandra.cluster.Cluster.idle_heartbeat_interval', new=0.1)
def test_idle_heartbeat_disabled(self):
self.assertTrue(Cluster.idle_heartbeat_interval)
# heartbeat disabled with '0'
cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0)
self.assertEqual(cluster.idle_heartbeat_interval, 0)
session = cluster.connect()
        # let two heartbeat intervals pass (first one had startup messages in it)
time.sleep(2 * Cluster.idle_heartbeat_interval)
connections = [c for holders in cluster.get_connection_holders() for c in holders.get_connections()]
        # assert not idle status (should never get reset because there is no heartbeat)
self.assertFalse(any(c.is_idle for c in connections))
cluster.shutdown()
def test_pool_management(self):
# Ensure that in_flight and request_ids quiesce after cluster operations
cluster = Cluster(protocol_version=PROTOCOL_VERSION, idle_heartbeat_interval=0) # no idle heartbeat here, pool management is tested in test_idle_heartbeat
session = cluster.connect()
session2 = cluster.connect()
# prepare
p = session.prepare("SELECT * FROM system.local WHERE key=?")
self.assertTrue(session.execute(p, ('local',)))
# simple
self.assertTrue(session.execute("SELECT * FROM system.local WHERE key='local'"))
# set keyspace
session.set_keyspace('system')
session.set_keyspace('system_traces')
# use keyspace
session.execute('USE system')
session.execute('USE system_traces')
# refresh schema
cluster.refresh_schema_metadata()
cluster.refresh_schema_metadata(max_schema_agreement_wait=0)
assert_quiescent_pool_state(self, cluster)
cluster.shutdown()
def test_profile_load_balancing(self):
"""
Tests that profile load balancing policies are honored.
@since 3.5
@jira_ticket PYTHON-569
@expected_result Execution Policy should be used when applicable.
@test_category config_profiles
"""
query = "select release_version from system.local"
node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
with Cluster(execution_profiles={'node1': node1}) as cluster:
session = cluster.connect()
# default is DCA RR for all hosts
expected_hosts = set(cluster.metadata.all_hosts())
queried_hosts = set()
for _ in expected_hosts:
rs = session.execute(query)
queried_hosts.add(rs.response_future._current_host)
self.assertEqual(queried_hosts, expected_hosts)
# by name we should only hit the one
expected_hosts = set(h for h in cluster.metadata.all_hosts() if h.address == '127.0.0.1')
queried_hosts = set()
for _ in cluster.metadata.all_hosts():
rs = session.execute(query, execution_profile='node1')
queried_hosts.add(rs.response_future._current_host)
self.assertEqual(queried_hosts, expected_hosts)
# use a copied instance and override the row factory
# assert last returned value can be accessed as a namedtuple so we can prove something different
named_tuple_row = rs[0]
self.assertIsInstance(named_tuple_row, tuple)
self.assertTrue(named_tuple_row.release_version)
tmp_profile = copy(node1)
tmp_profile.row_factory = tuple_factory
queried_hosts = set()
for _ in cluster.metadata.all_hosts():
rs = session.execute(query, execution_profile=tmp_profile)
queried_hosts.add(rs.response_future._current_host)
self.assertEqual(queried_hosts, expected_hosts)
tuple_row = rs[0]
self.assertIsInstance(tuple_row, tuple)
with self.assertRaises(AttributeError):
tuple_row.release_version
# make sure original profile is not impacted
self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version)
def test_profile_lb_swap(self):
"""
Tests that profile load balancing policies are not shared
        Creates two LBPs, runs a few queries, and validates that each LBP is exercised
        separately between EPs.
@since 3.5
@jira_ticket PYTHON-569
@expected_result LBP should not be shared.
@test_category config_profiles
"""
query = "select release_version from system.local"
rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
exec_profiles = {'rr1': rr1, 'rr2': rr2}
with Cluster(execution_profiles=exec_profiles) as cluster:
session = cluster.connect()
# default is DCA RR for all hosts
expected_hosts = set(cluster.metadata.all_hosts())
rr1_queried_hosts = set()
rr2_queried_hosts = set()
rs = session.execute(query, execution_profile='rr1')
rr1_queried_hosts.add(rs.response_future._current_host)
rs = session.execute(query, execution_profile='rr2')
rr2_queried_hosts.add(rs.response_future._current_host)
self.assertEqual(rr2_queried_hosts, rr1_queried_hosts)
def test_ta_lbp(self):
"""
Test that execution profiles containing token aware LBP can be added
@since 3.5
@jira_ticket PYTHON-569
@expected_result Queries can run
@test_category config_profiles
"""
query = "select release_version from system.local"
ta1 = ExecutionProfile()
with Cluster() as cluster:
session = cluster.connect()
cluster.add_execution_profile("ta1", ta1)
rs = session.execute(query, execution_profile='ta1')
def test_clone_shared_lbp(self):
"""
Tests that profile load balancing policies are shared on clone
        Creates one LBP, clones it, and ensures that the LBP is shared between
        the two EPs.
@since 3.5
@jira_ticket PYTHON-569
@expected_result LBP is shared
@test_category config_profiles
"""
query = "select release_version from system.local"
rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
exec_profiles = {'rr1': rr1}
with Cluster(execution_profiles=exec_profiles) as cluster:
session = cluster.connect()
rr1_clone = session.execution_profile_clone_update('rr1', row_factory=tuple_factory)
cluster.add_execution_profile("rr1_clone", rr1_clone)
rr1_queried_hosts = set()
rr1_clone_queried_hosts = set()
rs = session.execute(query, execution_profile='rr1')
rr1_queried_hosts.add(rs.response_future._current_host)
rs = session.execute(query, execution_profile='rr1_clone')
rr1_clone_queried_hosts.add(rs.response_future._current_host)
self.assertNotEqual(rr1_clone_queried_hosts, rr1_queried_hosts)
def test_missing_exec_prof(self):
"""
Tests to verify that using an unknown profile raises a ValueError
@since 3.5
@jira_ticket PYTHON-569
@expected_result ValueError
@test_category config_profiles
"""
query = "select release_version from system.local"
rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
exec_profiles = {'rr1': rr1, 'rr2': rr2}
with Cluster(execution_profiles=exec_profiles) as cluster:
session = cluster.connect()
with self.assertRaises(ValueError):
session.execute(query, execution_profile='rr3')
def test_profile_pool_management(self):
"""
Tests that changes to execution profiles correctly impact our cluster's pooling
@since 3.5
@jira_ticket PYTHON-569
@expected_result pools should be correctly updated as EP's are added and removed
@test_category config_profiles
"""
node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster:
session = cluster.connect()
pools = session.get_pool_state()
# there are more hosts, but we connected to the ones in the lbp aggregate
self.assertGreater(len(cluster.metadata.all_hosts()), 2)
self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2')))
# dynamically update pools on add
node3 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.3']))
cluster.add_execution_profile('node3', node3)
pools = session.get_pool_state()
self.assertEqual(set(h.address for h in pools), set(('127.0.0.1', '127.0.0.2', '127.0.0.3')))
def test_add_profile_timeout(self):
"""
Tests that EP Timeouts are honored.
@since 3.5
@jira_ticket PYTHON-569
@expected_result EP timeouts should override defaults
@test_category config_profiles
"""
node1 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster:
session = cluster.connect()
pools = session.get_pool_state()
self.assertGreater(len(cluster.metadata.all_hosts()), 2)
self.assertEqual(set(h.address for h in pools), set(('127.0.0.1',)))
node2 = ExecutionProfile(load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.2']))
self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'node2', node2, pool_wait_timeout=0.0000001)
class LocalHostAdressTranslator(AddressTranslator):
def __init__(self, addr_map=None):
self.addr_map = addr_map
def translate(self, addr):
new_addr = self.addr_map.get(addr)
return new_addr
class TestAddressTranslation(unittest.TestCase):
def test_address_translator_basic(self):
"""
Test host address translation
        Uses a custom AddressTranslator to map all IPs back to one.
        Validates AddressTranslator invocation by ensuring that only metadata associated with a single
        host is populated.
@since 3.3
@jira_ticket PYTHON-69
@expected_result only one hosts' metadata will be populated
@test_category metadata
"""
lh_ad = LocalHostAdressTranslator({'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.1', '127.0.0.3': '127.0.0.1'})
c = Cluster(address_translator=lh_ad)
c.connect()
self.assertEqual(len(c.metadata.all_hosts()), 1)
c.shutdown()
def test_address_translator_with_mixed_nodes(self):
"""
Test host address translation
        Uses a custom AddressTranslator to map the IPs of non-control-connection nodes to each other.
Validates AddressTranslator invocation by ensuring that metadata for mapped hosts is also mapped
@since 3.3
@jira_ticket PYTHON-69
@expected_result metadata for crossed hosts will also be crossed
@test_category metadata
"""
adder_map = {'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.3', '127.0.0.3': '127.0.0.2'}
lh_ad = LocalHostAdressTranslator(adder_map)
c = Cluster(address_translator=lh_ad)
c.connect()
for host in c.metadata.all_hosts():
self.assertEqual(adder_map.get(str(host)), host.broadcast_address)
class ContextManagementTest(unittest.TestCase):
load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1'])
cluster_kwargs = {'load_balancing_policy': load_balancing_policy,
'schema_metadata_enabled': False,
'token_metadata_enabled': False}
def test_no_connect(self):
"""
Test cluster context without connecting.
@since 3.4
@jira_ticket PYTHON-521
@expected_result context should still be valid
@test_category configuration
"""
with Cluster() as cluster:
self.assertFalse(cluster.is_shutdown)
self.assertTrue(cluster.is_shutdown)
def test_simple_nested(self):
"""
Test cluster and session contexts nested in one another.
@since 3.4
@jira_ticket PYTHON-521
        @expected_result cluster/session should be created and shut down appropriately.
@test_category configuration
"""
with Cluster(**self.cluster_kwargs) as cluster:
with cluster.connect() as session:
self.assertFalse(cluster.is_shutdown)
self.assertFalse(session.is_shutdown)
self.assertTrue(session.execute('select release_version from system.local')[0])
self.assertTrue(session.is_shutdown)
self.assertTrue(cluster.is_shutdown)
def test_cluster_no_session(self):
"""
Test cluster context without session context.
@since 3.4
@jira_ticket PYTHON-521
@expected_result Session should be created correctly. Cluster should shutdown outside of context
@test_category configuration
"""
with Cluster(**self.cluster_kwargs) as cluster:
session = cluster.connect()
self.assertFalse(cluster.is_shutdown)
self.assertFalse(session.is_shutdown)
self.assertTrue(session.execute('select release_version from system.local')[0])
self.assertTrue(session.is_shutdown)
self.assertTrue(cluster.is_shutdown)
def test_session_no_cluster(self):
"""
Test session context without cluster context.
@since 3.4
@jira_ticket PYTHON-521
@expected_result session should be created correctly. Session should shutdown correctly outside of context
@test_category configuration
"""
cluster = Cluster(**self.cluster_kwargs)
unmanaged_session = cluster.connect()
with cluster.connect() as session:
self.assertFalse(cluster.is_shutdown)
self.assertFalse(session.is_shutdown)
self.assertFalse(unmanaged_session.is_shutdown)
self.assertTrue(session.execute('select release_version from system.local')[0])
self.assertTrue(session.is_shutdown)
self.assertFalse(cluster.is_shutdown)
self.assertFalse(unmanaged_session.is_shutdown)
unmanaged_session.shutdown()
self.assertTrue(unmanaged_session.is_shutdown)
self.assertFalse(cluster.is_shutdown)
cluster.shutdown()
self.assertTrue(cluster.is_shutdown)
class DuplicateRpcTest(unittest.TestCase):
load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1'])
def setUp(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy)
self.session = self.cluster.connect()
self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.1' WHERE peer='127.0.0.2'")
def tearDown(self):
self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.2' WHERE peer='127.0.0.2'")
self.cluster.shutdown()
def test_duplicate(self):
"""
Test duplicate RPC addresses.
Modifies the system.peers table to make hosts have the same rpc address. Ensures such hosts are filtered out and a message is logged
@since 3.4
@jira_ticket PYTHON-366
@expected_result only one hosts' metadata will be populated
@test_category metadata
"""
mock_handler = MockLoggingHandler()
logger = logging.getLogger(cassandra.cluster.__name__)
logger.addHandler(mock_handler)
test_cluster = self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy)
test_cluster.connect()
warnings = mock_handler.messages.get("warning")
self.assertEqual(len(warnings), 1)
self.assertTrue('multiple' in warnings[0])
logger.removeHandler(mock_handler)
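# A condensed, non-test sketch of the execution-profile pattern the classes
# above exercise (illustrative only; the profile name and query are arbitrary,
# and the row_factory override mirrors test_profile_load_balancing).
def _execution_profile_sketch():
    tuples = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
    tuples.row_factory = tuple_factory
    profiles = {EXEC_PROFILE_DEFAULT: ExecutionProfile(load_balancing_policy=RoundRobinPolicy()),
                'tuples': tuples}
    with Cluster(execution_profiles=profiles) as cluster:
        session = cluster.connect()
        named_row = session.execute("select release_version from system.local")[0]
        tuple_row = session.execute("select release_version from system.local",
                                    execution_profile='tuples')[0]
        return named_row.release_version, tuple_row[0]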
| 40.950405
| 190
| 0.672829
|
3fa287a0cbe0b1cd62da56e7ac2f7f2cd86e4bc0
| 2,109
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/network_watcher.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure/mgmt/network/v2017_06_01/models/network_watcher.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure/mgmt/network/v2017_06_01/models/network_watcher.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NetworkWatcher(Resource):
"""Network watcher in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:ivar provisioning_state: The provisioning state of the resource. Possible
values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.network.v2017_06_01.models.ProvisioningState
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, etag=None):
super(NetworkWatcher, self).__init__(id=id, location=location, tags=tags)
self.etag = etag
self.provisioning_state = None
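# Illustrative construction sketch (hypothetical values): only the writable
# fields are passed; name, type and provisioning_state are read-only and get
# populated by the service.
def _example_network_watcher():
    return NetworkWatcher(
        id=None,                        # let the service assign the resource ID
        location='westus',              # hypothetical region
        tags={'environment': 'dev'},    # hypothetical tags
        etag=None,
    )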
| 35.15
| 85
| 0.589853
|
1521ee80f2e3e0693f226cbe9ec4e4b96d9a6c8f
| 174
|
py
|
Python
|
0009 Largest Sum of Non-Adjacent Numbers.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | 1
|
2020-12-29T21:17:26.000Z
|
2020-12-29T21:17:26.000Z
|
0009 Largest Sum of Non-Adjacent Numbers.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | null | null | null |
0009 Largest Sum of Non-Adjacent Numbers.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | 4
|
2021-09-09T17:42:43.000Z
|
2022-03-18T04:54:03.000Z
|
class Solution:
    def solve(self, nums):
        # dp[i + 2] holds the best non-adjacent sum over nums[:i + 1];
        # the two leading zeros let dp[-2] mean "skip the previous number".
        dp = [0, 0]
        for i in range(len(nums)):
            # take nums[i] on top of the best sum two steps back,
            # carry the previous best forward, or take nothing at all (0)
            dp.append(max(0, nums[i] + dp[-2], dp[-1]))
        return max(dp)
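# Quick worked example (illustrative): for [2, 4, 6, 2, 5] the best
# non-adjacent picks are 2 + 6 + 5 = 13, and dp evolves as
# [0, 0, 2, 4, 8, 8, 13], so max(dp) == 13.
if __name__ == '__main__':
    assert Solution().solve([2, 4, 6, 2, 5]) == 13
    assert Solution().solve([-1, -2, -3]) == 0  # taking nothing beats any negative pick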
| 19.333333
| 51
| 0.488506
|
52022cb43d4898cbd4d3b487f3e79ea343fe0766
| 4,670
|
py
|
Python
|
harness/determined/common/storage/gcs.py
|
gh-determined-ai/determined
|
9a1ab33a3a356b69681b3351629fef4ab98ddb56
|
[
"Apache-2.0"
] | null | null | null |
harness/determined/common/storage/gcs.py
|
gh-determined-ai/determined
|
9a1ab33a3a356b69681b3351629fef4ab98ddb56
|
[
"Apache-2.0"
] | null | null | null |
harness/determined/common/storage/gcs.py
|
gh-determined-ai/determined
|
9a1ab33a3a356b69681b3351629fef4ab98ddb56
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import tempfile
from typing import Optional, Union, no_type_check
import requests.exceptions
import urllib3.exceptions
from determined import errors
from determined.common import storage, util
from determined.common.storage.s3 import normalize_prefix
class GCSStorageManager(storage.CloudStorageManager):
"""
Store and load checkpoints on GCS. Although GCS is similar to S3, some
S3 APIs are not supported on GCS and vice versa. Moreover, Google
recommends using the google-storage-python library to access GCS,
rather than the boto library we use to access S3 -- boto uses
various S3 features that are not supported by GCS.
    Batching is supported by the GCS API for deletion; however, it is not used because
of observed request failures. Batching is not used for uploading
or downloading files, because the GCS API does not support it. Upload/download
performance could be improved by using multiple clients in a multithreaded fashion.
Authentication is currently only supported via the "Application
Default Credentials" method in GCP [1]. Typical configuration:
ensure your VM runs in a service account that has sufficient
permissions to read/write/delete from the GCS bucket where
checkpoints will be stored (this only works when running in GCE).
"""
def __init__(
self,
bucket: str,
prefix: Optional[str] = None,
temp_dir: Optional[str] = None,
) -> None:
super().__init__(temp_dir if temp_dir is not None else tempfile.gettempdir())
import google.cloud.storage
self.client = google.cloud.storage.Client()
self.bucket = self.client.bucket(bucket)
self.prefix = normalize_prefix(prefix)
def get_storage_prefix(self, storage_id: str) -> str:
return os.path.join(self.prefix, storage_id)
@no_type_check
@util.preserve_random_state
def upload(self, src: Union[str, os.PathLike], dst: str) -> None:
src = os.fspath(src)
prefix = self.get_storage_prefix(dst)
logging.info(f"Uploading to GCS: {prefix}")
for rel_path in sorted(self._list_directory(src)):
blob_name = f"{prefix}/{rel_path}"
blob = self.bucket.blob(blob_name)
logging.debug(f"Uploading to GCS: {blob_name}")
from google.api_core import exceptions, retry
retry_network_errors = retry.Retry(
retry.if_exception_type(
ConnectionError,
exceptions.ServerError,
urllib3.exceptions.ProtocolError,
requests.exceptions.ConnectionError,
)
)
if rel_path.endswith("/"):
# Create empty blobs for subdirectories. This ensures
# that empty directories are checkpointed correctly.
retry_network_errors(blob.upload_from_string)(b"")
else:
abs_path = os.path.join(src, rel_path)
retry_network_errors(blob.upload_from_filename)(abs_path)
@util.preserve_random_state
def download(self, src: str, dst: Union[str, os.PathLike]) -> None:
dst = os.fspath(dst)
path = self.get_storage_prefix(src)
logging.info(f"Downloading {path} from GCS")
found = False
# Listing blobs with prefix set and no delimiter is equivalent to a recursive listing. If
# you include a `delimiter="/"` you will get only the file-like blobs inside of a
# directory-like blob.
for blob in self.bucket.list_blobs(prefix=path):
found = True
_dst = os.path.join(dst, os.path.relpath(blob.name, path))
dst_dir = os.path.dirname(_dst)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, exist_ok=True)
# Only create empty directory for keys that end with "/".
# See `upload` method for more context.
if blob.name.endswith("/"):
os.makedirs(_dst, exist_ok=True)
continue
logging.debug(f"Downloading from GCS: {blob.name}")
blob.download_to_filename(_dst)
if not found:
raise errors.CheckpointNotFound(f"Did not find checkpoint {path} in GCS")
@util.preserve_random_state
def delete(self, storage_id: str) -> None:
prefix = self.get_storage_prefix(storage_id)
logging.info(f"Deleting checkpoint {prefix} from GCS")
for blob in self.bucket.list_blobs(prefix=prefix):
logging.debug(f"Deleting {blob.name} from GCS")
blob.delete()
| 39.576271
| 98
| 0.652463
|
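For context, here is a minimal usage sketch for the GCSStorageManager defined above. It assumes the class is importable, that Application Default Credentials are available in the environment, and that the bucket name, prefix and local paths are placeholders invented for the example.

import logging

logging.basicConfig(level=logging.INFO)

# Hypothetical bucket/prefix and paths, for illustration only.
manager = GCSStorageManager(bucket="my-checkpoints", prefix="experiments")

# Upload a local checkpoint directory under the storage id "ckpt-0001",
# download it back into a fresh directory, then delete it from the bucket.
manager.upload(src="/tmp/ckpt-0001", dst="ckpt-0001")
manager.download(src="ckpt-0001", dst="/tmp/restored-ckpt-0001")
manager.delete("ckpt-0001")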
ca79d413b5482d1b9c0b5ace8587eeaa63a31885
| 1,758
|
py
|
Python
|
code/georgia.py
|
jordankeener/ncaa_rosters
|
12e66e9ef7502ab6869e7352ae673c46680eedd0
|
[
"MIT"
] | null | null | null |
code/georgia.py
|
jordankeener/ncaa_rosters
|
12e66e9ef7502ab6869e7352ae673c46680eedd0
|
[
"MIT"
] | null | null | null |
code/georgia.py
|
jordankeener/ncaa_rosters
|
12e66e9ef7502ab6869e7352ae673c46680eedd0
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from urllib.request import FancyURLopener
from bs4 import BeautifulSoup
import pandas as pd
import os
import _proj_functions as proj
import _lookups as lookups
import re
outdir = '../output'
##### georgia #################
school = 'georgia'
url_template = 'https://georgiadogs.com/roster.aspx?path={sporturl}'
sports_dict = lookups.get_sports_dict()
# sport_id: [sporturl, list_number]  (list_number = which <ul> on the page, appended below)
sports_dict['baseball'] = ['baseball']
sports_dict['mens basketball'] = ['mbball']
sports_dict['womens basketball'] = ['wbball']
sports_dict['football'] = ['football']
sports_dict['womens soccer'] = ['wsoc']
sports_dict['mens golf'] = ['mgolf']
sports_dict['womens golf'] = ['wgolf']
sports_dict['mens swimming'] = ['swim']
sports_dict['womens swimming'] = ['swim']
sports_dict['mens tennis'] = ['mten']
sports_dict['womens tennis'] = ['wten']
sports_dict['mens track'] = ['track']
sports_dict['womens track'] = ['track']
sports_dict['womens volleyball'] = ['wvball']
sports_dict['mens cross country'] = ['cross']
sports_dict['womens cross country'] = ['cross']
sports_dict['softball'] = ['softball']
sports_dict['womens equestrian'] = ['equest']
sports_dict['womens gymnastics'] = ['wgym']
# remove empty sports
for (key, value) in sports_dict.copy().items():
if value == []:
del sports_dict[key]
# change list number if not first ul of given classname on page
for (key, value) in sports_dict.items():
if key in ['womens cross country', 'womens swimming', 'womens track']:
value.append(2)
else:
value.append(1)
# loop through sports collecting rosters
rosters = proj.gather_rosters_ul(sports_dict, url_template)
rosters['college'] = school
csvname = school + '_rosters.csv'
rosters.to_csv(os.path.join(outdir, csvname))
| 31.963636
| 71
| 0.717292
|
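The actual roster collection happens inside proj.gather_rosters_ul, whose implementation is not shown in this file. The sketch below illustrates what such a helper might plausibly do; the CSS class name, the returned columns, and the use of requests are assumptions for illustration, not the project's real code.

import pandas as pd
import requests
from bs4 import BeautifulSoup

def gather_rosters_ul_sketch(sports_dict, url_template):
    rows = []
    for sport, (sporturl, list_number) in sports_dict.items():
        html = requests.get(url_template.format(sporturl=sporturl)).text
        soup = BeautifulSoup(html, 'html.parser')
        # assumed class name; roster pages often expose players as a <ul> of <li> entries
        lists = soup.find_all('ul', class_='sidearm-roster-players')
        if len(lists) < list_number:
            continue
        for item in lists[list_number - 1].find_all('li'):
            rows.append({'sport': sport, 'player': item.get_text(strip=True)})
    return pd.DataFrame(rows)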
6d4e605191442b15e23845b3413c4e52af0bdb22
| 2,615
|
py
|
Python
|
art/exceptions.py
|
david-shmailov/adversarial-robustness-toolbox
|
ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420
|
[
"MIT"
] | 1
|
2022-01-31T15:17:20.000Z
|
2022-01-31T15:17:20.000Z
|
art/exceptions.py
|
david-shmailov/adversarial-robustness-toolbox
|
ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420
|
[
"MIT"
] | 1
|
2022-03-18T00:41:02.000Z
|
2022-03-18T00:41:02.000Z
|
art/exceptions.py
|
david-shmailov/adversarial-robustness-toolbox
|
ad8b94d3928abe218cd6ab2eed1c5c21f1d6e420
|
[
"MIT"
] | 1
|
2022-03-22T05:30:31.000Z
|
2022-03-22T05:30:31.000Z
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module containing ART's exceptions.
"""
from typing import List, Tuple, Type, Union
class EstimatorError(TypeError):
"""
Basic exception for errors raised by unexpected estimator types.
"""
def __init__(self, this_class, class_expected_list: List[Union[Type, Tuple[Type]]], classifier_given) -> None:
super().__init__()
self.this_class = this_class
self.class_expected_list = class_expected_list
self.classifier_given = classifier_given
classes_expected_message = ""
for idx, class_expected in enumerate(class_expected_list):
if idx != 0:
classes_expected_message += " and "
if isinstance(class_expected, type):
classes_expected_message += f"{class_expected}"
else:
classes_expected_message += "("
for or_idx, or_class in enumerate(class_expected):
if or_idx != 0:
classes_expected_message += " or "
classes_expected_message += f"{or_class}"
classes_expected_message += ")"
self.message = (
f"{this_class.__name__} requires an estimator derived from {classes_expected_message}, "
f"the provided classifier is an instance of {type(classifier_given)} "
f"and is derived from {classifier_given.__class__.__bases__}."
)
def __str__(self) -> str:
return self.message
| 45.877193
| 120
| 0.689101
|
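A short sketch of how EstimatorError composes its message, assuming the class above is importable; FakeAttack and FakeEstimatorBase are stand-ins invented for the example and are not part of ART.

class FakeAttack:
    pass

class FakeEstimatorBase:
    pass

try:
    raise EstimatorError(FakeAttack, [FakeEstimatorBase, (int, float)], "not-an-estimator")
except EstimatorError as err:
    # Prints a message of the form:
    # "FakeAttack requires an estimator derived from <class '...FakeEstimatorBase'> and
    #  (<class 'int'> or <class 'float'>), the provided classifier is an instance of
    #  <class 'str'> and is derived from (<class 'object'>,)."
    print(err)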
3327b52c35e383f07d9be529a787ad0edb0e800a
| 8,050
|
py
|
Python
|
finance/WeekTest/WeekDataPrepare.py
|
Ernestyj/PyStudy
|
ee2e314eb808b0b7c4574b3061814abb81bbb7ab
|
[
"Apache-2.0"
] | 1
|
2016-11-28T03:26:05.000Z
|
2016-11-28T03:26:05.000Z
|
finance/WeekTest/WeekDataPrepare.py
|
Ernestyj/PyStudy
|
ee2e314eb808b0b7c4574b3061814abb81bbb7ab
|
[
"Apache-2.0"
] | null | null | null |
finance/WeekTest/WeekDataPrepare.py
|
Ernestyj/PyStudy
|
ee2e314eb808b0b7c4574b3061814abb81bbb7ab
|
[
"Apache-2.0"
] | 2
|
2017-02-02T15:13:01.000Z
|
2019-05-30T01:59:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import talib
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 30)
pd.set_option('precision', 7)
pd.options.display.float_format = '{:,.3f}'.format
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
from sklearn import preprocessing, svm, cross_validation, metrics, pipeline, grid_search
from scipy.stats import sem
from sklearn.decomposition import PCA, KernelPCA
'''
Read OHLCV data for one stock over the specified years.
Input: baseDir and stockCode are strings; startYear and yearNum are integers.
Output: DataFrame
'''
def readWSDFile(baseDir, stockCode, startYear, yearNum=1):
# parse dates
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep='\t', usecols=[0,2,3,4,5,6,7,9,10,12,15], header=None,
skiprows=1, names=['Date','Open','High','Low','Close','Volume','Amount',
'Chg','Chg Pct','Avg','Turn'],
parse_dates=True, date_parser=dateparse)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
usecols = [0, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 34, 36, 37]
usecols = [0,6,16,17,24,31]
usecols = [0, 2,11,24,26,29,30]
usecols = [0, 1,2,3,4,5,6]
def readWSDIndexFile(baseDir, stockCode, startYear, yearNum=1):
# parse dates
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d').date()
df = 0
for i in range(yearNum):
tempDF = pd.read_csv(baseDir+'I'+stockCode+'/wsd_'+stockCode+'_'+str(startYear+i)+'.csv',
index_col=0, sep=',', parse_dates=True, date_parser=dateparse, usecols=usecols)
if i==0: df = tempDF
else: df = df.append(tempDF)
return df
def prepareData(df, dfi):
# open (mean open), high (mean high), low (mean low), volume (mean volume), amount (mean turnover value),
# change (mean change), changePct (mean change percentage), average (mean average price), turn (mean turnover rate),
# r (mean return),
# lastR (last week's return), weekAgoR (return two weeks ago), lastAmt (last week's mean turnover value)
# 38 technical indicators
# skip the first value
opens = [0]; openArr = []
highs = [0]; highArr = []
lows = [0]; lowArr = []
volumes = [0]; volumeArr = []
changes = [0]; changeArr = []
changePcts = [0]; changePctArr = []
averages = [0]; averageArr = []
turns = [0]; turnArr = []
rs = [0]; closeArr = []
lastRs = [0]
weekAgoRs = [0]
amts = [0]; amtArr = []
lastAmts = [0]
techs = []
techArr = []
upOrDowns = [0]  # -1 means down, 1 means up (the leading 0 is a placeholder)
actionDates = [0]
# fourWeekAvgAmts = [0]  # not included in the calculation for now
week = df.index[0].week
for i in range(len(df)):
if week != df.index[i].week:
opens.append(np.mean(openArr))
highs.append(np.mean(highArr))
lows.append(np.mean(lowArr))
volumes.append(np.mean(volumeArr))
changes.append(np.mean(changeArr))
changePcts.append(np.mean(changePctArr))
averages.append(np.mean(averageArr))
turns.append(np.mean(turnArr))
rs.append((closeArr[-1] - closeArr[0]) / closeArr[0])
lastRs.append(rs[-2])
weekAgoRs.append(lastRs[-2])
amts.append(np.mean(amtArr))
lastAmts.append(amts[-2])
techs.append(np.mean(techArr, axis=0))
upOrDown = -1
if rs[-1] > 0.0: upOrDown = 1
elif rs[-1] == 0.0: upOrDown = upOrDowns[-1]  # if unchanged, reuse the previous week's direction
else: upOrDown = -1
upOrDowns.append(upOrDown)
actionDates.append(df.index[i].date())
del openArr[:]; del highArr[:]; del lowArr[:]; del volumeArr[:]; del changeArr[:]; del changePctArr[:];
del averageArr[:]; del turnArr[:]; del closeArr[:]; del amtArr[:]
del techArr[:]
week = df.index[i].week
openArr.append(df['Open'][i])
highArr.append(df['High'][i])
lowArr.append(df['Low'][i])
volumeArr.append(df['Volume'][i])
changeArr.append(df['Chg'][i])
changePctArr.append(df['Chg Pct'][i])
averageArr.append(df['Avg'][i])
turnArr.append(df['Turn'][i])
closeArr.append(df['Close'][i])
amtArr.append(df['Amount'][i])
techArr.append(dfi.iloc[i].values)
# handle the last week of data
opens.append(np.mean(openArr))
highs.append(np.mean(highArr))
lows.append(np.mean(lowArr))
volumes.append(np.mean(volumeArr))
changes.append(np.mean(changeArr))
changePcts.append(np.mean(changePctArr))
averages.append(np.mean(averageArr))
turns.append(np.mean(turnArr))
rs.append((closeArr[-1] - closeArr[0]) / closeArr[0])
lastRs.append(rs[-2])
weekAgoRs.append(lastRs[-2])
amts.append(np.mean(amtArr))
lastAmts.append(amts[-2])
techs.append(np.mean(techArr, axis=0))
upOrDown = -1
if rs[-1] > 0.0: upOrDown = 1
elif rs[-1] == 0.0: upOrDown = upOrDowns[-1]  # if unchanged, reuse the previous week's direction
else: upOrDown = -1
upOrDowns.append(upOrDown)
actionDates.append(df.index[i].date())
# tempX = np.column_stack((opens[1:], highs[1:], lows[1:], volumes[1:], changes[1:], changePcts[1:], averages[1:],
# turns[1:], rs[1:], lastRs[1:], weekAgoRs[1:], amts[1:], lastAmts[1:]))
tempX = np.column_stack((changes[1:], changePcts[1:], volumes[1:], amts[1:], turns[1:]))
X = np.hstack((tempX, techs))
y = upOrDowns[2:]  # shift the up/down labels back one week, so this week's features predict next week's direction
y.append(upOrDowns[-1])  # pad the final label with the previous value
return X, y, actionDates[1:]
def optimizeSVM(X_norm, y, kFolds=10):
clf = pipeline.Pipeline([
('svc', svm.SVC(kernel='rbf')),
])
# grid search over multiple parameters
parameters = {
# 'svc__gamma': np.logspace(-8, 3, 10),
# 'svc__C': np.logspace(-5, 5, 10),
'svc__gamma': np.logspace(-3, 11, 8, base=2),
'svc__C': np.logspace(-3, 15, 10, base=2),
# 'svc__gamma': [0.001,0.01,0.1,1,10,100,1000],
# 'svc__C': [0.001,0.01,0.1,1,10,100,1000,10000,100000],
}
gs = grid_search.GridSearchCV(clf, parameters, verbose=1, refit=False, cv=kFolds)
gs.fit(X_norm, y)
return gs.best_params_['svc__gamma'], gs.best_params_['svc__C'], gs.best_score_
def plot3D(X_pca, y):
red_x, red_y, red_z = [], [], []
blue_x, blue_y, blue_z = [], [], []
for i in range(len(X_pca)):
if y[i]==-1:
red_x.append(X_pca[i][0])
red_y.append(X_pca[i][1])
red_z.append(X_pca[i][2])
elif y[i]==1:
blue_x.append(X_pca[i][0])
blue_y.append(X_pca[i][1])
blue_z.append(X_pca[i][2])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(red_x, red_y, red_z, c='r', marker='x')
ax.scatter(blue_x, blue_y, blue_z, c='g', marker='.')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
baseDir = '/Users/eugene/Downloads/data/'
stockCodes = ['000300.SH', '000016.SH', '000905.SH']
# i = 2
# startYear = 2015
# number = 1
# df = readWSDFile(baseDir, stockCodes[i], startYear, number)
# print 'Day count:', len(df)
# # print df.head(5)
# dfi = readWSDIndexFile(baseDir, stockCodes[i], startYear, number)
#
# X, y, actionDates = prepareData(df, dfi)
# print np.shape(X), actionDates
# normalizer = preprocessing.Normalizer().fit(X) # fit does nothing
# # normalizer = preprocessing.StandardScaler().fit(X)
# X_norm = normalizer.transform(X)
#
# # estimator = PCA(n_components=20)
# # X_pca = estimator.fit_transform(X_norm)
# # estimator_kernel = KernelPCA(n_components=50, kernel='rbf')
# # X_pca = estimator_kernel.fit_transform(X_norm)
# # plot3D(X_pca, y)
#
# # grid search over multiple parameters
# gamma, C, score = optimizeSVM(X_norm, y, kFolds=10)
# print 'gamma=',gamma, 'C=',C, 'score=',score
| 36.590909
| 118
| 0.590311
|
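As a point of comparison, the weekly aggregation performed by the manual loop in prepareData can also be expressed with pandas resampling. The sketch below is illustrative only: it assumes the DataFrame produced by readWSDFile (DatetimeIndex plus the columns named above) and omits the technical-indicator averaging and the lagged-return features.

import pandas as pd

def weekly_features_sketch(df):
    weekly = pd.DataFrame({
        'change': df['Chg'].resample('W').mean(),
        'change_pct': df['Chg Pct'].resample('W').mean(),
        'volume': df['Volume'].resample('W').mean(),
        'amount': df['Amount'].resample('W').mean(),
        'turn': df['Turn'].resample('W').mean(),
        'weekly_return': df['Close'].resample('W').apply(
            lambda c: (c.iloc[-1] - c.iloc[0]) / c.iloc[0] if len(c) else float('nan')),
    })
    # label: 1 if next week's return is positive, otherwise -1
    weekly['up_or_down'] = (weekly['weekly_return'].shift(-1) > 0).map({True: 1, False: -1})
    return weekly.dropna()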
8b763121287feeadca5923f39fd351bcf58ba51b
| 1,952
|
py
|
Python
|
src/box/repository.py
|
boydfd/mAP
|
a7813020232931e518f2e5677a81deff1ff89b6a
|
[
"Apache-2.0"
] | null | null | null |
src/box/repository.py
|
boydfd/mAP
|
a7813020232931e518f2e5677a81deff1ff89b6a
|
[
"Apache-2.0"
] | null | null | null |
src/box/repository.py
|
boydfd/mAP
|
a7813020232931e518f2e5677a81deff1ff89b6a
|
[
"Apache-2.0"
] | null | null | null |
import os
from itertools import groupby
from typing import Any
import yaml
class Serializer:
def serialize(self, entity) -> Any:
pass
def deserialize(self, serialized_entity):
pass
class YamlSerializer(Serializer):
def serialize(self, entity):
return yaml.dump(entity)
def deserialize(self, serialized_entity):
return yaml.safe_load(serialized_entity)
class DataStore:
def put(self, entity):
pass
def put_all(self, entities):
pass
def get(self, id):
pass
def get_all(self):
pass
class BatchRepository:
def __init__(self, filename, serializer: Serializer = YamlSerializer()):
self.filename = filename
self.serializer = serializer
if os.path.exists(self.filename):
with open(self.filename, 'r') as handle:
self.data = self.serializer.deserialize(handle.read())
else:
self.data = []
def save(self, data):
self.data = data
with open(self.filename, 'w') as handle:
handle.write(self.serializer.serialize(self.data))
class Repository:
def __init__(self, filename, serializer: Serializer = YamlSerializer()):
self.filename = filename
self.serializer = serializer
if os.path.exists(self.filename):
with open(self.filename, 'r') as handle:
self.data = self.serializer.deserialize(handle.read())
else:
self.data = {}
def save(self):
with open(self.filename, 'w') as handle:
handle.write(self.serializer.serialize(self.data))
def put(self, id, coupon):
self.data[id] = coupon
self.save()
def get(self, id):
return self.data.get(id)
if __name__ == '__main__':
    def groupbyUnsorted(iterable, key):
        # itertools.groupby only groups consecutive items, so sort by key first
        return groupby(sorted(iterable, key=key), key)

    for key, value in groupbyUnsorted([1, 1, 2, 2, 3, 2, 1], lambda x: x):
        print(key)
        print(list(value))
| 23.238095
| 101
| 0.603996
|
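A minimal usage sketch for the two repositories above; the file names and payloads are placeholders.

repo = Repository('boxes.yaml')
repo.put('img_001', {'label': 'car', 'bbox': [10, 20, 110, 220]})
print(repo.get('img_001'))   # -> {'label': 'car', 'bbox': [10, 20, 110, 220]}

# BatchRepository persists a whole list in one call instead of keyed entries.
batch = BatchRepository('detections.yaml')
batch.save([{'image': 'img_001', 'score': 0.9}])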
139694b5366598e2819d26d3f77c605c278b8a40
| 13,817
|
py
|
Python
|
scripts/genomon_pipeline/config/sample_conf.py
|
Genomon-Project/Genomon
|
95814bbb94fc64805e0b52b1ea2263ee17c4cd17
|
[
"BSD-3-Clause"
] | 10
|
2016-02-01T01:02:55.000Z
|
2022-01-29T23:37:51.000Z
|
scripts/genomon_pipeline/config/sample_conf.py
|
Genomon-Project/GenomonPipeline
|
95814bbb94fc64805e0b52b1ea2263ee17c4cd17
|
[
"BSD-3-Clause"
] | 114
|
2015-09-09T08:27:24.000Z
|
2022-01-12T10:31:53.000Z
|
scripts/genomon_pipeline/config/sample_conf.py
|
Genomon-Project/GenomonPipeline
|
95814bbb94fc64805e0b52b1ea2263ee17c4cd17
|
[
"BSD-3-Clause"
] | 6
|
2016-12-15T02:28:46.000Z
|
2022-01-29T23:37:52.000Z
|
#! /usr/bin/env python
import os
class Sample_conf(object):
def __init__(self):
self.fastq = {}
self.bam_tofastq = {}
self.bam_import = {}
self.mutation_call = []
self.sv_detection = []
self.qc = []
self.control_panel = {}
self.fusion = []
self.expression = []
self.intron_retention = []
#
# should add the file exist check here ?
#
def parse_file(self, file_path):
file_ext = os.path.splitext(file_path)[1]
file_data = []
if file_ext.lower() == '.csv':
file_data = self.parse_csv(file_path)
elif file_ext.lower() == '.txt' or file_ext.lower() == '.tsv':
file_data = self.parse_tsv(file_path)
# elif file_ext.lower() == '.xlsx':
# file_data = self.parse_xlsx(file_path)
else:
#
# should treat other cases ??
#
raise NotImplementedError("currently, only tsv and csv formats are accepted")
file_data_trimmed = []
for line_data in file_data:
# skip empty lines
if len(line_data) == 0: continue
# line starting with '#' is comment
if line_data[0].startswith('#'): continue
# remove spaces
line_data = list(map(lambda x: x.strip(' '), line_data))
# skip if all the elements are empty
if len(line_data) == line_data.count(''): continue
file_data_trimmed.append(line_data)
self.parse_data(file_data_trimmed)
def parse_csv(self, file_path):
_file_data = []
import csv
with open(file_path, 'r') as hIN:
csv_obj = csv.reader(hIN)
for cells in csv_obj:
tempdata = []
row_len = 0
for cell in cells:
row_len += len(cell)
if (len(cell) == 0) and (row_len > 0):
continue
tempdata.append(cell)
if row_len > 0:
_file_data.append(tempdata)
return _file_data
def parse_tsv(self, file_path):
_file_data = []
with open(file_path, 'r') as hIN:
for line in hIN:
F = line.rstrip().split('\t')
tempdata = []
row_len = 0
for cell in F:
row_len += len(cell)
if (len(cell) == 0) and (row_len > 0):
continue
tempdata.append(cell)
if row_len > 0:
_file_data.append(tempdata)
return _file_data
def parse_data(self, _data ):
mode = ''
sampleID_list = []
mut_tumor_sampleID_list = []
sv_tumor_sampleID_list = []
qc_sampleID_list = []
ff_sampleID_list = []
exp_sampleID_list = []
ir_sampleID_list = []
for row in _data:
if row[0].startswith('['):
# header
if row[0].lower() == '[fastq]':
mode = 'fastq'
continue
elif row[0].lower() == '[bam_tofastq]':
mode = 'bam_tofastq'
continue
elif row[0].lower() == '[bam_import]':
mode = 'bam_import'
continue
elif row[0].lower() == '[mutation_call]':
mode = 'mutation_call'
continue
elif row[0].lower() == '[sv_detection]':
mode = 'sv_detection'
continue
elif row[0].lower() == '[qc]':
mode = 'qc'
continue
elif row[0].lower() == '[summary]':
mode = 'qc'
continue
elif row[0].lower() == '[controlpanel]':
mode = 'controlpanel'
continue
elif row[0].lower() == '[fusion]':
mode = 'fusion'
continue
elif row[0].lower() == '[expression]':
mode = 'expression'
continue
elif row[0].lower() == '[intron_retention]':
mode = 'intron_retention'
continue
else:
err_msg = "Section name should be either of [fastq], [bam_tofastq], [bam_import], " + \
"[mutation_call], [sv_detection], [controlpanel], [fusion], [expression] or [intron_retention]. " + \
"Also, sample name should not start with '['."
raise ValueError(err_msg)
# section data
if mode == 'fastq':
sampleID = row[0]
# 'None' is reserved as a special string
if sampleID == 'None':
err_msg = "None can not be used as sampleID"
raise ValueError(err_msg)
if sampleID in sampleID_list:
err_msg = sampleID + " is duplicated."
raise ValueError(err_msg)
sampleID_list.append(sampleID)
if len(row) != 3:
err_msg = sampleID + ": the path for read1 (and read2) should be provided"
raise ValueError(err_msg)
sequence1 = row[1].split(';')
sequence2 = row[2].split(';')
for s in range(len(sequence1)):
if not os.path.exists(sequence1[s]):
err_msg = sampleID + ": " + sequence1[s] + " does not exists"
raise ValueError(err_msg)
if not os.path.exists(sequence2[s]):
err_msg = sampleID + ": " + sequence2[s] + " does not exists"
raise ValueError(err_msg)
if sequence1[s] == sequence2[s]:
err_msg = sampleID + ": read1 and read2 are same path"
raise ValueError(err_msg)
self.fastq[sampleID] = [sequence1, sequence2]
elif mode == 'bam_tofastq':
sampleID = row[0]
# 'None' is reserved as a special string
if sampleID == 'None':
err_msg = "None can not be used as sampleID"
raise ValueError(err_msg)
if sampleID in sampleID_list:
err_msg = sampleID + " is duplicated."
raise ValueError(err_msg)
sampleID_list.append(sampleID)
if len(row) != 2:
err_msg = sampleID + ": only one bam file is allowed"
raise ValueError(err_msg)
sequences = row[1]
for seq in sequences.split(";"):
if not os.path.exists(seq):
err_msg = sampleID + ": " + seq + " does not exists"
raise ValueError(err_msg)
self.bam_tofastq[sampleID] = sequences
elif mode == 'bam_import':
sampleID = row[0]
# 'None' is reserved as a special string
if sampleID == 'None':
err_msg = "None can not be used as sampleID"
raise ValueError(err_msg)
if sampleID in sampleID_list:
err_msg = sampleID + " is duplicated."
raise ValueError(err_msg)
sampleID_list.append(sampleID)
if len(row) != 2:
err_msg = sampleID + ": only one bam file is allowed"
raise ValueError(err_msg)
sequence = row[1]
if not os.path.exists(sequence):
err_msg = sampleID + ": " + sequence + " does not exists"
raise ValueError(err_msg)
sequence_prefix, ext = os.path.splitext(sequence)
if (not os.path.exists(sequence + '.bai')) and (not os.path.exists(sequence_prefix + '.bai')):
err_msg = sampleID + ": " + sequence + " index does not exists"
raise ValueError(err_msg)
self.bam_import[sampleID] = sequence
elif mode == 'mutation_call':
tumorID = row[0]
if tumorID not in sampleID_list:
err_msg = "[mutation_call] section, " + tumorID + " is not defined"
raise ValueError(err_msg)
if tumorID in mut_tumor_sampleID_list:
err_msg = "[mutation_call] section, " + tumorID + " is duplicated"
raise ValueError(err_msg)
normalID = row[1] if len(row) >= 2 and row[1] not in ['', 'None'] else None
controlpanelID = row[2] if len(row) >= 3 and row[2] not in ['', 'None'] else None
if normalID is not None and normalID not in sampleID_list:
err_msg = "[mutation_call] section, " + normalID + " is not defined"
raise ValueError(err_msg)
mut_tumor_sampleID_list.append(tumorID)
self.mutation_call.append((tumorID, normalID, controlpanelID))
elif mode == 'sv_detection':
tumorID = row[0]
if tumorID not in sampleID_list:
err_msg = "[sv_detection] section, " + tumorID + " is not defined"
raise ValueError(err_msg)
if tumorID in sv_tumor_sampleID_list:
err_msg = "[sv_detection] section, " + tumorID + " is duplicated"
raise ValueError(err_msg)
normalID = row[1] if len(row) >= 2 and row[1] not in ['', 'None'] else None
controlpanelID = row[2] if len(row) >= 3 and row[2] not in ['', 'None'] else None
if normalID is not None and normalID not in sampleID_list:
err_msg = "[sv_detection] section, " + normalID + " is not defined"
raise ValueError(err_msg)
sv_tumor_sampleID_list.append(tumorID)
self.sv_detection.append((tumorID, normalID, controlpanelID))
elif mode == 'qc':
sampleID = row[0]
if sampleID not in sampleID_list:
err_msg = "[qc] section, " + sampleID + " is not defined"
raise ValueError(err_msg)
if sampleID in qc_sampleID_list:
err_msg = "[qc] section, " + sampleID + " is duplicated"
raise ValueError(err_msg)
qc_sampleID_list.append(sampleID)
self.qc.append(sampleID)
elif mode == 'controlpanel':
if len(row) <= 1:
err_msg = "[controlpanel] section, list item is none for the row: " + ','.join(row)
raise ValueError(err_msg)
controlpanelID = row[0]
for sample in row[1:]:
if sample not in sampleID_list:
err_msg = "[controlpanel] section, " + sample + " is not defined in " + \
"controlpanelID: " + controlpanelID
raise ValueError(err_msg)
self.control_panel[controlpanelID] = row[1:]
elif mode == 'fusion':
sampleID = row[0]
if sampleID not in sampleID_list:
err_msg = "[fusion] section, " + sampleID + " is not defined"
raise ValueError(err_msg)
if sampleID in ff_sampleID_list:
err_msg = "[fusion] section, " + sampleID + " is duplicated"
raise ValueError(err_msg)
controlpanelID = row[1] if len(row) >= 2 and row[1] not in ['', 'None'] else None
ff_sampleID_list.append(sampleID)
self.fusion.append((sampleID,controlpanelID))
elif mode == 'expression':
sampleID = row[0]
if sampleID not in sampleID_list:
err_msg = "[expression] section, " + sampleID + " is not defined"
raise ValueError(err_msg)
if sampleID in exp_sampleID_list:
err_msg = "[expression] section, " + sampleID + " is duplicated"
raise ValueError(err_msg)
exp_sampleID_list.append(sampleID)
self.expression.append(sampleID)
elif mode == 'intron_retention':
sampleID = row[0]
if sampleID not in sampleID_list:
err_msg = "[intron_retention] section, " + sampleID + " is not defined"
raise ValueError(err_msg)
if sampleID in ir_sampleID_list:
err_msg = "[intron_retention] section, " + sampleID + " is duplicated"
raise ValueError(err_msg)
ir_sampleID_list.append(sampleID)
self.intron_retention.append(sampleID)
# check whether controlpanleID in compare section is defined
# for comp in self.compare:
# if comp[2] is not None and comp[2] not in self.controlpanel:
# err_msg = "[compare] section, controlpanelID: " + comp[2] + " is not defined"
# raise ValueError(err_msg)
global sample_conf
sample_conf = Sample_conf()
| 35.428205
| 131
| 0.478686
|
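A small sketch of driving Sample_conf with a throwaway CSV sample sheet, assuming the class above is importable. The fastq paths are placeholders, so parse_file raises the expected existence error; with real files the fastq and qc attributes would be populated instead.

conf = Sample_conf()

sheet = (
    "[fastq]\n"
    "sampleA,/path/to/A_1.fastq,/path/to/A_2.fastq\n"
    "\n"
    "[qc]\n"
    "sampleA\n"
)
with open('sample_sheet.csv', 'w') as handle:
    handle.write(sheet)

try:
    conf.parse_file('sample_sheet.csv')
    print(conf.fastq, conf.qc)
except ValueError as err:
    # placeholder fastq paths fail the os.path.exists() check by design
    print('rejected:', err)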
89a24150bcc8c4912512f36aadaf61356e421633
| 55,975
|
py
|
Python
|
electrum_mona/tests/test_lnpeer.py
|
wakiyamap/electrum-mona
|
d00830c96785c77025432669158ad903146a2298
|
[
"MIT"
] | 61
|
2017-08-06T08:51:49.000Z
|
2021-12-28T06:25:36.000Z
|
electrum_mona/tests/test_lnpeer.py
|
wakiyamap/electrum-mona
|
d00830c96785c77025432669158ad903146a2298
|
[
"MIT"
] | 15
|
2017-09-12T07:15:01.000Z
|
2021-12-28T06:25:15.000Z
|
electrum_mona/tests/test_lnpeer.py
|
wakiyamap/electrum-mona
|
d00830c96785c77025432669158ad903146a2298
|
[
"MIT"
] | 27
|
2017-08-18T19:40:30.000Z
|
2021-03-01T11:16:02.000Z
|
import asyncio
import tempfile
from decimal import Decimal
import os
from contextlib import contextmanager
from collections import defaultdict
import logging
import concurrent
from concurrent import futures
import unittest
from typing import Iterable, NamedTuple, Tuple, List, Dict
from aiorpcx import TaskGroup, timeout_after, TaskTimeout
import electrum_mona
import electrum_mona.trampoline
from electrum_mona import bitcoin
from electrum_mona import constants
from electrum_mona.network import Network
from electrum_mona.ecc import ECPrivkey
from electrum_mona import simple_config, lnutil
from electrum_mona.lnaddr import lnencode, LnAddr, lndecode
from electrum_mona.bitcoin import COIN, sha256
from electrum_mona.util import bh2u, create_and_start_event_loop, NetworkRetryManager, bfh
from electrum_mona.lnpeer import Peer, UpfrontShutdownScriptViolation
from electrum_mona.lnutil import LNPeerAddr, Keypair, privkey_to_pubkey
from electrum_mona.lnutil import LightningPeerConnectionClosed, RemoteMisbehaving
from electrum_mona.lnutil import PaymentFailure, LnFeatures, HTLCOwner
from electrum_mona.lnchannel import ChannelState, PeerState, Channel
from electrum_mona.lnrouter import LNPathFinder, PathEdge, LNPathInconsistent
from electrum_mona.channel_db import ChannelDB
from electrum_mona.lnworker import LNWallet, NoPathFound
from electrum_mona.lnmsg import encode_msg, decode_msg
from electrum_mona import lnmsg
from electrum_mona.logging import console_stderr_handler, Logger
from electrum_mona.lnworker import PaymentInfo, RECEIVED
from electrum_mona.lnonion import OnionFailureCode
from electrum_mona.lnutil import derive_payment_secret_from_payment_preimage
from electrum_mona.lnutil import LOCAL, REMOTE
from electrum_mona.invoices import PR_PAID, PR_UNPAID
from .test_lnchannel import create_test_channels
from .test_bitcoin import needs_test_with_all_chacha20_implementations
from . import TestCaseForTestnet
def keypair():
priv = ECPrivkey.generate_random_key().get_secret_bytes()
k1 = Keypair(
pubkey=privkey_to_pubkey(priv),
privkey=priv)
return k1
@contextmanager
def noop_lock():
yield
class MockNetwork:
def __init__(self, tx_queue):
self.callbacks = defaultdict(list)
self.lnwatcher = None
self.interface = None
user_config = {}
user_dir = tempfile.mkdtemp(prefix="electrum-lnpeer-test-")
self.config = simple_config.SimpleConfig(user_config, read_user_dir_function=lambda: user_dir)
self.asyncio_loop = asyncio.get_event_loop()
self.channel_db = ChannelDB(self)
self.channel_db.data_loaded.set()
self.path_finder = LNPathFinder(self.channel_db)
self.tx_queue = tx_queue
self._blockchain = MockBlockchain()
@property
def callback_lock(self):
return noop_lock()
def get_local_height(self):
return 0
def blockchain(self):
return self._blockchain
async def broadcast_transaction(self, tx):
if self.tx_queue:
await self.tx_queue.put(tx)
async def try_broadcasting(self, tx, name):
await self.broadcast_transaction(tx)
class MockBlockchain:
def height(self):
return 0
def is_tip_stale(self):
return False
class MockWallet:
def set_label(self, x, y):
pass
def save_db(self):
pass
def add_transaction(self, tx):
pass
def is_lightning_backup(self):
return False
def is_mine(self, addr):
return True
class MockLNWallet(Logger, NetworkRetryManager[LNPeerAddr]):
MPP_EXPIRY = 2 # HTLC timestamps are cast to int, so this cannot be 1
TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 0
INITIAL_TRAMPOLINE_FEE_LEVEL = 0
def __init__(self, *, local_keypair: Keypair, chans: Iterable['Channel'], tx_queue, name):
self.name = name
Logger.__init__(self)
NetworkRetryManager.__init__(self, max_retry_delay_normal=1, init_retry_delay_normal=1)
self.node_keypair = local_keypair
self.network = MockNetwork(tx_queue)
self.taskgroup = TaskGroup()
self.lnwatcher = None
self.listen_server = None
self._channels = {chan.channel_id: chan for chan in chans}
self.payments = {}
self.logs = defaultdict(list)
self.wallet = MockWallet()
self.features = LnFeatures(0)
self.features |= LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT
self.features |= LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
self.features |= LnFeatures.VAR_ONION_OPT
self.features |= LnFeatures.PAYMENT_SECRET_OPT
self.features |= LnFeatures.OPTION_TRAMPOLINE_ROUTING_OPT
self.pending_payments = defaultdict(asyncio.Future)
for chan in chans:
chan.lnworker = self
self._peers = {} # bytes -> Peer
# used in tests
self.enable_htlc_settle = True
self.enable_htlc_forwarding = True
self.received_mpp_htlcs = dict()
self.sent_htlcs = defaultdict(asyncio.Queue)
self.sent_htlcs_routes = dict()
self.sent_buckets = defaultdict(set)
self.trampoline_forwarding_failures = {}
self.inflight_payments = set()
self.preimages = {}
self.stopping_soon = False
self.logger.info(f"created LNWallet[{name}] with nodeID={local_keypair.pubkey.hex()}")
def get_invoice_status(self, key):
pass
@property
def lock(self):
return noop_lock()
@property
def channel_db(self):
return self.network.channel_db if self.network else None
@property
def channels(self):
return self._channels
@property
def peers(self):
return self._peers
def get_channel_by_short_id(self, short_channel_id):
with self.lock:
for chan in self._channels.values():
if chan.short_channel_id == short_channel_id:
return chan
def channel_state_changed(self, chan):
pass
def save_channel(self, chan):
print("Ignoring channel save")
def diagnostic_name(self):
return self.name
async def stop(self):
await LNWallet.stop(self)
if self.channel_db:
self.channel_db.stop()
await self.channel_db.stopped_event.wait()
async def create_routes_from_invoice(self, amount_msat: int, decoded_invoice: LnAddr, *, full_path=None):
return [r async for r in self.create_routes_for_payment(
amount_msat=amount_msat,
final_total_msat=amount_msat,
invoice_pubkey=decoded_invoice.pubkey.serialize(),
min_cltv_expiry=decoded_invoice.get_min_final_cltv_expiry(),
r_tags=decoded_invoice.get_routing_info('r'),
invoice_features=decoded_invoice.get_features(),
trampoline_fee_level=0,
use_two_trampolines=False,
payment_hash=decoded_invoice.paymenthash,
payment_secret=decoded_invoice.payment_secret,
full_path=full_path)]
get_payments = LNWallet.get_payments
get_payment_info = LNWallet.get_payment_info
save_payment_info = LNWallet.save_payment_info
set_invoice_status = LNWallet.set_invoice_status
set_request_status = LNWallet.set_request_status
set_payment_status = LNWallet.set_payment_status
get_payment_status = LNWallet.get_payment_status
check_received_mpp_htlc = LNWallet.check_received_mpp_htlc
htlc_fulfilled = LNWallet.htlc_fulfilled
htlc_failed = LNWallet.htlc_failed
save_preimage = LNWallet.save_preimage
get_preimage = LNWallet.get_preimage
create_route_for_payment = LNWallet.create_route_for_payment
create_routes_for_payment = LNWallet.create_routes_for_payment
_check_invoice = staticmethod(LNWallet._check_invoice)
pay_to_route = LNWallet.pay_to_route
pay_to_node = LNWallet.pay_to_node
pay_invoice = LNWallet.pay_invoice
force_close_channel = LNWallet.force_close_channel
try_force_closing = LNWallet.try_force_closing
get_first_timestamp = lambda self: 0
on_peer_successfully_established = LNWallet.on_peer_successfully_established
get_channel_by_id = LNWallet.get_channel_by_id
channels_for_peer = LNWallet.channels_for_peer
_calc_routing_hints_for_invoice = LNWallet._calc_routing_hints_for_invoice
handle_error_code_from_failed_htlc = LNWallet.handle_error_code_from_failed_htlc
is_trampoline_peer = LNWallet.is_trampoline_peer
wait_for_received_pending_htlcs_to_get_removed = LNWallet.wait_for_received_pending_htlcs_to_get_removed
on_proxy_changed = LNWallet.on_proxy_changed
_decode_channel_update_msg = LNWallet._decode_channel_update_msg
_handle_chanupd_from_failed_htlc = LNWallet._handle_chanupd_from_failed_htlc
class MockTransport:
def __init__(self, name):
self.queue = asyncio.Queue()
self._name = name
def name(self):
return self._name
async def read_messages(self):
while True:
yield await self.queue.get()
class NoFeaturesTransport(MockTransport):
"""
This answers the init message with a init that doesn't signal any features.
Used for testing that we require DATA_LOSS_PROTECT.
"""
def send_bytes(self, data):
decoded = decode_msg(data)
print(decoded)
if decoded[0] == 'init':
self.queue.put_nowait(encode_msg('init', lflen=1, gflen=1, localfeatures=b"\x00", globalfeatures=b"\x00"))
class PutIntoOthersQueueTransport(MockTransport):
def __init__(self, keypair, name):
super().__init__(name)
self.other_mock_transport = None
self.privkey = keypair.privkey
def send_bytes(self, data):
self.other_mock_transport.queue.put_nowait(data)
def transport_pair(k1, k2, name1, name2):
t1 = PutIntoOthersQueueTransport(k1, name1)
t2 = PutIntoOthersQueueTransport(k2, name2)
t1.other_mock_transport = t2
t2.other_mock_transport = t1
return t1, t2
class SquareGraph(NamedTuple):
# A
# high fee / \ low fee
# B C
# high fee \ / low fee
# D
w_a: MockLNWallet
w_b: MockLNWallet
w_c: MockLNWallet
w_d: MockLNWallet
peer_ab: Peer
peer_ac: Peer
peer_ba: Peer
peer_bd: Peer
peer_ca: Peer
peer_cd: Peer
peer_db: Peer
peer_dc: Peer
chan_ab: Channel
chan_ac: Channel
chan_ba: Channel
chan_bd: Channel
chan_ca: Channel
chan_cd: Channel
chan_db: Channel
chan_dc: Channel
def all_peers(self) -> Iterable[Peer]:
return self.peer_ab, self.peer_ac, self.peer_ba, self.peer_bd, self.peer_ca, self.peer_cd, self.peer_db, self.peer_dc
def all_lnworkers(self) -> Iterable[MockLNWallet]:
return self.w_a, self.w_b, self.w_c, self.w_d
class PaymentDone(Exception): pass
class SuccessfulTest(Exception): pass
class TestPeer(TestCaseForTestnet):
@classmethod
def setUpClass(cls):
super().setUpClass()
console_stderr_handler.setLevel(logging.DEBUG)
def setUp(self):
super().setUp()
self.asyncio_loop, self._stop_loop, self._loop_thread = create_and_start_event_loop()
self._lnworkers_created = [] # type: List[MockLNWallet]
def tearDown(self):
async def cleanup_lnworkers():
async with TaskGroup() as group:
for lnworker in self._lnworkers_created:
await group.spawn(lnworker.stop())
self._lnworkers_created.clear()
run(cleanup_lnworkers())
self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
self._loop_thread.join(timeout=1)
super().tearDown()
def prepare_peers(self, alice_channel: Channel, bob_channel: Channel):
k1, k2 = keypair(), keypair()
alice_channel.node_id = k2.pubkey
bob_channel.node_id = k1.pubkey
t1, t2 = transport_pair(k1, k2, alice_channel.name, bob_channel.name)
q1, q2 = asyncio.Queue(), asyncio.Queue()
w1 = MockLNWallet(local_keypair=k1, chans=[alice_channel], tx_queue=q1, name=bob_channel.name)
w2 = MockLNWallet(local_keypair=k2, chans=[bob_channel], tx_queue=q2, name=alice_channel.name)
self._lnworkers_created.extend([w1, w2])
p1 = Peer(w1, k2.pubkey, t1)
p2 = Peer(w2, k1.pubkey, t2)
w1._peers[p1.pubkey] = p1
w2._peers[p2.pubkey] = p2
# mark_open won't work if state is already OPEN.
# so set it to FUNDED
alice_channel._state = ChannelState.FUNDED
bob_channel._state = ChannelState.FUNDED
# this populates the channel graph:
p1.mark_open(alice_channel)
p2.mark_open(bob_channel)
return p1, p2, w1, w2, q1, q2
def prepare_chans_and_peers_in_square(self, funds_distribution: Dict[str, Tuple[int, int]]=None) -> SquareGraph:
if not funds_distribution:
funds_distribution = {}
key_a, key_b, key_c, key_d = [keypair() for i in range(4)]
local_balance, remote_balance = funds_distribution.get('ab') or (None, None)
chan_ab, chan_ba = create_test_channels(
alice_name="alice", bob_name="bob",
alice_pubkey=key_a.pubkey, bob_pubkey=key_b.pubkey,
local_msat=local_balance,
remote_msat=remote_balance,
)
local_balance, remote_balance = funds_distribution.get('ac') or (None, None)
chan_ac, chan_ca = create_test_channels(
alice_name="alice", bob_name="carol",
alice_pubkey=key_a.pubkey, bob_pubkey=key_c.pubkey,
local_msat=local_balance,
remote_msat=remote_balance,
)
local_balance, remote_balance = funds_distribution.get('bd') or (None, None)
chan_bd, chan_db = create_test_channels(
alice_name="bob", bob_name="dave",
alice_pubkey=key_b.pubkey, bob_pubkey=key_d.pubkey,
local_msat=local_balance,
remote_msat=remote_balance,
)
local_balance, remote_balance = funds_distribution.get('cd') or (None, None)
chan_cd, chan_dc = create_test_channels(
alice_name="carol", bob_name="dave",
alice_pubkey=key_c.pubkey, bob_pubkey=key_d.pubkey,
local_msat=local_balance,
remote_msat=remote_balance,
)
trans_ab, trans_ba = transport_pair(key_a, key_b, chan_ab.name, chan_ba.name)
trans_ac, trans_ca = transport_pair(key_a, key_c, chan_ac.name, chan_ca.name)
trans_bd, trans_db = transport_pair(key_b, key_d, chan_bd.name, chan_db.name)
trans_cd, trans_dc = transport_pair(key_c, key_d, chan_cd.name, chan_dc.name)
txq_a, txq_b, txq_c, txq_d = [asyncio.Queue() for i in range(4)]
w_a = MockLNWallet(local_keypair=key_a, chans=[chan_ab, chan_ac], tx_queue=txq_a, name="alice")
w_b = MockLNWallet(local_keypair=key_b, chans=[chan_ba, chan_bd], tx_queue=txq_b, name="bob")
w_c = MockLNWallet(local_keypair=key_c, chans=[chan_ca, chan_cd], tx_queue=txq_c, name="carol")
w_d = MockLNWallet(local_keypair=key_d, chans=[chan_db, chan_dc], tx_queue=txq_d, name="dave")
self._lnworkers_created.extend([w_a, w_b, w_c, w_d])
peer_ab = Peer(w_a, key_b.pubkey, trans_ab)
peer_ac = Peer(w_a, key_c.pubkey, trans_ac)
peer_ba = Peer(w_b, key_a.pubkey, trans_ba)
peer_bd = Peer(w_b, key_d.pubkey, trans_bd)
peer_ca = Peer(w_c, key_a.pubkey, trans_ca)
peer_cd = Peer(w_c, key_d.pubkey, trans_cd)
peer_db = Peer(w_d, key_b.pubkey, trans_db)
peer_dc = Peer(w_d, key_c.pubkey, trans_dc)
w_a._peers[peer_ab.pubkey] = peer_ab
w_a._peers[peer_ac.pubkey] = peer_ac
w_b._peers[peer_ba.pubkey] = peer_ba
w_b._peers[peer_bd.pubkey] = peer_bd
w_c._peers[peer_ca.pubkey] = peer_ca
w_c._peers[peer_cd.pubkey] = peer_cd
w_d._peers[peer_db.pubkey] = peer_db
w_d._peers[peer_dc.pubkey] = peer_dc
w_b.network.config.set_key('lightning_forward_payments', True)
w_c.network.config.set_key('lightning_forward_payments', True)
w_b.network.config.set_key('lightning_forward_trampoline_payments', True)
w_c.network.config.set_key('lightning_forward_trampoline_payments', True)
# forwarding fees, etc
chan_ab.forwarding_fee_proportional_millionths *= 500
chan_ab.forwarding_fee_base_msat *= 500
chan_ba.forwarding_fee_proportional_millionths *= 500
chan_ba.forwarding_fee_base_msat *= 500
chan_bd.forwarding_fee_proportional_millionths *= 500
chan_bd.forwarding_fee_base_msat *= 500
chan_db.forwarding_fee_proportional_millionths *= 500
chan_db.forwarding_fee_base_msat *= 500
# mark_open won't work if state is already OPEN.
# so set it to FUNDED
for chan in [chan_ab, chan_ac, chan_ba, chan_bd, chan_ca, chan_cd, chan_db, chan_dc]:
chan._state = ChannelState.FUNDED
# this populates the channel graph:
peer_ab.mark_open(chan_ab)
peer_ac.mark_open(chan_ac)
peer_ba.mark_open(chan_ba)
peer_bd.mark_open(chan_bd)
peer_ca.mark_open(chan_ca)
peer_cd.mark_open(chan_cd)
peer_db.mark_open(chan_db)
peer_dc.mark_open(chan_dc)
graph = SquareGraph(
w_a=w_a,
w_b=w_b,
w_c=w_c,
w_d=w_d,
peer_ab=peer_ab,
peer_ac=peer_ac,
peer_ba=peer_ba,
peer_bd=peer_bd,
peer_ca=peer_ca,
peer_cd=peer_cd,
peer_db=peer_db,
peer_dc=peer_dc,
chan_ab=chan_ab,
chan_ac=chan_ac,
chan_ba=chan_ba,
chan_bd=chan_bd,
chan_ca=chan_ca,
chan_cd=chan_cd,
chan_db=chan_db,
chan_dc=chan_dc,
)
return graph
@staticmethod
async def prepare_invoice(
w2: MockLNWallet, # receiver
*,
amount_msat=100_000_000,
include_routing_hints=False,
) -> Tuple[LnAddr, str]:
amount_btc = amount_msat/Decimal(COIN*1000)
payment_preimage = os.urandom(32)
RHASH = sha256(payment_preimage)
info = PaymentInfo(RHASH, amount_msat, RECEIVED, PR_UNPAID)
w2.save_preimage(RHASH, payment_preimage)
w2.save_payment_info(info)
if include_routing_hints:
routing_hints = await w2._calc_routing_hints_for_invoice(amount_msat)
else:
routing_hints = []
trampoline_hints = []
for r in routing_hints:
node_id, short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = r[1][0]
if len(r[1])== 1 and w2.is_trampoline_peer(node_id):
trampoline_hints.append(('t', (node_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta)))
invoice_features = w2.features.for_invoice()
if invoice_features.supports(LnFeatures.PAYMENT_SECRET_OPT):
payment_secret = derive_payment_secret_from_payment_preimage(payment_preimage)
else:
payment_secret = None
lnaddr1 = LnAddr(
paymenthash=RHASH,
amount=amount_btc,
tags=[('c', lnutil.MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
('d', 'coffee'),
('9', invoice_features),
] + routing_hints + trampoline_hints,
payment_secret=payment_secret,
)
invoice = lnencode(lnaddr1, w2.node_keypair.privkey)
lnaddr2 = lndecode(invoice) # unlike lnaddr1, this now has a pubkey set
return lnaddr2, invoice
def test_reestablish(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
for chan in (alice_channel, bob_channel):
chan.peer_state = PeerState.DISCONNECTED
async def reestablish():
await asyncio.gather(
p1.reestablish_channel(alice_channel),
p2.reestablish_channel(bob_channel))
self.assertEqual(alice_channel.peer_state, PeerState.GOOD)
self.assertEqual(bob_channel.peer_state, PeerState.GOOD)
gath.cancel()
gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_reestablish_with_old_state(self):
random_seed = os.urandom(32)
alice_channel, bob_channel = create_test_channels(random_seed=random_seed)
alice_channel_0, bob_channel_0 = create_test_channels(random_seed=random_seed) # these are identical
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
async def pay():
result, log = await w1.pay_invoice(pay_req)
self.assertEqual(result, True)
gath.cancel()
gath = asyncio.gather(pay(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel_0, bob_channel)
for chan in (alice_channel_0, bob_channel):
chan.peer_state = PeerState.DISCONNECTED
async def reestablish():
await asyncio.gather(
p1.reestablish_channel(alice_channel_0),
p2.reestablish_channel(bob_channel))
self.assertEqual(alice_channel_0.peer_state, PeerState.BAD)
self.assertEqual(bob_channel._state, ChannelState.FORCE_CLOSING)
# wait so that pending messages are processed
#await asyncio.sleep(1)
gath.cancel()
gath = asyncio.gather(reestablish(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment(self):
"""Alice pays Bob a single HTLC via direct channel."""
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, w2.get_payment_status(lnaddr.paymenthash))
result, log = await w1.pay_invoice(pay_req)
self.assertTrue(result)
self.assertEqual(PR_PAID, w2.get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.01)
lnaddr, pay_req = await self.prepare_invoice(w2)
invoice_features = lnaddr.get_features()
self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_race(self):
"""Alice and Bob pay each other simultaneously.
They both send 'update_add_htlc' and receive each other's update
before sending 'commitment_signed'. Neither party should fulfill
the respective HTLCs until those are irrevocably committed to.
"""
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def pay():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# prep
_maybe_send_commitment1 = p1.maybe_send_commitment
_maybe_send_commitment2 = p2.maybe_send_commitment
lnaddr2, pay_req2 = await self.prepare_invoice(w2)
lnaddr1, pay_req1 = await self.prepare_invoice(w1)
# create the htlc queues now (side-effecting defaultdict)
q1 = w1.sent_htlcs[lnaddr2.paymenthash]
q2 = w2.sent_htlcs[lnaddr1.paymenthash]
# alice sends htlc BUT NOT COMMITMENT_SIGNED
p1.maybe_send_commitment = lambda x: None
route1 = (await w1.create_routes_from_invoice(lnaddr2.get_amount_msat(), decoded_invoice=lnaddr2))[0][0]
amount_msat = lnaddr2.get_amount_msat()
await w1.pay_to_route(
route=route1,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=lnaddr2.paymenthash,
min_cltv_expiry=lnaddr2.get_min_final_cltv_expiry(),
payment_secret=lnaddr2.payment_secret,
)
p1.maybe_send_commitment = _maybe_send_commitment1
# bob sends htlc BUT NOT COMMITMENT_SIGNED
p2.maybe_send_commitment = lambda x: None
route2 = (await w2.create_routes_from_invoice(lnaddr1.get_amount_msat(), decoded_invoice=lnaddr1))[0][0]
amount_msat = lnaddr1.get_amount_msat()
await w2.pay_to_route(
route=route2,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=lnaddr1.paymenthash,
min_cltv_expiry=lnaddr1.get_min_final_cltv_expiry(),
payment_secret=lnaddr1.payment_secret,
)
p2.maybe_send_commitment = _maybe_send_commitment2
# sleep a bit so that they both receive msgs sent so far
await asyncio.sleep(0.2)
# now they both send COMMITMENT_SIGNED
p1.maybe_send_commitment(alice_channel)
p2.maybe_send_commitment(bob_channel)
htlc_log1 = await q1.get()
assert htlc_log1.success
htlc_log2 = await q2.get()
assert htlc_log2.success
raise PaymentDone()
async def f():
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.01)
await group.spawn(pay())
with self.assertRaises(PaymentDone):
run(f())
#@unittest.skip("too expensive")
#@needs_test_with_all_chacha20_implementations
def test_payments_stresstest(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
alice_init_balance_msat = alice_channel.balance(HTLCOwner.LOCAL)
bob_init_balance_msat = bob_channel.balance(HTLCOwner.LOCAL)
num_payments = 50
payment_value_msat = 10_000_000 # make it large enough so that there are actually HTLCs on the ctx
max_htlcs_in_flight = asyncio.Semaphore(5)
async def single_payment(pay_req):
async with max_htlcs_in_flight:
await w1.pay_invoice(pay_req)
async def many_payments():
async with TaskGroup() as group:
pay_reqs_tasks = [await group.spawn(self.prepare_invoice(w2, amount_msat=payment_value_msat))
for i in range(num_payments)]
async with TaskGroup() as group:
for pay_req_task in pay_reqs_tasks:
lnaddr, pay_req = pay_req_task.result()
await group.spawn(single_payment(pay_req))
gath.cancel()
gath = asyncio.gather(many_payments(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.LOCAL))
self.assertEqual(alice_init_balance_msat - num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.REMOTE))
self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, bob_channel.balance(HTLCOwner.LOCAL))
self.assertEqual(bob_init_balance_msat + num_payments * payment_value_msat, alice_channel.balance(HTLCOwner.REMOTE))
@needs_test_with_all_chacha20_implementations
def test_payment_multihop(self):
graph = self.prepare_chans_and_peers_in_square()
peers = graph.all_peers()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
result, log = await graph.w_a.pay_invoice(pay_req)
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_with_preselected_path(self):
graph = self.prepare_chans_and_peers_in_square()
peers = graph.all_peers()
async def pay(pay_req):
with self.subTest(msg="bad path: edges do not chain together"):
path = [PathEdge(start_node=graph.w_a.node_keypair.pubkey,
end_node=graph.w_c.node_keypair.pubkey,
short_channel_id=graph.chan_ab.short_channel_id),
PathEdge(start_node=graph.w_b.node_keypair.pubkey,
end_node=graph.w_d.node_keypair.pubkey,
short_channel_id=graph.chan_bd.short_channel_id)]
with self.assertRaises(LNPathInconsistent):
await graph.w_a.pay_invoice(pay_req, full_path=path)
with self.subTest(msg="bad path: last node id differs from invoice pubkey"):
path = [PathEdge(start_node=graph.w_a.node_keypair.pubkey,
end_node=graph.w_b.node_keypair.pubkey,
short_channel_id=graph.chan_ab.short_channel_id)]
with self.assertRaises(LNPathInconsistent):
await graph.w_a.pay_invoice(pay_req, full_path=path)
with self.subTest(msg="good path"):
path = [PathEdge(start_node=graph.w_a.node_keypair.pubkey,
end_node=graph.w_b.node_keypair.pubkey,
short_channel_id=graph.chan_ab.short_channel_id),
PathEdge(start_node=graph.w_b.node_keypair.pubkey,
end_node=graph.w_d.node_keypair.pubkey,
short_channel_id=graph.chan_bd.short_channel_id)]
result, log = await graph.w_a.pay_invoice(pay_req, full_path=path)
self.assertTrue(result)
self.assertEqual(
[edge.short_channel_id for edge in path],
[edge.short_channel_id for edge in log[0].route])
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, include_routing_hints=True)
await group.spawn(pay(pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_temp_node_failure(self):
graph = self.prepare_chans_and_peers_in_square()
graph.w_b.network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
graph.w_c.network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
peers = graph.all_peers()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
result, log = await graph.w_a.pay_invoice(pay_req)
self.assertFalse(result)
self.assertEqual(PR_UNPAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_multihop_route_around_failure(self):
# Alice will pay Dave. Alice first tries A->C->D route, due to lower fees, but Carol
# will fail the htlc and get blacklisted. Alice will then try A->B->D and succeed.
graph = self.prepare_chans_and_peers_in_square()
graph.w_c.network.config.set_key('test_fail_htlcs_with_temp_node_failure', True)
peers = graph.all_peers()
async def pay(lnaddr, pay_req):
self.assertEqual(500000000000, graph.chan_ab.balance(LOCAL))
self.assertEqual(500000000000, graph.chan_db.balance(LOCAL))
self.assertEqual(PR_UNPAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
result, log = await graph.w_a.pay_invoice(pay_req, attempts=2)
self.assertEqual(2, len(log))
self.assertTrue(result)
self.assertEqual(PR_PAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
self.assertEqual([graph.chan_ac.short_channel_id, graph.chan_cd.short_channel_id],
[edge.short_channel_id for edge in log[0].route])
self.assertEqual([graph.chan_ab.short_channel_id, graph.chan_bd.short_channel_id],
[edge.short_channel_id for edge in log[1].route])
self.assertEqual(OnionFailureCode.TEMPORARY_NODE_FAILURE, log[0].failure_msg.code)
self.assertEqual(499899450000, graph.chan_ab.balance(LOCAL))
await asyncio.sleep(0.2) # wait for COMMITMENT_SIGNED / REVACK msgs to update balance
self.assertEqual(500100000000, graph.chan_db.balance(LOCAL))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, include_routing_hints=True)
invoice_features = lnaddr.get_features()
self.assertFalse(invoice_features.supports(LnFeatures.BASIC_MPP_OPT))
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
@needs_test_with_all_chacha20_implementations
def test_payment_with_temp_channel_failure_and_liquidity_hints(self):
# prepare channels such that a temporary channel failure happens at c->d
funds_distribution = {
'ac': (200_000_000, 200_000_000), # low fees
'cd': (50_000_000, 200_000_000), # low fees
'ab': (200_000_000, 200_000_000), # high fees
'bd': (200_000_000, 200_000_000), # high fees
}
# the payment happens in two attempts:
# 1. along a->c->d due to low fees with temp channel failure:
# with chanupd: ORPHANED, private channel update
# c->d gets a liquidity hint and gets blocked
# 2. along a->b->d with success
amount_to_pay = 100_000_000
graph = self.prepare_chans_and_peers_in_square(funds_distribution)
peers = graph.all_peers()
async def pay(lnaddr, pay_req):
self.assertEqual(PR_UNPAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
result, log = await graph.w_a.pay_invoice(pay_req, attempts=3)
self.assertTrue(result)
self.assertEqual(2, len(log))
self.assertEqual(PR_PAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
self.assertEqual(OnionFailureCode.TEMPORARY_CHANNEL_FAILURE, log[0].failure_msg.code)
liquidity_hints = graph.w_a.network.path_finder.liquidity_hints
pubkey_a = graph.w_a.node_keypair.pubkey
pubkey_b = graph.w_b.node_keypair.pubkey
pubkey_c = graph.w_c.node_keypair.pubkey
pubkey_d = graph.w_d.node_keypair.pubkey
# check liquidity hints for failing route:
hint_ac = liquidity_hints.get_hint(graph.chan_ac.short_channel_id)
hint_cd = liquidity_hints.get_hint(graph.chan_cd.short_channel_id)
self.assertEqual(amount_to_pay, hint_ac.can_send(pubkey_a < pubkey_c))
self.assertEqual(None, hint_ac.cannot_send(pubkey_a < pubkey_c))
self.assertEqual(None, hint_cd.can_send(pubkey_c < pubkey_d))
self.assertEqual(amount_to_pay, hint_cd.cannot_send(pubkey_c < pubkey_d))
# check liquidity hints for successful route:
hint_ab = liquidity_hints.get_hint(graph.chan_ab.short_channel_id)
hint_bd = liquidity_hints.get_hint(graph.chan_bd.short_channel_id)
self.assertEqual(amount_to_pay, hint_ab.can_send(pubkey_a < pubkey_b))
self.assertEqual(None, hint_ab.cannot_send(pubkey_a < pubkey_b))
self.assertEqual(amount_to_pay, hint_bd.can_send(pubkey_b < pubkey_d))
self.assertEqual(None, hint_bd.cannot_send(pubkey_b < pubkey_d))
raise PaymentDone()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, amount_msat=amount_to_pay, include_routing_hints=True)
await group.spawn(pay(lnaddr, pay_req))
with self.assertRaises(PaymentDone):
run(f())
def _run_mpp(self, graph, kwargs1, kwargs2):
self.assertEqual(500_000_000_000, graph.chan_ab.balance(LOCAL))
self.assertEqual(500_000_000_000, graph.chan_ac.balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.all_peers()
async def pay(attempts=1,
alice_uses_trampoline=False,
bob_forwarding=True,
mpp_invoice=True):
if mpp_invoice:
graph.w_d.features |= LnFeatures.BASIC_MPP_OPT
if not bob_forwarding:
graph.w_b.enable_htlc_forwarding = False
if alice_uses_trampoline:
if graph.w_a.network.channel_db:
graph.w_a.network.channel_db.stop()
await graph.w_a.network.channel_db.stopped_event.wait()
graph.w_a.network.channel_db = None
else:
assert graph.w_a.network.channel_db is not None
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, include_routing_hints=True, amount_msat=amount_to_pay)
self.assertEqual(PR_UNPAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
result, log = await graph.w_a.pay_invoice(pay_req, attempts=attempts)
if not bob_forwarding:
# reset to previous state, sleep 2s so that the second htlc can time out
graph.w_b.enable_htlc_forwarding = True
await asyncio.sleep(2)
if result:
self.assertEqual(PR_PAID, graph.w_d.get_payment_status(lnaddr.paymenthash))
raise PaymentDone()
else:
raise NoPathFound()
async def f(kwargs):
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(pay(**kwargs))
with self.assertRaises(NoPathFound):
run(f(kwargs1))
with self.assertRaises(PaymentDone):
run(f(kwargs2))
@needs_test_with_all_chacha20_implementations
def test_multipart_payment_with_timeout(self):
graph = self.prepare_chans_and_peers_in_square()
self._run_mpp(graph, {'bob_forwarding':False}, {'bob_forwarding':True})
@needs_test_with_all_chacha20_implementations
def test_multipart_payment(self):
graph = self.prepare_chans_and_peers_in_square()
self._run_mpp(graph, {'mpp_invoice':False}, {'mpp_invoice':True})
@needs_test_with_all_chacha20_implementations
def test_multipart_payment_with_trampoline(self):
# single attempt will fail with insufficient trampoline fee
graph = self.prepare_chans_and_peers_in_square()
electrum_mona.trampoline._TRAMPOLINE_NODES_UNITTESTS = {
graph.w_b.name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.w_b.node_keypair.pubkey),
graph.w_c.name: LNPeerAddr(host="127.0.0.1", port=9735, pubkey=graph.w_c.node_keypair.pubkey),
}
try:
self._run_mpp(graph, {'alice_uses_trampoline':True, 'attempts':1}, {'alice_uses_trampoline':True, 'attempts':30})
finally:
electrum_mona.trampoline._TRAMPOLINE_NODES_UNITTESTS = {}
@needs_test_with_all_chacha20_implementations
def test_fail_pending_htlcs_on_shutdown(self):
"""Alice tries to pay Dave via MPP. Dave receives some HTLCs but not all.
Dave shuts down (stops wallet).
We test if Dave fails the pending HTLCs during shutdown.
"""
graph = self.prepare_chans_and_peers_in_square()
self.assertEqual(500_000_000_000, graph.chan_ab.balance(LOCAL))
self.assertEqual(500_000_000_000, graph.chan_ac.balance(LOCAL))
amount_to_pay = 600_000_000_000
peers = graph.all_peers()
graph.w_d.MPP_EXPIRY = 120
graph.w_d.TIMEOUT_SHUTDOWN_FAIL_PENDING_HTLCS = 3
async def pay():
graph.w_d.features |= LnFeatures.BASIC_MPP_OPT
graph.w_b.enable_htlc_forwarding = False # Bob will hold forwarded HTLCs
assert graph.w_a.network.channel_db is not None
lnaddr, pay_req = await self.prepare_invoice(graph.w_d, include_routing_hints=True, amount_msat=amount_to_pay)
try:
async with timeout_after(0.5):
result, log = await graph.w_a.pay_invoice(pay_req, attempts=1)
except TaskTimeout:
# by now Dave hopefully received some HTLCs:
self.assertTrue(len(graph.chan_dc.hm.htlcs(LOCAL)) > 0)
self.assertTrue(len(graph.chan_dc.hm.htlcs(REMOTE)) > 0)
else:
self.fail(f"pay_invoice finished but was not supposed to. result={result}")
await graph.w_d.stop()
# Dave is supposed to have failed the pending incomplete MPP HTLCs
self.assertEqual(0, len(graph.chan_dc.hm.htlcs(LOCAL)))
self.assertEqual(0, len(graph.chan_dc.hm.htlcs(REMOTE)))
raise SuccessfulTest()
async def f():
async with TaskGroup() as group:
for peer in peers:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(pay())
with self.assertRaises(SuccessfulTest):
run(f())
@needs_test_with_all_chacha20_implementations
def test_close(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
w2.enable_htlc_settle = False
lnaddr, pay_req = run(self.prepare_invoice(w2))
async def pay():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# alice sends htlc
route, amount_msat = (await w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
p1.pay(route=route,
chan=alice_channel,
amount_msat=lnaddr.get_amount_msat(),
total_msat=lnaddr.get_amount_msat(),
payment_hash=lnaddr.paymenthash,
min_final_cltv_expiry=lnaddr.get_min_final_cltv_expiry(),
payment_secret=lnaddr.payment_secret)
# alice closes
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def set_settle():
await asyncio.sleep(0.1)
w2.enable_htlc_settle = True
gath = asyncio.gather(pay(), set_settle(), p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
async def f():
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(f())
@needs_test_with_all_chacha20_implementations
def test_close_upfront_shutdown_script(self):
alice_channel, bob_channel = create_test_channels()
# create upfront shutdown script for bob, alice doesn't use upfront
# shutdown script
bob_uss_pub = lnutil.privkey_to_pubkey(os.urandom(32))
bob_uss_addr = bitcoin.pubkey_to_address('p2wpkh', bh2u(bob_uss_pub))
bob_uss = bfh(bitcoin.address_to_script(bob_uss_addr))
# bob commits to close to bob_uss
alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
# but bob closes to some receiving address, which we achieve by not
# setting the upfront shutdown script in the channel config
bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = b''
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
async def test():
async def close():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# bob closes channel with different shutdown script
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def main_loop(peer):
async with peer.taskgroup as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
coros = [close(), main_loop(p1), main_loop(p2)]
gath = asyncio.gather(*coros)
await gath
with self.assertRaises(UpfrontShutdownScriptViolation):
run(test())
        # bob sends the same upfront_shutdown_script as he announced
alice_channel.config[HTLCOwner.REMOTE].upfront_shutdown_script = bob_uss
bob_channel.config[HTLCOwner.LOCAL].upfront_shutdown_script = bob_uss
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
w1.network.config.set_key('dynamic_fees', False)
w2.network.config.set_key('dynamic_fees', False)
w1.network.config.set_key('fee_per_kb', 5000)
w2.network.config.set_key('fee_per_kb', 1000)
async def test():
async def close():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
await p1.close_channel(alice_channel.channel_id)
gath.cancel()
async def main_loop(peer):
async with peer.taskgroup as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
coros = [close(), main_loop(p1), main_loop(p2)]
gath = asyncio.gather(*coros)
await gath
with self.assertRaises(concurrent.futures.CancelledError):
run(test())
def test_channel_usage_after_closing(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, q1, q2 = self.prepare_peers(alice_channel, bob_channel)
lnaddr, pay_req = run(self.prepare_invoice(w2))
lnaddr = w1._check_invoice(pay_req)
route, amount_msat = run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))[0][0:2]
assert amount_msat == lnaddr.get_amount_msat()
run(w1.force_close_channel(alice_channel.channel_id))
# check if a tx (commitment transaction) was broadcasted:
assert q1.qsize() == 1
with self.assertRaises(NoPathFound) as e:
run(w1.create_routes_from_invoice(lnaddr.get_amount_msat(), decoded_invoice=lnaddr))
peer = w1.peers[route[0].node_id]
# AssertionError is ok since we shouldn't use old routes, and the
# route finding should fail when channel is closed
async def f():
min_cltv_expiry = lnaddr.get_min_final_cltv_expiry()
payment_hash = lnaddr.paymenthash
payment_secret = lnaddr.payment_secret
pay = w1.pay_to_route(
route=route,
amount_msat=amount_msat,
total_msat=amount_msat,
amount_receiver_msat=amount_msat,
payment_hash=payment_hash,
payment_secret=payment_secret,
min_cltv_expiry=min_cltv_expiry)
await asyncio.gather(pay, p1._message_loop(), p2._message_loop(), p1.htlc_switch(), p2.htlc_switch())
with self.assertRaises(PaymentFailure):
run(f())
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages_that_should_be_ignored(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends known message with trailing garbage
# BOLT-01 says peer2 should ignore trailing garbage
raw_msg1 = encode_msg('ping', num_pong_bytes=4, byteslen=4) + bytes(range(55))
p1.transport.send_bytes(raw_msg1)
await asyncio.sleep(0.05)
# peer1 sends unknown 'odd-type' message
# BOLT-01 says peer2 should ignore whole message
raw_msg2 = (43333).to_bytes(length=2, byteorder="big") + bytes(range(55))
p1.transport.send_bytes(raw_msg2)
await asyncio.sleep(0.05)
raise SuccessfulTest()
async def f():
async with TaskGroup() as group:
for peer in [p1, p2]:
await group.spawn(peer._message_loop())
await group.spawn(peer.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(SuccessfulTest):
run(f())
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages__unknown_even_type(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends unknown 'even-type' message
# BOLT-01 says peer2 should close the connection
raw_msg2 = (43334).to_bytes(length=2, byteorder="big") + bytes(range(55))
p1.transport.send_bytes(raw_msg2)
await asyncio.sleep(0.05)
failing_task = None
async def f():
nonlocal failing_task
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
failing_task = await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(lnmsg.UnknownMandatoryMsgType):
run(f())
self.assertTrue(isinstance(failing_task.exception(), lnmsg.UnknownMandatoryMsgType))
@needs_test_with_all_chacha20_implementations
def test_sending_weird_messages__known_msg_with_insufficient_length(self):
alice_channel, bob_channel = create_test_channels()
p1, p2, w1, w2, _q1, _q2 = self.prepare_peers(alice_channel, bob_channel)
async def send_weird_messages():
await asyncio.wait_for(p1.initialized, 1)
await asyncio.wait_for(p2.initialized, 1)
# peer1 sends known message with insufficient length for the contents
# BOLT-01 says peer2 should fail the connection
raw_msg1 = encode_msg('ping', num_pong_bytes=4, byteslen=4)[:-1]
p1.transport.send_bytes(raw_msg1)
await asyncio.sleep(0.05)
failing_task = None
async def f():
nonlocal failing_task
async with TaskGroup() as group:
await group.spawn(p1._message_loop())
await group.spawn(p1.htlc_switch())
failing_task = await group.spawn(p2._message_loop())
await group.spawn(p2.htlc_switch())
await asyncio.sleep(0.2)
await group.spawn(send_weird_messages())
with self.assertRaises(lnmsg.UnexpectedEndOfStream):
run(f())
self.assertTrue(isinstance(failing_task.exception(), lnmsg.UnexpectedEndOfStream))
def run(coro):
return asyncio.run_coroutine_threadsafe(coro, loop=asyncio.get_event_loop()).result()
| 45.545159
| 128
| 0.652863
|
b2f6371d4cce5e86235b75b37e596bf3ede570e0
| 2,801
|
py
|
Python
|
app/core/models.py
|
veyselbugraaydogan/recipe-app-api
|
7b2271a6ccd6525486a7c387d465ced2c18f15da
|
[
"MIT"
] | 1
|
2019-06-11T15:16:45.000Z
|
2019-06-11T15:16:45.000Z
|
app/core/models.py
|
veyselbugraaydogan/recipe-app-api
|
7b2271a6ccd6525486a7c387d465ced2c18f15da
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
veyselbugraaydogan/recipe-app-api
|
7b2271a6ccd6525486a7c387d465ced2c18f15da
|
[
"MIT"
] | null | null | null |
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('uploads/recipe/', filename)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new User"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
class Tag(models.Model):
"""Tag to be used for a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
        # Does the CASCADE here also delete the tags this user created when the user itself is deleted?
)
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient to be used in a recipe"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Recipe(models.Model):
"""Recipe object"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
title = models.CharField(max_length=255)
time_minutes = models.IntegerField()
price = models.DecimalField(max_digits=5, decimal_places=2)
link = models.CharField(max_length=255, blank=True)
ingredients = models.ManyToManyField('Ingredient')
tags = models.ManyToManyField('Tag')
    # Here we referenced the class by name as a string. If we had not done that,
    # we would have had to define the classes in a specific order.
image = models.ImageField(null=True, upload_to=recipe_image_file_path)
def __str__(self):
return self.title
| 28.581633
| 76
| 0.677615
|
2a8455400fcb18069900da917b59ef72117c5049
| 11,411
|
py
|
Python
|
herschel/calculateMergers.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 5
|
2016-05-28T14:12:28.000Z
|
2021-04-22T10:23:12.000Z
|
herschel/calculateMergers.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | null | null | null |
herschel/calculateMergers.py
|
sniemi/SamPy
|
e048756feca67197cf5f995afd7d75d8286e017b
|
[
"BSD-2-Clause"
] | 2
|
2015-07-13T10:04:10.000Z
|
2021-04-22T10:23:23.000Z
|
"""
This script calculates merger fractions.
These results are presented in the Herschel I paper
of Niemi et al. 2011.
"""
import os
import numpy as N
import SamPy.db.sqlite as sq
if __name__ == '__main__':
    #find the home directory; because the output goes to Dropbox and my user
    #name is not always the same, this hack is required.
hm = os.getenv('HOME')
#constants
#path = hm + '/Dropbox/Research/Herschel/runs/reds_zero/'
path = hm + '/Research/Herschel/runs/big_volume/'
db = 'sams.db'
mergetimelimit = 0.25
# print 'Calculating merger statistics from:'
# print path + db
# print 'with mergetimelimit =', mergetimelimit
#
# query2 = '''select galprop.tmerge, galprop.tmajmerge
# from FIR, galprop where
# galprop.mstar > 10.0 and
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.spire250_obs < 1e6 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id
# '''
# #get data, massive galaxies
# data = sq.get_data_sqliteSMNfunctions(path, db, query2)
# tmerge = data[:,0]
# tmajor = data[:,1]
# #masks
# nomergeMask = tmerge < 0.0
# majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
# majorsMask2 = (tmajor > mergetimelimit)
# mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) & \
# (majorsMask == False) & (majorsMask2 == False)
# mergersMask2 = (nomergeMask == False) & (majorsMask == False) & \
# (mergersMask == False) & (majorsMask2 == False)
# #the fraction of no mergers?
# nm2 = len(tmerge[tmerge < 0.0]) / float(len(tmerge)) * 100.
# nm3 = len(tmajor[tmajor < 0.0]) / float(len(tmajor)) * 100.
# nm4 = len(tmajor[majorsMask]) / float(len(tmajor)) * 100.
# #print out some statistics
# print 'Number of galaxies and Poisson error:', len(tmerge), N.sqrt(len(tmerge))
# print 'Mean tmerge of M_star > 10**10 galaxies', N.mean(tmerge[tmerge > 0.0])
# print 'Max tmerge of M_star > 10**10 galaxies', N.max(tmerge[tmerge > 0.0])
# print 'Fraction of M_star > 10**10 have experienced a merger', 100.-nm2
# print 'Fraction of M_star > 10**10 have experienced a major merger', 100.-nm3
#    print 'Fraction of M_star > 10**10 galaxies that have experienced their major merger within mergetimelimit', nm4
# print
###############################################################################
#
# query2 = '''select galprop.tmerge, galprop.tmajmerge
# from FIR, galprop where
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.spire250_obs < 1e6 and
# FIR.spire250_obs > 1e-40 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id
# '''
# #get data
# data = sq.get_data_sqliteSMNfunctions(path, db, query2)
# tmerge = data[:,0]
# tmajor = data[:,1]
# #masks
# nomergeMask = tmerge < 0.0
# majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
# majorsMask2 = (tmajor > mergetimelimit)
# mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) & \
# (majorsMask == False) & (majorsMask2 == False)
# mergersMask2 = (nomergeMask == False) & (majorsMask == False) & \
# (mergersMask == False) & (majorsMask2 == False)
# #the fraction of no mergers?
# nm2 = len(tmerge[tmerge < 0.0]) / float(len(tmerge)) * 100.
# nm3 = len(tmajor[tmajor < 0.0]) / float(len(tmajor)) * 100.
# nm4 = len(tmajor[majorsMask]) / float(len(tmajor)) * 100.
# #print out some statistics
# print 'Number of galaxies and Poisson error:', len(tmerge), N.sqrt(len(tmerge))
# print 'Mean tmerge of all galaxies', N.mean(tmerge[tmerge > 0.0])
# print 'Max tmerge of all galaxies', N.max(tmerge[tmerge > 0.0])
# print 'Fraction of all galaxies that have experienced a merger', 100.-nm2
# print 'Fraction of all galaxies that have experienced a major merger', 100.-nm3
#    print 'Fraction of all galaxies that have experienced their major merger within mergetimelimit', nm4
# print
#
###############################################################################
#
# query2 = '''select galprop.tmerge, galprop.tmajmerge
# from FIR, galprop where
# FIR.spire250_obs > 20e-3 and
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.spire250_obs < 1e6 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id
# '''
# #get data, S_250 > 20 mJy
# data = sq.get_data_sqliteSMNfunctions(path, db, query2)
# tmerge = data[:,0]
# tmajor = data[:,1]
# #masks
# nomergeMask = tmerge < 0.0
# majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
# majorsMask2 = (tmajor > mergetimelimit)
# mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) & \
# (majorsMask == False) & (majorsMask2 == False)
# mergersMask2 = (nomergeMask == False) & (majorsMask == False) & \
# (mergersMask == False) & (majorsMask2 == False)
# #the fraction of no mergers?
# nm2 = len(tmerge[tmerge < 0.0]) / float(len(tmerge)) * 100.
# nm3 = len(tmajor[tmajor < 0.0]) / float(len(tmajor)) * 100.
# nm4 = len(tmajor[majorsMask]) / float(len(tmajor)) * 100.
# #print out some statistics
# print 'Number of galaxies and Poisson error:', len(tmerge), N.sqrt(len(tmerge))
# print 'Mean tmerge of S_250 > 20 mJy galaxies', N.mean(tmerge[tmerge > 0.0])
# print 'Max tmerge of S_250 > 20 mJy galaxies', N.max(tmerge[tmerge > 0.0])
# print 'Fraction of S_250 > 20 mJy have experienced a merger', 100.-nm2
# print 'Fraction of S_250 > 20 mJy have experienced a major merger', 100.-nm3
#    print 'Fraction of S_250 > 20 mJy galaxies that have experienced their major merger within mergetimelimit', nm4
# print
#
################################################################################
# query2 = '''select galprop.tmerge, galprop.tmajmerge
# from FIR, galprop where
# FIR.spire250_obs > 5e-3 and
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.spire250_obs < 1e6 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id
# '''
# #get data, S_250 > 5 mJy
# data = sq.get_data_sqliteSMNfunctions(path, db, query2)
# tmerge = data[:,0]
# tmajor = data[:,1]
# #masks
# nomergeMask = tmerge < 0.0
# majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
# majorsMask2 = (tmajor > mergetimelimit)
# mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) & \
# (majorsMask == False) & (majorsMask2 == False)
# mergersMask2 = (nomergeMask == False) & (majorsMask == False) & \
# (mergersMask == False) & (majorsMask2 == False)
# #the fraction of no mergers?
# nm2 = len(tmerge[tmerge < 0.0]) / float(len(tmerge)) * 100.
# nm3 = len(tmajor[tmajor < 0.0]) / float(len(tmajor)) * 100.
# nm4 = len(tmajor[majorsMask]) / float(len(tmajor)) * 100.
# #print out some statistics
# print 'Number of galaxies and Poisson error:', len(tmerge), N.sqrt(len(tmerge))
# print 'Mean tmerge of S_250 > 5 mJy galaxies', N.mean(tmerge[tmerge > 0.0])
# print 'Max tmerge of S_250 > 5 mJy galaxies', N.max(tmerge[tmerge > 0.0])
# print 'Fraction of S_250 > 5 mJy have experienced a merger', 100.-nm2
# print 'Fraction of S_250 > 5 mJy have experienced a major merger', 100.-nm3
#    print 'Fraction of S_250 > 5 mJy galaxies that have experienced their major merger within mergetimelimit', nm4
# print
###############################################################################
# query2 = '''select galprop.tmerge, galprop.tmajmerge
# from FIR, galprop where
# FIR.pacs160_obs > 10e-3 and
# FIR.z >= 2.0 and
# FIR.z < 4.0 and
# FIR.spire250_obs < 1e6 and
# FIR.gal_id = galprop.gal_id and
# FIR.halo_id = galprop.halo_id
# '''
# #get data
# data = sq.get_data_sqliteSMNfunctions(path, db, query2)
# tmerge = data[:,0]
# tmajor = data[:,1]
# #masks
# nomergeMask = tmerge < 0.0
# majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
# majorsMask2 = (tmajor > mergetimelimit)
# mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) & \
# (majorsMask == False) & (majorsMask2 == False)
# mergersMask2 = (nomergeMask == False) & (majorsMask == False) & \
# (mergersMask == False) & (majorsMask2 == False)
# #the fraction of no mergers?
# nm2 = len(tmerge[tmerge < 0.0]) / float(len(tmerge)) * 100.
# nm3 = len(tmajor[tmajor < 0.0]) / float(len(tmajor)) * 100.
# nm4 = len(tmajor[majorsMask]) / float(len(tmajor)) * 100.
# #print out some statistics
# print 'Number of galaxies and Poisson error:', len(tmerge), N.sqrt(len(tmerge))
# print 'Mean tmerge of PACS S_160 > 10 mJy galaxies', N.mean(tmerge[tmerge > 0.0])
# print 'Max tmerge of PACS S_160 > 10 mJy galaxies', N.max(tmerge[tmerge > 0.0])
# print 'Fraction of PACS S_160 > 10 mJy have experienced a merger', 100.-nm2
# print 'Fraction of PACS S_160 > 10 mJy have experienced a major merger', 100.-nm3
#    print 'Fraction of PACS S_160 > 10 mJy galaxies that have experienced their major merger within mergetimelimit', nm4
mergetimelimit = 0.5
query2 = '''select galprop.tmerge, galprop.tmajmerge
from FIR, galprop, galphotdust where
galphotdust.f775w - galphotdust.f850lp < 0.2 and
FIR.spire250_obs > 5e-3 and
FIR.z >= 2.0 and
FIR.z < 4.0 and
FIR.spire250_obs < 1e6 and
FIR.gal_id = galprop.gal_id and
FIR.halo_id = galprop.halo_id and
FIR.gal_id = galphotdust.gal_id and
FIR.halo_id = galphotdust.halo_id
'''
#get data
data = sq.get_data_sqliteSMNfunctions(path, db, query2)
tmerge = data[:,0]
tmajor = data[:,1]
#masks
nomergeMask = tmerge < 0.0
majorsMask = (tmajor > 0.0) & (tmajor <= mergetimelimit)
majorsMask2 = (tmajor > mergetimelimit)
mergersMask = (tmerge > 0.0) & (tmerge <= mergetimelimit) & \
(majorsMask == False) & (majorsMask2 == False)
mergersMask2 = (nomergeMask == False) & (majorsMask == False) & \
(mergersMask == False) & (majorsMask2 == False)
#the fraction of no mergers?
nm2 = len(tmerge[tmerge < 0.0]) / float(len(tmerge)) * 100.
nm3 = len(tmajor[tmajor < 0.0]) / float(len(tmajor)) * 100.
nm4 = len(tmajor[majorsMask]) / float(len(tmajor)) * 100.
#print out some statistics
print 'Number of galaxies and Poisson error:', len(tmerge), N.sqrt(len(tmerge))
print 'Mean tmerge of UV < 0.2 galaxies', N.mean(tmerge[tmerge > 0.0])
print 'Max tmerge of UV < 0.2 galaxies', N.max(tmerge[tmerge > 0.0])
    print 'Fraction of UV < 0.2 galaxies that have experienced a merger', 100.-nm2
    print 'Fraction of UV < 0.2 galaxies that have experienced a major merger', 100.-nm3
    print 'Fraction of UV < 0.2 galaxies that have experienced their major merger within mergetimelimit', nm4
| 48.351695
| 110
| 0.578039
|
b240f51f19f3f13a6dd666fdcff641600c0dc401
| 3,607
|
py
|
Python
|
test/functional/rpc_blockchain.py
|
donPabloNow/digiwage
|
87491caf8563779b1bb69866e102cb8a1439b427
|
[
"MIT"
] | 14
|
2018-03-19T23:28:42.000Z
|
2022-03-11T08:58:01.000Z
|
test/functional/rpc_blockchain.py
|
donPabloNow/digiwage
|
87491caf8563779b1bb69866e102cb8a1439b427
|
[
"MIT"
] | 4
|
2018-03-30T13:55:22.000Z
|
2022-01-30T21:17:25.000Z
|
test/functional/rpc_blockchain.py
|
donPabloNow/digiwage
|
87491caf8563779b1bb69866e102cb8a1439b427
|
[
"MIT"
] | 22
|
2018-04-08T07:41:41.000Z
|
2022-03-11T03:29:25.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import DigiwageTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(DigiwageTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
#self._test_getblockchaininfo()
self._test_gettxoutsetinfo()
self._test_getblockheader()
#self._test_getdifficulty()
self.nodes[0].verifychain(0)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(keys))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('50000.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
        assert_equal(res['bytes_serialized'], 14073)
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized']), 64)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
#assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
#assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
if __name__ == '__main__':
BlockchainTest().main()
| 32.790909
| 87
| 0.659551
|
5355f85ce58be3dc6bff9c45ad3b65da037930d9
| 3,669
|
py
|
Python
|
posthog/models/filters/mixins/funnel.py
|
alx-a/posthog
|
a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e
|
[
"MIT"
] | null | null | null |
posthog/models/filters/mixins/funnel.py
|
alx-a/posthog
|
a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e
|
[
"MIT"
] | null | null | null |
posthog/models/filters/mixins/funnel.py
|
alx-a/posthog
|
a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e
|
[
"MIT"
] | null | null | null |
from typing import Optional
from posthog.constants import (
BIN_COUNT,
DISPLAY,
FUNNEL_FROM_STEP,
FUNNEL_ORDER_TYPE,
FUNNEL_STEP,
FUNNEL_TO_STEP,
FUNNEL_VIZ_TYPE,
FUNNEL_WINDOW_DAYS,
INSIGHT,
INSIGHT_FUNNELS,
TRENDS_LINEAR,
FunnelOrderType,
FunnelVizType,
)
from posthog.models.filters.mixins.base import BaseParamMixin
from posthog.models.filters.mixins.utils import cached_property, include_dict
class FunnelFromToStepsMixin(BaseParamMixin):
@cached_property
def funnel_from_step(self) -> Optional[int]:
if self._data.get(FUNNEL_FROM_STEP):
return int(self._data[FUNNEL_FROM_STEP])
return None
@cached_property
def funnel_to_step(self) -> Optional[int]:
if self._data.get(FUNNEL_TO_STEP):
return int(self._data[FUNNEL_TO_STEP])
return None
@include_dict
def funnel_from_to_steps_to_dict(self):
dict_part = {}
if self.funnel_from_step:
dict_part[FUNNEL_FROM_STEP] = self.funnel_from_step
if self.funnel_to_step:
dict_part[FUNNEL_TO_STEP] = self.funnel_to_step
return dict_part
class FunnelWindowDaysMixin(BaseParamMixin):
@cached_property
def funnel_window_days(self) -> Optional[int]:
_days = int(self._data.get(FUNNEL_WINDOW_DAYS, "0"))
if _days == 0:
return None
return _days
@include_dict
def funnel_window_days_to_dict(self):
return {FUNNEL_WINDOW_DAYS: self.funnel_window_days} if self.funnel_window_days else {}
@staticmethod
def milliseconds_from_days(days):
milliseconds, seconds, minutes, hours = [1000, 60, 60, 24]
return milliseconds * seconds * minutes * hours * days
@staticmethod
def microseconds_from_days(days):
microseconds = 1000
return microseconds * FunnelWindowDaysMixin.milliseconds_from_days(days)
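# Illustrative sanity check (not part of the original module): the static
# helpers above turn a funnel window given in days into the milli-/microsecond
# values used elsewhere; guarded so it never runs on import.
if __name__ == "__main__":
    assert FunnelWindowDaysMixin.milliseconds_from_days(14) == 1_209_600_000
    assert FunnelWindowDaysMixin.microseconds_from_days(1) == 86_400_000_000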
class FunnelPersonsStepMixin(BaseParamMixin):
# first step is 0
# -1 means dropoff into step 1
@cached_property
def funnel_step(self) -> Optional[int]:
_step = int(self._data.get(FUNNEL_STEP, "0"))
if _step == 0:
return None
return _step
@include_dict
def funnel_step_to_dict(self):
return {FUNNEL_STEP: self.funnel_step} if self.funnel_step else {}
class FunnelTypeMixin(BaseParamMixin):
@cached_property
def funnel_order_type(self) -> Optional[FunnelOrderType]:
return self._data.get(FUNNEL_ORDER_TYPE)
@cached_property
def funnel_viz_type(self) -> Optional[FunnelVizType]:
funnel_viz_type = self._data.get(FUNNEL_VIZ_TYPE)
if (
funnel_viz_type is None
and self._data.get(INSIGHT) == INSIGHT_FUNNELS
and self._data.get(DISPLAY) == TRENDS_LINEAR
):
# Backwards compatibility
# Before Filter.funnel_viz_type funnel trends were indicated by Filter.display being TRENDS_LINEAR
return FunnelVizType.TRENDS
return funnel_viz_type
@include_dict
def funnel_type_to_dict(self):
result = {}
if self.funnel_order_type:
result[FUNNEL_ORDER_TYPE] = self.funnel_order_type
if self.funnel_viz_type:
result[FUNNEL_VIZ_TYPE] = self.funnel_viz_type
return result
class HistogramMixin(BaseParamMixin):
@cached_property
def bin_count(self) -> Optional[int]:
bin_count = self._data.get(BIN_COUNT)
return int(bin_count) if bin_count else None
@include_dict
def histogram_to_dict(self):
return {"bin_count": self.bin_count} if self.bin_count else {}
| 30.322314
| 110
| 0.682475
|
809b35fc233b69b320daabd3dd6693fb9147fd0a
| 4,285
|
py
|
Python
|
filebrowser/templatetags/fb_tags.py
|
hu-django/filebrowser-no-grappelli
|
3d7f9579146cf51933c47bec05b78dd718bb4007
|
[
"BSD-3-Clause"
] | null | null | null |
filebrowser/templatetags/fb_tags.py
|
hu-django/filebrowser-no-grappelli
|
3d7f9579146cf51933c47bec05b78dd718bb4007
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T14:33:01.000Z
|
2022-02-21T14:33:01.000Z
|
filebrowser/templatetags/fb_tags.py
|
hu-django/filebrowser-no-grappelli
|
3d7f9579146cf51933c47bec05b78dd718bb4007
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# django imports
from django import template
from django.utils.http import urlquote
# filebrowser imports
from filebrowser.settings import SELECT_FORMATS
register = template.Library()
@register.inclusion_tag('filebrowser/include/_response.html', takes_context=True)
def query_string(context, add=None, remove=None):
"""
Allows the addition and removal of query string parameters.
_response.html is just {{ response }}
Usage:
http://www.url.com/{% query_string "param_to_add=value, param_to_add=value" "param_to_remove, params_to_remove" %}
http://www.url.com/{% query_string "" "filter" %}filter={{new_filter}}
http://www.url.com/{% query_string "sort=value" "sort" %}
"""
# Written as an inclusion tag to simplify getting the context.
add = string_to_dict(add)
remove = string_to_list(remove)
params = context['query'].copy()
response = get_query_string(params, add, remove)
return {'response': response }
def query_helper(query, add=None, remove=None):
"""
Helper Function for use within views.
"""
add = string_to_dict(add)
remove = string_to_list(remove)
params = query.copy()
return get_query_string(params, add, remove)
def get_query_string(p, new_params=None, remove=None):
"""
Add and remove query parameters. From `django.contrib.admin`.
@p:type dict
"""
if new_params is None: new_params = {}
if remove is None: remove = []
for r in remove:
if r in p:
del p[r]
for k, v in new_params.items():
if k in p and v is None:
del p[k]
elif v is not None:
p[k] = v
return '?' + '&'.join([u'%s=%s' % (urlquote(k), urlquote(v)) for k, v in p.items()])
def string_to_dict(string):
"""
Usage:
{{ url|thumbnail:"width=10,height=20" }}
{{ url|thumbnail:"width=10" }}
{{ url|thumbnail:"height=20" }}
"""
kwargs = {}
if string:
string = str(string)
if ',' not in string:
# ensure at least one ','
string += ','
for arg in string.split(','):
arg = arg.strip()
if arg == '': continue
kw, val = arg.split('=', 1)
kwargs[kw] = val
return kwargs
def string_to_list(string):
"""
Usage:
{{ url|thumbnail:"width,height" }}
"""
args = []
if string:
string = str(string)
if ',' not in string:
# ensure at least one ','
string += ','
for arg in string.split(','):
arg = arg.strip()
if arg == '': continue
args.append(arg)
return args
class SelectableNode(template.Node):
def __init__(self, filetype, format):
self.filetype = template.Variable(filetype)
self.format = template.Variable(format)
def render(self, context):
try:
filetype = self.filetype.resolve(context)
except template.VariableDoesNotExist:
filetype = ''
try:
format = self.format.resolve(context)
except template.VariableDoesNotExist:
format = ''
if filetype and format and filetype in SELECT_FORMATS[format]:
selectable = True
elif filetype and format and filetype not in SELECT_FORMATS[format]:
selectable = False
else:
selectable = True
context['selectable'] = selectable
return ''
def selectable(parser, token):
try:
tag, filetype, format = token.split_contents()
except:
raise template.TemplateSyntaxError("%s tag requires 2 arguments" % token.contents.split()[0])
return SelectableNode(filetype, format)
register.tag(selectable)
@register.simple_tag
def custom_admin_media_prefix():
import django
if "1.4" in django.get_version():
from django.conf import settings
return "".join([settings.STATIC_URL,"admin/"])
else:
try:
from django.contrib.admin.templatetags import admin_media_prefix
except ImportError:
from django.contrib.admin.templatetags.adminmedia import admin_media_prefix
return admin_media_prefix()
| 27.467949
| 118
| 0.599767
|
0389ce66041ce2aba4b56550eab2ba68268a63ba
| 1,426
|
py
|
Python
|
icedata/datasets/pets/tests/test_parsers.py
|
airctic/icedata
|
a255d401ee4d4f71bc47268aee2d5d07901332b6
|
[
"Apache-2.0"
] | 42
|
2020-09-14T18:28:02.000Z
|
2022-03-30T19:55:10.000Z
|
icedata/datasets/pets/tests/test_parsers.py
|
fstroth/icedata
|
0b543d887aaf28e2fa4822310e0b2b22cd5acec4
|
[
"Apache-2.0"
] | 103
|
2020-09-11T19:50:29.000Z
|
2022-03-15T13:07:10.000Z
|
icedata/datasets/pets/tests/test_parsers.py
|
fstroth/icedata
|
0b543d887aaf28e2fa4822310e0b2b22cd5acec4
|
[
"Apache-2.0"
] | 19
|
2020-09-11T19:26:50.000Z
|
2022-03-15T13:09:44.000Z
|
import icedata
from icevision.all import *
def test_parser(data_dir):
parser = icedata.pets.parser(data_dir, mask=True)
records = parser.parse(data_splitter=SingleSplitSplitter())[0]
assert len(records) == 5
record = records[0]
assert record.filepath.name == "Abyssinian_119.jpg"
assert record.record_id == "Abyssinian_119"
assert record.detection.labels == ["Abyssinian"]
assert record.height == 297
assert record.width == 300
assert record.detection.bboxes == [BBox.from_xyxy(39, 51, 156, 179)]
print(record.detection.masks[0])
assert record.detection.masks[0].to_erles(None, None) == EncodedRLEs(
[
{
"size": [297, 300],
"counts": b"fQ9:l86L3L5aHAk5f0nI@T1;n19jL_OS1>o1c1jMdNR2^1jMfNT2\\1dMmNY2V1bMoN\\2R1aMRO]2o0aMSO]2o0aMTO]2n0_MUO`2l0^MVO`2m0]MUOb2o0XMTOg2P1TMROk2g300O1O100O10000O2N2O1O3M:Fe0[O2NO1O1O1O1O1O1O2N1O1O2N1O1O1O2N1O2N1O00000000000000000000000000000000000000000000000000000000000000000000O100O100O1O1O1O100O1O1O1O1O1O1O100O1O1O1O1O1O1O100O1O100O10000O1000oJbNW2]1^MoNb2Q1oL_OP3b0jLCV3=gLFY3:fLHY39dLJ[37aLL_35^LNa34ZLOf34PL4o3OjK5V4`2O1O1O001O1O1O001O00001O1O001O1O1O1O001O1O001O1O001O001O00001O1O00001O1O001O1O001O001O00001O1O1O1O2N2N3M2N3M2N3M4L>B8H3M4L3M4L1OO1O1O1O1O1O1M3O00000O1O10000001N1N200O2O0N201N1N2O2O1N10000N2O1O2N100N3N2M2N3N2O1O1O1N2O2N1O1N3N1N3N2M3L6IfU6",
}
]
)
| 50.928571
| 682
| 0.774895
|
f4c4841c2a2d0edc0336ca639499a15556f0300a
| 1,909
|
py
|
Python
|
homeassistant/components/wink/cover.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 2
|
2017-10-26T19:43:55.000Z
|
2017-12-30T23:29:00.000Z
|
homeassistant/components/wink/cover.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:34:57.000Z
|
2022-03-12T00:59:48.000Z
|
homeassistant/components/wink/cover.py
|
VirtualL/home-assistant
|
301829d02be8d865ab46c8901ac046d060849320
|
[
"Apache-2.0"
] | 1
|
2019-06-19T07:43:11.000Z
|
2019-06-19T07:43:11.000Z
|
"""Support for Wink covers."""
from homeassistant.components.cover import ATTR_POSITION, CoverDevice
from . import DOMAIN, WinkDevice
DEPENDENCIES = ['wink']
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink cover platform."""
import pywink
for shade in pywink.get_shades():
_id = shade.object_id() + shade.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkCoverDevice(shade, hass)])
for shade in pywink.get_shade_groups():
_id = shade.object_id() + shade.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkCoverDevice(shade, hass)])
for door in pywink.get_garage_doors():
_id = door.object_id() + door.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_entities([WinkCoverDevice(door, hass)])
class WinkCoverDevice(WinkDevice, CoverDevice):
"""Representation of a Wink cover device."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['cover'].append(self)
def close_cover(self, **kwargs):
"""Close the cover."""
self.wink.set_state(0)
def open_cover(self, **kwargs):
"""Open the cover."""
self.wink.set_state(1)
def set_cover_position(self, **kwargs):
"""Move the cover shutter to a specific position."""
position = kwargs.get(ATTR_POSITION)
self.wink.set_state(position/100)
@property
def current_cover_position(self):
"""Return the current position of cover shutter."""
if self.wink.state() is not None:
return int(self.wink.state()*100)
return None
@property
def is_closed(self):
"""Return if the cover is closed."""
state = self.wink.state()
return bool(state == 0)
| 32.355932
| 69
| 0.636459
|
95e2822563bfda51354d19f269b38a0e4ab87377
| 2,588
|
py
|
Python
|
nex2art/core/Ldap3.py
|
ghl1024/nexus2artifactory
|
1b300e1ea9c51d51a89096e8b710a0763750c38d
|
[
"Apache-2.0"
] | 50
|
2018-08-30T00:39:16.000Z
|
2022-01-27T10:08:19.000Z
|
nex2art/core/Ldap3.py
|
ghl1024/nexus2artifactory
|
1b300e1ea9c51d51a89096e8b710a0763750c38d
|
[
"Apache-2.0"
] | 68
|
2018-06-12T10:37:01.000Z
|
2022-01-10T02:47:12.000Z
|
nex2art/core/Ldap3.py
|
ghl1024/nexus2artifactory
|
1b300e1ea9c51d51a89096e8b710a0763750c38d
|
[
"Apache-2.0"
] | 38
|
2018-06-11T10:38:03.000Z
|
2021-11-12T15:00:21.000Z
|
import logging
class Ldap3(object):
def __init__(self):
self.log = logging.getLogger(__name__)
self.initialize()
def initialize(self):
self.ldap = None
def refresh(self, data):
self.log.info("Reading LDAP config from Nexus.")
ldaps = {}
for ldap in data['ldaps']: ldaps[ldap['name']] = self.getldap(ldap)
self.ldap = ldaps
self.log.info("Successfully read LDAP config.")
def getldap(self, data):
ldap = {'nexusName': data['name']}
url = data['protocol'] + '://' + data['hostName']
if (data['protocol'], data['port']) not in (('ldap', 389), ('ldaps', 636)):
url += ':' + str(data['port'])
url += '/' + data['searchBase']
ldap['ldapUrl'] = url
filt = '(&(objectClass=' + data['userObjectClass'] + ')('
filt += data['userIdAttribute'] + '={0})'
if data['ldapFilter'] != None and len(data['ldapFilter']) > 0:
ufilt = data['ldapFilter']
if ufilt[0] != '(' or ufilt[-1] != ')':
ufilt = '(' + ufilt + ')'
filt += ufilt
filt += ')'
ldap['searchFilter'] = filt
ldap['emailAttribute'] = data['emailAddressAttribute']
if data['systemUsername'] != None and len(data['systemUsername']) > 0:
ldap['managerDn'] = data['systemUsername']
if data['systemPassword'] != None and len(data['systemPassword']) > 0:
ldap['managerPassword'] = data['systemPassword']
if data['userBaseDn'] != None and len(data['userBaseDn']) > 0:
ldap['searchBase'] = data['userBaseDn']
ldap['searchSubTree'] = 'true' if data['userSubtree'] else 'false'
if data['ldapGroupsAsRoles']:
goc = 'group'
umoa = data['userMemberOfAttribute']
if umoa != None and len(umoa) > 0:
ldap['groupMemberAttribute'] = data['userMemberOfAttribute']
ldap['strategy'] = 'DYNAMIC'
ldap['groupNameAttribute'] = 'cn'
else:
ldap['groupMemberAttribute'] = data['groupMemberAttribute']
ldap['strategy'] = 'STATIC'
ldap['groupNameAttribute'] = data['groupIdAttribute']
goc = data['groupObjectClass']
ldap['filter'] = '(objectClass=' + goc + ')'
if data['groupBaseDn'] != None and len(data['groupBaseDn']) > 0:
ldap['groupBaseDn'] = data['groupBaseDn']
ldap['subTree'] = 'true' if data['groupSubtree'] else 'false'
return ldap
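# Illustrative example (not in the original file): what getldap() derives from
# one sample Nexus LDAP entry. Every value below is a made-up placeholder.
if __name__ == '__main__':
    sample = {
        'name': 'corp-ldap', 'protocol': 'ldaps', 'hostName': 'ldap.example.com',
        'port': 636, 'searchBase': 'dc=example,dc=com',
        'userObjectClass': 'inetOrgPerson', 'userIdAttribute': 'uid',
        'ldapFilter': 'mail=*', 'emailAddressAttribute': 'mail',
        'systemUsername': 'cn=admin,dc=example,dc=com', 'systemPassword': 'secret',
        'userBaseDn': 'ou=people', 'userSubtree': True,
        'ldapGroupsAsRoles': True, 'userMemberOfAttribute': '',
        'groupMemberAttribute': 'member', 'groupIdAttribute': 'cn',
        'groupObjectClass': 'groupOfNames', 'groupBaseDn': 'ou=groups',
        'groupSubtree': False,
    }
    result = Ldap3().getldap(sample)
    # ldaps on its default port 636, so no port is appended to the URL
    assert result['ldapUrl'] == 'ldaps://ldap.example.com/dc=example,dc=com'
    # the user filter combines the object class, the id attribute and the
    # extra LDAP filter (wrapped in parentheses when it lacks them)
    assert result['searchFilter'] == '(&(objectClass=inetOrgPerson)(uid={0})(mail=*))'
    # an empty userMemberOfAttribute selects the STATIC group strategy
    assert result['strategy'] == 'STATIC'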
| 43.864407
| 83
| 0.535549
|
594ab0f9e315f72cc9b89c0a9a0512e459317620
| 5,092
|
py
|
Python
|
ccnpy/tests/test_Packet.py
|
mmosko/ccnpy
|
20d982e2e3845818fde7f3facdc8cbcdff323dbb
|
[
"Apache-2.0"
] | 1
|
2020-12-23T14:17:25.000Z
|
2020-12-23T14:17:25.000Z
|
ccnpy/tests/test_Packet.py
|
mmosko/ccnpy
|
20d982e2e3845818fde7f3facdc8cbcdff323dbb
|
[
"Apache-2.0"
] | 1
|
2019-07-01T18:19:05.000Z
|
2019-07-02T05:35:52.000Z
|
ccnpy/tests/test_Packet.py
|
mmosko/ccnpy
|
20d982e2e3845818fde7f3facdc8cbcdff323dbb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Marc Mosko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import tempfile
import unittest
import ccnpy
import ccnpy.crypto
class Packet_Test(unittest.TestCase):
def test_create_content_object(self):
body = ccnpy.ContentObject.create_data(name=ccnpy.Name.from_uri('ccnx:/apple'), payload=[1, 2, 3, 4])
packet = ccnpy.Packet.create_content_object(body)
expected = array.array("B", [ 1, 1, 0, 38,
0, 0, 0, 8,
# T_CONTENT
0, 2, 0, 26,
# T_NAME
0, 0, 0, 9,
0, 1, 0, 5, 97, 112, 112, 108, 101,
# T_PAYLOAD_TYPE
0, 5, 0, 1, 0,
# T_PAYLOAD
0, 1, 0, 4, 1, 2, 3, 4])
actual = packet.serialize()
self.assertEqual(expected, actual)
def test_create_signed_content_object(self):
body = ccnpy.ContentObject.create_data(name=ccnpy.Name.from_uri('ccnx:/apple'), payload=[1, 2, 3, 4])
signer = ccnpy.crypto.Crc32c_Signer()
validation_alg = ccnpy.ValidationAlg_Crc32c()
validation_payload = signer.sign(body.serialize(), validation_alg.serialize())
packet = ccnpy.Packet.create_signed_content_object(body, validation_alg, validation_payload)
expected = array.array("B", [ 1, 1, 0, 54,
0, 0, 0, 8,
# T_CONTENT
0, 2, 0, 26,
# T_NAME
0, 0, 0, 9,
0, 1, 0, 5, 97, 112, 112, 108, 101,
# T_PAYLOAD_TYPE
0, 5, 0, 1, 0,
# T_PAYLOAD
0, 1, 0, 4, 1, 2, 3, 4,
# Validation Alg
0, 3, 0, 4, 0, 2, 0, 0,
# Validation Payload
0, 4, 0, 4, 0, 90, 226, 225])
actual = packet.serialize()
self.assertEqual(expected, actual)
def test_deserialize_signed_content_object(self):
wire_format = array.array("B", [ 1, 1, 0, 54,
0, 0, 0, 8,
# T_CONTENT
0, 2, 0, 26,
# T_NAME
0, 0, 0, 9,
0, 1, 0, 5, 97, 112, 112, 108, 101,
# T_PAYLOAD_TYPE
0, 5, 0, 1, 0,
# T_PAYLOAD
0, 1, 0, 4, 1, 2, 3, 4,
# Validation Alg
0, 3, 0, 4, 0, 2, 0, 0,
# Validation Payload
0, 4, 0, 4, 0, 90, 226, 225])
actual = ccnpy.Packet.deserialize(wire_format)
body = ccnpy.ContentObject.create_data(name=ccnpy.Name.from_uri('ccnx:/apple'), payload=[1, 2, 3, 4])
signer = ccnpy.crypto.Crc32c_Signer()
validation_alg = ccnpy.ValidationAlg_Crc32c()
validation_payload = signer.sign(body.serialize(), validation_alg.serialize())
expected = ccnpy.Packet.create_signed_content_object(body, validation_alg, validation_payload)
self.assertEqual(expected, actual)
def test_save_load(self):
body = ccnpy.ContentObject.create_data(name=ccnpy.Name.from_uri('ccnx:/apple'), payload=[1, 2, 3, 4])
signer = ccnpy.crypto.Crc32c_Signer()
validation_alg = ccnpy.ValidationAlg_Crc32c()
validation_payload = signer.sign(body.serialize(), validation_alg.serialize())
packet = ccnpy.Packet.create_signed_content_object(body, validation_alg, validation_payload)
tmp = tempfile.NamedTemporaryFile()
packet.save(tmp.name)
test = ccnpy.Packet.load(tmp.name)
self.assertEqual(packet, test)
| 45.873874
| 109
| 0.460723
|
9718da381ce1684ebf812005f64a6923f23a0c99
| 357
|
py
|
Python
|
Day-067/regular-exp-3.py
|
arvimal/100DaysofCode
|
ad4899bc88b948c3efd90337d64e932f1627fd94
|
[
"MIT"
] | 1
|
2018-06-28T17:39:38.000Z
|
2018-06-28T17:39:38.000Z
|
Day-067/regular-exp-3.py
|
arvimal/100DaysofCode-Python
|
01e59f45b4dc06a3be9e9900456a6bd439752911
|
[
"MIT"
] | null | null | null |
Day-067/regular-exp-3.py
|
arvimal/100DaysofCode-Python
|
01e59f45b4dc06a3be9e9900456a6bd439752911
|
[
"MIT"
] | 7
|
2020-01-24T23:03:58.000Z
|
2021-05-31T01:00:27.000Z
|
#!/usr/bin/env python3
# The `re.search(pattern, text)` can only search for single instances of text.
# REFER `Day-067/regular-exp-1.py`
# To find multiple occurrences, use `re.findall()`
import re
# Text to search
text = "Hello, how are you, Are you fine?"
# Patterns to match
pattern_A = "this"
pattern_B = "how"
pattern_C = "that"
pattern_D = "are"
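# Illustrative addition (not in the original exercise file): a minimal sketch of
# how `re.findall()` reports every match of each pattern above. With
# re.IGNORECASE, pattern_D ("are") matches both "are" and "Are" in the text.
for pattern in (pattern_A, pattern_B, pattern_C, pattern_D):
    matches = re.findall(pattern, text, re.IGNORECASE)
    print(f"{pattern!r}: {len(matches)} occurrence(s) -> {matches}")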
| 19.833333
| 78
| 0.697479
|
d40e5588c555ee925842de8a892a8af54203e1ff
| 22,272
|
py
|
Python
|
app/k5APIwrappersV3.py
|
allthingsclowd/K5_User_Onboarding_Example
|
313b0033ceb015cca86574762915e02000d4bbbb
|
[
"MIT"
] | null | null | null |
app/k5APIwrappersV3.py
|
allthingsclowd/K5_User_Onboarding_Example
|
313b0033ceb015cca86574762915e02000d4bbbb
|
[
"MIT"
] | null | null | null |
app/k5APIwrappersV3.py
|
allthingsclowd/K5_User_Onboarding_Example
|
313b0033ceb015cca86574762915e02000d4bbbb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""Summary: User onboarding process focused example python based API request
calls for the Fujitsu K5 IaaS Platform
Author: Graham Land
Date: 08/12/16
Twitter: @allthingsclowd
Github: https://github.com/allthingscloud
Blog: https://allthingscloud.eu
"""
import requests
def get_globally_scoped_token(adminUser, adminPassword, contract,
defaultid, region):
"""Get a global project scoped auth token
Returns:
Python Object: Globally Project Scoped Object
Containing a Catalog List in the Body
Args:
adminUser (string): Administrative user name
adminPassword (string): Password for above user
contract (string): Contract name
defaultid (string): Default project
region (string): Unused, need to remove at a later date
"""
identityURL = 'https://identity.gls.cloud.global.fujitsu.com/v3/auth/tokens'
try:
response = requests.post(identityURL,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json={"auth":
{"identity":
{"methods": ["password"], "password":
{"user":
{"domain":
{"name": contract},
"name": adminUser,
"password": adminPassword
}}},
"scope":
{"project":
{"id": defaultid
}}}})
return response
except:
return "Global Token Error"
def get_globally_rescoped_token(globaltoken, defaultid):
"""Summary - Get a global project scoped auth token
Returns:
STRING: Globally Scoped Object
Args:
globaltoken (string): valid global token
        defaultid (string): default project id
"""
identityURL = 'https://identity.gls.cloud.global.fujitsu.com/v3/auth/tokens'
try:
response = requests.post(identityURL,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json={
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": globaltoken
}
},
"scope": {
"project": {
"id": defaultid
}
}
}
})
return response
except:
return "Global Rescope Token Error"
def get_re_unscoped_token(k5token, region):
"""Summary - Get a regional unscoped auth token
Returns:
Object: Regionally Scoped Project Token
Args:
k5token (TYPE): valid regional token
region (TYPE): region
"""
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/auth/tokens'
tokenbody = {
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": k5token
}
},
}
}
try:
response = requests.post(identityURL,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json=tokenbody)
return response
except:
return 'Regional Re-Scoping Failure'
def get_rescoped_token(k5token, projectid, region):
"""Get a regional project token - rescoped
Returns:
STRING: Regionally Scoped Project Token
Args:
k5token (TYPE): valid regional token
projectid (TYPE): project id to scope to
region (TYPE): k5 region
"""
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/auth/tokens'
try:
response = requests.post(identityURL,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json={
"auth": {
"identity": {
"methods": [
"token"
],
"token": {
"id": k5token
}
},
"scope": {
"project": {
"id": projectid
}
}
}
})
return response
except:
return 'Regional Project Rescoping Failure'
def get_scoped_token(adminUser, adminPassword, contract, projectid, region):
"""Summary - Get a regional project scoped token using a username and password
Returns:
Object: Regionally Scoped Project Token Object
Args:
adminUser (TYPE): username
adminPassword (TYPE): password
contract (TYPE): contract name
projectid (TYPE): project id
region (TYPE): region
"""
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/auth/tokens'
try:
response = requests.post(identityURL,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json={"auth":
{"identity":
{"methods": ["password"], "password":
{"user":
{"domain":
{"name": contract},
"name": adminUser,
"password": adminPassword
}}},
"scope":
{"project":
{"id": projectid
}}}})
return response.headers['X-Subject-Token']
except:
return 'Regional Project Token Scoping Failure'
def get_unscoped_token(adminUser, adminPassword, contract, region):
"""Get a regional unscoped token with username and password
Returns:
TYPE: Regional UnScoped Token Object
Args:
adminUser (TYPE): username
adminPassword (TYPE): password
contract (TYPE): k5 contract name
region (TYPE): k5 region
"""
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/auth/tokens'
try:
response = requests.post(identityURL,
headers={'Content-Type': 'application/json',
'Accept': 'application/json'},
json={"auth":
{"identity":
{"methods": ["password"], "password":
{"user":
{"domain":
{"name": contract},
"name": adminUser,
"password": adminPassword
}}}}})
return response
except:
return 'Regional Unscoped Token Failure'
def get_unscoped_idtoken(adminUser, adminPassword, contract):
"""Summary - Get a central identity portal token
Returns:
TYPE: Central Identity Token Header
Args:
adminUser (TYPE): k5 admin name
adminPassword (TYPE): k5 password
contract (TYPE): k5 contract
"""
try:
response = requests.post('https://auth-api.jp-east-1.paas.cloud.global.fujitsu.com/API/paas/auth/token',
headers={'Content-Type': 'application/json'},
json={"auth":
{"identity":
{"password":
{"user":
{"contract_number": contract,
"name": adminUser,
"password": adminPassword
}}}}})
return response.headers['X-Access-Token']
except:
return 'ID Token Failure'
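# Illustrative usage sketch (not part of the original module): how the token
# helpers above are typically chained together. The contract, user, password,
# project id and region below are placeholders, not real values.
def example_token_flow():
    """Fetch a regionally project-scoped token and build reusable auth headers."""
    contract = 'mycontract'        # placeholder K5 contract name
    user = 'onboarding-admin'      # placeholder K5 admin user
    password = 'change-me'         # placeholder password
    projectid = 'abc123'           # placeholder project id
    region = 'uk-1'                # placeholder K5 region
    # get_scoped_token() returns the 'X-Subject-Token' header value, which the
    # other wrappers in this module expect as the 'X-Auth-Token' header.
    token = get_scoped_token(user, password, contract, projectid, region)
    return {'X-Auth-Token': token, 'Content-Type': 'application/json'}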
def assign_user_to_group(global_token, regional_token, contractid, region,
username, groupname):
"""Summary - Assign a K5 user to a group - requires both global
and regional tokens as we work with both global and regional features
Args:
global_token (TYPE): globally scoped token
        regional_token (TYPE): regionally scoped token
contractid (TYPE): k5 contract id
region (TYPE): k5 region
username (TYPE): k5 user name to be added to group
groupname (TYPE): k5 group to add user to
Returns:
TYPE: http request object
"""
try:
# if user exists return its id otherwise return 'None'
userid = get_itemid(get_keystoneobject_list(
regional_token, region, contractid, 'users'), username, 'users')
# if group exists return its id otherwise return 'None'
groupid = get_itemid(get_keystoneobject_list(
regional_token, region, contractid, 'groups'), groupname, 'groups')
region = 'gls'
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/groups/' + groupid + '/users/' + userid
# make the put rest request
print "Debug: Assign USER URL : ", identityURL
response = requests.put(identityURL,
headers={'X-Auth-Token': global_token,
'Content-Type': 'application/json'})
print "Debug : Add User Response : ", response
return response
except:
return 'Failed to assign user to group'
def assign_role_to_group_on_domain(k5token, contractid, region, group, role):
"""Summary - Assign a role to a group in a contract on K5
Args:
k5token (TYPE): valid regional unscoped token
contractid (TYPE): k5 contract id
region (TYPE): K5 region
group (TYPE): K5 group
role (TYPE): K5 role
Returns:
TYPE: http request object
"""
try:
        # if group exists return its id otherwise return 'None'
groupid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'groups'), group, 'groups')
# if role exists return its id otherwise return 'None'
roleid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'roles'), role, 'roles')
# the regional rather than global api is required for this call
identityURL = 'https://identity.' + region + '.cloud.global.fujitsu.com/v3/domains/' + \
contractid + '/groups/' + groupid + '/roles/' + roleid
# make the put rest api request
response = requests.put(identityURL, headers={
'X-Auth-Token': k5token,
'Content-Type': 'application/json',
'Accept': 'application/json'})
return response
except:
return 'Failed to assign role to group on domain'
def assign_role_to_user_and_project(k5token, contractid, region, username,
project, role):
"""Summary - assign a role to a user and a project on K5
Args:
k5token (TYPE): valid K5 unscoped token
contractid (TYPE): K5 contract id
region (TYPE): K5 region
username (TYPE): K5 user to be assigned role on project
project (TYPE): K5 project where user will be assigned role
role (TYPE): K5 role
Returns:
TYPE: http request object
"""
try:
# if user exists return its id otherwise return 'None'
userid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'users'), username, 'users')
# if project exists return its id otherwise return 'None'
projectid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'projects'), project, 'projects')
# if role exists return its id otherwise return 'None'
roleid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'roles'), role, 'roles')
identityURL = 'https://identity.' + region + '.cloud.global.fujitsu.com/v3/projects/' + \
projectid + '/users/' + userid + '/roles/' + roleid
response = requests.put(identityURL,
headers={
'X-Auth-Token': k5token,
'Content-Type': 'application/json',
'Accept': 'application/json'})
return response
except:
return 'Failed to assign role to user and project'
def assign_role_to_group_and_project(k5token, contractid, region, group,
project, role):
"""Summary - assign a role to a group and a project
Args:
k5token (TYPE): valid K5 unscoped token
contractid (TYPE): K5 contract id
region (TYPE): K5 region
group (TYPE): K5 group
project (TYPE): K5 project
role (TYPE): K5 role
Returns:
TYPE: http request object
"""
try:
# if group exists return its id otherwise return 'None'
groupid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'groups'), group, 'groups')
# if project exists return its id otherwise return 'None'
projectid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'projects'), project, 'projects')
# if role exists return its id otherwise return 'None'
roleid = get_itemid(get_keystoneobject_list(
k5token, region, contractid, 'roles'), role, 'roles')
identityURL = 'https://identity.' + region + '.cloud.global.fujitsu.com/v3/projects/' + \
projectid + '/groups/' + groupid + '/roles/' + roleid
response = requests.put(identityURL,
headers={
'X-Auth-Token': k5token,
'Content-Type': 'application/json',
'Accept': 'application/json'})
return response
except:
return 'Failed to assign role to group and project'
def create_new_project(k5token, contractid, region, project):
"""Summary - create a K5 project
Args:
k5token (TYPE): valid regional domain scoped token
contractid (TYPE): K5 contract id
region (TYPE): K5 region
project (TYPE): New project name
Returns:
TYPE: http response object
"""
try:
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/projects?domain_id=' + contractid
response = requests.post(identityURL,
headers={
'X-Auth-Token': k5token, 'Content-Type': 'application/json', 'Accept': 'application/json'},
json={"project":
{"description": "Programatically created project",
"domain_id": contractid,
"enabled": True,
"is_domain": False,
"name": project
}})
return response
except:
return 'Failed to create a new project'
def create_new_group(global_k5token, contractid, region, project):
"""Summary - create a K5 group
Args:
global_k5token (TYPE): K5 globally scoped token
contractid (TYPE): K5 contract id
region (TYPE): K5 region
project (TYPE): K5 project used to build the group name - only required for my use case
Returns:
TYPE: New Group Name
"""
try:
groupname = project + '_Admin'
#print "DEBUG - New groupname", groupname
groupURL = 'https://identity.gls.cloud.global.fujitsu.com/v3/groups'
response = requests.post(groupURL,
headers={'X-Auth-Token': global_k5token,
'Content-Type': 'application/json'},
json={"group":
{"description": "auto-generated project",
"domain_id": contractid,
"name": groupname
}})
#print "Debug - new group api response ", response
#print "Debug - json ", response.json()
groupDetail = response.json()
return groupDetail['group']['name']
except:
return 'Failed to create new group'
def get_keystoneobject_list(k5token, region, contractid, objecttype):
"""Summary - gets generic keystone list of projects,users,roles
or groups depending
on the object type passed in to the call
Args:
k5token (TYPE): K5 regional domain scoped token
region (TYPE): K5 region
contractid (TYPE): K5 Contract ID
objecttype (TYPE): openstack object type to base list upon...
eg. groups/users/roles etc
Returns:
TYPE: python list with results
"""
try:
identityURL = 'https://identity.' + region + \
'.cloud.global.fujitsu.com/v3/' + objecttype + '?domain_id=' + contractid
response = requests.get(identityURL,
headers={
'X-Auth-Token': k5token,
'Content-Type': 'application/json',
'Accept': 'application/json'})
return response.json()
except:
return 'Failed to get keystone object list'
def get_itemid(itemlist, itemname, itemtype):
"""Summary - generic function to get id from name in a list
Args:
itemlist (TYPE): python list
itemname (TYPE): k5 item name to be converted to an id
itemtype (TYPE): keyname ...eg. groups/users/roles etc
Returns:
TYPE: Description
"""
try:
itemid = 'None'
for item in itemlist[itemtype]:
if (item.get('name') == itemname):
itemid = item.get('id')
break
return itemid
except:
return 'Failed to get item id'
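# --- Hedged usage sketch (not part of the original module) ---
# Shows how get_keystoneobject_list() and get_itemid() are combined by the
# assignment helpers above; 'demo-project' is a placeholder project name.
def example_lookup_project_id(k5token, contractid, region):
    projects = get_keystoneobject_list(k5token, region, contractid, 'projects')
    # Returns the project id, or the string 'None' when no match is found.
    return get_itemid(projects, 'demo-project', 'projects')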
def add_new_user(idtoken, contract, region, userDetails):
"""Summary - K5 add a new user to the K5 central authentication portal
Args:
idtoken (TYPE): Identity Scoped Token
contract (TYPE): K5 contract name
region (TYPE): K5 region
userDetails (TYPE): python Tuple containing user details ..
eg. {firstname,lastname,username,email,password}
Returns:
TYPE: http response object
"""
try:
centralIdUrl = 'https://k5-apiportal.paas.cloud.global.fujitsu.com/API/v1/api/users'
print "DEBUG : ", centralIdUrl, idtoken, contract, region, userDetails
response = requests.post(centralIdUrl,
headers={'Token': idtoken,
'Content-Type': 'application/json'},
json={"user_last_name": userDetails[1],
"user_first_name": userDetails[0],
"login_id": userDetails[2],
"user_description": "Automated Account Setup",
"mailaddress": userDetails[3],
"user_status": "1",
"password": userDetails[4],
"language_code": "en",
"role_code": "01"
})
print response
print response.json()
return response
except:
print 'Failed to add new user'
return 'Failed to add new user'
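# --- Hedged usage sketch (not part of the original module) ---
# The userDetails argument of add_new_user() is positional:
# (firstname, lastname, username, email, password). All values below are
# placeholders for illustration only.
def example_add_new_user(idtoken, contract, region):
    userDetails = ('Jane', 'Doe', 'jdoe', 'jane.doe@example.com', 'Passw0rd!')
    return add_new_user(idtoken, contract, region, userDetails)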
def main():
"""Summary - deliberately left blank -
I usually test all my functions here before using the module for import!
Returns:
TYPE: Description
"""
#portal_token =
if __name__ == "__main__":
main()
| 38.733913
| 128
| 0.477954
|
c89f307ac95e38075a7038d1c5fa7df969e0711f
| 2,936
|
py
|
Python
|
allennlp/tests/modules/token_embedders/pretrained_transformer_embedder_test.py
|
nadgeri14/allennlp
|
2eefffaf71612263a1c20e8ce4107849cfd5efe3
|
[
"Apache-2.0"
] | null | null | null |
allennlp/tests/modules/token_embedders/pretrained_transformer_embedder_test.py
|
nadgeri14/allennlp
|
2eefffaf71612263a1c20e8ce4107849cfd5efe3
|
[
"Apache-2.0"
] | null | null | null |
allennlp/tests/modules/token_embedders/pretrained_transformer_embedder_test.py
|
nadgeri14/allennlp
|
2eefffaf71612263a1c20e8ce4107849cfd5efe3
|
[
"Apache-2.0"
] | null | null | null |
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestPretrainedTransformerEmbedder(AllenNlpTestCase):
def test_forward_runs_when_initialized_from_params(self):
# This code just passes things off to ``transformers``, so we only have a very simple
# test.
params = Params({"model_name": "bert-base-uncased"})
embedder = PretrainedTransformerEmbedder.from_params(params)
token_ids = torch.randint(0, 100, (1, 4))
mask = torch.randint(0, 2, (1, 4))
output = embedder(token_ids=token_ids, mask=mask)
assert tuple(output.size()) == (1, 4, 768)
def test_end_to_end(self):
tokenizer = PretrainedTransformerTokenizer(model_name="bert-base-uncased")
token_indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased")
sentence1 = "A, AllenNLP sentence."
tokens1 = tokenizer.tokenize(sentence1)
expected_tokens1 = ["[CLS]", "a", ",", "allen", "##nl", "##p", "sentence", ".", "[SEP]"]
assert [t.text for t in tokens1] == expected_tokens1
sentence2 = "AllenNLP is great"
tokens2 = tokenizer.tokenize(sentence2)
expected_tokens2 = ["[CLS]", "allen", "##nl", "##p", "is", "great", "[SEP]"]
assert [t.text for t in tokens2] == expected_tokens2
vocab = Vocabulary()
params = Params(
{
"token_embedders": {
"bert": {"type": "pretrained_transformer", "model_name": "bert-base-uncased"}
}
}
)
token_embedder = BasicTextFieldEmbedder.from_params(vocab=vocab, params=params)
instance1 = Instance({"tokens": TextField(tokens1, {"bert": token_indexer})})
instance2 = Instance({"tokens": TextField(tokens2, {"bert": token_indexer})})
batch = Batch([instance1, instance2])
batch.index_instances(vocab)
padding_lengths = batch.get_padding_lengths()
tensor_dict = batch.as_tensor_dict(padding_lengths)
tokens = tensor_dict["tokens"]
max_length = max(len(tokens1), len(tokens2))
assert tokens["bert"]["token_ids"].shape == (2, max_length)
assert tokens["bert"]["mask"].tolist() == [
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0],
]
# Attention mask
bert_vectors = token_embedder(tokens)
assert bert_vectors.size() == (2, 9, 768)
| 40.777778
| 97
| 0.653951
|
f46c4d818e21a4a0e4fb6515ec7971405b3ccf0f
| 1,220
|
py
|
Python
|
scrapers/tables.py
|
todorus/openkaart-data
|
a6781a205f9600a2911ab7ff79bea17d2680cfa4
|
[
"MIT"
] | null | null | null |
scrapers/tables.py
|
todorus/openkaart-data
|
a6781a205f9600a2911ab7ff79bea17d2680cfa4
|
[
"MIT"
] | null | null | null |
scrapers/tables.py
|
todorus/openkaart-data
|
a6781a205f9600a2911ab7ff79bea17d2680cfa4
|
[
"MIT"
] | null | null | null |
import psycopg2
from shapely.geometry import shape
import os.path
import logging
def municipalities(conn, cur):
logging.info("(re)creating municipalities")
cur.execute("DROP TABLE IF EXISTS municipalities")
cur.execute('CREATE TABLE municipalities (id serial PRIMARY KEY, code integer, name varchar, "geometry" geometry, UNIQUE (code))')
conn.commit()
def postalcodes(conn, cur):
logging.info("(re)creating postalcodes")
cur.execute("DROP TABLE IF EXISTS postal_codes")
cur.execute("CREATE TABLE postal_codes (id serial PRIMARY KEY, name varchar, UNIQUE (name))")
logging.info("inserting postal4 codes")
for number in range(0, 10000):
name = str(number).zfill(4)
data = (name,)
cur.execute("INSERT INTO postal_codes (name) VALUES (%s) ON CONFLICT DO NOTHING", data)
conn.commit()
def postalcodes_to_municipalities(conn, cur):
logging.info("(re)creating postalcodes_to_municipalities")
cur.execute("DROP TABLE IF EXISTS postalcodes_to_municipalities")
cur.execute('CREATE TABLE postalcodes_to_municipalities (id serial PRIMARY KEY, municipality_id integer, postalcode_id integer, UNIQUE (municipality_id, postalcode_id))')
conn.commit()
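# --- Hedged usage sketch (not part of the original module) ---
# Shows how the table builders above might be driven end to end; the
# connection DSN below is a placeholder, not a real database.
def rebuild_all_tables():
    conn = psycopg2.connect("dbname=openkaart user=postgres")
    cur = conn.cursor()
    municipalities(conn, cur)
    postalcodes(conn, cur)
    postalcodes_to_municipalities(conn, cur)
    cur.close()
    conn.close()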
| 42.068966
| 174
| 0.737705
|
ac0281c71d133b4860e315c0d3a02f85ec9f341d
| 5,289
|
py
|
Python
|
external/loaders/loaders/mappers/_fine_res_budget.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | 1
|
2021-12-14T23:43:35.000Z
|
2021-12-14T23:43:35.000Z
|
external/loaders/loaders/mappers/_fine_res_budget.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | 195
|
2021-09-16T05:47:18.000Z
|
2022-03-31T22:03:15.000Z
|
external/loaders/loaders/mappers/_fine_res_budget.py
|
ai2cm/fv3net
|
e62038aee0a97d6207e66baabd8938467838cf51
|
[
"MIT"
] | null | null | null |
import xarray
from typing import Tuple
from typing_extensions import Protocol
import vcm
def eddy_flux_coarse(unresolved_flux, total_resolved_flux, omega, field):
"""Compute re-coarsened eddy flux divergence from re-coarsed data
"""
return unresolved_flux + (total_resolved_flux - omega * field)
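# --- Hedged illustration (not part of the original module) ---
# With scalar inputs the formula reduces to plain arithmetic, e.g.
# eddy_flux_coarse(2.0, 5.0, 0.5, 4.0) == 2.0 + (5.0 - 0.5 * 4.0) == 5.0;
# the same expression is applied elementwise to xarray.DataArray inputs.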
FINE_RES_STATE_NAMES = {
"T": "air_temperature",
"sphum": "specific_humidity",
"delp": "pressure_thickness_of_atmospheric_layer",
}
FINE_RES_FLUX_NAMES = {
"DLWRFsfc_coarse": "total_sky_downward_longwave_flux_at_surface",
"DSWRFsfc_coarse": "total_sky_downward_shortwave_flux_at_surface",
"DSWRFtoa_coarse": "total_sky_downward_shortwave_flux_at_top_of_atmosphere",
"ULWRFsfc_coarse": "total_sky_upward_longwave_flux_at_surface",
"ULWRFtoa_coarse": "total_sky_upward_longwave_flux_at_top_of_atmosphere",
"USWRFsfc_coarse": "total_sky_upward_shortwave_flux_at_surface",
"USWRFtoa_coarse": "total_sky_upward_shortwave_flux_at_top_of_atmosphere",
"LHTFLsfc_coarse": "latent_heat_flux",
"SHTFLsfc_coarse": "sensible_heat_flux",
"PRATEsfc_coarse": "surface_precipitation_rate",
}
class FineResBudget(Protocol):
"""Protocol defining what input vaiables are required
Only used for type checking and editor autocompletion.
"""
area: xarray.DataArray
delp: xarray.DataArray
T: xarray.DataArray
dq3dt_deep_conv_coarse: xarray.DataArray
dq3dt_mp_coarse: xarray.DataArray
dq3dt_pbl_coarse: xarray.DataArray
dq3dt_shal_conv_coarse: xarray.DataArray
dt3dt_deep_conv_coarse: xarray.DataArray
dt3dt_lw_coarse: xarray.DataArray
dt3dt_mp_coarse: xarray.DataArray
dt3dt_ogwd_coarse: xarray.DataArray
dt3dt_pbl_coarse: xarray.DataArray
dt3dt_shal_conv_coarse: xarray.DataArray
dt3dt_sw_coarse: xarray.DataArray
eddy_flux_vulcan_omega_sphum: xarray.DataArray
eddy_flux_vulcan_omega_temp: xarray.DataArray
exposed_area: xarray.DataArray
qv_dt_fv_sat_adj_coarse: xarray.DataArray
qv_dt_phys_coarse: xarray.DataArray
sphum: xarray.DataArray
sphum_storage: xarray.DataArray
sphum_vulcan_omega_coarse: xarray.DataArray
t_dt_fv_sat_adj_coarse: xarray.DataArray
t_dt_nudge_coarse: xarray.DataArray
t_dt_phys_coarse: xarray.DataArray
vulcan_omega_coarse: xarray.DataArray
T_vulcan_omega_coarse: xarray.DataArray
T_storage: xarray.DataArray
DLWRFsfc_coarse: xarray.DataArray
DSWRFsfc_coarse: xarray.DataArray
DSWRFtoa_coarse: xarray.DataArray
ULWRFsfc_coarse: xarray.DataArray
ULWRFtoa_coarse: xarray.DataArray
USWRFsfc_coarse: xarray.DataArray
USWRFtoa_coarse: xarray.DataArray
LHTFLsfc_coarse: xarray.DataArray
SHTFLsfc_coarse: xarray.DataArray
PRATEsfc_coarse: xarray.DataArray
def astype(self, dtype):
pass
def apparent_heating(data: FineResBudget, include_temperature_nudging: bool = False):
eddy_flux = eddy_flux_coarse(
data.eddy_flux_vulcan_omega_temp,
data.T_vulcan_omega_coarse,
data.vulcan_omega_coarse,
data.T,
)
eddy_flux_convergence = vcm.convergence_cell_center(eddy_flux, data.delp, dim="z")
result = data.t_dt_fv_sat_adj_coarse + data.t_dt_phys_coarse + eddy_flux_convergence
description = (
"Apparent heating due to physics and sub-grid-scale advection. Given "
"by sat adjustment (dycore) + physics tendency + eddy-flux-convergence"
)
if include_temperature_nudging:
result = result + data.t_dt_nudge_coarse
description = description + " + temperature nudging"
return result.assign_attrs(
units="K/s",
long_name="apparent heating from high resolution data",
description=description,
).rename("Q1")
def apparent_moistening(data: FineResBudget):
eddy_flux = eddy_flux_coarse(
data.eddy_flux_vulcan_omega_sphum,
data.sphum_vulcan_omega_coarse,
data.vulcan_omega_coarse,
data.sphum,
)
eddy_flux_convergence = vcm.convergence_cell_center(eddy_flux, data.delp, dim="z")
return (
(data.qv_dt_fv_sat_adj_coarse + data.qv_dt_phys_coarse + eddy_flux_convergence)
.assign_attrs(
units="kg/kg/s",
long_name="apparent moistening from high resolution data",
description=(
"Apparent moistening due to physics and sub-grid-scale advection. "
"Given by "
"sat adjustment (dycore) + physics tendency + eddy-flux-convergence"
),
)
.rename("Q2")
)
def column_integrated_fine_res_nudging_heating(data: FineResBudget) -> xarray.DataArray:
heating_in_energy_units = vcm.internal_energy(data.t_dt_nudge_coarse)
column_heating = vcm.mass_integrate(heating_in_energy_units, data.delp, dim="z")
return column_heating.assign_attrs(
units="W/m**2",
long_name="Column integrated heating tendency due to temperature "
"nudging of fine-res run.",
)
def compute_fine_res_sources(
data: FineResBudget, include_temperature_nudging: bool = False
) -> Tuple[xarray.DataArray, xarray.DataArray]:
heating = apparent_heating(data, include_temperature_nudging)
moistening = apparent_moistening(data)
return heating, moistening
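# --- Hedged usage sketch (not part of the original module) ---
# compute_fine_res_sources() expects an object exposing the FineResBudget
# fields, e.g. an xarray.Dataset opened from a fine-resolution budget store;
# the zarr path below is a placeholder.
def example_compute_sources():
    budget = xarray.open_zarr("fine_res_budget.zarr")  # placeholder path
    q1, q2 = compute_fine_res_sources(budget, include_temperature_nudging=True)
    return q1, q2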
| 36.729167
| 88
| 0.741161
|
200a5ce7715c181c12548f20c56024c90b1d1aea
| 3,944
|
py
|
Python
|
src/posts/models.py
|
stefikolo/DRF_API
|
cde13a9ee8f52401b9ec0a007607120562e2b234
|
[
"MIT"
] | null | null | null |
src/posts/models.py
|
stefikolo/DRF_API
|
cde13a9ee8f52401b9ec0a007607120562e2b234
|
[
"MIT"
] | null | null | null |
src/posts/models.py
|
stefikolo/DRF_API
|
cde13a9ee8f52401b9ec0a007607120562e2b234
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.db import models
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from markdown_deux import markdown
from comments.models import Comment
from posts.utils import get_read_time
# Create your models here.
# MVC MODEL VIEW CONTROLLER
# Post.objects.all()
# Post.objects.create(user=user, title="Some time")
class PostManager(models.Manager):
def active(self, *args, **kwargs):
# Post.objects.all() = super(PostManager, self).all()
return super(PostManager, self).filter(draft=False).filter(publish__lte=timezone.now())
def upload_location(instance, filename):
# filebase, extension = filename.split(".")
# return "%s/%s.%s" %(instance.id, instance.id, extension)
PostModel = instance.__class__
new_id = PostModel.objects.order_by("id").last().id + 1
"""
instance.__class__ gets the model Post. We must use this method because the model is defined below.
    Then we create a queryset ordered by the "id" of each object and take
    the last object in the queryset with `.last()`, which gives us the most
    recently created Model instance. We add 1 to its id, so we get what
    should be the same id as the post we are creating.
"""
return "%s/%s" % (new_id, filename)
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)
title = models.CharField(max_length=120)
slug = models.SlugField(unique=True)
image = models.ImageField(upload_to=upload_location,
null=True,
blank=True,
width_field="width_field",
height_field="height_field")
height_field = models.IntegerField(default=0)
width_field = models.IntegerField(default=0)
content = models.TextField()
draft = models.BooleanField(default=False)
publish = models.DateField(auto_now=False, auto_now_add=False)
read_time = models.IntegerField(default=0) # models.TimeField(null=True, blank=True) #assume minutes
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
objects = PostManager()
def __unicode__(self):
return self.title
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("posts:detail", kwargs={"slug": self.slug})
class Meta:
ordering = ["-timestamp", "-updated"]
def get_markdown(self):
content = self.content
markdown_text = markdown(content)
return mark_safe(markdown_text)
@property
def comments(self):
instance = self
qs = Comment.objects.filter_by_instance(instance)
return qs
@property
def get_content_type(self):
instance = self
content_type = ContentType.objects.get_for_model(instance.__class__)
return content_type
def create_slug(instance, new_slug=None):
slug = slugify(instance.title)
if new_slug is not None:
slug = new_slug
qs = Post.objects.filter(slug=slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" % (slug, qs.first().id)
return create_slug(instance, new_slug=new_slug)
return slug
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = create_slug(instance)
if instance.content:
html_string = instance.get_markdown()
read_time_var = get_read_time(html_string)
instance.read_time = read_time_var
pre_save.connect(pre_save_post_receiver, sender=Post)
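# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the Post model above is typically used from a view or the
# Django shell; the user lookup and field values are placeholders.
def example_create_post():
    from django.contrib.auth import get_user_model
    user = get_user_model().objects.first()
    post = Post.objects.create(
        user=user,
        title="Hello world",
        content="**markdown** body",
        publish=timezone.now().date(),
    )
    # slug and read_time are filled in by pre_save_post_receiver on save.
    return post.slug, post.read_time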
| 33.142857
| 105
| 0.690923
|
74c03e715e6014b99297f1ec03cb87f2c165d4da
| 826
|
py
|
Python
|
pulsus/services/base/notification.py
|
pennersr/pulsus
|
ace014ca40e3928b235e1bcfebe22301c7f3cafe
|
[
"MIT"
] | 14
|
2015-01-16T07:48:43.000Z
|
2019-04-19T23:13:50.000Z
|
pulsus/services/base/notification.py
|
pennersr/pulsus
|
ace014ca40e3928b235e1bcfebe22301c7f3cafe
|
[
"MIT"
] | null | null | null |
pulsus/services/base/notification.py
|
pennersr/pulsus
|
ace014ca40e3928b235e1bcfebe22301c7f3cafe
|
[
"MIT"
] | 2
|
2015-08-06T12:52:56.000Z
|
2019-02-07T18:09:23.000Z
|
class BaseNotification(object):
service_type = None
notification_type = None
def serialize_data(self):
raise NotImplementedError()
def serialize(self):
ret = {'data': self.serialize_data()}
ret['type'] = self.service_type
if self.notification_type:
ret['kind'] = self.notification_type
return ret
@classmethod
def deserialize(cls, data):
# FIXME: Something hardcoded for now, to be
# be replaced
from ..apns import APNSNotification
from ..gcm import GCMJSONMessage
if data['type'] == APNSNotification.service_type:
return APNSNotification.deserialize_data(data['data'])
elif data['type'] == GCMJSONMessage.service_type:
return GCMJSONMessage.deserialize_data(data['data'])
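# --- Hedged sketch (not part of the original module) ---
# Minimal example of what a concrete subclass is expected to provide; the
# service/notification type strings below are placeholders.
class ExampleNotification(BaseNotification):
    service_type = 'example'
    notification_type = 'alert'

    def __init__(self, message):
        self.message = message

    def serialize_data(self):
        return {'message': self.message}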
| 30.592593
| 66
| 0.642857
|
13b35846fb9327bf424383f3cce35f3f001adfd4
| 7,667
|
py
|
Python
|
src/PreprocessML20M.py
|
olivierjeunen/ease-side-info-recsys-2020
|
66713a4a2d4b238e883254da4be7b51e8bbc1b96
|
[
"MIT"
] | 12
|
2020-08-17T08:20:48.000Z
|
2022-01-25T11:43:59.000Z
|
src/PreprocessML20M.py
|
olivierjeunen/ease-side-info-recsys-2020
|
66713a4a2d4b238e883254da4be7b51e8bbc1b96
|
[
"MIT"
] | 1
|
2021-10-08T05:01:15.000Z
|
2021-11-05T10:54:03.000Z
|
src/PreprocessML20M.py
|
olivierjeunen/ease-side-info-recsys-2020
|
66713a4a2d4b238e883254da4be7b51e8bbc1b96
|
[
"MIT"
] | 3
|
2020-11-30T05:35:10.000Z
|
2022-02-19T09:00:31.000Z
|
import argparse
import numpy as np
import os
import pandas as pd
import pickle
import util
from datetime import datetime
from scipy.sparse import save_npz, vstack
from sklearn.preprocessing import LabelEncoder
if __name__ == '__main__':
# Commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument('dir', type = str, help = 'Directory containing the data')
parser.add_argument('--test_users', type = int, default = 10000)
args = parser.parse_args()
# Fix seed for reproducibility
np.random.seed(42)
# Load rating data
print(datetime.now(), 'Loading in ratings...')
ratings = pd.read_csv(args.dir + 'ml-20m_ratings.csv')
ratings.columns = ['user', 'item', 'rating', 'time']
# Preprocessing as in Liang et al. @ WWW 2018
# Only keep ratings of 4 or higher
ratings = ratings.loc[ratings.rating >= 4]
# Only keep users who have rated at least 5 movies
user_counts = ratings['user'].value_counts().reset_index().rename(columns = {'index': 'user', 'user': 'count'})
user_counts = user_counts.loc[user_counts['count'] >= 5]
ratings = ratings.merge(user_counts, on = 'user', how = 'right').drop('count', axis = 1)
print('\t{0:8} ratings'.format(ratings.shape[0]))
print('\t{0:8} unique users, {1:8} unique items'.format(ratings['user'].nunique(), ratings['item'].nunique()))
# Load side info
print(datetime.now(), 'Loading in side-info...')
##########
# GENRES #
##########
# Load in data
movies = pd.read_csv(args.dir + 'ml-20m_movies.csv')
movies.columns = ['item', 'title', 'genres']
# Drop movies that don't appear in preference data
movies = movies.merge(ratings[['item']].drop_duplicates(), on = 'item', how = 'right')
# Properly format
genres = pd.DataFrame(movies.genres.str.split('|').tolist(), index = movies.item)\
.stack()\
.reset_index([0, 'item'])\
.rename(columns = {0: 'genre'})
# Drop nonsensical genres
genres = genres.loc[genres.genre != '(no genres listed)']
genres = genres.loc[genres.genre != 'IMAX']
#########
# YEARS #
#########
# Extract year
    movies['year'] = movies['title'].str.extract(pat = r'\((\d\d\d\d)(?:[-–]\s*(?:\d\d\d\d)?)?\)')
years = movies[['item','year']]
    # Drop years that appear only once (wouldn't affect Gram-matrix)
y2c = years.groupby('year')['item']\
.apply(lambda x: len(set(x)))\
.reset_index()\
.rename(columns = {'item': 'count'})
y2c = y2c[y2c['count'] >= 2]
years = years.merge(y2c[['year']], on = 'year', how = 'right')
########
# CREW #
########
# Load IMDB data links with movielens
links = pd.read_csv(args.dir + 'ml-imdb_links.csv')[['movieId','imdbId']]
links.columns = ['item', 'imdb_id']
# Load IMDB crew data and link it properly
crew = pd.read_csv(args.dir + 'imdb_crew_info.csv')
crew.columns = ['imdb_id', 'directors', 'writers']
crew['imdb_id'] = crew['imdb_id'].apply(lambda s: int(s[2:]))
crew = crew.merge(links, on = 'imdb_id', how = 'right')
# We don't care about movies without ratings
crew = crew.merge(ratings[['item']].drop_duplicates(), on = 'item', how = 'right')[['item','directors','writers']]
crew['directors'] = crew['directors'].apply(lambda s: str(s))
crew['writers'] = crew['writers'].apply(lambda s: str(s))
# Extract directors
directors = pd.DataFrame(crew.directors.str.split(',').tolist(), index = crew.item).stack().reset_index([0, 'item'])
directors.columns = ['item', 'director']
directors = directors.loc[directors.director != '\\N']
    # Drop directors that appear only once (wouldn't affect Gram-matrix)
dir2count = directors.groupby('director')['item'].apply(lambda x: len(set(x))).reset_index().rename(columns = {'item': 'count'})
dir2count = dir2count[dir2count['count'] >= 2]
directors = directors.merge(dir2count[['director']], on = 'director', how = 'right')
# Extract writers
writers = pd.DataFrame(crew.writers.str.split(',').tolist(), index = crew.item).stack().reset_index([0, 'item'])
writers.columns = ['item', 'writer']
writers = writers.loc[writers.writer != '\\N']
    # Drop writers that appear only once (wouldn't affect Gram-matrix)
writer2count = writers.groupby('writer')['item'].apply(lambda x: len(set(x))).reset_index().rename(columns = {'item': 'count'})
writer2count = writer2count[writer2count['count'] >= 2]
writers = writers.merge(writer2count[['writer']], on = 'writer', how = 'right')
# Ensure proper integer identifiers
user_enc = LabelEncoder()
item_enc = LabelEncoder()
genre_enc = LabelEncoder()
year_enc = LabelEncoder()
direc_enc = LabelEncoder()
write_enc = LabelEncoder()
ratings['user'] = user_enc.fit_transform(ratings['user'])
ratings['item'] = item_enc.fit_transform(ratings['item'])
genres['item'] = item_enc.transform(genres['item'])
genres['genre'] = genre_enc.fit_transform(genres['genre'])
years['item'] = item_enc.transform(years['item'])
years['year'] = year_enc.fit_transform(years['year'])
directors['item'] = item_enc.transform(directors['item'])
directors['director'] = direc_enc.fit_transform(directors['director'])
writers['item'] = item_enc.transform(writers['item'])
writers['writer'] = write_enc.fit_transform(writers['writer'])
# Generate Metadata-to-item mapping
X_genres = util.generate_csr_matrix(genres, 'genre', ratings['item'].max() + 1)
X_years = util.generate_csr_matrix(years, 'year', ratings['item'].max() + 1)
X_directors = util.generate_csr_matrix(directors, 'director', ratings['item'].max() + 1)
X_writers = util.generate_csr_matrix(writers, 'writer', ratings['item'].max() + 1)
X_meta = vstack((X_genres,X_years,X_directors,X_writers))
# Check whether output directory already exists - make it if necessary
if not os.path.exists(args.dir + 'preprocessed/'):
os.makedirs(args.dir + 'preprocessed/')
# Write out metadata-item matrix
print(datetime.now(), 'Writing out metadata-item matrix...')
save_npz(args.dir + 'preprocessed/X_meta.npz', X_meta)
# Train - validation - test split
print(datetime.now(), 'Train-validation-test split...')
X_train, X_val, val_dict, X_test, test_dict = util.train_val_test_split_Jebara(ratings, n_test_users = args.test_users)
# Write out validation and test data
print(datetime.now(), 'Writing out validation and test data...')
save_npz(args.dir + 'preprocessed/X_val.npz', X_val)
with open(args.dir + 'preprocessed/val_dict.pkl', 'wb') as handle:
pickle.dump(val_dict, handle)
save_npz(args.dir + 'preprocessed/X_test.npz', X_test)
with open(args.dir + 'preprocessed/test_dict.pkl', 'wb') as handle:
pickle.dump(test_dict, handle)
# Write out full user-item training matrix
print(datetime.now(), 'Writing out train data...')
save_npz(args.dir + 'preprocessed/X_train.npz', X_train)
# Subsample training data on a user-level
print(datetime.now(), 'Subsampling training users...')
train_users = np.unique(X_train.nonzero()[0])
np.random.shuffle(train_users)
for frac_train_users in [0.01, .05, .1, .25, .5]:
train_users[:int(frac_train_users * len(train_users))]
pd.DataFrame(train_users[:int(frac_train_users * len(train_users))], columns = ['user']).to_csv(args.dir + 'preprocessed/train_users_{}.csv'.format(frac_train_users), index = False)
print(datetime.now(), 'Finished!')
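# --- Hedged usage note (not part of the original script) ---
# Typical invocation (the data directory is a placeholder and must end with
# a trailing slash, since paths are built by string concatenation):
#   python PreprocessML20M.py /data/ml-20m/ --test_users 10000
# This writes X_meta.npz, X_train.npz, X_val.npz, X_test.npz, the pickled
# evaluation dicts and the subsampled train_users_*.csv files into
# <dir>/preprocessed/.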
| 46.466667
| 189
| 0.647059
|
2a3eb179275b89827a310426b9f8b67fc41faf57
| 2,329
|
py
|
Python
|
ibmdbpy/series.py
|
marc-mclean1/ibmdbpy
|
46d885e793da52c58424885d74ab1a6668c391b3
|
[
"BSD-3-Clause"
] | 21
|
2016-02-18T13:10:48.000Z
|
2020-11-09T00:09:07.000Z
|
ibmdbpy/series.py
|
marc-mclean1/ibmdbpy
|
46d885e793da52c58424885d74ab1a6668c391b3
|
[
"BSD-3-Clause"
] | 57
|
2016-02-29T15:14:05.000Z
|
2021-07-23T07:19:41.000Z
|
ibmdbpy/series.py
|
marc-mclean1/ibmdbpy
|
46d885e793da52c58424885d74ab1a6668c391b3
|
[
"BSD-3-Clause"
] | 17
|
2016-01-04T07:11:37.000Z
|
2021-11-05T12:45:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2015, IBM Corp.
# All rights reserved.
#
# Distributed under the terms of the BSD Simplified License.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
idaSeries
"""
# Ensure Python 2 compatibility
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
from copy import deepcopy
from lazy import lazy
import ibmdbpy
class IdaSeries(ibmdbpy.IdaDataFrame):
"""
    An IdaSeries can be considered a variant of an IdaDataFrame that has
    only one column and can thus be represented to the user as a
    pandas.Series.
"""
def __init__(self, idadb, tablename, indexer, column):
super(IdaSeries, self).__init__(idadb, tablename, indexer)
self.column = column
##### legacy
@lazy
def columns(self):
return [self.column]
# TODO : Override all methods for which the behavior, i.e. the output is
    # different in comparison with the one of an IdaDataFrame. For now the
    # disjunctions are implemented on the functions only.
def min(self):
result = super(IdaSeries, self).min()
#import pdb; pdb.set_trace()
return result[0]
def max(self):
result = super(IdaSeries, self).max()
#import pdb; pdb.set_trace()
return result[0]
def _clone(self):
"""
Clone an IdaSeries.
"""
newida = IdaSeries(self._idadb, self._name, self.indexer, self.column)
newida.internal_state.name = deepcopy(self.internal_state.name)
newida.internal_state.ascending = deepcopy(self.internal_state.ascending)
#newida.internal_state.views = deepcopy(self.internal_state.views)
newida.internal_state._views = deepcopy(self.internal_state._views)
newida.internal_state._cumulative = deepcopy(self.internal_state._cumulative)
newida.internal_state.order = deepcopy(self.internal_state.order)
return newida
| 32.802817
| 85
| 0.659081
|
80ac8ec00f7b71c72169101b1398a54359093e95
| 2,709
|
py
|
Python
|
sdk/python/pulumi_aws_native/ssmcontacts/get_contact.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/ssmcontacts/get_contact.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/ssmcontacts/get_contact.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetContactResult',
'AwaitableGetContactResult',
'get_contact',
'get_contact_output',
]
@pulumi.output_type
class GetContactResult:
def __init__(__self__, arn=None, display_name=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
"""
The Amazon Resource Name (ARN) of the contact.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
Name of the contact. String value with 3 to 256 characters. Only alphabetical, space, numeric characters, dash, or underscore allowed.
"""
return pulumi.get(self, "display_name")
class AwaitableGetContactResult(GetContactResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetContactResult(
arn=self.arn,
display_name=self.display_name)
def get_contact(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContactResult:
"""
Resource Type definition for AWS::SSMContacts::Contact
:param str arn: The Amazon Resource Name (ARN) of the contact.
"""
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:ssmcontacts:getContact', __args__, opts=opts, typ=GetContactResult).value
return AwaitableGetContactResult(
arn=__ret__.arn,
display_name=__ret__.display_name)
@_utilities.lift_output_func(get_contact)
def get_contact_output(arn: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetContactResult]:
"""
Resource Type definition for AWS::SSMContacts::Contact
:param str arn: The Amazon Resource Name (ARN) of the contact.
"""
...
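# --- Hedged usage sketch (not part of the generated module) ---
# Example of resolving a contact by ARN from inside a Pulumi program; the
# ARN below is a placeholder.
def example_lookup_contact():
    result = get_contact(arn="arn:aws:ssm-contacts:us-east-1:123456789012:contact/example")
    return result.display_name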
| 31.5
| 142
| 0.664821
|
d30fcd935f23ca36ba9a30c5a3547e8fb11d550c
| 606
|
py
|
Python
|
models/post.py
|
CodeByMini/thefriendzone
|
84c3dd14ba2b0be7cf3cd681f761d3d6780498d4
|
[
"Apache-2.0"
] | null | null | null |
models/post.py
|
CodeByMini/thefriendzone
|
84c3dd14ba2b0be7cf3cd681f761d3d6780498d4
|
[
"Apache-2.0"
] | null | null | null |
models/post.py
|
CodeByMini/thefriendzone
|
84c3dd14ba2b0be7cf3cd681f761d3d6780498d4
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
class Post:
def __init__(self, author, body):
self._author = author
self._body = body
self._created_timestamp = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
self._yikes = 0
self._attachments = []
@property
def yikes(self):
return self._yikes
@property
def author(self):
return self._author
def yike(self):
self._yikes += 1
def un_yike(self):
if self._yikes > 0:
self._yikes -= 1
def attach(self, attachment):
self._attachments.append(attachment)
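# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the Post API defined above; the author and body strings are
# placeholders.
def example_post_flow():
    post = Post("mini", "hello friendzone")
    post.yike()
    post.yike()
    post.un_yike()
    post.attach("photo.png")
    return post.author, post.yikes  # ("mini", 1)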
| 22.444444
| 79
| 0.582508
|
38ca2c4476604c4ed7d8c1f8f174089559252dc3
| 3,765
|
py
|
Python
|
src/OTLMOW/OTLModel/Classes/Hardware.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Classes/Hardware.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Classes/Hardware.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.HardwareToegang import HardwareToegang
from OTLMOW.OTLModel.Datatypes.IntegerField import IntegerField
from OTLMOW.OTLModel.Datatypes.KlHardwareMerk import KlHardwareMerk
from OTLMOW.OTLModel.Datatypes.KlHardwareModelnaam import KlHardwareModelnaam
from OTLMOW.OTLModel.Datatypes.KlHardwareVormfactor import KlHardwareVormfactor
from OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Hardware(HardwareToegang, PuntGeometrie):
"""Fysieke componenten of onderdelen van een computer."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Hardware'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
HardwareToegang.__init__(self)
PuntGeometrie.__init__(self)
self._aantalUnits = OTLAttribuut(field=IntegerField,
naam='aantalUnits',
label='aantal units',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Hardware.aantalUnits',
definition='Het aantal units dat een server in een rack inneemt.',
owner=self)
self._merk = OTLAttribuut(field=KlHardwareMerk,
naam='merk',
label='merk',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Hardware.merk',
definition='Het merk van de hardware.',
owner=self)
self._modelnaam = OTLAttribuut(field=KlHardwareModelnaam,
naam='modelnaam',
label='modelnaam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Hardware.modelnaam',
definition='De modelnaam van de hardware.',
owner=self)
self._vormfactor = OTLAttribuut(field=KlHardwareVormfactor,
naam='vormfactor',
label='vormfactor',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Hardware.vormfactor',
definition='Het soort toestel waarin de fysieke componenten of onderdelen worden vormgegeven.',
owner=self)
@property
def aantalUnits(self):
"""Het aantal units dat een server in een rack inneemt."""
return self._aantalUnits.get_waarde()
@aantalUnits.setter
def aantalUnits(self, value):
self._aantalUnits.set_waarde(value, owner=self)
@property
def merk(self):
"""Het merk van de hardware."""
return self._merk.get_waarde()
@merk.setter
def merk(self, value):
self._merk.set_waarde(value, owner=self)
@property
def modelnaam(self):
"""De modelnaam van de hardware."""
return self._modelnaam.get_waarde()
@modelnaam.setter
def modelnaam(self, value):
self._modelnaam.set_waarde(value, owner=self)
@property
def vormfactor(self):
"""Het soort toestel waarin de fysieke componenten of onderdelen worden vormgegeven."""
return self._vormfactor.get_waarde()
@vormfactor.setter
def vormfactor(self, value):
self._vormfactor.set_waarde(value, owner=self)
| 44.294118
| 135
| 0.594422
|
dd63b2033d9a6ee59e4049e3937cca739b8bd9c7
| 21,872
|
py
|
Python
|
bokeh/model.py
|
jermwatt/bokeh
|
1985c3b1bbaf5a71a62e94a8dacbb7c67df256c9
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T09:15:48.000Z
|
2017-04-27T09:15:48.000Z
|
app/static/libs/bokeh/bokeh/model.py
|
TBxy/bokeh_start_app
|
755494f6bc60e92ce17022bbd7f707a39132cbd0
|
[
"MIT"
] | null | null | null |
app/static/libs/bokeh/bokeh/model.py
|
TBxy/bokeh_start_app
|
755494f6bc60e92ce17022bbd7f707a39132cbd0
|
[
"MIT"
] | 1
|
2021-09-09T03:33:04.000Z
|
2021-09-09T03:33:04.000Z
|
''' Provide a base class for all objects (called Bokeh Models) that go in
Bokeh Documents.
The :class:`~bokeh.document.Document` class is the basic unit of serialization
for Bokeh visualizations and applications. Documents contain collections of
related Bokeh Models (e.g. ``Plot``, ``Range1d``, etc. ) that can be all
serialized together.
The :class:`~bokeh.model.Model` class is a base class for all objects that
can be added to a Document.
'''
from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__file__)
from contextlib import contextmanager
from json import loads
from operator import itemgetter
from six import iteritems
from .core.json_encoder import serialize_json
from .core.properties import Any, Dict, Instance, List, String
from .core.has_props import HasProps, MetaHasProps
from .core.query import find
from .themes import default as default_theme
from .util.callback_manager import CallbackManager
from .util.future import with_metaclass
from .util.serialization import make_id
class Viewable(MetaHasProps):
""" Any Bokeh Model which has its own View Model in the
persistence layer.
"""
# Stores a mapping from subclass __view_model__ names to classes
model_class_reverse_map = {}
# Mmmm.. metaclass inheritance. On the one hand, it seems a little
# overkill. On the other hand, this is exactly the sort of thing
# it's meant for.
def __new__(meta_cls, class_name, bases, class_dict):
if "__view_model__" not in class_dict:
class_dict["__view_model__"] = class_name
class_dict["get_class"] = Viewable.get_class
# Create the new class
newcls = super(Viewable, meta_cls).__new__(meta_cls, class_name, bases, class_dict)
entry = class_dict.get("__subtype__", class_dict["__view_model__"])
# Add it to the reverse map, but check for duplicates first
if entry in Viewable.model_class_reverse_map and not hasattr(newcls, "__implementation__"):
raise Warning("Duplicate __view_model__ or __subtype__ declaration of '%s' for " \
"class %s. Previous definition: %s" % \
(entry, class_name,
Viewable.model_class_reverse_map[entry]))
Viewable.model_class_reverse_map[entry] = newcls
return newcls
@classmethod
def _preload_models(cls):
from . import models; models
from .plotting import Figure; Figure
try:
from .charts import Chart; Chart
except RuntimeError:
# this would occur if pandas is not installed but then we can't
# use the bokeh.charts interface anyway
pass
@classmethod
def get_class(cls, view_model_name):
""" Given a __view_model__ name, returns the corresponding class
object
"""
cls._preload_models()
d = Viewable.model_class_reverse_map
if view_model_name in d:
return d[view_model_name]
else:
raise KeyError("View model name '%s' not found" % view_model_name)
class Model(with_metaclass(Viewable, HasProps, CallbackManager)):
''' Base class for all objects stored in Bokeh ``Document`` instances.
'''
name = String(help="""
An arbitrary, user-supplied name for this model.
This name can be useful when querying the document to retrieve specific
Bokeh models.
.. code:: python
>>> plot.circle([1,2,3], [4,5,6], name="temp")
>>> plot.select(name="temp")
[GlyphRenderer(id='399d53f5-73e9-44d9-9527-544b761c7705', ...)]
.. note::
No uniqueness guarantees or other conditions are enforced on any names
that are provided.
""")
tags = List(Any, help="""
An optional list of arbitrary, user-supplied values to attach to this
model.
This data can be useful when querying the document to retrieve specific
Bokeh models:
.. code:: python
>>> r = plot.circle([1,2,3], [4,5,6])
>>> r.tags = ["foo", 10]
>>> plot.select(tags=['foo', 10])
[GlyphRenderer(id='1de4c3df-a83d-480a-899b-fb263d3d5dd9', ...)]
Or simply a convenient way to attach any necessary metadata to a model
that can be accessed by CustomJS callbacks, etc.
""")
js_callbacks = Dict(String, List(Instance("bokeh.models.callbacks.CustomJS")), help="""
A mapping of attribute names to lists of CustomJS callbacks, to be set up on
BokehJS side when the document is created.
Typically, rather then modifying this property directly, callbacks should be
added using the ``Model.js_on_change`` method:
.. code:: python
callback = CustomJS(code="console.log('stuff')")
plot.x_range.js_on_change('start', callback)
""")
def __init__(self, **kwargs):
self._id = kwargs.pop("id", make_id())
self._document = None
super(Model, self).__init__(**kwargs)
default_theme.apply_to_model(self)
def _attach_document(self, doc):
'''This should only be called by the Document implementation to set the document field'''
if self._document is not None and self._document is not doc:
raise RuntimeError("Models must be owned by only a single document, %r is already in a doc" % (self))
doc.theme.apply_to_model(self)
self._document = doc
def _detach_document(self):
'''This should only be called by the Document implementation to unset the document field'''
self._document = None
default_theme.apply_to_model(self)
@property
def document(self):
return self._document
def on_change(self, attr, *callbacks):
''' Add a callback on this object to trigger when ``attr`` changes.
Args:
attr (str) : an attribute name on this object
callback (callable) : a callback function to register
Returns:
None
'''
if attr not in self.properties():
raise ValueError("attempted to add a callback on nonexistent %s.%s property" % (self.__class__.__name__, attr))
super(Model, self).on_change(attr, *callbacks)
def js_on_change(self, event, *callbacks):
''' Attach a CustomJS callback to an arbitrary BokehJS model event.
On the BokehJS side, change events for model properties have the
form ``"change:property_name"``. As a convenience, if the event name
passed to this method is also the name of a property on the model,
then it will be prefixed with ``"change:"`` automatically:
.. code:: python
# these two are equivalent
source.js_on_change('data', callback)
source.js_on_change('change:data', callback)
However, there are other kinds of events that can be useful to respond
to, in addition to property change events. For example to run a
callback whenever data is streamed to a ``ColumnDataSource``, use the
``"stream"`` event on the source:
.. code:: python
source.js_on_change('stream', callback)
'''
if len(callbacks) == 0:
raise ValueError("js_on_change takes an event name and one or more callbacks, got only one parameter")
# handle any CustomJS callbacks here
from bokeh.models.callbacks import CustomJS
if not all(isinstance(x, CustomJS) for x in callbacks):
raise ValueError("not all callback values are CustomJS instances")
if event in self.properties():
event = "change:%s" % event
if event not in self.js_callbacks:
self.js_callbacks[event] = []
for callback in callbacks:
if callback in self.js_callbacks[event]:
continue
self.js_callbacks[event].append(callback)
def trigger(self, attr, old, new, hint=None, setter=None):
# The explicit assumption here is that hinted events do not
# need to go through all the same invalidation steps. Currently
# as of Bokeh 0.11.1 the only hinted event is ColumnsStreamedEvent.
# This may need to be further refined in the future, if the
# assumption does not hold for future hinted events (e.g. the hint
# could specify explicitly whether to do normal invalidation or not)
if not hint:
dirty = { 'count' : 0 }
def mark_dirty(obj):
dirty['count'] += 1
if self._document is not None:
self._visit_value_and_its_immediate_references(new, mark_dirty)
self._visit_value_and_its_immediate_references(old, mark_dirty)
if dirty['count'] > 0:
self._document._invalidate_all_models()
# chain up to invoke callbacks
super(Model, self).trigger(attr, old, new, hint, setter)
@property
def ref(self):
if "__subtype__" in self.__class__.__dict__:
return {
'type': self.__view_model__,
'subtype': self.__subtype__,
'id': self._id,
}
else:
return {
'type': self.__view_model__,
'id': self._id,
}
def select(self, selector):
''' Query this object and all of its references for objects that
match the given selector.
Args:
selector (JSON-like) :
Returns:
seq[Model]
'''
return find(self.references(), selector)
def select_one(self, selector):
''' Query this object and all of its references for objects that
match the given selector. Raises an error if more than one object
is found. Returns single matching object, or None if nothing is found
Args:
selector (JSON-like) :
Returns:
Model
'''
result = list(self.select(selector))
if len(result) > 1:
raise ValueError("Found more than one object matching %s: %r" % (selector, result))
if len(result) == 0:
return None
return result[0]
def set_select(self, selector, updates):
''' Update objects that match a given selector with the specified
attribute/value updates.
Args:
selector (JSON-like) :
updates (dict) :
Returns:
None
'''
for obj in self.select(selector):
for key, val in updates.items():
setattr(obj, key, val)
def layout(self, side, plot):
try:
return self in getattr(plot, side)
except:
return []
@classmethod
def _visit_immediate_value_references(cls, value, visitor):
''' Visit all references to another Model without recursing into any
of the child Model; may visit the same Model more than once if
it's referenced more than once. Does not visit the passed-in value.
'''
if isinstance(value, HasProps):
for attr in value.properties_with_refs():
child = getattr(value, attr)
cls._visit_value_and_its_immediate_references(child, visitor)
else:
cls._visit_value_and_its_immediate_references(value, visitor)
@classmethod
def _visit_value_and_its_immediate_references(cls, obj, visitor):
if isinstance(obj, Model):
visitor(obj)
elif isinstance(obj, HasProps):
# this isn't a Model, so recurse into it
cls._visit_immediate_value_references(obj, visitor)
elif isinstance(obj, (list, tuple)):
for item in obj:
cls._visit_value_and_its_immediate_references(item, visitor)
elif isinstance(obj, dict):
for key, value in iteritems(obj):
cls._visit_value_and_its_immediate_references(key, visitor)
cls._visit_value_and_its_immediate_references(value, visitor)
@classmethod
def collect_models(cls, *input_values):
""" Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go. The resulting list
is duplicate-free based on objects' identifiers.
"""
ids = set([])
collected = []
queued = []
def queue_one(obj):
if obj._id not in ids:
queued.append(obj)
for value in input_values:
cls._visit_value_and_its_immediate_references(value, queue_one)
while queued:
obj = queued.pop(0)
if obj._id not in ids:
ids.add(obj._id)
collected.append(obj)
cls._visit_immediate_value_references(obj, queue_one)
return collected
def references(self):
"""Returns all ``Models`` that this object has references to. """
return set(self.collect_models(self))
def _to_json_like(self, include_defaults):
""" Returns a dictionary of the attributes of this object, in
a layout corresponding to what BokehJS expects at unmarshalling time.
This method does not convert "Bokeh types" into "plain JSON types,"
for example each child Model will still be a Model, rather
than turning into a reference, numpy isn't handled, etc.
That's what "json like" means.
This method should be considered "private" or "protected",
for use internal to Bokeh; use to_json() instead because
it gives you only plain JSON-compatible types.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default.
"""
all_attrs = self.properties_with_values(include_defaults=include_defaults)
# If __subtype__ is defined, then this model may introduce properties
# that don't exist on __view_model__ in bokehjs. Don't serialize such
# properties.
subtype = getattr(self.__class__, "__subtype__", None)
if subtype is not None and subtype != self.__class__.__view_model__:
attrs = {}
for attr, value in all_attrs.items():
if attr in self.__class__.__dict__:
continue
else:
attrs[attr] = value
else:
attrs = all_attrs
for (k, v) in attrs.items():
# we can't serialize Infinity, we send it as None and
# the other side has to fix it up. This transformation
# can't be in our json_encoder because the json
# module checks for inf before it calls the custom
# encoder.
if isinstance(v, float) and v == float('inf'):
attrs[k] = None
return attrs
def to_json(self, include_defaults):
""" Returns a dictionary of the attributes of this object,
containing only "JSON types" (string, number, boolean,
none, dict, list).
References to other objects are serialized as "refs" (just
the object ID and type info), so the deserializer will
need to separately have the full attributes of those
other objects.
There's no corresponding from_json() because to
deserialize an object is normally done in the context of a
Document (since the Document can resolve references).
For most purposes it's best to serialize and deserialize
entire documents.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default
"""
return loads(self.to_json_string(include_defaults=include_defaults))
def to_json_string(self, include_defaults):
"""Returns a JSON string encoding the attributes of this object.
References to other objects are serialized as references
(just the object ID and type info), so the deserializer
will need to separately have the full attributes of those
other objects.
There's no corresponding from_json_string() because to
deserialize an object is normally done in the context of a
Document (since the Document can resolve references).
For most purposes it's best to serialize and deserialize
entire documents.
Args:
include_defaults (bool) : whether to include attributes
that haven't been changed from the default
"""
json_like = self._to_json_like(include_defaults=include_defaults)
json_like['id'] = self._id
# serialize_json "fixes" the JSON from _to_json_like by converting
# all types into plain JSON types # (it converts Model into refs,
# for example).
return serialize_json(json_like)
def __str__(self):
return "%s(id=%r, ...)" % (self.__class__.__name__, getattr(self, "_id", None))
__repr__ = __str__
def _bokeh_repr_pretty_(self, p, cycle):
name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
_id = getattr(self, "_id", None)
if cycle:
p.text(name)
p.text('(id=')
p.pretty(_id)
p.text(', ...)')
else:
with p.group(4, '%s(' % name, ')'):
props = self.properties_with_values().items()
sorted_props = sorted(props, key=itemgetter(0))
all_props = [('id', _id)] + sorted_props
for i, (prop, value) in enumerate(all_props):
if i == 0:
p.breakable('')
else:
p.text(',')
p.breakable()
p.text(prop)
p.text('=')
p.pretty(value)
def _repr_html_(self):
module = self.__class__.__module__
name = self.__class__.__name__
_id = getattr(self, "_id", None)
cls_name = make_id()
def row(c):
return '<div style="display: table-row;">' + c + '</div>'
def hidden_row(c):
return '<div class="%s" style="display: none;">%s</div>' % (cls_name, c)
def cell(c):
return '<div style="display: table-cell;">' + c + '</div>'
html = ''
html += '<div style="display: table;">'
ellipsis_id = make_id()
ellipsis = '<span id="%s" style="cursor: pointer;">…)</span>' % ellipsis_id
prefix = cell('<b title="%s.%s">%s</b>(' % (module, name, name))
html += row(prefix + cell('id' + ' = ' + repr(_id) + ', ' + ellipsis))
props = self.properties_with_values().items()
sorted_props = sorted(props, key=itemgetter(0))
all_props = sorted_props
for i, (prop, value) in enumerate(all_props):
end = ')' if i == len(all_props)-1 else ','
html += hidden_row(cell("") + cell(prop + ' = ' + repr(value) + end))
html += '</div>'
html += """
<script>
(function() {
var expanded = false;
var ellipsis = document.getElementById("%(ellipsis_id)s");
ellipsis.addEventListener("click", function() {
var rows = document.getElementsByClassName("%(cls_name)s");
for (var i = 0; i < rows.length; i++) {
var el = rows[i];
el.style.display = expanded ? "none" : "table-row";
}
ellipsis.innerHTML = expanded ? "…)" : "‹‹‹";
expanded = !expanded;
});
})();
</script>
""" % dict(ellipsis_id=ellipsis_id, cls_name=cls_name)
return html
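# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the query helpers defined on Model; 'plot' stands for any
# Bokeh model tree (e.g. a figure) and the selector values are placeholders.
def _example_queries(plot):
    matching = plot.select({'name': 'temp'})
    single = plot.select_one({'name': 'temp'})
    plot.set_select({'name': 'temp'}, {'name': 'renamed'})
    return matching, single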
def _find_some_document(models):
from .document import Document
# First try the easy stuff...
doc = None
for model in models:
if isinstance(model, Document):
doc = model
break
elif isinstance(model, Model):
if model.document is not None:
doc = model.document
break
# Now look in children of models
if doc is None:
for model in models:
if isinstance(model, Model):
# see if some child of ours is in a doc, this is meant to
# handle a thing like:
# p = figure()
# box = HBox(children=[p])
# show(box)
for r in model.references():
if r.document is not None:
doc = r.document
break
return doc
class _ModelInDocument(object):
# 'models' can be a single Model, a single Document, or a list of either
def __init__(self, models):
from .document import Document
self._to_remove_after = []
if not isinstance(models, list):
models = [models]
self._doc = _find_some_document(models)
if self._doc is None:
# oh well - just make up a doc
self._doc = Document()
for model in models:
if isinstance(model, Model):
if model.document is None:
self._to_remove_after.append(model)
def __exit__(self, type, value, traceback):
for model in self._to_remove_after:
model.document.remove_root(model)
def __enter__(self):
for model in self._to_remove_after:
self._doc.add_root(model)
@contextmanager
def _ModelInEmptyDocument(model):
from .document import Document
full_doc = _find_some_document([model])
model._document = None
for ref in model.references():
ref._document = None
empty_doc = Document()
empty_doc.add_root(model)
yield model
model._document = full_doc
for ref in model.references():
ref._document = full_doc
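# --- Illustrative sketch, not part of the original module -------------------
# Hedged examples of how the serialization helpers above are typically used.
# `some_model` stands for any Model instance (for example a glyph or range
# object); only names defined in this module are assumed.
def _example_serialize(some_model):
    """Serialize *some_model* both as a JSON string and as a plain dict."""
    as_string = some_model.to_json_string(include_defaults=False)
    as_dict = some_model.to_json(include_defaults=False)  # loads() of the string above
    return as_string, as_dict
def _example_standalone_json(some_model):
    """Serialize *some_model* detached from whatever Document currently owns it."""
    with _ModelInEmptyDocument(some_model) as standalone:
        return standalone.to_json_string(include_defaults=False)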
| 35.448947
| 123
| 0.610461
|
a08fb12f51b99a72c2ef6089d424517a26e7a5ea
| 2,790
|
py
|
Python
|
docs/setup.py
|
sschwindt/TKEanalyst
|
bb6ca6a98133e4e9c822c0d20188fab0cb2adb43
|
[
"BSD-3-Clause"
] | null | null | null |
docs/setup.py
|
sschwindt/TKEanalyst
|
bb6ca6a98133e4e9c822c0d20188fab0cb2adb43
|
[
"BSD-3-Clause"
] | null | null | null |
docs/setup.py
|
sschwindt/TKEanalyst
|
bb6ca6a98133e4e9c822c0d20188fab0cb2adb43
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
from pathlib import Path
lines = Path(".").joinpath("__init__.py")
version = "1.0.3"
for line in lines.read_text().split("\n"):
if line.startswith("__version__ ="):
version = line.split(" = ")[-1].strip('"')
break
setup(
name="TKEanalyst",
version=version,
python_requires=">=3.6",
author="sschwindt",
author_email="sebastian.schwindt@iws.uni-stuttgart.de",
url="https://github.com/sschwindt/TKEanalyst",
project_urls={
"Documentation": "https://TKEanalyst.readthedocs.io/",
"Funding": "https://hydro-informatics.com/",
"Source": "https://github.com/sschwindt/TKEanalyst",
},
# this should be a whitespace separated string of keywords, not a list
keywords="turbulent kinetic energy acoustic doppler velocimitry adv vectrino",
description="Analyze and despike hydrodynamic flow fluctuations",
license="BSD License",
long_description=Path("./README.md").read_text(),
long_description_content_type="text/markdown",
packages=find_packages(),
install_requires=[
"pyyaml",
"docutils>=0.15",
"sphinx",
"click",
"pydata-sphinx-theme~=0.4.1",
"beautifulsoup4",
'importlib-resources~=3.0.0; python_version < "3.7"',
],
# dependency_links=[
# "git+https://github.com/ecohydraulics/flusstools-pckg#egg=flusstools-pckg"
# ],
include_package_data=True,
extras_require={
"code_style": ["pre-commit~=2.7.0"],
"sphinx": [
"folium",
"numpy",
"matplotlib",
"ipywidgets",
"openpyxl",
"pandas",
"nbclient",
"myst-nb~=0.10.1",
"sphinx-togglebutton>=0.2.1",
"sphinx-copybutton",
"plotly",
"sphinxcontrib-bibtex",
"sphinx-thebe",
"ablog~=0.10.11",
],
"testing": [
"myst_nb~=0.10.1",
"sphinx_thebe",
"coverage",
"pytest~=6.0.1",
"pytest-cov",
"pytest-regressions~=2.0.1",
],
"live-dev": ["sphinx-autobuild", "web-compile~=0.2.1"],
},
entry_points={
"sphinx.html_themes": ["sphinx_book_theme = sphinx_book_theme"],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Development Status :: 4 - Beta",
],
)
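# Illustrative sketch, not part of the original file: a hypothetical helper that
# mirrors the version-detection loop at the top of this script, kept as a function
# so it has no import-time side effects. The default "1.0.3" matches the fallback above.
def _read_version(init_text, default="1.0.3"):
    """Return the value of a `__version__ = "..."` line in *init_text*, else *default*."""
    for line in init_text.split("\n"):
        if line.startswith("__version__ ="):
            return line.split(" = ")[-1].strip('"')
    return default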
| 31.704545
| 84
| 0.562724
|
39aec107424c170fc72d4972f1415e389e01e317
| 35,470
|
py
|
Python
|
airflow/providers/google/cloud/hooks/dataproc.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/google/cloud/hooks/dataproc.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | 20
|
2021-01-23T12:33:08.000Z
|
2021-12-07T22:30:37.000Z
|
airflow/providers/google/cloud/hooks/dataproc.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Google Cloud Dataproc hook.
"""
import time
import uuid
import warnings
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from cached_property import cached_property
from google.api_core.retry import Retry
from google.cloud.dataproc_v1beta2 import ( # pylint: disable=no-name-in-module
ClusterControllerClient,
JobControllerClient,
WorkflowTemplateServiceClient,
)
from google.cloud.dataproc_v1beta2.types import ( # pylint: disable=no-name-in-module
Cluster,
Duration,
FieldMask,
Job,
JobStatus,
WorkflowTemplate,
)
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.version import version as airflow_version
class DataProcJobBuilder:
"""
    A helper class for building a Dataproc job.
"""
def __init__(
self,
project_id: str,
task_id: str,
cluster_name: str,
job_type: str,
properties: Optional[Dict[str, str]] = None,
) -> None:
name = task_id + "_" + str(uuid.uuid4())[:8]
self.job_type = job_type
self.job = {
"job": {
"reference": {"project_id": project_id, "job_id": name,},
"placement": {"cluster_name": cluster_name},
"labels": {'airflow-version': 'v' + airflow_version.replace('.', '-').replace('+', '-')},
job_type: {},
}
} # type: Dict[str, Any]
if properties is not None:
self.job["job"][job_type]["properties"] = properties
def add_labels(self, labels):
"""
Set labels for Dataproc job.
:param labels: Labels for the job query.
:type labels: dict
"""
if labels:
self.job["job"]["labels"].update(labels)
def add_variables(self, variables: List[str]) -> None:
"""
Set variables for Dataproc job.
:param variables: Variables for the job query.
:type variables: List[str]
"""
if variables is not None:
self.job["job"][self.job_type]["script_variables"] = variables
def add_args(self, args: List[str]) -> None:
"""
Set args for Dataproc job.
:param args: Args for the job query.
:type args: List[str]
"""
if args is not None:
self.job["job"][self.job_type]["args"] = args
def add_query(self, query: List[str]) -> None:
"""
Set query uris for Dataproc job.
:param query: URIs for the job queries.
:type query: List[str]
"""
self.job["job"][self.job_type]["query_list"] = {'queries': [query]}
def add_query_uri(self, query_uri: str) -> None:
"""
Set query uri for Dataproc job.
:param query_uri: URI for the job query.
:type query_uri: str
"""
self.job["job"][self.job_type]["query_file_uri"] = query_uri
def add_jar_file_uris(self, jars: List[str]) -> None:
"""
Set jars uris for Dataproc job.
:param jars: List of jars URIs
:type jars: List[str]
"""
if jars is not None:
self.job["job"][self.job_type]["jar_file_uris"] = jars
def add_archive_uris(self, archives: List[str]) -> None:
"""
Set archives uris for Dataproc job.
:param archives: List of archives URIs
:type archives: List[str]
"""
if archives is not None:
self.job["job"][self.job_type]["archive_uris"] = archives
def add_file_uris(self, files: List[str]) -> None:
"""
Set file uris for Dataproc job.
:param files: List of files URIs
:type files: List[str]
"""
if files is not None:
self.job["job"][self.job_type]["file_uris"] = files
def add_python_file_uris(self, pyfiles: List[str]) -> None:
"""
Set python file uris for Dataproc job.
:param pyfiles: List of python files URIs
:type pyfiles: List[str]
"""
if pyfiles is not None:
self.job["job"][self.job_type]["python_file_uris"] = pyfiles
def set_main(self, main_jar: Optional[str], main_class: Optional[str]) -> None:
"""
Set Dataproc main class.
:param main_jar: URI for the main file.
:type main_jar: str
:param main_class: Name of the main class.
:type main_class: str
:raises: Exception
"""
if main_class is not None and main_jar is not None:
raise Exception("Set either main_jar or main_class")
if main_jar:
self.job["job"][self.job_type]["main_jar_file_uri"] = main_jar
else:
self.job["job"][self.job_type]["main_class"] = main_class
def set_python_main(self, main: str) -> None:
"""
Set Dataproc main python file uri.
:param main: URI for the python main file.
:type main: str
"""
self.job["job"][self.job_type]["main_python_file_uri"] = main
def set_job_name(self, name: str) -> None:
"""
Set Dataproc job name.
:param name: Job name.
:type name: str
"""
self.job["job"]["reference"]["job_id"] = name + "_" + str(uuid.uuid4())[:8]
def build(self) -> Dict:
"""
Returns Dataproc job.
:return: Dataproc job
:rtype: dict
"""
return self.job
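# Illustrative sketch, not part of the original module: building a Hive job dict with
# the helper above. All argument values are hypothetical; only the builder API defined
# in this file is assumed.
def _example_build_hive_job():
    builder = DataProcJobBuilder(
        project_id="my-project",      # hypothetical GCP project id
        task_id="run_hive_query",     # hypothetical Airflow task id
        cluster_name="my-cluster",    # hypothetical Dataproc cluster name
        job_type="hive_job",
        properties={"hive.exec.dynamic.partition": "true"},
    )
    builder.add_query("SHOW DATABASES;")
    builder.add_labels({"team": "analytics"})
    return builder.build()  # {"job": {...}}, ready to pass to DataprocHook.submit_job()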
class DataprocHook(GoogleBaseHook):
"""
Hook for Google Cloud Dataproc APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def get_cluster_client(self, location: Optional[str] = None) -> ClusterControllerClient:
"""
Returns ClusterControllerClient.
"""
client_options = (
{'api_endpoint': '{}-dataproc.googleapis.com:443'.format(location)} if location else None
)
return ClusterControllerClient(
credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options
)
@cached_property
def get_template_client(self) -> WorkflowTemplateServiceClient:
"""
Returns WorkflowTemplateServiceClient.
"""
return WorkflowTemplateServiceClient(
credentials=self._get_credentials(), client_info=self.client_info
)
def get_job_client(self, location: Optional[str] = None) -> JobControllerClient:
"""
Returns JobControllerClient.
"""
client_options = (
{'api_endpoint': '{}-dataproc.googleapis.com:443'.format(location)} if location else None
)
return JobControllerClient(
credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options
)
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self,
region: str,
cluster: Union[Dict, Cluster],
project_id: str,
request_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Creates a cluster in a project.
:param project_id: Required. The ID of the Google Cloud project that the cluster belongs to.
:type project_id: str
:param region: Required. The Cloud Dataproc region in which to handle the request.
:type region: str
:param cluster: Required. The cluster to create.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Cluster`
:type cluster: Union[Dict, google.cloud.dataproc_v1.types.Cluster]
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``CreateClusterRequest`` requests with the same id, then the second request will be ignored and
the first ``google.longrunning.Operation`` created and stored in the backend is returned.
:type request_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_cluster_client(location=region)
result = client.create_cluster(
project_id=project_id,
region=region,
cluster=cluster,
request_id=request_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
cluster_uuid: Optional[str] = None,
request_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Deletes a cluster in a project.
:param project_id: Required. The ID of the Google Cloud project that the cluster belongs to.
:type project_id: str
:param region: Required. The Cloud Dataproc region in which to handle the request.
:type region: str
:param cluster_name: Required. The cluster name.
:type cluster_name: str
:param cluster_uuid: Optional. Specifying the ``cluster_uuid`` means the RPC should fail
if cluster with specified UUID does not exist.
:type cluster_uuid: str
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``DeleteClusterRequest`` requests with the same id, then the second request will be ignored and
the first ``google.longrunning.Operation`` created and stored in the backend is returned.
:type request_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_cluster_client(location=region)
result = client.delete_cluster(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster_uuid=cluster_uuid,
request_id=request_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def diagnose_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
        Gets cluster diagnostic information. After the operation completes, the GCS URI
        of the diagnostic output is returned.
:param project_id: Required. The ID of the Google Cloud project that the cluster belongs to.
:type project_id: str
:param region: Required. The Cloud Dataproc region in which to handle the request.
:type region: str
:param cluster_name: Required. The cluster name.
:type cluster_name: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_cluster_client(location=region)
operation = client.diagnose_cluster(
project_id=project_id,
region=region,
cluster_name=cluster_name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
operation.result()
gcs_uri = str(operation.operation.response.value)
return gcs_uri
@GoogleBaseHook.fallback_to_default_project_id
def get_cluster(
self,
region: str,
cluster_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Gets the resource representation for a cluster in a project.
:param project_id: Required. The ID of the Google Cloud project that the cluster belongs to.
:type project_id: str
:param region: Required. The Cloud Dataproc region in which to handle the request.
:type region: str
:param cluster_name: Required. The cluster name.
:type cluster_name: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_cluster_client(location=region)
result = client.get_cluster(
project_id=project_id,
region=region,
cluster_name=cluster_name,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_clusters(
self,
region: str,
filter_: str,
project_id: str,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Lists all regions/{region}/clusters in a project.
:param project_id: Required. The ID of the Google Cloud project that the cluster belongs to.
:type project_id: str
:param region: Required. The Cloud Dataproc region in which to handle the request.
:type region: str
:param filter_: Optional. A filter constraining the clusters to list. Filters are case-sensitive.
:type filter_: str
:param page_size: The maximum number of resources contained in the underlying API response. If page
            streaming is performed per-resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:type page_size: int
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_cluster_client(location=region)
result = client.list_clusters(
project_id=project_id,
region=region,
filter_=filter_,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_cluster( # pylint: disable=too-many-arguments
self,
location: str,
cluster_name: str,
cluster: Union[Dict, Cluster],
update_mask: Union[Dict, FieldMask],
project_id: str,
graceful_decommission_timeout: Optional[Union[Dict, Duration]] = None,
request_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Updates a cluster in a project.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param cluster_name: Required. The cluster name.
:type cluster_name: str
:param cluster: Required. The changes to the cluster.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Cluster`
:type cluster: Union[Dict, google.cloud.dataproc_v1.types.Cluster]
:param update_mask: Required. Specifies the path, relative to ``Cluster``, of the field to update. For
example, to change the number of workers in a cluster to 5, the ``update_mask`` parameter would be
specified as ``config.worker_config.num_instances``, and the ``PATCH`` request body would specify
the new value, as follows:
::
{ "config":{ "workerConfig":{ "numInstances":"5" } } }
Similarly, to change the number of preemptible workers in a cluster to 5, the ``update_mask``
parameter would be ``config.secondary_worker_config.num_instances``, and the ``PATCH`` request
body would be set as follows:
::
{ "config":{ "secondaryWorkerConfig":{ "numInstances":"5" } } }
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.FieldMask`
:type update_mask: Union[Dict, google.cloud.dataproc_v1.types.FieldMask]
        :param graceful_decommission_timeout: Optional. Timeout for graceful YARN decommissioning. Graceful
decommissioning allows removing nodes from the cluster without interrupting jobs in progress.
Timeout specifies how long to wait for jobs in progress to finish before forcefully removing nodes
(and potentially interrupting jobs). Default timeout is 0 (for forceful decommission), and the
maximum allowed timeout is 1 day.
Only supported on Dataproc image versions 1.2 and higher.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Duration`
:type graceful_decommission_timeout: Union[Dict, google.cloud.dataproc_v1.types.Duration]
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``UpdateClusterRequest`` requests with the same id, then the second request will be ignored and
the first ``google.longrunning.Operation`` created and stored in the backend is returned.
:type request_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_cluster_client(location=location)
operation = client.update_cluster(
project_id=project_id,
region=location,
cluster_name=cluster_name,
cluster=cluster,
update_mask=update_mask,
graceful_decommission_timeout=graceful_decommission_timeout,
request_id=request_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow_template(
self,
location: str,
template: Union[Dict, WorkflowTemplate],
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> WorkflowTemplate:
"""
Creates new workflow template.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param template: The Dataproc workflow template to create. If a dict is provided,
it must be of the same form as the protobuf message WorkflowTemplate.
:type template: Union[dict, WorkflowTemplate]
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_template_client
parent = client.region_path(project_id, location)
return client.create_workflow_template(
parent=parent, template=template, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def instantiate_workflow_template(
self,
location: str,
template_name: str,
project_id: str,
version: Optional[int] = None,
request_id: Optional[str] = None,
parameters: Optional[Dict[str, str]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Instantiates a template and begins execution.
:param template_name: Name of template to instantiate.
:type template_name: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param version: Optional. The version of workflow template to instantiate. If specified,
the workflow will be instantiated only if the current version of
the workflow template has the supplied version.
This option cannot be used to instantiate a previous version of
workflow template.
:type version: int
:param request_id: Optional. A tag that prevents multiple concurrent workflow instances
with the same tag from running. This mitigates risk of concurrent
instances started due to retries.
:type request_id: str
:param parameters: Optional. Map from parameter names to values that should be used for those
parameters. Values may not exceed 100 characters.
:type parameters: Dict[str, str]
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_template_client
name = client.workflow_template_path(project_id, location, template_name)
operation = client.instantiate_workflow_template(
name=name,
version=version,
parameters=parameters,
request_id=request_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def instantiate_inline_workflow_template(
self,
location: str,
template: Union[Dict, WorkflowTemplate],
project_id: str,
request_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
Instantiates a template and begins execution.
:param template: The workflow template to instantiate. If a dict is provided,
it must be of the same form as the protobuf message WorkflowTemplate
:type template: Union[Dict, WorkflowTemplate]
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param request_id: Optional. A tag that prevents multiple concurrent workflow instances
with the same tag from running. This mitigates risk of concurrent
instances started due to retries.
:type request_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_template_client
parent = client.region_path(project_id, location)
operation = client.instantiate_inline_workflow_template(
parent=parent,
template=template,
request_id=request_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return operation
@GoogleBaseHook.fallback_to_default_project_id
def wait_for_job(self, job_id: str, location: str, project_id: str, wait_time: int = 10):
"""
Helper method which polls a job to check if it finishes.
:param job_id: Id of the Dataproc job
:type job_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param wait_time: Number of seconds between checks
:type wait_time: int
"""
state = None
while state not in (JobStatus.ERROR, JobStatus.DONE, JobStatus.CANCELLED):
time.sleep(wait_time)
job = self.get_job(location=location, job_id=job_id, project_id=project_id)
state = job.status.state
if state == JobStatus.ERROR:
raise AirflowException('Job failed:\n{}'.format(job))
if state == JobStatus.CANCELLED:
raise AirflowException('Job was cancelled:\n{}'.format(job))
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
location: str,
job_id: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Job:
"""
Gets the resource representation for a job in a project.
:param job_id: Id of the Dataproc job
:type job_id: str
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_job_client(location=location)
job = client.get_job(
project_id=project_id,
region=location,
job_id=job_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
@GoogleBaseHook.fallback_to_default_project_id
def submit_job(
self,
location: str,
job: Union[Dict, Job],
project_id: str,
request_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Job:
"""
Submits a job to a cluster.
:param job: The job resource. If a dict is provided,
it must be of the same form as the protobuf message Job
:type job: Union[Dict, Job]
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param request_id: Optional. A tag that prevents multiple concurrent workflow instances
with the same tag from running. This mitigates risk of concurrent
instances started due to retries.
:type request_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_job_client(location=location)
return client.submit_job(
project_id=project_id,
region=location,
job=job,
request_id=request_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def submit(
self,
project_id: str,
job: Dict,
region: str = 'global',
job_error_states: Optional[Iterable[str]] = None, # pylint: disable=unused-argument
) -> None:
"""
Submits Google Cloud Dataproc job.
:param project_id: The id of Google Cloud Dataproc project.
:type project_id: str
:param job: The job to be submitted
:type job: dict
:param region: The region of Google Dataproc cluster.
:type region: str
:param job_error_states: Job states that should be considered error states.
:type job_error_states: List[str]
"""
        # TODO: Remove one day
warnings.warn("This method is deprecated. Please use `submit_job`", DeprecationWarning, stacklevel=2)
job_object = self.submit_job(location=region, project_id=project_id, job=job)
job_id = job_object.reference.job_id
self.wait_for_job(job_id=job_id, location=region, project_id=project_id)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
job_id: str,
project_id: str,
location: str = 'global',
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Job:
"""
Starts a job cancellation request.
:param project_id: Required. The ID of the Google Cloud project that the job belongs to.
:type project_id: str
:param location: Required. The Cloud Dataproc region in which to handle the request.
:type location: str
:param job_id: Required. The job ID.
:type job_id: str
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:type timeout: float
:param metadata: Additional metadata that is provided to the method.
:type metadata: Sequence[Tuple[str, str]]
"""
client = self.get_job_client(location=location)
job = client.cancel_job(
project_id=project_id,
region=location,
job_id=job_id,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return job
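# --- Illustrative sketches, not part of the original module -----------------
# Hedged usage examples for the hook above. Project, region, cluster and job values
# are placeholders; only methods defined in this file are assumed, and a default
# Airflow GCP connection is assumed to be configured.
def _example_submit_and_wait(job_dict, project_id="my-project", region="europe-west1"):
    """The non-deprecated pattern hinted at in submit(): submit_job() then wait_for_job()."""
    hook = DataprocHook()
    submitted = hook.submit_job(location=region, project_id=project_id, job=job_dict)
    hook.wait_for_job(
        job_id=submitted.reference.job_id,
        location=region,
        project_id=project_id,
        wait_time=10,
    )
    return submitted
def _example_resize_cluster(project_id="my-project", region="europe-west1", workers=5):
    """Change the worker count, following the update_mask example in update_cluster()."""
    hook = DataprocHook()
    return hook.update_cluster(
        project_id=project_id,
        location=region,
        cluster_name="my-cluster",  # hypothetical cluster name
        cluster={"config": {"worker_config": {"num_instances": workers}}},
        update_mask={"paths": ["config.worker_config.num_instances"]},
    )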
| 40.864055
| 110
| 0.633352
|