| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
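The header above is a column summary for the rows that follow. As a minimal, illustrative sketch (assuming the rows are also available as a local parquet export; the file name `rows.parquet` is hypothetical), the columns can be inspected with pandas:

```python
import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("rows.parquet")

# Example: Python files under 5 KB, with their repo and star count.
small_py = df[(df["ext"] == "py") & (df["size"] < 5_000)]
print(small_py[["max_stars_repo_name", "max_stars_repo_path",
                "max_stars_count", "avg_line_length"]].head())
```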
hexsha: 350bb2c6e2f935943a5194b0fe9fa83703e72d8f | size: 1,525 | ext: py | lang: Python
repo_path: transports/email/__init__.py | repo_name: tperrier/mwachx | repo_head_hexsha: 94616659dc29843e661b2ecc9a2e7f1d4e81b5a4 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: 3 (2015-05-27T14:35:49.000Z to 2016-02-26T21:04:32.000Z) | max_issues_count: 375 (2015-01-31T10:08:34.000Z to 2021-06-10T19:44:21.000Z) | max_forks_count: 6 (2016-01-10T19:52:41.000Z to 2020-06-15T22:07:24.000Z)
content:
#Email Imports
import smtplib,sys
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
#Django imports
from django.conf import settings
def send(to,message):
success = email("SMS as Email: {}".format(to),message)
return 'Email Transport ID', success, {}
def email(subject,message,to='default'):
email_settings = getattr(settings,'EMAIL_SETUP',None)
if not isinstance(email_settings,dict):
print "Email Settings",email_settings
return False
from_address = email_settings.get('from')
to = email_settings.get('to').get(to)
password = email_settings.get('password')
username = email_settings.get('username')
server = email_settings.get('server')
if from_address is None or to is None or password is None or username is None or server is None:
print "Email Settings Options", from_address, to, password, username, server
return False
# Make message monospace and preserve whitespace
message = "<pre style='font-family:monospace;font-size:12pt;'>\n{}</pre>".format(message)
msg = MIMEMultipart()
msg['From'] = from_address
msg['To'] = to
msg['Subject'] = "[MX Server] {}".format(subject)
msg.attach(MIMEText(message,'html'))
mail_server = smtplib.SMTP(server,587)
mail_server.ehlo(); mail_server.starttls(); mail_server.ehlo()
mail_server.login(username,password)
mail_server.sendmail(msg['From'],msg['To'].split(','),msg.as_string())
mail_server.close()
return True
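For reference, a minimal sketch of the Django `EMAIL_SETUP` setting this transport expects, inferred from the lookups above (all values are placeholders, not from the original project):

```python
# settings.py (illustrative placeholders only)
EMAIL_SETUP = {
    "from": "mx-server@example.org",
    "to": {"default": "ops@example.org"},  # email() looks recipients up by key
    "username": "smtp-user",
    "password": "smtp-pass",
    "server": "smtp.example.org",          # connected to on port 587 with STARTTLS
}
```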
avg_line_length: 33.152174 | max_line_length: 100 | alphanum_fraction: 0.700328

hexsha: 808609355a9c75f68afc8788099cf47e966f6466 | size: 4,917 | ext: py | lang: Python
repo_path: run.py | repo_name: fordevoted/GIF_generator | repo_head_hexsha: c8460a670b6b44d14ef98cd12d43f08a93df5321 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: 2 (2020-01-09T03:49:33.000Z to 2020-01-24T03:57:09.000Z) | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 3 12:17:38 2018
@author: github.com/GustavZ
"""
import os
import tarfile
from six.moves import urllib
import numpy as np
import tensorflow as tf
import yaml
import cv2
from stuff.helper import FPS2, WebcamVideoStream
from skimage import measure
## LOAD CONFIG PARAMS ##
if (os.path.isfile('config.yml')):
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
else:
with open("config.sample.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
VIDEO_INPUT = cfg['video_input']
FPS_INTERVAL = cfg['fps_interval']
ALPHA = cfg['alpha']
MODEL_NAME = cfg['model_name']
MODEL_PATH = cfg['model_path']
DOWNLOAD_BASE = cfg['download_base']
BBOX = cfg['bbox']
MINAREA = cfg['minArea']
# Hardcoded COCO_VOC Labels
LABEL_NAMES = np.asarray([
'', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'])
def create_colormap(seg_map):
"""
Takes A 2D array storing the segmentation labels.
Returns A 2D array where each element is the color indexed
by the corresponding element in the input label to the PASCAL color map.
"""
colormap = np.zeros((256, 3), dtype=int)
ind = np.arange(256, dtype=int)
for shift in reversed(range(8)):
for channel in range(3):
colormap[:, channel] |= ((ind >> channel) & 1) << shift
ind >>= 3
return colormap[seg_map]
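# Illustrative check (editor's note, not in the original file): for PASCAL label
# 15 ('person'), the bit-interleaving above yields RGB (192, 128, 128), e.g.
#   create_colormap(np.array([[15]]))  ->  array([[[192, 128, 128]]])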
# Download Model from TF-deeplab's Model Zoo
def download_model():
model_file = MODEL_NAME + '.tar.gz'
if not os.path.isfile(MODEL_PATH):
print('> Model not found. Downloading it now.')
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + model_file, model_file)
tar_file = tarfile.open(model_file)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd() + '/models/')
os.remove(os.getcwd() + '/' + model_file)
else:
print('> Model found. Proceed.')
# Visualize Text on OpenCV Image
def vis_text(image,string,pos):
cv2.putText(image,string,(pos),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Load frozen Model
def load_frozenmodel():
print('> Loading frozen model into memory')
detection_graph = tf.Graph()
with detection_graph.as_default():
seg_graph_def = tf.GraphDef()
with tf.gfile.GFile(MODEL_PATH, 'rb') as fid:
serialized_graph = fid.read()
seg_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(seg_graph_def, name='')
return detection_graph
def segmentation(detection_graph,label_names):
# fixed input sizes as model needs resize either way
vs = WebcamVideoStream(VIDEO_INPUT,640,480).start()
resize_ratio = 1.0 * 513 / max(vs.real_width,vs.real_height)
target_size = (int(resize_ratio * vs.real_width), int(resize_ratio * vs.real_height))
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
fps = FPS2(FPS_INTERVAL).start()
print("> Starting Segmentation")
with detection_graph.as_default():
with tf.Session(graph=detection_graph) as sess:
while vs.isActive():
image = cv2.resize(vs.read(),target_size)
batch_seg_map = sess.run('SemanticPredictions:0',
feed_dict={'ImageTensor:0': [cv2.cvtColor(image, cv2.COLOR_BGR2RGB)]})
# visualization
seg_map = batch_seg_map[0]
seg_image = create_colormap(seg_map).astype(np.uint8)
cv2.addWeighted(seg_image,ALPHA,image,1-ALPHA,0,image)
vis_text(image,"fps: {}".format(fps.fps_local()),(10,30))
# boxes (ymin, xmin, ymax, xmax)
if BBOX:
map_labeled = measure.label(seg_map, connectivity=1)
for region in measure.regionprops(map_labeled):
if region.area > MINAREA:
box = region.bbox
p1 = (box[1], box[0])
p2 = (box[3], box[2])
cv2.rectangle(image, p1, p2, (77,255,9), 2)
vis_text(image,label_names[seg_map[tuple(region.coords[0])]],(p1[0],p1[1]-10))
cv2.imshow('segmentation',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.update()
fps.stop()
vs.stop()
cv2.destroyAllWindows()
if __name__ == '__main__':
download_model()
graph = load_frozenmodel()
segmentation(graph, LABEL_NAMES)
avg_line_length: 36.969925 | max_line_length: 106 | alphanum_fraction: 0.608704

hexsha: 4a1556fcefc6f81fe6cf42b11d57abd7f2472c05 | size: 4,607 | ext: py | lang: Python
repo_path: nova/tests/unit/notifications/test_base.py | repo_name: WeifanFu-bsn/nova | repo_head_hexsha: c7b54a80ac25f6a01d0a150c546532f5ae2592ce | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-07-22T21:17:41.000Z to 2020-07-22T21:17:41.000Z)
content:
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from nova import context as nova_context
from nova.notifications import base
from nova import test
from nova.tests import uuidsentinel as uuids
from nova import utils
class TestNullSafeUtils(test.NoDBTestCase):
def test_null_safe_isotime(self):
dt = None
self.assertEqual('', base.null_safe_isotime(dt))
dt = datetime.datetime(second=1,
minute=1,
hour=1,
day=1,
month=1,
year=2017)
self.assertEqual(utils.strtime(dt), base.null_safe_isotime(dt))
def test_null_safe_str(self):
line = None
self.assertEqual('', base.null_safe_str(line))
line = 'test'
self.assertEqual(line, base.null_safe_str(line))
class TestSendInstanceUpdateNotification(test.NoDBTestCase):
@mock.patch('nova.notifications.objects.base.NotificationBase.emit',
new_callable=mock.NonCallableMock) # asserts not called
# TODO(mriedem): Rather than mock is_enabled, it would be better to
# configure oslo_messaging_notifications.driver=['noop']
@mock.patch('nova.rpc.NOTIFIER.is_enabled', return_value=False)
def test_send_versioned_instance_update_notification_disabled(self,
mock_enabled,
mock_info):
"""Tests the case that versioned notifications are disabled which makes
_send_versioned_instance_update_notification a noop.
"""
base._send_versioned_instance_update(mock.sentinel.ctxt,
mock.sentinel.instance,
mock.sentinel.payload,
mock.sentinel.host,
mock.sentinel.service)
@mock.patch.object(base, 'bandwidth_usage')
@mock.patch.object(base, '_compute_states_payload')
@mock.patch('nova.rpc.get_notifier')
@mock.patch.object(base, 'info_from_instance')
def test_send_legacy_instance_update_notification(self, mock_info,
mock_get_notifier,
mock_states,
mock_bw):
"""Tests the case that versioned notifications are disabled and
assert that this does not prevent sending the unversioned
instance.update notification.
"""
self.flags(notification_format='unversioned', group='notifications')
base.send_instance_update_notification(mock.sentinel.ctxt,
mock.sentinel.instance)
mock_get_notifier.return_value.info.assert_called_once_with(
mock.sentinel.ctxt, 'compute.instance.update', mock.ANY)
class TestBandwidthUsage(test.NoDBTestCase):
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.network.API')
@mock.patch('nova.objects.BandwidthUsageList.get_by_uuids')
def test_context_elevated(self, mock_get_bw_usage, mock_nw_api,
mock_elevated):
context = nova_context.RequestContext('fake', 'fake')
# We need this to not be a NovaObject so the old school
# get_instance_nw_info will run.
instance = {'uuid': uuids.instance}
audit_start = 'fake'
base.bandwidth_usage(context, instance, audit_start)
network_api = mock_nw_api.return_value
network_api.get_instance_nw_info.assert_called_once_with(
mock_elevated.return_value, instance)
mock_get_bw_usage.assert_called_once_with(
mock_elevated.return_value, [uuids.instance], audit_start)
mock_elevated.assert_called_once_with(read_deleted='yes')
avg_line_length: 43.87619 | max_line_length: 79 | alphanum_fraction: 0.622097

hexsha: 7ab2e0aa9a0f7def7a340d840c0128dfbae9b3f6 | size: 12,462 | ext: py | lang: Python
repo_path: test/functional/feature_fee_estimation.py | repo_name: bitcoin-black/bitcoinblack | repo_head_hexsha: ad87d315c635ef439d4664da46e6672153f91b79 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import *
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
redeem_script_1 = CScript([OP_1, OP_DROP])
redeem_script_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
# Associated ScriptSigs that satisfy (spend) P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
global log
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""
Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)
"""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
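# (1.1892 is roughly 2**0.25, so 28 multiplicative steps span about 2**7 = 128)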
rand_fee = float(fee_increment)*(1.1892**random.randint(0,28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount+fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee)*COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount*COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(ToHex(tx), True)
unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee})
unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount})
return (ToHex(tx), fee)
def split_inputs(from_node, txins, txouts, initial_split = False):
"""
We need to generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to be small inputs so they wouldn't have
a high coin age when the notion of priority still existed.
"""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"]/2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change*COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change*COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split) :
completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
else :
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = ToHex(tx)
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change})
txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change})
def check_estimates(node, fees_seen, max_invalid, print_estimates = True):
"""
This function calls estimatefee and verifies that the estimates
meet certain invariants.
"""
all_estimates = [ node.estimatefee(i) for i in range(1,26) ]
if print_estimates:
log.info([str(all_estimates[e-1]) for e in [1,2,3,6,15,25]])
delta = 1.0e-6 # account for rounding error
last_e = max(fees_seen)
for e in [x for x in all_estimates if x >= 0]:
# Estimates should be within the bounds of what transactions fees actually were:
if float(e)+delta < min(fees_seen) or float(e)-delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
%(float(e), min(fees_seen), max(fees_seen)))
# Estimates should be monotonically decreasing
if float(e)-delta > last_e:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
%(float(e),float(last_e)))
last_e = e
valid_estimate = False
invalid_estimates = 0
for i,e in enumerate(all_estimates): # estimate is for i+1
if e >= 0:
valid_estimate = True
if i >= 13: # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
assert(node.estimatesmartfee((i+1)//2)["feerate"] > float(e) - delta)
else:
invalid_estimates += 1
# estimatesmartfee should still be valid
approx_estimate = node.estimatesmartfee(i+1)["feerate"]
answer_found = node.estimatesmartfee(i+1)["blocks"]
assert(approx_estimate > 0)
assert(answer_found > i+1)
# Once we're at a high enough confirmation count that we can give an estimate
# We should have estimates for all higher confirmation counts
if valid_estimate:
raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")
# Check on the expected number of different confirmation counts
# that we might not have valid estimates for
if invalid_estimates > max_invalid:
raise AssertionError("More than (%d) invalid estimates"%(max_invalid))
return all_estimates
class EstimateFeeTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
"""
We'll setup the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
["-blockmaxsize=17000", "-maxorphantx=1000", "-deprecatedrpc=estimatefee"],
["-blockmaxsize=8000", "-maxorphantx=1000"]])
# Use node0 to mine blocks for input splitting
# Node1 mines small blocks that are nevertheless bigger than the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# (17k is room enough for 110 or so transactions)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 55 or so transactions)
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100-50,100+50)):
from_index = random.randint(1,2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee)/tx_kbytes)
sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0],True)["tx"]
sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Make log handler available to helper functions
global log
log = self.log
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
#Double txouts to txouts2
while (len(self.txouts)>0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
#Double txouts2 to txouts
while (len(self.txouts2)>0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb, 14)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb, 2)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb, 2)
if __name__ == '__main__':
EstimateFeeTest().main()
avg_line_length: 47.38403 | max_line_length: 113 | alphanum_fraction: 0.646285

hexsha: 796fd076c6a5e5b144d1a8b0e3dc64f5ef3ca650 | size: 2,745 | ext: py | lang: Python
repo_path: Lib/test/test_crypt.py | repo_name: yungyu/cpython | repo_head_hexsha: 97588f439d35a918c95f6785c0fa17d3fbc40c31 | licenses: ["PSF-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: 7 (2018-04-28T12:39:00.000Z to 2020-12-17T11:22:25.000Z) | max_issues_count: null | max_forks_count: 3 (2019-04-22T21:31:38.000Z to 2020-04-23T08:51:10.000Z)
content:
import sys
from test import support
import unittest
crypt = support.import_module('crypt')
class CryptTestCase(unittest.TestCase):
def test_crypt(self):
cr = crypt.crypt('mypassword')
cr2 = crypt.crypt('mypassword', cr)
self.assertEqual(cr2, cr)
cr = crypt.crypt('mypassword', 'ab')
if cr is not None:
cr2 = crypt.crypt('mypassword', cr)
self.assertEqual(cr2, cr)
def test_salt(self):
self.assertEqual(len(crypt._saltchars), 64)
for method in crypt.methods:
salt = crypt.mksalt(method)
self.assertIn(len(salt) - method.salt_chars, {0, 1, 3, 4, 6, 7})
if method.ident:
self.assertIn(method.ident, salt[:len(salt)-method.salt_chars])
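# Illustrative example (editor's note): on platforms that provide SHA-512
# crypt, crypt.mksalt(crypt.METHOD_SHA512) returns '$6$' followed by 16 salt
# characters, so len(salt) - method.salt_chars == 3 in the check above.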
def test_saltedcrypt(self):
for method in crypt.methods:
cr = crypt.crypt('assword', method)
self.assertEqual(len(cr), method.total_size)
cr2 = crypt.crypt('assword', cr)
self.assertEqual(cr2, cr)
cr = crypt.crypt('assword', crypt.mksalt(method))
self.assertEqual(len(cr), method.total_size)
def test_methods(self):
self.assertTrue(len(crypt.methods) >= 1)
if sys.platform.startswith('openbsd'):
self.assertEqual(crypt.methods, [crypt.METHOD_BLOWFISH])
else:
self.assertEqual(crypt.methods[-1], crypt.METHOD_CRYPT)
@unittest.skipUnless(crypt.METHOD_BLOWFISH in crypt.methods,
'requires support of Blowfish')
def test_log_rounds(self):
self.assertEqual(len(crypt._saltchars), 64)
for log_rounds in range(4, 11):
salt = crypt.mksalt(crypt.METHOD_BLOWFISH, log_rounds=log_rounds)
self.assertIn('$%02d$' % log_rounds, salt)
self.assertIn(len(salt) - crypt.METHOD_BLOWFISH.salt_chars, {6, 7})
cr = crypt.crypt('mypassword', salt)
self.assertTrue(cr)
cr2 = crypt.crypt('mypassword', cr)
self.assertEqual(cr2, cr)
@unittest.skipUnless(crypt.METHOD_BLOWFISH in crypt.methods,
'requires support of Blowfish')
def test_invalid_log_rounds(self):
for log_rounds in (1, -1, 999):
salt = crypt.mksalt(crypt.METHOD_BLOWFISH, log_rounds=log_rounds)
cr = crypt.crypt('mypassword', salt)
if cr is not None:
# On failure the openwall implementation returns a magic
# string that is shorter than 13 characters and is guaranteed
# to differ from a salt.
self.assertNotEqual(cr, salt)
self.assertLess(len(cr), 13)
if __name__ == "__main__":
unittest.main()
avg_line_length: 38.661972 | max_line_length: 79 | alphanum_fraction: 0.604007

hexsha: ec8eaa8e48ef197178aa1b0e0c319c052c7f9282 | size: 2,564 | ext: py | lang: Python
repo_path: docs/source/conf.py | repo_name: mew2057/ibm-spectrum-scale-csi-operator | repo_head_hexsha: 67535f3a0f114d3210232cf9341352b920fcc9f8 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'IBM Spectrum Scale CSI Operator'
copyright = '2019, IBM'
author = 'John Dunham'
master_doc = 'index'
# The full version, including alpha/beta/rc tags
release = '0.9.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'collapse_navigation': True,
'sticky_navigation': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add md to suffix.
source_suffix = ['.md', '.rst']
# Markdown support.
source_parsers = { '.md' : 'recommonmark.parser.CommonMarkParser' }
# collection of substitutions.
rst_epilog="""
.. |driver-repo| replace:: GitHubDriver_
.. |operator-repo| replace:: GitHubOperator_
.. _GitHubDriver: https://github.com/IBM/ibm-spectrum-scale-csi-driver
.. _GitHubOperator: https://github.com/IBM/
"""
avg_line_length: 31.268293 | max_line_length: 79 | alphanum_fraction: 0.673167

hexsha: 45c176d8169681908038753d43c25edab394a572 | size: 4,735 | ext: py | lang: Python
repo_path: src/emmental/modules/embedding_module.py | repo_name: HiromuHota/emmental | repo_head_hexsha: eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from numpy import ndarray
from torch import Tensor
class EmbeddingModule(nn.Module):
r"""Embedding module.
Args:
word_counter(dict): Word count dictionary that contains the frequencies of
each word, defaults to None.
max_size(int): Max size of word dictionary, defaults to None.
word_dim(int): Dimension of embeddings, defaults to 300.
specials(list): The list of special tokens (e.g., padding or eos) that will
be prepended to the vocabulary, defaults to [].
threshold(int): The minimum frequency needed to include a token in the
vocabulary, defaults to None.
embedding_file(str): The pretrained embedding file path, defaults to None.
fix_emb(bool): Whether fix word embeddings or not, defaults to False.
"""
UNK = "<unk>"
PAD = "<pad>"
def __init__(
self,
word_counter: Optional[Dict[str, int]] = None,
max_size: Optional[int] = None,
word_dim: int = 300,
specials: List[str] = [],
threshold: int = 0,
embedding_file: Optional[str] = None,
fix_emb: bool = False,
) -> None:
super().__init__()
assert (
word_counter is not None or embedding_file is not None
), "word_counter and embedding_file are not provided."
self.word_counter = word_counter
self.dim = word_dim
# remove words that occur less than threshold
if self.word_counter and threshold > 1:
self.word_counter = dict(
[(k, v) for k, v in self.word_counter.items() if v >= threshold]
)
max_size = None if max_size is None else max_size + len(specials)
reverse = True
if embedding_file:
emb_dim, emb_w2i, emb_wv = self._load_embedding(embedding_file)
self.dim = emb_dim
if word_counter is None:
self.word_counter = emb_w2i
reverse = False
self.id2word = sorted(
self.word_counter, key=lambda k: self.word_counter[k], reverse=reverse
)
specials = [self.UNK, self.PAD] + [
special for special in specials if special not in [self.UNK, self.PAD]
]
# append special tokens and remove duplicate words
self.id2word = specials + [
word for word in self.id2word if word not in specials
]
# limit the word list size
if max_size:
self.id2word = self.id2word[:max_size]
self.word2id = dict(
[(self.id2word[idx], idx) for idx in range(len(self.id2word))]
)
self.size = len(self.id2word)
# Initialize word embeddings
self.embeddings = nn.Embedding(self.size, self.dim)
self.embeddings.weight.data.uniform_(-1, 1)
self.embeddings.weight.data[self.word2id[self.PAD]] = 0.0
# Update word embedding with pretrained embeddings
if embedding_file:
for w, i in emb_w2i.items():
if w in self.word2id:
self.word2id[w]
self.embeddings.weight.data[self.word2id[w]].copy_( # type: ignore
torch.from_numpy(emb_wv[emb_w2i[w]])
)
if fix_emb:
self.embeddings.weight.requires_grad = False
def _load_embedding(
self, embedding_file: str
) -> Tuple[int, Dict[str, int], List[ndarray]]:
r"""Load the pre-trained embeddings from file.
Args:
embedding_file: The pretrained embedding file path.
Returns:
tuple: word embedding dimension, word to index dict, and embedding vectors.
"""
emb_dim = 0
emb_w2i: Dict[str, int] = {}
emb_wv = []
with open(embedding_file, encoding="utf8") as f:
for line in f:
elems = line.split()
# skip the header
if len(elems) == 2:
continue
# collect embedding dim
emb_dim = len(elems) - 1
# collect word
token = elems[0]
# collect word embedding
if token not in emb_w2i:
emb_w2i[token] = len(emb_w2i)
emb_wv.append(np.array([float(v) for v in elems[1:]]))
return emb_dim, emb_w2i, emb_wv
def forward(self, input: Tensor) -> Tensor: # type: ignore
r"""Forward function.
Args:
input(Tensor): Input tensor.
Returns:
Tensor: Output tensor.
"""
return self.embeddings(input)
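A minimal usage sketch for the module above (editor's illustration; the toy vocabulary is hypothetical and not part of the original file):

```python
import torch

# Toy vocabulary; threshold, specials, and embedding_file left at their defaults.
emb = EmbeddingModule(word_counter={"hello": 3, "world": 1}, word_dim=50)

ids = torch.tensor([[emb.word2id["hello"], emb.word2id[EmbeddingModule.PAD]]])
print(emb(ids).shape)  # torch.Size([1, 2, 50])
```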
avg_line_length: 31.778523 | max_line_length: 87 | alphanum_fraction: 0.574868

hexsha: 8257630cf207137c341d0041ed72c6c67c2854ba | size: 34,923 | ext: py | lang: Python
repo_path: python/paddle/fluid/tests/unittests/test_signal.py | repo_name: SmirnovKol/Paddle | repo_head_hexsha: a3730dc87bc61593514b830727e36e5d19e753cd | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
max_stars_count: 11 (2016-08-29T07:43:26.000Z to 2016-08-29T07:51:24.000Z) | max_issues_count: null | max_forks_count: 1 (2021-09-24T11:23:36.000Z to 2021-09-24T11:23:36.000Z)
content:
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import unittest
import numpy as np
from numpy import fft
from numpy.lib.stride_tricks import as_strided
import paddle
import scipy.signal
paddle.set_default_dtype('float64')
DEVICES = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
DEVICES.append(paddle.CUDAPlace(0))
TEST_CASE_NAME = 'test_case'
# Constrain STFT block sizes to 256 KB
MAX_MEM_BLOCK = 2**8 * 2**10
def fix_length(data, size, axis=-1, **kwargs):
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
if n > size:
slices = [slice(None)] * data.ndim
slices[axis] = slice(0, size)
return data[tuple(slices)]
elif n < size:
lengths = [(0, 0)] * data.ndim
lengths[axis] = (0, size - n)
return np.pad(data, lengths, **kwargs)
return data
def tiny(x):
# Make sure we have an array view
x = np.asarray(x)
# Only floating types generate a tiny
if np.issubdtype(x.dtype, np.floating) or np.issubdtype(
x.dtype, np.complexfloating):
dtype = x.dtype
else:
dtype = np.float32
return np.finfo(dtype).tiny
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
# Avoid div-by-zero
if threshold is None:
threshold = tiny(S)
elif threshold <= 0:
raise Exception("threshold={} must be strictly "
"positive".format(threshold))
if fill not in [None, False, True]:
raise Exception("fill={} must be None or boolean".format(fill))
if not np.all(np.isfinite(S)):
raise Exception("Input must be finite")
# All norms only depend on magnitude, let's do that first
mag = np.abs(S).astype(float)  # np.float is removed in recent NumPy; plain float is float64
# For max/min norms, filling with 1 works
fill_norm = 1
if norm == np.inf:
length = np.max(mag, axis=axis, keepdims=True)
elif norm == -np.inf:
length = np.min(mag, axis=axis, keepdims=True)
elif norm == 0:
if fill is True:
raise Exception("Cannot normalize with norm=0 and fill=True")
length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)
elif np.issubdtype(type(norm), np.number) and norm > 0:
length = np.sum(mag**norm, axis=axis, keepdims=True)**(1.0 / norm)
if axis is None:
fill_norm = mag.size**(-1.0 / norm)
else:
fill_norm = mag.shape[axis]**(-1.0 / norm)
elif norm is None:
return S
else:
raise Exception("Unsupported norm: {}".format(repr(norm)))
# indices where norm is below the threshold
small_idx = length < threshold
Snorm = np.empty_like(S)
if fill is None:
# Leave small indices un-normalized
length[small_idx] = 1.0
Snorm[:] = S / length
elif fill:
# If we have a non-zero fill value, we locate those entries by
# doing a nan-divide.
# If S was finite, then length is finite (except for small positions)
length[small_idx] = np.nan
Snorm[:] = S / length
Snorm[np.isnan(Snorm)] = fill_norm
else:
# Set small values to zero by doing an inf-divide.
# This is safe (by IEEE-754) as long as S is finite.
length[small_idx] = np.inf
Snorm[:] = S / length
return Snorm
def __window_ss_fill(x, win_sq, n_frames, hop_length): # pragma: no cover
"""Helper function for window sum-square calculation."""
n = len(x)
n_fft = len(win_sq)
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample +
n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
def window_sumsquare(
window,
n_frames,
hop_length=512,
win_length=None,
n_fft=2048,
dtype=np.float32,
norm=None,
):
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length)
win_sq = normalize(win_sq, norm=norm)**2
win_sq = pad_center(win_sq, n_fft)
# Fill the envelope
__window_ss_fill(x, win_sq, n_frames, hop_length)
return x
def dtype_c2r(d, default=np.float32):
mapping = {
np.dtype(np.complex64): np.float32,
np.dtype(np.complex128): np.float64,
}
# If we're given a real type already, return it
dt = np.dtype(d)
if dt.kind == "f":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(np.dtype(d), default))
def dtype_r2c(d, default=np.complex64):
mapping = {
np.dtype(np.float32): np.complex64,
np.dtype(np.float64): np.complex128,
}
# If we're given a complex type already, return it
dt = np.dtype(d)
if dt.kind == "c":
return dt
# Otherwise, try to map the dtype.
# If no match is found, return the default.
return np.dtype(mapping.get(dt, default))
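# Illustrative examples (editor's note): dtype_r2c(np.float32) -> complex64 and
# dtype_r2c(np.float64) -> complex128; an already-complex dtype is returned
# unchanged, and anything unmapped falls back to `default`.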
def frame(x, frame_length, hop_length, axis=-1):
if not isinstance(x, np.ndarray):
raise Exception("Input must be of type numpy.ndarray, "
"given type(x)={}".format(type(x)))
if x.shape[axis] < frame_length:
raise Exception("Input is too short (n={:d})"
" for frame_length={:d}".format(x.shape[axis],
frame_length))
if hop_length < 1:
raise Exception("Invalid hop_length: {:d}".format(hop_length))
if axis == -1 and not x.flags["F_CONTIGUOUS"]:
print("librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(
axis))
x = np.asfortranarray(x)
elif axis == 0 and not x.flags["C_CONTIGUOUS"]:
print("librosa.util.frame called with axis={} "
"on a non-contiguous input. This will result in a copy.".format(
axis))
x = np.ascontiguousarray(x)
n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
strides = np.asarray(x.strides)
new_stride = np.prod(strides[strides > 0] // x.itemsize) * x.itemsize
if axis == -1:
shape = list(x.shape)[:-1] + [frame_length, n_frames]
strides = list(strides) + [hop_length * new_stride]
elif axis == 0:
shape = [n_frames, frame_length] + list(x.shape)[1:]
strides = [hop_length * new_stride] + list(strides)
else:
raise Exception("Frame axis={} must be either 0 or -1".format(axis))
return as_strided(x, shape=shape, strides=strides)
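# Illustrative shape check (editor's note): a 1-D signal of length 150 with
# frame_length=50 and hop_length=15 gives n_frames = 1 + (150 - 50) // 15 = 7,
# so frame(x, 50, 15) returns a strided view of shape (50, 7).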
def pad_center(data, size, axis=-1, **kwargs):
kwargs.setdefault("mode", "constant")
n = data.shape[axis]
lpad = int((size - n) // 2)
lengths = [(0, 0)] * data.ndim
lengths[axis] = (lpad, int(size - n - lpad))
if lpad < 0:
raise Exception(("Target size ({:d}) must be "
"at least input size ({:d})").format(size, n))
return np.pad(data, lengths, **kwargs)
def get_window(window, Nx, fftbins=True):
if callable(window):
return window(Nx)
elif isinstance(window, (str, tuple)) or np.isscalar(window):
# TODO: if we add custom window functions in librosa, call them here
return scipy.signal.get_window(window, Nx, fftbins=fftbins)
elif isinstance(window, (np.ndarray, list)):
if len(window) == Nx:
return np.asarray(window)
raise Exception("Window size mismatch: "
"{:d} != {:d}".format(len(window), Nx))
else:
raise Exception("Invalid window specification: {}".format(window))
def __overlap_add(y, ytmp, hop_length):
# numba-accelerated overlap add for inverse stft
# y is the pre-allocated output buffer
# ytmp is the windowed inverse-stft frames
# hop_length is the hop-length of the STFT analysis
n_fft = ytmp.shape[0]
for frame in range(ytmp.shape[1]):
sample = frame * hop_length
y[sample:(sample + n_fft)] += ytmp[:, frame]
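# Illustrative note (editor's): with hop_length = n_fft // 4 (the default used
# below), consecutive inverse-FFT frames overlap by 75%; the accumulated window
# energy is later divided out via window_sumsquare() in istft().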
def stft(x,
n_fft=2048,
hop_length=None,
win_length=None,
window="hann",
center=True,
pad_mode="reflect"):
y = x
input_rank = len(y.shape)
if input_rank == 2:
assert y.shape[0] == 1 # Only 1d input supported in librosa
y = y.squeeze(0)
dtype = None
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
fft_window = get_window(window, win_length, fftbins=True)
# Pad the window out to n_fft size
fft_window = pad_center(fft_window, n_fft)
# Reshape so that the window can be broadcast
fft_window = fft_window.reshape((-1, 1))
# Pad the time series so that frames are centered
if center:
if n_fft > y.shape[-1]:
print("n_fft={} is too small for input signal of length={}".format(
n_fft, y.shape[-1]))
y = np.pad(y, int(n_fft // 2), mode=pad_mode)
elif n_fft > y.shape[-1]:
raise Exception(
"n_fft={} is too large for input signal of length={}".format(
n_fft, y.shape[-1]))
# Window the time series.
y_frames = frame(y, frame_length=n_fft, hop_length=hop_length)
if dtype is None:
dtype = dtype_r2c(y.dtype)
# Pre-allocate the STFT matrix
stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
dtype=dtype,
order="F")
# how many columns can we fit within MAX_MEM_BLOCK?
n_columns = MAX_MEM_BLOCK // (stft_matrix.shape[0] * stft_matrix.itemsize)
n_columns = max(n_columns, 1)
for bl_s in range(0, stft_matrix.shape[1], n_columns):
bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
stft_matrix[:,
bl_s:bl_t] = fft.rfft(fft_window * y_frames[:, bl_s:bl_t],
axis=0)
if input_rank == 2:
stft_matrix = np.expand_dims(stft_matrix, 0)
return stft_matrix
def istft(
x,
hop_length=None,
win_length=None,
window="hann",
center=True,
length=None,
):
stft_matrix = x
input_rank = len(stft_matrix.shape)
if input_rank == 3:
assert stft_matrix.shape[0] == 1 # Only 2d input supported in librosa
stft_matrix = stft_matrix.squeeze(0)
dtype = None
n_fft = 2 * (stft_matrix.shape[0] - 1)
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
ifft_window = get_window(window, win_length, fftbins=True)
# Pad out to match n_fft, and add a broadcasting axis
ifft_window = pad_center(ifft_window, n_fft)[:, np.newaxis]
# For efficiency, trim STFT frames according to signal length if available
if length:
if center:
padded_length = length + int(n_fft)
else:
padded_length = length
n_frames = min(stft_matrix.shape[1],
int(np.ceil(padded_length / hop_length)))
else:
n_frames = stft_matrix.shape[1]
expected_signal_len = n_fft + hop_length * (n_frames - 1)
if dtype is None:
dtype = dtype_c2r(stft_matrix.dtype)
y = np.zeros(expected_signal_len, dtype=dtype)
n_columns = MAX_MEM_BLOCK // (stft_matrix.shape[0] * stft_matrix.itemsize)
n_columns = max(n_columns, 1)  # mirror the stft() guard so at least one column is processed
frame = 0
for bl_s in range(0, n_frames, n_columns):
bl_t = min(bl_s + n_columns, n_frames)
# invert the block and apply the window function
ytmp = ifft_window * fft.irfft(stft_matrix[:, bl_s:bl_t], axis=0)
# Overlap-add the istft block starting at the i'th frame
__overlap_add(y[frame * hop_length:], ytmp, hop_length)
frame += bl_t - bl_s
# Normalize by sum of squared window
ifft_window_sum = window_sumsquare(
window,
n_frames,
win_length=win_length,
n_fft=n_fft,
hop_length=hop_length,
dtype=dtype,
)
approx_nonzero_indices = ifft_window_sum > tiny(ifft_window_sum)
y[approx_nonzero_indices] /= ifft_window_sum[approx_nonzero_indices]
if length is None:
# If we don't need to control length, just do the usual center trimming
# to eliminate padded data
if center:
y = y[int(n_fft // 2):-int(n_fft // 2)]
else:
if center:
# If we're centering, crop off the first n_fft//2 samples
# and then trim/pad to the target length.
# We don't trim the end here, so that if the signal is zero-padded
# to a longer duration, the decay is smooth by windowing
start = int(n_fft // 2)
else:
# If we're not centering, start at 0 and trim/pad as necessary
start = 0
y = fix_length(y[start:], length)
if input_rank == 3:
y = np.expand_dims(y, 0)
return y
def frame_for_api_test(x, frame_length, hop_length, axis=-1):
if axis == -1 and not x.flags["C_CONTIGUOUS"]:
x = np.ascontiguousarray(x)
elif axis == 0 and not x.flags["F_CONTIGUOUS"]:
x = np.asfortranarray(x)
n_frames = 1 + (x.shape[axis] - frame_length) // hop_length
strides = np.asarray(x.strides)
if axis == -1:
shape = list(x.shape)[:-1] + [frame_length, n_frames]
strides = list(strides) + [hop_length * x.itemsize]
elif axis == 0:
shape = [n_frames, frame_length] + list(x.shape)[1:]
strides = [hop_length * x.itemsize] + list(strides)
else:
raise ValueError("Frame axis={} must be either 0 or -1".format(axis))
return as_strided(x, shape=shape, strides=strides)
def overlap_add_for_api_test(x, hop_length, axis=-1):
assert axis in [0, -1], 'axis should be 0/-1.'
assert len(x.shape) >= 2, 'Input dims should be >= 2.'
squeeze_output = False
if len(x.shape) == 2:
squeeze_output = True
dim = 0 if axis == -1 else -1
x = np.expand_dims(x, dim) # batch
n_frames = x.shape[axis]
frame_length = x.shape[1] if axis == 0 else x.shape[-2]
# Assure no gaps between frames.
assert 0 < hop_length <= frame_length, \
f'hop_length should be in (0, frame_length({frame_length})], but got {hop_length}.'
seq_length = (n_frames - 1) * hop_length + frame_length
reshape_output = False
if len(x.shape) > 3:
reshape_output = True
if axis == 0:
target_shape = [seq_length] + list(x.shape[2:])
x = x.reshape(n_frames, frame_length, np.product(x.shape[2:]))
else:
target_shape = list(x.shape[:-2]) + [seq_length]
x = x.reshape(np.product(x.shape[:-2]), frame_length, n_frames)
if axis == 0:
x = x.transpose((2, 1, 0))
y = np.zeros(shape=[np.product(x.shape[:-2]), seq_length], dtype=x.dtype)
for i in range(x.shape[0]):
for frame in range(x.shape[-1]):
sample = frame * hop_length
y[i, sample:sample + frame_length] += x[i, :, frame]
if axis == 0:
y = y.transpose((1, 0))
if reshape_output:
y = y.reshape(target_shape)
if squeeze_output:
y = y.squeeze(-1) if axis == 0 else y.squeeze(0)
return y
def place(devices, key='place'):
def decorate(cls):
module = sys.modules[cls.__module__].__dict__
raw_classes = {
k: v
for k, v in module.items() if k.startswith(cls.__name__)
}
for raw_name, raw_cls in raw_classes.items():
for d in devices:
test_cls = dict(raw_cls.__dict__)
test_cls.update({key: d})
new_name = raw_name + '.' + d.__class__.__name__
module[new_name] = type(new_name, (raw_cls, ), test_cls)
del module[raw_name]
return cls
return decorate
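# Illustrative effect (editor's note): place() makes one copy of each matching
# test class per device in DEVICES, registered under names such as
# "<Name>.CPUPlace" / "<Name>.CUDAPlace" with the attribute `place` set to that
# device, and then deletes the original class so each case runs once per device.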
def setUpModule():
global rtol
global atol
# All test cases will use float64 for comparison precision, refs:
# https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64
rtol = {
'float32': 1e-06,
'float64': 1e-7,
'complex64': 1e-06,
'complex128': 1e-7,
}
atol = {
'float32': 0.0,
'float64': 0.0,
'complex64': 0.0,
'complex128': 0.0,
}
def tearDownModule():
pass
def rand_x(dims=1,
dtype='float64',
min_dim_len=1,
max_dim_len=10,
shape=None,
complex=False):
if shape is None:
shape = [
np.random.randint(min_dim_len, max_dim_len) for i in range(dims)
]
if complex:
return np.random.randn(*shape).astype(
dtype) + 1.j * np.random.randn(*shape).astype(dtype)
else:
return np.random.randn(*shape).astype(dtype)
def parameterize(attrs, input_values=None):
if isinstance(attrs, str):
attrs = [attrs]
input_dicts = (attrs if input_values is None else
[dict(zip(attrs, vals)) for vals in input_values])
def decorator(base_class):
test_class_module = sys.modules[base_class.__module__].__dict__
for idx, input_dict in enumerate(input_dicts):
test_class_dict = dict(base_class.__dict__)
test_class_dict.update(input_dict)
name = class_name(base_class, idx, input_dict)
test_class_module[name] = type(name, (base_class, ),
test_class_dict)
for method_name in list(base_class.__dict__):
if method_name.startswith("test"):
delattr(base_class, method_name)
return base_class
return decorator
def class_name(cls, num, params_dict):
suffix = to_safe_name(
next((v for v in params_dict.values() if isinstance(v, str)), ""))
if TEST_CASE_NAME in params_dict:
suffix = to_safe_name(params_dict["test_case"])
return "{}_{}{}".format(cls.__name__, num, suffix and "_" + suffix)
def to_safe_name(s):
return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
# yapf: disable
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'frame_length', 'hop_length', 'axis'),
[
('test_1d_input1', rand_x(1, np.float64, shape=[150]), 50, 15, 0),
('test_1d_input2', rand_x(1, np.float64, shape=[150]), 50, 15, -1),
('test_2d_input1', rand_x(2, np.float64, shape=[150, 8]), 50, 15, 0),
('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1),
('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0),
('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1),
])
class TestFrame(unittest.TestCase):
def test_frame(self):
self.assertTrue(
np.allclose(
frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis),
paddle.signal.frame(
paddle.to_tensor(self.x),
self.frame_length,
self.hop_length,
self.axis),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'frame_length', 'hop_length', 'axis'),
[
('test_1d_input1', rand_x(1, np.float64, shape=[150]), 50, 15, 0),
('test_1d_input2', rand_x(1, np.float64, shape=[150]), 50, 15, -1),
('test_2d_input1', rand_x(2, np.float64, shape=[150, 8]), 50, 15, 0),
('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1),
('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0),
('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1),
])
class TestFrameStatic(unittest.TestCase):
def test_frame_static(self):
paddle.enable_static()
mp, sp = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(mp, sp):
input = paddle.static.data('input', self.x.shape, dtype=self.x.dtype)
output = paddle.signal.frame(
input,
self.frame_length,
self.hop_length,
self.axis),
exe = paddle.static.Executor(self.place)
exe.run(sp)
[output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output])
paddle.disable_static()
self.assertTrue(
np.allclose(
frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis),
output,
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'frame_length', 'hop_length', 'axis', 'expect_exception'),
[
('test_axis', rand_x(1, np.float64, shape=[150]), 50, 15, 2, ValueError),
('test_hop_length', rand_x(1, np.float64, shape=[150]), 50, 0, -1, ValueError),
('test_frame_length1', rand_x(2, np.float64, shape=[150, 8]), 0, 15, 0, ValueError),
('test_frame_length2', rand_x(2, np.float64, shape=[150, 8]), 151, 15, 0, ValueError),
])
class TestFrameException(unittest.TestCase):
def test_frame(self):
with self.assertRaises(self.expect_exception):
paddle.signal.frame(
paddle.to_tensor(self.x),
self.frame_length,
self.hop_length,
self.axis)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'hop_length', 'axis'),
[
('test_2d_input1', rand_x(2, np.float64, shape=[3, 50]), 4, 0),
('test_2d_input2', rand_x(2, np.float64, shape=[50, 3]), 4, -1),
('test_3d_input1', rand_x(3, np.float64, shape=[5, 40, 2]), 10, 0),
('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1),
('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0),
('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1),
])
class TestOverlapAdd(unittest.TestCase):
def test_overlap_add(self):
self.assertTrue(
np.allclose(
overlap_add_for_api_test(self.x, self.hop_length, self.axis),
paddle.signal.overlap_add(
paddle.to_tensor(self.x),
self.hop_length,
self.axis),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'hop_length', 'axis'),
[
('test_2d_input1', rand_x(2, np.float64, shape=[3, 50]), 4, 0),
('test_2d_input2', rand_x(2, np.float64, shape=[50, 3]), 4, -1),
('test_3d_input1', rand_x(3, np.float64, shape=[5, 40, 2]), 10, 0),
('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1),
('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0),
('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1),
])
class TestOverlapAddStatic(unittest.TestCase):
def test_overlap_add_static(self):
paddle.enable_static()
mp, sp = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(mp, sp):
input = paddle.static.data('input', self.x.shape, dtype=self.x.dtype)
output = paddle.signal.overlap_add(
input,
self.hop_length,
self.axis),
exe = paddle.static.Executor(self.place)
exe.run(sp)
[output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output])
paddle.disable_static()
self.assertTrue(
np.allclose(
overlap_add_for_api_test(self.x, self.hop_length, self.axis),
output,
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'hop_length', 'axis', 'expect_exception'),
[
('test_axis', rand_x(2, np.float64, shape=[3, 50]), 4, 2, ValueError),
('test_hop_length', rand_x(2, np.float64, shape=[50, 3]), -1, -1, ValueError),
])
class TestOverlapAddException(unittest.TestCase):
def test_overlap_add(self):
with self.assertRaises(self.expect_exception):
paddle.signal.overlap_add(
paddle.to_tensor(self.x),
self.hop_length,
self.axis)
# ================= STFT
# common args
# x
# n_fft,
# hop_length=None,
# win_length=None,
# window=None,
# center=True,
# pad_mode='reflect',
# paddle only
# normalized=False,
# onesided=True,
# ================= ISTFT
# common args
# x,
# hop_length=None,
# win_length=None,
# window=None,
# center=True,
# length=None,
# paddle only
# n_fft,
# normalized=False,
# onesided=True,
# return_complex=False,
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n_fft', 'hop_length', 'win_length', 'window', 'center', 'pad_mode', 'normalized', 'onesided'),
[
('test_1d_input', rand_x(1, np.float64, shape=[160000]),
512, None, None, get_window('hann', 512), True, 'reflect', False, True),
('test_2d_input', rand_x(2, np.float64, shape=[1, 160000]),
512, None, None, get_window('hann', 512), True, 'reflect', False, True),
('test_hop_length', rand_x(2, np.float64, shape=[1, 160000]),
512, 255, None, get_window('hann', 512), True, 'reflect', False, True),
('test_win_length', rand_x(2, np.float64, shape=[1, 160000]),
512, 255, 499, get_window('hann', 499), True, 'reflect', False, True),
('test_window', rand_x(2, np.float64, shape=[1, 160000]),
512, None, None, None, True, 'reflect', False, True),
('test_center', rand_x(2, np.float64, shape=[1, 160000]),
512, None, None, None, False, 'reflect', False, True),
])
class TestStft(unittest.TestCase):
def test_stft(self):
if self.window is None:
win_p = None
win_l = 'boxcar' # rectangular window
else:
win_p = paddle.to_tensor(self.window)
win_l = self.window
self.assertTrue(
np.allclose(
stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l, self.center, self.pad_mode),
paddle.signal.stft(
paddle.to_tensor(self.x),
self.n_fft,
self.hop_length,
self.win_length,
win_p,
self.center,
self.pad_mode,
self.normalized,
self.onesided),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n_fft', 'hop_length', 'win_length', 'window', 'center', 'pad_mode', 'normalized', 'onesided', 'expect_exception'),
[
('test_dims', rand_x(1, np.float64, shape=[1, 2, 3]),
512, None, None, None, True, 'reflect', False, True, AssertionError),
('test_hop_length', rand_x(1, np.float64, shape=[16000]),
512, 0, None, None, True, 'reflect', False, True, AssertionError),
('test_nfft1', rand_x(1, np.float64, shape=[16000]),
0, None, None, None, True, 'reflect', False, True, AssertionError),
('test_nfft2', rand_x(1, np.float64, shape=[16000]),
16001, None, None, None, True, 'reflect', False, True, AssertionError),
('test_win_length', rand_x(1, np.float64, shape=[16000]),
512, None, 0, None, True, 'reflect', False, True, AssertionError),
('test_win_length', rand_x(1, np.float64, shape=[16000]),
512, None, 513, None, True, 'reflect', False, True, AssertionError),
('test_pad_mode', rand_x(1, np.float64, shape=[16000]),
512, None, None, None, True, 'nonsense', False, True, AssertionError),
('test_complex_onesided', rand_x(1, np.float64, shape=[16000], complex=True),
512, None, None, None, False, 'reflect', False, True, AssertionError),
])
class TestStftException(unittest.TestCase):
def test_stft(self):
if self.window is None:
win_p = None
else:
win_p = paddle.to_tensor(self.window)
with self.assertRaises(self.expect_exception):
paddle.signal.stft(
paddle.to_tensor(self.x),
self.n_fft,
self.hop_length,
self.win_length,
win_p,
self.center,
self.pad_mode,
self.normalized,
                self.onesided)
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n_fft', 'hop_length', 'win_length', 'window', 'center', 'normalized', 'onesided', 'length', 'return_complex'),
[
('test_2d_input', rand_x(2, np.float64, shape=[257, 471], complex=True),
512, None, None, get_window('hann', 512), True, False, True, None, False),
('test_3d_input', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, get_window('hann', 512), True, False, True, None, False),
('test_hop_length', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, 99, None, get_window('hann', 512), True, False, True, None, False),
('test_win_length', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, 99, 299, get_window('hann', 299), True, False, True, None, False),
('test_window', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, None, True, False, True, None, False),
('test_center', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, None, False, False, True, None, False),
('test_length', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, None, False, False, True, 1888, False),
])
class TestIstft(unittest.TestCase):
def test_istft(self):
if self.window is None:
win_p = None
win_l = 'boxcar' # rectangular window
else:
win_p = paddle.to_tensor(self.window)
win_l = self.window
self.assertTrue(
np.allclose(
istft(self.x, self.hop_length, self.win_length, win_l, self.center, self.length),
paddle.signal.istft(
paddle.to_tensor(self.x),
self.n_fft,
self.hop_length,
self.win_length,
win_p,
self.center,
self.normalized,
self.onesided,
self.length,
self.return_complex),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype))))
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'n_fft', 'hop_length', 'win_length', 'window', 'center', 'normalized', 'onesided', 'length', 'return_complex', 'expect_exception'),
[
('test_dims', rand_x(4, np.float64, shape=[1, 2, 3, 4], complex=True),
512, None, None, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_n_fft', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
257, None, None, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_hop_length1', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, 0, None, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_hop_length2', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, 513, None, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_win_length1', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, 0, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_win_length2', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, 513, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_onesided1', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
20, None, None, get_window('hann', 512), True, False, True, None, False, AssertionError),
('test_onesided2', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
256, None, None, None, True, False, False, None, False, AssertionError),
('test_window', rand_x(3, np.float64, shape=[1, 512, 471], complex=True),
512, None, 511, get_window('hann', 512), True, False, False, None, False, AssertionError),
('test_return_complex1', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, get_window('hann', 512), True, False, True, None, True, AssertionError),
('test_return_complex2', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, rand_x(1, np.float64, shape=[512], complex=True), True, False, True, None, False, AssertionError),
('test_NOLA', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, 512, None, get_window('hann', 512), True, False, True, None, False, ValueError),
])
class TestIstftException(unittest.TestCase):
def test_istft(self):
if self.window is None:
win_p = None
else:
win_p = paddle.to_tensor(self.window)
with self.assertRaises(self.expect_exception):
paddle.signal.istft(
paddle.to_tensor(self.x),
self.n_fft,
self.hop_length,
self.win_length,
win_p,
self.center,
self.normalized,
self.onesided,
self.length,
                self.return_complex)
# yapf: enable
if __name__ == '__main__':
unittest.main()
| 34.474827
| 157
| 0.587092
|
ad0a55776dcfe0d1b212e3f90d2a703f6f09d53c
| 5,856
|
py
|
Python
|
Community-Supported/clouddb-extractor/mysql_extractor.py
|
TableauKyle/hyper-api-samples
|
37c21c988122c6dbfb662d9ec72d90c4cd30e4cc
|
[
"MIT"
] | 73
|
2020-04-29T15:41:55.000Z
|
2022-03-12T04:55:24.000Z
|
Community-Supported/clouddb-extractor/mysql_extractor.py
|
TableauKyle/hyper-api-samples
|
37c21c988122c6dbfb662d9ec72d90c4cd30e4cc
|
[
"MIT"
] | 32
|
2020-06-10T00:47:20.000Z
|
2022-03-28T11:19:00.000Z
|
Community-Supported/clouddb-extractor/mysql_extractor.py
|
TableauKyle/hyper-api-samples
|
37c21c988122c6dbfb662d9ec72d90c4cd30e4cc
|
[
"MIT"
] | 54
|
2020-05-01T20:01:51.000Z
|
2022-03-28T11:11:00.000Z
|
"""MySQL implementation of Base Hyper Extractor ABC
Tableau Community supported Hyper API sample
-----------------------------------------------------------------------------
This file is the copyrighted property of Tableau Software and is protected
by registered patents and other applicable U.S. and international laws and
regulations.
You may adapt this file and modify it to fit into your context and use it
as a template to start your own projects.
-----------------------------------------------------------------------------
"""
import logging
from typing import Any, Optional
import mysql.connector
from mysql.connector import FieldType
from tableauhyperapi import Nullability, SqlType, TableDefinition, TableName
from base_extractor import DEFAULT_SITE_ID, BaseExtractor, HyperSQLTypeMappingError
logger = logging.getLogger("hyper_samples.extractor.mySQL")
class QuerySizeLimitError(Exception):
pass
class MySQLExtractor(BaseExtractor):
"""MySQL Implementation of Extractor Interface
Authentication to Tableau Server can be either by Personal Access Token or
Username and Password.
Constructor Args:
- source_database_config (dict): Source database parameters
- tableau_hostname (string): URL for Tableau Server, e.g. "http://localhost"
- tableau_site_id (string): Tableau site identifier - if default use ""
- tableau_project (string): Tableau project identifier
- tableau_token_name (string): PAT name
- tableau_token_secret (string): PAT secret
- tableau_username (string): Tableau username
- tableau_password (string): Tableau password
NOTE: Authentication to Tableau Server can be either by Personal Access Token or
Username and Password. If both are specified then token takes precedence.
"""
def __init__(
self,
source_database_config: dict,
tableau_hostname: str,
tableau_project: str,
tableau_site_id: str = DEFAULT_SITE_ID,
tableau_token_name: Optional[str] = None,
tableau_token_secret: Optional[str] = None,
tableau_username: Optional[str] = None,
tableau_password: Optional[str] = None,
) -> None:
super().__init__(
source_database_config=source_database_config,
tableau_hostname=tableau_hostname,
tableau_project=tableau_project,
tableau_site_id=tableau_site_id,
tableau_token_name=tableau_token_name,
tableau_token_secret=tableau_token_secret,
tableau_username=tableau_username,
tableau_password=tableau_password,
)
self._source_database_connection = None
self.sql_identifier_quote = "`"
def source_database_cursor(self) -> Any:
"""
Returns a DBAPI Cursor to the source database
"""
if self._source_database_connection is None:
db_connection_args = self.source_database_config.get("connection")
logger.info("Connecting to source MySQL Instance...")
self._source_database_connection = mysql.connector.connect(**db_connection_args)
return self._source_database_connection.cursor()
def hyper_sql_type(self, source_column: Any) -> SqlType:
"""
Finds the corresponding Hyper column type for source_column
source_column (obj): Instance of DBAPI Column description tuple
Returns a tableauhyperapi.SqlType Object
"""
        type_lookup = {
            "TINY": SqlType.bool(),
            "SHORT": SqlType.small_int(),  # SMALLINT maps to a small integer, not raw bytes
            "DATE": SqlType.date(),
            "DATETIME": SqlType.timestamp(),
            "INT24": SqlType.big_int(),
            "LONGLONG": SqlType.big_int(),
            "LONG": SqlType.int(),  # mysql.connector reports INT columns as LONG
            "INTEGER": SqlType.int(),
            "DECIMAL": SqlType.numeric(18, 9),
            "DOUBLE": SqlType.double(),
            "FLOAT": SqlType.double(),
            "VAR_STRING": SqlType.text(),
            "TIME": SqlType.time(),
            "TIMESTAMP": SqlType.timestamp_tz(),
        }
source_column_type = source_column
return_sql_type = type_lookup.get(source_column_type)
if return_sql_type is None:
error_message = "No Hyper SqlType defined for MySQL source type: {}".format(source_column_type)
logger.error(error_message)
raise HyperSQLTypeMappingError(error_message)
logger.debug("Translated source column type {} to Hyper SqlType {}".format(source_column_type, return_sql_type))
return return_sql_type
def hyper_table_definition(self, source_table: Any, hyper_table_name: str = "Extract") -> TableDefinition:
"""
Build a hyper table definition from source_schema
source_table (obj): Source table (Instance of DBAPI Cursor Description)
hyper_table_name (string): Name of the target Hyper table, default="Extract"
Returns a tableauhyperapi.TableDefinition Object
"""
# logger.debug(
# "Building Hyper TableDefinition for table {}".format(source_table.dtypes)
# )
target_cols = []
for source_field in source_table:
this_name = source_field[0]
this_type = self.hyper_sql_type(FieldType.get_info(source_field[1]))
if source_field[6]:
this_col = TableDefinition.Column(this_name, this_type, Nullability.NOT_NULLABLE)
else:
this_col = TableDefinition.Column(name=this_name, type=this_type)
target_cols.append(this_col)
logger.info("..Column {} - Type {}".format(this_name, this_type))
# create the target schema for our Hyper File
target_schema = TableDefinition(table_name=TableName("Extract", hyper_table_name), columns=target_cols)
return target_schema
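# Hedged usage sketch (not called anywhere in this module). All connection parameters,
# Tableau credentials and the table name below are placeholders, not values defined by
# this sample.
def _example_usage():
    extractor = MySQLExtractor(
        source_database_config={
            "connection": {
                "host": "localhost",
                "user": "etl_user",
                "password": "********",
                "database": "sales",
            }
        },
        tableau_hostname="https://tableau.example.com",
        tableau_project="Default",
        tableau_token_name="my-token",
        tableau_token_secret="********",
    )
    cursor = extractor.source_database_cursor()
    cursor.execute("SELECT * FROM orders LIMIT 10")
    # cursor.description is the DBAPI column description consumed above
    return extractor.hyper_table_definition(cursor.description, "orders_extract")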
def main():
pass
if __name__ == "__main__":
main()
| 37.538462
| 120
| 0.658811
|
f7f18402a1160ce856644f4c8082753e0a841873
| 186
|
py
|
Python
|
python/py-itertools/itertools-product.py
|
th3c0d3br34ker/hackerrank-solutions
|
c61e987cbb359fd27e41051c39ffd7f377f0c5f2
|
[
"MIT"
] | 1
|
2020-08-04T18:31:24.000Z
|
2020-08-04T18:31:24.000Z
|
python/py-itertools/itertools-product.py
|
th3c0d3br34ker/hackerrank-solutions
|
c61e987cbb359fd27e41051c39ffd7f377f0c5f2
|
[
"MIT"
] | null | null | null |
python/py-itertools/itertools-product.py
|
th3c0d3br34ker/hackerrank-solutions
|
c61e987cbb359fd27e41051c39ffd7f377f0c5f2
|
[
"MIT"
] | null | null | null |
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import product
a = map(int, input().split())
b = map(int, input().split())
print(*product(a, b))
| 18.6
| 69
| 0.688172
|
da487deaa8388879e9e089790054c00da9dc595f
| 1,895
|
py
|
Python
|
src/data/64.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/64.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
src/data/64.py
|
NULLCT/LOMC
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
[
"MIT"
] | null | null | null |
import math, string, itertools, fractions, heapq, collections, re, array, bisect, sys, copy, functools
import time, random
sys.setrecursionlimit(10**6)
inf = 10**20
eps = 1.0 / 10**10
mod = 10**9 + 7
mod2 = 998244353
dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]
ddn = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
df = collections.defaultdict
def LI():
return list(map(int, sys.stdin.readline().split()))
def LI_():
return [int(x) - 1 for x in sys.stdin.readline().split()]
def LS():
return sys.stdin.readline().split()
def I():
return int(sys.stdin.readline())
def F():
return float(sys.stdin.readline())
def S():
return input()
def pf(s):
return print(s, flush=True)
def pe(s):
return print(str(s), file=sys.stderr)
def JA(a, sep):
return sep.join(map(str, a))
def JAA(a, s, t):
return s.join(t.join(map(str, b)) for b in a)
def main():
n, q = LI()
ab = [LI_() for _ in range(n - 1)]
cd = [LI_() for _ in range(q)]
e = df(set)
for a, b in ab:
e[a].add(b)
e[b].add(a)
def search(s):
d = collections.defaultdict(lambda: inf)
d[s] = 0
q = []
heapq.heappush(q, (0, s))
v = collections.defaultdict(bool)
while len(q):
k, u = heapq.heappop(q)
if v[u]:
continue
v[u] = True
for uv in e[u]:
if v[uv]:
continue
vd = k + 1
if d[uv] > vd:
d[uv] = vd
heapq.heappush(q, (vd, uv))
return d
s = search(0)
rr = []
for c, d in cd:
t = s[c] + s[d]
if t % 2 == 1:
rr.append("Road")
else:
rr.append("Town")
return JA(rr, "\n")
# start = time.time()
print(main())
# pe(time.time() - start)
| 18.578431
| 102
| 0.475462
|
9af7b9e03a24fa9b12f224b7a0ee428e77a94d8d
| 173
|
py
|
Python
|
src/views/index.py
|
Ohtu-FaceTed/FaceTed-Backend
|
0fff9018ec28eb73d40c56b27e26b1535b8272fc
|
[
"BSD-3-Clause"
] | 1
|
2020-01-25T15:12:11.000Z
|
2020-01-25T15:12:11.000Z
|
src/views/index.py
|
Ohtu-FaceTed/FaceTed-Backend
|
0fff9018ec28eb73d40c56b27e26b1535b8272fc
|
[
"BSD-3-Clause"
] | 3
|
2019-10-07T06:41:49.000Z
|
2021-06-02T00:32:15.000Z
|
src/views/index.py
|
StatisticsFinland/rakennusluokkahaku
|
912d62d07fe9802c7d5351f7b73b79732ad12304
|
[
"BSD-3-Clause"
] | 1
|
2020-01-22T11:47:50.000Z
|
2020-01-22T11:47:50.000Z
|
from flask import escape, request
from . import views as app
@app.route("/")
def index():
name = request.args.get("name", "World")
return f"Hello, {escape(name)}"
| 19.222222
| 44
| 0.653179
|
c6fcf36085d569e353140d90b84d5b2aba49b219
| 4,879
|
py
|
Python
|
support/go-NN-master/engine/TFEngine.py
|
sjkim04/AlphaGOZero-python-tensorflow
|
32434d55466480ed2d3d042be654e62cf70d7cce
|
[
"MIT"
] | 325
|
2017-10-28T01:12:47.000Z
|
2022-03-25T12:00:06.000Z
|
support/go-NN-master/engine/TFEngine.py
|
sjkim04/AlphaGOZero-python-tensorflow
|
32434d55466480ed2d3d042be654e62cf70d7cce
|
[
"MIT"
] | 14
|
2017-10-30T19:27:33.000Z
|
2021-03-25T21:41:35.000Z
|
support/go-NN-master/engine/TFEngine.py
|
sjkim04/AlphaGOZero-python-tensorflow
|
32434d55466480ed2d3d042be654e62cf70d7cce
|
[
"MIT"
] | 113
|
2017-10-29T09:58:43.000Z
|
2021-12-30T01:52:45.000Z
|
import tensorflow as tf
import numpy as np
import random
import os
from Engine import *
import Book
import Features
import Normalization
import Symmetry
import Checkpoint
from GTP import Move, true_stderr
from Board import *
def softmax(E, temp):
#print "E =\n", E
expE = np.exp(temp * (E - max(E))) # subtract max to avoid overflow
return expE / np.sum(expE)
def sample_from(probs):
cumsum = np.cumsum(probs)
r = random.random()
for i in xrange(len(probs)):
if r <= cumsum[i]:
return i
assert False, "problem with sample_from"
class TFEngine(BaseEngine):
def __init__(self, eng_name, model):
super(TFEngine,self).__init__()
self.eng_name = eng_name
self.model = model
self.book = Book.load_GoGoD_book()
self.last_move_probs = np.zeros((self.model.N, self.model.N,))
self.kibitz_mode = False
# build the graph
with tf.Graph().as_default():
with tf.device('/cpu:0'):
self.feature_planes = tf.placeholder(tf.float32, shape=[None, self.model.N, self.model.N, self.model.Nfeat], name='feature_planes')
self.logits = model.inference(self.feature_planes, self.model.N, self.model.Nfeat)
saver = tf.train.Saver(tf.trainable_variables())
init = tf.initialize_all_variables()
self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
self.sess.run(init)
checkpoint_dir = os.path.join(model.train_dir, 'checkpoints')
Checkpoint.restore_from_checkpoint(self.sess, saver, checkpoint_dir)
def name(self):
return self.eng_name
def version(self):
return "1.0"
def set_board_size(self, N):
if N != self.model.N:
return False
return BaseEngine.set_board_size(self, N)
def pick_book_move(self, color):
if self.book:
book_move = Book.get_book_move(self.board, self.book)
if book_move:
print "playing book move", book_move
return Move(book_move[0], book_move[1])
print "no book move"
else:
print "no book"
return None
def pick_model_move(self, color):
if self.model.Nfeat == 15:
board_feature_planes = Features.make_feature_planes_stones_3liberties_4history_ko(self.board, color)
Normalization.apply_featurewise_normalization_B(board_feature_planes)
elif self.model.Nfeat == 21:
board_feature_planes = Features.make_feature_planes_stones_4liberties_4history_ko_4captures(self.board, color).astype(np.float32)
Normalization.apply_featurewise_normalization_C(board_feature_planes)
else:
assert False
feature_batch = Symmetry.make_symmetry_batch(board_feature_planes)
feed_dict = {self.feature_planes: feature_batch}
logit_batch = self.sess.run(self.logits, feed_dict)
move_logits = Symmetry.average_plane_over_symmetries(logit_batch, self.model.N)
softmax_temp = 1.0
move_probs = softmax(move_logits, softmax_temp)
# zero out illegal moves
for x in xrange(self.model.N):
for y in xrange(self.model.N):
ind = self.model.N * x + y
if not self.board.play_is_legal(x, y, color):
move_probs[ind] = 0
sum_probs = np.sum(move_probs)
if sum_probs == 0: return Move.Pass() # no legal moves, pass
move_probs /= sum_probs # re-normalize probabilities
pick_best = True
if pick_best:
move_ind = np.argmax(move_probs)
else:
move_ind = sample_from(move_probs)
move_x = move_ind / self.model.N
move_y = move_ind % self.model.N
self.last_move_probs = move_probs.reshape((self.board.N, self.board.N))
return Move(move_x, move_y)
def pick_move(self, color):
book_move = self.pick_book_move(color)
if book_move:
if self.kibitz_mode: # in kibitz mode compute model probabilities anyway
self.pick_model_move(color) # ignore the model move
return book_move
return self.pick_model_move(color)
def get_last_move_probs(self):
return self.last_move_probs
def stone_played(self, x, y, color):
# if we are in kibitz mode, we want to compute model probabilities for ALL turns
if self.kibitz_mode:
self.pick_model_move(color)
true_stderr.write("probability of played move %s (%d, %d) was %.2f%%\n" % (color_names[color], x, y, 100*self.last_move_probs[x,y]))
BaseEngine.stone_played(self, x, y, color)
def toggle_kibitz_mode(self):
self.kibitz_mode = ~self.kibitz_mode
return self.kibitz_mode
| 35.100719
| 147
| 0.636606
|
f789a232e66c4d935a0c2ee689653d33f79b562e
| 4,377
|
py
|
Python
|
template_settings_postgres.py
|
UoW-CPC/Asclepios-TrustedAuthority
|
9fd84d356b4d503bc41940055a51f23fae68c870
|
[
"Apache-1.1"
] | null | null | null |
template_settings_postgres.py
|
UoW-CPC/Asclepios-TrustedAuthority
|
9fd84d356b4d503bc41940055a51f23fae68c870
|
[
"Apache-1.1"
] | 3
|
2020-03-25T13:01:31.000Z
|
2020-10-01T12:07:55.000Z
|
TA/settings.py
|
UoW-CPC/Asclepios-TrustedAuthority
|
9fd84d356b4d503bc41940055a51f23fae68c870
|
[
"Apache-1.1"
] | 1
|
2020-03-24T12:43:26.000Z
|
2020-03-24T12:43:26.000Z
|
"""
Django settings for TA project.
Generated by 'django-admin startproject' using Django 1.11.24.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import logging.config # this is for logging
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = '5jpu71#_4j4jaorh+_llj45p$gno7@)+!04n!#q_27b+4cv%4('
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
# os.getenv returns a string; only treat explicit truthy values as enabling debug
DEBUG = os.getenv('DJANGO_DEBUG', 'False').lower() in ('true', '1', 'yes')
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS').split(' ')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'TA.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TA.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'], # database name
'USER': os.environ['DB_USER'], # user name
'PASSWORD': os.environ['DB_PASSWORD'], # user password
'HOST': os.environ['DB_HOST'], # postgres server
'PORT': os.environ['DB_PORT'],
}
}
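# Illustrative only (assumed deployment values, not part of the generated settings): the
# environment variables this template reads via os.environ/os.getenv above and below.
#
#   DJANGO_SECRET_KEY=change-me
#   DJANGO_DEBUG=False
#   ALLOWED_HOSTS=localhost 127.0.0.1
#   DB_NAME=ta_db  DB_USER=ta_user  DB_PASSWORD=secret  DB_HOST=postgres  DB_PORT=5432
#   DJANGO_LOGLEVEL=info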
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Logging Configuration
# Clear prev config
LOGGING_CONFIG = None
# Get loglevel from env
LOGLEVEL = os.getenv('DJANGO_LOGLEVEL', 'info').upper()
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
},
},
'loggers': {
'': {
'level': LOGLEVEL,
'handlers': ['console',],
},
'django.utils.autoreload': {
'level': 'ERROR',
# ...
}
},
})
# Prevent creating pyc files
# NOTE: PYTHONDONTWRITEBYTECODE only has an effect as an environment variable set before
# the interpreter starts; assigning it in settings.py is a no-op kept as documentation.
PYTHONDONTWRITEBYTECODE = 1
| 25.899408
| 118
| 0.66027
|
bae7056d537ae144e5935ef3b52b96861fee89f4
| 211
|
py
|
Python
|
audio_annotator_generic/__init__.py
|
sofiaele/audio_annotator
|
c9be96fce1a3ccdb53a73b80b81fc93ce0050901
|
[
"MIT"
] | null | null | null |
audio_annotator_generic/__init__.py
|
sofiaele/audio_annotator
|
c9be96fce1a3ccdb53a73b80b81fc93ce0050901
|
[
"MIT"
] | null | null | null |
audio_annotator_generic/__init__.py
|
sofiaele/audio_annotator
|
c9be96fce1a3ccdb53a73b80b81fc93ce0050901
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_login import LoginManager, UserMixin, login_required
app = Flask(__name__)
app.secret_key = "a2f8a7fa-fb61-11ea-8c89-0f4248d2074f"
from audio_annotator_generic import routes
| 26.375
| 63
| 0.834123
|
bb7d928bbe39c2591fa3a7f6ae24299e4658f58a
| 31,672
|
py
|
Python
|
electrum/gui/qt/util.py
|
WER136/electrum
|
b86b3ec1d1e93113a9a551b0497da7076a95f29d
|
[
"MIT"
] | 6
|
2020-09-13T23:41:59.000Z
|
2022-02-18T17:20:19.000Z
|
electrum/gui/qt/util.py
|
WER136/electrum
|
b86b3ec1d1e93113a9a551b0497da7076a95f29d
|
[
"MIT"
] | 19
|
2019-11-19T14:34:30.000Z
|
2022-03-10T21:21:07.000Z
|
electrum/gui/qt/util.py
|
WER136/electrum
|
b86b3ec1d1e93113a9a551b0497da7076a95f29d
|
[
"MIT"
] | 5
|
2019-11-17T12:10:24.000Z
|
2021-05-12T15:47:59.000Z
|
import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from functools import partial, lru_cache
from typing import NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem,
QPalette, QIcon, QFontMetrics)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate)
from electrum.i18n import _, languages
from electrum.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum.util import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED
if TYPE_CHECKING:
from .main_window import ElectrumWindow
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
}
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
def __init__ (self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
# Only accept children that confirm to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent, message, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, header_layout, ok_label, default=None, allow_multi=False):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
_prior_text, user_role = self.tv.text_txid_from_coordinate(row, col)
# check that we didn't forget to set UserRole on an editable field
assert user_role is not None, (row, col)
self.tv.on_edited(idx, user_role, new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
return super().createEditor(parent, option, idx)
class MyTreeView(QTreeView):
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is not None:
editable_columns = set(editable_columns)
elif stretch_column is not None:
editable_columns = {stretch_column}
else:
            editable_columns = set()  # keep the type consistent: a set of column indices
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def current_item_user_role(self, col) -> Optional[QStandardItem]:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.model().itemFromIndex(idx)
if item:
return item.data(Qt.UserRole)
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
model = self.model()
model.setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [ Qt.Key_F2, Qt.Key_Return ]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, user_role, text):
self.parent.wallet.set_label(user_role, text)
self.parent.history_model.refresh('on_edited in MyTreeView')
self.parent.update_completions()
def should_hide(self, row):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
return False
def text_txid_from_coordinate(self, row_num, column):
assert not isinstance(self.model(), QSortFilterProxyModel)
idx = self.model().index(row_num, column)
item = self.model().itemFromIndex(idx)
user_role = item.data(Qt.UserRole)
return item.text(), user_role
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
txt, _ = self.text_txid_from_coordinate(row_num, column)
txt = txt.lower()
if self.current_filter in txt:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu, idx):
cc = menu.addMenu(_("Copy column"))
for column in self.Columns:
column_title = self.model().horizontalHeaderItem(column).text()
item_col = self.model().itemFromIndex(idx.sibling(idx.row(), column))
column_data = item_col.text().strip()
cc.addAction(column_title, lambda t=column_data: self.parent.app.clipboard().setText(t))
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = []
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
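def _task_thread_usage_sketch(parent):
    """Hedged usage sketch (not called from this module): run a blocking callable on the
    worker thread and receive callbacks on the parent's thread. The lambdas below are
    placeholders."""
    thread = TaskThread(parent)
    thread.add(lambda: time.sleep(1),                    # runs on the worker thread
               on_success=lambda result: print('done'),  # runs in the parent's thread
               on_done=thread.stop)
    return thread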
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
PURPLE = ColorSchemeItem("#8A2BE2", "#8A2BE2")
DEFAULT = ColorSchemeItem("black", "white")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window, title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getOpenFileName(_("Open {} file").format(title), filter_)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window, title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getSaveFileName(_("Select file to save your {}").format(title),
'electrum_{}.json'.format(title), filter_)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def get_parent_main_window(widget):
"""Returns a reference to the ElectrumWindow this widget belongs to."""
from .main_window import ElectrumWindow
for _ in range(100):
if widget is None:
return None
if not isinstance(widget, ElectrumWindow):
widget = widget.parentWidget()
else:
return widget
return None
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
class FromList(QTreeWidget):
def __init__(self, parent, create_menu):
super().__init__(parent)
self.setHeaderHidden(True)
self.setMaximumHeight(300)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# remove left margin
self.setRootIsDecorated(False)
self.setColumnCount(2)
self.header().setStretchLastSection(False)
sm = QHeaderView.ResizeToContents
self.header().setSectionResizeMode(0, sm)
self.header().setSectionResizeMode(1, sm)
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
sys.exit(0)
else:
webbrowser.open(url)
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
| 34.919515
| 124
| 0.62942
|
2d6ff61f4f9b933dca3e7cdaceeea3e9cdcc21ec
| 2,245
|
py
|
Python
|
odd_glue_adapter/mappers/jobs.py
|
opendatadiscovery/odd-glue-adapter
|
d54aff8517bdb3f0643290bf3d3b8494b551703f
|
[
"Apache-2.0"
] | null | null | null |
odd_glue_adapter/mappers/jobs.py
|
opendatadiscovery/odd-glue-adapter
|
d54aff8517bdb3f0643290bf3d3b8494b551703f
|
[
"Apache-2.0"
] | 1
|
2021-12-29T15:55:49.000Z
|
2022-01-05T21:22:44.000Z
|
odd_glue_adapter/mappers/jobs.py
|
opendatadiscovery/odd-glue-adapter
|
d54aff8517bdb3f0643290bf3d3b8494b551703f
|
[
"Apache-2.0"
] | 1
|
2021-10-04T15:57:14.000Z
|
2021-10-04T15:57:14.000Z
|
from typing import Dict, Any
from odd_models.models import DataEntity, DataEntityType, DataTransformer, DataTransformerRun, Status
from oddrn_generator import GlueGenerator
from . import metadata_extractor
GLUE_JOB_STATUSES = {
'STARTING': Status.UNKNOWN,
'RUNNING': Status.UNKNOWN,
'STOPPING': Status.UNKNOWN,
'STOPPED': Status.ABORTED,
'SUCCEEDED': Status.SUCCESS,
'FAILED': Status.FAILED,
'ERROR': Status.FAILED,
'TIMEOUT': Status.FAILED,
}
def map_glue_job(raw_job_data: Dict[str, Any], mapper_args: Dict[str, Any]) -> DataEntity:
oddrn_generator: GlueGenerator = mapper_args['oddrn_generator']
return DataEntity(
oddrn=oddrn_generator.get_oddrn_by_path('jobs', raw_job_data['Name']),
name=raw_job_data['Name'],
owner=None,
description=raw_job_data.get('Description', None),
updated_at=raw_job_data['LastModifiedOn'].isoformat(),
created_at=raw_job_data['CreatedOn'].isoformat(),
metadata=[metadata_extractor.extract_transformer_metadata(raw_job_data)],
type=DataEntityType.JOB,
data_transformer=DataTransformer(
source_code_url=raw_job_data['Command']['ScriptLocation'],
sql=None,
inputs=[],
outputs=[],
)
)
def map_glue_job_run(raw_job_run_data: Dict[str, Any], mapper_args: Dict[str, Any]) -> DataEntity:
status = GLUE_JOB_STATUSES.get(raw_job_run_data['JobRunState'], Status.UNKNOWN)
oddrn_generator: GlueGenerator = mapper_args['oddrn_generator']
return DataEntity(
oddrn=oddrn_generator.get_oddrn_by_path("runs", raw_job_run_data['Id']),
name=raw_job_run_data['Id'],
type=DataEntityType.JOB_RUN,
owner=mapper_args['transformer_owner'],
metadata=[metadata_extractor.extract_transformer_run_metadata(raw_job_run_data)],
data_transformer_run=DataTransformerRun(
start_time=raw_job_run_data['StartedOn'].isoformat(),
end_time=raw_job_run_data['CompletedOn'].isoformat(),
transformer_oddrn=oddrn_generator.get_oddrn_by_path('jobs'),
            # compare against the mapped Status enum (the raw string 'Fail' never matched)
            # and tolerate runs without an ErrorMessage key
            status_reason=raw_job_run_data.get('ErrorMessage') if status == Status.FAILED else None,
status=status
)
)
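def _example_raw_job_data():
    """Hedged sketch (not called anywhere) of the boto3 Glue ``get_job``-style payload
    that map_glue_job consumes, limited to the keys read above. Every value is a
    placeholder."""
    from datetime import datetime
    return {
        "Name": "nightly-etl",
        "Description": "Example job",
        "CreatedOn": datetime(2021, 1, 1),
        "LastModifiedOn": datetime(2021, 1, 2),
        "Command": {"ScriptLocation": "s3://example-bucket/scripts/etl.py"},
    }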
| 39.385965
| 101
| 0.69755
|
84bcc51d0c620c71b3d3f48ebe3c462adf280936
| 5,655
|
py
|
Python
|
structural_optimization.py
|
BUAA-WJR/SO-ET
|
c9357b99f07f7f52dfb4a5bae7e4694fba845198
|
[
"MIT"
] | 3
|
2021-09-07T12:11:04.000Z
|
2021-12-02T12:42:03.000Z
|
structural_optimization.py
|
BUAA-WJR/SO-ET
|
c9357b99f07f7f52dfb4a5bae7e4694fba845198
|
[
"MIT"
] | null | null | null |
structural_optimization.py
|
BUAA-WJR/SO-ET
|
c9357b99f07f7f52dfb4a5bae7e4694fba845198
|
[
"MIT"
] | 3
|
2021-11-02T07:22:17.000Z
|
2021-12-28T07:28:55.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# author: ryan_wu
# email: imitator_wu@outlook.com
# date: 2020-11-26 16:09:29
# Convert networkx graphs into adjacency matrices, then use the structural information
# (encoding) tree to turn them into trees of various depths
import os
import sys
import copy
import json
import time
import pickle
import itertools
import traceback
import numpy as np
import networkx as nx
from multiprocessing import Pool
from lib.encoding_tree import PartitionTree
PWD = os.path.dirname(os.path.realpath(__file__))
def trans_to_adj(graph):
graph.remove_edges_from(nx.selfloop_edges(graph))
nodes = range(len(graph.nodes))
return nx.to_numpy_array(graph, nodelist=nodes)
def trans_to_tree(adj, k=2):
undirected_adj = np.array(adj)
y = PartitionTree(adj_matrix=undirected_adj)
x = y.build_encoding_tree(k)
return y.tree_node
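def _example_tree_build():
    # Hedged sketch (never called by this script): build a depth-2 encoding tree for a
    # small cycle graph. The graph and depth are illustrative assumptions only.
    g = nx.cycle_graph(6)
    adj = trans_to_adj(g)
    tree = trans_to_tree(adj, k=2)
    return update_node(tree)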
def update_depth(tree):
# set leaf depth
wait_update = [k for k, v in tree.items() if v.children is None]
while wait_update:
for nid in wait_update:
node = tree[nid]
if node.children is None:
node.child_h = 0
else:
node.child_h = tree[list(node.children)[0]].child_h + 1
wait_update = set([tree[nid].parent for nid in wait_update if tree[nid].parent])
def update_node(tree):
update_depth(tree)
d_id= [(v.child_h, v.ID) for k, v in tree.items()]
d_id.sort()
new_tree = {}
for k, v in tree.items():
n = copy.deepcopy(v)
n.ID = d_id.index((n.child_h, n.ID))
if n.parent is not None:
n.parent = d_id.index((n.child_h+1, n.parent))
if n.children is not None:
n.children = [d_id.index((n.child_h-1, c)) for c in n.children]
n = n.__dict__
n['depth'] = n['child_h']
new_tree[n['ID']] = n
return new_tree
def pool_trans(input_):
g, tree_depth = input_
adj_mat = trans_to_adj(g['G'])
tree = trans_to_tree(adj_mat, tree_depth)
g['tree'] = update_node(tree)
return g
def pool_trans_disconnected(input_):
g, tree_depth = input_
    # Even in the "disconnected" datasets some graphs are connected; handle those directly
if nx.is_connected(g['G']):
return pool_trans((g, tree_depth))
trees = []
    # gi: index of the current connected component (graph index)
for gi, sub_nodes in enumerate(nx.connected_components(g['G'])):
if len(sub_nodes) == 1:
            # The encoding tree cannot handle a single isolated node, so build a one-node tree by hand
node = list(sub_nodes)[0]
            # leaf node; a parent ID has the form graphIndex_layerIndex_nodeIndex
js = [{'ID': node, 'parent': '%s_%s_0' % (gi, 1), 'depth': 0, 'children': None}]
for d in range(1, tree_depth+1):
js.append({'ID': '%s_%s_0' % (gi, d),
'parent': '%s_%s_0' % (gi, d+1) if d<tree_depth else None,
'depth': d,
'children': [js[-1]['ID']]
})
else:
sg = g['G'].subgraph(sub_nodes) # sub graph
nodes = list(sg.nodes)
nodes.sort()
nmap = {n: nodes.index(n) for n in nodes}
sg = nx.relabel_nodes(sg, nmap)
            # convert the subgraph into an encoding tree
adj_mat = trans_to_adj(sg)
tree = trans_to_tree(adj_mat, tree_depth)
tree = update_node(tree)
            # relabel tree IDs
js = list(tree.values())
            rmap = {nodes.index(n): n for n in nodes}  # leaf nodes keep their original IDs
for j in js:
if j['depth'] > 0:
rmap[j['ID']] = '%s_%s_%s' % (gi, j['depth'], j['ID'])
for j in js:
j['ID'] = rmap[j['ID']]
j['parent'] = rmap[j['parent']] if j['depth']<tree_depth else None
j['children'] = [rmap[c] for c in j['children']] if j['children'] else None
trees.append(js)
    # relabel node IDs across the whole merged tree
id_map = {}
for d in range(0, tree_depth+1):
for js in trees:
for j in js:
if j['depth'] == d:
                    # leaf nodes keep their original graph IDs
id_map[j['ID']] = len(id_map) if d>0 else j['ID']
tree = {}
root_ids = []
for js in trees:
for j in js:
n = copy.deepcopy(j)
n['parent'] = id_map[n['parent']] if n['parent'] else None
n['children'] = [id_map[c] for c in n['children']] if n['children'] else None
n['ID'] = id_map[n['ID']]
tree[n['ID']] = n
if n['parent'] is None:
root_ids.append(n['ID'])
    # merge the per-component roots into a single root node
root_id = min(root_ids)
root_children = list(itertools.chain.from_iterable([tree[i]['children'] for i in root_ids]))
root_node = {'ID': root_id, 'parent': None, 'children': root_children, 'depth': tree_depth}
[tree.pop(i) for i in root_ids]  # remove all the old root nodes
for c in root_children:  # repoint intermediate nodes to the new merged root
tree[c]['parent'] = root_id
tree[root_id] = root_node  # add the merged root node
g['tree'] = tree
return g
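# Note on the labelling above (illustration only, the numbers are hypothetical):
# for a graph with two components and tree_depth=2, intermediate nodes are first
# labelled '<component>_<depth>_<index>' (e.g. '0_1_0', '1_2_0') while leaves keep
# their original graph IDs; id_map then renumbers all non-leaf labels into
# consecutive integers before the per-component roots are merged into one root.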
def struct_tree(dataset, tree_depth):
if os.path.exists('trees/%s_%s.pickle' % (dataset, tree_depth)):
return
with open('graphs/%s.pickle' % dataset, 'rb') as fp:
g_list = pickle.load(fp)
pool_func = pool_trans_disconnected if dataset in disconnected_dataset else pool_trans
pool = Pool()
g_list = pool.map(pool_func, [(g, tree_depth) for g in g_list])
pool.close()
pool.join()
g_list = filter(lambda g: g is not None, g_list)
with open('trees/%s_%s.pickle' % (dataset, tree_depth), 'wb') as fp:
pickle.dump(list(g_list), fp)
if __name__ == '__main__':
disconnected_dataset = ['REDDITBINARY', 'REDDITMULTI5K', 'PROTEINS', 'NCI1']
for d in os.listdir('datasets'):
for k in [2, 3, 4, 5]:
print(d, k)
struct_tree(d[:-4], k)
| 33.461538
| 96
| 0.563572
|
93e7329c593ae567b1fd4420593a1f0078f785c3
| 5,691
|
py
|
Python
|
sceptre/cli/new.py
|
dennisconrad/sceptre
|
d596500eb395489d889db476b8fcf4fa5753d563
|
[
"Apache-2.0"
] | null | null | null |
sceptre/cli/new.py
|
dennisconrad/sceptre
|
d596500eb395489d889db476b8fcf4fa5753d563
|
[
"Apache-2.0"
] | 1
|
2021-03-26T00:46:38.000Z
|
2021-03-26T00:46:38.000Z
|
sceptre/cli/new.py
|
dennisconrad/sceptre
|
d596500eb395489d889db476b8fcf4fa5753d563
|
[
"Apache-2.0"
] | null | null | null |
import os
import errno
import click
import yaml
from sceptre.config.reader import STACK_GROUP_CONFIG_ATTRIBUTES
from sceptre.cli.helpers import catch_exceptions
from sceptre.exceptions import ProjectAlreadyExistsError
@click.group(name="new")
def new_group():
"""
Commands for initialising Sceptre projects.
"""
pass
@new_group.command("group")
@click.argument('stack_group')
@catch_exceptions
@click.pass_context
def new_stack_group(ctx, stack_group):
"""
Creates a new Stack Group directory in a project.
Creates StackGroup folder in the project and a config.yaml with any
required properties.
:param stack_group: Name of the StackGroup directory to create.
:type stack_group: str
"""
cwd = ctx.obj.get("project_path")
for item in os.listdir(cwd):
# If a config folder already exists, create the stack_group inside it
if os.path.isdir(item) and item == "config":
config_dir = os.path.join(os.getcwd(), "config")
_create_new_stack_group(config_dir, stack_group)
@new_group.command("project")
@catch_exceptions
@click.argument('project_name')
@click.pass_context
def new_project(ctx, project_name):
"""
Creates a new project.
Creates PROJECT_NAME project folder and a config.yaml with any
required properties.
:param project_name: The name of the Sceptre Project to create.
:type project_name: str
"""
cwd = os.getcwd()
sceptre_folders = {"config", "templates"}
project_folder = os.path.join(cwd, project_name)
try:
os.mkdir(project_folder)
except OSError as e:
# Check if the project folder already exists
if e.errno == errno.EEXIST:
raise ProjectAlreadyExistsError(
'Folder \"{0}\" already exists.'.format(project_name)
)
else:
raise
for folder in sceptre_folders:
folder_path = os.path.join(project_folder, folder)
os.makedirs(folder_path)
defaults = {
"project_code": project_name,
"region": os.environ.get("AWS_DEFAULT_REGION", "")
}
config_path = os.path.join(cwd, project_name, "config")
_create_config_file(config_path, config_path, defaults)
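# Hedged usage sketch (assuming these commands are mounted on the usual `sceptre`
# CLI entry point; adjust to your install): `sceptre new project my-project` creates
# my-project/config and my-project/templates plus a prompted config.yaml, while
# `sceptre new group my-group`, run inside a project, creates config/my-group with
# an optional config.yaml.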
def _create_new_stack_group(config_dir, new_path):
"""
Creates the subfolder for the stack_group specified by `new_path`,
starting from `config_dir`. Even if the folder path already exists,
the user is asked whether to initialise `config.yaml`.
:param config_dir: The directory path to the top-level config folder.
:type config_dir: str
:param new_path: The directory path to the stack_group folder.
:type new_path: str
"""
# Create full path to stack_group
folder_path = os.path.join(config_dir, new_path)
new_config_msg = 'Do you want to initialise config.yaml?'
# Make folders for the stack_group
try:
os.makedirs(folder_path)
except OSError as e:
# Check if stack_group folder already exists
if e.errno == errno.EEXIST:
new_config_msg =\
'StackGroup path exists. ' + new_config_msg
else:
raise
if click.confirm(new_config_msg):
_create_config_file(config_dir, folder_path)
def _get_nested_config(config_dir, path):
"""
Collects nested config between `config_dir` and `path`. Config at a
lower level has greater precedence.
:param config_dir: The directory path to the top-level config folder.
:type config_dir: str
:param path: The directory path to the stack_group folder.
:type path: str
:returns: The nested config.
:rtype: dict
"""
config = {}
for root, _, files in os.walk(config_dir):
# Check that folder is within the final stack_group path
if path.startswith(root) and "config.yaml" in files:
config_path = os.path.join(root, "config.yaml")
with open(config_path) as config_file:
config.update(yaml.safe_load(config_file))
return config
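# Illustration of the precedence above (hypothetical paths and values):
#   config/config.yaml        -> {"project_code": "demo", "region": "eu-west-1"}
#   config/prod/config.yaml   -> {"region": "us-east-1"}
# _get_nested_config("config", "config/prod") walks top-down, so the deeper file
# wins: {"project_code": "demo", "region": "us-east-1"}.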
def _create_config_file(config_dir, path, defaults={}):
"""
Creates a `config.yaml` file in the given path. The user is asked for
values for required properties. Defaults are suggested from the values in
`defaults` and then from values found in parent `config.yaml` files. If
properties and their values are the same as in a parent `config.yaml`, they
are not included. No file is produced if the required values are already
satisfied by parent `config.yaml` files.
:param config_dir: The directory path to the top-level config folder.
:type config_dir: str
:param path: The directory path to the stack_group folder.
:type path: str
:param defaults: Defaults to present to the user for config.
:type defaults: dict
"""
config = dict.fromkeys(STACK_GROUP_CONFIG_ATTRIBUTES.required, "")
parent_config = _get_nested_config(config_dir, path)
# Add standard defaults
config.update(defaults)
# Add parent config values as defaults
config.update(parent_config)
# Ask for new values
for key, value in config.items():
config[key] = click.prompt(
'Please enter a {0}'.format(key), default=value
)
# Remove values that are the same as in the parent config
config = {k: v for k, v in config.items() if parent_config.get(k) != v}
# Write config.yaml if config not empty
filepath = os.path.join(path, "config.yaml")
if config:
with open(filepath, 'w') as config_file:
yaml.safe_dump(
config, stream=config_file, default_flow_style=False
)
else:
click.echo("No config.yaml file needed - covered by parent config.")
| 31.97191
| 78
| 0.677034
|
c0547a05aef370edd1e288190f8e1124413f39a5
| 649
|
py
|
Python
|
ganonymizer-v3/app/api/controller.py
|
MSec-H2020/GANonymizer
|
3bacfbf2593e5ba9e4a8f4d654f53c8df0e55174
|
[
"Apache-2.0"
] | null | null | null |
ganonymizer-v3/app/api/controller.py
|
MSec-H2020/GANonymizer
|
3bacfbf2593e5ba9e4a8f4d654f53c8df0e55174
|
[
"Apache-2.0"
] | null | null | null |
ganonymizer-v3/app/api/controller.py
|
MSec-H2020/GANonymizer
|
3bacfbf2593e5ba9e4a8f4d654f53c8df0e55174
|
[
"Apache-2.0"
] | null | null | null |
import base64
import io
from flask import jsonify
from PIL import Image
import api.gano as gano
def health():
return jsonify({"message": "hello"}), 200
def image(inp_b64):
print("\n---------------------------------------------")
inp_io = io.BytesIO(base64.b64decode(inp_b64))
inp_io.seek(0)
inp_pil = Image.open(inp_io)
inp_pil = inp_pil.convert("RGB")
out_pil = gano.ganonymize(inp_pil)
buff = io.BytesIO()
out_pil.save(buff, format="JPEG")
out_b64 = base64.b64encode(buff.getvalue())
print("---------------------------------------------\n")
return jsonify({"image": out_b64.decode()}), 200
| 22.37931
| 60
| 0.571649
|
75eaa4e7d00bdaed8ea5196cee41c4a0b88a3756
| 5,198
|
py
|
Python
|
pagewalker/pagewalker/report/database/resource.py
|
rafal-qa/page-walker
|
8940a819d436d46f729c9307effc5118d692cad0
|
[
"MIT"
] | 16
|
2018-07-08T19:20:15.000Z
|
2021-01-08T22:08:49.000Z
|
pagewalker/pagewalker/report/database/resource.py
|
rafal-qa/page-walker
|
8940a819d436d46f729c9307effc5118d692cad0
|
[
"MIT"
] | null | null | null |
pagewalker/pagewalker/report/database/resource.py
|
rafal-qa/page-walker
|
8940a819d436d46f729c9307effc5118d692cad0
|
[
"MIT"
] | 5
|
2018-10-26T13:08:10.000Z
|
2020-06-20T21:03:17.000Z
|
class DatabaseResource(object):
def __init__(self, conn):
self.conn = conn
def resource_list_only(self):
c = self.conn.cursor()
c.execute("""
SELECT DISTINCT RS.id, RS.url, RS.url_blacklisted, RS.is_truncated, RS.is_external
FROM devtools_request AS RQ
JOIN devtools_resource AS RS
ON RQ.resource_id = RS.id
WHERE RQ.is_main = 0
""")
result = c.fetchall()
data_resources = []
for row in result:
resource_id, url, url_blacklisted, is_truncated, is_external = row
resource_data = {
"id": resource_id,
"url": url,
"url_blacklisted": url_blacklisted,
"is_truncated": is_truncated,
"is_external": is_external
}
data_resources.append(resource_data)
return data_resources
# Only finished: requests.http_status is not null
# - exclude requests that didn't receive Network.loadingFinished or Network.loadingFailed event
# - unfinished requests are not included in stats, because we don't know if this is an error
def request_stat_for_resources(self):
data_resources = self._all_finished_requests()
self._append_non_cached_requests(data_resources)
self._append_unfinished_requests(data_resources)
return data_resources
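# Shape of the merged result (values are hypothetical):
#   {resource_id: {"requests_finished": 12, "from_cache": 4,
#                  "avg_size": 20480.0, "avg_load_time": 0.35,
#                  "requests_unfinished": 1}, ...}
# a key is only present when the corresponding query returned rows for that resource.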
def _append_non_cached_requests(self, data):
for resource_id, non_cached in self._non_cached_finished_requests().items():
if resource_id not in data:
data[resource_id] = {}
data[resource_id]["avg_size"] = non_cached["avg_size"]
data[resource_id]["avg_load_time"] = non_cached["avg_load_time"]
def _append_unfinished_requests(self, data):
for resource_id, unfinished in self._unfinished_requests().items():
if resource_id not in data:
data[resource_id] = {}
data[resource_id]["requests_unfinished"] = unfinished
def _all_finished_requests(self):
c = self.conn.cursor()
c.execute("""
SELECT resource_id, COUNT(*), SUM(from_cache)
FROM devtools_request
WHERE is_main = 0 AND http_status IS NOT NULL
GROUP BY resource_id
""")
result = c.fetchall()
resources = {}
for row in result:
resource_id, requests_finished, from_cache = row
resources[resource_id] = {
"requests_finished": requests_finished,
"from_cache": from_cache
}
return resources
# avg_load_time (only non-cached requests)
# avg_size (this is resource size, so cached requests are not included)
def _non_cached_finished_requests(self):
c = self.conn.cursor()
c.execute("""
SELECT resource_id, AVG(data_received), AVG(time_load)
FROM devtools_request
WHERE is_main = 0 AND http_status IS NOT NULL AND from_cache = 0
GROUP BY resource_id
""")
result = c.fetchall()
resources = {}
for row in result:
resource_id, avg_size, avg_load_time = row
resources[resource_id] = {
"avg_size": avg_size,
"avg_load_time": avg_load_time
}
return resources
def _unfinished_requests(self):
c = self.conn.cursor()
c.execute("""
SELECT resource_id, COUNT(*)
FROM devtools_request
WHERE is_main = 0 AND http_status IS NULL
GROUP BY resource_id
""")
result = c.fetchall()
resources = {}
for row in result:
resource_id, requests_unfinished = row
resources[resource_id] = requests_unfinished
return resources
def request_error_for_resources(self):
c = self.conn.cursor()
c.execute("""
SELECT R.page_id, R.resource_id, R.http_status, E.name
FROM devtools_request AS R
LEFT JOIN devtools_request_error AS E
ON E.id = R.error_id
WHERE R.is_main = 0 AND (R.http_status >= 400 OR R.http_status = 0)
""")
result = c.fetchall()
resources = {}
for row in result:
self._append_error_info_to_resources(resources, row)
return resources
# top 10 occurrences for every failed request
def _append_error_info_to_resources(self, resources, row):
page_id, resource_id, http_status, error_name = row
if resource_id not in resources:
resources[resource_id] = {
"requests_errors": 0,
"pages_with_error": []
}
resources[resource_id]["requests_errors"] += 1
page_occurrences = len(resources[resource_id]["pages_with_error"])
max_occurrences = 10
error_data = [page_id, http_status, error_name]
if page_occurrences < max_occurrences and error_data not in resources[resource_id]["pages_with_error"]:
resources[resource_id]["pages_with_error"].append(error_data)
| 39.082707
| 111
| 0.602732
|
7bc8c5173aba14a6be079b78227f14a101b2763d
| 3,898
|
py
|
Python
|
gitlab-ci/tools/git_statistics/git_statistics.py
|
3cL1p5e7/ic
|
2b6011291d900454cedcf86ec41c8c1994fdf7d9
|
[
"Apache-2.0"
] | 941
|
2021-05-10T08:14:14.000Z
|
2022-03-31T11:40:24.000Z
|
gitlab-ci/tools/git_statistics/git_statistics.py
|
3cL1p5e7/ic
|
2b6011291d900454cedcf86ec41c8c1994fdf7d9
|
[
"Apache-2.0"
] | 4
|
2021-12-22T22:34:51.000Z
|
2022-03-31T07:34:19.000Z
|
gitlab-ci/tools/git_statistics/git_statistics.py
|
3cL1p5e7/ic
|
2b6011291d900454cedcf86ec41c8c1994fdf7d9
|
[
"Apache-2.0"
] | 122
|
2021-05-10T08:21:23.000Z
|
2022-03-25T20:34:12.000Z
|
import os
import re
import subprocess
from io import StringIO
import pandas as pd
pd.options.mode.chained_assignment = None
REPOSITORIES = [
"nns-dapp",
"internet-identity",
"motoko",
"sdk",
"ic",
"docs",
"agent-js",
"ic-staking-documentation",
"keysmith",
"examples",
]
COLUMNS = [
"repository",
"commits_2021",
"commits_dec_2021",
"u_devs_2021",
"u_devs_dec",
"u_devs_over_10_commits_dec",
"u_devs_over_100_lines_dec",
]
BEG_YEAR = 1609459200
END_YEAR = 1640995199
BEG_DEC = 1638316800
def clone_repositories(repositories):
print("--- CLONING REPOSITORIES ---")
# Create the repositories/ folder if it doesn't exist
try:
os.mkdir("repositories")
except OSError:
pass
for repository in repositories:
subprocess.run(["git", "clone", f"git@github.com:dfinity/{repository}.git"], cwd="repositories/")
print("")
def get_commit_history(repository):
path = "raw_history.txt"
raw_history = open(path, "w")
subprocess.run(
["git", "--no-pager", "log", "--shortstat", "--pretty=format:%ae,%ct"],
cwd=f"repositories/{repository}/",
stdout=raw_history,
)
raw_history.close()
raw_history = open(path, "r").read()
git_log = re.sub(
r"(^.+,\d+)\n [0-9]* file(s)? changed(, ([0-9]*) insertion(s)?\(\+\))?(, ([0-9]*) deletion(s)?\(-\))?\n$",
r"\g<1>,\g<4>,\g<7>",
raw_history,
flags=re.MULTILINE,
)
os.remove(path)
csv = StringIO("email,timestamp,additions,deletions\n" + git_log)
return pd.read_csv(csv)
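# Illustration of the transformation above (hypothetical log excerpt):
#   dev@example.com,1638400000
#    3 files changed, 120 insertions(+), 8 deletions(-)
# becomes the CSV row:
#   dev@example.com,1638400000,120,8
# so the resulting DataFrame has columns email, timestamp, additions, deletions.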
def get_commits_in_range(df, since, until):
return df.loc[(df["timestamp"] > since) & (df["timestamp"] < until)]
def get_u_devs(df):
return df.groupby("email").size().reset_index(name="counts")
def get_u_devs_commit_threshold(u_devs, commit_threshold):
df = u_devs
return df.loc[df["counts"] >= commit_threshold]
def get_u_devs_lines_threshold(df, lines_threshold):
df["lines"] = df["additions"] + df["deletions"]
df = df.groupby("email").agg({"lines": "sum"}).reset_index()
return df.loc[df["lines"] >= lines_threshold]
def get_emails(df):
if "email" in df.columns:
return df["email"].tolist()
return []
def unique_total(df, column):
devs = df[column].tolist()
# Remove None items
devs = [item for item in devs if item]
# Flatten list
devs = [item for sublist in devs for item in sublist]
return len(set(devs))
clone_repositories(REPOSITORIES)
raw_data = []
print("--- RETRIEVING COMMIT HISTORY ---")
for repository in REPOSITORIES:
df = get_commit_history(repository)
df_2021 = get_commits_in_range(df, BEG_YEAR, END_YEAR)
df_dec = get_commits_in_range(df, BEG_DEC, END_YEAR)
commits_2021 = len(df_2021.index)
commits_dec = len(df_dec.index)
# u for unique
u_devs_2021 = get_u_devs(df_2021)
u_devs_dec = get_u_devs(df_dec)
u_devs_over_10_commits_dec = get_emails(get_u_devs_commit_threshold(u_devs_dec, 10))
u_devs_over_100_lines_dec = get_emails(get_u_devs_lines_threshold(df_dec, 100))
raw_data.append(
[
repository,
commits_2021,
commits_dec,
get_emails(u_devs_2021),
get_emails(u_devs_dec),
u_devs_over_10_commits_dec,
u_devs_over_100_lines_dec,
]
)
print("--- COMPUTING TOTALS ---")
raw_data.append(["total"])
df = pd.DataFrame(raw_data, columns=COLUMNS)
df.at[10, "commits_2021"] = df["commits_2021"].sum()
df.at[10, "commits_dec_2021"] = df["commits_dec_2021"].sum()
# Computing unions
for column in COLUMNS[3:]:
df.at[10, column] = unique_total(df, column)
df[column] = df[column].apply(lambda x: x if type(x) is int else len(set(x)))
print(df)
print("--- SAVING CSV ---")
df.to_csv("output.csv", index=False)
print("Done: output.csv")
| 24.987179
| 114
| 0.641868
|
f23603cf15b80660aa76284116c1923e465d7a04
| 4,105
|
py
|
Python
|
check_uptime_by_ssh.py
|
ovh/check-linux-by-ssh
|
e04c8df797818aabe492d687c46972cd212b6997
|
[
"MIT"
] | 13
|
2015-01-19T22:53:22.000Z
|
2018-07-14T20:48:30.000Z
|
check_uptime_by_ssh.py
|
ovh/check-linux-by-ssh
|
e04c8df797818aabe492d687c46972cd212b6997
|
[
"MIT"
] | null | null | null |
check_uptime_by_ssh.py
|
ovh/check-linux-by-ssh
|
e04c8df797818aabe492d687c46972cd212b6997
|
[
"MIT"
] | 4
|
2015-12-19T20:36:43.000Z
|
2021-06-05T16:21:32.000Z
|
#!/usr/bin/env python
# Copyright (C) 2013:
# Gabes Jean, naparuba@gmail.com
# Pasche Sebastien, sebastien.pasche@leshop.ch
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
'''
This script checks the system uptime over ssh without
having an agent on the other side
'''
import os
import sys
import optparse
# Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"
DEFAULT_WARNING = '0' # There is no warning, only critical
DEFAULT_CRITICAL = '3600'
def get_uptime(client):
# We are looking for a line like
#5265660.84 4856671.67
raw = r"""cat /proc/uptime"""
stdin, stdout, stderr = client.exec_command(raw)
line = [l for l in stdout][0].strip()
uptime, _ = tuple([int(float(v)) for v in line.split(' ')])
# Before return, close the client
client.close()
return uptime
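# Illustration (values are made up): /proc/uptime contains two floats, e.g.
#   "5265660.84 4856671.67"
# the first is seconds since boot, the second is aggregate idle time; get_uptime()
# returns the first one truncated to an int (5265660 here).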
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-H', '--hostname',
dest="hostname", help='Hostname to connect to')
parser.add_option('-p', '--port',
dest="port", type="int", default=22,
help='SSH port to connect to. Default : 22')
parser.add_option('-i', '--ssh-key',
dest="ssh_key_file",
help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('-u', '--user',
dest="user", help='remote use to use. By default shinken.')
parser.add_option('-P', '--passphrase',
dest="passphrase", help='SSH key passphrase. By default will use void')
parser.add_option('-c', '--critical',
dest="critical", help='Critical value for uptime in seconds. Less means critical error. Default : 3600')
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
hostname = opts.hostname or ''
port = opts.port
ssh_key_file = opts.ssh_key_file or os.path.expanduser('~/.ssh/id_rsa')
user = opts.user or 'shinken'
passphrase = opts.passphrase or ''
# Try to get numeric warning/critical values
s_warning = DEFAULT_WARNING
s_critical = opts.critical or DEFAULT_CRITICAL
_, critical = schecks.get_warn_crit(s_warning, s_critical)
# Ok now connect, and try to get values for memory
client = schecks.connect(hostname, port, ssh_key_file, passphrase, user)
uptime = get_uptime(client)
# Default exit status is OK; only the critical threshold applies to uptime
status = 0
s_pretty_uptime = '%ddays' % (float(uptime) / 86400)
# Only look at critical level here, don't care about warning one
if uptime < critical:
print "Critical: uptime is %ds | uptime=%ds" %(uptime, uptime)
sys.exit(2)
print "Ok: uptime is %s (%ds) | uptime=%ds" % (s_pretty_uptime, uptime, uptime)
sys.exit(0)
| 33.104839
| 122
| 0.692083
|
76eafba3fa434b84710216a668007383141525bf
| 92
|
py
|
Python
|
redeer/gui/forms.py
|
davidszotten/redeer
|
b6a7a2cc15e4b90b3868042d9131f9d10dcfb4d1
|
[
"MIT"
] | 1
|
2019-06-27T13:24:17.000Z
|
2019-06-27T13:24:17.000Z
|
redeer/gui/forms.py
|
davidszotten/redeer
|
b6a7a2cc15e4b90b3868042d9131f9d10dcfb4d1
|
[
"MIT"
] | null | null | null |
redeer/gui/forms.py
|
davidszotten/redeer
|
b6a7a2cc15e4b90b3868042d9131f9d10dcfb4d1
|
[
"MIT"
] | null | null | null |
from django import forms
class UploadForm(forms.Form):
reader_xml = forms.FileField()
| 15.333333
| 34
| 0.75
|
22bed998f858c0ccc17ec9f605d1e155a60feb7d
| 63,433
|
py
|
Python
|
skbio/io/tests/test_registry.py
|
danodonovan/scikit-bio
|
06e0b513bfc8ef68e0899596d002b5c112049382
|
[
"BSD-3-Clause"
] | null | null | null |
skbio/io/tests/test_registry.py
|
danodonovan/scikit-bio
|
06e0b513bfc8ef68e0899596d002b5c112049382
|
[
"BSD-3-Clause"
] | null | null | null |
skbio/io/tests/test_registry.py
|
danodonovan/scikit-bio
|
06e0b513bfc8ef68e0899596d002b5c112049382
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from io import StringIO
import io
import itertools
import os
import unittest
import warnings
import types
from tempfile import mkstemp
from skbio.io import (FormatIdentificationWarning, UnrecognizedFormatError,
ArgumentOverrideWarning, io_registry, sniff,
create_format)
from skbio.io.registry import (IORegistry, FileSentinel, Format,
DuplicateRegistrationError,
InvalidRegistrationError)
from skbio.util import get_data_path
from skbio.util._exception import TestingUtilError
from skbio import DNA, read, write
class MockClass:
def __init__(self, x):
self.list = x
def __eq__(self, other):
# They are only equal when the class is EXACTLY the same. We don't want
# readers to return knockoff instances...
return self.__class__ is other.__class__ and self.list == other.list
def __repr__(self):
return "%s(%s)" % (str(self.__class__.__name__), str(self.list))
class MockClassA(MockClass):
pass
class MockClassB(MockClass):
pass
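# Quick orientation for the tests below (a sketch of the API they exercise, not a
# spec): a Format is created via registry.create_format('name'), sniffers/readers/
# writers are attached with the @format.sniffer()/@format.reader(cls)/@format.writer(cls)
# decorators, and registry.sniff/read/write then dispatch on the registered hooks.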
class TestFormatAndIORegistry(unittest.TestCase):
def test_add_duplicate_format(self):
f = Format('Example')
r = IORegistry()
r.add_format(f)
with self.assertRaises(DuplicateRegistrationError):
r.add_format(Format('Example'))
class RegistryTest(unittest.TestCase):
def setUp(self):
self.registry = IORegistry()
self.fd1, self.fp1 = mkstemp()
self.fd2, self.fp2 = mkstemp()
def tearDown(self):
os.remove(self.fp1)
os.close(self.fd1)
os.remove(self.fp2)
os.close(self.fd2)
class TestRegisterAndGetReader(RegistryTest):
def test_get_reader_no_match(self):
self.assertIs(None, self.registry.get_reader('not_a_format',
MockClass))
def test_get_reader_when_only_writer_exists(self):
format1 = self.registry.create_format('format1')
@format1.writer(MockClass)
def format_writer(fh):
return
self.assertEqual(None, self.registry.get_reader('format', MockClass))
def test_register_reader_on_many(self):
format1 = self.registry.create_format('format1')
format2 = self.registry.create_format('format2')
format3 = self.registry.create_format('format3')
format4 = self.registry.create_format('format4', encoding='binary')
format5 = self.registry.create_format('format5', encoding='binary')
@format1.reader(MockClassA)
def format1_reader(fh):
return
@format1.reader(MockClassB)
def format1_reader_b(fh):
return
@format2.reader(MockClassA)
def format2_reader(fh):
return
@format3.reader(MockClassB)
def format3_reader(fh):
return
@format4.reader(MockClassA)
def format4_reader(fh):
return
@format4.reader(MockClassB)
def format4_reader_b(fh):
return
@format5.reader(None)
def format5_reader(fh):
return
self.assertIs(format1_reader,
self.registry.get_reader('format1', MockClassA))
self.assertIs(format1_reader_b,
self.registry.get_reader('format1', MockClassB))
self.assertIs(format2_reader,
self.registry.get_reader('format2', MockClassA))
self.assertIs(None, self.registry.get_reader('format2', MockClassB))
self.assertIs(None, self.registry.get_reader('format3', MockClassA))
self.assertIs(format3_reader,
self.registry.get_reader('format3', MockClassB))
self.assertIs(format4_reader,
self.registry.get_reader('format4', MockClassA))
self.assertIs(format4_reader_b,
self.registry.get_reader('format4', MockClassB))
self.assertIs(format5_reader,
self.registry.get_reader('format5', None))
self.assertIs(None, self.registry.get_reader('format5', MockClassA))
self.assertIs(None, self.registry.get_reader('format5', MockClassB))
def test_register_reader_over_existing(self):
format1 = self.registry.create_format('format1')
with self.assertRaises(DuplicateRegistrationError) as cm:
@format1.reader(MockClassA)
def format1_reader(fh):
return
@format1.reader(MockClassA)
def duplicate_format1_reader(fh):
return
self.assertTrue('format1' in str(cm.exception))
self.assertTrue('reader' in str(cm.exception))
self.assertTrue(MockClassA.__name__ in str(cm.exception))
def test_register_reader_over_existing_override(self):
format1 = self.registry.create_format('format1')
@format1.reader(MockClassA)
def format1_reader(fh):
return
self.assertIs(format1_reader,
self.registry.get_reader('format1', MockClassA))
@format1.reader(MockClassA, override=True)
def duplicate_format1_reader(fh):
return
self.assertIs(duplicate_format1_reader,
self.registry.get_reader('format1', MockClassA))
def test_mistype_reader_registration(self):
format1 = self.registry.create_format('format1')
with self.assertRaises(InvalidRegistrationError):
@format1.reader
def left_out_parens(fh):
return
class TestRegisterAndGetWriter(RegistryTest):
def test_get_writer_no_match(self):
self.assertEqual(None, self.registry.get_writer('not_a_format',
MockClass))
def test_get_writer_when_only_reader_exists(self):
format = self.registry.create_format('format')
@format.reader(MockClass)
def format_reader(fh):
return
self.assertEqual(None, self.registry.get_writer('format', MockClass))
def test_register_writer_on_many(self):
format1 = self.registry.create_format('format1')
format2 = self.registry.create_format('format2')
format3 = self.registry.create_format('format3')
format4 = self.registry.create_format('format4', encoding='binary')
format5 = self.registry.create_format('format5', encoding='binary')
@format1.writer(MockClassA)
def format1_writer(obj, fh):
return
@format1.writer(MockClassB)
def format1_writer_b(obj, fh):
return
@format2.writer(MockClassA)
def format2_writer(obj, fh):
return
@format3.writer(MockClassB)
def format3_writer(obj, fh):
return
@format4.writer(MockClassA)
def format4_writer(fh):
return
@format4.writer(MockClassB)
def format4_writer_b(fh):
return
@format5.writer(None)
def format5_writer(fh):
return
self.assertEqual(format1_writer,
self.registry.get_writer('format1', MockClassA))
self.assertEqual(format1_writer_b,
self.registry.get_writer('format1', MockClassB))
self.assertEqual(format2_writer,
self.registry.get_writer('format2', MockClassA))
self.assertEqual(None,
self.registry.get_writer('format2', MockClassB))
self.assertEqual(None,
self.registry.get_writer('format3', MockClassA))
self.assertEqual(format3_writer,
self.registry.get_writer('format3', MockClassB))
self.assertIs(format4_writer,
self.registry.get_writer('format4', MockClassA))
self.assertIs(format4_writer_b,
self.registry.get_writer('format4', MockClassB))
self.assertIs(format5_writer,
self.registry.get_writer('format5', None))
self.assertIs(None, self.registry.get_writer('format5', MockClassA))
self.assertIs(None, self.registry.get_writer('format5', MockClassB))
def test_register_writer_over_existing(self):
format1 = self.registry.create_format('format1')
with self.assertRaises(DuplicateRegistrationError) as cm:
@format1.writer(MockClassA)
def format1_writer(obj, fh):
return
@format1.writer(MockClassA)
def duplicate_format1_writer(obj, fh):
return
self.assertTrue('format1' in str(cm.exception))
self.assertTrue('writer' in str(cm.exception))
self.assertTrue(MockClassA.__name__ in str(cm.exception))
def test_register_writer_over_existing_override(self):
format1 = self.registry.create_format('format1')
@format1.writer(MockClassA)
def format1_writer(obj, fh):
return
self.assertIs(format1_writer,
self.registry.get_writer('format1', MockClassA))
@format1.writer(MockClassA, override=True)
def duplicate_format1_writer(obj, fh):
return
self.assertIs(duplicate_format1_writer,
self.registry.get_writer('format1', MockClassA))
def test_mistype_writer_registration(self):
format1 = self.registry.create_format('format1')
with self.assertRaises(InvalidRegistrationError):
@format1.writer
def left_out_parens(fh):
return
class TestRegisterAndGetSniffer(RegistryTest):
def test_get_sniffer_no_match(self):
self.assertEqual(None, self.registry.get_sniffer('not_a_format'))
def test_register_sniffer_on_many(self):
format1 = self.registry.create_format('format1')
format2 = self.registry.create_format('format2')
format3 = self.registry.create_format('format3', encoding='binary')
@format1.sniffer()
def format1_sniffer(fh):
return '1' in fh.readline(), {}
@format2.sniffer()
def format2_sniffer(fh):
return '2' in fh.readline(), {}
@format3.sniffer()
def format3_sniffer(fh):
return '3' in fh.readline(), {}
self.assertEqual(format1_sniffer,
self.registry.get_sniffer('format1'))
self.assertEqual(format2_sniffer,
self.registry.get_sniffer('format2'))
self.assertEqual(format3_sniffer,
self.registry.get_sniffer('format3'))
def test_register_sniffer_over_existing(self):
format1 = self.registry.create_format('format1')
with self.assertRaises(DuplicateRegistrationError) as cm:
@format1.sniffer()
def format1_sniffer(fh):
return False, {}
@format1.sniffer()
def duplicate_format1_sniffer(fh):
return False, {}
self.assertTrue('format1' in str(cm.exception))
def test_register_sniffer_over_existing_override(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def format1_sniffer(fh):
return False, {}
self.assertIs(self.registry.get_sniffer('format1'), format1_sniffer)
@format1.sniffer(override=True)
def duplicate_format1_sniffer(fh):
return False, {}
self.assertIs(self.registry.get_sniffer('format1'),
duplicate_format1_sniffer)
def test_sniffer_warns_on_exception(self):
format = self.registry.create_format('format')
@format.sniffer()
def format_sniffer(fh):
raise TestingUtilError("Sniffer will return False and warn.")
fh = StringIO()
sniffer = self.registry.get_sniffer('format')
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
with self.assertRaises(FormatIdentificationWarning):
sniffer(fh)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
result, kwargs = sniffer(fh)
self.assertFalse(result)
self.assertEqual({}, kwargs)
fh.close()
def test_mistype_sniffer_registration(self):
format1 = self.registry.create_format('format1')
with self.assertRaises(InvalidRegistrationError):
@format1.sniffer
def left_out_parens(fh):
return
class TestListReadFormats(RegistryTest):
def test_no_read_formats(self):
format1 = self.registry.create_format('format1')
@format1.reader(MockClassA)
def this_isnt_on_clsB(fh):
return
self.assertEqual([], self.registry.list_read_formats(MockClassB))
def test_one_read_format(self):
format1 = self.registry.create_format('format1')
@format1.reader(MockClass)
def format1_cls(fh):
return
self.assertEqual(['format1'],
self.registry.list_read_formats(MockClass))
def test_many_read_formats(self):
format1 = self.registry.create_format('format1')
format2 = self.registry.create_format('format2')
format3 = self.registry.create_format('format3', encoding='binary')
format4 = self.registry.create_format('format4')
format5 = self.registry.create_format('format5', encoding='binary')
@format1.reader(MockClassA)
def format1_clsA(fh):
return
@format2.reader(MockClassA)
def format2_clsA(fh):
return
@format3.reader(MockClassA)
def format3_clsA(fh):
return
@format3.reader(MockClassB)
def format3_clsB(fh):
return
@format4.reader(MockClassB)
def format4_clsB(fh):
return
@format5.writer(MockClassA)
def format5_clsA(fh):
return
formats = self.registry.list_read_formats(MockClassA)
self.assertTrue('format1' in formats)
self.assertTrue('format2' in formats)
self.assertTrue('format3' in formats)
self.assertTrue('format4' not in formats)
self.assertTrue('format5' not in formats)
class TestListWriteFormats(RegistryTest):
def test_no_write_formats(self):
format1 = self.registry.create_format('format1')
@format1.writer(MockClassA)
def this_isnt_on_clsB(fh):
return
self.assertEqual([], self.registry.list_write_formats(MockClassB))
def test_one_write_format(self):
format1 = self.registry.create_format('format1')
@format1.writer(MockClass)
def format1_cls(fh):
return
self.assertEqual(['format1'],
self.registry.list_write_formats(MockClass))
def test_many_write_formats(self):
format1 = self.registry.create_format('format1')
format2 = self.registry.create_format('format2')
format3 = self.registry.create_format('format3', encoding='binary')
format4 = self.registry.create_format('format4')
format5 = self.registry.create_format('format5', encoding='binary')
@format1.writer(MockClassA)
def format1_clsA(fh):
return
@format2.writer(MockClassA)
def format2_clsA(fh):
return
@format3.writer(MockClassA)
def format3_clsA(fh):
return
@format3.writer(MockClassB)
def format3_clsB(fh):
return
@format4.writer(MockClassB)
def format4_clsB(fh):
return
@format5.reader(MockClassA)
def format5_clsA(fh):
return
formats = self.registry.list_write_formats(MockClassA)
self.assertTrue('format1' in formats)
self.assertTrue('format2' in formats)
self.assertTrue('format3' in formats)
self.assertTrue('format4' not in formats)
self.assertTrue('format5' not in formats)
class TestSniff(RegistryTest):
def setUp(self):
super(TestSniff, self).setUp()
format1 = self.registry.create_format('format1')
format2 = self.registry.create_format('format2')
format3 = self.registry.create_format('format3')
format4 = self.registry.create_format('format4')
# No sniffer for this format:
self.registry.create_format('format5')
@format1.sniffer()
def format1_sniffer(fh):
return '1' in fh.readline(), {}
@format2.sniffer()
def format2_sniffer(fh):
return '2' in fh.readline(), {}
@format3.sniffer()
def format3_sniffer(fh):
return '3' in fh.readline(), {}
@format4.sniffer()
def format4_sniffer(fh):
return '4' in fh.readline(), {}
@format3.reader(MockClass)
def reader3(fh):
return
@format4.reader(MockClass)
def reader4(fh):
return
def test_no_matches(self):
fh = StringIO("no matches here")
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.sniff(fh)
self.assertTrue(str(fh) in str(cm.exception))
fh.close()
def test_one_match(self):
fh = StringIO("contains a 3")
self.assertEqual('format3', self.registry.sniff(fh)[0])
def test_many_matches(self):
fh = StringIO("1234 will match all")
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.sniff(fh)
self.assertTrue("format1" in str(cm.exception))
self.assertTrue("format2" in str(cm.exception))
self.assertTrue("format3" in str(cm.exception))
self.assertTrue("format4" in str(cm.exception))
fh.close()
def test_that_encoding_is_used(self):
formatx = self.registry.create_format('formatx')
fp = get_data_path('big5_file')
@formatx.sniffer()
def sniffer(fh):
self.assertEqual('big5', fh.encoding)
return True, {}
fmt, _ = self.registry.sniff(fp, encoding='big5')
self.assertEqual(fmt, 'formatx')
def test_passing_newline_raises_error(self):
formatx = self.registry.create_format('formatx')
fp = get_data_path('real_file')
@formatx.sniffer()
def sniffer(fh):
return True, {}
with self.assertRaisesRegex(TypeError, r'`newline`'):
self.registry.sniff(fp, newline='\r')
def test_non_default_encoding(self):
big5_format = self.registry.create_format('big5_format',
encoding='big5')
@big5_format.sniffer()
def sniffer(fh):
self.assertEqual(self._expected_encoding, fh.encoding)
return True, {}
self._expected_encoding = 'big5'
fmt, _ = self.registry.sniff(self.fp1)
self.assertEqual(fmt, 'big5_format')
self._expected_encoding = 'UTF-8'
fmt, _ = self.registry.sniff(self.fp1, encoding='UTF-8')
self.assertEqual(fmt, 'big5_format')
def test_non_default_newline(self):
formatx = self.registry.create_format('formatx', newline='\r')
fp = get_data_path('real_file')
@formatx.sniffer()
def sniffer(fh):
self.assertEqual(fh.readlines(), ['a\nb\nc\nd\ne\n'])
return True, {}
fmt, _ = self.registry.sniff(fp)
self.assertEqual(fmt, 'formatx')
def test_position_not_mutated_real_file(self):
formatx = self.registry.create_format('formatx')
@formatx.sniffer()
def sniffer(fh):
return True, {}
with io.open(get_data_path('real_file')) as fh:
fh.seek(2)
self.registry.sniff(fh)
self.assertEqual(fh.tell(), 2)
self.assertEqual('b\n', fh.readline())
def test_position_not_mutated_fileish(self):
formatx = self.registry.create_format('formatx')
@formatx.sniffer()
def sniffer(fh):
return True, {}
fh = StringIO('a\nb\nc\nd\n')
fh.seek(2)
self.registry.sniff(fh)
self.assertEqual('b\n', fh.readline())
def test_sniff_with_errors_in_sniffer(self):
formatx = self.registry.create_format('formatx', encoding='ascii')
@formatx.sniffer()
def sniffer(fh):
raise Exception("OH NO!")
fp = get_data_path('big5_file')
with warnings.catch_warnings(record=True):
warnings.simplefilter('error')
with self.assertRaises(FormatIdentificationWarning):
fmt, _ = self.registry.sniff(fp)
def test_sniff_with_encoding_errors(self):
formatx = self.registry.create_format('formatx', encoding='ascii')
@formatx.sniffer()
def sniffer(fh):
fh.read()
return True, {}
fp = get_data_path('big5_file')
with self.assertRaises(UnrecognizedFormatError):
fmt, _ = self.registry.sniff(fp, errors='strict')
# errors is set to ignore by default, so our sniffer will return
# true even though read() didn't entirely work for ascii
fmt, _ = self.registry.sniff(fp)
self.assertEqual(fmt, 'formatx')
def test_binary_sniffer(self):
binf = self.registry.create_format('binf', encoding='binary')
@binf.sniffer()
def sniffer(fh):
self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
return True, {}
fmt, _ = self.registry.sniff(self.fp1)
self.assertEqual(fmt, 'binf')
def test_text_sniffer(self):
textf = self.registry.create_format('textf', encoding=None)
@textf.sniffer()
def sniffer(fh):
self.assertIsInstance(fh, io.TextIOBase)
return True, {}
fmt, _ = self.registry.sniff(self.fp1)
self.assertEqual(fmt, 'textf')
def test_sniff_with_illegal_encoding(self):
binf = self.registry.create_format('binf', encoding='binary')
textf = self.registry.create_format('textf', encoding=None)
@binf.sniffer()
def binf_sniffer(fh):
return True, {}
@textf.sniffer()
def textf_sniffer(fh):
return True, {}
# Should skip binary sniffers
fmt, _ = self.registry.sniff(self.fp1, encoding=None)
self.assertEqual(fmt, 'textf')
# Should skip text sniffers
fmt, _ = self.registry.sniff(self.fp1, encoding='binary')
self.assertEqual(fmt, 'binf')
with self.assertRaises(ValueError):
self.registry.sniff(['some content\n'], encoding='binary')
with self.assertRaises(ValueError):
binf_sniffer(self.fp1, encoding=None)
with self.assertRaises(ValueError):
textf_sniffer(self.fp1, encoding='binary')
def test_binary_fall_through(self):
binf = self.registry.create_format('binf', encoding='binary')
textf = self.registry.create_format('textf', encoding=None)
@binf.sniffer()
def binf_sniffer(fh):
self._check_binf = True
return False, {}
@textf.sniffer()
def textf_sniffer(fh):
self._check_textf = True
return True, {}
self._check_binf = False
self._check_textf = False
fmt, _ = self.registry.sniff(self.fp1)
self.assertEqual(fmt, 'textf')
self.assertTrue(self._check_binf)
self.assertTrue(self._check_textf)
def test_sniff_gzip(self):
expected = "This is some content\nIt occurs on more than one line\n"
formata = self.registry.create_format('formata', encoding='binary')
formatb = self.registry.create_format('formatb')
formatc = self.registry.create_format('formatc')
@formata.sniffer()
def formata_sniffer(fh):
self._check_f1 = True
self.assertEqual(fh.read(), expected.encode('ascii'))
return False, {}
@formatb.sniffer()
def formatb_sniffer(fh):
self._check_f2 = True
self.assertEqual(fh.read(), expected)
return True, {}
@formatc.sniffer()
def formatc_sniffer(fh):
self._check_f3 = True
self.assertEqual(fh.read(), expected)
return False, {}
self._check_f1 = False
self._check_f2 = False
self._check_f3 = False
self.registry.sniff(get_data_path('example_file.gz'))
self.assertTrue(self._check_f1)
self.assertTrue(self._check_f2)
self.assertTrue(self._check_f3)
def test_text_skip_binary(self):
binf = self.registry.create_format('binf', encoding='binary')
textf = self.registry.create_format('textf', encoding=None)
@binf.sniffer()
def binf_sniffer(fh):
self._check_binf = True
return True, {}
@textf.sniffer()
def textf_sniffer(fh):
self._check_textf = True
return True, {}
self._check_binf = False
self._check_textf = False
fmt, _ = self.registry.sniff(['text'])
self.assertEqual(fmt, 'textf')
self.assertFalse(self._check_binf)
self.assertTrue(self._check_textf)
self._check_binf = False
self._check_textf = False
fmt, _ = self.registry.sniff(self.fp1, encoding=None)
self.assertEqual(fmt, 'textf')
self.assertFalse(self._check_binf)
self.assertTrue(self._check_textf)
def test_text_skip_text(self):
binf = self.registry.create_format('binf', encoding='binary')
textf = self.registry.create_format('textf', encoding=None)
@binf.sniffer()
def binf_sniffer(fh):
self._check_binf = True
return True, {}
@textf.sniffer()
def textf_sniffer(fh):
self._check_textf = True
return True, {}
self._check_binf = False
self._check_textf = False
fmt, _ = self.registry.sniff(self.fp1, encoding='binary')
self.assertEqual(fmt, 'binf')
self.assertTrue(self._check_binf)
self.assertFalse(self._check_textf)
class TestRead(RegistryTest):
def test_format_and_into_are_none(self):
fh = StringIO()
with self.assertRaises(ValueError):
self.registry.read(fh)
fh.close()
def test_format_is_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh):
self.assertIsInstance(fh, io.TextIOBase)
return MockClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fh, into=MockClass)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
fh.close()
def test_into_is_none_and_no_generator_reader(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.reader(MockClass)
def reader(fh):
self.assertIsInstance(fh, io.TextIOBase)
return
with self.assertRaisesRegex(UnrecognizedFormatError,
r"Cannot read 'format1'.*Possible.*include"
": MockClass"):
self.registry.read(fh, format='format1')
def test_into_is_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.reader(None)
def reader(fh):
self.assertIsInstance(fh, io.TextIOBase)
yield from [int(x) for x in fh.read().split('\n')]
generator = self.registry.read(fh, format='format1')
self.assertIsInstance(generator, types.GeneratorType)
first_run = True
for a, b in zip(generator, [1, 2, 3, 4]):
if first_run:
fh.seek(3)
first_run = False
self.assertEqual(a, b)
self.assertEqual(3, fh.tell())
fh.close()
def test_into_is_none_real_file(self):
format1 = self.registry.create_format('format1')
fp = self.fp1
with open(fp, 'w') as fh:
fh.write('1\n2\n3\n4')
self._test_fh = None
@format1.reader(None)
def reader(fh):
self._test_fh = fh
yield from [int(x) for x in fh.read().split('\n')]
generator = self.registry.read(fp, format='format1')
for a, b in itertools.zip_longest(generator, [1, 2, 3, 4]):
self.assertEqual(a, b)
self.assertTrue(self._test_fh.closed)
def test_reader_does_not_exist(self):
fh = StringIO()
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.read(fh, format='not_a_format', into=MockClass)
self.assertTrue(MockClass.__name__ in str(cm.exception))
self.assertTrue('not_a_format' in str(cm.exception))
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.read(fh, format='not_a_format2')
self.assertTrue('generator' in str(cm.exception))
self.assertTrue('not_a_format2' in str(cm.exception))
def test_reader_exists_with_verify_true(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
self.was_verified = True
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh):
return MockClass([int(x) for x in fh.read().split('\n')])
self.was_verified = False
instance = self.registry.read(fh, format='format1', into=MockClass,
verify=True)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
# Remove if read-context management is supported in the future.
fh.seek(0)
self.was_verified = False
instance = self.registry.read(fh, format='format1', into=MockClass)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
fh.close()
def test_warning_raised(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
self.was_verified = True
return False, {}
@format1.reader(MockClass)
def reader(fh):
return MockClass([int(x) for x in fh.read().split('\n')])
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
with self.assertRaises(FormatIdentificationWarning):
self.was_verified = False
instance = self.registry.read(fh, format='format1',
into=MockClass, verify=True)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
with self.assertRaises(FormatIdentificationWarning):
self.was_verified = False
instance = self.registry.read(fh, format='format1',
into=MockClass)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
fh.close()
def test_reader_exists_with_verify_false(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
self.was_verified = True
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh):
return MockClass([int(x) for x in fh.read().split('\n')])
self.was_verified = False
instance = self.registry.read(fh, format='format1', into=MockClass,
verify=False)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
self.assertFalse(self.was_verified)
fh.close()
def test_reader_exists_real_file(self):
format1 = self.registry.create_format('format1')
fp = self.fp1
with open(fp, 'w') as fh:
fh.write('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh):
return MockClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fp, format='format1', into=MockClass)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
def test_read_kwargs_passed_generator(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def sniffer(fh):
return True, {'arg1': 15, 'arg2': 'abc'}
@format1.reader(None)
def reader(fh, **kwargs):
self.assertEqual(kwargs['arg1'], 15)
self.assertEqual(kwargs['arg2'], 'abc')
self.assertEqual(kwargs['arg3'], [1])
yield
next(self.registry.read(StringIO(), format='format1', arg3=[1]))
def test_read_kwargs_passed_and_override(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def sniffer(fh):
return True, {'arg1': 15, 'arg2': 'abc', 'override': 30}
@format1.reader(MockClass)
def reader(fh, **kwargs):
self.assertEqual(kwargs['arg1'], 15)
self.assertEqual(kwargs['arg2'], 'abc')
self.assertEqual(kwargs['arg3'], [1])
return
self.registry.read(StringIO('notempty'), into=MockClass, arg3=[1])
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
# Should raise no warning and thus no error.
self.registry.read(StringIO('notempty'), into=MockClass, arg3=[1],
override=30)
# Should raise a warning and thus an error.
with self.assertRaises(ArgumentOverrideWarning):
self.registry.read(StringIO('notempty'), into=MockClass,
arg3=[1], override=100)
def test_that_encoding_is_used(self):
format1 = self.registry.create_format('format1')
fp = get_data_path('big5_file')
@format1.sniffer()
def sniffer(fh):
return '\u4f60' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh):
self.assertEqual(self._expected_enc, fh.encoding)
return MockClass(fh.readlines())
@format1.reader(None)
def reader_gen(fh):
self.assertEqual(self._expected_enc, fh.encoding)
yield MockClass(fh.readlines())
self._expected_enc = 'big5'
instance = self.registry.read(fp, into=MockClass, encoding='big5')
self.assertEqual(MockClass(['\u4f60\u597d\n']), instance)
self._expected_enc = 'big5'
gen = self.registry.read(fp, format='format1', encoding='big5')
self.assertEqual(MockClass(['\u4f60\u597d\n']), next(gen))
def test_non_default_encoding(self):
format1 = self.registry.create_format('format1', encoding='big5')
fp = get_data_path('big5_file')
@format1.sniffer()
def sniffer(fh):
return True, {}
@format1.reader(MockClass)
def reader(fh):
self.assertEqual(self._expected_enc, fh.encoding)
return MockClass(fh.readlines())
@format1.reader(None)
def reader_gen(fh):
self.assertEqual(self._expected_enc, fh.encoding)
yield MockClass(fh.readlines())
self._expected_enc = 'big5'
instance = self.registry.read(fp, into=MockClass)
self.assertEqual(MockClass(['\u4f60\u597d\n']), instance)
gen = self.registry.read(fp, format='format1')
self.assertEqual(MockClass(['\u4f60\u597d\n']), next(gen))
gen.close()
self._expected_enc = 'utf8'
with self.assertRaises(UnicodeDecodeError):
self.registry.read(fp, into=MockClass, encoding='utf8')
with self.assertRaises(UnicodeDecodeError):
self.registry.read(fp, format='format1', encoding='utf8')
def test_passing_newline_raises_error(self):
formatx = self.registry.create_format('formatx')
fp = get_data_path('real_file')
@formatx.sniffer()
def sniffer(fh):
return True, {}
@formatx.reader(MockClass)
def reader(fh):
return MockClass(fh.readlines())
@formatx.reader(None)
def reader_gen(fh):
yield MockClass(fh.readlines())
with self.assertRaisesRegex(TypeError, r'`newline`'):
self.registry.read(fp, into=MockClass, newline='\r')
with self.assertRaisesRegex(TypeError, r'`newline`'):
self.registry.read(fp, format='formatx', newline='\r')
def test_non_default_newline(self):
formatx = self.registry.create_format('formatx', newline='\r')
fp = get_data_path('real_file')
@formatx.sniffer()
def sniffer(fh):
return True, {}
@formatx.reader(MockClass)
def reader(fh):
return MockClass(fh.readlines())
@formatx.reader(None)
def reader_gen(fh):
yield MockClass(fh.readlines())
instance = self.registry.read(fp, into=MockClass)
self.assertEqual(instance, MockClass(['a\nb\nc\nd\ne\n']))
gen = self.registry.read(fp, format='formatx')
self.assertEqual(next(gen), MockClass(['a\nb\nc\nd\ne\n']))
gen.close()
def test_file_sentinel_many(self):
format1 = self.registry.create_format('format1')
extra = get_data_path('real_file')
extra_2 = get_data_path('real_file_2')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
return MockClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fh, format='format1', into=MockClass,
extra=extra, extra_2=extra_2)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
fh.close()
def test_file_sentinel_converted_to_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertIsNone(extra)
self.assertIsNone(extra_2)
return MockClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fh, format='format1', into=MockClass)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
fh.close()
def test_file_sentinel_pass_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(MockClass)
def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertIsNone(extra)
self.assertIsNone(extra_2)
return MockClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fh, format='format1', into=MockClass,
extra=None)
self.assertEqual(MockClass([1, 2, 3, 4]), instance)
fh.close()
def test_file_sentinel_generator_many(self):
format1 = self.registry.create_format('format1')
extra = get_data_path('real_file')
extra_2 = get_data_path('real_file_2')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(None)
def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
yield MockClass([int(x) for x in fh.read().split('\n')])
gen = self.registry.read(fh, format='format1', extra=extra,
extra_2=extra_2)
self.assertEqual(MockClass([1, 2, 3, 4]), next(gen))
fh.close()
def test_file_sentinel_converted_to_none_generator(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(None)
def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertIsNone(extra)
self.assertIsNone(extra_2)
yield MockClass([int(x) for x in fh.read().split('\n')])
gen = self.registry.read(fh, format='format1')
self.assertEqual(MockClass([1, 2, 3, 4]), next(gen))
fh.close()
def test_file_sentinel_pass_none_generator(self):
format1 = self.registry.create_format('format1')
fh = StringIO('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(None)
def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertIsNone(extra)
self.assertIsNone(extra_2)
yield MockClass([int(x) for x in fh.read().split('\n')])
gen = self.registry.read(fh, format='format1', extra=None)
self.assertEqual(MockClass([1, 2, 3, 4]), next(gen))
fh.close()
def test_read_with_illegal_encoding(self):
binf = self.registry.create_format('binf', encoding='binary')
textf = self.registry.create_format('textf', encoding=None)
@binf.sniffer()
def binf_sniffer(fh):
return True, {}
@binf.reader(MockClass)
def binf_reader(fh):
return MockClass(['bin'])
@binf.reader(None)
def binf_reader_gen(fh):
yield MockClass(['bin'])
@textf.sniffer()
def textf_sniffer(fh):
return True, {}
@textf.reader(MockClass)
def textf_reader(fh):
return MockClass(['text'])
@textf.reader(None)
def textf_reader_gen(fh):
yield MockClass(['text'])
# Should skip binary sniffers
instance = self.registry.read(self.fp1, encoding=None, into=MockClass)
self.assertEqual(instance, MockClass(['text']))
gen = self.registry.read(self.fp1, encoding=None, format='textf')
self.assertEqual(next(gen), MockClass(['text']))
gen.close()
# Should skip text sniffers
instance = self.registry.read(self.fp1, encoding='binary',
into=MockClass)
self.assertEqual(instance, MockClass(['bin']))
gen = self.registry.read(self.fp1, encoding='binary', format='binf')
self.assertEqual(next(gen), MockClass(['bin']))
gen.close()
with self.assertRaises(ValueError):
self.registry.read(['some content\n'], encoding='binary',
into=MockClass)
with self.assertRaises(ValueError):
self.registry.read(['some content\n'], format='textf',
encoding='binary', into=MockClass)
with self.assertRaises(ValueError):
self.registry.read(['some content\n'], format='textf',
encoding='binary', verify=False, into=MockClass)
with self.assertRaises(ValueError):
self.registry.read(['some content\n'], format='textf',
encoding='binary')
with self.assertRaises(ValueError):
self.registry.read(['some content\n'], format='textf',
encoding='binary', verify=False)
with self.assertRaises(ValueError):
self.registry.read(self.fp1, format='binf',
encoding=None, into=MockClass)
with self.assertRaises(ValueError):
self.registry.read(self.fp1, format='binf',
encoding=None, verify=False, into=MockClass)
with self.assertRaises(ValueError):
self.registry.read(self.fp1, format='binf',
encoding=None)
with self.assertRaises(ValueError):
self.registry.read(self.fp1, format='binf',
encoding=None, verify=False)
def test_read_with_binary_encoding(self):
binf = self.registry.create_format('binf', encoding='binary')
@binf.reader(MockClass)
def reader1(fh):
self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
return MockClass(['woo'])
@binf.reader(None)
def reader2(fh):
self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
yield MockClass(['woo'])
instance = self.registry.read(self.fp1, format='binf', verify=False,
into=MockClass)
self.assertEqual(MockClass(['woo']), instance)
gen = self.registry.read(self.fp1, format='binf', verify=False,
into=None)
self.assertEqual(MockClass(['woo']), next(gen))
gen.close()
def test_io_kwargs_passed(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def sniffer(fh):
return True, {}
@format1.reader(MockClass)
def reader1(fh):
self.assertEqual(fh.errors, 'replace')
return MockClass(['woo'])
@format1.reader(None)
def reader1_gen(fh):
self.assertEqual(fh.errors, 'replace')
yield MockClass(['woo'])
obj = self.registry.read(self.fp1, into=MockClass, errors='replace')
self.assertEqual(obj, MockClass(['woo']))
gen = self.registry.read(self.fp1, format='format1', errors='replace')
self.assertEqual(next(gen), MockClass(['woo']))
gen.close()
def test_read_empty_file_gen_with_format(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def sniffer(fh):
return True, {}
@format1.reader(None)
def reader1(fh):
return
yield
with io.StringIO("") as fh:
gen = self.registry.read(fh, format='format1')
self.assertEqual(list(gen), [])
class TestWrite(RegistryTest):
def test_writer_does_not_exist(self):
fh = StringIO()
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.write({}, format='not_a_format', into=fh)
self.assertTrue('not_a_format' in str(cm.exception))
self.assertTrue(str(fh) in str(cm.exception))
fh.close()
def test_writer_exists(self):
format1 = self.registry.create_format('format1')
obj = MockClass(['1', '2', '3', '4'])
fh = StringIO()
@format1.writer(MockClass)
def writer(obj, fh):
self.assertIsInstance(fh, io.TextIOBase)
fh.write('\n'.join(obj.list))
self.registry.write(obj, format='format1', into=fh)
fh.seek(0)
self.assertEqual("1\n2\n3\n4", fh.read())
fh.close()
def test_writer_exists_real_file(self):
format1 = self.registry.create_format('format1')
obj = MockClass(['1', '2', '3', '4'])
fp = self.fp1
@format1.writer(MockClass)
def writer(obj, fh):
self.assertIsInstance(fh, io.TextIOBase)
fh.write('\n'.join(obj.list))
self.registry.write(obj, format='format1', into=fp)
with io.open(fp) as fh:
self.assertEqual("1\n2\n3\n4", fh.read())
def test_writer_passed_kwargs(self):
format1 = self.registry.create_format('format1')
@format1.reader(None)
def reader(fh):
yield
@format1.writer(None)
def writer(obj, fh, **kwargs):
self.assertEqual(kwargs['passed'], True)
generator = self.registry.get_reader('format1', None)([])
self.registry.write(generator, format='format1',
into=StringIO(), passed=True)
def test_that_encoding_is_used(self):
format1 = self.registry.create_format('format1')
        obj = MockClass(['\u4f60\u597d\n'])  # Ni Hao
fp = self.fp1
@format1.writer(MockClass)
def writer(obj, fh):
fh.write(''.join(obj.list))
self.assertEqual(self._expected_encoding, fh.encoding)
self._expected_encoding = 'big5'
self.registry.write(obj, format='format1', into=fp, encoding='big5')
with io.open(fp, mode='rb') as fh:
# This would have been b'\xe4\xbd\xa0\xe5\xa5\xbd\n' in utf8
self.assertEqual(b'\xa7A\xa6n\n', fh.read())
def test_non_default_encoding(self):
format1 = self.registry.create_format('format1', encoding='big5')
        obj = MockClass(['\u4f60\u597d\n'])  # Ni Hao
fp = self.fp1
@format1.writer(MockClass)
def writer(obj, fh):
fh.write(''.join(obj.list))
self.assertEqual(self._expected_encoding, fh.encoding)
self._expected_encoding = 'big5'
self.registry.write(obj, format='format1', into=fp)
with io.open(fp, mode='rb') as fh:
self.assertEqual(b'\xa7A\xa6n\n', fh.read())
self._expected_encoding = 'utf8'
self.registry.write(obj, format='format1', into=fp, encoding='utf8')
with io.open(fp, mode='rb') as fh:
self.assertEqual(b'\xe4\xbd\xa0\xe5\xa5\xbd\n', fh.read())
def test_that_newline_is_used(self):
format1 = self.registry.create_format('format1')
obj = MockClass(['a\n', 'b\n', 'c\n'])
fp = self.fp1
@format1.writer(MockClass)
def writer(obj, fh):
fh.write(''.join(obj.list))
self.registry.write(obj, format='format1', into=fp, newline='\r')
with io.open(fp, mode='rb') as fh:
self.assertEqual(b'a\rb\rc\r', fh.read())
def test_non_default_newline(self):
format1 = self.registry.create_format('format1', newline='\r')
obj = MockClass(['a\n', 'b\n', 'c\n'])
fp = self.fp1
@format1.writer(MockClass)
def writer(obj, fh):
fh.write(''.join(obj.list))
self.registry.write(obj, format='format1', into=fp)
with io.open(fp, mode='rb') as fh:
self.assertEqual(b'a\rb\rc\r', fh.read())
self.registry.write(obj, format='format1', into=fp, newline='\n')
with io.open(fp, mode='rb') as fh:
self.assertEqual(b'a\nb\nc\n', fh.read())
def test_file_sentinel_many(self):
format1 = self.registry.create_format('format1')
fh = StringIO()
@format1.writer(MockClass)
def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
extra.write('oh yeah...')
extra_2.write('oh no...')
self.registry.write(MockClass([]), format='format1', into=fh,
extra=self.fp1, extra_2=self.fp2)
with open(self.fp1) as f1:
self.assertEqual('oh yeah...', f1.read())
with open(self.fp2) as f2:
self.assertEqual('oh no...', f2.read())
fh.close()
def test_file_sentinel_converted_to_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO()
@format1.writer(MockClass)
def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertIsNone(extra)
self.assertIsNone(extra_2)
self.registry.write(MockClass([]), format='format1', into=fh)
fh.close()
def test_file_sentinel_pass_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO()
@format1.writer(MockClass)
def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
self.assertIsNone(extra)
self.assertIsNone(extra_2)
self.registry.write(MockClass([]), format='format1', into=fh,
extra=None)
fh.close()
def test_write_with_illegal_encoding(self):
binf = self.registry.create_format('binf', encoding='binary')
textf = self.registry.create_format('textf', encoding=None)
@binf.writer(MockClass)
def writer(obj, fh):
pass
@textf.writer(MockClass)
def writer2(obj, fh):
pass
with self.assertRaises(ValueError):
self.registry.write(MockClass([]), into=self.fp1, format='binf',
encoding=None)
with self.assertRaises(ValueError):
self.registry.write(MockClass([]), into=self.fp1, format='textf',
encoding='binary')
def test_write_binary_format(self):
format1 = self.registry.create_format('format1', encoding='binary')
obj = MockClass([b'a\n', b'b\n', b'c\n'])
fp = self.fp1
@format1.writer(MockClass)
def writer(obj, fh):
self.assertIsInstance(fh, (io.BufferedWriter, io.BufferedRandom))
fh.write(b''.join(obj.list))
self.registry.write(obj, format='format1', into=fp)
with io.open(fp, mode='rb') as fh:
self.assertEqual(b'a\nb\nc\n', fh.read())
def test_io_kwargs_passed(self):
format1 = self.registry.create_format('format1', encoding='ascii')
obj = MockClass(['a\n', 'b\n', 'c\n'])
fp = self.fp1
f = io.BytesIO()
@format1.writer(MockClass)
def writer(obj, fh):
iterator = iter(obj.list)
fh.write(next(iterator))
fh.flush() # Flush should be a noop for bz2
for x in iterator:
fh.write(x)
self.registry.write(obj, format='format1', into=fp, compression='bz2')
self.registry.write(obj, format='format1', into=f, compression='bz2')
expected = (
b'BZh91AY&SY\x03\x89\x0c\xa6\x00\x00\x01\xc1\x00\x00\x108\x00 \x00'
b'!\x9ah3M\x1c\xb7\x8b\xb9"\x9c(H\x01\xc4\x86S\x00')
with io.open(fp, mode='rb') as fh:
self.assertEqual(expected, fh.read())
self.assertEqual(expected, f.getvalue())
class TestMonkeyPatch(RegistryTest):
def setUp(self):
super(TestMonkeyPatch, self).setUp()
class UnassumingClass:
pass
class ClassWithDefault:
default_write_format = 'favfmt'
class NoMonkeySee:
pass
self.unassuming_class = UnassumingClass
self.class_with_default = ClassWithDefault
self.no_monkey_see = NoMonkeySee
def test_no_readers_writers(self):
self.registry.monkey_patch()
self.assertFalse(hasattr(self.unassuming_class, 'read'))
self.assertFalse(hasattr(self.unassuming_class, 'write'))
self.assertFalse(hasattr(self.class_with_default, 'read'))
self.assertFalse(hasattr(self.class_with_default, 'write'))
def test_readers_only(self):
favfmt = self.registry.create_format('favfmt')
favfmt2 = self.registry.create_format('favfmt2')
@favfmt.reader(self.unassuming_class)
def fvfmt_to_unasumming_class(fh):
return
@favfmt.reader(None)
def fvfmt_to_gen(fh):
yield
@favfmt2.reader(self.unassuming_class)
def fvfmt2_to_unasumming_class(fh):
return
self.registry.monkey_patch()
self.assertTrue(hasattr(self.unassuming_class, 'read'))
self.assertFalse(hasattr(self.unassuming_class, 'write'))
self.assertFalse(hasattr(self.class_with_default, 'read'))
self.assertFalse(hasattr(self.class_with_default, 'write'))
self.assertIn('favfmt', self.unassuming_class.read.__doc__)
self.assertIn('favfmt2', self.unassuming_class.read.__doc__)
def test_writers_only(self):
favfmt = self.registry.create_format('favfmt')
favfmt2 = self.registry.create_format('favfmt2')
@favfmt.writer(self.class_with_default)
def favfmt_writer(fh):
pass
@favfmt.writer(None)
def gen_to_favfmt(fh):
pass
@favfmt2.writer(self.class_with_default)
def favfmt2_writer(fh):
pass
self.registry.monkey_patch()
self.assertFalse(hasattr(self.unassuming_class, 'read'))
self.assertFalse(hasattr(self.unassuming_class, 'write'))
self.assertFalse(hasattr(self.class_with_default, 'read'))
self.assertTrue(hasattr(self.class_with_default, 'write'))
self.assertIn('favfmt', self.class_with_default.write.__doc__)
self.assertIn('favfmt2', self.class_with_default.write.__doc__)
def test_writers_no_default_format(self):
favfmt = self.registry.create_format('favfmt')
favfmt2 = self.registry.create_format('favfmt2')
@favfmt.writer(self.unassuming_class)
def favfmt_writer(fh):
pass
@favfmt.writer(None)
def gen_to_favfmt(fh):
pass
@favfmt2.writer(self.unassuming_class)
def favfmt2_writer(fh):
pass
with self.assertRaises(NotImplementedError) as cm:
self.registry.monkey_patch()
self.assertIn('default_write_format', str(cm.exception))
def test_readers_writers(self):
favfmt = self.registry.create_format('favfmt')
favfmt2 = self.registry.create_format('favfmt2')
@favfmt.reader(self.unassuming_class)
def fvfmt_to_unasumming_class(fh):
return
@favfmt.reader(self.class_with_default)
def fvfmt_to_class_w_default(fh):
return
@favfmt.reader(None)
def fvfmt_to_gen(fh):
yield
@favfmt2.reader(self.unassuming_class)
def fvfmt2_to_unasumming_class(fh):
return
@favfmt2.reader(self.class_with_default)
def fvfmt2_to_class_w_default(fh):
return
@favfmt.writer(self.class_with_default)
def favfmt_writer(fh):
pass
@favfmt.writer(None)
def gen_to_favfmt(fh):
pass
@favfmt2.writer(self.class_with_default)
def favfmt2_writer(fh):
pass
@favfmt2.reader(self.no_monkey_see, monkey_patch=True)
def favfmt2_to_monkey(fh):
pass
@favfmt2.writer(self.no_monkey_see, monkey_patch=False)
def monkey_to_favfmt2(fh):
pass
self.registry.monkey_patch()
self.assertTrue(hasattr(self.unassuming_class, 'read'))
self.assertFalse(hasattr(self.unassuming_class, 'write'))
self.assertTrue(hasattr(self.class_with_default, 'read'))
self.assertTrue(hasattr(self.class_with_default, 'write'))
self.assertTrue(hasattr(self.no_monkey_see, 'read'))
self.assertFalse(hasattr(self.no_monkey_see, 'write'))
self.assertIn('favfmt', self.unassuming_class.read.__doc__)
self.assertIn('favfmt2', self.unassuming_class.read.__doc__)
self.assertIn('favfmt', self.class_with_default.read.__doc__)
self.assertIn('favfmt2', self.class_with_default.read.__doc__)
self.assertIn('favfmt', self.class_with_default.write.__doc__)
self.assertIn('favfmt2', self.class_with_default.write.__doc__)
self.assertIn('favfmt2', self.no_monkey_see.read.__doc__)
def test_read_kwargs_passed(self):
favfmt = self.registry.create_format('favfmt')
self.was_called = False
@favfmt.sniffer()
def fvfmt_sniffer(fh):
return True, {}
@favfmt.reader(self.class_with_default)
def fvfmt_to_class_w_default(fh, **kwargs):
self.assertEqual('a', kwargs['a'])
self.assertEqual(123, kwargs['b'])
self.was_called = True
self.registry.monkey_patch()
fh = StringIO('notempty')
self.class_with_default.read(fh, a='a', b=123)
self.assertTrue(self.was_called)
fh.close()
def test_write_kwargs_passed(self):
favfmt = self.registry.create_format('favfmt')
self.was_called = False
@favfmt.writer(self.class_with_default)
def favfmt_writer(obj, fh, **kwargs):
self.assertEqual('a', kwargs['a'])
self.assertEqual(123, kwargs['b'])
self.was_called = True
self.registry.monkey_patch()
fh = StringIO()
self.class_with_default().write(fh, a='a', b=123)
self.assertTrue(self.was_called)
fh.close()
class TestModuleFunctions(unittest.TestCase):
def test_sniff_matches(self):
exp = io_registry.sniff(['(a, b);'])
result = sniff(['(a, b);'])
self.assertEqual(exp, result)
self.assertEqual('newick', exp[0])
self.assertEqual({}, exp[1])
def test_read_matches(self):
input = ['>\n', 'ACGT\n']
exp = io_registry.read(input, into=DNA)
result = read(input, into=DNA)
self.assertEqual(exp, result)
self.assertEqual(exp, DNA('ACGT', metadata={'id': '',
'description': ''}))
def test_write_matches(self):
input = DNA('ACGT')
exp = io_registry.write(input, format='fasta', into=[])
result = write(input, format='fasta', into=[])
self.assertEqual(exp, result)
self.assertEqual(exp, ['>\n', 'ACGT\n'])
def test_create_format_matches(self):
with self.assertRaises(DuplicateRegistrationError):
io_registry.create_format('fasta')
with self.assertRaises(DuplicateRegistrationError):
create_format('fasta')
if __name__ == '__main__':
unittest.main()
| 32.613368
| 79
| 0.599404
|
42bf9a0048bf583ff4c6703353e2bab9e4b67009
| 4,191
|
py
|
Python
|
simplejson/tests/test_speedups.py
|
Daybreak2019/simplejson
|
9be9bc0742d1dfaff26e418138643b5b253c4242
|
[
"AFL-2.1"
] | null | null | null |
simplejson/tests/test_speedups.py
|
Daybreak2019/simplejson
|
9be9bc0742d1dfaff26e418138643b5b253c4242
|
[
"AFL-2.1"
] | null | null | null |
simplejson/tests/test_speedups.py
|
Daybreak2019/simplejson
|
9be9bc0742d1dfaff26e418138643b5b253c4242
|
[
"AFL-2.1"
] | null | null | null |
from __future__ import with_statement
import sys
import unittest
from unittest import TestCase
import simplejson
from simplejson import encoder, decoder, scanner
from simplejson.compat import PY3, long_type, b
def has_speedups():
return encoder.c_make_encoder is not None
def skip_if_speedups_missing(func):
def wrapper(*args, **kwargs):
if not has_speedups():
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("C Extension not available")
else:
sys.stdout.write("C Extension not available")
return
return func(*args, **kwargs)
return wrapper
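# Note: the decorator above is applied to the test methods below; when the C
# extension is unavailable it raises unittest.SkipTest where supported and
# otherwise just reports the fact and returns without running the test body.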
class BadBool:
def __bool__(self):
1/0
__nonzero__ = __bool__
class TestDecode(TestCase):
@skip_if_speedups_missing
def test_make_scanner(self):
self.assertRaises(AttributeError, scanner.c_make_scanner, 1)
@skip_if_speedups_missing
def test_bad_bool_args(self):
def test(value):
decoder.JSONDecoder(strict=BadBool()).decode(value)
self.assertRaises(ZeroDivisionError, test, '""')
self.assertRaises(ZeroDivisionError, test, '{}')
if not PY3:
self.assertRaises(ZeroDivisionError, test, u'""')
self.assertRaises(ZeroDivisionError, test, u'{}')
class TestEncode(TestCase):
@skip_if_speedups_missing
def test_make_encoder(self):
self.assertRaises(
TypeError,
encoder.c_make_encoder,
None,
("\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7"
"\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75"),
None
)
@skip_if_speedups_missing
def test_bad_str_encoder(self):
# Issue #31505: There shouldn't be an assertion failure in case
# c_make_encoder() receives a bad encoder() argument.
import decimal
def bad_encoder1(*args):
return None
enc = encoder.c_make_encoder(
None, lambda obj: str(obj),
bad_encoder1, None, ': ', ', ',
False, False, False, {}, False, False, False,
None, None, 'utf-8', False, False, decimal.Decimal, False)
self.assertRaises(TypeError, enc, 'spam', 4)
self.assertRaises(TypeError, enc, {'spam': 42}, 4)
def bad_encoder2(*args):
1/0
enc = encoder.c_make_encoder(
None, lambda obj: str(obj),
bad_encoder2, None, ': ', ', ',
False, False, False, {}, False, False, False,
None, None, 'utf-8', False, False, decimal.Decimal, False)
self.assertRaises(ZeroDivisionError, enc, 'spam', 4)
@skip_if_speedups_missing
def test_bad_bool_args(self):
def test(name):
encoder.JSONEncoder(**{name: BadBool()}).encode({})
self.assertRaises(ZeroDivisionError, test, 'skipkeys')
self.assertRaises(ZeroDivisionError, test, 'ensure_ascii')
self.assertRaises(ZeroDivisionError, test, 'check_circular')
self.assertRaises(ZeroDivisionError, test, 'allow_nan')
self.assertRaises(ZeroDivisionError, test, 'sort_keys')
self.assertRaises(ZeroDivisionError, test, 'use_decimal')
self.assertRaises(ZeroDivisionError, test, 'namedtuple_as_object')
self.assertRaises(ZeroDivisionError, test, 'tuple_as_array')
self.assertRaises(ZeroDivisionError, test, 'bigint_as_string')
self.assertRaises(ZeroDivisionError, test, 'for_json')
self.assertRaises(ZeroDivisionError, test, 'ignore_nan')
self.assertRaises(ZeroDivisionError, test, 'iterable_as_array')
@skip_if_speedups_missing
def test_int_as_string_bitcount_overflow(self):
long_count = long_type(2)**32+31
def test():
encoder.JSONEncoder(int_as_string_bitcount=long_count).encode(0)
self.assertRaises((TypeError, OverflowError), test)
if PY3:
@skip_if_speedups_missing
def test_bad_encoding(self):
with self.assertRaises(UnicodeEncodeError):
encoder.JSONEncoder(encoding='\udcff').encode({b('key'): 123})
if __name__ == '__main__':
unittest.main()
| 35.820513
| 78
| 0.639704
|
155d88d6ec2d9f96a5de40384eb2a166715be02d
| 62,050
|
py
|
Python
|
pandas/core/internals/managers.py
|
fgebhart/pandas
|
524fc9c6790ddc4b151b6fb910a27073961c5fba
|
[
"BSD-3-Clause"
] | 1
|
2020-09-01T12:13:29.000Z
|
2020-09-01T12:13:29.000Z
|
pandas/core/internals/managers.py
|
fgebhart/pandas
|
524fc9c6790ddc4b151b6fb910a27073961c5fba
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/internals/managers.py
|
fgebhart/pandas
|
524fc9c6790ddc4b151b6fb910a27073961c5fba
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T15:07:11.000Z
|
2022-03-08T15:07:11.000Z
|
from collections import defaultdict
import itertools
from typing import (
Any,
Callable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import warnings
import numpy as np
from pandas._libs import internals as libinternals, lib
from pandas._typing import ArrayLike, DtypeObj, Label, Shape
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_promote,
)
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_dtype_equal,
is_extension_array_dtype,
is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import array_equals, isna
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import PandasObject
from pandas.core.construction import extract_array
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import Index, ensure_index
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
extend_blocks,
get_block_type,
make_block,
safe_reshape,
)
from pandas.core.internals.ops import blockwise_all, operate_blockwise
# TODO: flexible with index=None and/or items=None
T = TypeVar("T", bound="BlockManager")
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
blocks: Sequence of Block
axes: Sequence of Index
do_integrity_check: bool, default True
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
_blknos: np.ndarray
_blklocs: np.ndarray
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks: Tuple[Block, ...] = tuple(blocks)
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
f"Number of Block dimensions ({block.ndim}) must equal "
f"number of axes ({self.ndim})"
)
if do_integrity_check:
self._verify_integrity()
# Populate known_consolidate, blknos, and blklocs lazily
self._known_consolidated = False
self._blknos = None
self._blklocs = None
@classmethod
def from_blocks(cls, blocks: List[Block], axes: List[Index]):
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
return cls(blocks, axes, do_integrity_check=False)
@property
def blknos(self):
"""
Suppose we want to find the array corresponding to our i'th column.
blknos[i] identifies the block from self.blocks that contains this column.
blklocs[i] identifies the column of interest within
self.blocks[self.blknos[i]]
"""
if self._blknos is None:
# Note: these can be altered by other BlockManager methods.
self._rebuild_blknos_and_blklocs()
return self._blknos
@property
def blklocs(self):
"""
See blknos.__doc__
"""
if self._blklocs is None:
# Note: these can be altered by other BlockManager methods.
self._rebuild_blknos_and_blklocs()
return self._blklocs
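    # Illustration of the two mappings (hypothetical values): with a float
    # block holding columns 0 and 2 and an int block holding column 1,
    #
    #     blknos  == [0, 1, 0]   # which block each column lives in
    #     blklocs == [0, 0, 1]   # position of the column inside that block
    #
    # so column 2 is found at self.blocks[blknos[2]].values[blklocs[2]].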
def make_empty(self: T, axes=None) -> T:
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [Index([])] + self.axes[1:]
# preserve dtype if possible
if self.ndim == 1:
assert isinstance(self, SingleBlockManager) # for mypy
blk = self.blocks[0]
arr = blk.values[:0]
nb = blk.make_block_same_class(arr, placement=slice(0, 0), ndim=1)
blocks = [nb]
else:
blocks = []
return type(self).from_blocks(blocks, axes)
def __nonzero__(self) -> bool:
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self) -> Shape:
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis: int, new_labels: Index) -> None:
# Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, new "
f"values have {new_len} elements"
)
self.axes[axis] = new_labels
@property
def is_single_block(self) -> bool:
# Assumes we are 2D; overridden by SingleBlockManager
return len(self.blocks) == 1
def _rebuild_blknos_and_blklocs(self) -> None:
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.intp)
new_blklocs = np.empty(self.shape[0], dtype=np.intp)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
# TODO: can we avoid this? it isn't cheap
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self) -> Index:
return self.axes[0]
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self.blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs, ndim: int):
# TODO(EA2D): ndim would be unnecessary with 2D EAs
return make_block(values, placement=mgr_locs, ndim=ndim)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
ndim = len(self.axes)
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)
for b in state["blocks"]
)
else:
raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
self._post_setstate()
def _post_setstate(self) -> None:
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = type(self).__name__
for i, ax in enumerate(self.axes):
if i == 0:
output += f"\nItems: {ax}"
else:
output += f"\nAxis {i}: {ax}"
for block in self.blocks:
output += f"\n{block}"
return output
def _verify_integrity(self) -> None:
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block.shape[1:] != mgr_shape[1:]:
raise construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
f"block items\n# manager items: {len(self.items)}, # "
f"tot_items: {tot_items}"
)
def reduce(
self: T, func: Callable, ignore_failures: bool = False
) -> Tuple[T, np.ndarray]:
"""
Apply reduction function blockwise, returning a single-row BlockManager.
Parameters
----------
func : reduction function
ignore_failures : bool, default False
Whether to drop blocks where func raises TypeError.
Returns
-------
BlockManager
np.ndarray
Indexer of mgr_locs that are retained.
"""
# If 2D, we assume that we're operating column-wise
assert self.ndim == 2
res_blocks: List[Block] = []
for blk in self.blocks:
nbs = blk.reduce(func, ignore_failures)
res_blocks.extend(nbs)
index = Index([None]) # placeholder
if ignore_failures:
if res_blocks:
indexer = np.concatenate([blk.mgr_locs.as_array for blk in res_blocks])
new_mgr = self._combine(res_blocks, copy=False, index=index)
else:
indexer = []
new_mgr = type(self).from_blocks([], [Index([]), index])
else:
indexer = np.arange(self.shape[0])
new_mgr = type(self).from_blocks(res_blocks, [self.items, index])
return new_mgr, indexer
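    # Sketch of the result shape: every block reduces to a single "row", so the
    # returned manager has one column-axis entry per retained item and a
    # length-1 placeholder index; the indexer maps those retained items back to
    # their positions in self.items (relevant when ignore_failures drops blocks).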
def operate_blockwise(self, other: "BlockManager", array_op) -> "BlockManager":
"""
Apply array_op blockwise with another (aligned) BlockManager.
"""
return operate_blockwise(self, other, array_op)
def apply(
self: T,
f,
align_keys: Optional[List[str]] = None,
ignore_failures: bool = False,
**kwargs,
) -> T:
"""
Iterate over the blocks, collect and create a new BlockManager.
Parameters
----------
f : str or callable
Name of the Block method to apply.
align_keys: List[str] or None, default None
ignore_failures: bool, default False
**kwargs
Keywords to pass to `f`
Returns
-------
BlockManager
"""
assert "filter" not in kwargs
align_keys = align_keys or []
result_blocks: List[Block] = []
# fillna: Series/DataFrame is responsible for making sure value is aligned
aligned_args = {k: kwargs[k] for k in align_keys}
for b in self.blocks:
if aligned_args:
for k, obj in aligned_args.items():
if isinstance(obj, (ABCSeries, ABCDataFrame)):
# The caller is responsible for ensuring that
# obj.axes[-1].equals(self.items)
if obj.ndim == 1:
kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
else:
kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
else:
# otherwise we have an ndarray
kwargs[k] = obj[b.mgr_locs.indexer]
try:
if callable(f):
applied = b.apply(f, **kwargs)
else:
applied = getattr(b, f)(**kwargs)
except (TypeError, NotImplementedError):
if not ignore_failures:
raise
continue
result_blocks = extend_blocks(applied, result_blocks)
if ignore_failures:
return self._combine(result_blocks)
if len(result_blocks) == 0:
return self.make_empty(self.axes)
return type(self).from_blocks(result_blocks, self.axes)
def quantile(
self,
axis: int = 0,
transposed: bool = False,
interpolation="linear",
qs=None,
numeric_only=None,
) -> "BlockManager":
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
transposed: bool, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
BlockManager
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax._values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return type(self)(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
make_block(values, ndim=1, placement=np.arange(len(values))), axes[0]
)
def isna(self, func) -> "BlockManager":
return self.apply("apply", func=func)
def where(
self, other, cond, align: bool, errors: str, try_cast: bool, axis: int
) -> "BlockManager":
if align:
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
other = extract_array(other, extract_numpy=True)
return self.apply(
"where",
align_keys=align_keys,
other=other,
cond=cond,
errors=errors,
try_cast=try_cast,
axis=axis,
)
def setitem(self, indexer, value) -> "BlockManager":
return self.apply("setitem", indexer=indexer, value=value)
def putmask(self, mask, new, align: bool = True, axis: int = 0):
transpose = self.ndim == 2
if align:
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
new = extract_array(new, extract_numpy=True)
return self.apply(
"putmask",
align_keys=align_keys,
mask=mask,
new=new,
inplace=True,
axis=axis,
transpose=transpose,
)
def diff(self, n: int, axis: int) -> "BlockManager":
return self.apply("diff", n=n, axis=axis)
def interpolate(self, **kwargs) -> "BlockManager":
return self.apply("interpolate", **kwargs)
def shift(self, periods: int, axis: int, fill_value) -> "BlockManager":
if fill_value is lib.no_default:
fill_value = None
if axis == 0 and self.ndim == 2 and self.nblocks > 1:
# GH#35488 we need to watch out for multi-block cases
# We only get here with fill_value not-lib.no_default
ncols = self.shape[0]
if periods > 0:
indexer = [-1] * periods + list(range(ncols - periods))
else:
nper = abs(periods)
indexer = list(range(nper, ncols)) + [-1] * nper
result = self.reindex_indexer(
self.items,
indexer,
axis=0,
fill_value=fill_value,
allow_dups=True,
consolidate=False,
)
return result
return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value)
def fillna(self, value, limit, inplace: bool, downcast) -> "BlockManager":
return self.apply(
"fillna", value=value, limit=limit, inplace=inplace, downcast=downcast
)
def downcast(self) -> "BlockManager":
return self.apply("downcast")
def astype(
self, dtype, copy: bool = False, errors: str = "raise"
) -> "BlockManager":
return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
) -> "BlockManager":
return self.apply(
"convert",
copy=copy,
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
)
def replace(self, to_replace, value, inplace: bool, regex: bool) -> "BlockManager":
assert np.ndim(value) == 0, value
return self.apply(
"replace", to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
def replace_list(
self: T,
src_list: List[Any],
dest_list: List[Any],
inplace: bool = False,
regex: bool = False,
) -> T:
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
bm = self.apply(
"_replace_list",
src_list=src_list,
dest_list=dest_list,
inplace=inplace,
regex=regex,
)
bm._consolidate_inplace()
return bm
def to_native_types(self, **kwargs) -> "BlockManager":
"""
Convert values to native types (strings / python objects) that are used
in formatting (repr / csv).
"""
return self.apply("to_native_types", **kwargs)
def is_consolidated(self) -> bool:
"""
        Return True if the blocks are consolidated, i.e. no two consolidatable
        blocks share the same dtype.
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self) -> None:
dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
self._is_consolidated = len(dtypes) == len(set(dtypes))
self._known_consolidated = True
@property
def is_numeric_mixed_type(self) -> bool:
return all(block.is_numeric for block in self.blocks)
@property
def any_extension_types(self) -> bool:
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self) -> bool:
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy: bool = False) -> "BlockManager":
"""
Select blocks that are bool-dtype and columns from object-dtype blocks
that are all-bool.
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
new_blocks = []
for blk in self.blocks:
if blk.dtype == bool:
new_blocks.append(blk)
elif blk.is_object:
nbs = blk._split()
for nb in nbs:
if nb.is_bool:
new_blocks.append(nb)
return self._combine(new_blocks, copy)
def get_numeric_data(self, copy: bool = False) -> "BlockManager":
"""
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
return self._combine([b for b in self.blocks if b.is_numeric], copy)
def _combine(
self: T, blocks: List[Block], copy: bool = True, index: Optional[Index] = None
) -> T:
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks: List[Block] = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = inv_indexer[b.mgr_locs.indexer]
new_blocks.append(b)
axes = list(self.axes)
if index is not None:
axes[-1] = index
axes[0] = self.items.take(indexer)
return type(self).from_blocks(new_blocks, axes)
def get_slice(self, slobj: slice, axis: int = 0) -> "BlockManager":
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
elif axis == 1:
slicer = (slice(None), slobj)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
else:
raise IndexError("Requested axis not found in manager")
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = type(self)(new_blocks, new_axes, do_integrity_check=False)
return bm
@property
def nblocks(self) -> int:
return len(self.blocks)
def copy(self: T, deep=True) -> T:
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : bool or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
def copy_func(ax):
return ax.copy(deep=True) if deep == "all" else ax.view()
new_axes = [copy_func(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
res = self.apply("copy", deep=deep)
res.axes = new_axes
return res
def as_array(
self,
transpose: bool = False,
dtype=None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
        Convert the blockmanager data into a numpy array.
Parameters
----------
transpose : bool, default False
If True, transpose the return array.
dtype : object, default None
Data type of the return array.
copy : bool, default False
If True then guarantee that a copy is returned. A value of
False does not guarantee that the underlying data is not
copied.
na_value : object, default lib.no_default
Value to be used as the missing value sentinel.
Returns
-------
arr : ndarray
"""
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
# We want to copy when na_value is provided to avoid
# mutating the original object
copy = copy or na_value is not lib.no_default
if self.is_single_block:
blk = self.blocks[0]
if blk.is_extension:
# Avoid implicit conversion of extension blocks to object
arr = blk.values.to_numpy(dtype=dtype, na_value=na_value).reshape(
blk.shape
)
else:
arr = np.asarray(blk.get_values())
if dtype:
arr = arr.astype(dtype, copy=False)
else:
arr = self._interleave(dtype=dtype, na_value=na_value)
# The underlying data was copied within _interleave
copy = False
if copy:
arr = arr.copy()
if na_value is not lib.no_default:
arr[isna(arr)] = na_value
return arr.transpose() if transpose else arr
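    # Hypothetical example: a mixed int/float frame has to be interleaved into
    # a single ndarray of a common dtype, e.g.
    #
    #   >>> df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5]})
    #   >>> df._mgr.as_array().dtype   # doctest: +SKIP
    #   dtype('float64')
    #
    # Passing na_value forces a copy so the original blocks are not mutated.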
def _interleave(self, dtype=None, na_value=lib.no_default) -> np.ndarray:
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
if not dtype:
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
elif is_dtype_equal(dtype, str):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
if blk.is_extension:
# Avoid implicit conversion of extension blocks to object
arr = blk.values.to_numpy(dtype=dtype, na_value=na_value)
else:
arr = blk.get_values(dtype)
result[rl.indexer] = arr
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy: bool = True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : bool, default True
Returns
-------
values : a dict of dtype -> BlockManager
"""
bd: Dict[str, List[Block]] = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
# TODO(EA2D): the combine will be unnecessary with 2D EAs
return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()}
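    # Sketch of the resulting mapping (keys are str(dtype)):
    #
    #   >>> df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5]})
    #   >>> sorted(df._mgr.to_dict())   # doctest: +SKIP
    #   ['float64', 'int64']
    #
    # with each value being a BlockManager that holds only that dtype's columns.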
def fast_xs(self, loc: int) -> ArrayLike:
"""
Return the array corresponding to `frame.iloc[loc]`.
Parameters
----------
loc : int
Returns
-------
np.ndarray or ExtensionArray
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
dtype = _interleaved_dtype(self.blocks)
n = len(self)
if is_extension_array_dtype(dtype):
# we'll eventually construct an ExtensionArray.
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk.iget((i, loc))
if isinstance(dtype, ExtensionDtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
return result
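    # Rough illustration: fast_xs assembles row `loc` across all blocks, so for
    # a mixed-dtype frame the result is typically an object ndarray, e.g.
    #
    #   >>> df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
    #   >>> df._mgr.fast_xs(0)   # doctest: +SKIP
    #   array([1, 'x'], dtype=object)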
def consolidate(self) -> "BlockManager":
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = type(self)(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self) -> None:
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def iget(self, i: int) -> "SingleBlockManager":
"""
Return the data as a SingleBlockManager.
"""
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
# shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
block.make_block_same_class(
values, placement=slice(0, len(values)), ndim=1
),
self.axes[1],
)
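    # In other words, iget(i) is the internal analogue of df.iloc[:, i]: it
    # wraps the i'th column's values in a 1-dim SingleBlockManager that shares
    # self.axes[1] as its index (typically without copying the data).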
def iget_values(self, i: int) -> ArrayLike:
"""
Return the data for column i as the values (ndarray or ExtensionArray).
"""
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
return values
def idelete(self, indexer):
"""
Delete selected locations in-place (new block and array, same BlockManager)
"""
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self.blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
self._rebuild_blknos_and_blklocs()
def iset(self, loc: Union[int, slice, np.ndarray], value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
value = extract_array(value, extract_numpy=True)
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
if self._blklocs is None and self.ndim > 1:
self._rebuild_blknos_and_blklocs()
value_is_extension_type = is_extension_array_dtype(value)
# categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
if lib.is_integer(loc):
# We have 6 tests where loc is _not_ an int.
# In this case, get_blkno_placements will yield only one tuple,
# containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))
loc = [loc]
# Accessing public blknos ensures the public versions are initialized
blknos = self.blknos[loc]
blklocs = self.blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in libinternals.get_blkno_placements(blknos, group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set_inplace(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = new_blknos[self._blknos]
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks: List[Block] = []
if value_is_extension_type:
# This code (ab-)uses the fact that EA blocks contain only
# one item.
# TODO(EA2D): special casing unnecessary with 2D EAs
new_blocks.extend(
make_block(
values=value,
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError(f"cannot insert {item}, already exists")
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
# TODO(EA2D): special case not needed with 2D EAs
value = safe_reshape(value, (1,) + value.shape)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self.blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
# Accessing public blklocs ensures the public versions are initialized
if loc == self.blklocs.shape[0]:
# np.append is a lot faster, let's use it if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
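    # Note on the threshold above: inserting columns one at a time creates one
    # new block per call, so once more than 100 blocks accumulate the manager
    # re-consolidates to keep the blknos/blklocs bookkeeping and later
    # operations cheap.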
def reindex_axis(
self,
new_index,
axis: int,
method=None,
limit=None,
fill_value=None,
copy: bool = True,
):
"""
Conform block manager to new index.
"""
new_index = ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit
)
return self.reindex_indexer(
new_index, indexer, axis=axis, fill_value=fill_value, copy=copy
)
def reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
consolidate: bool = True,
) -> T:
"""
Parameters
----------
new_axis : Index
        indexer : ndarray of int64 or None
            pandas-style indexer in which -1 is the only allowed negative
            value (it marks locations to be filled).
axis : int
fill_value : object, default None
allow_dups : bool, default False
copy : bool, default True
consolidate: bool, default True
Whether to consolidate inplace before reindexing.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
if consolidate:
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer, fill_value=fill_value)
else:
new_blocks = [
blk.take_nd(
indexer,
axis=axis,
fill_value=(
fill_value if fill_value is not None else blk.fill_value
),
)
for blk in self.blocks
]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return type(self).from_blocks(new_blocks, new_axes)
def _slice_take_blocks_ax0(
self, slice_or_indexer, fill_value=lib.no_default, only_slice: bool = False
):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Parameters
----------
slice_or_indexer : slice, ndarray[bool], or list-like of ints
fill_value : scalar, default lib.no_default
only_slice : bool, default False
If True, we always return views on existing arrays, never copies.
This is used when called from ops.blockwise.operate_blockwise.
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_value is not lib.no_default
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill
)
if self.is_single_block:
blk = self.blocks[0]
if sl_type in ("slice", "mask"):
                # GH#32959 EABlock would fail since we can't make 0-width blocks
# TODO(EA2D): special casing unnecessary with 2D EAs
if sllen == 0:
return []
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_value is None:
_, fill_value = maybe_promote(blk.dtype)
if not allow_fill and only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
blocks = [
blk.getitem_block([ml], new_mgr_locs=i)
for i, ml in enumerate(slobj)
]
return blocks
else:
return [
blk.take_nd(
slobj,
axis=0,
new_mgr_locs=slice(0, sllen),
fill_value=fill_value,
)
]
if sl_type in ("slice", "mask"):
blknos = self.blknos[slobj]
blklocs = self.blklocs[slobj]
else:
blknos = algos.take_1d(
self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
)
blklocs = algos.take_1d(
self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
blocks = []
group = not only_slice
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
if blkno == -1:
# If we've got here, fill_value was not lib.no_default
blocks.append(
self._make_na_block(placement=mgr_locs, fill_value=fill_value)
)
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=False)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
# GH#32779 to avoid the performance penalty of copying,
# we may try to only slice
taker = blklocs[mgr_locs.indexer]
max_len = max(len(mgr_locs), taker.max() + 1)
if only_slice:
taker = lib.maybe_indices_to_slice(taker, max_len)
if isinstance(taker, slice):
nb = blk.getitem_block(taker, new_mgr_locs=mgr_locs)
blocks.append(nb)
elif only_slice:
# GH#33597 slice instead of take, so we get
# views instead of copies
for i, ml in zip(taker, mgr_locs):
nb = blk.getitem_block([i], new_mgr_locs=ml)
blocks.append(nb)
else:
nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
blocks.append(nb)
return blocks
def _make_na_block(self, placement, fill_value=None):
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception("Indices must be nonzero and less than the axis length")
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
def equals(self, other: object) -> bool:
if not isinstance(other, BlockManager):
return False
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
if self.ndim == 1:
# For SingleBlockManager (i.e.Series)
if other.ndim != 1:
return False
left = self.blocks[0].values
right = other.blocks[0].values
return array_equals(left, right)
return blockwise_all(self, other, array_equals)
def unstack(self, unstacker, fill_value) -> "BlockManager":
"""
        Return a BlockManager with all blocks unstacked.
Parameters
----------
unstacker : reshape._Unstacker
fill_value : Any
fill_value for newly introduced missing values.
Returns
-------
unstacked : BlockManager
"""
new_columns = unstacker.get_new_columns(self.items)
new_index = unstacker.new_index
new_blocks: List[Block] = []
columns_mask: List[np.ndarray] = []
for blk in self.blocks:
blk_cols = self.items[blk.mgr_locs.indexer]
new_items = unstacker.get_new_columns(blk_cols)
new_placement = new_columns.get_indexer(new_items)
blocks, mask = blk._unstack(
unstacker, fill_value, new_placement=new_placement
)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
is_single_block = True
def __init__(
self,
block: Block,
axis: Index,
do_integrity_check: bool = False,
fastpath=lib.no_default,
):
assert isinstance(block, Block), type(block)
assert isinstance(axis, Index), type(axis)
if fastpath is not lib.no_default:
warnings.warn(
"The `fastpath` keyword is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
self.axes = [axis]
self.blocks = tuple([block])
@classmethod
def from_blocks(
cls, blocks: List[Block], axes: List[Index]
) -> "SingleBlockManager":
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
assert len(blocks) == 1
assert len(axes) == 1
return cls(blocks[0], axes[0], do_integrity_check=False)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> "SingleBlockManager":
"""
Constructor for if we have an array that is not yet a Block.
"""
block = make_block(array, placement=slice(0, len(index)), ndim=1)
return cls(block, index)
def _post_setstate(self):
pass
@property
def _block(self) -> Block:
return self.blocks[0]
@property
def _blknos(self):
""" compat with BlockManager """
return None
@property
def _blklocs(self):
""" compat with BlockManager """
return None
def get_slice(self, slobj: slice, axis: int = 0) -> "SingleBlockManager":
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
blk = self._block
array = blk._slice(slobj)
block = blk.make_block_same_class(array, placement=slice(0, len(array)))
return type(self)(block, self.index[slobj])
@property
def index(self) -> Index:
return self.axes[0]
@property
def dtype(self) -> DtypeObj:
return self._block.dtype
def get_dtypes(self) -> np.ndarray:
return np.array([self._block.dtype])
def external_values(self):
"""The array that Series.values returns"""
return self._block.external_values()
def internal_values(self):
"""The array that Series._values returns"""
return self._block.internal_values()
@property
def _can_hold_na(self) -> bool:
return self._block._can_hold_na
def is_consolidated(self) -> bool:
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def idelete(self, indexer):
"""
Delete single location from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
self._block.delete(indexer)
self.axes[0] = self.axes[0].delete(indexer)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
raise NotImplementedError("Use series._values[loc] instead")
# --------------------------------------------------------------------
# Constructor Helpers
def create_block_manager_from_blocks(blocks, axes: List[Index]) -> BlockManager:
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [
make_block(values=blocks[0], placement=slice(0, len(axes[0])))
]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
blocks = [getattr(b, "values", b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
raise construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(
arrays, names: Index, axes: List[Index]
) -> BlockManager:
assert isinstance(names, Index)
assert isinstance(axes, list)
assert all(isinstance(x, Index) for x in axes)
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
raise construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
# Correcting the user facing error message during dataframe construction
if len(passed) <= 2:
passed = passed[::-1]
implied = tuple(len(ax) for ax in axes)
# Correcting the user facing error message during dataframe construction
if len(implied) <= 2:
implied = implied[::-1]
# We return the exception object instead of raising it so that we
# can raise it in the caller; mypy plays better with that
if passed == implied and e is not None:
return e
if block_shape[0] == 0:
return ValueError("Empty data passed with indices specified.")
return ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
# -----------------------------------------------------------------------
def form_blocks(arrays, names: Index, axes) -> List[Block]:
# put "leftover" items in float bucket, where else?
# generalize?
items_dict: DefaultDict[str, List] = defaultdict(list)
extra_locs = []
names_idx = names
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, k, v))
blocks: List[Block] = []
if len(items_dict["FloatBlock"]):
float_blocks = _multi_blockify(items_dict["FloatBlock"])
blocks.extend(float_blocks)
if len(items_dict["ComplexBlock"]):
complex_blocks = _multi_blockify(items_dict["ComplexBlock"])
blocks.extend(complex_blocks)
if len(items_dict["TimeDeltaBlock"]):
timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
blocks.extend(timedelta_blocks)
if len(items_dict["IntBlock"]):
int_blocks = _multi_blockify(items_dict["IntBlock"])
blocks.extend(int_blocks)
if len(items_dict["DatetimeBlock"]):
datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], DT64NS_DTYPE)
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
make_block(array, klass=DatetimeTZBlock, placement=i)
for i, _, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["BoolBlock"]):
bool_blocks = _simple_blockify(items_dict["BoolBlock"], np.bool_)
blocks.extend(bool_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
make_block(array, klass=CategoricalBlock, placement=i)
for i, _, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
make_block(array, klass=ExtensionBlock, placement=i)
for i, _, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(items_dict["ObjectValuesExtensionBlock"]):
external_blocks = [
make_block(array, klass=ObjectValuesExtensionBlock, placement=i)
for i, _, array in items_dict["ObjectValuesExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype) -> List[Block]:
"""
return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# TODO: CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x) -> Shape:
if isinstance(x, ABCSeries):
return (len(x),)
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks: Sequence[Block]) -> Optional[DtypeObj]:
"""
Find the common dtype for `blocks`.
Parameters
----------
blocks : List[Block]
Returns
-------
dtype : np.dtype, ExtensionDtype, or None
None is returned when `blocks` is empty.
"""
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks: List[Block] = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(
list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
)
new_blocks.extend(merged_blocks)
return new_blocks
def _merge_blocks(
blocks: List[Block], dtype: DtypeObj, can_consolidate: bool
) -> List[Block]:
if len(blocks) == 1:
return blocks
if can_consolidate:
if dtype is None:
if len({b.dtype for b in blocks}) != 1:
raise AssertionError("_merge_blocks are invalid!")
# TODO: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = np.vstack([b.values for b in blocks])
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return [make_block(new_values, placement=new_mgr_locs)]
# can't consolidate --> no merge
return blocks
def _fast_count_smallints(arr: np.ndarray) -> np.ndarray:
"""Faster version of set(arr) for sequences of small numbers."""
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
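# Worked illustration (assumed example input, not taken from the pandas sources):
# for arr = np.array([0, 1, 1, 3]), np.bincount gives [1, 2, 0, 1]; dropping the
# zero counts yields np.c_[[0, 1, 3], [1, 2, 1]], i.e. one [value, count] row per
# distinct value, so the "set(arr)" wording in the docstring is only a loose analogy.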
def _preprocess_slice_or_indexer(slice_or_indexer, length: int, allow_fill: bool):
if isinstance(slice_or_indexer, slice):
return (
"slice",
slice_or_indexer,
libinternals.slice_len(slice_or_indexer, length),
)
elif (
isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_
):
return "mask", slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
| 31.771633
| 88
| 0.571378
|
804fc62be2edfbd57a9df58d3706561181aebbfc
| 2,750
|
py
|
Python
|
exercicios/1-9.py
|
figueredo/pda
|
26003d6f9a30fd928b3cc709379b456c8950b63c
|
[
"MIT"
] | null | null | null |
exercicios/1-9.py
|
figueredo/pda
|
26003d6f9a30fd928b3cc709379b456c8950b63c
|
[
"MIT"
] | null | null | null |
exercicios/1-9.py
|
figueredo/pda
|
26003d6f9a30fd928b3cc709379b456c8950b63c
|
[
"MIT"
] | null | null | null |
# Change the script so that, instead of printing the result on screen, it saves
# it to a file in CSV (comma separated values) format.
# Each line must contain the investment name as the first element,
# followed by the results for each year.
#
# CSV:
# - A table
# - Each line of the file represents one row of the table
# - Columns are delimited by ",".
# Example of a CSV with two rows and four columns:
# 1,2,3,4
# 1,2,4,5
#
# Hints:
# See the string method join()
# A new line inside a string is represented by "\n". E.g.:
# 'First line\nSecond line'
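# A minimal sketch of the hinted approach (the values below are illustrative only):
# join() glues the column values with commas and "\n" terminates the row.
_example_row = ','.join(['POUP', '1000.00', '1004.69']) + '\n'  # -> "POUP,1000.00,1004.69\n"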
def compound_interest(amount, interest_rate, time):
return amount * (1 + interest_rate) ** time
def calculate_return_poupanca(amount, time_year):
interest_rate_month = 0.469 / 100
return compound_interest(amount, interest_rate_month, time_year * 12)
def calculate_return_xp(amount, time_year):
interest_rate_year = (105 / 100) * (7.5 / 100)
return compound_interest(amount, interest_rate_year, time_year)
def calculate_all_return_poupanca(amount, time_year):
return [calculate_return_poupanca(amount, year) for year in range(time_year + 1)]
def calculate_all_return_xp(amount, time_year):
return [calculate_return_xp(amount, year) for year in range(time_year + 1)]
def calculate_all_investment_returns(amount, time_year):
roi_poupanca = calculate_all_return_poupanca(amount, time_year)
roi_xp = calculate_all_return_xp(amount, time_year)
return [['POUP'] + roi_poupanca, ['XP'] + roi_xp]
def ask_name():
name = None
while name is None or name.isnumeric():
name = input('Qual é o seu nome? ')
return name
def ask_amount():
amount = None
while amount is None or not amount.isnumeric():
amount = input('Quanto deseja investir? R$')
return float(amount)
def ask_time_year():
time_year = None
while time_year is None or not time_year.isnumeric():
time_year = input('Em quantos _anos_ deseja resgatar seu dinheiro? ')
return int(time_year)
def to_formatted_list(float_list):
element_template = '{0:.2f}'
return [element_template.format(element) for element in float_list]
def to_csv_str(values):
return ','.join(values)
def to_csv(results):
lines = ['{name},{values}\n'.format(
name=result[0],
values=to_csv_str(to_formatted_list(result[1:]))
) for result in results]
return ''.join(lines)
def write_file(filename, content):
result_file = open(filename, 'w')
result_file.write(content)
result_file.close()
def save_results(results):
content = to_csv(results)
write_file('result.csv', content)
name = ask_name()
amount = ask_amount()
time_year = ask_time_year()
results = calculate_all_investment_returns(amount, time_year)
save_results(results)
print("{0}, o resultado foi salvo em 'result.csv'".format(name))
| 30.898876
| 83
| 0.740364
|
b90dcca8c743b4fcea2057c54f31539d724580ab
| 1,195
|
py
|
Python
|
core/django_arcade_core/game_event.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | 1
|
2021-10-03T05:44:32.000Z
|
2021-10-03T05:44:32.000Z
|
core/django_arcade_core/game_event.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | null | null | null |
core/django_arcade_core/game_event.py
|
kfields/django-arcade
|
24df3d43dde2d69df333529d8790507fb1f5fcf1
|
[
"MIT"
] | null | null | null |
from .message import Event
factories = None
class GameEvent(Event):
def __init__(self, id):
super().__init__(id)
@classmethod
def produce(self, data):
return factories[data['__typename']](data)
class JoinEvent(GameEvent):
def __init__(self, id, playerId):
super().__init__(id)
self.player_id = playerId
class StartEvent(GameEvent):
pass
class TurnEvent(GameEvent):
def __init__(self, id, playerId):
super().__init__(id)
self.player_id = playerId
class MarkEvent(GameEvent):
def __init__(self, id, symbol, x, y):
super().__init__(id)
self.symbol = symbol
self.x = x
self.y = y
class EndEvent(GameEvent):
def __init__(self, id, playerId):
super().__init__(id)
self.player_id = playerId
factories = {
'JoinEvent': lambda data: JoinEvent(data['id'], data['playerId']),
'StartEvent': lambda data: StartEvent(data['id']),
'TurnEvent': lambda data: TurnEvent(data['id'], data['playerId']),
'MarkEvent': lambda data: MarkEvent(data['id'], data['symbol'], data['x'], data['y']),
'EndEvent': lambda data: EndEvent(data['id'], data['playerId']),
}
| 25.978261
| 90
| 0.630126
|
e941fd9b62783c62be7a05f0cc49c912c6f8f459
| 2,985
|
py
|
Python
|
tests/aws_interfaces_tests/test_s3_interface.py
|
aws-samples/aws-iot-ota-deployment-tool
|
23ecbbcd37e92af4f37ba2b247f859d910fd6b80
|
[
"MIT-0"
] | 28
|
2020-01-30T01:29:14.000Z
|
2022-03-06T06:31:58.000Z
|
tests/aws_interfaces_tests/test_s3_interface.py
|
aws-samples/aws-iot-ota-deployment-tool
|
23ecbbcd37e92af4f37ba2b247f859d910fd6b80
|
[
"MIT-0"
] | 1
|
2021-06-02T02:39:39.000Z
|
2021-06-02T02:39:39.000Z
|
tests/aws_interfaces_tests/test_s3_interface.py
|
aws-samples/aws-iot-ota-deployment-tool
|
23ecbbcd37e92af4f37ba2b247f859d910fd6b80
|
[
"MIT-0"
] | 5
|
2020-02-16T01:42:55.000Z
|
2022-02-24T14:57:16.000Z
|
import unittest
from unittest.mock import Mock, call
from unittest.mock import patch
from aws_interfaces.s3_interface import S3Interface
from botocore.exceptions import ClientError
from pathlib import Path
REGION = 'us-east-1'
FILE_NAME = 'test.bin'
BUCKET = 'testBucket'
KEY = 'testObjectName'
Path(FILE_NAME).touch()
class S3InterfaceTests(unittest.TestCase):
def setUp(self):
self.s3_interface = S3Interface(REGION)
self.s3_interface.client.put_object = Mock(return_value=None)
self.s3_interface.client.download_file = Mock(return_value=None)
def test_upload_file_to_s3(self):
mock_open = unittest.mock.mock_open(read_data=None)
# When this happens
with unittest.mock.patch('builtins.open', mock_open):
objectData = open(FILE_NAME, 'rb')
self.s3_interface.upload_file_to_s3(fileName=FILE_NAME, bucket=BUCKET, key=KEY)
# Expect these behaviors
self.s3_interface.client.put_object.assert_called_with(Body=objectData, Bucket=BUCKET, Key=KEY)
objectData.close()
def test_upload_file_to_s3_file_not_exist(self):
mock_open = unittest.mock.mock_open(read_data=None)
# When this happens
with unittest.mock.patch('builtins.open', mock_open):
objectData = open(FILE_NAME, 'rb')
with patch('os.path.isfile', return_value=False) as mocked_os:
with self.assertRaises(Exception):
self.s3_interface.upload_file_to_s3(fileName=objectData, bucket=BUCKET, key=KEY)
objectData.close()
def test_upload_file_to_s3_exception(self):
self.s3_interface.client.put_object = Mock(side_effect=ClientError({}, 'test'))
mock_open = unittest.mock.mock_open(read_data=None)
# When this happens
with unittest.mock.patch('builtins.open', mock_open):
objectData = open(FILE_NAME, 'rb')
with self.assertRaises(Exception):
self.s3_interface.upload_file_to_s3(fileName=FILE_NAME, bucket=BUCKET, key=KEY)
# Expect these behaviors
self.s3_interface.client.put_object.assert_called_with(Body=objectData, Bucket=BUCKET, Key=KEY)
objectData.close()
def test_download_file_from_s3(self):
# When this happens
self.s3_interface.download_file_from_s3(fileName=FILE_NAME, bucket=BUCKET, key=KEY)
# Expect these behaviors
self.s3_interface.client.download_file.assert_called_with(Bucket=BUCKET, Key=KEY, Filename=FILE_NAME)
def test_download_file_from_s3_exception(self):
self.s3_interface.client.download_file = Mock(side_effect=ClientError({}, 'test'))
# When this happens
with self.assertRaises(Exception):
self.s3_interface.download_file_from_s3(fileName=FILE_NAME, bucket=BUCKET, key=KEY)
# Expect these behaviors
self.s3_interface.client.download_file.assert_called_with(Bucket=BUCKET, Key=KEY, Filename=FILE_NAME)
| 39.276316
| 109
| 0.709213
|
40813bf16b65e09f4c617102dc691a27354eab85
| 419
|
py
|
Python
|
django_project/users/signals.py
|
Amol0296/Blog_Post_Website
|
c08e729d0ffb435276e437b933987e15cb567484
|
[
"Apache-2.0"
] | null | null | null |
django_project/users/signals.py
|
Amol0296/Blog_Post_Website
|
c08e729d0ffb435276e437b933987e15cb567484
|
[
"Apache-2.0"
] | null | null | null |
django_project/users/signals.py
|
Amol0296/Blog_Post_Website
|
c08e729d0ffb435276e437b933987e15cb567484
|
[
"Apache-2.0"
] | null | null | null |
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
@receiver(post_save,sender=User)
def create_profile(sender,instance,created,**kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save,sender=User)
def save_profile(sender,instance,**kwargs):
instance.profile.save()
| 24.647059
| 53
| 0.775656
|
dca404d540bbc85c482d61234d182fc7b31e4de1
| 3,124
|
py
|
Python
|
contrib/testgen/base58.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | 4
|
2021-02-28T04:34:58.000Z
|
2021-09-14T15:25:31.000Z
|
contrib/testgen/base58.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | null | null | null |
contrib/testgen/base58.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | 1
|
2021-06-18T13:13:17.000Z
|
2021-06-18T13:13:17.000Z
|
# Copyright (c) 2012-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Satcoin base58 encoding and decoding.
Based on https://satcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
if isinstance(c, str):
c = ord(c)
long_value += (256**i) * c
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Satcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0:
nPad += 1
else:
break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for i, c in enumerate(v[::-1]):
pos = __b58chars.find(c)
assert pos != -1
long_value += pos * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
continue
break
result = bytes(nPad) + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21:
return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/satcoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| 26.931034
| 97
| 0.621639
|
d0f3f4bfba6f60f95e5d2c4209f8fce6fe26be57
| 6,140
|
py
|
Python
|
benchmark.py
|
PRRAG/DeepSpeech-PyTorch
|
cc65e008dacce794309b55bc4d3d75e909731b56
|
[
"MIT"
] | 1
|
2020-05-31T13:41:44.000Z
|
2020-05-31T13:41:44.000Z
|
benchmark.py
|
PRRAG/DeepSpeech-PyTorch
|
cc65e008dacce794309b55bc4d3d75e909731b56
|
[
"MIT"
] | null | null | null |
benchmark.py
|
PRRAG/DeepSpeech-PyTorch
|
cc65e008dacce794309b55bc4d3d75e909731b56
|
[
"MIT"
] | null | null | null |
"""
This file is to test if the training can occur on the hardware we use or not
"""
import argparse
import json
import time
import torch
import torch.distributed as dist
import torch.utils.data.distributed
from apex.fp16_utils import FP16_Optimizer
from apex.parallel import DistributedDataParallel
from tqdm import tqdm
from tqdm import trange
from warpctc_pytorch import CTCLoss
from model import DeepSpeech, supported_rnns
from utils import convert_model_to_half
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=32, help='Size of input')
parser.add_argument('--seconds', type=int, default=15,
help='The size of the fake input in seconds using default stride of 0.01, '
'15s is usually the maximum duration')
parser.add_argument('--dry-runs', type=int, default=2, help='Dry runs before measuring performance')
parser.add_argument('--runs', type=int, default=5, help='How many benchmark runs to measure performance')
parser.add_argument('--labels-path', default='labels.json', help='Path to the labels to infer over in the model')
parser.add_argument('--hidden-size', default=800, type=int, help='Hidden size of RNNs')
parser.add_argument('--hidden-layers', default=5, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='gru', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--num-samples', default=1024, type=int, help='Number of samples to go through')
parser.add_argument('--mixed-precision', action='store_true', help='Use Mixed Precision to train the model')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:1550', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist_backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--rank', default=0, type=int, help='The rank of this process')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale for mixed precision, ' +
'positive power of 2 values can improve FP16 convergence,' +
'however dynamic loss scaling is preferred.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling for mixed precision. If supplied, this argument supersedes ' +
'--static_loss_scale. Suggested to turn on for mixed precision')
args = parser.parse_args()
device = torch.device("cuda")
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.distributed:
input_data = torch.randn(int(args.num_samples / args.world_size), 1, 161, args.seconds * 100)
else:
input_data = torch.randn(args.num_samples, 1, 161, args.seconds * 100)
input_data = input_data.to(device)
input_data = torch.chunk(input_data, int(len(input_data) / args.batch_size))
rnn_type = args.rnn_type.lower()
assert rnn_type in supported_rnns, "rnn_type should be either lstm, rnn or gru"
with open(args.labels_path) as label_file:
labels = str(''.join(json.load(label_file)))
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size)
model = DeepSpeech(rnn_hidden_size=args.hidden_size,
nb_layers=args.hidden_layers,
audio_conf=audio_conf,
labels=labels,
rnn_type=supported_rnns[rnn_type],
mixed_precision=args.mixed_precision)
model = model.to(device)
if args.mixed_precision:
model = convert_model_to_half(model)
print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
parameters = model.parameters()
optimizer = torch.optim.SGD(parameters, lr=3e-4, momentum=0.9, nesterov=True, weight_decay=1e-5)
if args.distributed:
model = DistributedDataParallel(model)
if args.mixed_precision:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=args.static_loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale)
criterion = CTCLoss()
seconds = int(args.seconds)
batch_size = int(args.batch_size)
def iteration(inputs):
# targets, align half of the audio
targets = torch.ones(int(batch_size * ((seconds * 100) / 2)))
target_sizes = torch.empty(batch_size, dtype=torch.int).fill_(int((seconds * 100) / 2))
input_percentages = torch.ones(batch_size).fill_(1)
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
out, output_sizes = model(inputs, input_sizes)
out = out.transpose(0, 1) # TxNxH
float_out = out.float() # ensure float32 for loss
loss = criterion(float_out, targets, output_sizes, target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
optimizer.zero_grad()
# compute gradient
if args.mixed_precision:
optimizer.backward(loss)
optimizer.clip_master_grads(400)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 400)
optimizer.step()
del loss, out, float_out
def run_benchmark():
print("Running dry runs...")
for n in trange(args.dry_runs):
for data in tqdm(input_data, total=len(input_data)):
iteration(data)
print("\n Running measured runs...")
running_time = 0
for n in trange(args.runs):
start_time = time.time()
for data in tqdm(input_data, total=len(input_data)):
iteration(data)
end_time = time.time()
running_time += (end_time - start_time)
return running_time / float(args.runs)
run_time = run_benchmark()
print("\n Average run time: %.2fs" % run_time)
| 42.638889
| 113
| 0.696254
|
ce4d1cea8d8dfe4762588a5efb6ea66b669fa18f
| 9,239
|
py
|
Python
|
rawtools/gui/nsihdr.py
|
Topp-Roots-Lab/python-rawtools
|
f453388eba32eaa6cddc5decfd72a3dbb657b198
|
[
"BSD-3-Clause"
] | null | null | null |
rawtools/gui/nsihdr.py
|
Topp-Roots-Lab/python-rawtools
|
f453388eba32eaa6cddc5decfd72a3dbb657b198
|
[
"BSD-3-Clause"
] | null | null | null |
rawtools/gui/nsihdr.py
|
Topp-Roots-Lab/python-rawtools
|
f453388eba32eaa6cddc5decfd72a3dbb657b198
|
[
"BSD-3-Clause"
] | null | null | null |
""""GUI for NSIHDR conversion tool"""
import logging
import os
import tkinter as tk
from enum import Enum
from importlib.metadata import version
from pprint import pformat
from tkinter import (E, N, S, StringVar, Toplevel, W, filedialog, ttk)
from rawtools import nsihdr
from ttkthemes import ThemedTk
__version__ = version('rawtools')
def center(root, toplevel):
toplevel.update_idletasks()
# Tkinter way to find the screen resolution
# screen_width = toplevel.winfo_screenwidth()
# screen_height = toplevel.winfo_screenheight()
# PyQt way to find the screen resolution
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x'))
x = screen_width/2 - size[0]/2
y = screen_height/2 - size[1]/2
toplevel.geometry("+%d+%d" % (x, y))
class App():
def __init__(self, args):
self.source = 'C:/Users/Tim Parker/Datasets/topp/xrt/development/batch2'
self.args = args
# Source: https://www.elegantthemes.com/blog/freebie-of-the-week/beautiful-flat-icons-for-free
self.icon_fp = "rawtools\\assets\\tools.ico"
self.icon_caution_fp = "rawtools\\assets\\caution.ico"
self.state = 'idle'
self.root = ThemedTk(theme='arc')
root = self.root
root.title(f"Batch Export Tool v{__version__}")
root.resizable(False, False)
menubar = tk.Menu(root)
file_menu = tk.Menu(menubar, tearoff=False)
file_menu.add_command(label="View Logs", command=lambda: print("Load logs"))
file_menu.add_separator()
file_menu.add_command(label="Quit", command=self.quitApplication, accelerator='Ctrl-Q')
menubar.add_cascade(label="File", menu=file_menu)
help_menu = tk.Menu(menubar, tearoff=False)
help_menu.add_command(label="About", command = None)
help_menu.add_separator()
help_menu.add_command(label="Documentation")
menubar.add_cascade(label="Help", menu=help_menu)
root.config(menu = menubar)
# Assign hotkey(s)
root.bind("<Control-q>", self.quitApplication)
mainframe = ttk.Frame(root, padding="16 16")
mainframe.grid(column=0, row=0, sticky=(N, S, E, W))
self.mainframe = mainframe
root.iconbitmap(self.icon_fp)
# Source folder selection
src_intro_label_text = "Select an NSI Reconstruction folder."
src_intro_label = ttk.Label(mainframe, text=src_intro_label_text)
src_intro_label.grid(row=0, column=0, sticky=(E,W), pady="0 8")
self.src = tk.StringVar()
self.src.set(self.source)
# # Add event handling to changes to the source directory text field
self.src_entry = ttk.Entry(mainframe, textvariable = self.src, width=85)
self.src_entry.grid(row=1, column=0, columnspan=3, sticky=(E, W), padx="0 8", pady="0 16")
self.src_folder_btn = ttk.Button(mainframe, text = 'Select Folder', command=self.choose_src)
self.src_folder_btn.grid(row=1, column=4, columnspan=1, pady="0 16", padx="8 0")
# Export data
self.export_btn = ttk.Button(mainframe, text = 'Export', command=self.export)
self.export_btn.grid(row=2, column=0, columnspan=5, pady="0 8")
# Center window on screen
root.update() # virtual pre-render of GUI to calculate actual sizes
w = root.winfo_reqwidth()
h = root.winfo_reqheight()
logging.debug(f"Root width: {w}")
logging.debug(f"Root height: {h}")
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
# calculate position x, y
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
root.geometry('+%d+%d' % (x, y))
# Display window to user
root.mainloop()
def choose_src(self):
"""Select a folder to act as data source"""
self.source = filedialog.askdirectory(initialdir=self.source, title="Choose directory")
logging.debug(f'Selected folder: {self.source}')
self.src.set(self.source)
def scan_folder(self, path):
"""Scan folder for nsihdr and corresponding raw files
Args:
path (str): Input path
"""
logging.debug(f"{path=}")
if len(path) < 2:
return
# Invalid path provided, abort
if not (os.path.exists(path) and os.path.isdir(path)):
return
# Get all files
files = [ files for r, d, files in os.walk(path) ][0]
logging.debug(f"{files=}")
# Filter NSIHDR files
nsihdr_files = [ f for f in files if f.endswith('.nsihdr') ]
logging.debug(f"{nsihdr_files=}")
# Filter RAW files
raw_files = [ f for f in files if f.endswith('.raw') ]
logging.debug(f"{raw_files=}")
# Determine what RAW would be created from the NSIHDR files
expected_raw_files = [ '.'.join([os.path.splitext(f)[0], 'raw']) for f in nsihdr_files ]
logging.debug(f"{expected_raw_files=}")
# # Get all files
logging.debug(f"All input scans: {nsihdr_files}")
nsihdr_files = list(set(nsihdr_files)) # remove duplicates
logging.debug(f"Unique input scans: {nsihdr_files}")
return nsihdr_files, raw_files, expected_raw_files
def export(self):
# Get selected path
path = self.src.get()
self.args.path = [path] # CLI requires list of paths
self.cancelled = False
# Scan input directory for .NSIHDR files
nsihdr_files, raw_files, expected_raw_files = self.scan_folder(path)
# Prompt user with actions
# Case 1: Existing data
overlapping_raw_files = list(set(raw_files) & set(expected_raw_files))
logging.debug(f"{overlapping_raw_files=}")
if len(overlapping_raw_files) > 0:
prompt_title = "Warning - File Conflict Encountered"
at_risk_files = '\n'.join(overlapping_raw_files)
if len(overlapping_raw_files) == 1:
prompt_message = "A conflict in the data files was encountered.\n\nThe following reconstructed volume appears to have already been exported.\n\n"+at_risk_files+"\n\nDo you want to overwrite this file? This will first *destroy* it."
else:
prompt_message = "A conflict in the data files was encountered.\n\nThe following reconstructed volumes appear to have already been exported.\n\n"+at_risk_files+"\n\nDo you want to overwrite these files? This will first *destroy* them."
logging.warning(prompt_message)
self.prompt = Toplevel(self.root)
self.prompt.title(prompt_title)
self.prompt.iconbitmap(self.icon_caution_fp)
self.prompt.resizable(False, False)
self.prompt_frame = ttk.Frame(self.prompt, padding="16 16")
self.prompt_frame.grid(column=0, row=0, sticky=(N, S, E, W))
self.prompt_message = ttk.Label(self.prompt_frame, text=prompt_message).grid(row = 0, column = 0, columnspan=3, pady="0 32")
self.prompt_button = ttk.Button(self.prompt_frame, text="Overwrite", command=self.overwrite_files).grid(row = 1, column = 0, columnspan=1)
self.prompt_button = ttk.Button(self.prompt_frame, text="Skip", command=self.skip_files).grid(row = 1, column = 1, columnspan=1)
self.prompt_button = ttk.Button(self.prompt_frame, text="Cancel", command=self.cancel_export).grid(row = 1, column = 2, columnspan=1)
# Orient window on screen
center(self.root, self.prompt)
# Disable interaction with parent window
self.prompt.protocol("WM_DELETE_WINDOW", self.dismiss)
self.prompt.transient(self.root)
self.prompt.wait_visibility()
self.prompt.grab_set()
self.prompt.wait_window()
# Only new data was found
else:
# Case 2: New data
prompt_title = "Confirm Action - Export"
expected_raw_files = '\n'.join(expected_raw_files)
if len(overlapping_raw_files) == 1:
prompt_message = "The following file will be generated.\n\n"+expected_raw_files
else:
prompt_message = "The following files will be generated.\n\n"+expected_raw_files
logging.debug(prompt_message)
self.prompt = Toplevel(self.root)
self.prompt.title(prompt_title)
self.prompt.iconbitmap(self.icon_fp)
self.prompt.resizable(False, False)
prompt_frame = ttk.Frame(self.prompt, padding="16 16")
prompt_frame.grid(column=0, row=0, sticky=(N, S, E, W))
self.prompt_message = ttk.Label(prompt_frame, text=prompt_message).grid(row = 0, column = 0, columnspan=4, pady="0 32")
self.prompt_button = ttk.Button(prompt_frame, text="Ok", command=self.dismiss).grid(row = 1, column = 1, columnspan=1)
self.prompt_button = ttk.Button(prompt_frame, text="Cancel", command=self.cancel_export).grid(row = 1, column = 2, columnspan=1)
# Orient window on screen
center(self.root, self.prompt)
# Disable interaction with parent window
self.prompt.protocol("WM_DELETE_WINDOW", self.dismiss)
self.prompt.transient(self.root)
self.prompt.wait_visibility()
self.prompt.grab_set()
self.prompt.wait_window()
self.prompt_frame.grid_forget()
self.prompt = None
# Process data
if not self.cancelled:
# Do processing
logging.debug(self.args)
self.args.app = self
nsihdr.main(self.args)
else:
logging.debug(f"Cancelled export")
def quitApplication(self, _event=None):
self.root.destroy()
def overwrite_files(self):
# Case 1a: Overwrite all data and create new
self.args.force = True
self.dismiss()
def skip_files(self):
# Case 1b: Skip all existing data
self.args.force = False # just in case it was enabled via CLI
self.dismiss()
def cancel_export(self):
# Case 1b: Skip all existing data
self.cancelled = True
self.dismiss()
def dismiss(self):
self.prompt.grab_release()
self.prompt.destroy()
def dismiss_progress_prompt(self):
self.progress_bar_prompt.grab_release()
self.progress_bar_prompt.destroy()
| 35.671815
| 239
| 0.721182
|
8c3bb894da81c3e2c7b5a331b858a19d4e77093c
| 5,002
|
py
|
Python
|
docs/sphinx/conf.py
|
ExternalRepositories/CHAI
|
f9d3bd096d6d860e86145f2858f7d649fda3dcc4
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 73
|
2017-07-27T23:15:04.000Z
|
2022-01-30T17:59:43.000Z
|
docs/sphinx/conf.py
|
ExternalRepositories/CHAI
|
f9d3bd096d6d860e86145f2858f7d649fda3dcc4
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 107
|
2017-09-06T00:45:40.000Z
|
2022-03-31T23:35:15.000Z
|
docs/sphinx/conf.py
|
ExternalRepositories/CHAI
|
f9d3bd096d6d860e86145f2858f7d649fda3dcc4
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 19
|
2017-10-17T18:17:48.000Z
|
2021-08-13T22:19:40.000Z
|
##############################################################################
# Copyright (c) 2016-20, Lawrence Livermore National Security, LLC and CHAI
# project contributors. See the COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
##############################################################################
# -*- coding: utf-8 -*-
#
# CHAI documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 30 12:14:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CHAI'
copyright = u'2016-2018, Lawrence Livermore National Security, LLC.'
author = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.4'
# The full version, including alpha/beta/rc tags.
release = u'2.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'CHAIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CHAI.tex', u'CHAI Documentation',
u'CHAI Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chai', u'CHAI Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CHAI', u'CHAI Documentation',
author, 'CHAI', 'One line description of project.',
'Miscellaneous'),
]
| 30.315152
| 79
| 0.659936
|
a7b6ac7a413f2b8328de8de2d040a6b01674f9bf
| 4,492
|
py
|
Python
|
src/geometry.py
|
ralmeidabem/garoopa
|
76e7cd39ce939780c1725d8f287a00392bd58a3f
|
[
"MIT"
] | 1
|
2019-11-13T14:14:38.000Z
|
2019-11-13T14:14:38.000Z
|
src/geometry.py
|
ralmeidabem/garoopa
|
76e7cd39ce939780c1725d8f287a00392bd58a3f
|
[
"MIT"
] | null | null | null |
src/geometry.py
|
ralmeidabem/garoopa
|
76e7cd39ce939780c1725d8f287a00392bd58a3f
|
[
"MIT"
] | null | null | null |
# Python program to implement Cohen Sutherland algorithm for line clipping.
# Adapted from: https://www.geeksforgeeks.org/line-clipping-set-1-cohen-sutherland-algorithm/
# Defining x_max,y_max and x_min,y_min for rectangle
# Since diagonal points are enough to define a rectangle
# x_max = 10.0
# y_max = 8.0
# x_min = 4.0
# y_min = 4.0
# Defining region codes
INSIDE = 0 # 0000
LEFT = 1 # 0001
RIGHT = 2 # 0010
BOTTOM = 4 # 0100
TOP = 8 # 1000
class Geometry:
__instance = None
@staticmethod
def get_instance():
if Geometry.__instance is None:
Geometry()
return Geometry.__instance
def set_pos(self, pos):
self.x_min = pos[0][0]
self.y_min = pos[0][1]
self.x_max = pos[1][0]
self.y_max = pos[1][1]
def __init__(self, x_max=0, y_max=0, x_min=0, y_min=0):
if Geometry.__instance is not None:
raise Exception("This class is a singleton.")
else:
self.x_max = x_max
self.y_max = y_max
self.x_min = x_min
self.y_min = y_min
Geometry.__instance = self
# Function to compute region code for a point(x,y)
def computeCode(self, x, y):
code = INSIDE
if x < self.x_min: # to the left of rectangle
code |= LEFT
elif x > self.x_max: # to the right of rectangle
code |= RIGHT
if y < self.y_min: # below the rectangle
code |= BOTTOM
elif y > self.y_max: # above the rectangle
code |= TOP
return code
# Implementing Cohen-Sutherland algorithm
# Clipping a line from P1 = (x1, y1) to P2 = (x2, y2)
def cohenSutherlandClip(self, x1, y1, x2, y2):
# Compute region codes for P1, P2
code1 = self.computeCode(x1, y1)
code2 = self.computeCode(x2, y2)
accept = False
while True:
# If both endpoints lie within rectangle
if code1 == 0 and code2 == 0:
accept = True
break
# If both endpoints are outside rectangle
elif (code1 & code2) != 0:
break
# Some segment lies within the rectangle
else:
# Line Needs clipping
# At least one of the points is outside,
# select it
x = 1.0
y = 1.0
if code1 != 0:
code_out = code1
else:
code_out = code2
# Find intersection point
# using formulas y = y1 + slope * (x - x1),
# x = x1 + (1 / slope) * (y - y1)
if code_out & TOP:
# point is above the clip rectangle
x = x1 + (x2 - x1) * \
(self.y_max - y1) / (y2 - y1)
y = self.y_max
elif code_out & BOTTOM:
# point is below the clip rectangle
x = x1 + (x2 - x1) * \
(self.y_min - y1) / (y2 - y1)
y = self.y_min
elif code_out & RIGHT:
# point is to the right of the clip rectangle
y = y1 + (y2 - y1) * \
(self.x_max - x1) / (x2 - x1)
x = self.x_max
elif code_out & LEFT:
# point is to the left of the clip rectangle
y = y1 + (y2 - y1) * \
(self.x_min - x1) / (x2 - x1)
x = self.x_min
# Now intersection point x,y is found
# We replace point outside clipping rectangle
# by intersection point
if code_out == code1:
x1 = x
y1 = y
code1 = self.computeCode(x1, y1)
else:
x2 = x
y2 = y
code2 = self.computeCode(x2, y2)
if accept:
return True
else:
return False
def in_rectangle(self, x1, y1, x2, y2):
# Compute region codes for P1, P2
code1 = self.computeCode(x1, y1)
code2 = self.computeCode(x2, y2)
# If both endpoints are outside rectangle
if (code1 & code2) != 0:
return False
# Either the line is completely inside or partially inside...
return True
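# Minimal usage sketch (illustrative window and segment coordinates, not part of
# the game code): configure the singleton clip window, then clip segments against it.
if __name__ == '__main__':
    _geom = Geometry.get_instance()
    _geom.set_pos([(4.0, 4.0), (10.0, 8.0)])  # ((x_min, y_min), (x_max, y_max))
    print(_geom.cohenSutherlandClip(5.0, 5.0, 7.0, 7.0))   # True: fully inside
    print(_geom.cohenSutherlandClip(1.0, 1.0, 2.0, 2.0))   # False: fully outside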
| 33.774436
| 93
| 0.483081
|
2ffcbf3e566c0c3decf742827029005175ab88bf
| 227
|
py
|
Python
|
app/schemas/reset.py
|
Gingernaut/microAuth
|
0355147b571ed5d9e50ab534f13eb6eee6d9a219
|
[
"MIT"
] | 28
|
2017-10-03T21:33:43.000Z
|
2021-03-04T08:04:36.000Z
|
app/schemas/reset.py
|
Gingernaut/microAuth
|
0355147b571ed5d9e50ab534f13eb6eee6d9a219
|
[
"MIT"
] | 406
|
2017-12-10T22:24:31.000Z
|
2021-07-26T12:16:38.000Z
|
app/schemas/reset.py
|
Gingernaut/microAuth
|
0355147b571ed5d9e50ab534f13eb6eee6d9a219
|
[
"MIT"
] | 4
|
2019-03-04T14:14:25.000Z
|
2021-08-07T12:47:47.000Z
|
from pydantic import BaseModel
from datetime import datetime
class Reset(BaseModel):
id: int
userId: int
createdTime: datetime
expireTime: datetime
isValid: bool
class Config:
orm_mode = True
| 16.214286
| 30
| 0.69163
|
19603a84c5d15096f73a0244badbeff30854c5e2
| 569
|
py
|
Python
|
fs/utils.py
|
Trietptm-on-Coding-Algorithms/imageio
|
4afc0fbfb42021b9c4aec1abb2b7224ef8a8227c
|
[
"Apache-2.0"
] | 5
|
2020-04-12T20:01:08.000Z
|
2021-01-03T13:27:04.000Z
|
fs/utils.py
|
0xPhoeniX/imageio
|
4afc0fbfb42021b9c4aec1abb2b7224ef8a8227c
|
[
"Apache-2.0"
] | null | null | null |
fs/utils.py
|
0xPhoeniX/imageio
|
4afc0fbfb42021b9c4aec1abb2b7224ef8a8227c
|
[
"Apache-2.0"
] | 1
|
2020-06-15T16:36:47.000Z
|
2020-06-15T16:36:47.000Z
|
def hexdump(src, size=None, length=16):
if src is None:
return ''
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
if size is None:
size = len(src)
size = min(size, len(src))
lines = []
for c in range(0, size, length):
chars = src[c:c + length]
hex = ' '.join(["%02x" % x for x in chars])
printable = ''.join(["%s" % ((x <= 127 and FILTER[x]) or '.') for x in chars])
lines.append("%04x %-*s %s\n" % (c, length * 3, hex, printable))
return ''.join(lines)
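# Usage sketch (illustrative, not part of the original module):
# hexdump(b'imageio\x00!') returns one formatted line per 16 bytes -- a hex
# offset, the space-separated hex bytes padded to a fixed width, and a
# printable-ASCII column where non-printable bytes appear as '.'.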
| 33.470588
| 86
| 0.506151
|
9656cc57c2718ca6f3c0689722f3c5e5861600c7
| 3,163
|
py
|
Python
|
clients/python-flask/generated/openapi_server/models/default_crumb_issuer.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-flask/generated/openapi_server/models/default_crumb_issuer.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-flask/generated/openapi_server/models/default_crumb_issuer.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class DefaultCrumbIssuer(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class=None, crumb=None, crumb_request_field=None): # noqa: E501
"""DefaultCrumbIssuer - a model defined in OpenAPI
:param _class: The _class of this DefaultCrumbIssuer. # noqa: E501
:type _class: str
:param crumb: The crumb of this DefaultCrumbIssuer. # noqa: E501
:type crumb: str
:param crumb_request_field: The crumb_request_field of this DefaultCrumbIssuer. # noqa: E501
:type crumb_request_field: str
"""
self.openapi_types = {
'_class': str,
'crumb': str,
'crumb_request_field': str
}
self.attribute_map = {
'_class': '_class',
'crumb': 'crumb',
'crumb_request_field': 'crumbRequestField'
}
self.__class = _class
self._crumb = crumb
self._crumb_request_field = crumb_request_field
@classmethod
def from_dict(cls, dikt) -> 'DefaultCrumbIssuer':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The DefaultCrumbIssuer of this DefaultCrumbIssuer. # noqa: E501
:rtype: DefaultCrumbIssuer
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this DefaultCrumbIssuer.
:return: The _class of this DefaultCrumbIssuer.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this DefaultCrumbIssuer.
:param _class: The _class of this DefaultCrumbIssuer.
:type _class: str
"""
self.__class = _class
@property
def crumb(self):
"""Gets the crumb of this DefaultCrumbIssuer.
:return: The crumb of this DefaultCrumbIssuer.
:rtype: str
"""
return self._crumb
@crumb.setter
def crumb(self, crumb):
"""Sets the crumb of this DefaultCrumbIssuer.
:param crumb: The crumb of this DefaultCrumbIssuer.
:type crumb: str
"""
self._crumb = crumb
@property
def crumb_request_field(self):
"""Gets the crumb_request_field of this DefaultCrumbIssuer.
:return: The crumb_request_field of this DefaultCrumbIssuer.
:rtype: str
"""
return self._crumb_request_field
@crumb_request_field.setter
def crumb_request_field(self, crumb_request_field):
"""Sets the crumb_request_field of this DefaultCrumbIssuer.
:param crumb_request_field: The crumb_request_field of this DefaultCrumbIssuer.
:type crumb_request_field: str
"""
self._crumb_request_field = crumb_request_field
| 27.034188
| 101
| 0.635789
|
0f43ba839dca1e05ce4209d68691bcb72939baa0
| 2,156
|
py
|
Python
|
vspk/v6/fetchers/nubridgeinterfaces_fetcher.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 19
|
2016-03-07T12:34:22.000Z
|
2020-06-11T11:09:02.000Z
|
vspk/v6/fetchers/nubridgeinterfaces_fetcher.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 40
|
2016-06-13T15:36:54.000Z
|
2020-11-10T18:14:43.000Z
|
vspk/v6/fetchers/nubridgeinterfaces_fetcher.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 15
|
2016-06-10T22:06:01.000Z
|
2020-12-15T18:37:42.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUBridgeInterfacesFetcher(NURESTFetcher):
""" Represents a NUBridgeInterfaces fetcher
Notes:
This fetcher enables to fetch NUBridgeInterface objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUBridgeInterface class that is managed.
Returns:
.NUBridgeInterface: the managed class
"""
from .. import NUBridgeInterface
return NUBridgeInterface
| 40.679245
| 86
| 0.731447
|
65ac07853eaf291b3793ee277bdaf6629604fa1c
| 2,791
|
py
|
Python
|
app/models.py
|
ethan-leba/flask-twitter
|
27785b88354679d853fe86e6e8629ee72b1d40a4
|
[
"MIT"
] | null | null | null |
app/models.py
|
ethan-leba/flask-twitter
|
27785b88354679d853fe86e6e8629ee72b1d40a4
|
[
"MIT"
] | null | null | null |
app/models.py
|
ethan-leba/flask-twitter
|
27785b88354679d853fe86e6e8629ee72b1d40a4
|
[
"MIT"
] | null | null | null |
from . import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Role(db.Model):
__tablename__ = 'role'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('role.id'))
password_hash = db.Column(db.String(128))
def __repr__(self):
return '<User %r>' % self.username
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
class Tweet(db.Model):
__tablename__ = 'tweet'
tweet_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
message = db.Column(db.String(255))
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)  # pass the callable so it is evaluated per insert, not at import time
def __repr__(self):
return '<Tweet %r, UID: %r>' % (self.message, self.user_id)
class Comment(db.Model):
__tablename__ = 'comment'
comment_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
tweet_id = db.Column(db.Integer, db.ForeignKey('tweet.tweet_id'))
message = db.Column(db.String(255))
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)  # pass the callable so it is evaluated per insert, not at import time
def __repr__(self):
return '<Tweet %r, UID: %r, TID: %r>' % (self.message, self.user_id, self.tweet_id)
class TweetLike(db.Model):
__tablename__ = 'tweet_like'
tweet_like_id = db.Column(db.Integer, primary_key=True)
    tweet_id = db.Column(db.Integer, db.ForeignKey('tweet.tweet_id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Like - UID: %r, TID: %r>' % (self.user_id, self.tweet_id)
class CommentLike(db.Model):
__tablename__ = 'comment_like'
comment_like_id = db.Column(db.Integer, primary_key=True)
comment_id = db.Column(db.Integer, db.ForeignKey('comment.comment_id'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Like - UID: %r, TID: %r>' % (self.user_id, self.comment_id)
| 34.8875
| 91
| 0.687209
|
dc4c072d298ba73ca41d202c60b27cb7146431ec
| 77
|
py
|
Python
|
kernel/security/protol/spdz/communicator/__init__.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 39
|
2021-10-12T01:43:27.000Z
|
2022-03-28T04:46:35.000Z
|
kernel/security/protol/spdz/communicator/__init__.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 6
|
2021-10-14T02:11:47.000Z
|
2022-03-23T02:41:50.000Z
|
kernel/security/protol/spdz/communicator/__init__.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 10
|
2021-10-14T09:36:03.000Z
|
2022-02-10T11:05:12.000Z
|
from kernel.security.protol.spdz.communicator.federation import Communicator
| 38.5
| 76
| 0.883117
|
e5082ecefb8e645b6c80618d0c3da7b05f0c5c78
| 669
|
py
|
Python
|
doc/source/tutorial-schema.py
|
techdragon/validatingconfigparser
|
8cf939c75330db53901072eb4e3f21dec58b05e5
|
[
"Apache-2.0"
] | null | null | null |
doc/source/tutorial-schema.py
|
techdragon/validatingconfigparser
|
8cf939c75330db53901072eb4e3f21dec58b05e5
|
[
"Apache-2.0"
] | null | null | null |
doc/source/tutorial-schema.py
|
techdragon/validatingconfigparser
|
8cf939c75330db53901072eb4e3f21dec58b05e5
|
[
"Apache-2.0"
] | null | null | null |
import validatingconfigparser
import formencode.validators as validators
import formencode.schema as schema
class MySchema(schema.Schema):
# Not validating names!
gender = validators.OneOf(['male', 'female'])
age = validators.Int(min=0)
married = validators.StringBool()
weight = validators.Number()
parser = validatingconfigparser.ConfigParser(schema=MySchema)
parser.read('tutorial-config.ini')
print parser.get('peter', 'name')
print parser.get('peter', 'gender')
print parser.get('peter', 'age')
print parser.get('peter', 'married')
print parser.get('peter', 'weight')
print parser.items("peter")
print parser.set('peter', 'age', -1)
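The tutorial script above is written for Python 2 (bare print statements). A rough Python 3 equivalent, assuming the same validatingconfigparser/formencode API and the same tutorial-config.ini file, might look like this condensed sketch:
import validatingconfigparser
import formencode.validators as validators
import formencode.schema as schema

class MySchema(schema.Schema):
    gender = validators.OneOf(['male', 'female'])
    age = validators.Int(min=0)
    married = validators.StringBool()
    weight = validators.Number()

parser = validatingconfigparser.ConfigParser(schema=MySchema)
parser.read('tutorial-config.ini')
print(parser.get('peter', 'name'))
print(parser.get('peter', 'age'))
print(parser.items('peter'))
# parser.set('peter', 'age', -1)  # exercises the age validator (min=0); behavior follows the library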
| 25.730769
| 61
| 0.724963
|
f9f71bd238fc5681a1e1929cc06db1c1c53b3dfb
| 4,291
|
py
|
Python
|
zerver/webhooks/wordpress/tests.py
|
alexandraciobica/zulip
|
f3753504469070bfccc73f22f933c87bee7d1852
|
[
"Apache-2.0"
] | 4
|
2019-06-04T09:06:53.000Z
|
2019-06-04T09:07:47.000Z
|
zerver/webhooks/wordpress/tests.py
|
991rajat/zulip
|
648a60baf63f9afade83148bd9ae1fc480510178
|
[
"Apache-2.0"
] | 4
|
2020-06-06T00:51:42.000Z
|
2022-02-10T21:38:40.000Z
|
zerver/webhooks/wordpress/tests.py
|
991rajat/zulip
|
648a60baf63f9afade83148bd9ae1fc480510178
|
[
"Apache-2.0"
] | 1
|
2020-02-06T13:56:40.000Z
|
2020-02-06T13:56:40.000Z
|
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class WordPressHookTests(WebhookTestCase):
STREAM_NAME = 'wordpress'
URL_TEMPLATE = "/api/v1/external/wordpress?api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'wordpress'
def test_publish_post(self) -> None:
expected_topic = u"WordPress Post"
expected_message = u"New post published:\n* [New Blog Post](http://example.com\n)"
self.send_and_test_stream_message('publish_post', expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def test_publish_post_type_not_provided(self) -> None:
expected_topic = u"WordPress Post"
expected_message = u"New post published:\n* [New Blog Post](http://example.com\n)"
self.send_and_test_stream_message('publish_post_type_not_provided',
expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def test_publish_post_no_data_provided(self) -> None:
# Note: the fixture includes 'hook=publish_post' because it's always added by HookPress
expected_topic = u"WordPress Notification"
expected_message = u"New post published:\n* [New WordPress Post](WordPress Post URL)"
self.send_and_test_stream_message('publish_post_no_data_provided',
expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def test_publish_page(self) -> None:
expected_topic = u"WordPress Page"
expected_message = u"New page published:\n* [New Blog Page](http://example.com\n)"
self.send_and_test_stream_message('publish_page', expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def test_user_register(self) -> None:
expected_topic = u"New Blog Users"
expected_message = u"New blog user registered:\n* **Name**: test_user\n* **Email**: test_user@example.com"
self.send_and_test_stream_message('user_register', expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def test_wp_login(self) -> None:
expected_topic = u"New Login"
expected_message = u"User testuser logged in."
self.send_and_test_stream_message('wp_login', expected_topic, expected_message,
content_type="application/x-www-form-urlencoded")
def test_unknown_action_no_data(self) -> None:
# Mimic send_and_test_stream_message() to manually execute a negative test.
# Otherwise its call to send_json_payload() would assert on the non-success
# we are testing. The value of result is the error message the webhook should
# return if no params are sent. The fixture for this test is an empty file.
# subscribe to the target stream
self.subscribe(self.test_user, self.STREAM_NAME)
# post to the webhook url
post_params = {'stream_name': self.STREAM_NAME,
'content_type': 'application/x-www-form-urlencoded'}
result = self.client_post(self.url, 'unknown_action', **post_params)
# check that we got the expected error message
self.assert_json_error(result, "Unknown WordPress webhook action: WordPress Action")
def test_unknown_action_no_hook_provided(self) -> None:
# Similar to unknown_action_no_data, except the fixture contains valid blog post
# params but without the hook parameter. This should also return an error.
self.subscribe(self.test_user, self.STREAM_NAME)
post_params = {'stream_name': self.STREAM_NAME,
'content_type': 'application/x-www-form-urlencoded'}
result = self.client_post(self.url, 'unknown_action', **post_params)
self.assert_json_error(result, "Unknown WordPress webhook action: WordPress Action")
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("wordpress", fixture_name, file_type="txt")
| 46.641304
| 114
| 0.661617
|
6c84c8a0c6f7b3464b36c3c9c33b6886e46eef0b
| 225,788
|
py
|
Python
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_sql_resources_operations.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_sql_resources_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/cosmos/azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/aio/operations/_sql_resources_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._sql_resources_operations import build_create_update_client_encryption_key_request_initial, build_create_update_sql_container_request_initial, build_create_update_sql_database_request_initial, build_create_update_sql_role_assignment_request_initial, build_create_update_sql_role_definition_request_initial, build_create_update_sql_stored_procedure_request_initial, build_create_update_sql_trigger_request_initial, build_create_update_sql_user_defined_function_request_initial, build_delete_sql_container_request_initial, build_delete_sql_database_request_initial, build_delete_sql_role_assignment_request_initial, build_delete_sql_role_definition_request_initial, build_delete_sql_stored_procedure_request_initial, build_delete_sql_trigger_request_initial, build_delete_sql_user_defined_function_request_initial, build_get_client_encryption_key_request, build_get_sql_container_request, build_get_sql_container_throughput_request, build_get_sql_database_request, build_get_sql_database_throughput_request, build_get_sql_role_assignment_request, build_get_sql_role_definition_request, build_get_sql_stored_procedure_request, build_get_sql_trigger_request, build_get_sql_user_defined_function_request, build_list_client_encryption_keys_request, build_list_sql_container_partition_merge_request_initial, build_list_sql_containers_request, build_list_sql_databases_request, build_list_sql_role_assignments_request, build_list_sql_role_definitions_request, build_list_sql_stored_procedures_request, build_list_sql_triggers_request, build_list_sql_user_defined_functions_request, build_migrate_sql_container_to_autoscale_request_initial, build_migrate_sql_container_to_manual_throughput_request_initial, build_migrate_sql_database_to_autoscale_request_initial, build_migrate_sql_database_to_manual_throughput_request_initial, build_retrieve_continuous_backup_information_request_initial, build_sql_container_redistribute_throughput_request_initial, build_sql_container_retrieve_throughput_distribution_request_initial, build_update_sql_container_throughput_request_initial, build_update_sql_database_throughput_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SqlResourcesOperations: # pylint: disable=too-many-public-methods
"""SqlResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cosmosdb.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_sql_databases(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlDatabaseListResult"]:
"""Lists the SQL databases under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlDatabaseListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlDatabaseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlDatabaseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_databases_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self.list_sql_databases.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_databases_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlDatabaseListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_databases.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases"} # type: ignore
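list_sql_databases above returns an AsyncItemPaged that fetches pages lazily as it is iterated. A usage sketch, assuming the async CosmosDBManagementClient from azure.mgmt.cosmosdb.aio and azure-identity for credentials (subscription, resource group and account names are placeholders):
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient

async def main():
    async with DefaultAzureCredential() as credential:
        async with CosmosDBManagementClient(credential, "<subscription-id>") as client:
            # Pages are requested on demand as the async iteration advances.
            async for database in client.sql_resources.list_sql_databases("my-rg", "my-account"):
                print(database.name)

asyncio.run(main())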
@distributed_trace_async
async def get_sql_database(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> "_models.SqlDatabaseGetResults":
"""Gets the SQL database under an existing Azure Cosmos DB database account with the provided
name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlDatabaseGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlDatabaseGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_database_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self.get_sql_database.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_database.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
async def _create_update_sql_database_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
create_update_sql_database_parameters: "_models.SqlDatabaseCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlDatabaseGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlDatabaseGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_database_parameters, 'SqlDatabaseCreateUpdateParameters')
request = build_create_update_sql_database_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_database_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_database_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_database(
self,
resource_group_name: str,
account_name: str,
database_name: str,
create_update_sql_database_parameters: "_models.SqlDatabaseCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlDatabaseGetResults"]:
"""Create or update an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param create_update_sql_database_parameters: The parameters to provide for the current SQL
database.
:type create_update_sql_database_parameters:
~azure.mgmt.cosmosdb.models.SqlDatabaseCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlDatabaseGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlDatabaseGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlDatabaseGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_database_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
create_update_sql_database_parameters=create_update_sql_database_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlDatabaseGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_database.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
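begin_create_update_sql_database returns an AsyncLROPoller, so the caller chooses when to block on the long-running operation. A sketch of the call shape, assuming the SqlDatabaseCreateUpdateParameters and SqlDatabaseResource models from azure.mgmt.cosmosdb.models and a client constructed as in the earlier sketch:
from azure.mgmt.cosmosdb.models import SqlDatabaseCreateUpdateParameters, SqlDatabaseResource

async def create_database(client):
    params = SqlDatabaseCreateUpdateParameters(
        resource=SqlDatabaseResource(id="mydb")   # "id" is the database name
    )
    poller = await client.sql_resources.begin_create_update_sql_database(
        "my-rg", "my-account", "mydb", params
    )
    database = await poller.result()              # waits for provisioning to complete
    print(database.id)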
async def _delete_sql_database_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_database_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self._delete_sql_database_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_database_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_database( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_database_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_database.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}"} # type: ignore
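begin_delete_sql_database follows the same poller pattern but resolves to None; awaiting the poller only tells you when the service has finished the delete. A brief sketch with the same assumed client:
async def delete_database(client):
    poller = await client.sql_resources.begin_delete_sql_database(
        "my-rg", "my-account", "mydb"
    )
    await poller.result()   # returns None once the deletion completes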
@distributed_trace_async
async def get_sql_database_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> "_models.ThroughputSettingsGetResults":
"""Gets the RUs per second of the SQL database under an existing Azure Cosmos DB database account
with the provided name.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_database_throughput_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self.get_sql_database_throughput.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_database_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default"} # type: ignore
async def _update_sql_database_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
update_throughput_parameters: "_models.ThroughputSettingsUpdateParameters",
**kwargs: Any
) -> Optional["_models.ThroughputSettingsGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
request = build_update_sql_database_throughput_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_sql_database_throughput_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_sql_database_throughput_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default"} # type: ignore
@distributed_trace_async
async def begin_update_sql_database_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
update_throughput_parameters: "_models.ThroughputSettingsUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.ThroughputSettingsGetResults"]:
"""Update RUs per second of an Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current SQL database.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_sql_database_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_sql_database_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default"} # type: ignore
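Updating RUs per second uses the same LRO pattern with ThroughputSettingsUpdateParameters as the body. A hedged sketch (model names from azure.mgmt.cosmosdb.models; 400 RU/s is just an example value):
from azure.mgmt.cosmosdb.models import ThroughputSettingsResource, ThroughputSettingsUpdateParameters

async def set_database_throughput(client):
    params = ThroughputSettingsUpdateParameters(
        resource=ThroughputSettingsResource(throughput=400)   # manual throughput in RU/s
    )
    poller = await client.sql_resources.begin_update_sql_database_throughput(
        "my-rg", "my-account", "mydb", params
    )
    settings = await poller.result()
    print(settings.resource.throughput)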
async def _migrate_sql_database_to_autoscale_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> Optional["_models.ThroughputSettingsGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_migrate_sql_database_to_autoscale_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self._migrate_sql_database_to_autoscale_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_database_to_autoscale_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_database_to_autoscale(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ThroughputSettingsGetResults"]:
"""Migrate an Azure Cosmos DB SQL database from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_database_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_database_to_autoscale.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
async def _migrate_sql_database_to_manual_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> Optional["_models.ThroughputSettingsGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_migrate_sql_database_to_manual_throughput_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self._migrate_sql_database_to_manual_throughput_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_database_to_manual_throughput_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_database_to_manual_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ThroughputSettingsGetResults"]:
"""Migrate an Azure Cosmos DB SQL database from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_database_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_database_to_manual_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
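The two migrate operations above take no request body; they switch an existing database between manual and autoscale throughput and report the resulting settings. A brief sketch with the same assumed client:
async def migrate_throughput(client):
    # manual -> autoscale
    poller = await client.sql_resources.begin_migrate_sql_database_to_autoscale(
        "my-rg", "my-account", "mydb"
    )
    settings = await poller.result()
    print(settings.resource.autoscale_settings)

    # autoscale -> manual
    poller = await client.sql_resources.begin_migrate_sql_database_to_manual_throughput(
        "my-rg", "my-account", "mydb"
    )
    settings = await poller.result()
    print(settings.resource.throughput)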
@distributed_trace
def list_client_encryption_keys(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ClientEncryptionKeysListResult"]:
"""Lists the ClientEncryptionKeys under an existing Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClientEncryptionKeysListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.ClientEncryptionKeysListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClientEncryptionKeysListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_client_encryption_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self.list_client_encryption_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_client_encryption_keys_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ClientEncryptionKeysListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_client_encryption_keys.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys"} # type: ignore
@distributed_trace_async
async def get_client_encryption_key(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
**kwargs: Any
) -> "_models.ClientEncryptionKeyGetResults":
"""Gets the ClientEncryptionKey under an existing Azure Cosmos DB SQL database.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param client_encryption_key_name: Cosmos DB ClientEncryptionKey name.
:type client_encryption_key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ClientEncryptionKeyGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClientEncryptionKeyGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_client_encryption_key_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
client_encryption_key_name=client_encryption_key_name,
api_version=api_version,
template_url=self.get_client_encryption_key.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ClientEncryptionKeyGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_client_encryption_key.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName}"} # type: ignore
async def _create_update_client_encryption_key_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
create_update_client_encryption_key_parameters: "_models.ClientEncryptionKeyCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.ClientEncryptionKeyGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ClientEncryptionKeyGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_client_encryption_key_parameters, 'ClientEncryptionKeyCreateUpdateParameters')
request = build_create_update_client_encryption_key_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
client_encryption_key_name=client_encryption_key_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_client_encryption_key_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ClientEncryptionKeyGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_client_encryption_key_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName}"} # type: ignore
@distributed_trace_async
async def begin_create_update_client_encryption_key(
self,
resource_group_name: str,
account_name: str,
database_name: str,
client_encryption_key_name: str,
create_update_client_encryption_key_parameters: "_models.ClientEncryptionKeyCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.ClientEncryptionKeyGetResults"]:
"""Create or update a ClientEncryptionKey. This API is meant to be invoked via tools such as the
Azure Powershell (instead of directly).
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param client_encryption_key_name: Cosmos DB ClientEncryptionKey name.
:type client_encryption_key_name: str
:param create_update_client_encryption_key_parameters: The parameters to provide for the client
encryption key.
:type create_update_client_encryption_key_parameters:
~azure.mgmt.cosmosdb.models.ClientEncryptionKeyCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ClientEncryptionKeyGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ClientEncryptionKeyGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClientEncryptionKeyGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_client_encryption_key_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
client_encryption_key_name=client_encryption_key_name,
create_update_client_encryption_key_parameters=create_update_client_encryption_key_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ClientEncryptionKeyGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_client_encryption_key.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName}"} # type: ignore
@distributed_trace
def list_sql_containers(
self,
resource_group_name: str,
account_name: str,
database_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlContainerListResult"]:
"""Lists the SQL container under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlContainerListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlContainerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlContainerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_containers_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=self.list_sql_containers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_containers_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlContainerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_containers.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers"} # type: ignore
@distributed_trace_async
async def get_sql_container(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> "_models.SqlContainerGetResults":
"""Gets the SQL container under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlContainerGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlContainerGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlContainerGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_container_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self.get_sql_container.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlContainerGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_container.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
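    # Usage sketch (illustrative): get_sql_container is a plain awaitable returning a
    # SqlContainerGetResults. "client" stands for an already constructed async
    # CosmosDBManagementClient; the "sql_resources" attribute name and the resource
    # names are assumptions/placeholders.
    #
    #     async def sample_get_sql_container(client):
    #         container = await client.sql_resources.get_sql_container(
    #             resource_group_name="<rg>",
    #             account_name="<account>",
    #             database_name="<db>",
    #             container_name="<container>",
    #         )
    #         print(container.id)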
async def _create_update_sql_container_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
create_update_sql_container_parameters: "_models.SqlContainerCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlContainerGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlContainerGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_container_parameters, 'SqlContainerCreateUpdateParameters')
request = build_create_update_sql_container_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_container_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlContainerGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_container_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_container(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
create_update_sql_container_parameters: "_models.SqlContainerCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlContainerGetResults"]:
"""Create or update an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param create_update_sql_container_parameters: The parameters to provide for the current SQL
container.
:type create_update_sql_container_parameters:
~azure.mgmt.cosmosdb.models.SqlContainerCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlContainerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlContainerGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlContainerGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_container_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
create_update_sql_container_parameters=create_update_sql_container_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlContainerGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_container.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
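    # Usage sketch (illustrative): drive the create/update LRO through the returned
    # AsyncLROPoller. The SqlContainerResource and ContainerPartitionKey model names
    # are assumed to exist in azure.mgmt.cosmosdb.models (only
    # SqlContainerCreateUpdateParameters is named in this file); all resource names
    # are placeholders.
    #
    #     from azure.mgmt.cosmosdb import models as cosmos_models
    #
    #     async def sample_create_update_sql_container(client):
    #         params = cosmos_models.SqlContainerCreateUpdateParameters(
    #             resource=cosmos_models.SqlContainerResource(
    #                 id="<container>",
    #                 partition_key=cosmos_models.ContainerPartitionKey(paths=["/pk"], kind="Hash"),
    #             )
    #         )
    #         poller = await client.sql_resources.begin_create_update_sql_container(
    #             "<rg>", "<account>", "<db>", "<container>", params
    #         )
    #         container = await poller.result()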
async def _delete_sql_container_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_container_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self._delete_sql_container_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_container_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_container( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_container_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_container.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}"} # type: ignore
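    # Usage sketch (illustrative): the delete LRO yields no body, so awaiting the
    # poller's result() simply blocks until the deletion has completed. Resource
    # names and the "sql_resources" attribute name are assumptions/placeholders.
    #
    #     async def sample_delete_sql_container(client):
    #         poller = await client.sql_resources.begin_delete_sql_container(
    #             resource_group_name="<rg>",
    #             account_name="<account>",
    #             database_name="<db>",
    #             container_name="<container>",
    #         )
    #         await poller.result()  # returns None on success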
async def _list_sql_container_partition_merge_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
merge_parameters: "_models.MergeParameters",
**kwargs: Any
) -> Optional["_models.PhysicalPartitionStorageInfoCollection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PhysicalPartitionStorageInfoCollection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(merge_parameters, 'MergeParameters')
request = build_list_sql_container_partition_merge_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._list_sql_container_partition_merge_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PhysicalPartitionStorageInfoCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_sql_container_partition_merge_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/partitionMerge"} # type: ignore
@distributed_trace_async
async def begin_list_sql_container_partition_merge(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
merge_parameters: "_models.MergeParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.PhysicalPartitionStorageInfoCollection"]:
"""Merges the partitions of a SQL Container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param merge_parameters: The parameters for the merge operation.
:type merge_parameters: ~azure.mgmt.cosmosdb.models.MergeParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionStorageInfoCollection or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionStorageInfoCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PhysicalPartitionStorageInfoCollection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_sql_container_partition_merge_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
merge_parameters=merge_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PhysicalPartitionStorageInfoCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_sql_container_partition_merge.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/partitionMerge"} # type: ignore
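    # Usage sketch (illustrative): the partition-merge LRO resolves to a
    # PhysicalPartitionStorageInfoCollection. MergeParameters is passed through to
    # the service unchanged, so its construction is deliberately left to the caller
    # here (its fields live in azure.mgmt.cosmosdb.models, not in this file); all
    # names are placeholders.
    #
    #     async def sample_merge_partitions(client, merge_parameters):
    #         # merge_parameters: a _models.MergeParameters instance built by the caller
    #         poller = await client.sql_resources.begin_list_sql_container_partition_merge(
    #             "<rg>", "<account>", "<db>", "<container>", merge_parameters
    #         )
    #         storage_info = await poller.result()
    #         print(storage_info)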
@distributed_trace_async
async def get_sql_container_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> "_models.ThroughputSettingsGetResults":
"""Gets the RUs per second of the SQL container under an existing Azure Cosmos DB database
account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ThroughputSettingsGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_container_throughput_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self.get_sql_container_throughput.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_container_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default"} # type: ignore
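    # Usage sketch (illustrative): read the container's current throughput settings.
    # The ".resource.throughput" attribute path on ThroughputSettingsGetResults is
    # assumed from the models package; names are placeholders.
    #
    #     async def sample_get_sql_container_throughput(client):
    #         settings = await client.sql_resources.get_sql_container_throughput(
    #             "<rg>", "<account>", "<db>", "<container>"
    #         )
    #         print(settings.resource.throughput)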
async def _update_sql_container_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
update_throughput_parameters: "_models.ThroughputSettingsUpdateParameters",
**kwargs: Any
) -> Optional["_models.ThroughputSettingsGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(update_throughput_parameters, 'ThroughputSettingsUpdateParameters')
request = build_update_sql_container_throughput_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_sql_container_throughput_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_sql_container_throughput_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default"} # type: ignore
@distributed_trace_async
async def begin_update_sql_container_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
update_throughput_parameters: "_models.ThroughputSettingsUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.ThroughputSettingsGetResults"]:
"""Update RUs per second of an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param update_throughput_parameters: The parameters to provide for the RUs per second of the
current SQL container.
:type update_throughput_parameters:
~azure.mgmt.cosmosdb.models.ThroughputSettingsUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_sql_container_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
update_throughput_parameters=update_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_sql_container_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default"} # type: ignore
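    # Usage sketch (illustrative): update manual throughput through the LRO. The
    # ThroughputSettingsResource model and its "throughput" field are assumed from
    # azure.mgmt.cosmosdb.models (only ThroughputSettingsUpdateParameters is named in
    # this file); names and the RU value are placeholders.
    #
    #     from azure.mgmt.cosmosdb import models as cosmos_models
    #
    #     async def sample_update_sql_container_throughput(client):
    #         params = cosmos_models.ThroughputSettingsUpdateParameters(
    #             resource=cosmos_models.ThroughputSettingsResource(throughput=400)
    #         )
    #         poller = await client.sql_resources.begin_update_sql_container_throughput(
    #             "<rg>", "<account>", "<db>", "<container>", params
    #         )
    #         settings = await poller.result()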
async def _migrate_sql_container_to_autoscale_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> Optional["_models.ThroughputSettingsGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_migrate_sql_container_to_autoscale_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self._migrate_sql_container_to_autoscale_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_container_to_autoscale_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_container_to_autoscale(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ThroughputSettingsGetResults"]:
"""Migrate an Azure Cosmos DB SQL container from manual throughput to autoscale.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_container_to_autoscale_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_container_to_autoscale.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToAutoscale"} # type: ignore
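    # Usage sketch (illustrative): the autoscale migration takes no request body and
    # its LRO resolves to the new ThroughputSettingsGetResults. Names and the
    # "sql_resources" attribute are assumptions/placeholders.
    #
    #     async def sample_migrate_to_autoscale(client):
    #         poller = await client.sql_resources.begin_migrate_sql_container_to_autoscale(
    #             "<rg>", "<account>", "<db>", "<container>"
    #         )
    #         settings = await poller.result()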
async def _migrate_sql_container_to_manual_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> Optional["_models.ThroughputSettingsGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ThroughputSettingsGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_migrate_sql_container_to_manual_throughput_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self._migrate_sql_container_to_manual_throughput_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_migrate_sql_container_to_manual_throughput_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
@distributed_trace_async
async def begin_migrate_sql_container_to_manual_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ThroughputSettingsGetResults"]:
"""Migrate an Azure Cosmos DB SQL container from autoscale to manual throughput.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ThroughputSettingsGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.ThroughputSettingsGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ThroughputSettingsGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._migrate_sql_container_to_manual_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ThroughputSettingsGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_migrate_sql_container_to_manual_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/migrateToManualThroughput"} # type: ignore
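    # Usage sketch (illustrative): mirror of the autoscale migration above, switching
    # the container back to manual throughput; again no request body is needed.
    # Names are placeholders.
    #
    #     async def sample_migrate_to_manual_throughput(client):
    #         poller = await client.sql_resources.begin_migrate_sql_container_to_manual_throughput(
    #             "<rg>", "<account>", "<db>", "<container>"
    #         )
    #         settings = await poller.result()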
async def _sql_container_retrieve_throughput_distribution_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
retrieve_throughput_parameters: "_models.RetrieveThroughputParameters",
**kwargs: Any
) -> Optional["_models.PhysicalPartitionThroughputInfoResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PhysicalPartitionThroughputInfoResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(retrieve_throughput_parameters, 'RetrieveThroughputParameters')
request = build_sql_container_retrieve_throughput_distribution_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._sql_container_retrieve_throughput_distribution_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PhysicalPartitionThroughputInfoResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_sql_container_retrieve_throughput_distribution_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/retrieveThroughputDistribution"} # type: ignore
@distributed_trace_async
async def begin_sql_container_retrieve_throughput_distribution(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
retrieve_throughput_parameters: "_models.RetrieveThroughputParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.PhysicalPartitionThroughputInfoResult"]:
"""Retrieve throughput distribution for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param retrieve_throughput_parameters: The parameters to provide for retrieving throughput
distribution for the current SQL container.
:type retrieve_throughput_parameters: ~azure.mgmt.cosmosdb.models.RetrieveThroughputParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PhysicalPartitionThroughputInfoResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._sql_container_retrieve_throughput_distribution_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
retrieve_throughput_parameters=retrieve_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PhysicalPartitionThroughputInfoResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sql_container_retrieve_throughput_distribution.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/retrieveThroughputDistribution"} # type: ignore
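    # Usage sketch (illustrative): the retrieve-throughput-distribution LRO resolves
    # to a PhysicalPartitionThroughputInfoResult. RetrieveThroughputParameters is
    # forwarded unchanged, so its construction is left to the caller (its fields are
    # defined in azure.mgmt.cosmosdb.models); names are placeholders.
    #
    #     async def sample_retrieve_throughput_distribution(client, retrieve_params):
    #         # retrieve_params: a _models.RetrieveThroughputParameters instance built by the caller
    #         poller = await client.sql_resources.begin_sql_container_retrieve_throughput_distribution(
    #             "<rg>", "<account>", "<db>", "<container>", retrieve_params
    #         )
    #         info = await poller.result()
    #         print(info)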
async def _sql_container_redistribute_throughput_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
redistribute_throughput_parameters: "_models.RedistributeThroughputParameters",
**kwargs: Any
) -> Optional["_models.PhysicalPartitionThroughputInfoResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PhysicalPartitionThroughputInfoResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(redistribute_throughput_parameters, 'RedistributeThroughputParameters')
request = build_sql_container_redistribute_throughput_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._sql_container_redistribute_throughput_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PhysicalPartitionThroughputInfoResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_sql_container_redistribute_throughput_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/redistributeThroughput"} # type: ignore
@distributed_trace_async
async def begin_sql_container_redistribute_throughput(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
redistribute_throughput_parameters: "_models.RedistributeThroughputParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.PhysicalPartitionThroughputInfoResult"]:
"""Redistribute throughput for an Azure Cosmos DB SQL container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param redistribute_throughput_parameters: The parameters to provide for redistributing
throughput for the current SQL container.
:type redistribute_throughput_parameters:
~azure.mgmt.cosmosdb.models.RedistributeThroughputParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either
PhysicalPartitionThroughputInfoResult or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.PhysicalPartitionThroughputInfoResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PhysicalPartitionThroughputInfoResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._sql_container_redistribute_throughput_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
redistribute_throughput_parameters=redistribute_throughput_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PhysicalPartitionThroughputInfoResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_sql_container_redistribute_throughput.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/throughputSettings/default/redistributeThroughput"} # type: ignore
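    # Usage sketch (illustrative): same pattern as the retrieve operation above, but
    # with RedistributeThroughputParameters describing the target per-partition RU
    # split; construction of that model is left to the caller and all names are
    # placeholders.
    #
    #     async def sample_redistribute_throughput(client, redistribute_params):
    #         # redistribute_params: a _models.RedistributeThroughputParameters instance built by the caller
    #         poller = await client.sql_resources.begin_sql_container_redistribute_throughput(
    #             "<rg>", "<account>", "<db>", "<container>", redistribute_params
    #         )
    #         info = await poller.result()
    #         print(info)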
@distributed_trace
def list_sql_stored_procedures(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlStoredProcedureListResult"]:
"""Lists the SQL storedProcedure under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlStoredProcedureListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlStoredProcedureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlStoredProcedureListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_stored_procedures_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self.list_sql_stored_procedures.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_stored_procedures_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlStoredProcedureListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_stored_procedures.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures"} # type: ignore
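    # Usage sketch (illustrative): page through the container's stored procedures
    # with "async for"; names and the "sql_resources" attribute are
    # assumptions/placeholders.
    #
    #     async def sample_list_sql_stored_procedures(client):
    #         async for sproc in client.sql_resources.list_sql_stored_procedures(
    #             "<rg>", "<account>", "<db>", "<container>"
    #         ):
    #             print(sproc.name)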
@distributed_trace_async
async def get_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
**kwargs: Any
) -> "_models.SqlStoredProcedureGetResults":
"""Gets the SQL storedProcedure under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name.
:type stored_procedure_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlStoredProcedureGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlStoredProcedureGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_stored_procedure_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
api_version=api_version,
template_url=self.get_sql_stored_procedure.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlStoredProcedureGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_stored_procedure.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
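    # Usage sketch (illustrative): fetch a single stored procedure definition; names
    # and the "sql_resources" attribute are assumptions/placeholders.
    #
    #     async def sample_get_sql_stored_procedure(client):
    #         sproc = await client.sql_resources.get_sql_stored_procedure(
    #             "<rg>", "<account>", "<db>", "<container>", "<sproc>"
    #         )
    #         print(sproc.id)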
async def _create_update_sql_stored_procedure_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
create_update_sql_stored_procedure_parameters: "_models.SqlStoredProcedureCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlStoredProcedureGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlStoredProcedureGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_stored_procedure_parameters, 'SqlStoredProcedureCreateUpdateParameters')
request = build_create_update_sql_stored_procedure_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_stored_procedure_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlStoredProcedureGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_stored_procedure_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_stored_procedure(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
create_update_sql_stored_procedure_parameters: "_models.SqlStoredProcedureCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlStoredProcedureGetResults"]:
"""Create or update an Azure Cosmos DB SQL storedProcedure.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name.
:type stored_procedure_name: str
:param create_update_sql_stored_procedure_parameters: The parameters to provide for the current
SQL storedProcedure.
:type create_update_sql_stored_procedure_parameters:
~azure.mgmt.cosmosdb.models.SqlStoredProcedureCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlStoredProcedureGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlStoredProcedureGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlStoredProcedureGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_stored_procedure_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
create_update_sql_stored_procedure_parameters=create_update_sql_stored_procedure_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlStoredProcedureGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_stored_procedure.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
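    # Illustrative usage sketch for the long-running create/update above (commentary
    # only; placeholder names throughout, and the SqlStoredProcedureResource fields
    # are assumptions to verify against azure.mgmt.cosmosdb.models):
    #
    #     from azure.mgmt.cosmosdb.models import (
    #         SqlStoredProcedureCreateUpdateParameters,
    #         SqlStoredProcedureResource,
    #     )
    #
    #     poller = await client.sql_resources.begin_create_update_sql_stored_procedure(
    #         resource_group_name="my-rg",
    #         account_name="my-cosmos-account",
    #         database_name="my-database",
    #         container_name="my-container",
    #         stored_procedure_name="my-sproc",
    #         create_update_sql_stored_procedure_parameters=SqlStoredProcedureCreateUpdateParameters(
    #             resource=SqlStoredProcedureResource(id="my-sproc", body="function () { }"),
    #         ),
    #     )
    #     sproc = await poller.result()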
async def _delete_sql_stored_procedure_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_stored_procedure_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
api_version=api_version,
template_url=self._delete_sql_stored_procedure_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_stored_procedure_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_stored_procedure( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
stored_procedure_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL storedProcedure.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param stored_procedure_name: Cosmos DB storedProcedure name.
:type stored_procedure_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_stored_procedure_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
stored_procedure_name=stored_procedure_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_stored_procedure.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName}"} # type: ignore
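    # Illustrative usage sketch for the delete poller above (commentary only,
    # placeholder names): the poller completes with None once deletion finishes.
    #
    #     poller = await client.sql_resources.begin_delete_sql_stored_procedure(
    #         resource_group_name="my-rg",
    #         account_name="my-cosmos-account",
    #         database_name="my-database",
    #         container_name="my-container",
    #         stored_procedure_name="my-sproc",
    #     )
    #     await poller.result()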
@distributed_trace
def list_sql_user_defined_functions(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlUserDefinedFunctionListResult"]:
"""Lists the SQL userDefinedFunction under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either SqlUserDefinedFunctionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlUserDefinedFunctionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_user_defined_functions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self.list_sql_user_defined_functions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_user_defined_functions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlUserDefinedFunctionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_user_defined_functions.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions"} # type: ignore
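    # Illustrative usage sketch for the paged listing above (commentary only): the
    # method returns an AsyncItemPaged that is consumed with `async for`; names are
    # placeholders.
    #
    #     async for udf in client.sql_resources.list_sql_user_defined_functions(
    #         resource_group_name="my-rg",
    #         account_name="my-cosmos-account",
    #         database_name="my-database",
    #         container_name="my-container",
    #     ):
    #         print(udf.name)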
@distributed_trace_async
async def get_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
**kwargs: Any
) -> "_models.SqlUserDefinedFunctionGetResults":
"""Gets the SQL userDefinedFunction under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name.
:type user_defined_function_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlUserDefinedFunctionGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlUserDefinedFunctionGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_user_defined_function_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
api_version=api_version,
template_url=self.get_sql_user_defined_function.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlUserDefinedFunctionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_user_defined_function.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
async def _create_update_sql_user_defined_function_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
create_update_sql_user_defined_function_parameters: "_models.SqlUserDefinedFunctionCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlUserDefinedFunctionGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlUserDefinedFunctionGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_user_defined_function_parameters, 'SqlUserDefinedFunctionCreateUpdateParameters')
request = build_create_update_sql_user_defined_function_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_user_defined_function_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlUserDefinedFunctionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_user_defined_function_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_user_defined_function(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
create_update_sql_user_defined_function_parameters: "_models.SqlUserDefinedFunctionCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlUserDefinedFunctionGetResults"]:
"""Create or update an Azure Cosmos DB SQL userDefinedFunction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name.
:type user_defined_function_name: str
:param create_update_sql_user_defined_function_parameters: The parameters to provide for the
current SQL userDefinedFunction.
:type create_update_sql_user_defined_function_parameters:
~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlUserDefinedFunctionGetResults or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlUserDefinedFunctionGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlUserDefinedFunctionGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_user_defined_function_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
create_update_sql_user_defined_function_parameters=create_update_sql_user_defined_function_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlUserDefinedFunctionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_user_defined_function.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
async def _delete_sql_user_defined_function_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_user_defined_function_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
api_version=api_version,
template_url=self._delete_sql_user_defined_function_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_user_defined_function_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_user_defined_function( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
user_defined_function_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL userDefinedFunction.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param user_defined_function_name: Cosmos DB userDefinedFunction name.
:type user_defined_function_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_user_defined_function_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
user_defined_function_name=user_defined_function_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_user_defined_function.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName}"} # type: ignore
@distributed_trace
def list_sql_triggers(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlTriggerListResult"]:
"""Lists the SQL trigger under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either SqlTriggerListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlTriggerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlTriggerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_triggers_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=self.list_sql_triggers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_triggers_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlTriggerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_triggers.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers"} # type: ignore
@distributed_trace_async
async def get_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
**kwargs: Any
) -> "_models.SqlTriggerGetResults":
"""Gets the SQL trigger under an existing Azure Cosmos DB database account.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param trigger_name: Cosmos DB trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlTriggerGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlTriggerGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlTriggerGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_trigger_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
api_version=api_version,
template_url=self.get_sql_trigger.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlTriggerGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_trigger.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
async def _create_update_sql_trigger_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
create_update_sql_trigger_parameters: "_models.SqlTriggerCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlTriggerGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlTriggerGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_trigger_parameters, 'SqlTriggerCreateUpdateParameters')
request = build_create_update_sql_trigger_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_trigger_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlTriggerGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_trigger_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_trigger(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
create_update_sql_trigger_parameters: "_models.SqlTriggerCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlTriggerGetResults"]:
"""Create or update an Azure Cosmos DB SQL trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param trigger_name: Cosmos DB trigger name.
:type trigger_name: str
:param create_update_sql_trigger_parameters: The parameters to provide for the current SQL
trigger.
:type create_update_sql_trigger_parameters:
~azure.mgmt.cosmosdb.models.SqlTriggerCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlTriggerGetResults or the result
of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlTriggerGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlTriggerGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_trigger_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
create_update_sql_trigger_parameters=create_update_sql_trigger_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlTriggerGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_trigger.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
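    # Illustrative usage sketch for the trigger create/update above (commentary only;
    # placeholder names, and the SqlTriggerResource fields trigger_type /
    # trigger_operation are assumptions to verify against azure.mgmt.cosmosdb.models):
    #
    #     from azure.mgmt.cosmosdb.models import (
    #         SqlTriggerCreateUpdateParameters,
    #         SqlTriggerResource,
    #     )
    #
    #     poller = await client.sql_resources.begin_create_update_sql_trigger(
    #         resource_group_name="my-rg",
    #         account_name="my-cosmos-account",
    #         database_name="my-database",
    #         container_name="my-container",
    #         trigger_name="my-trigger",
    #         create_update_sql_trigger_parameters=SqlTriggerCreateUpdateParameters(
    #             resource=SqlTriggerResource(
    #                 id="my-trigger",
    #                 body="function () { }",
    #                 trigger_type="Pre",
    #                 trigger_operation="All",
    #             ),
    #         ),
    #     )
    #     trigger = await poller.result()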
async def _delete_sql_trigger_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_trigger_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
api_version=api_version,
template_url=self._delete_sql_trigger_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_trigger_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_trigger( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
trigger_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL trigger.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param trigger_name: Cosmos DB trigger name.
:type trigger_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_trigger_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
trigger_name=trigger_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_trigger.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName}"} # type: ignore
@distributed_trace_async
async def get_sql_role_definition(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.SqlRoleDefinitionGetResults":
"""Retrieves the properties of an existing Azure Cosmos DB SQL Role Definition with the given Id.
:param role_definition_id: The GUID for the Role Definition.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlRoleDefinitionGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlRoleDefinitionGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_role_definition_request(
role_definition_id=role_definition_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self.get_sql_role_definition.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlRoleDefinitionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_role_definition.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
async def _create_update_sql_role_definition_initial(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_definition_parameters: "_models.SqlRoleDefinitionCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlRoleDefinitionGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlRoleDefinitionGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_role_definition_parameters, 'SqlRoleDefinitionCreateUpdateParameters')
request = build_create_update_sql_role_definition_request_initial(
role_definition_id=role_definition_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_role_definition_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlRoleDefinitionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_role_definition_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_role_definition(
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_definition_parameters: "_models.SqlRoleDefinitionCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlRoleDefinitionGetResults"]:
"""Creates or updates an Azure Cosmos DB SQL Role Definition.
:param role_definition_id: The GUID for the Role Definition.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param create_update_sql_role_definition_parameters: The properties required to create or
update a Role Definition.
:type create_update_sql_role_definition_parameters:
~azure.mgmt.cosmosdb.models.SqlRoleDefinitionCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleDefinitionGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlRoleDefinitionGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_role_definition_initial(
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
create_update_sql_role_definition_parameters=create_update_sql_role_definition_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlRoleDefinitionGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_role_definition.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
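    # Illustrative usage sketch for the role-definition create/update above (commentary
    # only; the GUID and scope are placeholders, and the parameter fields role_name,
    # type, assignable_scopes and permissions are assumptions to verify against
    # SqlRoleDefinitionCreateUpdateParameters in azure.mgmt.cosmosdb.models):
    #
    #     from azure.mgmt.cosmosdb.models import SqlRoleDefinitionCreateUpdateParameters
    #
    #     poller = await client.sql_resources.begin_create_update_sql_role_definition(
    #         role_definition_id="00000000-0000-0000-0000-000000000001",
    #         resource_group_name="my-rg",
    #         account_name="my-cosmos-account",
    #         create_update_sql_role_definition_parameters=SqlRoleDefinitionCreateUpdateParameters(
    #             role_name="MyReadOnlyRole",
    #             type="CustomRole",
    #             assignable_scopes=["<database account resource id>"],
    #             permissions=[{"data_actions": ["Microsoft.DocumentDB/databaseAccounts/readMetadata"]}],
    #         ),
    #     )
    #     role_definition = await poller.result()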
async def _delete_sql_role_definition_initial( # pylint: disable=inconsistent-return-statements
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_role_definition_request_initial(
role_definition_id=role_definition_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self._delete_sql_role_definition_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_role_definition_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_role_definition( # pylint: disable=inconsistent-return-statements
self,
role_definition_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL Role Definition.
:param role_definition_id: The GUID for the Role Definition.
:type role_definition_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_role_definition_initial(
role_definition_id=role_definition_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_role_definition.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions/{roleDefinitionId}"} # type: ignore
@distributed_trace
def list_sql_role_definitions(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlRoleDefinitionListResult"]:
"""Retrieves the list of all Azure Cosmos DB SQL Role Definitions.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either SqlRoleDefinitionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlRoleDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlRoleDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_role_definitions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self.list_sql_role_definitions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_role_definitions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlRoleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_role_definitions.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleDefinitions"} # type: ignore
@distributed_trace_async
async def get_sql_role_assignment(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.SqlRoleAssignmentGetResults":
"""Retrieves the properties of an existing Azure Cosmos DB SQL Role Assignment with the given Id.
:param role_assignment_id: The GUID for the Role Assignment.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SqlRoleAssignmentGetResults, or the result of cls(response)
:rtype: ~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlRoleAssignmentGetResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_get_sql_role_assignment_request(
role_assignment_id=role_assignment_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self.get_sql_role_assignment.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SqlRoleAssignmentGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sql_role_assignment.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
async def _create_update_sql_role_assignment_initial(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_assignment_parameters: "_models.SqlRoleAssignmentCreateUpdateParameters",
**kwargs: Any
) -> Optional["_models.SqlRoleAssignmentGetResults"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlRoleAssignmentGetResults"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(create_update_sql_role_assignment_parameters, 'SqlRoleAssignmentCreateUpdateParameters')
request = build_create_update_sql_role_assignment_request_initial(
role_assignment_id=role_assignment_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_update_sql_role_assignment_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SqlRoleAssignmentGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_update_sql_role_assignment_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
@distributed_trace_async
async def begin_create_update_sql_role_assignment(
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
create_update_sql_role_assignment_parameters: "_models.SqlRoleAssignmentCreateUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.SqlRoleAssignmentGetResults"]:
"""Creates or updates an Azure Cosmos DB SQL Role Assignment.
:param role_assignment_id: The GUID for the Role Assignment.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param create_update_sql_role_assignment_parameters: The properties required to create or
update a Role Assignment.
:type create_update_sql_role_assignment_parameters:
~azure.mgmt.cosmosdb.models.SqlRoleAssignmentCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SqlRoleAssignmentGetResults or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentGetResults]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlRoleAssignmentGetResults"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_update_sql_role_assignment_initial(
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
create_update_sql_role_assignment_parameters=create_update_sql_role_assignment_parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SqlRoleAssignmentGetResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_update_sql_role_assignment.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
async def _delete_sql_role_assignment_initial( # pylint: disable=inconsistent-return-statements
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
request = build_delete_sql_role_assignment_request_initial(
role_assignment_id=role_assignment_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self._delete_sql_role_assignment_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_sql_role_assignment_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
@distributed_trace_async
async def begin_delete_sql_role_assignment( # pylint: disable=inconsistent-return-statements
self,
role_assignment_id: str,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an existing Azure Cosmos DB SQL Role Assignment.
:param role_assignment_id: The GUID for the Role Assignment.
:type role_assignment_id: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_sql_role_assignment_initial(
role_assignment_id=role_assignment_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_sql_role_assignment.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments/{roleAssignmentId}"} # type: ignore
@distributed_trace
def list_sql_role_assignments(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SqlRoleAssignmentListResult"]:
"""Retrieves the list of all Azure Cosmos DB SQL Role Assignments.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SqlRoleAssignmentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.SqlRoleAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlRoleAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sql_role_assignments_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=self.list_sql_role_assignments.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sql_role_assignments_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SqlRoleAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sql_role_assignments.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlRoleAssignments"} # type: ignore
async def _retrieve_continuous_backup_information_initial(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
location: "_models.ContinuousBackupRestoreLocation",
**kwargs: Any
) -> Optional["_models.BackupInformation"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.BackupInformation"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(location, 'ContinuousBackupRestoreLocation')
request = build_retrieve_continuous_backup_information_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._retrieve_continuous_backup_information_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackupInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_retrieve_continuous_backup_information_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/retrieveContinuousBackupInformation"} # type: ignore
@distributed_trace_async
async def begin_retrieve_continuous_backup_information(
self,
resource_group_name: str,
account_name: str,
database_name: str,
container_name: str,
location: "_models.ContinuousBackupRestoreLocation",
**kwargs: Any
) -> AsyncLROPoller["_models.BackupInformation"]:
"""Retrieves continuous backup information for a container resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: Cosmos DB database account name.
:type account_name: str
:param database_name: Cosmos DB database name.
:type database_name: str
:param container_name: Cosmos DB container name.
:type container_name: str
:param location: The name of the continuous backup restore location.
:type location: ~azure.mgmt.cosmosdb.models.ContinuousBackupRestoreLocation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackupInformation or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.cosmosdb.models.BackupInformation]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-15-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupInformation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._retrieve_continuous_backup_information_initial(
resource_group_name=resource_group_name,
account_name=account_name,
database_name=database_name,
container_name=container_name,
location=location,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BackupInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_retrieve_continuous_backup_information.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/retrieveContinuousBackupInformation"} # type: ignore
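# ------------------------------------------------------------------------
# Illustrative helper (not produced by the code generator): a minimal async
# sketch of driving the operations above. It assumes this operations group is
# exposed as `sql_resources` on azure.mgmt.cosmosdb.aio.CosmosDBManagementClient
# and that azure-identity is installed; the GUID and resource names passed in
# are placeholders, not real resources.
async def _example_sql_rbac_usage(subscription_id, resource_group_name, account_name):
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.cosmosdb.aio import CosmosDBManagementClient
    async with DefaultAzureCredential() as credential:
        async with CosmosDBManagementClient(credential, subscription_id) as client:
            # Page through every SQL role definition on the account.
            async for definition in client.sql_resources.list_sql_role_definitions(
                resource_group_name=resource_group_name,
                account_name=account_name,
            ):
                print(definition.id)
            # Long-running operations hand back an AsyncLROPoller; awaiting
            # result() blocks until the service reports completion.
            poller = await client.sql_resources.begin_delete_sql_role_assignment(
                role_assignment_id="00000000-0000-0000-0000-000000000001",
                resource_group_name=resource_group_name,
                account_name=account_name,
            )
            await poller.result()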
| 48.390056
| 2,139
| 0.679948
|
fd3c5fd1d3615bc0be462a474e0d1a389d6b01cf
| 1,326
|
py
|
Python
|
softuniada_2021/04_easter_surprise.py
|
elenaborisova/Softuniada-Hackathon
|
0816cb560fe76791f1f419e1de7cb6d50de7ea4a
|
[
"MIT"
] | null | null | null |
softuniada_2021/04_easter_surprise.py
|
elenaborisova/Softuniada-Hackathon
|
0816cb560fe76791f1f419e1de7cb6d50de7ea4a
|
[
"MIT"
] | null | null | null |
softuniada_2021/04_easter_surprise.py
|
elenaborisova/Softuniada-Hackathon
|
0816cb560fe76791f1f419e1de7cb6d50de7ea4a
|
[
"MIT"
] | null | null | null |
def is_valid(curr_row, curr_col, change_row, change_col, matrix, initial_symbol):
return 0 <= curr_row + change_row < rows_count and \
0 <= curr_col + change_col < cols_count and \
matrix[curr_row+change_row][curr_col+change_col] == initial_symbol
def traverse(curr_row, curr_col, initial_pos, directions, matrix, initial_symbol, found_egg_symbol):
for dir in directions:
change_row, change_col = directions[dir][0], directions[dir][1]
if is_valid(curr_row, curr_col, change_row, change_col, matrix, initial_symbol):
matrix[curr_row+change_row][curr_col+change_col] = found_egg_symbol
traverse(curr_row+change_row, curr_col+change_col, initial_pos, directions, matrix, initial_symbol, found_egg_symbol)
rows_count, cols_count = list(map(int, input().split()))
matrix = [input().split() for _ in range(rows_count)]
found_egg_symbol = input()
initial_pos = list(map(int, input().split()))
initial_symbol = matrix[initial_pos[0]][initial_pos[1]]
directions = {
'up': (-1, 0),
'down': (1, 0),
'left': (0, -1),
'right': (0, 1),
}
matrix[initial_pos[0]][initial_pos[1]] = found_egg_symbol
traverse(initial_pos[0], initial_pos[1], initial_pos, directions, matrix, initial_symbol, found_egg_symbol)
[print(''.join(row)) for row in matrix]
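# Self-contained check of the same flood-fill idea (illustrative only; the grid
# below is an assumed example, not judge input). An explicit stack replaces the
# recursion used above, colouring the 4-connected region around (1, 1).
def _demo_flood_fill():
    grid = [list("aabb"), list("aabb"), list("ccaa")]
    target, replacement = grid[1][1], "*"
    stack = [(1, 1)]
    while stack:
        r, c = stack.pop()
        if 0 <= r < len(grid) and 0 <= c < len(grid[0]) and grid[r][c] == target:
            grid[r][c] = replacement
            stack.extend([(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)])
    return ["".join(row) for row in grid]
# _demo_flood_fill() == ['**bb', '**bb', 'ccaa']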
| 39
| 129
| 0.705128
|
89e1635fefdf28e4461ec626c2a22a062ec6d3ac
| 3,673
|
py
|
Python
|
z80/assertions.py
|
stefan-wolfsheimer/Z80-ASM
|
42863f5e329e27fb3b9375510695c027348dd793
|
[
"MIT"
] | 2
|
2021-03-05T15:02:50.000Z
|
2021-10-30T21:53:43.000Z
|
z80/assertions.py
|
stefan-wolfsheimer/Z80-ASM
|
42863f5e329e27fb3b9375510695c027348dd793
|
[
"MIT"
] | null | null | null |
z80/assertions.py
|
stefan-wolfsheimer/Z80-ASM
|
42863f5e329e27fb3b9375510695c027348dd793
|
[
"MIT"
] | null | null | null |
# MIT License
# Copyright (c) 2018 stefan-wolfsheimer
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
FLAGS = ('S', 'Z', '5', 'H', '3', 'P', 'V', 'N', 'C')
def assert_n(n):
if not isinstance(n, int) or n < 0 or n > 0xff:
raise ValueError("%s is not a byte" % str(n))
def assert_nn(nn):
if not isinstance(nn, int) or nn < 0 or nn > 0xffff:
raise ValueError("%s is not a word" % str(nn))
def assert_d(d):
if not isinstance(d, int) or d < -0x80 or d > 0x7f:
raise ValueError("%s is not a signed byte" % str(d))
def assert_q(q):
if not isinstance(q, str) or q not in "BCDEHLAF":
raise ValueError("Invalid register %s (not in BCDEHLAF)" % str(q))
def assert_r(r):
if not isinstance(r, str) or r not in "BCDEHLA":
raise ValueError("Invalid register %s (not in BCDEHLA)" % str(r))
def assert_b(b):
if not isinstance(b, int) or b < 0 or b > 7:
raise ValueError("Bit %s not in range [0, 7)" % str(b))
def assert_aa(ii):
""" is any 16 bit register """
pairs = ('BC', 'DE', 'HL', 'AF', 'SP', 'PC', 'IX', 'IY', 'IR')
if not isinstance(ii, str) or ii not in pairs:
raise ValueError("Invalid register pair %s" % str(ii))
def assert_dd(dd):
pairs = ('BC', 'DE', 'HL', 'SP')
if not isinstance(dd, str) or dd not in pairs:
raise ValueError("Invalid register pair %s" % str(dd))
def assert_qq(qq):
pairs = ('BC', 'DE', 'HL', 'AF')
if not isinstance(qq, str) or qq not in pairs:
raise ValueError("Invalid register pair %s" % str(qq))
def assert_ss(ss):
pairs = ('BC', 'DE', 'HL', 'SP')
if not isinstance(ss, str) or ss not in pairs:
raise ValueError("Invalid register pair %s" % str(ss))
def assert_pp(pp):
pairs = ('BC', 'DE', 'IX', 'SP')
if not isinstance(pp, str) or pp not in pairs:
raise ValueError("Invalid register pair %s" % str(pp))
def assert_rr(rr):
pairs = ('BC', 'DE', 'IY', 'SP')
if not isinstance(rr, str) or rr not in pairs:
raise ValueError("Invalid register pair %s" % str(rr))
def assert_ii(index):
""" is any index bit register """
if not isinstance(index, str) or index not in ('IX', 'IY'):
raise ValueError("Invalid index register %s" % str(index))
def assert_bcde(bcde):
if not isinstance(bcde, str) or bcde not in ('BC', 'DE'):
raise ValueError("Invalid register pair %s (expected BC or DE)" %
str(bcde))
def assert_flag(flag):
if not isinstance(flag, str) or flag not in FLAGS:
raise ValueError("Invalid flag pair %s ( expected %s)" %
(str(flag), ", ".join(FLAGS)))
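# Usage sketch (not part of the original module): a tiny encoder for the Z80
# "LD r, n" instruction built on the validators above. Bad operands are
# rejected by assert_r/assert_n before any bytes are produced; the register
# field lands in bits 3-5 of the opcode, which is the standard Z80 encoding.
_R_CODES = {'B': 0, 'C': 1, 'D': 2, 'E': 3, 'H': 4, 'L': 5, 'A': 7}
def encode_ld_r_n(r, n):
    """Return the two-byte encoding of "LD r, n" as a list of ints."""
    assert_r(r)
    assert_n(n)
    return [0x06 | (_R_CODES[r] << 3), n]
# Example: encode_ld_r_n('A', 0x42) == [0x3E, 0x42]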
| 33.390909
| 79
| 0.640893
|
a95ce6e045bb48355cb802e62f727f6f8e5ebcfc
| 1,750
|
py
|
Python
|
games/spiders/cutter.py
|
jrcowart/megaminer_spring2016
|
115f71dadde49e170af50b2f55127ba184cfb2cb
|
[
"MIT"
] | 4
|
2016-07-25T01:06:05.000Z
|
2018-11-17T09:02:43.000Z
|
games/spiders/cutter.py
|
jrcowart/megaminer_spring2016
|
115f71dadde49e170af50b2f55127ba184cfb2cb
|
[
"MIT"
] | null | null | null |
games/spiders/cutter.py
|
jrcowart/megaminer_spring2016
|
115f71dadde49e170af50b2f55127ba184cfb2cb
|
[
"MIT"
] | 2
|
2018-11-17T09:02:43.000Z
|
2019-11-13T11:09:41.000Z
|
# Generated by Creer at 10:56PM on April 20, 2016 UTC, git hash: '087b1901032ab5bed5806b24830233eac5c2de55'
# This is a simple class to represent the Cutter object in the game. You can extend it by adding utility functions here in this file.
from games.spiders.spiderling import Spiderling
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Cutter(Spiderling):
"""The class representing the Cutter in the Spiders game.
A Spiderling that can cut existing Webs.
"""
def __init__(self):
"""Initializes a Cutter with basic logic as provided by the Creer code generator."""
Spiderling.__init__(self)
# private attributes to hold the properties so they appear read only
self._cutting_web = None
@property
def cutting_web(self):
"""The Web that this Cutter is trying to cut. Null if not cutting.
:rtype: Web
"""
return self._cutting_web
def cut(self, web):
""" Cuts a web, destroying it, and any Spiderlings on it.
Args:
web (Web): The web you want to Cut. Must be connected to the Nest this Cutter is currently on.
Returns:
bool: True if the cut was successfully started, false otherwise.
"""
return self._run_on_server('cut', web=web)
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
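# Hypothetical client-side helper (not generated by Creer; the `nest`,
# `nest_a` and `nest_b` attribute names are assumptions about the generated
# Spider/Web classes, so treat this purely as a sketch): only issue cut()
# when the target web actually touches the nest this Cutter stands on.
def safe_cut(cutter, web):
    """Cut `web` only if it is connected to the cutter's current nest."""
    if cutter.nest is not None and cutter.nest in (web.nest_a, web.nest_b):
        return cutter.cut(web)
    return False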
| 35
| 135
| 0.672
|
1e61f5cb0daf08ea67f29a2b7d7c7210b7dc5e6b
| 11,117
|
py
|
Python
|
src/pico_code/pico/explorer-base/ExplorerWorkout2.py
|
romilly/pico-code
|
57bbc14e0a5c3e874162fcfb1fcd7cca3a838cce
|
[
"MIT"
] | 15
|
2021-02-04T02:38:23.000Z
|
2022-01-20T17:55:15.000Z
|
src/pico_code/pico/explorer-base/ExplorerWorkout2.py
|
romilly/pico-code
|
57bbc14e0a5c3e874162fcfb1fcd7cca3a838cce
|
[
"MIT"
] | 1
|
2021-05-06T10:09:51.000Z
|
2021-05-06T10:09:51.000Z
|
src/pico_code/pico/explorer-base/ExplorerWorkout2.py
|
romilly/pico-code
|
57bbc14e0a5c3e874162fcfb1fcd7cca3a838cce
|
[
"MIT"
] | 2
|
2021-02-04T20:09:01.000Z
|
2021-02-18T16:16:22.000Z
|
# Physical Computing with Graphics on Pico Explorer
# Tony Goodhew 30th Jan 2021
# 10K Ohm potentiometer on ADC0
# LED with 470 Ohm resistor on GP4
import picoexplorer as display
import utime, random, math
import machine
from machine import Pin
width = display.get_width()
height = display.get_height()
display_buffer = bytearray(width * height * 2)
display.init(display_buffer)
led = Pin(4, Pin.IN,Pin.PULL_DOWN)
# Set the backlight to 30%
# display.set_backlight(0.3)
def blk():
display.set_pen(0,0,0)
display.clear()
display.update()
def title(msg,r,g,b):
blk()
display.set_pen(r,g,b)
display.text(msg, 20, 70, 200, 4)
display.update()
utime.sleep(2)
blk()
def horiz(l,t,r): # left, right, top
n = r-l+1 # Horizontal line
for i in range(n):
display.pixel(l + i, t)
def vert(l,t,b): # left, top, bottom
n = b-t+1 # Vertical line
for i in range(n):
display.pixel(l, t+i)
def box(l,t,r,b): # left, top, right, bottom
horiz(l,t,r) # Hollow rectangle
horiz(l,b,r)
vert(l,t,b)
vert(r,t,b)
def line(x,y,xx,yy): # (x,y) to (xx,yy)
if x > xx:
t = x # Swap co-ordinates if necessary
x = xx
xx = t
t = y
y = yy
yy = t
if xx-x == 0: # Avoid div by zero if vertical
vert(x,min(y,yy),max(y,yy))
else: # Draw line one dot at a time L to R
n=xx-x+1
grad = float((yy-y)/(xx-x)) # Calculate gradient
for i in range(n):
y3 = y + int(grad * i)
display.pixel(x+i,y3) # One dot at a time
def show(tt):
display.update()
utime.sleep(tt)
def align(n, max_chars):
# Aligns string of n in max_chars
msg1 = str(n)
space = max_chars - len(msg1)
msg2 = ""
for m in range(space):
msg2 = msg2 +" "
msg2 = msg2 + msg1
return msg2 # String - ready for display
def ring(cx,cy,rr): # Centre and radius
display.circle(cx,cy,rr)
display.set_pen(0,0,0) # background colour
display.circle(cx,cy,rr-1)
def ring2(cx,cy,r): # Centre (x,y), radius
for angle in range(0, 90, 2): # 0 to 90 degrees in 2s
y3=int(r*math.sin(math.radians(angle)))
x3=int(r*math.cos(math.radians(angle)))
display.pixel(cx-x3,cy+y3) # 4 quadrants
display.pixel(cx-x3,cy-y3)
display.pixel(cx+x3,cy+y3)
display.pixel(cx+x3,cy-y3)
def showgraph(v): # Bar graph
display.set_pen(255,0,0)
display.text("V", 8, 50, 240, 3)
display.set_pen(0,0,0) # Blank old bar graph
display.rectangle(29, 50, 220, 16)
display.set_pen(200,200,0) # New bar graph
display.rectangle(29, 50, v, 15)
display.set_pen(255,255,255) # Base line zero
vert(28, 46, 68)
display.set_pen(0,0,255) # percentage
display.text(str(align(v,4)) + " %", 140, 48, 240, 3)
# Define special 5x8 characters - 8 bytes each - 0...7
# Bytes top to bottom, 5 least significant bits only
smiley = [0x00,0x0A,0x00,0x04,0x11,0x0E,0x00,0x00]
sad = [0x00,0x0A,0x00,0x04,0x00,0x0E,0x11,0x00]
heart = [0,0,0,10,31,14,4,0]
b_heart = [0,10,31,0,0,14,4,0]
up_arrow =[0,4,14,21,4,4,0,0]
down_arrow = [0,4,4,21,14,4,0,0]
bits = [128,64,32,16,8,4,2,1] # Powers of 2
def mychar2(xpos, ypos, pattern): # Print defined character
for line in range(8): # 5x8 characters
for ii in range(5): # Low value bits only
i = ii + 3
dot = pattern[line] & bits[i] # Extract bit
if dot: # Only print WHITE dots
display.pixel(xpos+i*2, ypos+line*2)
display.pixel(xpos+i*2, ypos+line*2+1)
display.pixel(xpos+i*2+1, ypos+line*2)
display.pixel(xpos+i*2+1, ypos+line*2+1)
def mychar3(xpos, ypos, pattern): # Print defined character
for line in range(8): # 5x8 characters
for ii in range(5): # Low value bits only
i = ii + 3
dot = pattern[line] & bits[i] # Extract bit
if dot: # Only print WHITE dots
display.pixel(xpos+i*3, ypos+line*3)
display.pixel(xpos+i*3, ypos+line*3+1)
display.pixel(xpos+i*3, ypos+line*3+2)
display.pixel(xpos+i*3+1, ypos+line*3)
display.pixel(xpos+i*3+1, ypos+line*3+1)
display.pixel(xpos+i*3+1, ypos+line*3+2)
display.pixel(xpos+i*3+2, ypos+line*3)
display.pixel(xpos+i*3+2, ypos+line*3+1)
display.pixel(xpos+i*3+2, ypos+line*3+2)
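# Generalised form of mychar2/mychar3 (added as an illustration and not called
# below): render the same 5x8 bitmap at any integer scale by expanding every
# set bit into a scale x scale block of pixels.
def mychar_scaled(xpos, ypos, pattern, scale):
    for line in range(8):
        for ii in range(5):
            i = ii + 3
            if pattern[line] & bits[i]:  # bit set -> draw a solid block
                for dx in range(scale):
                    for dy in range(scale):
                        display.pixel(xpos + i * scale + dx, ypos + line * scale + dy)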
# ==== Main ====
title("Pimoroni Pico Explorer Workout",200,200,0)
# === Basics ===
title("Basics",200,0,0)
display.set_pen(255,255,0)
line(10,10,100,100)
show(0.25)
display.set_pen(255,0,255)
line(10,100,100,10)
show(0.25)
display.set_pen(0,255,255)
box(0,105,100,205)
show(0.25)
display.set_pen(255,0,0)
ring(160,50,50)
show(0.25)
display.set_pen(0,0,255)
ring2(160,160,50)
show(0.25)
display.text("Tony Goodhew", 15, 220, 240, 3)
display.update()
mychar2(20, 130, up_arrow) # Defined characters
mychar2(40, 130, smiley)
mychar2(60, 130, heart)
mychar2(20, 160, down_arrow)
mychar2(40, 160, sad)
mychar2(60, 160, b_heart)
mychar3(120, 130, up_arrow) # Bigger
mychar3(140, 130, smiley)
mychar3(160, 130, heart)
mychar3(120, 160, down_arrow)
mychar3(140, 160, sad)
mychar3(160, 160, b_heart)
show(3)
# Character Set - No lower case!
title("Character set",200,200,0)
display.set_pen(0,200,0)
display.text("Character Set", 15, 15, 200, 2)
s = ""
count = 0
for i in range(32,128,8):
for j in range(0,8,1):
p = i + j
if ((p < 97) or (p>122)):
s = s + chr(p)
count = count + 1
if (count)/16 == int((count)/16):
s = s +" " # 'space' for text wrap
print(s)
display.set_pen(200,200,0)
display.text(s, 15, 40, 200, 2)
display.set_pen(0,0,200)
display.text("No lower case", 140, 110, 200, 1)
display.set_pen(200,0,0)
display.text("Size 3", 15, 130, 200, 3)
display.set_pen(0,0,200)
display.text("Size 4", 15, 156, 200, 4)
display.set_pen(0,200,0)
display.text("Size 6", 15, 190, 200, 6)
display.update()
utime.sleep(5)
# Lines demo
title("lines",200,0,0)
for step in range(18, 2, -5):
blk()
display.set_pen(0,0,0)
display.clear()
red = random.randint(0, 255)
green = random.randint(0, 255)
blue =random.randint(0, 255)
display.set_pen(red, green, blue)
x = 0 # Block 1
y = 0
x2 = 239
for y2 in range(0,240, step):
line(x, y, x2, y2)
display.update()
x = 0 # Block 2
y = 239
x2 = 239
for y2 in range(239,-1,-step):
line(x, y, x2, y2)
display.update()
x = 239 # Block 3
y = 0
x2 = 0
for y2 in range(0,240, step):
line(x, y, x2, y2)
display.update()
x = 239 # Block 4
y = 239
x2 = 0
for y2 in range(239,-1,-step):
line(x, y, x2, y2)
display.update()
utime.sleep(0.5)
# === Sin & Cos graphs ====
title("Drawing graphs",0,200,0)
factor = 361 /240
#sine = []
display.set_pen(80,80,80)
horiz(0,60,239)
display.update()
display.set_pen(200,0,0)
for x in range(0,240):
y = int ((math.sin(math.radians(x * factor)))* -50) + 60
# sine.append(y)
display.pixel(x,y)
display.update()
display.text("Sine", 40, 70, 200, 2)
display.update()
display.set_pen(80,80,80)
horiz(0,180,239)
display.update()
display.set_pen(0,200,0)
for x in range(0,240):
y = int((math.cos(math.radians(x * factor)))* -50) + 180
display.pixel(x,y)
display.text("Cosine", 90, 160, 200, 2)
display.update()
utime.sleep(3)
title("Text on a path",0,0,200)
# Text on a downward slant
display.set_pen(255,0,0)
msg =" Pimoroni pico explorer"
b = bytes(msg, 'utf-8')
for i in range(len(b)):
c = b[i]
display.character(c, i*10,i*5 +110,2)
display.update()
# Text on a Sin wave
factor = 361 /240
display.set_pen(0,255,0)
for i in range(len(b)):
y = int ((math.sin(math.radians(i*10 * factor)))* -50) + 60
c = b[i]
display.character(c, i*10,y +10,2)
display.update()
utime.sleep(3)
title("Scrolling text on a Sine Curve",0,0,200)
# Scrolling on a Sine curve
# Modified from a method by Tony DiCola for a SSD1306
msg = 'Scrolling text on a sine curve using a pico explorer!'
f_width = 13 # Font width in pixels
f_height = 10 # Font Height in pixels
amp = 100 # Amplitude of sin wave
freq = 1 # Screen cycles (360 degrees)
pos = width # X position of the first character in the msg.
msg_len_px = len(msg) * f_width # Pixel width of the msg.
# Extra wide lookup table - calculate once to speed things up
y_table = [0] * (width+f_width) # 1 character extra
for i in range(len(y_table)):
p = i / (width-1) # Compute current position along
# lookup table in 0 to 1 range.
# Get y co-ordinate from table
y_table[i] = int(((amp/2.0) * math.sin(2.0*math.pi*freq*p)) + (amp/2.0))
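# The same lookup-table idea as a reusable function (illustrative only; the
# scroller below keeps using y_table): one full sine cycle across the screen,
# padded by one character width so char_x + f_width never indexes out of range.
def make_sine_table(screen_w, pad, amplitude, cycles):
    table = []
    for i in range(screen_w + pad):
        p = i / (screen_w - 1)  # position along the table in the 0..1 range
        table.append(int((amplitude / 2.0) * math.sin(2.0 * math.pi * cycles * p) + amplitude / 2.0))
    return table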
# Scrolling loop:
blk()
running = True
while running:
# Clear scroll area
display.set_pen(0,0,0)
display.rectangle(0, 50, 240, 200)
display.set_pen(200,200,0)
# Start again if msg finished
pos -= 1
if pos <= -msg_len_px:
pos = width
# Go through each character in the msg.
for i in range(len(msg)):
char = msg[i]
char_x = pos + (i * f_width) # Character's X position on the screen.
if -f_width <= char_x < width:
            # If the character is visible, draw it.
display.text(char, char_x + 5, y_table[char_x+f_width]+60,2)
display.set_pen(100,100,100)
display.text("Press button Y to halt", 5, 215, 230, 2)
display.update()
if display.is_pressed(3): # Y button is pressed ?
running = False
utime.sleep(0.01)
blk()
# Physical Computing: Potentiometer, LED PWM and Bar Graph
potentiometer = machine.ADC(26) # 10K Ohm pot on ADC0
led = machine.PWM(machine.Pin(4)) # LED with 470 Ohm resistor on GP4
led.freq(1000)
led.duty_u16(0) # Switch LED OFF
title("Physical computing with graphics",0,0,200)
running = True
display.set_pen(255,255,255)
display.text("Turn Potentiometer", 20, 15, 230, 2)
display.set_pen(100,100,100)
display.text("Press button Y to halt", 5, 215, 230, 2)
display.set_pen(0,100,0)
box(60,80,180,200)
while running:
pot_raw = potentiometer.read_u16()
pot = pot_raw/256
# Adjust end values: 0 & 255
pot = int(pot * 256.0 /255.0) - 1
if pot > 255:
pot = 255
# print(pot) # Check pot's range is 0 -> 255 inclusive
percent = int(100 * pot / 255)
showgraph(percent)
display.update()
duty = pot_raw - 300 # duty must not go negative
if duty < 0 :
duty = 0
led.duty_u16(duty)
display.set_pen(pot,pot,pot) # grey to white
display.circle(120,140,50)
if display.is_pressed(3): # Y button is pressed ?
running = False
# Tidy up
led.duty_u16(0) # LED off
led = Pin(4, Pin.IN, Pin.PULL_DOWN) # Normal state
blk()
display.set_pen(200,0,0)
display.text("All Done!", 55, 140, 200, 3)
display.update()
utime.sleep(2)
blk()
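# Stand-alone helper distilled from the potentiometer loop above (illustrative
# only): map a raw 16-bit ADC reading to a 0-255 byte and a 0-100 percentage.
# Unlike the demo, which only clamps the top end, this clamps both ends.
def pot_to_levels(pot_raw):
    pot = int((pot_raw / 256) * 256.0 / 255.0) - 1
    pot = min(max(pot, 0), 255)
    return pot, int(100 * pot / 255)
# pot_to_levels(65535) == (255, 100); pot_to_levels(0) == (0, 0)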
| 29.80429
| 77
| 0.601421
|
b3906d3ddaa50c3c0c9bf004ff3230845a39cec4
| 2,706
|
py
|
Python
|
apps/beeswax/src/beeswax/management/commands/create_table_query_data.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
apps/beeswax/src/beeswax/management/commands/create_table_query_data.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
apps/beeswax/src/beeswax/management/commands/create_table_query_data.py
|
yetsun/hue
|
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import logging
import sys
from django.core.management.base import BaseCommand
from desktop.lib import django_mako
from beeswax.server import dbms
from beeswax.server.dbms import get_query_server_config
from beeswax.design import hql_query
from beeswax import hive_site
from useradmin.models import install_sample_user
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Create table sys.query_data over hive.hook.proto.base-directory
"""
args = ''
help = 'Create table sys.query_data over hive.hook.proto.base-directory'
def handle(self, *args, **options):
create_table()
def create_table(user=None, query_server=None, table=None):
if not user:
user = install_sample_user()
if not query_server:
query_server = get_query_server_config('beeswax')
if not table:
base_dir = hive_site.get_hive_hook_proto_base_directory()
if not base_dir:
msg = _('Error creating table query_data hive.hook.proto.base-directory is not configured')
LOG.error(msg)
return False
table = {
'name': 'query_data',
'external_location': base_dir
}
server = dbms.get(user, query_server)
for query in ["create_table_query_data.mako", "msck.mako"]:
proposed_query = django_mako.render_to_string(query, {'table': table})
query = hql_query(proposed_query)
try:
handle = server.execute_and_wait(query)
if not handle:
LOG.error(_('Error executing %s: Operation timeout.' % query))
return False
server.close(handle)
except Exception as ex:
LOG.error(_('Error executing %(query)s: %(error)s.') % {'query': query, 'error': ex})
return False
LOG.info(_('Table query_data has been created successfully'))
return True
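# Illustrative invocation (not part of the command itself): besides
# `./manage.py create_table_query_data`, the command can be driven through
# Django's management API. Both assume a configured Hue environment and a
# reachable HiveServer2.
def run_create_table_query_data():
    from django.core.management import call_command
    call_command('create_table_query_data')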
| 31.465116
| 97
| 0.734294
|
d0393989651b3644a64a62b6f74f91ce19d7cb29
| 945
|
py
|
Python
|
tests/unix/ffi_float.py
|
sebastien-riou/micropython
|
116c15842fd48ddb77b0bc016341d936a0756573
|
[
"MIT"
] | 4,538
|
2017-10-20T05:19:03.000Z
|
2022-03-30T02:29:30.000Z
|
tests/unix/ffi_float.py
|
sebastien-riou/micropython
|
116c15842fd48ddb77b0bc016341d936a0756573
|
[
"MIT"
] | 1,088
|
2017-10-21T07:57:22.000Z
|
2022-03-31T08:15:49.000Z
|
tests/unix/ffi_float.py
|
sebastien-riou/micropython
|
116c15842fd48ddb77b0bc016341d936a0756573
|
[
"MIT"
] | 1,860
|
2017-10-20T05:22:35.000Z
|
2022-03-27T10:54:14.000Z
|
# test ffi float support
try:
import ffi
except ImportError:
print("SKIP")
raise SystemExit
def ffi_open(names):
err = None
for n in names:
try:
mod = ffi.open(n)
return mod
except OSError as e:
err = e
raise err
libc = ffi_open(("libc.so", "libc.so.0", "libc.so.6", "libc.dylib"))
try:
strtof = libc.func("f", "strtof", "sp")
except OSError:
# Some libc's (e.g. Android's Bionic) define strtof as macro/inline func
# in terms of strtod().
print("SKIP")
raise SystemExit
print("%.6f" % strtof("1.23", None))
strtod = libc.func("d", "strtod", "sp")
print("%.6f" % strtod("1.23", None))
# test passing double and float args
libm = ffi_open(("libm.so", "libm.so.6", "libc.so.0", "libc.so.6", "libc.dylib"))
tgamma = libm.func("d", "tgamma", "d")
for fun in (tgamma,):
for val in (0.5, 1, 1.0, 1.5, 4, 4.0):
print("%.6f" % fun(val))
| 23.04878
| 81
| 0.568254
|
801bd14891412bfcebf8217f7efd06c55f3de54d
| 736
|
py
|
Python
|
optimal_experiments_Huber_1.py
|
superporchetta/robust_linear_regression
|
d0b0022b442d7498ebecd1dcd44b5b2bea9a2459
|
[
"MIT"
] | null | null | null |
optimal_experiments_Huber_1.py
|
superporchetta/robust_linear_regression
|
d0b0022b442d7498ebecd1dcd44b5b2bea9a2459
|
[
"MIT"
] | null | null | null |
optimal_experiments_Huber_1.py
|
superporchetta/robust_linear_regression
|
d0b0022b442d7498ebecd1dcd44b5b2bea9a2459
|
[
"MIT"
] | null | null | null |
from src.utils import experiment_runner
from itertools import product
if __name__ == "__main__":
deltas_large = [0.5, 1.0, 2.0, 5.0, 10.0] # 0.5, 1.0, 2.0, 5.0, 10.0
percentages = [0.1, 0.3] # 0.01, 0.05, 0.1, 0.3
loss_name = "Huber"
experiment_settings = [
{
"loss_name" : loss_name,
"alpha_min": 0.01,
"alpha_max": 100,
"alpha_pts": 36,
"percentage": p,
"delta_small": 0.1,
"delta_large": dl,
"beta": 0.0,
"experiment_type": "reg_param huber_param optimal",
}
for dl, p in product(deltas_large, percentages)
]
for dic in experiment_settings:
experiment_runner(**dic)
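# Quick dry-run helper (not in the original script): rebuild the same grid and
# report how many runs the product() expansion yields before launching them.
def count_settings(deltas, percents):
    return len(list(product(deltas, percents)))
# count_settings(deltas_large, percentages) == 10 for the values above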
| 28.307692
| 72
| 0.535326
|
78b974e828684d14f435a7f273a108f37a36d762
| 1,981
|
py
|
Python
|
dhost/notifications/views.py
|
dhost-project/dhost
|
ca6a4a76a737174b24165e20edeb1d1019a9424b
|
[
"MIT"
] | null | null | null |
dhost/notifications/views.py
|
dhost-project/dhost
|
ca6a4a76a737174b24165e20edeb1d1019a9424b
|
[
"MIT"
] | 67
|
2021-07-06T11:50:25.000Z
|
2021-10-14T13:45:51.000Z
|
dhost/notifications/views.py
|
dhost-project/dhost
|
ca6a4a76a737174b24165e20edeb1d1019a9424b
|
[
"MIT"
] | null | null | null |
from rest_framework.decorators import action
from rest_framework.response import Response
from dhost.api.viewsets import DestroyListRetrieveViewSet
from .models import Notification
from .serializers import NotificationSerializer
class NotificationViewSet(DestroyListRetrieveViewSet):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
def get_queryset(self):
queryset = super().get_queryset()
return queryset.filter(user=self.request.user)
@action(detail=False, methods=["get"])
def count(self, request):
"""Total count of notifications."""
data = {"count": self.get_queryset().count()}
return Response(data)
@action(detail=False, methods=["get"])
def unread_count(self, request):
"""Count of unread notifications."""
data = {"count": self.get_queryset().unread().count()}
return Response(data)
@action(detail=True, methods=["get"])
def read(self, request, pk=None):
"""Mark notification has read."""
notification = self.get_object()
notification.mark_as_read()
serializer = self.get_serializer(notification)
return Response(serializer.data)
@action(detail=True, methods=["get"])
def unread(self, request, pk=None):
"""Mark notification has unread."""
notification = self.get_object()
notification.mark_as_unread()
serializer = self.get_serializer(notification)
return Response(serializer.data)
@action(detail=False, methods=["get"])
def mark_all_as_read(self, request):
"""Mark all notifications has read."""
data = {"count": self.get_queryset().mark_all_as_read()}
return Response(data)
@action(detail=False, methods=["get"])
def mark_all_as_unread(self, request):
"""Mark all notifications has unread."""
data = {"count": self.get_queryset().mark_all_as_unread()}
return Response(data)
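# Routing sketch (this lives in a urls.py in the real project; the URL prefix
# and basename below are assumptions, not taken from the repository):
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register('notifications', NotificationViewSet, basename='notification')
#     urlpatterns = router.urls
#
# With that registration the extra @action endpoints resolve to paths such as
# /notifications/unread_count/ and /notifications/{pk}/read/.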
| 34.754386
| 66
| 0.674407
|
ec953bf462daf5f05c4704c4cf5bd5057288f255
| 726
|
py
|
Python
|
test_project/forward_different_fields/urls.py
|
dr-rompecabezas/django-autocomplete-light
|
4adff74ac4b20a644f57d2a40a283ed79264543d
|
[
"MIT"
] | null | null | null |
test_project/forward_different_fields/urls.py
|
dr-rompecabezas/django-autocomplete-light
|
4adff74ac4b20a644f57d2a40a283ed79264543d
|
[
"MIT"
] | null | null | null |
test_project/forward_different_fields/urls.py
|
dr-rompecabezas/django-autocomplete-light
|
4adff74ac4b20a644f57d2a40a283ed79264543d
|
[
"MIT"
] | null | null | null |
from dal import autocomplete
from django.urls import re_path as url
class ListWithForwardsView(autocomplete.Select2ListView):
def get_list(self):
self.forwarded.get("name")
self.forwarded.get("checkbox")
self.forwarded.get("select")
self.forwarded.get("select_radio")
self.forwarded.get("multiselect")
self.forwarded.get("multiselect_checks")
self.forwarded.get("multiselect_checks_poor")
self.forwarded.get("const42")
self.forwarded.get("reverse_name")
return [str(self.forwarded)]
urlpatterns = [
url(
'^forward_different_fields/$',
ListWithForwardsView.as_view(),
name='forward_different_fields'
),
]
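# Form-side counterpart (illustrative; the widget and forward spec below are
# assumptions rather than code copied from the test project, though the field
# names mirror what ListWithForwardsView reads back):
#
#     from dal import autocomplete, forward
#     from django import forms
#
#     class ForwardDemoForm(forms.Form):
#         name = forms.CharField()
#         choice = forms.ChoiceField(
#             widget=autocomplete.ListSelect2(
#                 url='forward_different_fields',
#                 forward=['name', forward.Const(42, 'const42'),
#                          forward.Field('name', 'reverse_name')],
#             )
#         )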
| 25.928571
| 57
| 0.666667
|
17ffa3521ff98fcad5bd2483ac4000a37213f0c9
| 33,006
|
py
|
Python
|
muranoclient/tests/functional/cli/test_murano.py
|
starlingx-staging/stx-python-muranoclient
|
c4e5a5e36ee1e1920f1639ecda981c97bd4c0a03
|
[
"Apache-2.0"
] | null | null | null |
muranoclient/tests/functional/cli/test_murano.py
|
starlingx-staging/stx-python-muranoclient
|
c4e5a5e36ee1e1920f1639ecda981c97bd4c0a03
|
[
"Apache-2.0"
] | null | null | null |
muranoclient/tests/functional/cli/test_murano.py
|
starlingx-staging/stx-python-muranoclient
|
c4e5a5e36ee1e1920f1639ecda981c97bd4c0a03
|
[
"Apache-2.0"
] | 1
|
2019-01-11T16:13:46.000Z
|
2019-01-11T16:13:46.000Z
|
# Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import unittest
from muranoclient.tests.functional.cli import \
murano_test_utils as utils
from muranoclient.tests.functional import muranoclient as murano_client
from oslo_utils.strutils import bool_from_string as str2bool
# TODO(mstolyarenko): need to remove this workaround when
# https://bugs.launchpad.net/python-muranoclient/+bug/1625039 is fixed
backend_name =\
murano_client.ClientTestBase.get_backend_flag().rstrip().split()[-1]
class SimpleReadOnlyMuranoClientTest(utils.CLIUtilsTestBase):
"""Basic, read-only tests for Murano CLI client.
Basic smoke test for the Murano CLI commands which do not require
creating or modifying murano objects.
"""
def test_category_list(self):
category = self.get_table_struct('category-list')
self.assertEqual(['ID', 'Name'], category)
def test_env_template_list(self):
templates = self.get_table_struct('env-template-list')
self.assertEqual(['ID', 'Name', 'Created', 'Updated', 'Is public'],
templates)
def test_environment_list(self):
environment = self.get_table_struct('environment-list')
self.assertEqual(['ID', 'Name', 'Status', 'Created', 'Updated'],
environment)
def test_package_list(self):
packages = self.get_table_struct('package-list')
self.assertEqual(['ID', 'Name', 'FQN', 'Author', 'Active',
'Is Public', 'Type', 'Version'], packages)
class TableStructureMuranoClientTest(utils.CLIUtilsTestBase):
"""Smoke test for the Murano CLI commands
Smoke test for the Murano CLI commands which checks table
    structure after create or delete of category, env-template,
    environment and package.
"""
def test_table_struct_deployment_list(self):
"""Test scenario:
1) create environment
2) check table structure
"""
environment = self.create_murano_object('environment',
'MuranoTestTS-depl-list')
table_struct = self.get_table_struct('deployment-list',
params=environment['ID'])
self.assertEqual(['ID', 'State', 'Created', 'Updated', 'Finished'],
table_struct)
def test_table_struct_of_environment_create(self):
"""Test scenario:
1) create environment
2) check table structure
"""
self.create_murano_object('environment', 'MuranoTestTS-env-create')
table_struct = self.get_table_struct('environment-list')
self.assertEqual(['ID', 'Name', 'Status', 'Created', 'Updated'],
table_struct)
def test_table_struct_of_environment_delete(self):
"""Test scenario:
1) create environment
2) delete environment
3) check table structure
"""
environment = self.create_murano_object('environment',
'MuranoTestTS-env-del')
self.delete_murano_object('environment', environment)
table_struct = self.get_table_struct('environment-list')
self.assertEqual(['ID', 'Name', 'Status', 'Created', 'Updated'],
table_struct)
def test_table_struct_of_category_create(self):
"""Test scenario:
1) create category
2) check table structure
"""
self.create_murano_object('category', 'MuranoTestTS-cat-create')
table_struct = self.get_table_struct('category-list')
self.assertEqual(['ID', 'Name'], table_struct)
def test_table_struct_of_category_delete(self):
"""Test scenario:
1) create category
2) delete category
3) check table structure
"""
category = self.create_murano_object('category',
'MuranoTestTS-cat-create')
self.delete_murano_object('category', category)
category = self.get_table_struct('category-list')
self.assertEqual(['ID', 'Name'], category)
def test_table_struct_of_env_template_create(self):
"""Test scenario:
1) create env_template
2) check table structure
"""
self.create_murano_object('env-template',
'MuranoTestTS-env-tmp-create')
table_struct = self.get_table_struct('env-template-list')
self.assertEqual(['ID', 'Name', 'Created', 'Updated', 'Is public'],
table_struct)
def test_table_struct_of_env_template_delete(self):
"""Test scenario:
1) create env_template
2) delete env_template
3) check table structure
"""
env_template = self.create_murano_object('env-template',
'MuranoTestTS-env-tmp-create')
self.delete_murano_object('env-template', env_template)
table_struct = self.get_table_struct('env-template-list')
self.assertEqual(['ID', 'Name', 'Created', 'Updated', 'Is public'],
table_struct)
class EnvironmentMuranoSanityClientTest(utils.CLIUtilsTestBase):
"""Sanity tests for testing actions with environment.
Smoke test for the Murano CLI commands which checks basic actions with
environment command like create, delete, rename etc.
"""
def test_environment_create(self):
"""Test scenario:
1) create environment
2) check that created environment exist
"""
environment = self.create_murano_object('environment',
'TestMuranoSanityEnv')
env_list = self.listing('environment-list')
# Deleting dates from dictionaries to skip it in assert
map(lambda x: x.pop('Updated', None),
env_list + [environment])
map(lambda x: x.pop('Created', None),
env_list + [environment])
self.assertIn(environment, env_list)
def test_environment_delete(self):
"""Test scenario:
1) create environment
2) delete environment
"""
environment = self.create_murano_object('environment',
'TestMuranoSanityEnv')
self.delete_murano_object('environment', environment)
env_list = self.listing('environment-list')
self.assertNotIn(environment, env_list)
def test_environment_rename(self):
"""Test scenario:
1) create environment
2) rename environment
"""
environment = self.create_murano_object('environment',
'TestMuranoSanityEnv')
new_env_name = self.generate_name('TestMuranoSEnv-env-rename')
rename_params = "{0} {1}".format(environment['Name'], new_env_name)
new_list = self.listing('environment-rename', params=rename_params)
renamed_env = self.get_object(new_list, new_env_name)
self.addCleanup(self.delete_murano_object, 'environment', renamed_env)
new_env_list = self.listing('environment-list')
# Deleting dates from dictionaries to skip it in assert
map(lambda x: x.pop('Updated', None),
new_env_list + [environment] + [renamed_env])
map(lambda x: x.pop('Created', None),
new_env_list + [environment] + [renamed_env])
self.assertIn(renamed_env, new_env_list)
self.assertNotIn(environment, new_env_list)
def test_table_struct_env_show(self):
"""Test scenario:
1) create environment
2) check structure of env_show object
"""
environment = self.create_murano_object('environment',
'TestMuranoSanityEnv')
env_show = self.listing('environment-show', params=environment['Name'])
# Check structure of env_show object
self.assertEqual(['acquired_by', 'created', 'description_text', 'id',
'name', 'services', 'status', 'tenant_id',
'updated', 'version'],
map(lambda x: x['Property'], env_show))
def test_environment_show(self):
"""Test scenario:
1) create environment
2) check that env_name, ID, updated and created values
exist in env_show object
"""
environment = self.create_murano_object('environment',
'TestMuranoSanityEnv')
env_show = self.listing('environment-show', params=environment['Name'])
self.assertIn(environment['Created'],
map(lambda x: x['Value'], env_show))
self.assertIn(environment['Updated'],
map(lambda x: x['Value'], env_show))
self.assertIn(environment['Name'], map(lambda x: x['Value'], env_show))
self.assertIn(environment['ID'], map(lambda x: x['Value'], env_show))
def test_environment_delete_by_id(self):
"""Test scenario:
1) create environment
2) delete environment by environment ID
"""
env_name = self.generate_name('TestMuranoSanityEnv')
environment = self.create_murano_object('environment', env_name)
result = self.murano('environment-delete', params=environment['ID'],
fail_ok=False)
self.assertNotIn(environment['Name'], result)
env_list = self.listing('environment-list')
self.assertNotIn(environment, env_list)
def test_environment_model_show(self):
"""Test scenario:
1) create environment
2) check that the result of environment-model-show is a valid
non-empty json
"""
env_name = self.generate_name('TestMuranoSanityEnv')
environment = self.create_murano_object('environment', env_name)
model = self.murano('environment-model-show', params=environment['ID'])
result = json.loads(model)
self.assertEqual(4, len(result))
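# Stand-alone sketch (not part of this generated suite): the same
# create / list / delete round trip the sanity tests above perform, driven
# directly through the `murano` CLI with subprocess. The environment name is
# an arbitrary example and credentials come from the usual OS_* variables.
def cli_environment_roundtrip(name='cli-sketch-env'):
    import subprocess
    subprocess.check_call(['murano', 'environment-create', name])
    listing = subprocess.check_output(['murano', 'environment-list']).decode()
    assert name in listing
    subprocess.check_call(['murano', 'environment-delete', name])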
class CategoryMuranoSanityClientTest(utils.CLIUtilsTestBase):
"""Sanity tests for testing actions with Category.
Smoke test for the Murano CLI commands which checks basic actions with
category command like create, delete etc.
"""
def test_category_create(self):
"""Test scenario:
1) create category
2) check that created category exist
"""
category = self.create_murano_object('category',
'TestMuranoSanityCategory')
category_list = self.listing('category-list')
self.assertIn(category, category_list)
def test_category_delete(self):
"""Test scenario:
1) create category
2) delete category
3) check that category has been deleted successfully
"""
category = self.create_murano_object('category',
'TestMuranoSanityCategory')
self.delete_murano_object('category', category)
category_list = self.listing('category-list')
self.assertNotIn(category, category_list)
def test_table_struct_category_show(self):
"""Test scenario:
1) create category
2) check table structure of category-show object
"""
category = self.create_murano_object('category',
'TestMuranoSanityCategory')
category_show = self.listing('category-show', params=category['ID'])
self.assertEqual(['id', 'name', 'packages'],
map(lambda x: x['Property'], category_show))
def test_category_show(self):
"""Test scenario:
1) create category
2) check that category values exist in category_show object
"""
category = self.create_murano_object('category',
'TestMuranoSanityCategory')
category_show = self.listing('category-show', params=category['ID'])
self.assertIn(category['ID'], map(lambda x: x['Value'], category_show))
self.assertIn(category['Name'],
map(lambda x: x['Value'], category_show))
def test_non_existing_category_delete(self):
"""Test scenario:
1) try to call category-delete for non existing category
2) check that error message contains user friendly substring
"""
result = self.murano('category-delete', params='non-existing',
fail_ok=True)
self.assertIn("Failed to delete 'non-existing'; category not found",
result)
def test_non_existing_category_show(self):
"""Test scenario:
1) try to call category-show for non existing category
2) check that error message contains user friendly substring
"""
result = self.murano('category-show', params='non-existing',
fail_ok=True)
self.assertIn("Category id 'non-existing' not found", result)
def test_category_create_with_long_name(self):
"""Test scenario:
1) try to create category with long name (>80)
2) check that error message contains user friendly substring
"""
result = self.murano('category-create', params='name' * 21,
fail_ok=True)
self.assertIn(
"Category name should be 80 characters maximum",
result)
class EnvTemplateMuranoSanityClientTest(utils.CLIUtilsTestBase):
"""Sanity tests for testing actions with Environment template.
Smoke test for the Murano CLI commands which checks basic actions with
    env-template command like create, delete etc.
"""
def test_environment_template_create(self):
"""Test scenario:
1) create environment template
2) check that created environment template exist
"""
env_template = self.create_murano_object('env-template',
'TestMuranoSanityEnvTemp')
env_template_list = self.listing('env-template-list')
        # Delete dates from the dictionaries so they are skipped in the assert
map(lambda x: x.pop('Updated', None),
env_template_list + [env_template])
map(lambda x: x.pop('Created', None),
env_template_list + [env_template])
self.assertIn(env_template, env_template_list)
def test_environment_template_delete(self):
"""Test scenario:
1) create environment template
2) delete environment template
3) check that deleted environment template doesn't exist
"""
env_template = self.create_murano_object('env-template',
'TestMuranoSanityEnvTemp')
env_template_list = self.delete_murano_object('env-template',
env_template)
self.assertNotIn(env_template, env_template_list)
def test_table_struct_env_template_show(self):
"""Test scenario:
1) create environment template
2) check table structure of env-template-show object
"""
env_template = self.create_murano_object('env-template',
'TestMuranoSanityEnvTemp')
env_template_show = self.listing('env-template-show',
params=env_template['ID'])
tested_env_template = map(lambda x: x['Property'], env_template_show)
self.assertIn('created', tested_env_template)
self.assertIn('id', tested_env_template)
self.assertIn('name', tested_env_template)
self.assertIn('services', tested_env_template)
self.assertIn('tenant_id', tested_env_template)
self.assertIn('updated', tested_env_template)
self.assertIn('version', tested_env_template)
def test_env_template_show(self):
"""Test scenario:
1) create environment template
2) check that environment template values exist in
env-template-show object
"""
env_template = self.create_murano_object('env-template',
'TestMuranoSanityEnvTemp')
env_template_show = self.listing('env-template-show',
params=env_template['ID'])
tested_env = map(lambda x: x['Value'], env_template_show)
self.assertIn(env_template['ID'], tested_env)
self.assertIn(env_template['Name'], tested_env)
def test_env_template_create_environment(self):
"""Test scenario:
1) create environment template
2) create environment from template
"""
env_template = self.create_murano_object('env-template',
'TestMuranoSanityEnvTemp')
new_env_name = self.generate_name('EnvFromTemp')
params = "{0} {1}".format(env_template['ID'], new_env_name)
env_created = self.listing('env-template-create-env', params=params)
tested_env_created = map(lambda x: x['Property'], env_created)
self.assertIn('environment_id', tested_env_created)
self.assertIn('session_id', tested_env_created)
def test_env_template_clone(self):
"""Test scenario:
1) create environment template
2) clone template
3) check that create environment template has the new name
4) delete new template
"""
env_template = self.create_murano_object_parameter(
'env-template', 'TestMuranoSanityEnvTemp', '--is-public')
new_template = self.generate_name('TestMuranoSanityEnvTemp')
params = "{0} {1}".format(env_template['ID'], new_template)
template_created = self.listing('env-template-clone', params=params)
list = map(lambda x: ({x['Property']: x['Value']}), template_created)
result_name = filter(lambda x: x.get('name'), list)[0]['name']
result_id = filter(lambda x: x.get('id'), list)[0]['id']
self.listing('env-template-delete', params=result_id)
self.assertIn(result_name, new_template)
class PackageMuranoSanityClientTest(utils.CLIUtilsTestPackagesBase):
"""Sanity tests for testing actions with Packages.
Smoke tests for the Murano CLI commands which check basic actions with
packages like import, create, delete etc.
"""
def test_package_import_by_url(self):
"""Test scenario:
1) import package by url
2) check that package exists
"""
try:
self.run_server()
package = self.import_package(
self.app_name,
'http://localhost:8089/apps/{0}.zip'.format(self.app_name)
)
finally:
self.stop_server()
package_list = self.listing('package-list')
self.assertIn(package, package_list)
def test_package_import_by_path(self):
"""Test scenario:
1) import package by path
2) check that package exists
"""
package = self.import_package(
self.app_name,
self.dummy_app_path
)
package_list = self.listing('package-list')
self.assertIn(package, package_list)
def test_package_is_public(self):
"""Test scenario:
1) import package
2) check that package is public
"""
package = self.import_package(
self.app_name,
self.dummy_app_path,
'--is-public')
package_show = self.listing('package-show', params=package['ID'])
package_show = {item['Property']: item['Value']
for item in package_show}
self.assertEqual(package['Is Public'], 'True')
self.assertEqual(
str2bool(package['Is Public']),
str2bool(package_show['is_public']))
def test_package_delete(self):
"""Test scenario:
1) import package
2) delete package
3) check that package has been deleted
"""
package = self.import_package(
self.app_name,
self.dummy_app_path
)
package_list = self.delete_murano_object('package', package)
self.assertNotIn(package, package_list)
def test_package_show(self):
"""Test scenario:
1) import package
2) check that package values exist in
return by package-show object
"""
package = self.import_package(
self.app_name,
self.dummy_app_path
)
package_show = self.listing('package-show', params=package['ID'])
package_show = {item['Property']: item['Value']
for item in package_show}
self.assertEqual(
str2bool(package['Active']),
str2bool(package_show['enabled']))
self.assertEqual(
package['FQN'],
package_show['fully_qualified_name'])
self.assertEqual(
package['ID'],
package_show['id'])
self.assertEqual(
str2bool(package['Is Public']),
str2bool(package_show['is_public']))
self.assertEqual(
package['Name'],
package_show['name'])
self.assertEqual(
package['Type'],
package_show['type'])
def test_package_import_update(self):
"""Test scenario:
1) import package
2) import new_package using option 'u' - update
3) check that package has been updated
"""
package = self.import_package(
self.app_name,
self.dummy_app_path
)
upd_package = self.import_package(
self.app_name,
self.dummy_app_path,
'--exists-action', 'u'
)
self.assertEqual(package['Name'], upd_package['Name'])
self.assertNotEqual(package['ID'], upd_package['ID'])
def test_package_import_skip(self):
"""Test scenario:
1) import package using option 's' - skip for existing package
2) try to import the same package using option 's' - skip
3) check that package hasn't been updated
"""
package = self.import_package(
self.app_name,
self.dummy_app_path,
'--exists-action', 's'
)
updated_package = self.import_package(
self.app_name,
self.dummy_app_path,
'--exists-action', 's'
)
package_list = self.listing("package-list")
self.assertIn(package, package_list)
self.assertIsNone(updated_package)
def test_package_import_abort(self):
"""Test scenario:
1) import package
2) import new_package using option 'a' - skip
3) check that package hasn't been updated
"""
package = self.import_package(
self.app_name,
self.dummy_app_path
)
package_list = self.listing('package-list')
self.assertIn(package, package_list)
package = self.import_package(
self.app_name,
self.dummy_app_path,
'--exists-action', 'a'
)
package_list = self.listing('package-list')
self.assertNotIn(package, package_list)
class DeployMuranoEnvironmentTest(utils.CLIUtilsTestPackagesBase):
"""Test for testing Murano environment deployment.
Test for the Murano CLI commands which checks addition of app
to the environment, session creation and deployment of
environment.
"""
# TODO(mstolyarenko): need to unskip this test when
# https://bugs.launchpad.net/python-muranoclient/+bug/1625039 is fixed
@unittest.skipIf(backend_name == 'glare',
"This test fails when GLARE is used as packages "
"service. To be fixed as part of #1625039")
def test_environment_deployment(self):
"""Test scenario:
1) import package
2) create environment
3) create session for created environment
4) add application to the environment
5) send environment to deploy
6) check that deployment was successful
"""
self.import_package(
self.app_name,
self.dummy_app_path
)
env_id = self.create_murano_object('environment',
'TestMuranoDeployEnv')['ID']
obj_model = {
'op': 'add',
'path': '/-',
'value': {
'?': {
'type': 'io.murano.apps.{0}'.format(self.app_name),
'id': '{0}'.format(self.generate_uuid()),
}
}
}
self.deploy_environment(env_id, obj_model)
deployments = self.listing('deployment-list', params=env_id)
self.assertEqual('success', deployments[0]['State'])
self.assertEqual(1, len(deployments))
# TODO(mstolyarenko): need to unskip this test when
# https://bugs.launchpad.net/python-muranoclient/+bug/1625039 is fixed
@unittest.skipIf(backend_name == 'glare',
"This test fails when GLARE is used as packages "
"service. To be fixed as part of #1625039")
def test_add_component_to_deployed_env(self):
"""Test scenario:
1) import package
2) create environment
3) create session for created environment
4) add application to the environment
5) send environment to deploy
6) check that deployment was successful
7) add application to environment
8) deploy environment again
"""
self.import_package(
self.app_name,
self.dummy_app_path
)
env_id = self.create_murano_object('environment',
'TestMuranoDeployEnv')['ID']
obj_model = {
'op': 'add',
'path': '/-',
'value': {
'?': {
'type': 'io.murano.apps.{0}'.format(self.app_name),
'id': '',
}
}
}
obj_model['value']['?']['id'] = self.generate_uuid()
self.deploy_environment(env_id, obj_model)
deployments = self.listing('deployment-list', params=env_id)
self.assertEqual('success', deployments[0]['State'])
self.assertEqual(1, len(deployments))
obj_model['value']['?']['id'] = self.generate_uuid()
self.deploy_environment(env_id, obj_model)
deployments = self.listing('deployment-list', params=env_id)
self.assertEqual('success', deployments[1]['State'])
self.assertEqual(2, len(deployments))
# TODO(mstolyarenko): need to unskip this test when
# https://bugs.launchpad.net/python-muranoclient/+bug/1625039 is fixed
@unittest.skipIf(backend_name == 'glare',
"This test fails when GLARE is used as packages "
"service. To be fixed as part of #1625039")
def test_delete_component_from_deployed_env(self):
"""Test scenario:
1) import package
2) create environment
3) create session for created environment
4) add application to the environment
5) send environment to deploy
6) check that deployment was successful
7) delete application from environment
8) deploy environment again
"""
self.import_package(
self.app_name,
self.dummy_app_path
)
env_id = self.create_murano_object('environment',
'TestMuranoDeployEnv')['ID']
obj_model = {
'op': 'add',
'path': '/-',
'value': {
'?': {
'type': 'io.murano.apps.{0}'.format(self.app_name),
'id': '{0}'.format(self.generate_uuid()),
}
}
}
self.deploy_environment(env_id, obj_model)
obj_model = {
'op': 'remove',
'path': '/0'
}
self.deploy_environment(env_id, obj_model)
deployments = self.listing('deployment-list', params=env_id)
self.assertEqual('success', deployments[1]['State'])
self.assertEqual(2, len(deployments))
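# The deployment tests above all build the same JSON-patch fragment by hand.
# A small module-level helper along the lines of the sketch below could factor
# that out; it is an illustrative, hypothetical helper (not part of the
# original suite) and only reuses the object-model shape already shown above.
def _make_add_patch(app_name, object_id):
    """Return an 'add' object-model patch for a dummy application."""
    return {
        'op': 'add',
        'path': '/-',
        'value': {
            '?': {
                'type': 'io.murano.apps.{0}'.format(app_name),
                'id': object_id,
            }
        }
    }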
class BundleMuranoSanityClientTest(utils.CLIUtilsTestPackagesBase):
"""Sanity tests for testing actions with bundle.
Tests for the Murano CLI commands which check basic actions with
bundles.
"""
def test_bundle_import_without_bundle_name(self):
"""Test scenario:
1) Execute murano bundle-import command without bundle name
2) check that error message contains user friendly substring
"""
result = self.murano('bundle-import', params='',
fail_ok=True)
self.assertIn("murano bundle-import: error: too few arguments", result)
@unittest.skip("Skip due to apps.openstack.org website is retired.")
def test_bundle_import_with_non_existing_package_name(self):
"""Test scenario:
1) Execute murano bundle-import command with non-existing packages
name inside
2) check that error message contains user friendly substring
"""
result = self.murano(
'bundle-import',
params=self.prepare_bundle_with_non_existed_package(),
fail_ok=False)
self.assertIn("Couldn't find file for package", result)
self.assertIn("Error Got non-ok status(404) while connecting", result)
@unittest.skip("Skip due to apps.openstack.org website is retired.")
def test_bundle_import_with_non_existing_name(self):
"""Test scenario:
1) Execute murano bundle-import command with non-existing bundle
name
2) check that error message contains user friendly substring
"""
result = self.murano('bundle-import', params=self.app_name,
fail_ok=True)
self.assertIn("Bundle file '{}' does not exist".format(self.app_name),
result)
self.assertIn("reason: Got non-ok status(404) while connecting to",
result)
def test_bundle_import_with_invalid_file_format(self):
"""Test scenario:
1) Execute murano bundle-import command with invalid bundle file
format
2) check that error message contains user friendly substring
"""
try:
self.murano(
'bundle-import',
params=self.prepare_bundle_with_invalid_format(),
fail_ok=False)
except utils.exceptions.CommandFailed as exception:
self.assertIn("Can't parse bundle contents", exception.stdout)
class StaticActionMuranoClientTest(utils.CLIUtilsTestPackagesBase):
"""Tests for testing static actions execution.
Tests for the Murano CLI commands which check the result of sample
static action execution.
"""
def test_static_action_call(self):
"""Test scenario:
1) import package
2) call static action of the class in that package
3) check the result of action
"""
package = self.import_package(
self.app_name,
self.dummy_app_path
)
result = self.murano(
'static-action-call', params='{0} staticAction --package-name {1} '
'--arguments myName=John myAge=28'.format(package['FQN'],
package['FQN']))
expected = "Waiting for result...\nStatic action result: Hello, " \
"John. In 5 years you will be 33 years old.\n"
self.assertEqual(expected, result)
| 37.168919
| 79
| 0.592014
|
63ccf626379578406f3ac061aa9d1f454fd611bd
| 10,911
|
py
|
Python
|
pyspherepack/box.py
|
cunni/pyspherepack
|
a5f87619eeac211472a9c3cab3632c31477beba9
|
[
"MIT"
] | 1
|
2020-02-29T04:17:09.000Z
|
2020-02-29T04:17:09.000Z
|
pyspherepack/box.py
|
cunni/pyspherepack
|
a5f87619eeac211472a9c3cab3632c31477beba9
|
[
"MIT"
] | 1
|
2019-07-29T10:06:45.000Z
|
2019-07-29T10:06:45.000Z
|
pyspherepack/box.py
|
cunni/pyspherepack
|
a5f87619eeac211472a9c3cab3632c31477beba9
|
[
"MIT"
] | null | null | null |
import autograd.numpy as np
from autograd import grad
from autograd.misc.optimizers import adam
import matplotlib
matplotlib.use('Agg') # http://www.davidketcheson.info/2015/01/13/using_matplotlib_image_comparison.html
from matplotlib import pyplot as plt
from matplotlib import interactive
interactive(True)
#import pylab
import matplotlib.patches as patches
from .utils import np_printoptions
import pickle
import pylatex
class Box(object):
"""a fairly simple class, with some non-oop goofiness to play nicely with autograd grad and their adam"""
def __init__(self,n_balls,n_dims=None,n_iters=None,box=None,logits=None):
# number of balls in box
self.n_balls = n_balls
self.n_dims = n_dims if n_dims is not None else 2
# defines the bounding box; just a vector of the outer corner; the inner corner is assumed to be the origin
self.box = box if box is not None else np.array([1.0]*self.n_dims)
# defines the ball positions in logit space; always
self.logits = logits if logits is not None else np.random.randn(self.n_balls,self.n_dims)
# some optimization parameters
self.n_iters = n_iters if n_iters is not None else 100000
self.step_size = 0.001
def ball_radius(self,x=None,i=None):
"""calculates the maximum size sphere that can be
packed in a given constellation of x points.
Note: assumes no boundary, so that's really for the warper to determine"""
x = x if x is not None else self.box_warp() # still oop but allowing autograd to do its thing
# Note that i is in the argument here because I was too lazy to rewrite autograd.misc.optimizers adam
sum_squares = np.sum((x[np.newaxis,:,:] - x[:,np.newaxis,:])**2, axis=2) + np.diag([np.inf]*np.shape(x)[0])
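        # sum_squares holds every pairwise squared distance between centres, with
        # +inf placed on the diagonal so a point is never compared against itself;
        # half of the smallest pairwise distance is the largest radius at which no
        # two equal circles overlap.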
return 0.5*np.sqrt(np.min(sum_squares))
def box_warp(self,logits=None):
"""warps real logits into the specified box. Note here self.box is used in usual oop fashion,
though the fp approach of grad is such that we need grad to pass logits around, hence not self.logits"""
logits = logits if logits is not None else self.logits # still oop but allowing autograd to do its thing
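        # Elementwise this is a scaled logistic map, x_ij = box_j / (1 + exp(-logit_ij)),
        # so any real-valued logit lands strictly inside (0, box_j): the optimizer works
        # in unconstrained space while the circle centres stay inside the box.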
return (self.box / (1.0 + np.exp(-logits)))
def print_status(self,logits=None,i=None,g=None):
"""just a print callback"""
logits = logits if logits is not None else self.logits # still oop but allowing autograd to do its thing
if i % 5000 == 0:
print("{:9}|{:23}|{:20}".format(i, self.ball_radius(self.box_warp(logits),i), self.density(logits) ))
def pack(self):
print(" Iter | Ball radius | Density ")
self.logits = adam(grad(lambda logits,i: -1*self.ball_radius(self.box_warp(logits),i)), self.logits, num_iters=self.n_iters,callback=self.print_status)
# one more print at final iteration
self.print_status(i=self.n_iters)
def density(self,logits=None):
logits = logits if logits is not None else self.logits # still oop but allowing autograd to do its thing
rad = self.ball_radius(self.box_warp(logits))
return 100*rad**2*np.pi*self.n_balls/(np.prod(self.box+(2*rad)))
def plot(self,scaled_rad=None,clamp_edge=0.0):
"""visualize the balls packed in"""
x = self.box_warp(self.logits)
rad = self.ball_radius(x)
scaled_rad = scaled_rad if scaled_rad is not None else rad
scaled_box = scaled_rad/rad*(self.box+2*rad)
scaled_x = scaled_rad/rad*(x + rad)
#rad_norm = rad/(1+2*rad) # not quite right, box is normalizing oddly when nonsquare
print("Optimized Ball radii: {:04.2f}, scaled {:04.2f}".format(rad,scaled_rad))
with np_printoptions(precision=6, suppress=True):
print('Ball centers (scaled):\n {}'.format(scaled_x))
# print("Normalized (true) Ball radii: {:06.4f}".format(rad_norm))
print("Density %: {:04.2f}%".format(self.density()))
print("Waste %: {:04.2f}%".format(100-self.density()))
print("Density with clamp edge %: {:04.2f}%".format((self.density()*np.prod(scaled_box)/(scaled_box[1]*(scaled_box[0]+2*clamp_edge)))))
print("Waste with clamp edge %: {:04.2f}%".format(100-(self.density()*np.prod(scaled_box)/(scaled_box[1]*(scaled_box[0]+2*clamp_edge)))))
if self.n_dims==2:
fig,ax = plt.subplots()
# plot bounding box
#rect = patches.Rectangle((0,0)-rad,self.box[0]+2*rad,self.box[1]+2*rad,linewidth=2,edgecolor='k',facecolor='none')
rect = patches.Rectangle((-clamp_edge,0),scaled_box[0]+2*clamp_edge,scaled_box[1],hatch='x',linewidth=2,edgecolor='k',facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
rect2 = patches.Rectangle((0,0),scaled_box[0],scaled_box[1],linewidth=2,edgecolor='k',facecolor='w')
ax.add_patch(rect2)
# plot balls
for i in range(self.n_balls):
ax.add_artist(plt.Circle((scaled_x[i,0],scaled_x[i,1]),scaled_rad,fill=False,color='C0',linewidth=2))
# plot centers
ax.add_artist(plt.scatter(scaled_x[:,0],scaled_x[:,1]))
ax.axis('equal')
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
# add annotations
ax.add_artist(plt.scatter(0.0,0.0,color='k'))
ax.text(0.0, 0.0, '(0.00,0.00)',horizontalalignment='left',verticalalignment='top')
ax.add_artist(plt.scatter(scaled_box[0],0.0,color='k'))
ax.text(scaled_box[0], 0.0, '({:04.2f},0.00)'.format(scaled_box[0]),horizontalalignment='right',verticalalignment='top')
ax.add_artist(plt.scatter(0.0,scaled_box[1],color='k'))
ax.text(0.0, scaled_box[1], '(0.00,{:04.2f})'.format(scaled_box[1]),horizontalalignment='left',verticalalignment='bottom')
ax.add_artist(plt.scatter(scaled_box[0],scaled_box[1],color='k'))
ax.text(scaled_box[0], scaled_box[1], '({:04.2f},{:04.2f})'.format(scaled_box[0],scaled_box[1]),horizontalalignment='right',verticalalignment='bottom')
if clamp_edge > 0:
ax.add_artist(plt.scatter(-clamp_edge,0.0,color='k'))
ax.text(-clamp_edge, 0.0, '-{:03.1f}'.format(clamp_edge),horizontalalignment='right',verticalalignment='top')
ax.add_artist(plt.scatter(scaled_box[0]+clamp_edge,0.0,color='k'))
ax.text(scaled_box[0]+clamp_edge, 0.0, '+{:03.1f}'.format(clamp_edge),horizontalalignment='left',verticalalignment='top')
plt.show()
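# A minimal usage sketch for Box (illustrative only, not part of the original
# module); the parameter values are arbitrary assumptions kept small so a demo
# run finishes quickly.
def _example_pack():
    """Pack a handful of circles into the default unit square and plot them."""
    b = Box(n_balls=7, n_iters=2000)  # default box is the 2D unit square
    b.pack()                          # adam-optimizes b.logits in place
    b.plot()                          # prints radius/density and draws the layout
    return b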
class ManyBox(object):
"""instantiates many boxes of a size, packs through them, for local optima silliness"""
def __init__(self,n_balls,n_boxes=None,locked=None,filename=None,**kwargs):
self.n_balls = n_balls
self.n_boxes = n_boxes if n_boxes is not None else 10
self.boxes = [Box(n_balls,**kwargs) for i in range(self.n_boxes)]
self.best_box = {'i':None, 'density':0, 'box':None}
self.locked = locked if locked is not None else True
self.filename = filename if filename is not None else 'data/manybox.pkl'
def pack(self):
for i in range(self.n_boxes):
print('=========packing box {}==========='.format(i))
self.boxes[i].pack()
self.best_box = self.best_box if self.best_box['density'] > self.boxes[i].density() else {'i':i,'density':self.boxes[i].density(),'box':self.boxes[i]}
self.save()
print('=========done box {}, with density {}========'.format(i,self.boxes[i].density()))
def density_distrib(self):
return [b.density() for b in self.boxes]
def save(self):
with open(self.filename,'wb') as f:
pickle.dump(self,f)
@classmethod
def load(cls,filename=None):
# https://stackoverflow.com/questions/2709800/how-to-pickle-yourself
filename = filename if filename is not None else 'data/manybox.pkl'
try:
with open(filename,'rb') as f:
return pickle.load(f)
except:
raise IOError('unable to load file: {}'.format(filename))
@classmethod
def tex_best(cls,filenames=None,texname=None,scaled_rad=None,clamp_edge=None):
filenames = filenames if filenames is not None else ['data/mb_50_2x1.pkl','data/mb_50_3x1.pkl']
texname = texname if texname is not None else 'data/aggregated_results'
# set up pylatex doc
geometry_options = {"margin": "1in"}
doc = pylatex.Document(texname, geometry_options=geometry_options)
dapne = lambda s: doc.append(pylatex.NoEscape(s))
with doc.create(pylatex.Section('Introduction')):
doc.append('Each section that follows shows an optimized layout for a given number of circles and an approximate aspect ratio of the sheet. Throughout, the following parameters are assumed: clamp edge of 10.0mm, circle diameter of 20mm, spacing between circles of 0.50mm.')
for fn in filenames:
mb = cls.load(filename=fn)
b = mb.best_box['box']
b.plot(clamp_edge=clamp_edge,scaled_rad=scaled_rad)
# pylatex to put this in tex
#matplotlib.use('Agg')
with doc.create(pylatex.Section(pylatex.NoEscape(r'{} circles, box aspect ratio of roughly ${}\times{}$'.format(b.n_balls,b.box[0],b.box[1])),label=fn)):
with doc.create(pylatex.Figure(position='htbp')) as plot:
plot.add_plot(width=pylatex.NoEscape(r'0.8\textwidth'))
#plot.add_caption('Optimized circle packing for this sheet size.')
x = b.box_warp(b.logits)
rad = b.ball_radius(x)
clamp_edge = clamp_edge if clamp_edge is not None else 0.0
scaled_rad = scaled_rad if scaled_rad is not None else rad
scaled_box = scaled_rad/rad*(b.box+2*rad)
scaled_x = scaled_rad/rad*(x + rad)
#doc.append(pylatex.NoEscape('\noindent Density %:'))
dapne(r'\noindent Density \%: {:04.2f}\% \\'.format(b.density()))
dapne(r'Waste \%: {:04.2f}\% \\'.format(100-b.density()))
dapne(r'Density with clamp edge \%: {:04.2f}\% \\'.format((b.density()*np.prod(scaled_box)/(scaled_box[1]*(scaled_box[0]+2*clamp_edge)))))
dapne(r'Waste with clamp edge \%: {:04.2f}\% \\'.format(100-(b.density()*np.prod(scaled_box)/(scaled_box[1]*(scaled_box[0]+2*clamp_edge)))))
dapne(r'Circle center coordinates: \\')
for i in range(b.n_balls):
#dapne(r'$c_{{{}}}$: {}\\'.format(i+1,scaled_x[i,:]))
dapne(r'$[{}~~{}]$ \\'.format(scaled_x[i,0],scaled_x[i,1]))
dapne(r'\clearpage')
doc.generate_tex()
| 57.426316
| 285
| 0.630006
|
3256534b6267a9a08b85923f4bc3e4078c18f9cf
| 9,974
|
py
|
Python
|
official/nlp/configs/encoders.py
|
franzigeiger/models
|
8756b1176c70256e082d865580adbc9e09ce3036
|
[
"Apache-2.0"
] | null | null | null |
official/nlp/configs/encoders.py
|
franzigeiger/models
|
8756b1176c70256e082d865580adbc9e09ce3036
|
[
"Apache-2.0"
] | null | null | null |
official/nlp/configs/encoders.py
|
franzigeiger/models
|
8756b1176c70256e082d865580adbc9e09ce3036
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer Encoders.
Includes configurations and factory methods.
"""
from typing import Optional
from absl import logging
import dataclasses
import gin
import tensorflow as tf
from official.modeling import hyperparams
from official.modeling import tf_utils
from official.nlp.modeling import layers
from official.nlp.modeling import networks
@dataclasses.dataclass
class BertEncoderConfig(hyperparams.Config):
"""BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
embedding_size: Optional[int] = None
@dataclasses.dataclass
class MobileBertEncoderConfig(hyperparams.Config):
"""MobileBERT encoder configuration.
Attributes:
word_vocab_size: number of words in the vocabulary.
word_embed_size: word embedding size.
type_vocab_size: number of word types.
max_sequence_length: maximum length of input sequence.
num_blocks: number of transformer block in the encoder model.
hidden_size: the hidden size for the transformer block.
num_attention_heads: number of attention heads in the transformer block.
intermediate_size: the size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: the non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: dropout probability for the hidden layers.
attention_probs_dropout_prob: dropout probability of the attention
probabilities.
intra_bottleneck_size: the size of bottleneck.
initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
key_query_shared_bottleneck: whether to share linear transformation for
keys and queries.
num_feedforward_networks: number of stacked feed-forward networks.
normalization_type: the type of normalization_type, only 'no_norm' and
'layer_norm' are supported. 'no_norm' represents the element-wise linear
transformation for the student model, as suggested by the original
MobileBERT paper. 'layer_norm' is used for the teacher model.
classifier_activation: if using the tanh activation for the final
representation of the [CLS] token in fine-tuning.
return_all_layers: if return all layer outputs.
return_attention_score: if return attention scores for each layer.
"""
word_vocab_size: int = 30522
word_embed_size: int = 128
type_vocab_size: int = 2
max_sequence_length: int = 512
num_blocks: int = 24
hidden_size: int = 512
num_attention_heads: int = 4
intermediate_size: int = 4096
intermediate_act_fn: str = "gelu"
hidden_dropout_prob: float = 0.1
attention_probs_dropout_prob: float = 0.1
intra_bottleneck_size: int = 1024
initializer_range: float = 0.02
key_query_shared_bottleneck: bool = False
num_feedforward_networks: int = 1
normalization_type: str = "layer_norm"
classifier_activation: bool = True
return_all_layers: bool = False
return_attention_score: bool = False
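  # Illustrative note (not part of the original module): since this is a
  # dataclass-style config, a smaller variant can be expressed by overriding
  # only the fields of interest, e.g.
  # MobileBertEncoderConfig(num_blocks=4, hidden_size=128).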
@dataclasses.dataclass
class AlbertEncoderConfig(hyperparams.Config):
"""ALBERT encoder configuration."""
vocab_size: int = 30000
embedding_width: int = 128
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.0
attention_dropout_rate: float = 0.0
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
@dataclasses.dataclass
class EncoderConfig(hyperparams.OneOfConfig):
"""Encoder configuration."""
type: Optional[str] = "bert"
albert: AlbertEncoderConfig = AlbertEncoderConfig()
bert: BertEncoderConfig = BertEncoderConfig()
mobilebert: MobileBertEncoderConfig = MobileBertEncoderConfig()
ENCODER_CLS = {
"bert": networks.TransformerEncoder,
"mobilebert": networks.MobileBERTEncoder,
"albert": networks.AlbertTransformerEncoder,
}
@gin.configurable
def build_encoder(config: EncoderConfig,
embedding_layer: Optional[layers.OnDeviceEmbedding] = None,
encoder_cls=None,
bypass_config: bool = False):
"""Instantiate a Transformer encoder network from EncoderConfig.
Args:
config: the one-of encoder config, which provides encoder parameters of a
chosen encoder.
embedding_layer: an external embedding layer passed to the encoder.
encoder_cls: an external encoder cls not included in the supported encoders,
usually used by gin.configurable.
bypass_config: whether to ignore config instance to create the object with
`encoder_cls`.
Returns:
An encoder instance.
"""
encoder_type = config.type
encoder_cfg = config.get()
encoder_cls = encoder_cls or ENCODER_CLS[encoder_type]
logging.info("Encoder class: %s to build...", encoder_cls.__name__)
if bypass_config:
return encoder_cls()
if encoder_cls.__name__ == "EncoderScaffold":
embedding_cfg = dict(
vocab_size=encoder_cfg.vocab_size,
type_vocab_size=encoder_cfg.type_vocab_size,
hidden_size=encoder_cfg.hidden_size,
max_seq_length=encoder_cfg.max_position_embeddings,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
dropout_rate=encoder_cfg.dropout_rate,
)
hidden_cfg = dict(
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_activation=tf_utils.get_activation(
encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
kernel_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
)
kwargs = dict(
embedding_cfg=embedding_cfg,
hidden_cfg=hidden_cfg,
num_hidden_instances=encoder_cfg.num_layers,
pooled_output_dim=encoder_cfg.hidden_size,
pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range))
return encoder_cls(**kwargs)
if encoder_type == "mobilebert":
return encoder_cls(
word_vocab_size=encoder_cfg.word_vocab_size,
word_embed_size=encoder_cfg.word_embed_size,
type_vocab_size=encoder_cfg.type_vocab_size,
max_sequence_length=encoder_cfg.max_sequence_length,
num_blocks=encoder_cfg.num_blocks,
hidden_size=encoder_cfg.hidden_size,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
intermediate_act_fn=encoder_cfg.intermediate_act_fn,
hidden_dropout_prob=encoder_cfg.hidden_dropout_prob,
attention_probs_dropout_prob=encoder_cfg.attention_probs_dropout_prob,
intra_bottleneck_size=encoder_cfg.intra_bottleneck_size,
key_query_shared_bottleneck=encoder_cfg.key_query_shared_bottleneck,
num_feedforward_networks=encoder_cfg.num_feedforward_networks,
normalization_type=encoder_cfg.normalization_type,
classifier_activation=encoder_cfg.classifier_activation,
return_all_layers=encoder_cfg.return_all_layers,
return_attention_score=encoder_cfg.return_attention_score)
if encoder_type == "albert":
return encoder_cls(
vocab_size=encoder_cfg.vocab_size,
embedding_width=encoder_cfg.embedding_width,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range))
# Uses the default BERTEncoder configuration schema to create the encoder.
# If it does not match, please add a switch branch by the encoder type.
return encoder_cls(
vocab_size=encoder_cfg.vocab_size,
hidden_size=encoder_cfg.hidden_size,
num_layers=encoder_cfg.num_layers,
num_attention_heads=encoder_cfg.num_attention_heads,
intermediate_size=encoder_cfg.intermediate_size,
activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
dropout_rate=encoder_cfg.dropout_rate,
attention_dropout_rate=encoder_cfg.attention_dropout_rate,
max_sequence_length=encoder_cfg.max_position_embeddings,
type_vocab_size=encoder_cfg.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=encoder_cfg.initializer_range),
embedding_width=encoder_cfg.embedding_size,
embedding_layer=embedding_layer)
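# Minimal usage sketch (illustrative, not part of the original module): the
# default EncoderConfig selects the "bert" branch, so build_encoder returns a
# networks.TransformerEncoder built from the BertEncoderConfig defaults above.
def _example_build_default_encoder():
  return build_encoder(EncoderConfig())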
| 40.544715
| 80
| 0.755564
|
8f79e93200ec47f8635d0f58c9e3545822ecad59
| 619
|
py
|
Python
|
Algorithms/Sorting/Selection_Sort/Selection_Sort.py
|
debarshi-1999/DSA-Library
|
f9ebf63ca50c8bbd7538313b3c7aeddcc27fd198
|
[
"MIT"
] | 30
|
2020-10-08T17:44:47.000Z
|
2022-03-23T18:05:06.000Z
|
Algorithms/Sorting/Selection_Sort/Selection_Sort.py
|
debarshi-1999/DSA-Library
|
f9ebf63ca50c8bbd7538313b3c7aeddcc27fd198
|
[
"MIT"
] | 118
|
2020-10-08T04:29:44.000Z
|
2021-12-10T06:22:26.000Z
|
Algorithms/Sorting/Selection_Sort/Selection_Sort.py
|
debarshi-1999/DSA-Library
|
f9ebf63ca50c8bbd7538313b3c7aeddcc27fd198
|
[
"MIT"
] | 125
|
2020-10-08T04:34:19.000Z
|
2022-03-30T19:16:27.000Z
|
def Selection_Sort(arr):
for i in range(len(arr)):
mins=i
for k in range(i+1,len(arr)):
if(arr[k] < arr[mins]):
mins=k
q=arr[i]
arr[i]=arr[mins]
arr[mins]=q
        #swapping is done
print(arr)
Selection_Sort([4,2,4,2,1,0,6,7])
#Algorithm followed is:
#selection_Sort(array):
#    for i=0 to len(array):
#        mins = i  (index of the currently selected position)
#        for k=i+1 to len(array):
#            if (any element in array is less than our selected value at mins)
#                store its index in mins
#        swap the index i value with the new mins value
#repeat the same thing for the next part of the array, skipping the already sorted elements
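# Quick sanity check for the routine above (an illustrative addition, not part
# of the original snippet): the list is sorted in place, so the same object can
# be inspected after the call.
def _check_selection_sort():
    data = [4, 2, 4, 2, 1, 0, 6, 7]
    Selection_Sort(data)              # also prints the sorted list
    assert data == [0, 1, 2, 2, 4, 4, 6, 7]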
| 23.807692
| 79
| 0.668821
|
e97a252d70337dc042ec30f6542715bb0e66bc7d
| 5,978
|
py
|
Python
|
coto/session/session.py
|
wvanheerde/coto
|
d7eeb2e98a24b743d879ef5e2da9cbbacc417d8d
|
[
"Apache-2.0"
] | null | null | null |
coto/session/session.py
|
wvanheerde/coto
|
d7eeb2e98a24b743d879ef5e2da9cbbacc417d8d
|
[
"Apache-2.0"
] | null | null | null |
coto/session/session.py
|
wvanheerde/coto
|
d7eeb2e98a24b743d879ef5e2da9cbbacc417d8d
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
from urllib.parse import unquote
from colors import color
from .. import clients
def dr(r):
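    """Debug helper: pretty-print each request/response in the redirect chain."""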
for i in r.history + [r]:
if i.status_code < 400:
fg = 'green'
else:
fg = 'red'
print()
print(
color(
str(i.status_code) + \
" " + i.request.method + \
" " + i.request.url,
fg=fg,
style='underline'))
for k, v in i.request.headers.items():
if k == 'Cookie':
print(color(k + ':', fg='blue'))
for c in v.split(";"):
c = c.strip()
(n, c) = c.split('=', maxsplit=1)
print(color(' ' + n + ': ', fg='blue') + unquote(c))
else:
print(color(k + ':', fg='blue'), v)
for k, v in i.headers.items():
print(color(k + ':', fg='yellow'), v)
if i.request.body and len(i.request.body) > 0:
print(color('Body:', fg='blue'))
print(i.request.body)
print(color('EOF', fg='blue'))
class Session:
"""
The Session class represents a session with the AWS Management Console.
Use the `client` method to obtain a client for one of the supported
services.
"""
def __init__(
self, debug=False, verify=True,
metadata1_generator=None,
captcha_solver=None, **kwargs
):
"""
Args:
debug (bool): Enable debug messages.
verify (str | bool): Requests SSL certificate checking. Path to
CA certificates file. ``False`` to ignore certificate errors.
``True`` to use defaults (default).
captcha_solver (coto.captcha.Solver): Class implementing a way to solve captchas (e.g., send them to Slack for you to solve).
metadata1_generator (coto.metadata1.Generator): Class implementing a way to generate metadata1.
**kwargs: You can pass arguments for the signin method here.
"""
self.debug = debug
self._metadata1_generator = metadata1_generator
self._captcha_solver = captcha_solver
self.root = False
self.coupled = None
self.session = requests.Session()
self.session.verify = verify
self.authenticated = False
self._clients = {}
self.timeout = (3.1, 10)
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
if len(kwargs) > 0:
self.signin(**kwargs)
def signin(self, **kwargs):
"""
Signin to the AWS Management Console.
There are various ways to sign in:
* Using a boto3.Session object, pass the ``boto3_session`` argument.
* Using the Account Root User, pass the ``email``, ``password``, and
optionally ``mfa_secret`` arguments.
Args:
boto3_session (boto3.session.Session): The credentials of this
session are retrieved and used to signin to the console.
email (str): AWS account root user email to use for login.
password (str): AWS account root user password to use for login.
mfa_secret (str): AWS account root user mfa secret to use for login.
The Base32 seed defined as specified in RFC3548.
The Base32StringSeed is Base64-encoded.
"""
if 'boto3_session' in kwargs:
boto3_session = kwargs.get('boto3_session')
return self.client('federation').signin(boto3_session)
elif 'email' in kwargs and 'password' in kwargs:
args = {}
for key in ['email', 'password', 'mfa_secret']:
if key in kwargs:
args[key] = kwargs.get(key)
return self.client('signin').signin(**args)
# http requests
def _set_defaults(self, kwargs):
if not 'timeout' in kwargs:
kwargs['timeout'] = self.timeout
if not 'headers' in kwargs:
kwargs['headers'] = {}
kwargs['headers']['User-Agent'] = self.user_agent
def _get(self, url, **kwargs):
self._set_defaults(kwargs)
r = self.session.get(url, **kwargs)
if self.debug:
dr(r)
return r
def _post(self, url, **kwargs):
self._set_defaults(kwargs)
r = self.session.post(url, **kwargs)
if self.debug:
dr(r)
return r
def _put(self, url, **kwargs):
self._set_defaults(kwargs)
r = self.session.put(url, **kwargs)
if self.debug:
dr(r)
return r
def _delete(self, url, **kwargs):
self._set_defaults(kwargs)
r = self.session.delete(url, **kwargs)
if self.debug:
dr(r)
return r
def client(self, service):
"""
Create a client for a service.
Supported services:
* ``account``
* ``billing``
* ``federation``
* ``iam``
* ``mfa``
* ``resetpassword``
* ``signin``
* ``signin_amazon``
* ``signin_aws``
Args:
service: name of the service, eg., `billing`
Returns:
object: service client
"""
service = service.lower()
if service not in self._clients:
if not hasattr(clients, service):
raise Exception("service {0} unsupported".format(service))
klass = getattr(clients, service).Client
if klass.REQUIRES_AUTHENTICATION and not self.authenticated:
raise Exception(
"signin before creating {0} service client".format(
service))
self._clients[service] = klass(self)
return self._clients[service]
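# Minimal usage sketch (illustrative, not part of the original module); the
# credentials are placeholders, and any of the signin styles documented on
# Session.signin can be used instead.
def _example_session_usage():
    session = Session(email="root@example.com", password="example-password")
    billing = session.client("billing")  # clients are created once and cached
    return billing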
| 31.967914
| 149
| 0.537471
|
9be4528212577315fb17a1f59df18efd93534fa0
| 1,320
|
py
|
Python
|
server/authentication/tests/test_refresh_to_access.py
|
Aradhya-Tripathi/free-watch
|
c3353c0eec8d545372f22d6ac437ed71c1940f70
|
[
"MIT"
] | 5
|
2021-10-09T09:36:12.000Z
|
2021-12-14T17:03:31.000Z
|
server/authentication/tests/test_refresh_to_access.py
|
Aradhya-Tripathi/fire-watch
|
c3353c0eec8d545372f22d6ac437ed71c1940f70
|
[
"MIT"
] | 8
|
2022-02-21T19:01:49.000Z
|
2022-03-05T20:43:05.000Z
|
server/authentication/tests/test_refresh_to_access.py
|
Aradhya-Tripathi/free-watch
|
c3353c0eec8d545372f22d6ac437ed71c1940f70
|
[
"MIT"
] | 1
|
2021-12-17T09:12:49.000Z
|
2021-12-17T09:12:49.000Z
|
from unittest import TestCase
from authentication.issue_jwt import AuthToken
class TestAccessToRefresh(TestCase):
auth_token = AuthToken()
def test_access_token_as_refresh(self):
tokens = self.auth_token.generate_key(
payload={"user": "TestUser", "age": 10}, get_refresh=True, is_admin=True
)
check = self.auth_token.refresh_to_access(key=tokens["access_token"])
self.assertIsNone(check)
def test_successful_op(self):
tokens = self.auth_token.generate_key(
payload={"user": "TestUser", "age": 10}, get_refresh=True, is_admin=True
)
check = self.auth_token.refresh_to_access(key=tokens["refresh_token"])
self.assertIsNotNone(check)
payload = self.auth_token.verify_key(is_admin=True, key=check["access_token"])
self.assertEqual(payload["user"], "TestUser")
self.assertEqual(payload["age"], 10)
self.assertEqual(payload["is_admin"], True)
self.assertEqual(payload.get("refresh"), None)
payload = self.auth_token.verify_key(is_admin=True, key=check["refresh_token"])
self.assertEqual(payload["user"], "TestUser")
self.assertEqual(payload["age"], 10)
self.assertEqual(payload["is_admin"], True)
self.assertEqual(payload["refresh"], True)
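# Illustrative walk-through of the flow exercised by the tests above (not part
# of the original module); it only uses the AuthToken calls already shown.
def _example_refresh_roundtrip():
    auth = AuthToken()
    tokens = auth.generate_key(
        payload={"user": "TestUser", "age": 10}, get_refresh=True, is_admin=True
    )
    new_pair = auth.refresh_to_access(key=tokens["refresh_token"])
    return auth.verify_key(is_admin=True, key=new_pair["access_token"])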
| 40
| 87
| 0.675
|
4c1b000bf7d9f2dc46d4e87ce1f53c9cc2b93508
| 461
|
py
|
Python
|
dueros/card/LinkAccountCard.py
|
Mryan2005/bot-sdk-python
|
f961aedf141e966badd5cd577ad8913dd9733998
|
[
"Apache-2.0"
] | 70
|
2018-01-04T06:47:58.000Z
|
2021-07-28T03:08:48.000Z
|
dueros/card/LinkAccountCard.py
|
mlzboy/bot-sdk-python
|
664c90ec6d0abbb0844c030cd3114693a96b12ab
|
[
"Apache-2.0"
] | 16
|
2018-01-02T15:25:23.000Z
|
2020-03-14T07:25:44.000Z
|
dueros/card/LinkAccountCard.py
|
mlzboy/bot-sdk-python
|
664c90ec6d0abbb0844c030cd3114693a96b12ab
|
[
"Apache-2.0"
] | 32
|
2018-01-09T10:19:46.000Z
|
2021-05-06T08:35:52.000Z
|
#!/usr/bin/env python3
# -*- encoding=utf-8 -*-
# description:
# author:jack
# create_time: 2018/8/18
from dueros.card.BaseCard import BaseCard
import dueros.card.CardType as CardType
class LinkAccountCard(BaseCard):
"""
    Third-party account linking (authorization) card.
    In debug mode, replace the callback address domain with https://xiaodu-dbp.baidu.com/xxxx
"""
def __init__(self):
BaseCard.__init__(self)
self.data['type'] = CardType.CARD_TYPE_LINKACCOUNT
if __name__ == '__main__':
pass
| 18.44
| 58
| 0.681128
|
2ed303150831a2c17fbed9011edadc60573438f0
| 7,316
|
py
|
Python
|
homeassistant/components/mill/sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/mill/sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/mill/sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Support for mill wifi-enabled home heaters."""
from __future__ import annotations
import mill
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
CONF_IP_ADDRESS,
CONF_USERNAME,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import DeviceInfo, EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
BATTERY,
CLOUD,
CONNECTION_TYPE,
CONSUMPTION_TODAY,
CONSUMPTION_YEAR,
DOMAIN,
ECO2,
HUMIDITY,
LOCAL,
MANUFACTURER,
TEMPERATURE,
TVOC,
)
HEATER_SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key=CONSUMPTION_YEAR,
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
name="Year consumption",
),
SensorEntityDescription(
key=CONSUMPTION_TODAY,
device_class=SensorDeviceClass.ENERGY,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
state_class=SensorStateClass.TOTAL_INCREASING,
name="Day consumption",
),
)
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key=TEMPERATURE,
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
name="Temperature",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key=HUMIDITY,
device_class=SensorDeviceClass.HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
name="Humidity",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key=BATTERY,
device_class=SensorDeviceClass.BATTERY,
native_unit_of_measurement=PERCENTAGE,
name="Battery",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
),
SensorEntityDescription(
key=ECO2,
device_class=SensorDeviceClass.CO2,
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
name="Estimated CO2",
),
SensorEntityDescription(
key=TVOC,
native_unit_of_measurement=CONCENTRATION_PARTS_PER_BILLION,
name="TVOC",
state_class=SensorStateClass.MEASUREMENT,
),
)
LOCAL_SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="control_signal",
native_unit_of_measurement=PERCENTAGE,
name="Control signal",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="current_power",
device_class=SensorDeviceClass.POWER,
native_unit_of_measurement=POWER_WATT,
name="Current power",
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
key="raw_ambient_temperature",
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
name="Uncalibrated temperature",
state_class=SensorStateClass.MEASUREMENT,
entity_registry_enabled_default=False,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Mill sensor."""
if entry.data.get(CONNECTION_TYPE) == LOCAL:
mill_data_coordinator = hass.data[DOMAIN][LOCAL][entry.data[CONF_IP_ADDRESS]]
async_add_entities(
LocalMillSensor(
mill_data_coordinator,
entity_description,
)
for entity_description in LOCAL_SENSOR_TYPES
)
return
mill_data_coordinator = hass.data[DOMAIN][CLOUD][entry.data[CONF_USERNAME]]
entities = [
MillSensor(
mill_data_coordinator,
entity_description,
mill_device,
)
for mill_device in mill_data_coordinator.data.values()
for entity_description in (
HEATER_SENSOR_TYPES
if isinstance(mill_device, mill.Heater)
else SENSOR_TYPES
)
]
async_add_entities(entities)
class MillSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Mill Sensor device."""
def __init__(self, coordinator, entity_description, mill_device):
"""Initialize the sensor."""
super().__init__(coordinator)
self._id = mill_device.device_id
self.entity_description = entity_description
self._available = False
self._attr_name = f"{mill_device.name} {entity_description.name}"
self._attr_unique_id = f"{mill_device.device_id}_{entity_description.key}"
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, mill_device.device_id)},
name=self.name,
manufacturer=MANUFACTURER,
)
if isinstance(mill_device, mill.Heater):
self._attr_device_info["model"] = f"Generation {mill_device.generation}"
elif isinstance(mill_device, mill.Sensor):
self._attr_device_info["model"] = "Mill Sense Air"
self._update_attr(mill_device)
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._update_attr(self.coordinator.data[self._id])
self.async_write_ha_state()
@property
def available(self) -> bool:
"""Return True if entity is available."""
return super().available and self._available
@callback
def _update_attr(self, device):
self._available = device.available
self._attr_native_value = getattr(device, self.entity_description.key)
class LocalMillSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Mill Sensor device."""
def __init__(self, coordinator, entity_description):
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = entity_description
self._attr_name = (
f"{coordinator.mill_data_connection.name} {entity_description.name}"
)
if mac := coordinator.mill_data_connection.mac_address:
self._attr_unique_id = f"{mac}_{entity_description.key}"
self._attr_device_info = DeviceInfo(
connections={(CONNECTION_NETWORK_MAC, mac)},
configuration_url=self.coordinator.mill_data_connection.url,
manufacturer=MANUFACTURER,
model="Generation 3",
name=coordinator.mill_data_connection.name,
sw_version=coordinator.mill_data_connection.version,
)
@property
def native_value(self):
"""Return the native value of the sensor."""
return self.coordinator.data[self.entity_description.key]
| 32.515556
| 85
| 0.688218
|
aba79612a9931a2b9ff859972f110f09d3fdd8ff
| 13,460
|
py
|
Python
|
lib/python2.7/site-packages/pysaml2-4.0.5-py2.7.egg/saml2/attributemaps/saml_uri.py
|
crav7/ProjectDjango
|
10dc03919b1fcfc34d2ddc93b85989638399e3e9
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/pysaml2-4.0.5-py2.7.egg/saml2/attributemaps/saml_uri.py
|
crav7/ProjectDjango
|
10dc03919b1fcfc34d2ddc93b85989638399e3e9
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/pysaml2-4.0.5-py2.7.egg/saml2/attributemaps/saml_uri.py
|
crav7/ProjectDjango
|
10dc03919b1fcfc34d2ddc93b85989638399e3e9
|
[
"MIT"
] | null | null | null |
EDUCOURSE_OID = 'urn:oid:1.3.6.1.4.1.5923.1.6.1.'
EDUPERSON_OID = 'urn:oid:1.3.6.1.4.1.5923.1.1.1.'
LDAPGVAT_OID = 'urn:oid:1.2.40.0.10.2.1.1.' # ldap.gv.at definitions as specified in http://www.ref.gv.at/AG-IZ-PVP2-Version-2-1-0-2.2754.0.html
UCL_DIR_PILOT = 'urn:oid:0.9.2342.19200300.100.1.'
X500ATTR_OID = 'urn:oid:2.5.4.'
LDAPGVAT_UCL_DIR_PILOT = UCL_DIR_PILOT
LDAPGVAT_X500ATTR_OID = X500ATTR_OID
NETSCAPE_LDAP = 'urn:oid:2.16.840.1.113730.3.1.'
NOREDUPERSON_OID = 'urn:oid:1.3.6.1.4.1.2428.90.1.'
PKCS_9 = 'urn:oid:1.2.840.113549.1.9.1.'
SCHAC = 'urn:oid:1.3.6.1.4.1.25178.1.2.'
SIS = 'urn:oid:1.2.752.194.10.2.'
UMICH = 'urn:oid:1.3.6.1.4.1.250.1.57.'
OPENOSI_OID = 'urn:oid:1.3.6.1.4.1.27630.2.1.1.' #openosi-0.82.schema http://www.openosi.org/osi/display/ldap/Home
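# MAP translates between SAML attribute URIs (urn:oid form) and friendly
# attribute names: 'fro' maps URI -> name, 'to' maps name -> URI, and
# 'identifier' records the SAML 2.0 attribute name format these entries use.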
MAP = {
'identifier': 'urn:oasis:names:tc:SAML:2.0:attrname-format:uri',
'fro': {
EDUCOURSE_OID+'1': 'eduCourseOffering',
EDUCOURSE_OID+'2': 'eduCourseMember',
EDUPERSON_OID+'1': 'eduPersonAffiliation',
EDUPERSON_OID+'2': 'eduPersonNickname',
EDUPERSON_OID+'3': 'eduPersonOrgDN',
EDUPERSON_OID+'4': 'eduPersonOrgUnitDN',
EDUPERSON_OID+'5': 'eduPersonPrimaryAffiliation',
EDUPERSON_OID+'6': 'eduPersonPrincipalName',
EDUPERSON_OID+'7': 'eduPersonEntitlement',
EDUPERSON_OID+'8': 'eduPersonPrimaryOrgUnitDN',
EDUPERSON_OID+'9': 'eduPersonScopedAffiliation',
EDUPERSON_OID+'10': 'eduPersonTargetedID',
EDUPERSON_OID+'11': 'eduPersonAssurance',
LDAPGVAT_OID+'1': 'PVP-GID',
LDAPGVAT_OID+'149': 'PVP-BPK',
LDAPGVAT_OID+'153': 'PVP-OU-OKZ',
LDAPGVAT_OID+'261.10': 'PVP-VERSION',
LDAPGVAT_OID+'261.20': 'PVP-PRINCIPAL-NAME',
LDAPGVAT_OID+'261.24': 'PVP-PARTICIPANT-OKZ',
LDAPGVAT_OID+'261.30': 'PVP-ROLES',
LDAPGVAT_OID+'261.40': 'PVP-INVOICE-RECPT-ID',
LDAPGVAT_OID+'261.50': 'PVP-COST-CENTER-ID',
LDAPGVAT_OID+'261.60': 'PVP-CHARGE-CODE',
LDAPGVAT_OID+'3': 'PVP-OU-GV-OU-ID',
LDAPGVAT_OID+'33': 'PVP-FUNCTION',
LDAPGVAT_OID+'55': 'PVP-BIRTHDATE',
LDAPGVAT_OID+'71': 'PVP-PARTICIPANT-ID',
LDAPGVAT_UCL_DIR_PILOT+'1': 'PVP-USERID',
LDAPGVAT_UCL_DIR_PILOT+'3': 'PVP-MAIL',
LDAPGVAT_X500ATTR_OID+'11': 'PVP-OU',
LDAPGVAT_X500ATTR_OID+'20': 'PVP-TEL',
LDAPGVAT_X500ATTR_OID+'42': 'PVP-GIVENNAME',
NETSCAPE_LDAP+'1': 'carLicense',
NETSCAPE_LDAP+'2': 'departmentNumber',
NETSCAPE_LDAP+'3': 'employeeNumber',
NETSCAPE_LDAP+'4': 'employeeType',
NETSCAPE_LDAP+'39': 'preferredLanguage',
NETSCAPE_LDAP+'40': 'userSMIMECertificate',
NETSCAPE_LDAP+'216': 'userPKCS12',
NETSCAPE_LDAP+'241': 'displayName',
NOREDUPERSON_OID+'1': 'norEduOrgUniqueNumber',
NOREDUPERSON_OID+'2': 'norEduOrgUnitUniqueNumber',
NOREDUPERSON_OID+'3': 'norEduPersonBirthDate',
NOREDUPERSON_OID+'4': 'norEduPersonLIN',
NOREDUPERSON_OID+'5': 'norEduPersonNIN',
NOREDUPERSON_OID+'6': 'norEduOrgAcronym',
NOREDUPERSON_OID+'7': 'norEduOrgUniqueIdentifier',
NOREDUPERSON_OID+'8': 'norEduOrgUnitUniqueIdentifier',
NOREDUPERSON_OID+'9': 'federationFeideSchemaVersion',
NOREDUPERSON_OID+'10': 'norEduPersonLegalName',
NOREDUPERSON_OID+'11': 'norEduOrgSchemaVersion',
NOREDUPERSON_OID+'12': 'norEduOrgNIN',
OPENOSI_OID+'17': 'osiHomeUrl',
OPENOSI_OID+'19': 'osiPreferredTZ',
OPENOSI_OID+'72': 'osiICardTimeLastUpdated',
OPENOSI_OID+'104': 'osiMiddleName',
OPENOSI_OID+'107': 'osiOtherEmail',
OPENOSI_OID+'109': 'osiOtherHomePhone',
OPENOSI_OID+'120': 'osiWorkURL',
PKCS_9+'1': 'email',
SCHAC+'1': 'schacMotherTongue',
SCHAC+'2': 'schacGender',
SCHAC+'3': 'schacDateOfBirth',
SCHAC+'4': 'schacPlaceOfBirth',
SCHAC+'5': 'schacCountryOfCitizenship',
SCHAC+'6': 'schacSn1',
SCHAC+'7': 'schacSn2',
SCHAC+'8': 'schacPersonalTitle',
SCHAC+'9': 'schacHomeOrganization',
SCHAC+'10': 'schacHomeOrganizationType',
SCHAC+'11': 'schacCountryOfResidence',
SCHAC+'12': 'schacUserPresenceID',
SCHAC+'13': 'schacPersonalPosition',
SCHAC+'14': 'schacPersonalUniqueCode',
SCHAC+'15': 'schacPersonalUniqueID',
SCHAC+'17': 'schacExpiryDate',
SCHAC+'18': 'schacUserPrivateAttribute',
SCHAC+'19': 'schacUserStatus',
SCHAC+'20': 'schacProjectMembership',
SCHAC+'21': 'schacProjectSpecificRole',
SIS+'1': 'sisLegalGuardianFor',
SIS+'2': 'sisSchoolGrade',
UCL_DIR_PILOT+'1': 'uid',
UCL_DIR_PILOT+'3': 'mail',
UCL_DIR_PILOT+'25': 'dc',
UCL_DIR_PILOT+'37': 'associatedDomain',
UCL_DIR_PILOT+'43': 'co',
UCL_DIR_PILOT+'60': 'jpegPhoto',
UMICH+'57': 'labeledURI',
X500ATTR_OID+'2': 'knowledgeInformation',
X500ATTR_OID+'3': 'cn',
X500ATTR_OID+'4': 'sn',
X500ATTR_OID+'5': 'serialNumber',
X500ATTR_OID+'6': 'c',
X500ATTR_OID+'7': 'l',
X500ATTR_OID+'8': 'st',
X500ATTR_OID+'9': 'street',
X500ATTR_OID+'10': 'o',
X500ATTR_OID+'11': 'ou',
X500ATTR_OID+'12': 'title',
X500ATTR_OID+'14': 'searchGuide',
X500ATTR_OID+'15': 'businessCategory',
X500ATTR_OID+'16': 'postalAddress',
X500ATTR_OID+'17': 'postalCode',
X500ATTR_OID+'18': 'postOfficeBox',
X500ATTR_OID+'19': 'physicalDeliveryOfficeName',
X500ATTR_OID+'20': 'telephoneNumber',
X500ATTR_OID+'21': 'telexNumber',
X500ATTR_OID+'22': 'teletexTerminalIdentifier',
X500ATTR_OID+'23': 'facsimileTelephoneNumber',
X500ATTR_OID+'24': 'x121Address',
X500ATTR_OID+'25': 'internationaliSDNNumber',
X500ATTR_OID+'26': 'registeredAddress',
X500ATTR_OID+'27': 'destinationIndicator',
X500ATTR_OID+'28': 'preferredDeliveryMethod',
X500ATTR_OID+'29': 'presentationAddress',
X500ATTR_OID+'30': 'supportedApplicationContext',
X500ATTR_OID+'31': 'member',
X500ATTR_OID+'32': 'owner',
X500ATTR_OID+'33': 'roleOccupant',
X500ATTR_OID+'36': 'userCertificate',
X500ATTR_OID+'37': 'cACertificate',
X500ATTR_OID+'38': 'authorityRevocationList',
X500ATTR_OID+'39': 'certificateRevocationList',
X500ATTR_OID+'40': 'crossCertificatePair',
X500ATTR_OID+'42': 'givenName',
X500ATTR_OID+'43': 'initials',
X500ATTR_OID+'44': 'generationQualifier',
X500ATTR_OID+'45': 'x500UniqueIdentifier',
X500ATTR_OID+'46': 'dnQualifier',
X500ATTR_OID+'47': 'enhancedSearchGuide',
X500ATTR_OID+'48': 'protocolInformation',
X500ATTR_OID+'50': 'uniqueMember',
X500ATTR_OID+'51': 'houseIdentifier',
X500ATTR_OID+'52': 'supportedAlgorithms',
X500ATTR_OID+'53': 'deltaRevocationList',
X500ATTR_OID+'54': 'dmdName',
X500ATTR_OID+'65': 'pseudonym',
},
'to': {
'associatedDomain': UCL_DIR_PILOT+'37',
'authorityRevocationList': X500ATTR_OID+'38',
'businessCategory': X500ATTR_OID+'15',
'c': X500ATTR_OID+'6',
'cACertificate': X500ATTR_OID+'37',
'carLicense': NETSCAPE_LDAP+'1',
'certificateRevocationList': X500ATTR_OID+'39',
'cn': X500ATTR_OID+'3',
'co': UCL_DIR_PILOT+'43',
'crossCertificatePair': X500ATTR_OID+'40',
'dc': UCL_DIR_PILOT+'25',
'deltaRevocationList': X500ATTR_OID+'53',
'departmentNumber': NETSCAPE_LDAP+'2',
'destinationIndicator': X500ATTR_OID+'27',
'displayName': NETSCAPE_LDAP+'241',
'dmdName': X500ATTR_OID+'54',
'dnQualifier': X500ATTR_OID+'46',
'eduCourseMember': EDUCOURSE_OID+'2',
'eduCourseOffering': EDUCOURSE_OID+'1',
'eduPersonAffiliation': EDUPERSON_OID+'1',
'eduPersonAssurance': EDUPERSON_OID+'11',
'eduPersonEntitlement': EDUPERSON_OID+'7',
'eduPersonNickname': EDUPERSON_OID+'2',
'eduPersonOrgDN': EDUPERSON_OID+'3',
'eduPersonOrgUnitDN': EDUPERSON_OID+'4',
'eduPersonPrimaryAffiliation': EDUPERSON_OID+'5',
'eduPersonPrimaryOrgUnitDN': EDUPERSON_OID+'8',
'eduPersonPrincipalName': EDUPERSON_OID+'6',
'eduPersonScopedAffiliation': EDUPERSON_OID+'9',
'eduPersonTargetedID': EDUPERSON_OID+'10',
'email': PKCS_9+'1',
'employeeNumber': NETSCAPE_LDAP+'3',
'employeeType': NETSCAPE_LDAP+'4',
'enhancedSearchGuide': X500ATTR_OID+'47',
'facsimileTelephoneNumber': X500ATTR_OID+'23',
'federationFeideSchemaVersion': NOREDUPERSON_OID+'9',
'generationQualifier': X500ATTR_OID+'44',
'givenName': X500ATTR_OID+'42',
'houseIdentifier': X500ATTR_OID+'51',
'initials': X500ATTR_OID+'43',
'internationaliSDNNumber': X500ATTR_OID+'25',
'jpegPhoto': UCL_DIR_PILOT+'60',
'knowledgeInformation': X500ATTR_OID+'2',
'l': X500ATTR_OID+'7',
'labeledURI': UMICH+'57',
'mail': UCL_DIR_PILOT+'3',
'member': X500ATTR_OID+'31',
'norEduOrgAcronym': NOREDUPERSON_OID+'6',
'norEduOrgNIN': NOREDUPERSON_OID+'12',
'norEduOrgSchemaVersion': NOREDUPERSON_OID+'11',
'norEduOrgUniqueIdentifier': NOREDUPERSON_OID+'7',
'norEduOrgUniqueNumber': NOREDUPERSON_OID+'1',
'norEduOrgUnitUniqueIdentifier': NOREDUPERSON_OID+'8',
'norEduOrgUnitUniqueNumber': NOREDUPERSON_OID+'2',
'norEduPersonBirthDate': NOREDUPERSON_OID+'3',
'norEduPersonLIN': NOREDUPERSON_OID+'4',
'norEduPersonLegalName': NOREDUPERSON_OID+'10',
'norEduPersonNIN': NOREDUPERSON_OID+'5',
'o': X500ATTR_OID+'10',
'osiHomeUrl': OPENOSI_OID+'17',
'osiPreferredTZ': OPENOSI_OID+'19',
'osiICardTimeLastUpdated': OPENOSI_OID+'72',
'osiMiddleName': OPENOSI_OID+'104',
'osiOtherEmail': OPENOSI_OID+'107',
'osiOtherHomePhone': OPENOSI_OID+'109',
'osiWorkURL': OPENOSI_OID+'120',
'ou': X500ATTR_OID+'11',
'owner': X500ATTR_OID+'32',
'physicalDeliveryOfficeName': X500ATTR_OID+'19',
'postOfficeBox': X500ATTR_OID+'18',
'postalAddress': X500ATTR_OID+'16',
'postalCode': X500ATTR_OID+'17',
'preferredDeliveryMethod': X500ATTR_OID+'28',
'preferredLanguage': NETSCAPE_LDAP+'39',
'presentationAddress': X500ATTR_OID+'29',
'protocolInformation': X500ATTR_OID+'48',
'pseudonym': X500ATTR_OID+'65',
'PVP-USERID': LDAPGVAT_UCL_DIR_PILOT+'1',
'PVP-MAIL': LDAPGVAT_UCL_DIR_PILOT+'3',
'PVP-GID': LDAPGVAT_OID+'1',
'PVP-BPK': LDAPGVAT_OID+'149',
'PVP-OU-OKZ': LDAPGVAT_OID+'153',
'PVP-VERSION': LDAPGVAT_OID+'261.10',
'PVP-PRINCIPAL-NAME': LDAPGVAT_OID+'261.20',
'PVP-PARTICIPANT-OKZ': LDAPGVAT_OID+'261.24',
'PVP-ROLES': LDAPGVAT_OID+'261.30',
'PVP-INVOICE-RECPT-ID': LDAPGVAT_OID+'261.40',
'PVP-COST-CENTER-ID': LDAPGVAT_OID+'261.50',
'PVP-CHARGE-CODE': LDAPGVAT_OID+'261.60',
'PVP-OU-GV-OU-ID': LDAPGVAT_OID+'3',
'PVP-FUNCTION': LDAPGVAT_OID+'33',
'PVP-BIRTHDATE': LDAPGVAT_OID+'55',
'PVP-PARTICIPANT-ID': LDAPGVAT_OID+'71',
'PVP-OU': LDAPGVAT_X500ATTR_OID+'11',
'PVP-TEL': LDAPGVAT_X500ATTR_OID+'20',
'PVP-GIVENNAME': LDAPGVAT_X500ATTR_OID+'42',
'registeredAddress': X500ATTR_OID+'26',
'roleOccupant': X500ATTR_OID+'33',
'schacCountryOfCitizenship': SCHAC+'5',
'schacCountryOfResidence': SCHAC+'11',
'schacDateOfBirth': SCHAC+'3',
'schacExpiryDate': SCHAC+'17',
'schacGender': SCHAC+'2',
'schacHomeOrganization': SCHAC+'9',
'schacHomeOrganizationType': SCHAC+'10',
'schacMotherTongue': SCHAC+'1',
'schacPersonalPosition': SCHAC+'13',
'schacPersonalTitle': SCHAC+'8',
'schacPersonalUniqueCode': SCHAC+'14',
'schacPersonalUniqueID': SCHAC+'15',
'schacPlaceOfBirth': SCHAC+'4',
'schacProjectMembership': SCHAC+'20',
'schacProjectSpecificRole': SCHAC+'21',
'schacSn1': SCHAC+'6',
'schacSn2': SCHAC+'7',
'schacUserPresenceID': SCHAC+'12',
'schacUserPrivateAttribute': SCHAC+'18',
'schacUserStatus': SCHAC+'19',
'searchGuide': X500ATTR_OID+'14',
'serialNumber': X500ATTR_OID+'5',
'sisLegalGuardianFor': SIS+'1',
'sisSchoolGrade': SIS+'2',
'sn': X500ATTR_OID+'4',
'st': X500ATTR_OID+'8',
'street': X500ATTR_OID+'9',
'supportedAlgorithms': X500ATTR_OID+'52',
'supportedApplicationContext': X500ATTR_OID+'30',
'telephoneNumber': X500ATTR_OID+'20',
'teletexTerminalIdentifier': X500ATTR_OID+'22',
'telexNumber': X500ATTR_OID+'21',
'title': X500ATTR_OID+'12',
'uid': UCL_DIR_PILOT+'1',
'uniqueMember': X500ATTR_OID+'50',
'userCertificate': X500ATTR_OID+'36',
'userPKCS12': NETSCAPE_LDAP+'216',
'userSMIMECertificate': NETSCAPE_LDAP+'40',
'x121Address': X500ATTR_OID+'24',
'x500UniqueIdentifier': X500ATTR_OID+'45',
}
}
| 45.016722
| 144
| 0.623328
|
c0ca1e4dd1d248cc6fb8faf3993d88c8d62ad378
| 7,429
|
py
|
Python
|
test/fuzz/test_runner.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | 4
|
2021-02-28T04:34:58.000Z
|
2021-09-14T15:25:31.000Z
|
test/fuzz/test_runner.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | null | null | null |
test/fuzz/test_runner.py
|
satcoin-dev/satcoin
|
a68f5965a8c28cfcaf8855a661ea3f15de9ae7d5
|
[
"MIT"
] | 1
|
2021-06-18T13:13:17.000Z
|
2021-06-18T13:13:17.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run fuzz test targets.
"""
import argparse
import configparser
import os
import sys
import subprocess
import logging
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='''Run the fuzz targets with all inputs from the seed_dir once.''',
)
parser.add_argument(
"-l",
"--loglevel",
dest="loglevel",
default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
)
parser.add_argument(
'--valgrind',
action='store_true',
help='If true, run fuzzing binaries under the valgrind memory error detector',
)
parser.add_argument(
'-x',
'--exclude',
help="A comma-separated list of targets to exclude",
)
parser.add_argument(
'seed_dir',
help='The seed corpus to run on (must contain subfolders for each fuzz target).',
)
parser.add_argument(
'target',
nargs='*',
help='The target(s) to run. Default is to run all targets.',
)
parser.add_argument(
'--m_dir',
help='Merge inputs from this directory into the seed_dir. Needs /target subdirectory.',
)
args = parser.parse_args()
# Set up logging
logging.basicConfig(
format='%(message)s',
level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
)
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile, encoding="utf8"))
if not config["components"].getboolean("ENABLE_FUZZ"):
logging.error("Must have fuzz targets built")
sys.exit(1)
# Build list of tests
test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include'))
if not test_list_all:
logging.error("No fuzz targets found")
sys.exit(1)
logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
args.target = args.target or test_list_all # By default run all
test_list_error = list(set(args.target).difference(set(test_list_all)))
if test_list_error:
logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
test_list_selection = list(set(test_list_all).intersection(set(args.target)))
if not test_list_selection:
logging.error("No fuzz targets selected")
if args.exclude:
for excluded_target in args.exclude.split(","):
if excluded_target not in test_list_selection:
logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
continue
test_list_selection.remove(excluded_target)
test_list_selection.sort()
logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
test_list_seedless = []
for t in test_list_selection:
corpus_path = os.path.join(args.seed_dir, t)
if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
test_list_seedless.append(t)
test_list_seedless.sort()
if test_list_seedless:
logging.info(
"Fuzzing harnesses lacking a seed corpus: {}".format(
" ".join(test_list_seedless)
)
)
logging.info("Please consider adding a fuzz seed corpus at https://github.com/satcoin-core/qa-assets")
try:
help_output = subprocess.run(
args=[
os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]),
'-help=1',
],
timeout=20,
check=True,
stderr=subprocess.PIPE,
universal_newlines=True,
).stderr
if "libFuzzer" not in help_output:
logging.error("Must be built with libFuzzer")
sys.exit(1)
except subprocess.TimeoutExpired:
logging.error("subprocess timed out: Currently only libFuzzer is supported")
sys.exit(1)
if args.m_dir:
merge_inputs(
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
merge_dir=args.m_dir,
)
return
run_once(
corpus=args.seed_dir,
test_list=test_list_selection,
build_dir=config["environment"]["BUILDDIR"],
use_valgrind=args.valgrind,
)
def merge_inputs(*, corpus, test_list, build_dir, merge_dir):
logging.info("Merge the inputs in the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
for t in test_list:
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-merge=1',
'-use_value_profile=1', # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
os.path.join(corpus, t),
os.path.join(merge_dir, t),
]
os.makedirs(os.path.join(corpus, t), exist_ok=True)
os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
logging.debug('Run {} with args {}'.format(t, args))
output = subprocess.run(args, check=True, stderr=subprocess.PIPE, universal_newlines=True).stderr
logging.debug('Output: {}'.format(output))
def run_once(*, corpus, test_list, build_dir, use_valgrind):
for t in test_list:
corpus_path = os.path.join(corpus, t)
os.makedirs(corpus_path, exist_ok=True)
args = [
os.path.join(build_dir, 'src', 'test', 'fuzz', t),
'-runs=1',
corpus_path,
]
if use_valgrind:
args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
logging.debug('Run {} with args {}'.format(t, args))
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
output = result.stderr
logging.debug('Output: {}'.format(output))
try:
result.check_returncode()
except subprocess.CalledProcessError as e:
if e.stdout:
logging.info(e.stdout)
if e.stderr:
logging.info(e.stderr)
logging.info("Target \"{}\" failed with exit code {}: {}".format(t, e.returncode, " ".join(args)))
sys.exit(1)
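# Illustrative sketch of the Makefile fragment that parse_test_list() expects
# (the target names here are hypothetical examples, not the real list):
#
#   FUZZ_TARGETS = \
#     test/fuzz/addrdb \
#     test/fuzz/asmap \
#     test/fuzz/base_encode_decode
#
# Each entry is stripped of the 'test/fuzz/' prefix and the trailing ' \',
# and collection stops at the first blank line after 'FUZZ_TARGETS ='.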
def parse_test_list(makefile):
with open(makefile, encoding='utf-8') as makefile_test:
test_list_all = []
read_targets = False
for line in makefile_test.readlines():
line = line.strip().replace('test/fuzz/', '').replace(' \\', '')
if read_targets:
if not line:
break
test_list_all.append(line)
continue
if line == 'FUZZ_TARGETS =':
read_targets = True
return test_list_all
if __name__ == '__main__':
main()
| 36.063107
| 180
| 0.615964
|
921f719f5701e3ac447b555cce4710e299259fc2
| 38,037
|
py
|
Python
|
python/ccxt/async_support/theocean.py
|
GimmerBot/ccxt
|
36982385305506697d57e995a9ac0e2eb04dc9b4
|
[
"MIT"
] | 3
|
2020-05-10T12:51:24.000Z
|
2021-04-20T21:36:43.000Z
|
python/ccxt/async_support/theocean.py
|
XiaoBinNumberOne/ccxt
|
b8aea97b018ef633df9866f44bf61565f44d8e6d
|
[
"MIT"
] | null | null | null |
python/ccxt/async_support/theocean.py
|
XiaoBinNumberOne/ccxt
|
b8aea97b018ef633df9866f44bf61565f44d8e6d
|
[
"MIT"
] | 2
|
2019-04-17T02:49:29.000Z
|
2019-06-01T06:45:50.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderNotFillable
from ccxt.base.errors import NotSupported
from ccxt.base.errors import ExchangeNotAvailable
class theocean(Exchange):
def describe(self):
self.check_required_dependencies()
return self.deep_extend(super(theocean, self).describe(), {
'id': 'theocean',
'name': 'The Ocean',
'countries': ['US'],
'rateLimit': 3000,
'version': 'v1',
'requiresWeb3': True,
'timeframes': {
'5m': '300',
'15m': '900',
'1h': '3600',
'6h': '21600',
'1d': '86400',
},
'has': {
'cancelAllOrders': True,
'CORS': False, # ?
'fetchClosedOrders': True,
'fetchOHLCV': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchTickers': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/43103756-d56613ce-8ed7-11e8-924e-68f9d4bcacab.jpg',
'api': 'https://api.theocean.trade',
'www': 'https://theocean.trade',
'doc': 'https://docs.theocean.trade',
'fees': 'https://theocean.trade/fees',
},
'api': {
'public': {
'get': [
'fee_components',
'token_pairs',
'ticker',
'tickers',
'candlesticks',
'candlesticks/intervals',
'trade_history',
'order_book',
'order/{orderHash}',
'version',
],
},
'private': {
'get': [
'balance',
'available_balance',
'order_history',
'order/unsigned',
'order/unsigned/market',
],
'post': [
'order',
],
'delete': [
'order/{orderHash}',
'order',
],
},
},
'exceptions': {
'exact': {
'Order not found': OrderNotFound, # {"message":"Order not found","errors":...}
},
'broad': {
"Price can't exceed 8 digits in precision.": InvalidOrder, # {"message":"Price can't exceed 8 digits in precision.","type":"paramPrice"}
'Order cannot be canceled': InvalidOrder, # {"message":"Order cannot be canceled","type":"General error"}
'Greater than available wallet balance.': InsufficientFunds,
'Fillable amount under minimum': InvalidOrder, # {"message":"Fillable amount under minimum WETH trade size.","type":"paramQuoteTokenAmount"}
'Fillable amount over maximum': InvalidOrder, # {"message":"Fillable amount over maximum TUSD trade size.","type":"paramQuoteTokenAmount"}
"Schema validation failed for 'params'": BadRequest, # # {"message":"Schema validation failed for 'params'"}
'Service Temporarily Unavailable': ExchangeNotAvailable,
},
},
'options': {
'decimals': {},
'fetchOrderMethod': 'fetch_order_from_history',
},
})
async def fetch_markets(self, params={}):
markets = await self.publicGetTokenPairs(params)
#
# [
# "baseToken": {
# "symbol": "ZRX",
# "address": "0x6ff6c0ff1d68b964901f986d4c9fa3ac68346570",
# "name": "0x Protocol Token",
# "decimals": "18",
# "minAmount": "10000000000000000000",
# "maxAmount": "10000000000000000000000",
# "precision": "-8"
# },
# "quoteToken": {
# "symbol": "ETH",
# "address": "0xd0a1e359811322d97991e03f863a0c30c2cf029c",
# "name": "Ether Token",
# "decimals": "18",
# "minAmount": "20000000000000000",
# "maxAmount": "20000000000000000000",
# "precision": "-8"
# }
# ]
#
result = []
for i in range(0, len(markets)):
market = markets[i]
baseToken = self.safe_value(market, 'baseToken', {})
quoteToken = self.safe_value(market, 'quoteToken', {})
baseId = self.safe_string(baseToken, 'address')
quoteId = self.safe_string(quoteToken, 'address')
base = self.safe_currency_code(self.safe_string(baseToken, 'symbol'))
quote = self.safe_currency_code(self.safe_string(quoteToken, 'symbol'))
symbol = base + '/' + quote
id = baseId + '/' + quoteId
baseDecimals = self.safe_integer(baseToken, 'decimals')
quoteDecimals = self.safe_integer(quoteToken, 'decimals')
self.options['decimals'][base] = baseDecimals
self.options['decimals'][quote] = quoteDecimals
precision = {
'amount': -int(baseToken['precision']),
'price': -int(quoteToken['precision']),
}
amountLimits = {
'min': self.from_wei(self.safe_string(baseToken, 'minAmount'), baseDecimals),
'max': self.from_wei(self.safe_string(baseToken, 'maxAmount'), baseDecimals),
}
priceLimits = {
'min': None,
'max': None,
}
costLimits = {
'min': self.from_wei(self.safe_string(quoteToken, 'minAmount'), quoteDecimals),
'max': self.from_wei(self.safe_string(quoteToken, 'maxAmount'), quoteDecimals),
}
limits = {
'amount': amountLimits,
'price': priceLimits,
'cost': costLimits,
}
active = True
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': limits,
'info': market,
})
return result
def parse_ohlcv(self, ohlcv, market=None, timeframe='5m', since=None, limit=None):
baseDecimals = self.safe_integer(self.options['decimals'], market['base'], 18)
return [
self.safe_timestamp(ohlcv, 'startTime'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.from_wei(self.safe_string(ohlcv, 'baseVolume'), baseDecimals),
]
async def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'baseTokenAddress': market['baseId'],
'quoteTokenAddress': market['quoteId'],
'interval': self.timeframes[timeframe],
}
if since is None:
raise ExchangeError(self.id + ' fetchOHLCV requires a since argument')
since = int(since)
request['startTime'] = since
response = await self.publicGetCandlesticks(self.extend(request, params))
#
# [
# {
# "high": "100.52",
# "low": "97.23",
# "open": "98.45",
# "close": "99.23",
# "baseVolume": "2400000000000000000000",
# "quoteVolume": "1200000000000000000000",
# "startTime": "1512929323784"
# },
# {
# "high": "100.52",
# "low": "97.23",
# "open": "98.45",
# "close": "99.23",
# "volume": "2400000000000000000000",
# "startTime": "1512929198980"
# }
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_balance_by_code(self, code, params={}):
if not self.walletAddress or (self.walletAddress.find('0x') != 0):
raise InvalidAddress(self.id + ' fetchBalanceByCode() requires the .walletAddress to be a "0x"-prefixed hexstring like "0xbF2d65B3b2907214EEA3562f21B80f6Ed7220377"')
await self.load_markets()
currency = self.currency(code)
request = {
'walletAddress': self.walletAddress.lower(),
'tokenAddress': currency['id'],
}
response = await self.privateGetBalance(self.extend(request, params))
#
# {"available":"0","committed":"0","total":"0"}
#
decimals = self.safe_integer(self.options['decimals'], code, 18)
free = self.from_wei(self.safe_string(response, 'available'), decimals)
used = self.from_wei(self.safe_string(response, 'committed'), decimals)
total = self.from_wei(self.safe_string(response, 'total'), decimals)
return {
'free': free,
'used': used,
'total': total,
}
async def fetch_balance(self, params={}):
if not self.walletAddress or (self.walletAddress.find('0x') != 0):
raise InvalidAddress(self.id + ' fetchBalance() requires the .walletAddress to be a "0x"-prefixed hexstring like "0xbF2d65B3b2907214EEA3562f21B80f6Ed7220377"')
codes = self.safe_value(self.options, 'fetchBalanceCurrencies')
if codes is None:
codes = self.safe_value(params, 'codes')
if (codes is None) or (not isinstance(codes, list)):
raise ExchangeError(self.id + ' fetchBalance() requires a `codes` parameter(an array of currency codes)')
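# Callers therefore select the currencies explicitly, e.g. (hypothetical call)
#   await exchange.fetch_balance(params={'codes': ['ETH', 'ZRX']})
# or preset them once via exchange.options['fetchBalanceCurrencies'].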
await self.load_markets()
result = {}
for i in range(0, len(codes)):
code = codes[i]
result[code] = await self.fetch_balance_by_code(code)
return self.parse_balance(result)
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1, market=None):
if market is None:
raise ArgumentsRequired(self.id + ' parseBidAsk requires a market argument')
price = float(bidask[priceKey])
amountDecimals = self.safe_integer(self.options['decimals'], market['base'], 18)
amount = self.from_wei(bidask[amountKey], 'ether', amountDecimals)
return [price, amount]
def parse_order_book(self, orderbook, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=0, amountKey=1, market=None):
result = {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
sides = [bidsKey, asksKey]
for i in range(0, len(sides)):
side = sides[i]
orders = []
bidasks = self.safe_value(orderbook, side)
for k in range(0, len(bidasks)):
orders.append(self.parse_bid_ask(bidasks[k], priceKey, amountKey, market))
result[side] = orders
result[bidsKey] = self.sort_by(result[bidsKey], 0, True)
result[asksKey] = self.sort_by(result[asksKey], 0)
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'baseTokenAddress': market['baseId'],
'quoteTokenAddress': market['quoteId'],
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetOrderBook(self.extend(request, params))
#
# {
# "bids": [
# {orderHash: '0xe2b7f80198edb561cc66cd85cb8e5f420073cf1e5143193d8add8774bd8236c4',
# price: '30',
# availableAmount: '500000000000000000',
# creationTimestamp: '1547193525',
# expirationTimestampInSec: '1549789124'
# }
# ],
# "asks": [
# {orderHash: '0xe2b7f80198edb561cc66cd85cb8e5f420073cf1e5143193d8add8774bd8236c4',
# price: '30',
# availableAmount: '500000000000000000',
# creationTimestamp: '1547193525',
# expirationTimestampInSec: '1549789124'
# }
# ]
# }
#
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'availableAmount', market)
def parse_ticker(self, ticker, market=None):
#
# {
# "bid": "0.00050915",
# "ask": "0.00054134",
# "last": "0.00052718",
# "volume": "3000000000000000000",
# "timestamp": "1512929327792"
# }
#
timestamp = int(self.safe_integer(ticker, 'timestamp') / 1000)
symbol = None
base = None
if market is not None:
symbol = market['symbol']
base = market['base']
baseDecimals = self.safe_integer(self.options['decimals'], base, 18)
baseVolume = self.from_wei(self.safe_string(ticker, 'volume'), baseDecimals)
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': self.safe_float(ticker, 'priceChange'),
'average': None,
'baseVolume': baseVolume,
'quoteVolume': None,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
tickers = await self.publicGetTickers(params)
#
# [{
# "baseTokenAddress": "0xa8e9fa8f91e5ae138c74648c9c304f1c75003a8d",
# "quoteTokenAddress": "0xc00fd9820cd2898cc4c054b7bf142de637ad129a",
# "ticker": {
# "bid": "0.00050915",
# "ask": "0.00054134",
# "last": "0.00052718",
# "volume": "3000000000000000000",
# "timestamp": "1512929327792"
# }
# }]
#
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
baseId = self.safe_string(ticker, 'baseTokenAddress')
quoteId = self.safe_string(ticker, 'quoteTokenAddress')
marketId = baseId + '/' + quoteId
market = None
symbol = marketId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker['ticker'], market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'baseTokenAddress': market['baseId'],
'quoteTokenAddress': market['quoteId'],
}
response = await self.publicGetTicker(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
#
# fetchTrades
#
# {
# "id": "37212",
# "transactionHash": "0x5e6e75e1aa681b51b034296f62ac19be7460411a2ad94042dd8ba637e13eac0c",
# "amount": "300000000000000000",
# "price": "0.00052718",
# ------- they also have a "confirmed" status here ↓ -----------------
# "status": "filled", # filled | settled | failed
# "lastUpdated": "1520265048996"
# }
#
# parseOrder trades(timeline "actions", "fills")
#
# { action: "confirmed",
# amount: "1000000000000000000",
# intentID: "MARKET_INTENT:90jjw2s7gj90jjw2s7gkjjw2s7gl",
# txHash: "0x043488fdc3f995bf9e632a32424e41ed126de90f8cb340a1ff006c2a74ca8336",
# blockNumber: "8094822",
# timestamp: "1532261686" }
#
timestamp = self.safe_integer(trade, 'lastUpdated')
if timestamp is not None:
timestamp /= 1000
price = self.safe_float(trade, 'price')
id = self.safe_string(trade, 'id')
side = self.safe_string(trade, 'side')
symbol = None
base = None
if market is not None:
symbol = market['symbol']
base = market['base']
baseDecimals = self.safe_integer(self.options['decimals'], base, 18)
amount = self.from_wei(self.safe_string(trade, 'amount'), baseDecimals)
cost = None
if amount is not None and price is not None:
cost = amount * price
takerOrMaker = 'taker'
return {
'id': id,
'order': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'baseTokenAddress': market['baseId'],
'quoteTokenAddress': market['quoteId'],
}
response = await self.publicGetTradeHistory(self.extend(request, params))
#
# [
# {
# "id": "37212",
# "transactionHash": "0x5e6e75e1aa681b51b034296f62ac19be7460411a2ad94042dd8ba637e13eac0c",
# "amount": "300000000000000000",
# "price": "0.00052718",
# "status": "filled", # filled | settled | failed
# "lastUpdated": "1520265048996"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
errorMessage = self.id + ' createOrder() requires `exchange.walletAddress` and `exchange.privateKey`. The .walletAddress should be a "0x"-prefixed hexstring like "0xbF2d65B3b2907214EEA3562f21B80f6Ed7220377". The .privateKey for that wallet should be a "0x"-prefixed hexstring like "0xe4f40d465efa94c98aec1a51f574329344c772c1bce33be07fa20a56795fdd09".'
if not self.walletAddress or (self.walletAddress.find('0x') != 0):
raise InvalidAddress(errorMessage)
if not self.privateKey or (self.privateKey.find('0x') != 0):
raise InvalidAddress(errorMessage)
orderParams = await self.fetch_order_params_to_sign(symbol, type, side, amount, price, params)
unsignedOrder = orderParams['unsignedZeroExOrder']
if unsignedOrder is None:
raise OrderNotFillable(self.id + ' ' + type + ' order to ' + side + ' ' + symbol + ' is not fillable at the moment')
signedOrder = await self.sign_zero_ex_order_v2(unsignedOrder, self.privateKey)
id = self.safe_string(signedOrder, 'orderHash')
await self.post_signed_order(signedOrder, orderParams, params)
order = await self.fetch_order(id)
order['type'] = type
return order
async def fetch_order_params_to_sign(self, symbol, type, side, amount, price=None, params={}):
if side != 'buy' and side != 'sell':
raise ExchangeError(side + ' is not valid side param. Use \'buy\' or \'sell\'')
if type != 'market' and type != 'limit':
raise ExchangeError(type + ' is not valid type param. Use \'market\' or \'limit\'')
if type == 'limit' and price is None:
raise ExchangeError('Price is not provided for limit order')
await self.load_markets()
market = self.market(symbol)
baseDecimals = self.safe_integer(self.options['decimals'], market['base'], 18)
request = {
'walletAddress': self.walletAddress.lower(), # Your Wallet Address
'baseTokenAddress': market['baseId'], # Base token address
'quoteTokenAddress': market['quoteId'], # Quote token address
'side': side, # "buy" or "sell"
'amount': self.to_wei(self.amount_to_precision(symbol, amount), baseDecimals), # Base token amount in wei
}
method = None
if type == 'limit':
method = 'privateGetOrderUnsigned'
request['price'] = self.price_to_precision(symbol, price)
elif type == 'market':
method = 'privateGetOrderUnsignedMarket'
else:
raise ExchangeError('Unsupported order type: ' + type)
response = await getattr(self, method)(self.extend(request, params))
return response
async def post_signed_order(self, signedOrder, requestParams, params={}):
request = requestParams
request['signedZeroExOrder'] = signedOrder
request = self.omit(request, 'unsignedZeroExOrder')
response = await self.privatePostOrder(self.extend(request, params))
return response
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'orderHash': id,
}
response = await self.privateDeleteOrderOrderHash(self.extend(request, params))
#
# {
# "canceledOrder": {
# "orderHash": "0x3d6b287c1dc79262d2391ae2ca9d050fdbbab2c8b3180e4a46f9f321a7f1d7a9",
# "amount": "100000000000"
# }
# }
#
market = None
if symbol is not None:
market = self.market(symbol)
return self.extend(self.parse_order(response['canceledOrder'], market), {
'status': 'canceled',
})
async def cancel_all_orders(self, symbol=None, params={}):
response = await self.privateDeleteOrder(params)
#
# [{
# "canceledOrder": {
# "orderHash": "0x3d6b287c1dc79262d2391ae2ca9d050fdbbab2c8b3180e4a46f9f321a7f1d7a9",
# "amount": "100000000000"
# }
# }]
#
return response
def parse_order(self, order, market=None):
zeroExOrder = self.safe_value(order, 'zeroExOrder')
id = self.safe_string(order, 'orderHash')
if (id is None) and (zeroExOrder is not None):
id = self.safe_string(zeroExOrder, 'orderHash')
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type') # injected from outside
timestamp = self.safe_integer(order, 'creationTimestamp')
        if timestamp is not None:
timestamp = int(timestamp / 1000)
symbol = None
baseId = self.safe_string(order, 'baseTokenAddress')
quoteId = self.safe_string(order, 'quoteTokenAddress')
marketId = None
if baseId is not None and quoteId is not None:
marketId = baseId + '/' + quoteId
market = self.safe_value(self.markets_by_id, marketId, market)
base = None
if market is not None:
symbol = market['symbol']
base = market['base']
baseDecimals = self.safe_integer(self.options['decimals'], base, 18)
price = self.safe_float(order, 'price')
filledAmount = self.from_wei(self.safe_string(order, 'filledAmount'), baseDecimals)
settledAmount = self.from_wei(self.safe_string(order, 'settledAmount'), baseDecimals)
confirmedAmount = self.from_wei(self.safe_string(order, 'confirmedAmount'), baseDecimals)
failedAmount = self.from_wei(self.safe_string(order, 'failedAmount'), baseDecimals)
deadAmount = self.from_wei(self.safe_string(order, 'deadAmount'), baseDecimals)
prunedAmount = self.from_wei(self.safe_string(order, 'prunedAmount'), baseDecimals)
amount = self.from_wei(self.safe_string(order, 'initialAmount'), baseDecimals)
filled = self.sum(filledAmount, settledAmount, confirmedAmount)
remaining = None
lastTradeTimestamp = None
timeline = self.safe_value(order, 'timeline')
trades = None
status = None
if timeline is not None:
numEvents = len(timeline)
if numEvents > 0:
timelineEventsGroupedByAction = self.group_by(timeline, 'action')
if 'error' in timelineEventsGroupedByAction:
status = 'failed'
if 'filled' in timelineEventsGroupedByAction:
fillEvents = self.safe_value(timelineEventsGroupedByAction, 'filled')
numFillEvents = len(fillEvents)
lastTradeTimestamp = self.safe_integer(fillEvents[numFillEvents - 1], 'timestamp')
lastTradeTimestamp = lastTradeTimestamp if (lastTradeTimestamp is not None) else lastTradeTimestamp
trades = []
for i in range(0, numFillEvents):
trade = self.parse_trade(self.extend(fillEvents[i], {
'price': price,
}), market)
trades.append(self.extend(trade, {
'order': id,
'type': type,
'side': side,
}))
cost = None
if filled is not None:
if remaining is None:
if amount is not None:
remaining = amount - filled
if price is not None:
cost = filled * price
fee = None
feeCost = self.safe_string(order, 'feeAmount')
if feeCost is not None:
feeOption = self.safe_string(order, 'feeOption')
feeCurrency = None
if feeOption == 'feeInNative':
if market is not None:
feeCurrency = market['base']
elif feeOption == 'feeInZRX':
feeCurrency = 'ZRX'
else:
raise NotSupported(self.id + ' encountered an unsupported order fee option: ' + feeOption)
feeDecimals = self.safe_integer(self.options['decimals'], feeCurrency, 18)
fee = {
'cost': self.from_wei(feeCost, feeDecimals),
'currency': feeCurrency,
}
amountPrecision = market['precision']['amount'] if market else 8
if remaining is not None:
if status is None:
status = 'open'
rest = remaining - failedAmount - deadAmount - prunedAmount
if rest < math.pow(10, -amountPrecision):
status = 'canceled' if (filled < amount) else 'closed'
result = {
'info': order,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'remaining': remaining,
'filled': filled,
'status': status,
'fee': fee,
'trades': trades,
}
return result
async def fetch_open_order(self, id, symbol=None, params={}):
method = self.options['fetchOrderMethod']
return await getattr(self, method)(id, symbol, self.extend({
'openAmount': 1,
}, params))
async def fetch_closed_order(self, id, symbol=None, params={}):
method = self.options['fetchOrderMethod']
return await getattr(self, method)(id, symbol, self.extend(params))
async def fetch_order_from_history(self, id, symbol=None, params={}):
request = {
'orderHash': id,
}
orders = await self.fetch_orders(symbol, None, None, self.extend(request, params))
ordersById = self.index_by(orders, 'id')
if id in ordersById:
return ordersById[id]
raise OrderNotFound(self.id + ' could not find order ' + id + ' in order history')
async def fetch_order_by_id(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'orderHash': id,
}
response = await self.publicGetOrderOrderHash(self.extend(request, params))
# {
# baseTokenAddress: '0xb18845c260f680d5b9d84649638813e342e4f8c9',
# quoteTokenAddress: '0x6ff6c0ff1d68b964901f986d4c9fa3ac68346570',
# side: 'sell',
# price: '30',
# feeTokenAddress: '0x6ff6c0ff1d68b964901f986d4c9fa3ac68346570',
# amount: '500000000000000000',
# created: '1547194003',
# expires: '1549786003',
# zeroExOrder: {
# salt: '71810414258284992779348693906799008280152689028521273772736250669496045815907',
# maker: '0xfa1a3371bcbfcf3deaa8a6f67784bfbe5b886d7f',
# taker: '0x77b18613579d49f252bd237ef113884eb37a7090',
# makerFee: '0',
# takerFee: '0',
# orderHash: '0x368540323af55868dd9ce6ac248e6a91d9b7595252ca061c4ada7612b09af1cf',
# feeRecipient: '0x88a64b5e882e5ad851bea5e7a3c8ba7c523fecbe',
# makerTokenAmount: '500000000000000000',
# takerTokenAmount: '14845250714350000000',
# makerTokenAddress: '0xb18845c260f680d5b9d84649638813e342e4f8c9',
# takerTokenAddress: '0x6ff6c0ff1d68b964901f986d4c9fa3ac68346570',
# exchangeContractAddress: '0x35dd2932454449b14cee11a94d3674a936d5d7b2',
# expirationUnixTimestampSec: '1549789602'
# },
# feeAmount: '154749285650000000',
# feeOption: 'feeInNative',
# cancelAfter: '1549786003'
# }
return self.parse_order(response)
async def fetch_order(self, id, symbol=None, params={}):
request = {
'orderHash': id,
}
orders = await self.fetch_orders(symbol, None, None, self.extend(request, params))
numOrders = len(orders)
if numOrders != 1:
raise OrderNotFound(self.id + ' order ' + id + ' not found')
return orders[0]
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['baseTokenAddress'] = market['baseId']
request['quoteTokenAddress'] = market['quoteId']
if limit is not None:
request['limit'] = limit
response = await self.privateGetOrderHistory(self.extend(request, params))
#
# [
# {
# "orderHash": "0x94629386298dee69ae63cd3e414336ae153b3f02cffb9ffc53ad71e166615618",
# "baseTokenAddress": "0x323b5d4c32345ced77393b3530b1eed0f346429d",
# "quoteTokenAddress": "0xef7fff64389b814a946f3e92105513705ca6b990",
# "side": "buy",
# "openAmount": "10000000000000000000",
# "filledAmount": "0",
# "reservedAmount": "0",
# "settledAmount": "0",
# "confirmedAmount": "0",
# "deadAmount": "0",
# "price": "0.00050915",
# "timeline": [
# {
# "action": "placed",
# "amount": "10000000000000000000",
# "timestamp": "1512929327792"
# }
# ]
# }
# ]
#
return self.parse_orders(response, None, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openAmount': 1, # returns open orders with remaining openAmount >= 1
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'openAmount': 0, # returns closed orders with remaining openAmount == 0
}
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
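# Note on the authentication scheme implemented by sign() below: private
# endpoints build a prehash string
#     prehash = apiKey + timestamp + METHOD + (JSON body for POST, json({}) otherwise)
# and send base64(HMAC-SHA256(prehash, secret)) as TOX-ACCESS-SIGN, together
# with TOX-ACCESS-KEY and TOX-ACCESS-TIMESTAMP. For example (hypothetical
# values), a GET with apiKey 'k' and timestamp '1550000000' signs the string
# 'k1550000000GET{}'.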
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
timestamp = str(self.seconds())
prehash = self.apiKey + timestamp + method
if method == 'POST':
body = self.json(query)
prehash += body
else:
if query:
url += '?' + self.urlencode(query)
prehash += self.json({})
signature = self.hmac(self.encode(prehash), self.encode(self.secret), hashlib.sha256, 'base64')
headers = {
'TOX-ACCESS-KEY': self.apiKey,
'TOX-ACCESS-SIGN': signature,
'TOX-ACCESS-TIMESTAMP': timestamp,
'Content-Type': 'application/json',
}
elif api == 'public':
if query:
url += '?' + self.urlencode(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
# code 401 and plain body 'Authentication failed'(with single quotes)
# self error is sent if you do not submit a proper Content-Type
if body == "'Authentication failed'":
raise AuthenticationError(self.id + ' ' + body)
message = self.safe_string(response, 'message')
if message is not None:
#
# {"message":"Schema validation failed for 'query'","errors":[{"name":"required","argument":"startTime","message":"requires property \"startTime\"","instance":{"baseTokenAddress":"0x6ff6c0ff1d68b964901f986d4c9fa3ac68346570","quoteTokenAddress":"0xd0a1e359811322d97991e03f863a0c30c2cf029c","interval":"300"},"property":"instance"}]}
# {"message":"Logic validation failed for 'query'","errors":[{"message":"startTime should be between 0 and current date","type":"startTime"}]}
# {"message":"Order not found","errors":[]}
# {"message":"Orderbook exhausted for intent MARKET_INTENT:8yjjzd8b0e8yjjzd8b0fjjzd8b0g"}
# {"message":"Intent validation failed.","errors":[{"message":"Greater than available wallet balance.","type":"walletBaseTokenAmount"}]}
# {"message":"Schema validation failed for 'body'","errors":[{"name":"anyOf","argument":["[subschema 0]","[subschema 1]","[subschema 2]"],"message":"is not any of [subschema 0],[subschema 1],[subschema 2]","instance":{"signedTargetOrder":{"error":{"message":"Unsigned target order validation failed.","errors":[{"message":"Greater than available wallet balance.","type":"walletBaseTokenAmount"}]},"maker":"0x1709c02cd7327d391a39a7671af8a91a1ef8a47b","orderHash":"0xda007ea8b5eca71ac96fe4072f7c1209bb151d898a9cc89bbeaa594f0491ee49","ecSignature":{"v":27,"r":"0xb23ce6c4a7b5d51d77e2d00f6d1d472a3b2e72d5b2be1510cfeb122f9366b79e","s":"0x07d274e6d7a00b65fc3026c2f9019215b1e47a5ac4d1f05e03f90550d27109be"}}},"property":"instance"}]}
# {"message":"Schema validation failed for 'params'","errors":[{"name":"pattern","argument":"^0x[0-9a-fA-F]{64}$","message":"does not match pattern \"^0x[0-9a-fA-F]{64}$\"","instance":"1","property":"instance.orderHash"}]}
#
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
| 45.120996
| 738
| 0.552383
|
1ec9010913ec1a55e6d755e7be3c367a3b55fa9e
| 4,576
|
py
|
Python
|
export.py
|
onecatcn/MedicalSeg
|
ba490c5c4541ac5bad0aefad6453ce0a48241ec7
|
[
"Apache-2.0"
] | null | null | null |
export.py
|
onecatcn/MedicalSeg
|
ba490c5c4541ac5bad0aefad6453ce0a48241ec7
|
[
"Apache-2.0"
] | null | null | null |
export.py
|
onecatcn/MedicalSeg
|
ba490c5c4541ac5bad0aefad6453ce0a48241ec7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import paddle
import yaml
from medicalseg.cvlibs import Config
from medicalseg.utils import logger
def parse_args():
parser = argparse.ArgumentParser(description='Model export.')
# params of training
parser.add_argument("--config",
dest="cfg",
help="The config file.",
default=None,
type=str,
required=True)
parser.add_argument('--save_dir',
dest='save_dir',
help='The directory for saving the exported model',
type=str,
default='./output')
parser.add_argument('--model_path',
dest='model_path',
help='The path of model for export',
type=str,
default=None)
parser.add_argument(
'--without_argmax',
dest='without_argmax',
help='Do not add the argmax operation at the end of the network',
action='store_true')
parser.add_argument(
'--with_softmax',
dest='with_softmax',
help='Add the softmax operation at the end of the network',
action='store_true')
parser.add_argument(
"--input_shape",
nargs='+',
help="Export the model with fixed input shape, such as 1 3 1024 1024.",
type=int,
default=None)
return parser.parse_args()
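# Example invocation (paths are hypothetical):
#   python export.py --config configs/vnet.yml \
#       --model_path output/best_model/model.pdparams \
#       --save_dir output/export --with_softmax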
class SavedSegmentationNet(paddle.nn.Layer):
def __init__(self, net, without_argmax=False, with_softmax=False):
super().__init__()
self.net = net
self.post_processer = PostPorcesser(without_argmax, with_softmax)
def forward(self, x):
outs = self.net(x)
outs = self.post_processer(outs)
return outs
class PostPorcesser(paddle.nn.Layer):
def __init__(self, without_argmax, with_softmax):
super().__init__()
self.without_argmax = without_argmax
self.with_softmax = with_softmax
def forward(self, outs):
new_outs = []
for out in outs:
if self.with_softmax:
out = paddle.nn.functional.softmax(out, axis=1)
if not self.without_argmax:
out = paddle.argmax(out, axis=1)
new_outs.append(out)
return new_outs
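# Shape note (assuming channel-first logits of shape [N, C, D, H, W]): the
# optional softmax keeps the shape and turns logits into per-voxel class
# probabilities, while the default argmax over axis=1 reduces each output to
# an integer label volume of shape [N, D, H, W].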
def main(args):
os.environ['MEDICALSEG_EXPORT_STAGE'] = 'True'
cfg = Config(args.cfg)
net = cfg.model
if args.model_path:
para_state_dict = paddle.load(args.model_path)
net.set_dict(para_state_dict)
logger.info('Loaded trained params of model successfully.')
if args.input_shape is None:
shape = [None, 1, None, None, None]
else:
shape = args.input_shape
if not args.without_argmax or args.with_softmax:
new_net = SavedSegmentationNet(net, args.without_argmax,
args.with_softmax)
else:
new_net = net
new_net.eval()
new_net = paddle.jit.to_static(new_net,
input_spec=[
paddle.static.InputSpec(shape=shape,
dtype='float32')
]) # export is export to static graph
save_path = os.path.join(args.save_dir, 'model')
paddle.jit.save(new_net, save_path)
yml_file = os.path.join(args.save_dir, 'deploy.yaml')
with open(yml_file, 'w') as file:
transforms = cfg.export_config.get('transforms', [{}])
data = {
'Deploy': {
'transforms': transforms,
'model': 'model.pdmodel',
'params': 'model.pdiparams'
}
}
yaml.dump(data, file)
logger.info(f'Model is saved in {args.save_dir}.')
if __name__ == '__main__':
args = parse_args()
main(args)
| 31.777778
| 79
| 0.580638
|
c563f37aa78b4ba0da1cac35d56971c44f826db3
| 16,973
|
py
|
Python
|
controller.py
|
dexterfichuk/neural-architecture-search
|
d1fe391d7ab48df36eb2a1fc1efeaf4ae87d58c5
|
[
"MIT"
] | 1
|
2022-03-16T06:04:14.000Z
|
2022-03-16T06:04:14.000Z
|
controller.py
|
DanielLSM/neural-architecture-search
|
87da6fe0a3e1599f6ee3192a22cfbb5b0b317698
|
[
"MIT"
] | null | null | null |
controller.py
|
DanielLSM/neural-architecture-search
|
87da6fe0a3e1599f6ee3192a22cfbb5b0b317698
|
[
"MIT"
] | null | null | null |
import numpy as np
import pprint
from collections import OrderedDict
from keras import backend as K
import tensorflow as tf
import os
if not os.path.exists('weights/'):
os.makedirs('weights/')
class StateSpace:
'''
State Space manager
    Provides utility functions for holding "states" / "actions" that the controller
must use to train and predict.
Also provides a more convenient way to define the search space
'''
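# Illustrative usage sketch (names and values below are hypothetical):
#
#   state_space = StateSpace()
#   state_space.add_state(name='kernel', values=[1, 3])        # -> id 0
#   state_space.add_state(name='filters', values=[16, 32, 64]) # -> id 1
#   state_space.one_hot_encode(0, 3)    # -> array([[0., 1.]], dtype=float32)
#   state_space.get_state_value(1, 2)   # -> 64
#
# With num_layers=2 the controller samples size * num_layers = 4 actions
# (one per registered state per layer) to describe an architecture.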
def __init__(self):
self.states = OrderedDict()
self.state_count_ = 0
def add_state(self, name, values):
'''
Adds a "state" to the state manager, along with some metadata for efficient
packing and unpacking of information required by the RNN Controller.
Stores metadata such as:
- Global ID
- Name
- Valid Values
- Number of valid values possible
- Map from value ID to state value
- Map from state value to value ID
Args:
name: name of the state / action
values: valid values that this state can take
Returns:
Global ID of the state. Can be used to refer to this state later.
'''
index_map = {}
for i, val in enumerate(values):
index_map[i] = val
value_map = {}
for i, val in enumerate(values):
value_map[val] = i
metadata = {
'id': self.state_count_,
'name': name,
'values': values,
'size': len(values),
'index_map_': index_map,
'value_map_': value_map,
}
self.states[self.state_count_] = metadata
self.state_count_ += 1
return self.state_count_ - 1
def one_hot_encode(self, id, value):
'''
One hot encode the specific state value
Args:
id: global id of the state
value: state value
Returns:
one hot encoded representation of the state value
'''
state = self[id]
size = state['size']
value_map = state['value_map_']
value_idx = value_map[value]
one_hot = np.zeros((1, size), dtype=np.float32)
one_hot[np.arange(1), value_idx] = 1.0
return one_hot
def get_state_value(self, id, index):
'''
Retrieves the state value from the state value ID
Args:
id: global id of the state
index: index of the state value (usually from argmax)
Returns:
The actual state value at given value index
'''
state = self[id]
index_map = state['index_map_']
value = index_map[index]
return value
def get_random_state_space(self, num_layers):
'''
Constructs a random initial state space for feeding as an initial value
to the Controller RNN
Args:
num_layers: number of layers to duplicate the search space
Returns:
A list of one hot encoded states
'''
states = []
for id in range(self.size * num_layers):
state = self[id]
size = state['size']
sample = np.random.choice(size, size=1)
sample = state['index_map_'][sample[0]]
state = self.one_hot_encode(id, sample)
states.append(state)
return states
def parse_state_space_list(self, state_list):
'''
Parses a list of one hot encoded states to retrieve a list of state values
Args:
state_list: list of one hot encoded states
Returns:
list of state values
'''
state_values = []
for id, state_one_hot in enumerate(state_list):
state_val_idx = np.argmax(state_one_hot, axis=-1)[0]
value = self.get_state_value(id, state_val_idx)
state_values.append(value)
return state_values
def print_state_space(self):
''' Pretty print the state space '''
print('*' * 40, 'STATE SPACE', '*' * 40)
pp = pprint.PrettyPrinter(indent=2, width=100)
for id, state in self.states.items():
pp.pprint(state)
print()
def print_actions(self, actions):
''' Print the action space properly '''
print('Actions :')
for id, action in enumerate(actions):
if id % self.size == 0:
print("*" * 20, "Layer %d" % (((id + 1) // self.size) + 1), "*" * 20)
state = self[id]
name = state['name']
vals = [(n, p) for n, p in zip(state['values'], *action)]
print("%s : " % name, vals)
print()
def __getitem__(self, id):
return self.states[id % self.size]
@property
def size(self):
return self.state_count_
class Controller:
'''
Utility class to manage the RNN Controller
'''
def __init__(self, policy_session, num_layers, state_space,
reg_param=0.001,
discount_factor=0.99,
exploration=0.8,
controller_cells=32,
restore_controller=False):
self.policy_session = policy_session # type: tf.Session
self.num_layers = num_layers
self.state_space = state_space # type: StateSpace
self.state_size = self.state_space.size
self.controller_cells = controller_cells
self.reg_strength = reg_param
self.discount_factor = discount_factor
self.exploration = exploration
self.restore_controller = restore_controller
self.reward_buffer = []
self.state_buffer = []
self.cell_outputs = []
self.policy_classifiers = []
self.policy_actions = []
self.policy_labels = []
self.build_policy_network()
def get_action(self, state):
'''
Gets a one hot encoded action list, either from random sampling or from
the Controller RNN
Args:
state: a list of one hot encoded states, whose first value is used as initial
state for the controller RNN
Returns:
A one hot encoded action list
'''
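# This is an epsilon-greedy choice: with probability self.exploration
# (0.8 by default, decayed during training) the actions are sampled uniformly
# at random; otherwise they are predicted by the controller RNN from the first
# one-hot state. Either way, a list of self.state_size * self.num_layers
# row vectors (one-hot samples or softmax probabilities) is returned.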
if np.random.random() < self.exploration:
print("Generating random action to explore")
actions = []
for i in range(self.state_size * self.num_layers):
state_ = self.state_space[i]
size = state_['size']
sample = np.random.choice(size, size=1)
sample = state_['index_map_'][sample[0]]
action = self.state_space.one_hot_encode(i, sample)
actions.append(action)
return actions
else:
print("Prediction action from Controller")
initial_state = self.state_space[0]
size = initial_state['size']
if state[0].shape != (1, size, 1):
state = state[0].reshape((1, size, 1))
else:
state = state[0]
print("State input to Controller for Action : ", state.flatten())
with self.policy_session.as_default():
K.set_session(self.policy_session)
with tf.name_scope('action_prediction'):
pred_actions = self.policy_session.run(self.policy_actions, feed_dict={self.state_input: state})
return pred_actions
def build_policy_network(self):
with self.policy_session.as_default():
K.set_session(self.policy_session)
with tf.name_scope('controller'):
with tf.variable_scope('policy_network'):
# state input is the first input fed into the controller RNN.
# the rest of the inputs are fed to the RNN internally
with tf.name_scope('state_input'):
state_input = tf.placeholder(dtype=tf.float32, shape=(1, None, 1), name='state_input')
self.state_input = state_input
# we can use LSTM as the controller as well
nas_cell = tf.contrib.rnn.NASCell(self.controller_cells)
cell_state = nas_cell.zero_state(batch_size=1, dtype=tf.float32)
# initially, cell input will be 1st state input
cell_input = state_input
# we provide a flat list of chained input-output to the RNN
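# Schematic of the unrolled chain built by the loop below:
#   state_input -> NASCell -> classifier_0 -> softmax  (action for state 0)
#                                |
#                                expand_dims -> cell_input for step 1 -> ...
# i.e. each step's logits are fed back, together with the carried cell state,
# as the next step's RNN input, one step per (state, layer) pair.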
for i in range(self.state_size * self.num_layers):
state_space = self.state_space[i]
size = state_space['size']
with tf.name_scope('controller_output_%d' % i):
# feed the ith layer input (i-1 layer output) to the RNN
outputs, final_state = tf.nn.dynamic_rnn(nas_cell,
cell_input,
initial_state=cell_state,
dtype=tf.float32)
# add a new classifier for each layers output
classifier = tf.layers.dense(outputs[:, -1, :], units=size, name='classifier_%d' % (i),
reuse=False)
preds = tf.nn.softmax(classifier)
# feed the previous layer (i-1 layer output) to the next layers input, along with state
cell_input = tf.expand_dims(classifier, -1, name='cell_output_%d' % (i))
cell_state = final_state
# store the tensors for later loss computation
self.cell_outputs.append(cell_input)
self.policy_classifiers.append(classifier)
self.policy_actions.append(preds)
policy_net_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='policy_network')
with tf.name_scope('optimizer'):
self.global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step,
500, 0.95, staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
with tf.name_scope('losses'):
self.discounted_rewards = tf.placeholder(tf.float32, shape=(None,), name='discounted_rewards')
tf.summary.scalar('discounted_reward', tf.reduce_sum(self.discounted_rewards))
# calculate sum of all the individual classifiers
cross_entropy_loss = 0
for i in range(self.state_size * self.num_layers):
classifier = self.policy_classifiers[i]
state_space = self.state_space[i]
size = state_space['size']
with tf.name_scope('state_%d' % (i + 1)):
labels = tf.placeholder(dtype=tf.float32, shape=(None, size), name='cell_label_%d' % i)
self.policy_labels.append(labels)
ce_loss = tf.nn.softmax_cross_entropy_with_logits(logits=classifier, labels=labels)
tf.summary.scalar('state_%d_ce_loss' % (i + 1), tf.reduce_mean(ce_loss))
cross_entropy_loss += ce_loss
policy_gradient_loss = tf.reduce_mean(cross_entropy_loss)
reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in policy_net_variables]) # Regularization
# sum up policy gradient and regularization loss
self.total_loss = policy_gradient_loss + self.reg_strength * reg_loss
tf.summary.scalar('total_loss', self.total_loss)
self.gradients = self.optimizer.compute_gradients(self.total_loss)
with tf.name_scope('policy_gradients'):
# compute policy gradients
for i, (grad, var) in enumerate(self.gradients):
if grad is not None:
self.gradients[i] = (grad * self.discounted_rewards, var)
# training update
with tf.name_scope("train_policy_network"):
# apply gradients to update policy network
self.train_op = self.optimizer.apply_gradients(self.gradients, global_step=self.global_step)
self.summaries_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter('logs', graph=self.policy_session.graph)
self.policy_session.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=1)
if self.restore_controller:
path = tf.train.latest_checkpoint('weights/')
if path is not None and tf.train.checkpoint_exists(path):
print("Loading Controller Checkpoint !")
self.saver.restore(self.policy_session, path)
def store_rollout(self, state, reward):
self.reward_buffer.append(reward)
self.state_buffer.append(state)
        # dump buffers to file if it grows larger than 20 items
if len(self.reward_buffer) > 20:
with open('buffers.txt', mode='a+') as f:
for i in range(20):
state_ = self.state_buffer[i]
state_list = self.state_space.parse_state_space_list(state_)
state_list = ','.join(str(v) for v in state_list)
f.write("%0.4f,%s\n" % (self.reward_buffer[i], state_list))
print("Saved buffers to file `buffers.txt` !")
self.reward_buffer = [self.reward_buffer[-1]]
self.state_buffer = [self.state_buffer[-1]]
def discount_rewards(self):
'''
Compute discounted rewards over the entire reward buffer
Returns:
Discounted reward value
'''
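# Worked example (illustrative values, not from the original code): with
# discount_factor=0.9 and reward_buffer=[0.2, 0.0, 0.5], the reversed pass yields
# [0.2, 0.45, 0.5]; every non-zero reward resets the running sum before being added,
# and only the last entry is returned.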
rewards = np.asarray(self.reward_buffer)
discounted_rewards = np.zeros_like(rewards)
running_add = 0
for t in reversed(range(0, rewards.size)):
if rewards[t] != 0:
running_add = 0
running_add = running_add * self.discount_factor + rewards[t]
discounted_rewards[t] = running_add
return discounted_rewards[-1]
def train_step(self):
'''
Perform a single train step on the Controller RNN
Returns:
the training loss
'''
states = self.state_buffer[-1]
label_list = []
# parse the state space to get the real values of the states,
# then one-hot encode them for comparison with the predictions
state_list = self.state_space.parse_state_space_list(states)
for id, state_value in enumerate(state_list):
state_one_hot = self.state_space.one_hot_encode(id, state_value)
label_list.append(state_one_hot)
# the initial input to the controller RNN
state_input_size = self.state_space[0]['size']
state_input = states[0].reshape((1, state_input_size, 1))
print("State input to Controller for training : ", state_input.flatten())
# the discounted reward value
reward = self.discount_rewards()
reward = np.asarray([reward]).astype('float32')
feed_dict = {
self.state_input: state_input,
self.discounted_rewards: reward
}
# prepare the feed dict with the values of all the policy labels for each
# of the Controller outputs
for i, label in enumerate(label_list):
feed_dict[self.policy_labels[i]] = label
with self.policy_session.as_default():
K.set_session(self.policy_session)
print("Training RNN (States ip) : ", state_list)
print("Training RNN (Reward ip) : ", reward.flatten())
_, loss, summary, global_step = self.policy_session.run([self.train_op, self.total_loss, self.summaries_op,
self.global_step],
feed_dict=feed_dict)
self.summary_writer.add_summary(summary, global_step)
self.saver.save(self.policy_session, save_path='weights/controller.ckpt', global_step=self.global_step)
# reduce exploration after many train steps
if global_step != 0 and global_step % 20 == 0 and self.exploration > 0.5:
self.exploration *= 0.99
return loss
| 37.467991
| 119
| 0.564013
|
fb7e63a6b251dbc47fe7df5f154a3e724232bdd1
| 9,967
|
py
|
Python
|
arjuna-samples/arjex/test/pkg/rules/check_rules_06_tags.py
|
ChandraMouliDisturbs/arjuna
|
4965622fbb01a5e5b6459110c413accc5c483424
|
[
"Apache-2.0"
] | null | null | null |
arjuna-samples/arjex/test/pkg/rules/check_rules_06_tags.py
|
ChandraMouliDisturbs/arjuna
|
4965622fbb01a5e5b6459110c413accc5c483424
|
[
"Apache-2.0"
] | null | null | null |
arjuna-samples/arjex/test/pkg/rules/check_rules_06_tags.py
|
ChandraMouliDisturbs/arjuna
|
4965622fbb01a5e5b6459110c413accc5c483424
|
[
"Apache-2.0"
] | null | null | null |
# This file is a part of Arjuna
# Copyright 2015-2020 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
from arjuna.engine.selection.selector import Selector
from arjuna.core.constant import *
from .helpers import *
@test
def check_rule_creation_tags(request):
r = "with tags chrome, firefox"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "tags"
assert rule.target == set({'chrome', 'firefox'})
assert rule.condition == RuleConditionType.HAS_INTERSECTION
assert rule.expression == set({'chrome', 'firefox'})
assert rule.checker.__name__ == 'has_intersection'
r = "with tag chrome"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "tags"
assert rule.target == set({'chrome'})
assert rule.condition == RuleConditionType.HAS_INTERSECTION
assert rule.expression == set({'chrome'})
assert rule.checker.__name__ == 'has_intersection'
r = "withall tags chrome, abc"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "tags"
assert rule.target == set({'chrome', 'abc'})
assert rule.condition == RuleConditionType.IS_SUBSET
assert rule.expression == set({'chrome', 'abc'})
assert rule.checker.__name__ == 'is_subset'
r = "without tags chrome, abc"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "tags"
assert rule.target == set({'chrome', 'abc'})
assert rule.condition == RuleConditionType.NO_INTERSECTION
assert rule.expression == set({'chrome', 'abc'})
assert rule.checker.__name__ == 'has_no_intersection'
r = "without tag chrome"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "tags"
assert rule.target == set({'chrome'})
assert rule.condition == RuleConditionType.NO_INTERSECTION
assert rule.expression == set({'chrome'})
assert rule.checker.__name__ == 'has_no_intersection'
r = "with bugs b1,b2"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "bugs"
assert rule.target == set({'b1', 'b2'})
assert rule.condition == RuleConditionType.HAS_INTERSECTION
assert rule.expression == set({'b1', 'b2'})
assert rule.checker.__name__ == 'has_intersection'
r = "with bug b1"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "bugs"
assert rule.target == set({'b1'})
assert rule.condition == RuleConditionType.HAS_INTERSECTION
assert rule.expression == set({'b1'})
assert rule.checker.__name__ == 'has_intersection'
r = "withall bugs b1,abc"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "bugs"
assert rule.target == set({'b1', 'abc'})
assert rule.condition == RuleConditionType.IS_SUBSET
assert rule.expression == set({'b1', 'abc'})
assert rule.checker.__name__ == 'is_subset'
r = "without bugs b1,abc"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "bugs"
assert rule.target == set({'b1', 'abc'})
assert rule.condition == RuleConditionType.NO_INTERSECTION
assert rule.expression == set({'b1', 'abc'})
assert rule.checker.__name__ == 'has_no_intersection'
r = "without bug b1"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "bugs"
assert rule.target == set({'b1'})
assert rule.condition == RuleConditionType.NO_INTERSECTION
assert rule.expression == set({'b1'})
assert rule.checker.__name__ == 'has_no_intersection'
r = "with envs env1, env2"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "envs"
assert rule.target == set({'env1', 'env2'})
assert rule.condition == RuleConditionType.HAS_INTERSECTION
assert rule.expression == set({'env1', 'env2'})
assert rule.checker.__name__ == 'has_intersection'
r = "with env env1"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "envs"
assert rule.target == set({'env1'})
assert rule.condition == RuleConditionType.HAS_INTERSECTION
assert rule.expression == set({'env1'})
assert rule.checker.__name__ == 'has_intersection'
r = "withall envs env1, env2"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "envs"
assert rule.target == set({'env1', 'env2'})
assert rule.condition == RuleConditionType.IS_SUBSET
assert rule.expression == set({'env1', 'env2'})
assert rule.checker.__name__ == 'is_subset'
r = "without envs env1, env2"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "envs"
assert rule.target == set({'env1', 'env2'})
assert rule.condition == RuleConditionType.NO_INTERSECTION
assert rule.expression == set({'env1', 'env2'})
assert rule.checker.__name__ == 'has_no_intersection'
r = "without env env1"
selector = Selector()
selector.include(r)
rule = selector.irules[0]
print(rule)
assert rule.__class__.__name__ == "IterablePatternRule"
assert rule.rule_str == r
assert rule.container == "envs"
assert rule.target == set({'env1'})
assert rule.condition == RuleConditionType.NO_INTERSECTION
assert rule.expression == set({'env1'})
assert rule.checker.__name__ == 'has_no_intersection'
@test
def check_tag_selection(request):
for tname in ['tag', 'bug' , 'env']:
rule = get_rule(f"with {tname}s t1, t2")
obj = Obj()
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('t1')
assert rule.matches(obj) is True
obj = Obj()
getattr(obj, f'{tname}s').add('t2')
assert rule.matches(obj) is True
obj = Obj()
getattr(obj, f'{tname}s').add('abc')
assert rule.matches(obj) is False
rule = get_rule(f"with {tname} t1")
obj = Obj()
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('t1')
assert rule.matches(obj) is True
obj = Obj()
getattr(obj, f'{tname}s').add('abc')
assert rule.matches(obj) is False
rule = get_rule(f"withall {tname}s t1, abc")
obj = Obj()
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('t1')
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('t2')
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').update({'t2', 't1'})
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('abc')
assert rule.matches(obj) is False
rule = get_rule(f"without {tname}s t1, abc")
obj = Obj()
assert rule.matches(obj) is True
obj = Obj()
getattr(obj, f'{tname}s').add('t1')
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('abc')
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('t2')
assert rule.matches(obj) is True
rule = get_rule(f"without {tname} t1")
obj = Obj()
assert rule.matches(obj) is True
obj = Obj()
getattr(obj, f'{tname}s').add('t1')
assert rule.matches(obj) is False
obj = Obj()
getattr(obj, f'{tname}s').add('abc')
assert rule.matches(obj) is True
| 33.558923
| 74
| 0.643624
|
631e995958db992d536f4bb7679f0b0cef558f77
| 525
|
py
|
Python
|
vut/lib/python3.8/site-packages/pipenv/vendor/tomlkit/toml_file.py
|
dan-mutua/djangowk1
|
1e5dcb6443ef21451e21845ec639198719e11b10
|
[
"MIT"
] | 6,263
|
2017-01-20T17:41:36.000Z
|
2022-02-15T20:48:57.000Z
|
vut/lib/python3.8/site-packages/pipenv/vendor/tomlkit/toml_file.py
|
dan-mutua/djangowk1
|
1e5dcb6443ef21451e21845ec639198719e11b10
|
[
"MIT"
] | 1,100
|
2017-01-20T19:41:52.000Z
|
2017-12-06T09:15:13.000Z
|
vut/lib/python3.8/site-packages/pipenv/vendor/tomlkit/toml_file.py
|
dan-mutua/djangowk1
|
1e5dcb6443ef21451e21845ec639198719e11b10
|
[
"MIT"
] | 366
|
2017-01-21T10:06:52.000Z
|
2021-11-25T17:09:19.000Z
|
import io
from .api import loads
from .toml_document import TOMLDocument
class TOMLFile(object):
"""
Represents a TOML file.
"""
def __init__(self, path): # type: (str) -> None
self._path = path
def read(self): # type: () -> TOMLDocument
with io.open(self._path, encoding="utf-8") as f:
return loads(f.read())
def write(self, data): # type: (TOMLDocument) -> None
with io.open(self._path, "w", encoding="utf-8") as f:
f.write(data.as_string())
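# Minimal usage sketch (illustrative; "config.toml" is a hypothetical path):
#   doc = TOMLFile("config.toml").read()   # returns a TOMLDocument with comments preserved
#   doc["title"] = "example"
#   TOMLFile("config.toml").write(doc)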
| 23.863636
| 61
| 0.588571
|
f3d2d8bb7176bf227731f7faf543cba5f9e31935
| 936
|
py
|
Python
|
settings_example.py
|
alanmitchell/solar-summary
|
60647d50645ec1464e91b8b7d974be86c26c70de
|
[
"MIT"
] | null | null | null |
settings_example.py
|
alanmitchell/solar-summary
|
60647d50645ec1464e91b8b7d974be86c26c70de
|
[
"MIT"
] | null | null | null |
settings_example.py
|
alanmitchell/solar-summary
|
60647d50645ec1464e91b8b7d974be86c26c70de
|
[
"MIT"
] | null | null | null |
"""This is a sample settings.py file. A 'settings.py' needs to be included
in the directory where the script runs. It needs to include the following
attributes.
"""
# This is the API Key you receive from Enphase when you sign up for an API Account.
# See the Enphase API documentation for more details.
API_KEY = '6d32223a44baffe9ffa9b9edbff05adc7a'
# This is the User ID from the Enphase Account of the Solar system you are
# collecting data from.
# See the Enphase API documentation for more details.
USER_ID = '2e7a67afed9a51220a'
# This is the System ID of the system you are collecting data from
# See the Enphase API documentation for more details.
SYSTEM_ID = 1223456
# Rated DC Capacity of the System in kW
SYSTEM_KW = 2.85
# Set to True to collect new data from the Enphase API each time the
# script is run. False otherwise.
COLLECT = True
# Set to True to create new Plot files each time the script is run.
PLOT = True
| 34.666667
| 81
| 0.766026
|
ba9148a0e92a5b8b375d31e60aa4ebe7485e4cbe
| 14,876
|
py
|
Python
|
tests/contrib/urllib3/test_urllib3.py
|
twosigmajab/dd-trace-py
|
6c582ae7d606a7c102a14731dff05560ebed7831
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/urllib3/test_urllib3.py
|
twosigmajab/dd-trace-py
|
6c582ae7d606a7c102a14731dff05560ebed7831
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/contrib/urllib3/test_urllib3.py
|
twosigmajab/dd-trace-py
|
6c582ae7d606a7c102a14731dff05560ebed7831
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import mock
import pytest
import urllib3
from ddtrace import config
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.urllib3 import patch
from ddtrace.contrib.urllib3 import unpatch
from ddtrace.ext import errors
from ddtrace.ext import http
from ddtrace.pin import Pin
from tests.opentracer.utils import init_tracer
from tests.utils import TracerTestCase
# socket name comes from https://english.stackexchange.com/a/44048
SOCKET = "httpbin.org"
URL_200 = "http://{}/status/200".format(SOCKET)
URL_500 = "http://{}/status/500".format(SOCKET)
class BaseUrllib3TestCase(TracerTestCase):
"""Provides the setup and teardown for patching/unpatching the urllib3 integration"""
def setUp(self):
super(BaseUrllib3TestCase, self).setUp()
patch()
self.http = urllib3.PoolManager()
Pin.override(urllib3.connectionpool.HTTPConnectionPool, tracer=self.tracer)
def tearDown(self):
super(BaseUrllib3TestCase, self).tearDown()
unpatch()
class TestUrllib3(BaseUrllib3TestCase):
def test_HTTPConnectionPool_traced(self):
"""Tests that requests made from the HTTPConnectionPool are traced"""
pool = urllib3.connectionpool.HTTPConnectionPool(SOCKET)
# Test a relative URL
r = pool.request("GET", "/status/200")
assert r.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag(http.URL) == URL_200
# Test an absolute URL
r = pool.request("GET", URL_200)
assert r.status == 200
assert len(self.pop_spans()) == 1
def test_traced_connection_from_url(self):
"""Tests tracing from ``connection_from_url`` is set up"""
conn = urllib3.connectionpool.connection_from_url(URL_200)
resp = conn.request("GET", "/")
assert resp.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag(http.URL) == "http://" + SOCKET + "/"
def test_resource_path(self):
"""Tests that a successful request tags a single span with the URL"""
resp = self.http.request("GET", URL_200)
assert resp.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag("http.url") == URL_200
def test_tracer_disabled(self):
"""Tests a disabled tracer produces no spans on request"""
self.tracer.enabled = False
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 0
def test_args_kwargs(self):
"""
Test that args and kwargs are correctly inferred from the target function's
signature.
The args/kwargs used in the integration are:
- method (idx 0)
- url (idx 1)
- headers (idx 3)
"""
inputs = [
(("POST", URL_200, b"payload", {"accept": "*"}), {}),
(("POST", URL_200, b"payload"), {"headers": {"accept": "*"}}),
(("POST", URL_200), {"headers": {"accept": "*"}}),
(("POST",), {"url": URL_200, "headers": {"accept": "*"}}),
((), {"method": "POST", "url": URL_200, "headers": {"accept": "*"}}),
]
for args, kwargs in inputs:
with self.override_config("urllib3", {}):
config.urllib3.http.trace_headers(["accept"])
pool = urllib3.connectionpool.HTTPConnectionPool(SOCKET)
out = pool.urlopen(*args, **kwargs)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag(http.METHOD) == "POST"
assert s.get_tag(http.STATUS_CODE) == "200"
assert s.get_tag(http.URL) == URL_200
assert s.get_tag("http.request.headers.accept") == "*"
def test_untraced_request(self):
"""Disabling tracing with unpatch should submit no spans"""
# Assumes patching is done in the setUp of the test
unpatch()
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 0
def test_double_patch(self):
"""Ensure that double patch doesn't duplicate instrumentation"""
patch()
connpool = urllib3.connectionpool.HTTPConnectionPool(SOCKET)
setattr(connpool, "datadog_tracer", self.tracer)
out = connpool.urlopen("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
def test_200(self):
"""Test 200 span tags"""
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag(http.METHOD) == "GET"
assert s.get_tag(http.URL) == URL_200
assert s.get_tag(http.STATUS_CODE) == "200"
assert s.error == 0
assert s.span_type == "http"
assert http.QUERY_STRING not in s.meta
def test_200_query_string(self):
"""Tests query string tag is added when trace_query_string config is set"""
query_string = "key=value&key2=value2"
URL_200_QS = URL_200 + "?" + query_string
with self.override_http_config("urllib3", dict(trace_query_string=True)):
out = self.http.request("GET", URL_200_QS)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag(http.METHOD) == "GET"
assert s.get_tag(http.STATUS_CODE) == "200"
assert s.get_tag(http.URL) == URL_200_QS
assert s.error == 0
assert s.span_type == "http"
assert s.get_tag(http.QUERY_STRING) == query_string
def test_post_500(self):
"""Test a request with method POST and expected status 500"""
out = self.http.request("POST", URL_500)
assert out.status == 500
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag(http.METHOD) == "POST"
assert s.get_tag(http.STATUS_CODE) == "500"
assert s.get_tag(http.URL) == URL_500
assert s.error == 1
def test_connection_retries(self):
"""Tests a connection error results in error spans with proper exc info"""
retries = 3
try:
self.http.request("GET", "http://fakesubdomain." + SOCKET, retries=retries)
except Exception:
pass
else:
assert 0, "expected error"
spans = self.pop_spans()
assert len(spans) == 4 # Default retry behavior is 3 retries + original request
for i, s in enumerate(spans):
assert s.get_tag(http.METHOD) == "GET"
if i > 0:
assert s.get_tag(http.RETRIES_REMAIN) == str(retries - i)
assert s.error == 1
assert "Failed to establish a new connection" in s.get_tag(errors.MSG)
assert "Failed to establish a new connection" in s.get_tag(errors.STACK)
assert "Traceback (most recent call last)" in s.get_tag(errors.STACK)
assert "urllib3.exceptions.MaxRetryError" in s.get_tag(errors.TYPE)
def test_default_service_name(self):
"""Test the default service name is set"""
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.service == "urllib3"
def test_user_set_service_name(self):
"""Test the user-set service name is set on the span"""
with self.override_config("urllib3", dict(split_by_domain=False)):
config.urllib3["service_name"] = "clients"
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.service == "clients"
def test_parent_service_name_split_by_domain(self):
"""
Tests the request span does not inherit the service name when
split_by_domain is set to True
"""
with self.override_config("urllib3", dict(split_by_domain=True)):
with self.tracer.trace("parent.span", service="web"):
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 2
s = spans[1]
assert s.name == "urllib3.request"
assert s.service == SOCKET
def test_parent_without_service_name(self):
"""Test that span with a parent with no service defaults to the hostname"""
with self.override_config("urllib3", dict(split_by_domain=True)):
with self.tracer.trace("parent.span"):
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 2
s = spans[1]
assert s.name == "urllib3.request"
assert s.service == SOCKET
def test_split_by_domain_remove_auth_in_url(self):
"""Tests that only the hostname is used as the default service name"""
with self.override_config("urllib3", dict(split_by_domain=True)):
out = self.http.request("GET", "http://user:pass@{}".format(SOCKET))
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.service == SOCKET
def test_split_by_domain_includes_port(self):
"""Test the port is included if not 80 or 443"""
with self.override_config("urllib3", dict(split_by_domain=True)):
with pytest.raises(Exception):
# Using a port the service is not listening on will throw an error, which is fine
self.http.request("GET", "http://httpbin.org:8000/hello", timeout=0.0001, retries=0)
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.error == 1
assert s.service == "httpbin.org:8000"
def test_200_ot(self):
"""OpenTracing version of test_200."""
ot_tracer = init_tracer("urllib3_svc", self.tracer)
with ot_tracer.start_active_span("urllib3_get"):
out = self.http.request("GET", URL_200)
assert out.status == 200
spans = self.pop_spans()
assert len(spans) == 2
ot_span, dd_span = spans
# confirm the parenting
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.name == "urllib3_get"
assert ot_span.service == "urllib3_svc"
assert dd_span.get_tag(http.METHOD) == "GET"
assert dd_span.get_tag(http.STATUS_CODE) == "200"
assert dd_span.error == 0
assert dd_span.span_type == "http"
def test_request_and_response_headers(self):
"""Tests that whitelisted request/response headers are added as span tags"""
self.http.request("GET", URL_200, headers={"my-header": "my_value"})
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag("http.request.headers.my-header") is None
assert s.get_tag("http.response.headers.access-control-allow-origin") is None
# Enabled when explicitly configured
with self.override_config("urllib3", {}):
config.urllib3.http.trace_headers(["my-header", "access-control-allow-origin"])
self.http.request("GET", URL_200, headers={"my-header": "my_value"})
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_tag("http.request.headers.my-header") == "my_value"
assert s.get_tag("http.response.headers.access-control-allow-origin") == "*"
def test_analytics_integration_default(self):
"""Tests the default behavior of analytics integration is disabled"""
r = self.http.request("GET", URL_200)
assert r.status == 200
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
def test_analytics_integration_disabled(self):
"""Test disabling the analytics integration"""
with self.override_config("urllib3", dict(analytics_enabled=False, analytics_sample_rate=0.5)):
self.http.request("GET", URL_200)
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
def test_analytics_integration_enabled(self):
"""Tests enabling the analytics integration"""
with self.override_config("urllib3", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
self.http.request("GET", URL_200)
spans = self.pop_spans()
assert len(spans) == 1
s = spans[0]
assert s.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5
def test_distributed_tracing_enabled(self):
"""Tests distributed tracing headers are passed by default"""
# Check that distributed tracing headers are passed down; raise an error rather than make the
# request since we don't care about the response at all
config.urllib3["distributed_tracing"] = True
with mock.patch(
"urllib3.connectionpool.HTTPConnectionPool._make_request", side_effect=ValueError
) as m_make_request:
with pytest.raises(ValueError):
self.http.request("GET", URL_200)
spans = self.pop_spans()
s = spans[0]
expected_headers = {
"x-datadog-trace-id": str(s.trace_id),
"x-datadog-parent-id": str(s.span_id),
"x-datadog-sampling-priority": "1",
}
m_make_request.assert_called_with(
mock.ANY, "GET", "/status/200", body=None, chunked=mock.ANY, headers=expected_headers, timeout=mock.ANY
)
def test_distributed_tracing_disabled(self):
"""Test with distributed tracing disabled does not propagate the headers"""
config.urllib3["distributed_tracing"] = False
with mock.patch(
"urllib3.connectionpool.HTTPConnectionPool._make_request", side_effect=ValueError
) as m_make_request:
with pytest.raises(ValueError):
self.http.request("GET", URL_200)
m_make_request.assert_called_with(
mock.ANY, "GET", "/status/200", body=None, chunked=mock.ANY, headers={}, timeout=mock.ANY
)
| 38.439276
| 119
| 0.606816
|
6962cf4215d8017950f35e451ef56952d710955b
| 368
|
py
|
Python
|
test/test_junosdev.py
|
jeffmca/ginpy
|
221651d9032e3ca5caee59ad821ec54fa21cf825
|
[
"Apache-2.0"
] | 4
|
2018-01-04T00:52:07.000Z
|
2021-03-18T15:13:23.000Z
|
test/test_junosdev.py
|
jeffmca/jcfg
|
221651d9032e3ca5caee59ad821ec54fa21cf825
|
[
"Apache-2.0"
] | 2
|
2018-01-15T18:21:13.000Z
|
2018-01-15T19:53:13.000Z
|
test/test_junosdev.py
|
jeffmca/ginpy
|
221651d9032e3ca5caee59ad821ec54fa21cf825
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import ginpy
from lxml import etree
class Test_JunosDev(unittest.TestCase):
def setUp(self):
self.jdev = ginpy.JunosDev("3a-4.appriss.net", "sysjeff")
def test_name(self):
self.assertEqual( self.jdev.name, "3a-4.appriss.net")
def test_get_config(self):
self.assertIsInstance( self.jdev.xmlconfig, etree._Element)
| 26.285714
| 67
| 0.703804
|
026c945fdb41df36cada9ef40e8acfd85d048ee3
| 1,369
|
py
|
Python
|
optical flow/optical_flow.py
|
magnusoy/OpenCV-Python-Applications
|
b6e41564d6b8007194c57c99469759a334959a76
|
[
"MIT"
] | 4
|
2019-09-24T10:20:12.000Z
|
2022-03-10T20:21:40.000Z
|
optical flow/optical_flow.py
|
magnusoy/OpenCV-Python-Applications
|
b6e41564d6b8007194c57c99469759a334959a76
|
[
"MIT"
] | null | null | null |
optical flow/optical_flow.py
|
magnusoy/OpenCV-Python-Applications
|
b6e41564d6b8007194c57c99469759a334959a76
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
# Create old frame
_, frame = cap.read()
old_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Lucas-Kanade optical flow parameters
lk_params = dict(winSize = (15, 15),
maxLevel = 4,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
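# The criteria tuple means: stop refining a tracked point after 10 iterations or once
# the search window moves by less than 0.03, whichever happens first.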
# Mouse function
def select_point(event, x, y, flags, params):
global point, point_selected, old_points
if event == cv2.EVENT_LBUTTONDOWN:
point = (x, y)
point_selected = True
old_points = np.array([[x, y]], dtype=np.float32)
cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame", select_point)
point_selected = False
point = ()
old_points = np.array([[]])
while True:
_, frame = cap.read()
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if point_selected is True:
cv2.circle(frame, point, 5, (0, 0, 255), 2)
new_points, status, error = cv2.calcOpticalFlowPyrLK(old_gray, gray_frame, old_points, None, **lk_params)
old_gray = gray_frame.copy()
old_points = new_points
x, y = new_points.ravel()
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 0), -1)  # OpenCV expects integer pixel coordinates
cv2.imshow("Frame", frame)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
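# Usage sketch (illustrative): run the script, left-click a point in the "Frame" window
# to start tracking it with Lucas-Kanade optical flow, and press Esc to quit.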
| 26.326923
| 114
| 0.608473
|
94b7c557c253161fe97d6101b944cc2db3a16be5
| 1,666
|
py
|
Python
|
usecase-4/usecase-4-step-8.py
|
ItWillBeBetter/data-protection
|
6db0f4c7089fe110af8065583959d7dc13dba45e
|
[
"MIT-0"
] | 93
|
2019-01-02T16:51:45.000Z
|
2022-02-13T07:29:36.000Z
|
usecase-4/usecase-4-step-8.py
|
mukhendra/data-protection
|
6a4629855b181a4e5fbfb690b9b22203ff7e1b2e
|
[
"MIT-0"
] | 2
|
2019-10-16T07:37:23.000Z
|
2021-06-07T10:38:37.000Z
|
usecase-4/usecase-4-step-8.py
|
mukhendra/data-protection
|
6a4629855b181a4e5fbfb690b9b22203ff7e1b2e
|
[
"MIT-0"
] | 54
|
2019-01-06T21:39:57.000Z
|
2022-02-25T09:01:05.000Z
|
"""
#####################################################################################
# #
# Using curl command to successfully authenticate webserver over a TLS connection #
# This is a very simple web app that prints hello world #
# #
#####################################################################################
"""
import os
import sys
import subprocess
import shlex
def main():
"""
######################################################################################
# 1. Running curl to hit the website #
# #
# 2.You will see in the run configuration window that this line gets printed : #
# curl: (60) Peer's Certificate issuer is not recognized. Why did this happen ? #
######################################################################################
"""
try:
command = "curl --verbose https://127.0.0.1:5000/"
command = shlex.split(command)
returned_output = subprocess.check_output(command)
print("\nStep-8 has been successfully completed \n")
except subprocess.CalledProcessError as e:
print("\nCertificate is not trusted - cannot validate server certificate")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
else:
exit(0)
if __name__ == "__main__":
main()
| 43.842105
| 90
| 0.37395
|
6cd759620c3667b7d810a39e31a7abecca523ede
| 1,130
|
py
|
Python
|
src/demo_hic_et_nunc/models.py
|
arrijabba/dipdup-py
|
fa90bfd889c473966e0d5aed98cec90a575fcb90
|
[
"MIT"
] | null | null | null |
src/demo_hic_et_nunc/models.py
|
arrijabba/dipdup-py
|
fa90bfd889c473966e0d5aed98cec90a575fcb90
|
[
"MIT"
] | null | null | null |
src/demo_hic_et_nunc/models.py
|
arrijabba/dipdup-py
|
fa90bfd889c473966e0d5aed98cec90a575fcb90
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from tortoise import Model, fields
class SwapStatus(IntEnum):
ACTIVE = 0
FINISHED = 1
CANCELED = 2
class Holder(Model):
address = fields.CharField(36, pk=True)
class Token(Model):
id = fields.BigIntField(pk=True)
creator = fields.ForeignKeyField('models.Holder', 'tokens')
supply = fields.BigIntField()
level = fields.BigIntField()
timestamp = fields.DatetimeField()
class Swap(Model):
id = fields.BigIntField(pk=True)
creator = fields.ForeignKeyField('models.Holder', 'swaps')
price = fields.BigIntField()
amount = fields.BigIntField()
amount_left = fields.BigIntField()
level = fields.BigIntField()
status = fields.IntEnumField(SwapStatus)
timestamp = fields.DatetimeField()
class Trade(Model):
id = fields.BigIntField(pk=True)
swap = fields.ForeignKeyField('models.Swap', 'trades')
seller = fields.ForeignKeyField('models.Holder', 'sales')
buyer = fields.ForeignKeyField('models.Holder', 'purchases')
amount = fields.BigIntField()
level = fields.BigIntField()
timestamp = fields.DatetimeField()
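# Illustrative query against these models (assumes an initialised Tortoise connection;
# `some_holder` is a hypothetical Holder instance, not part of the original file):
#   recent = await Trade.filter(seller=some_holder).order_by("-timestamp").limit(10)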
| 26.27907
| 64
| 0.697345
|
b5e19a2466e28af5ad29d9aa063db81c928d0cac
| 61
|
py
|
Python
|
python1.py
|
hir09/moonwalk
|
bd641693e5dbbce986f5fc6ae752b12010b76aa2
|
[
"Apache-2.0"
] | 6
|
2021-09-25T12:37:29.000Z
|
2022-01-14T15:29:10.000Z
|
python1.py
|
hir09/moonwalk
|
bd641693e5dbbce986f5fc6ae752b12010b76aa2
|
[
"Apache-2.0"
] | 1
|
2021-12-13T17:50:18.000Z
|
2021-12-13T17:50:18.000Z
|
python1.py
|
hir09/moonwalk
|
bd641693e5dbbce986f5fc6ae752b12010b76aa2
|
[
"Apache-2.0"
] | 7
|
2021-12-01T17:08:22.000Z
|
2022-02-26T17:47:50.000Z
|
print("successfully binding the python-script with Jenkins")
| 30.5
| 60
| 0.819672
|
49b56b5102c6a3d07b8ef06dee768989f04a8fbc
| 4,239
|
py
|
Python
|
cogs/help_command.py
|
RTa-technology/Satsuki
|
4e867a1b78552a616834503f0c3b7b977c053a0a
|
[
"MIT"
] | null | null | null |
cogs/help_command.py
|
RTa-technology/Satsuki
|
4e867a1b78552a616834503f0c3b7b977c053a0a
|
[
"MIT"
] | null | null | null |
cogs/help_command.py
|
RTa-technology/Satsuki
|
4e867a1b78552a616834503f0c3b7b977c053a0a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Optional
from discord import Embed
import discord
from discord.ext import commands
from discord.ext.commands.core import Group
from discord.ext.commands import Cog, command
from discord.ext.menus import ListPageSource, MenuPages
from discord.utils import get
def syntax(command):
cmd_and_aliases = "|".join([str(command), *command.aliases])
params = []
sub_commands = []
sub_command_str = ''
for key, value in command.params.items():
if key not in ("self", "ctx"):
params.append(
f"[{key}]" if "NoneType" in str(value) else f"<{key}>")
if isinstance(command, Group):
for sub_command in command.commands:
sub_command_str = f'{sub_command.name.ljust(7)} | {" ".join(sub_command.aliases)}'
for key, value in sub_command.params.items():
if key not in ("self", "ctx"):
sub_command_arg = f"[{key}]" if "NoneType" in str(value) else f"<{key}>"
sub_command_str = f'{sub_command_str} {sub_command_arg}'
sub_commands.append(sub_command_str)
params = " ".join(params)
sub_commands = "\n - ".join(sub_commands)
if len(sub_commands) == 0:
return f"`{cmd_and_aliases} {params}`"
else:
return f"`{cmd_and_aliases} {params}`\n\nサブコマンド\n` - {sub_commands}`"
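# Illustrative output (hypothetical command): for a command named `ping` with alias `p`
# and a required parameter `target`, syntax() returns "`ping|p <target>`"; Group
# commands additionally get a "サブコマンド" (sub-command) listing appended.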
class HelpMenu(ListPageSource):
def __init__(self, ctx, data):
self.ctx = ctx
super().__init__(data, per_page=6)
async def write_page(self, menu, fields=[]):
offset = (menu.current_page * self.per_page) + 1
len_data = len(self.entries)
embed = Embed(title="コマンド一覧",
description=f"使用可能なコマンド : {self.ctx.author.mention}",
colour=self.ctx.author.colour)
embed.set_thumbnail(url=self.ctx.author.avatar.url)
embed.set_footer(
text=f"{offset:,} - {min(len_data, offset+self.per_page-1):,} of {len_data:,} commands.")
for name, value in fields:
embed.add_field(name=name, value=value, inline=False)
return embed
async def format_page(self, menu, entries):
fields = []
for entry in entries:
fields.append(
(entry.description or "No Description", syntax(entry)))
return await self.write_page(menu, fields)
class Help(Cog):
def __init__(self, bot):
self.bot = bot
self.bot.remove_command("help")
async def cmd_help(self, ctx, command):
embed = Embed(title=f"Help with `{command}` : {ctx.author.name}",
description=syntax(command),
colour=ctx.author.colour)
embed.add_field(name="Command description", value=command.help)
await ctx.send(embed=embed)
@ command(name="help", description='リアクション式ヘルプコマンド')
async def help(self, ctx, *args: str):
"""拡張版ヘルプコマンド"""
show_commands = [
my_command for my_command in self.bot.commands if not my_command.hidden]
show_commands = sorted(show_commands, key=lambda x: x.cog_name.lower())
if (arg_len := len(args)) == 0:
menu = MenuPages(source=HelpMenu(ctx, show_commands),
delete_message_after=False,
clear_reactions_after=True,
timeout=60.0)
await menu.start(ctx)
else:
if (command := get(self.bot.commands, name=args[0])):
if isinstance(command, Group):
if arg_len == 1:
await self.cmd_help(ctx, command)
else:
if (sub_command := get(
command.commands, name=args[1])):
await self.cmd_help(ctx, sub_command)
else:
await ctx.send("**Command error**: `subcommand not found`")
else:
await self.cmd_help(ctx, command)
else:
await ctx.send("**Command error**: `command not found`")
def setup(bot):
bot.add_cog(Help(bot))
| 33.117188
| 101
| 0.567115
|
e9e8090e3c5aab85920c783762c46293cb932237
| 10,964
|
py
|
Python
|
Configs/explore_configs/S13_explore_BT_No_A1c.py
|
yochaiedlitz/T2DM_UKB_predictions
|
1e6b22e3d51d515eb065d7d5f46408f86f33d0b8
|
[
"MIT"
] | 1
|
2022-01-17T13:13:02.000Z
|
2022-01-17T13:13:02.000Z
|
Configs/explore_configs/S13_explore_BT_No_A1c.py
|
yochaiedlitz/T2DM_UKB_predictions
|
1e6b22e3d51d515eb065d7d5f46408f86f33d0b8
|
[
"MIT"
] | null | null | null |
Configs/explore_configs/S13_explore_BT_No_A1c.py
|
yochaiedlitz/T2DM_UKB_predictions
|
1e6b22e3d51d515eb065d7d5f46408f86f33d0b8
|
[
"MIT"
] | null | null | null |
import collections # Used for ordered dictionary
from PRS import PRS_sumstats
from UKBB_Functions import PROBA_FOLDER
import sys
Top_Gen_Dict = PRS_sumstats.Get_Top_Gen_Dict()
Hyp_Param_Dict_A = collections.OrderedDict()
Hyp_Param_Dict_R = collections.OrderedDict()
# TRAIN_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_train.csv'
# TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_test.csv'
TRAIN_PATH=Imputed_TRAIN_TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv'
TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv'
# ['Diabetes_all','Age_and_Sex','Anthropometry','Blood_Tests','BP_and_HR',
# 'Diet','Early_Life_Factors','Family_and_Ethnicity','Lifestyle_and_physical_activity','Medication',
# 'Mental_health','Non_Diabetes_Diagnosis','Physical_health','Socio_demographics','HbA1c']
ALL_TEST_AS_VAL = True
BASIC_JOB_NAME = ['BT_No_A1c']#['Mental_health','Non_Diabetes_Diagnosis','Physical_health','Socio_demographics','HbA1c']
BASIC_PROB_BASED_JOB_NAME = ["Val_" + x for x in BASIC_JOB_NAME]
Sub_Class_array = ["All"] # "All",, "All"
Job_ID = ["2443-0.0"]
RET_FEAT_file_names = BASIC_JOB_NAME
feat_list_folder = "Diabetes_Features_lists/For_article/"  # Folder where the feature lists are located
FEAT_file_names = [
"Diabetes_Features_0705"] # Diabetes_Features.csv,Diabetes_Features_No_Baseline.csv,Baseline_Features.csv,Diabetes_Features_Lifestyle.csv,Diabetes_Features_No_Baseline.csv, Full_Diabetes_Features # "Diabetes_Features.csv","Diabetes_Features.csv","Diabetes_Features.csv",BMI_Features_Lifestyle.csv
# Features File name without ending
# Features File name without ending
FEAT_PATH = [feat_list_folder + x + ".csv" for x in FEAT_file_names]
RET_FEAT_PATH = [feat_list_folder + x + ".csv" for x in RET_FEAT_file_names]
#
# Data_Job_Names = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer", "4041-0.0": "Gestational diabetes","21001-0.0":'BMI'}
CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
"Type of special diet followed": "All"}
DISEASE_PROBA_DICT = {"Diabetes Probabilities": PROBA_FOLDER + "Diabetes_OnlyPROB.csv",
"CVD Probabilities": PROBA_FOLDER + "Vascular_OnlyPROB.csv",
"Cancer Probabilities": PROBA_FOLDER + "Cancer_OnlyPROB.csv"}
# PRS_COLS -Adding PRS -Only final score for each phenotype for each user
PRS_COLS = ['PRS_MAGIC_HbA1C', 'PRS_cigs_per_day', 'PRS_MAGIC_Scott_FG', 'PRS_ln_HOMA-IR', 'PRS_MAGIC_Scott_FI',
'PRS_height', 'PRS_Manning_FI', 'PRS_Leptin_BMI', 'PRS_cardio', 'PRS_triglycerides',
'PRS_Manning_FG', 'PRS_anorexia', 'PRS_Magic_2hrGlucose', 'PRS_Non_Diabetic_glucose2', 'PRS_ever_smoked',
'PRS_age_smoke', 'PRS_MAGIC_fastingProinsulin', 'PRS_Leptin_Unadjusted_BMI',
'PRS_MAGIC_Scott_FI_adjBMI', 'PRS_MAGIC_Scott_2hGlu', 'PRS_glucose_iris', 'PRS_ln_FastingInsulin',
'PRS_bmi', 'PRS_overweight', 'PRS_hba1c', 'PRS_alzheimer', 'PRS_whr', 'PRS_ln_HOMA-B',
'PRS_ldl', 'PRS_obesity_class2', 'PRS_obesity_class1', 'PRS_diabetes_BMI_Unadjusted',
'PRS_Manning_BMI_ADJ_FG', 'PRS_waist', 'PRS_ashtma', 'PRS_HBA1C_ISI', 'PRS_HbA1c_MANTRA',
'PRS_diabetes_BMI_Adjusted', 'PRS_Heart_Rate', 'PRS_Manning_BMI_ADJ_FI', 'PRS_cholesterol', 'PRS_hdl',
'PRS_FastingGlucose', 'PRS_hips']
# Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA','t2d_mega_meta',"MAGIC_Scott_FG","triglycerides",'Magic_2hrGlucose','Manning_Fasting_Insulin'] #Keep empty if None
Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA', 't2d_mega_meta', "MAGIC_Scott_FG", 'Magic_2hrGlucose',
'bmi', 'anorexia', 'cardio', 'hips', 'waist', "overweight", 'obesity_class1',
'obesity_class2',
"ever_smoked", "hdl", "ldl", 'triglycerides', 'cholesterol',
'diabetes_BMI_Unadjusted',
'diabetes_BMI_Adjusted', 'FastingGlucose', 'ln_HOMA-B', 'ln_HOMA-IR',
'ln_FastingInsulin',
'Leptin_BMI', 'Leptin_Unadjusted_BMI', 'Heart_Rate', 'MAGIC_fastingProinsulin',
'MAGIC_Scott_FI_adjBMI', 'MAGIC_Scott_FI', 'MAGIC_HbA1C', 'Manning_FG',
'Manning_BMI_ADJ_FG',
'Manning_Fasting_Insulin', 'Manning_BMI_ADJ_FI', 'HBA1C_ISI'] #
USE_FAKE_QUE = False
NROWS = None # 1-500000 or None
NROWS_RETURN = None # How many returning participants to load
Split = True  # Whether or not to split the data into train and test; should be False only for final testing
Logistic_regression = False  # Should be LR for linear regression or LGBM for trees
Use_imp_flag=True
DEBUG = False
USE_PROBA = True  # Whether to calculate probabilities when working on all participants, or to use probabilities
# already calculated when working with returning participants
USE_PRS = False  # whether to use PRS results
Use_SNPs = False
NFOLD = 5
Choose_N_Fold = 3  # How many CV folds to run for the initial cross-validation when choosing the hyperparameters
Basic_HYP_PAR_ITER = 20
Prob_HYP_PAR_ITER = 100
MEM = '30G'
N_THREADS = 10
P_THREADS = 2
Calc_Base_Prob = False
CALC_SHAP = True # Whether or not to calculate the SHAP values for the basic probabilities
SORT = True # Used mostly for debugging to activate the SORT_AUC_APS function
# Refit_model - path to model to be refitted in the first visit
Refit_Model = None # '/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/Diabetes_shap_model.txt'#None##Name of the model to be refitted or None
# /net/mraid08/export/jafar/Yochai/UKBB_Runs/AF_To_refit2_Diabetes/Diabetes_Results
Finalize_Only = False
Calc_Prob_Based_Prob = True
RE_USE_PROBA = False
Calc_Transfer_Learning = False  # Used when we would like to refit several base models and not a specific model
REFIT_SERIAL_MODELS = False  # Whether to refit a model folder just made in the previous step, or use a predefined folder
# Refit_Return_Model_Path - path to model to be refitted in the first visit
Refit_Return_Model_Path = None # '/net/mraid08/export/jafar/Yochai/UKBB_Runs/mock_refit/Diabetes_Results/'#'/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/'#None#
HowHow = "left"  # "inner" - take only participants who have probabilities for the other diseases as well, "left" - take all
CALC_P_SHAP = True  # Whether or not to calculate the SHAP values for the prob-based predictions
SORT_Prob = True
Finalize_Prob_Based_Only = False
if REFIT_SERIAL_MODELS or Refit_Return_Model_Path:
Refit_Returned = True
else:
Refit_Returned = False
VISITS = [0, 1, 2] # [0,1,2]
NUM_OF_DEP_PLOT = 10
Lite = False # Used for debug
Thresh_in_Column = 0.7
Thresh_in_Row = 0.7
# CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
# "Type of special diet followed": "All"}
CHARAC_ID = {"Age at last visit": "21022-0.0", "Sex": "31-0.0", "Ethnic background": "21000-0.0",
"Type of special diet followed": "20086-0.0"}
ETHNIC_CODE = {-3: "Prefer not to answer", -1: "Do not know", 1: "White", 2: "Mixed", 3: "Asian",
4: "Black or Black British", 5: "Chinese", 6: "Other ethnic group", 1001: "British", 1002: "Irish",
1003: "Any other white background", 2001: "White and Black Caribbean",
2002: "White and Black African", 2003: "White and Asian", 2004: "Any other mixed background",
3001: "Indian", 3002: "Pakistani", 3003: "Bangladeshi", 3004: "Any other Asian background",
4001: "Caribbean", 4002: "African", 4003: "Any other Black background"}
SEX_CODE = {"Female": 0, "Male": 1}
DIET_CODE = {"Gluten-free": 8, "Lactose-free": 9, "Low calorie": 10, "Vegetarian": 11, "Vegan": 12, "Other": 13}
Job_name_dict = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer",
"4041-0.0": "Gestational diabetes",
"21001-0.0": 'BMI'} # ,"Diabetes", "Cancer", "Gestational diabetes","Vascular"
No_symp_dict = {"6150-0.0": -7, "2443-0.0": 0, '2453-0.0': 0, '21001-0.0': "nan"}
# Hyp_Param_Dict_A['max_depth']=[2,4,8,16]
Hyp_Param_Dict_A['num_leaves'] = [4, 8, 16, 32, 64, 128, 256]
Hyp_Param_Dict_A['is_unbalance'] = [True]
Hyp_Param_Dict_A['objective'] = ['binary']
Hyp_Param_Dict_A['boosting_type'] = ['gbdt'] # ,'rf','dart','goss'
Hyp_Param_Dict_A['metric'] = ["auc"] # MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_A['num_boost_round'] = [10, 50, 100, 250, 500, 1000] # ,1000, 2000, 4000, 8000
Hyp_Param_Dict_A['learning_rate'] = [0.005, 0.01, 0.05, 0.1]
Hyp_Param_Dict_A["min_child_samples"] = [10, 25, 50, 250, 500]
Hyp_Param_Dict_A["subsample"] = [0.1, 0.25, 0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_A["colsample_bytree"] = [0.03, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_A["boost_from_average"] = [True]
Hyp_Param_Dict_A['num_threads'] = [N_THREADS]
Hyp_Param_Dict_A['lambda_l1'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['lambda_l2'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_A['bagging_fraction'] = [0.25, 0.5, 0.75, 1]
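# These grids are typically consumed by a randomised hyper-parameter search; an
# illustrative draw (not part of this config) would be:
#   params = {k: random.choice(v) for k, v in Hyp_Param_Dict_A.items()}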
# Hyp_Param_Dict_R['max_depth']=[2,4,8,16]
Hyp_Param_Dict_R['num_leaves'] = [2, 4, 8, 16, 32, 64, 128]
Hyp_Param_Dict_R['is_unbalance'] = [True]
Hyp_Param_Dict_R['objective'] = ['binary']
Hyp_Param_Dict_R['boosting_type'] = ['gbdt']
Hyp_Param_Dict_R['metric'] = [
"auc"] # MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_R['num_boost_round'] = [50, 100, 250, 500, 1000] # ,,1000, 2000, 4000, 8000
Hyp_Param_Dict_R['verbose'] = [-1]
Hyp_Param_Dict_R['learning_rate'] = [0.005, 0.01, 0.05]
Hyp_Param_Dict_R["min_child_samples"] = [5, 10, 25, 50]
Hyp_Param_Dict_R["subsample"] = [0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_R["colsample_bytree"] = [0.01, 0.05, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_R["boost_from_average"] = [True]
Hyp_Param_Dict_R['num_threads'] = [P_THREADS]
Hyp_Param_Dict_R['lambda_l1'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_R['lambda_l2'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_R['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_R['bagging_fraction'] = [0.5, 0.75, 1]
Select_Traits_Gen = {}
for name in Select_Top_Traits_Gen_arr_names:
Select_Traits_Gen[name] = Top_Gen_Dict[name]
if (len(BASIC_JOB_NAME) != len(Sub_Class_array) or (len(BASIC_JOB_NAME) != len(Sub_Class_array)) or
(len(BASIC_JOB_NAME) != len(Job_ID))):
sys.exit("BASIC_JOB_NAME,Sub_Class_array and Job_ID should be same size")
| 59.912568
| 301
| 0.705764
|
667ee1028ad4f21a2d52a2c78280cd9fba1ce378
| 5,561
|
py
|
Python
|
tests/cephfs/cephfs_basic_tests.py
|
anrao19/cephci
|
0ec20acc108750f44a66dcf1c512922a91570f3e
|
[
"MIT"
] | null | null | null |
tests/cephfs/cephfs_basic_tests.py
|
anrao19/cephci
|
0ec20acc108750f44a66dcf1c512922a91570f3e
|
[
"MIT"
] | null | null | null |
tests/cephfs/cephfs_basic_tests.py
|
anrao19/cephci
|
0ec20acc108750f44a66dcf1c512922a91570f3e
|
[
"MIT"
] | null | null | null |
import logging
import random
import string
import traceback
from ceph.ceph import CommandFailed
from tests.cephfs.cephfs_utilsV1 import FsUtils
logger = logging.getLogger(__name__)
log = logger
def run(ceph_cluster, **kw):
try:
fs_util = FsUtils(ceph_cluster)
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
clients = ceph_cluster.get_ceph_objects("client")
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
mounting_dir = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(10))
)
fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}/"
fs_util.fuse_mount(clients, fuse_mounting_dir)
mount_test_case(clients, fuse_mounting_dir)
kernel_mounting_dir = f"/mnt/cephfs_kernel{mounting_dir}/"
mon_node_ips = fs_util.get_mon_node_ips()
fs_util.kernel_mount(clients, kernel_mounting_dir, ",".join(mon_node_ips))
mount_test_case(clients, kernel_mounting_dir)
log.info("Cleaning up!-----")
rc = fs_util.client_clean_up(
[],
clients,
kernel_mounting_dir,
"umount",
)
if rc != 0:
raise CommandFailed("fuse clients cleanup failed")
log.info("Fuse clients cleaned up successfully")
rc = fs_util.client_clean_up(
clients,
[],
fuse_mounting_dir,
"umount",
)
if rc != 0:
raise CommandFailed("kernel clients cleanup failed")
log.info("kernel clients cleaned up successfully")
return 0
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
def mount_test_case(clients, mounting_dir):
try:
tc1 = "11293"
tc2 = "11296"
tc3 = "11297"
tc4 = "11295"
dir1 = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
dir2 = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
dir3 = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
results = []
return_counts = []
log.info("Create files and directories of 1000 depth and 1000 breadth")
for client in clients:
client.exec_command(
cmd=f"sudo mkdir -p {mounting_dir}{dir1} {mounting_dir}{dir2} {mounting_dir}{dir3}"
)
log.info(f"Execution of testcase {tc1} started")
out, rc = client.exec_command(
sudo=True,
cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
f"{mounting_dir}{dir1}",
long_running=True,
)
log.info(f"Execution of testcase {tc1} ended")
results.append(f"TC {tc1} passed")
log.info(f"Execution of testcase {tc2} started")
client.exec_command(
cmd=f"sudo cp -r {mounting_dir}{dir1}/* {mounting_dir}{dir2}/"
)
client.exec_command(
cmd=f"diff -qr {mounting_dir}{dir1} {mounting_dir}{dir2}/"
)
log.info(f"Execution of testcase {tc2} ended")
results.append(f"TC {tc2} passed")
log.info(f"Execution of testcase {tc3} started")
client.exec_command(
cmd=f"sudo mv -t {mounting_dir}{dir1}/* {mounting_dir}{dir2}/"
)
log.info(f"Execution of testcase {tc3} ended")
results.append(f"TC {tc3} passed")
log.info(f"Execution of testcase {tc4} started")
for client in clients:
if client.pkg_type != "deb":
client.exec_command(
cmd=f"sudo dd if=/dev/zero of={mounting_dir}{client.node.hostname}.txt bs=100M "
"count=5"
)
out1, rc1 = client.exec_command(
cmd=f"sudo ls -c -ltd -- {mounting_dir}{client.node.hostname}.*"
)
client.exec_command(
cmd=f"sudo dd if=/dev/zero of={mounting_dir}{client.node.hostname}.txt bs=200M "
"count=5"
)
out2, rc2 = client.exec_command(
cmd=f"sudo ls -c -ltd -- {mounting_dir}{client.node.hostname}.*"
)
a = out1.read().decode()
b = out2.read().decode()
if a != b:
return_counts.append(out1.channel.recv_exit_status())
return_counts.append(out2.channel.recv_exit_status())
else:
raise CommandFailed("Metadata info command failed")
break
log.info(f"Execution of testcase {tc4} ended")
log.info(return_counts)
rc_set = set(return_counts)
if len(rc_set) == 1:
results.append(f"TC {tc4} passed")
log.info("Testcase Results:")
for res in results:
log.info(res)
break
except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
| 37.073333
| 119
| 0.539112
|
2d436b32d3291a9d37cd3e9e0331141e1f1ecb9d
| 550
|
py
|
Python
|
src/utsc/core/yaml/anchor.py
|
utsc-networking/utsc-tools
|
d5bc10cf825f1be46999d5a42da62cc0df456f0c
|
[
"MIT"
] | null | null | null |
src/utsc/core/yaml/anchor.py
|
utsc-networking/utsc-tools
|
d5bc10cf825f1be46999d5a42da62cc0df456f0c
|
[
"MIT"
] | null | null | null |
src/utsc/core/yaml/anchor.py
|
utsc-networking/utsc-tools
|
d5bc10cf825f1be46999d5a42da62cc0df456f0c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from typing import TYPE_CHECKING
if TYPE_CHECKING: # MYPY
from typing import Any, Dict, Optional, List, Union, Iterator # NOQA
anchor_attrib = "_yaml_anchor"
class Anchor:
__slots__ = "value", "always_dump"
attrib = anchor_attrib
def __init__(self):
# type: () -> None
self.value = None
self.always_dump = False
def __repr__(self):
# type: () -> Any
ad = ", (always dump)" if self.always_dump else ""
return "Anchor({!r}{})".format(self.value, ad)
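# Illustrative behaviour: an Anchor whose value is 'id001' and whose always_dump flag is
# True reprs as "Anchor('id001', (always dump))".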
| 23.913043
| 83
| 0.616364
|
49919c123ec4a48a5d0308e1b6b05e748ddd3922
| 5,048
|
py
|
Python
|
tests/unit/beacons/test_telegram_bot_msg.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/beacons/test_telegram_bot_msg.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/beacons/test_telegram_bot_msg.py
|
fake-name/salt
|
d8f04936e4407f51946e32e8166159778f6c31a5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Python libs
from __future__ import absolute_import
import datetime
import logging
# Salt libs
from salt.beacons import telegram_bot_msg
# Salt testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
# Third-party libs
try:
import telegram
HAS_TELEGRAM = True
except ImportError:
HAS_TELEGRAM = False
log = logging.getLogger(__name__)
@skipIf(not HAS_TELEGRAM, "telegram is not available")
class TelegramBotMsgBeaconTestCase(TestCase, LoaderModuleMockMixin):
"""
Test case for salt.beacons.telegram_bot_msg
"""
def setup_loader_modules(self):
return {telegram_bot_msg: {}}
def test_validate_empty_config(self, *args, **kwargs):
ret = telegram_bot_msg.validate(None)
self.assertEqual(
ret, (False, ("Configuration for telegram_bot_msg beacon must be a list.")),
)
def test_validate_missing_accept_from_config(self, *args, **kwargs):
ret = telegram_bot_msg.validate([{"token": "bcd"}])
self.assertEqual(
ret,
(False, ("Not all required configuration for telegram_bot_msg are set."),),
)
def test_validate_missing_token_config(self, *args, **kwargs):
ret = telegram_bot_msg.validate([{"accept_from": []}])
self.assertEqual(
ret,
(False, ("Not all required configuration for telegram_bot_msg are set."),),
)
def test_validate_config_not_list_in_accept_from(self, *args, **kwargs):
ret = telegram_bot_msg.validate(
[{"token": "bcd", "accept_from": {"nodict": "1"}}]
)
self.assertEqual(
ret,
(
False,
(
"Configuration for telegram_bot_msg, "
"accept_from must be a list of "
"usernames."
),
),
)
def test_validate_valid_config(self, *args, **kwargs):
ret = telegram_bot_msg.validate([{"token": "bcd", "accept_from": ["username"]}])
self.assertEqual(ret, (True, "Valid beacon configuration."))
def test_call_no_updates(self):
with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
token = "abc"
config = [{"token": token, "accept_from": ["tester"]}]
inst = MagicMock(name="telegram.Bot()")
telegram_api.Bot = MagicMock(name="telegram", return_value=inst)
inst.get_updates.return_value = []
ret = telegram_bot_msg.validate(config)
self.assertEqual(ret, (True, "Valid beacon configuration"))
ret = telegram_bot_msg.beacon(config)
telegram_api.Bot.assert_called_once_with(token)
self.assertEqual(ret, [])
def test_call_telegram_return_no_updates_for_user(self):
with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
token = 'abc'
username = 'tester'
config = [{
'token': token,
'accept_from': [username]
}]
inst = MagicMock(name='telegram.Bot()')
telegram_api.Bot = MagicMock(name='telegram', return_value=inst)
log.debug('telegram %s', telegram)
username = 'different_user'
user = telegram.user.User(id=1, first_name='', username=username)
chat = telegram.chat.Chat(1, 'private', username=username)
date = datetime.datetime(2016, 12, 18, 0, 0)
message = telegram.message.Message(1, user, date=date, chat=chat)
update = telegram.update.Update(update_id=1, message=message)
inst.get_updates.return_value = [update]
ret = telegram_bot_msg.validate(config)
self.assertEqual(ret, (True, "Valid beacon configuration"))
ret = telegram_bot_msg.beacon(config)
telegram_api.Bot.assert_called_once_with(token)
self.assertEqual(ret, [])
def test_call_telegram_returning_updates(self):
with patch("salt.beacons.telegram_bot_msg.telegram") as telegram_api:
token = "abc"
username = "tester"
config = [{"token": token, "accept_from": [username]}]
inst = MagicMock(name="telegram.Bot()")
telegram_api.Bot = MagicMock(name="telegram", return_value=inst)
user = telegram.User(id=1, first_name="", username=username)
chat = telegram.Chat(1, "private", username=username)
date = datetime.datetime(2016, 12, 18, 0, 0)
message = telegram.Message(1, user, date=date, chat=chat)
update = telegram.update.Update(update_id=1, message=message)
inst.get_updates.return_value = [update]
            ret = telegram_bot_msg.validate(config)
            self.assertEqual(ret, (True, "Valid beacon configuration"))
            ret = telegram_bot_msg.beacon(config)
telegram_api.Bot.assert_called_once_with(token)
self.assertTrue(ret)
self.assertEqual(ret[0]["msgs"][0], message.to_dict())
| 35.801418
| 88
| 0.614501
|
ca4cf5fd6332c1c06019b69b9bd9719a6357539e
| 22,796
|
py
|
Python
|
tests/Hydro/KeplerDisk/TwoMatDisk.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 1
|
2020-10-21T01:56:55.000Z
|
2020-10-21T01:56:55.000Z
|
tests/Hydro/KeplerDisk/TwoMatDisk.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null |
tests/Hydro/KeplerDisk/TwoMatDisk.py
|
markguozhiming/spheral
|
bbb982102e61edb8a1d00cf780bfa571835e1b61
|
[
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | null | null | null |
#ATS:test(SELF, "--CRKSPH=False --n=50 --cfl=0.25 --Cl=1.0 --Cq=2.0 --filter=0 --nPerh=1.01 --balsaraCorrection=True --fractionPressureSupport=0.25 --serialDump=True --compatibleEnergy=False --goalTime=50", label="Kepler SPH balsara no-compat, nPerh=1.5 fp=0.05", np=20)
#ATS:test(SELF, "--CRKSPH=False --n=50 --cfl=0.25 --Cl=1.0 --Cq=2.0 --filter=0 --nPerh=1.01 --balsaraCorrection=True --fractionPressureSupport=0.25 --serialDump=True --compatibleEnergy=True --goalTime=50", label="Kepler SPH balsara w-compat, nPerh=1.5 fp=0.05", np=20)
#ATS:test(SELF, "--CRKSPH=False --n=50 --cfl=0.25 --Cl=1.0 --Cq=2.0 --filter=0 --nPerh=1.01 --balsaraCorrection=False --fractionPressureSupport=0.25 --serialDump=True --compatibleEnergy=False --goalTime=50", label="Kepler SPH no-compat, nPerh=1.5 fp=0.05", np=20)
#ATS:test(SELF, "--CRKSPH=False --n=50 --cfl=0.25 --Cl=1.0 --Cq=2.0 --filter=0 --nPerh=1.01 --balsaraCorrection=False --fractionPressureSupport=0.25 --serialDump=True --compatibleEnergy=True --goalTime=50", label="Kepler SPH w-compat, nPerh=1.5 fp=0.05", np=20)
#-------------------------------------------------------------------------------
# This test problem sets up a gas disk in a fixed potential from a softened
# point mass. The fractionPressureSupport parameter selects the ratio of
# pressure and rotational support in the disk. If all is working properly,
# the disk should be stable as initialized and should just rotate without any
# radial evolution.
#-------------------------------------------------------------------------------
from Spheral2d import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from findLastRestart import *
from math import *
import SpheralPointmeshSiloDump
# Load the mpi module if we're parallel.
import mpi
#mpi, rank, procs = mpi.loadmpi()
from GenerateNodeDistribution2d import *
title("2-D Keplerian disk with arbitrary pressure support.")
# serialDump helper for writing per-node ASCII dumps for external visualization
class sDump(object):
def __init__(self,nodeSet,directory):
self.nodeSet = nodeSet
self.directory = directory
def __call__(self, cycle, time, dt):
procs = mpi.procs
rank = mpi.rank
serialData = []
i,j = 0,0
for i in xrange(procs):
for nodeL in self.nodeSet:
if rank == i:
for j in xrange(nodeL.numInternalNodes):
serialData.append([nodeL.positions()[j],
3.0/(nodeL.Hfield()[j].Trace()),
nodeL.mass()[j],nodeL.massDensity()[j],
nodeL.specificThermalEnergy()[j]])
serialData = mpi.reduce(serialData,mpi.SUM)
if rank == 0:
f = open(self.directory + "/serialDump" + str(cycle) + ".ascii",'w')
for i in xrange(len(serialData)):
f.write("{0} {1} {2} {3} {4} {5} {6} {7}\n".format(i,serialData[i][0][0],serialData[i][0][1],0.0,serialData[i][1],serialData[i][2],serialData[i][3],serialData[i][4]))
f.close()
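# Illustration (based on the serialDump block near the end of this script): the dumper is wired
# in as periodic work on the controller, e.g.
#   dump = sDump([diskNodes1, diskNodes2], dataDir)
#   control.appendPeriodicWork(dump, serialDumpEach)
# Each ASCII row holds: index, x, y, z(=0), smoothing scale (3/Tr(H)), mass, mass density,
# specific thermal energy.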
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(asph = False,
n = 100,
thetaMin = 0.0,
thetaMax = 2.0*pi,
rmin = 0.0,
rmax = 3.0,
nPerh = 1.51,
# Properties of the central gravitating particle.
G0 = 1.0,
M0 = 1.0,
Rc = 0.5,
R0 = Vector(0.0, 0.0),
# Properties of the gas disk.
rho0 = 1.0,
rd0 = 10.0,
sig = 2.5,
Rcutoff = 0.5,
# Material properties of the gas.
polytropicIndex = 2.0,
mu = 1.0,
            SVPH = False,
            # Assumed defaults for the SVPH branch below; these names are referenced there
            # but were missing from the original option list.
            fcentroidal = 0.0,
            fcellPressure = 0.0,
            linearConsistent = False,
CRKSPH = False,
ASPH = False,
SPH = True, # This just chooses the H algorithm -- you can use this with CRKSPH for instance.
XSPH = False,
epsilonTensile = 0.0,
nTensile = 8,
# Hydro
Qconstructor = MonaghanGingoldViscosity2d,
#Qconstructor = TensorMonaghanGingoldViscosity2d,
KernelConstructor = NBSplineKernel,
order = 5,
boolReduceViscosity = False,
nh = 5.0,
aMin = 0.1,
aMax = 2.0,
Qhmult = 1.0,
boolCullenViscosity = False,
alphMax = 2.0,
alphMin = 0.02,
betaC = 0.7,
betaD = 0.05,
betaE = 1.0,
fKern = 1.0/3.0,
boolHopkinsCorrection = True,
Cl = 1.0,
Cq = 0.75,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-4,
negligibleSoundSpeed = 1e-5,
csMultiplier = 0.1,
correctionOrder = LinearOrder,
hmin = 0.004,
hmax = 0.5,
hminratio = 0.1,
compatibleEnergy = True,
gradhCorrection = False,
HEvolution = IdealH,
sumForMassDensity = RigorousSumDensity,
densityUpdate = RigorousSumDensity, # VolumeScaledDensity,
HUpdate = IdealH,
filter = 0.0,
volumeType = CRKSumVolume,
# Timestep constraints
cfl = 0.5,
deltaPhi = 0.01,
domainIndependent = False,
# Integrator and run time.
IntegratorConstructor = CheapSynchronousRK2Integrator,
steps = None,
goalTime = 10.0,
dt = 0.0001,
dtMin = 1.0e-5,
dtMax = 0.1,
dtGrowth = 2.0,
maxSteps = None,
statsStep = 10,
redistributeStep = None,
restartStep = 500,
restoreCycle = -1,
smoothIters = 0,
rigorousBoundaries = True,
dtverbose = False,
serialDump = False,
serialDumpEach = 100,
histFile = "history.ascii",
writeHistory = False,
historyInterval = 2.0,
clearDirectories = False,
dataDir = "twomat-%i",
outputFile = "None",
comparisonFile = "None",
vizCycle = None,
vizTime = 1.0,
vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState
)
polytropicConstant1 = G0*M0/(3.0*Rc*sqrt(rho0))
polytropicConstant2 = G0*M0/(3.0*Rc*sqrt(rho0*0.5))
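# Note: these match K = G*M/(3*rc*sqrt(rho0)) used by KeplerianPressureDiskProfile below, with
# rho0 halved for the second (outer) material so each EOS is consistent with its disk profile.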
# Decide on our hydro algorithm.
if SVPH:
if ASPH:
HydroConstructor = ASVPHFacetedHydro
else:
HydroConstructor = SVPHFacetedHydro
elif CRKSPH:
if ASPH:
HydroConstructor = ACRKSPHHydro
else:
HydroConstructor = CRKSPHHydro
Qconstructor = CRKSPHMonaghanGingoldViscosity
else:
if ASPH:
HydroConstructor = ASPHHydro
else:
HydroConstructor = SPHHydro
# Data output info.
dataDir = dataDir % n
viscString = "MG"
if balsaraCorrection:
viscString = "Balsara"
elif boolCullenViscosity:
viscString = "Cullen"
dataDir = os.path.join(dataDir, "CRK=%s-Visc=%s-nPerh=%f-compatible=%s-volume=%s" % (CRKSPH,viscString,nPerh,compatibleEnergy,volumeType))
dataDir = os.path.join(dataDir, "Cl=%f-Cq=%f" % (Cl,Cq))
restartBaseName = "%s/KeplerianDisk-n=%i" % (dataDir,
n)
vizDir = os.path.join(dataDir, "visit")
vizBaseName = "Kepler-disk-2d"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if not os.path.exists(dataDir):
os.makedirs(dataDir)
if not os.path.exists(vizDir):
os.makedirs(vizDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# Define a helper class that knows how to specify our requested radial profiles
# for rho, v, and eps.
#-------------------------------------------------------------------------------
class KeplerianPressureDiskProfile:
def __init__(self,G,M,n,rc,rho0):
self.G = G
self.M = M
self.GM = G*M
self.gamma = (n+1.0)/n
self.rc = rc
self.rho0 = rho0
self.K = G*M/(3.0*rc*sqrt(rho0))
return
def rho(self,r):
a = self.GM*(self.gamma-1.0)/(self.K*self.gamma*sqrt(r**2+self.rc**2))
return pow(a,1.0/(self.gamma-1.0))
def pressure(self,r):
return self.K*self.rho(r)**self.gamma
def __call__(self,r):
return self.rho(r)
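# Quick sanity check (illustrative numbers only): with G=M=1, n=2, rc=0.5, rho0=1 this gives
# gamma = (n+1)/n = 1.5 and K = 1/(3*0.5) = 2/3, so rho(r) = (GM*(gamma-1)/(K*gamma*sqrt(r**2+rc**2)))**2.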
#-------------------------------------------------------------------------------
# Create a polytrope for the equation of state.
#-------------------------------------------------------------------------------
eos1 = PolytropicEquationOfStateMKS(polytropicConstant1,
polytropicIndex, mu)
eos2 = PolytropicEquationOfStateMKS(polytropicConstant2,
polytropicIndex, mu)
#-------------------------------------------------------------------------------
# Create our interpolation kernels -- one for normal hydro interactions, and
# one for use with the artificial viscosity
#-------------------------------------------------------------------------------
WT = TableKernel(KernelConstructor(order), 1000)
WTPi = TableKernel(KernelConstructor(order), 1000)
output('WT')
output('WTPi')
#-------------------------------------------------------------------------------
# Create the NodeLists and distribute their nodes.
#-------------------------------------------------------------------------------
diskNodes1 = makeFluidNodeList("diskNodes1", eos1,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh)
diskNodes2 = makeFluidNodeList("diskNodes2", eos2,
hmin = hmin,
hmax = hmax,
hminratio = hminratio,
nPerh = nPerh)
output("diskNodes1")
output("diskNodes1.hmin")
output("diskNodes1.hmax")
output("diskNodes1.hminratio")
output("diskNodes1.nodesPerSmoothingScale")
#output("diskNodes.epsilonTensile")
#output("diskNodes.nTensile")
#output("diskNodes.XSPH")
# Construct the neighbor object and associate it with the node list.
#neighbor1 = TreeNeighbor(diskNodes1,
# kernelExtent = WT.kernelExtent)
#diskNodes1.registerNeighbor(neighbor1)
#diskNodes2.registerNeighbor(neighbor2)
# Build the radial profile object that knows how to create the keplerian disk
# profile.
diskProfile1 = KeplerianPressureDiskProfile(G0, M0, polytropicIndex, Rc, rho0)
diskProfile2 = KeplerianPressureDiskProfile(G0, M0, polytropicIndex, Rc, rho0*0.5)
# Set node positions, masses, and H's for this domain.
from VoronoiDistributeNodes import distributeNodes2d as distributeNodes
print "Generating node distribution."
generator1 = GenerateNodesMatchingProfile2d(n*0.25, diskProfile1,
rmin = rmin,
rmax = rmax*0.25,
thetaMin = thetaMin,
thetaMax = thetaMax,
nNodePerh = nPerh)
n1 = generator1.globalNumNodes()
generator2 = GenerateNodesMatchingProfile2d(n*0.75, diskProfile2,
rmin = rmax*0.27,
rmax = rmax,
thetaMin = thetaMin,
thetaMax = thetaMax,
nNodePerh = nPerh,
m0 = generator1.m0)
n1 = generator1.globalNumNodes()
n2 = generator2.globalNumNodes()
print "Distributing nodes amongst processors."
distributeNodes((diskNodes1, generator1),(diskNodes2,generator2))
output('mpi.reduce(diskNodes1.numInternalNodes, mpi.MIN)')
output('mpi.reduce(diskNodes1.numInternalNodes, mpi.MAX)')
output('mpi.reduce(diskNodes1.numInternalNodes, mpi.SUM)')
# Loop over the nodes, and set the specific energies and velocities.
for nodes in [diskNodes1,diskNodes2]:
for i in xrange(nodes.numInternalNodes):
r = nodes.positions()[i].magnitude()
#nodes.specificThermalEnergy()[i] = diskProfile.eps(r)
#-------------------------------------------------------------------------------
# Set an external pressure on the disk equivalent to the pressure at the
# cutoff radius.
#-------------------------------------------------------------------------------
externalPressure = eos2.polytropicConstant*diskProfile2.rho(1.01*rmax)**eos2.gamma_
eos2.externalPressure = externalPressure
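# i.e. P_ext is the polytropic pressure K2*rho2(r)**gamma evaluated just outside the disk edge
# (r = 1.01*rmax), applied as a confining external pressure on the outer material.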
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output('db')
output('db.appendNodeList(diskNodes1)')
output('db.appendNodeList(diskNodes2)')
output('db.numNodeLists')
output('db.numFluidNodeLists')
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq)
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
q.epsilon2 = epsilon2
q.negligibleSoundSpeed = negligibleSoundSpeed
q.csMultiplier = csMultiplier
output('q')
output('q.Cl')
output('q.Cq')
output('q.limiter')
output('q.epsilon2')
output('q.negligibleSoundSpeed')
output('q.csMultiplier')
output('q.balsaraShearCorrection')
#-------------------------------------------------------------------------------
# Create the gravity physics object.
#-------------------------------------------------------------------------------
gravity = PointPotential(G0, M0, Rc, R0)
gravity.deltaPotentialFraction = deltaPhi
output("gravity.G")
output("gravity.mass")
output("gravity.coreRadius")
output("gravity.origin")
output("gravity.deltaPotentialFraction")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if SVPH:
hydro = HydroConstructor(W = WT,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
densityUpdate = densityUpdate,
XSVPH = XSPH,
linearConsistent = linearConsistent,
generateVoid = False,
HUpdate = HUpdate,
fcentroidal = fcentroidal,
fcellPressure = fcellPressure,
xmin = Vector(-2.0, -2.0),
xmax = Vector(3.0, 3.0))
# xmin = Vector(x0 - 0.5*(x2 - x0), y0 - 0.5*(y2 - y0)),
# xmax = Vector(x2 + 0.5*(x2 - x0), y2 + 0.5*(y2 - y0)))
elif CRKSPH:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
filter = filter,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
XSPH = XSPH,
densityUpdate = densityUpdate,
correctionOrder = correctionOrder,
volumeType = volumeType,
HUpdate = HUpdate)
else:
hydro = HydroConstructor(W = WT,
WPi = WTPi,
Q = q,
cfl = cfl,
compatibleEnergyEvolution = compatibleEnergy,
gradhCorrection = gradhCorrection,
XSPH = XSPH,
densityUpdate = densityUpdate,
HUpdate = HUpdate,
epsTensile = epsilonTensile,
nTensile = nTensile)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the MMRV physics object.
#-------------------------------------------------------------------------------
if boolReduceViscosity:
evolveReducingViscosityMultiplier = MorrisMonaghanReducingViscosity(q,nh,aMin,aMax)
packages.append(evolveReducingViscosityMultiplier)
elif boolCullenViscosity:
evolveCullenViscosityMultiplier = CullenDehnenViscosity(q,WTPi,alphMax,alphMin,betaC,betaD,betaE,fKern,boolHopkinsCorrection)
packages.append(evolveCullenViscosityMultiplier)
#-------------------------------------------------------------------------------
# Construct a time integrator, and add the physics packages.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
integrator.appendPhysicsPackage(gravity)
for p in packages:
    integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
integrator.dtMin = dtMin
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
integrator.domainDecompositionIndependent = domainIndependent
integrator.verbose = dtverbose
integrator.rigorousBoundaries = rigorousBoundaries
# Blago! Currently a problem with periodic boundaries.
integrator.cullGhostNodes = False
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
output("integrator.domainDecompositionIndependent")
output("integrator.rigorousBoundaries")
output("integrator.verbose")
#-------------------------------------------------------------------------------
# Build the controller to run the simulation.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
redistributeStep = redistributeStep,
restartStep = restartStep,
restartBaseName = restartBaseName,
vizMethod = vizMethod,
vizBaseName = vizBaseName,
vizDir = vizDir,
vizStep = vizCycle,
vizTime = vizTime,
vizDerivs = True,
restoreCycle = restoreCycle)
if serialDump:
dump = sDump([diskNodes1,diskNodes2],dataDir)
control.appendPeriodicWork(dump,serialDumpEach)
output('control')
#-------------------------------------------------------------------------------
# Function to measure the angular momentum and radial coordinate.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
if steps is None:
control.advance(goalTime)
else:
control.step(steps)
if outputFile != "None":
outputFile = os.path.join(dataDir, outputFile)
from SpheralGnuPlotUtilities import multiSort
P1 = ScalarField("pressure",diskNodes1)
P2 = ScalarField("pressure",diskNodes2)
diskNodes1.pressure(P1)
diskNodes2.pressure(P2)
xprof1 = mpi.reduce([x.x for x in diskNodes1.positions().internalValues()], mpi.SUM)
yprof1 = mpi.reduce([y.y for y in diskNodes1.positions().internalValues()], mpi.SUM)
rhoprof1 = mpi.reduce(diskNodes1.massDensity().internalValues(), mpi.SUM)
Pprof1 = mpi.reduce(P1.internalValues(), mpi.SUM)
rprof1 = mpi.reduce([ri.magnitude() for ri in diskNodes1.positions().internalValues()], mpi.SUM)
vx1 = mpi.reduce([v.x for v in diskNodes1.velocity().internalValues()], mpi.SUM)
vy1 = mpi.reduce([v.y for v in diskNodes1.velocity().internalValues()], mpi.SUM)
xprof2 = mpi.reduce([x.x for x in diskNodes2.positions().internalValues()], mpi.SUM)
yprof2 = mpi.reduce([y.y for y in diskNodes2.positions().internalValues()], mpi.SUM)
rhoprof2 = mpi.reduce(diskNodes2.massDensity().internalValues(), mpi.SUM)
Pprof2 = mpi.reduce(P2.internalValues(), mpi.SUM)
rprof2 = mpi.reduce([ri.magnitude() for ri in diskNodes2.positions().internalValues()], mpi.SUM)
vx2 = mpi.reduce([v.x for v in diskNodes2.velocity().internalValues()], mpi.SUM)
vy2 = mpi.reduce([v.y for v in diskNodes2.velocity().internalValues()], mpi.SUM)
np1 = int(diskNodes1.numInternalNodes)
np2 = int(diskNodes2.numInternalNodes)
if np1 is None:
np1 = 0
np1 = mpi.reduce(np1,mpi.SUM)
if np2 is None:
np2 = 0
np2 = mpi.reduce(np2,mpi.SUM)
vprof1 = []
vprof2 = []
if mpi.rank == 0:
for i in xrange(np1):
vprof1.append(xprof1[i]*vx1[i]/rprof1[i]+yprof1[i]*vy1[i]/rprof1[i])
for i in xrange(np2):
vprof2.append(xprof2[i]*vx2[i]/rprof2[i]+yprof2[i]*vy2[i]/rprof2[i])
mof = mortonOrderIndices(db)
mo1 = mpi.reduce(mof[0].internalValues(),mpi.SUM)
mo2 = mpi.reduce(mof[1].internalValues(),mpi.SUM)
if mpi.rank == 0:
multiSort(rprof1,mo1,xprof1,yprof1,rhoprof1,Pprof1,vprof1)
multiSort(rprof2,mo2,xprof2,yprof2,rhoprof2,Pprof2,vprof2)
f = open(outputFile, "w")
f.write("r x y rho P v mortonOrder\n")
for (ri, xi, yi, rhoi, Pi, vi, mi) in zip(rprof1,xprof1,yprof1,rhoprof1,Pprof1,vprof1,mo1):
f.write((7*"%16.12e "+"\n") % (ri,xi,yi,rhoi,Pi,vi,mi))
for (ri, xi, yi, rhoi, Pi, vi, mi) in zip(rprof2,xprof2,yprof2,rhoprof2,Pprof2,vprof2,mo2):
f.write((7*"%16.12e "+"\n") % (ri,xi,yi,rhoi,Pi,vi,mi))
f.close()
if comparisonFile != "None":
comparisonFile = os.path.join(dataDir, comparisonFile)
import filecmp
assert filecmp.cmp(outputFile,comparisonFile)
| 40.346903
| 270
| 0.518731
|
4884eebe759d91ddc832318d6faffdc2f8aa8339
| 18,469
|
py
|
Python
|
xarray/tests/test_indexing.py
|
kaipak/xarray
|
9ae7bce99ce33555d2e40e24e9c0ef5047eeef8f
|
[
"Apache-2.0"
] | null | null | null |
xarray/tests/test_indexing.py
|
kaipak/xarray
|
9ae7bce99ce33555d2e40e24e9c0ef5047eeef8f
|
[
"Apache-2.0"
] | null | null | null |
xarray/tests/test_indexing.py
|
kaipak/xarray
|
9ae7bce99ce33555d2e40e24e9c0ef5047eeef8f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import itertools
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable
from xarray.core import indexing, nputils
from xarray.core.pycompat import native_int_types
from . import (
IndexerMaker, ReturnItem, TestCase, assert_array_equal, raises_regex)
B = IndexerMaker(indexing.BasicIndexer)
class TestIndexers(TestCase):
def set_to_zero(self, x, i):
x = x.copy()
x[i] = 0
return x
def test_expanded_indexer(self):
x = np.random.randn(10, 11, 12, 13, 14)
y = np.arange(5)
I = ReturnItem() # noqa: E741 # allow ambiguous name
for i in [I[:], I[...], I[0, :, 10], I[..., 10], I[:5, ..., 0],
I[..., 0, :], I[y], I[y, y], I[..., y, y],
I[..., 0, 1, 2, 3, 4]]:
j = indexing.expanded_indexer(i, x.ndim)
assert_array_equal(x[i], x[j])
assert_array_equal(self.set_to_zero(x, i),
self.set_to_zero(x, j))
with raises_regex(IndexError, 'too many indices'):
indexing.expanded_indexer(I[1, 2, 3], 2)
def test_asarray_tuplesafe(self):
res = indexing._asarray_tuplesafe(('a', 1))
assert isinstance(res, np.ndarray)
assert res.ndim == 0
assert res.item() == ('a', 1)
res = indexing._asarray_tuplesafe([(0,), (1,)])
assert res.shape == (2,)
assert res[0] == (0,)
assert res[1] == (1,)
def test_convert_label_indexer(self):
# TODO: add tests that aren't just for edge cases
index = pd.Index([1, 2, 3])
with raises_regex(KeyError, 'not all values found'):
indexing.convert_label_indexer(index, [0])
with pytest.raises(KeyError):
indexing.convert_label_indexer(index, 0)
with raises_regex(ValueError, 'does not have a MultiIndex'):
indexing.convert_label_indexer(index, {'one': 0})
mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=('one', 'two'))
with raises_regex(KeyError, 'not all values found'):
indexing.convert_label_indexer(mindex, [0])
with pytest.raises(KeyError):
indexing.convert_label_indexer(mindex, 0)
with pytest.raises(ValueError):
indexing.convert_label_indexer(index, {'three': 0})
with pytest.raises((KeyError, IndexError)):
# pandas 0.21 changed this from KeyError to IndexError
indexing.convert_label_indexer(
mindex, (slice(None), 1, 'no_level'))
def test_convert_unsorted_datetime_index_raises(self):
index = pd.to_datetime(['2001', '2000', '2002'])
with pytest.raises(KeyError):
# pandas will try to convert this into an array indexer. We should
# raise instead, so we can be sure the result of indexing with a
# slice is always a view.
indexing.convert_label_indexer(index, slice('2001', '2002'))
def test_get_dim_indexers(self):
mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=('one', 'two'))
mdata = DataArray(range(4), [('x', mindex)])
dim_indexers = indexing.get_dim_indexers(mdata, {'one': 'a', 'two': 1})
assert dim_indexers == {'x': {'one': 'a', 'two': 1}}
with raises_regex(ValueError, 'cannot combine'):
indexing.get_dim_indexers(mdata, {'x': 'a', 'two': 1})
with raises_regex(ValueError, 'do not exist'):
indexing.get_dim_indexers(mdata, {'y': 'a'})
with raises_regex(ValueError, 'do not exist'):
indexing.get_dim_indexers(mdata, {'four': 1})
def test_remap_label_indexers(self):
def test_indexer(data, x, expected_pos, expected_idx=None):
pos, idx = indexing.remap_label_indexers(data, {'x': x})
assert_array_equal(pos.get('x'), expected_pos)
assert_array_equal(idx.get('x'), expected_idx)
data = Dataset({'x': ('x', [1, 2, 3])})
mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
names=('one', 'two', 'three'))
mdata = DataArray(range(8), [('x', mindex)])
test_indexer(data, 1, 0)
test_indexer(data, np.int32(1), 0)
test_indexer(data, Variable([], 1), 0)
test_indexer(mdata, ('a', 1, -1), 0)
test_indexer(mdata, ('a', 1),
[True, True, False, False, False, False, False, False],
[-1, -2])
test_indexer(mdata, 'a', slice(0, 4, None),
pd.MultiIndex.from_product([[1, 2], [-1, -2]]))
test_indexer(mdata, ('a',),
[True, True, True, True, False, False, False, False],
pd.MultiIndex.from_product([[1, 2], [-1, -2]]))
test_indexer(mdata, [('a', 1, -1), ('b', 2, -2)], [0, 7])
test_indexer(mdata, slice('a', 'b'), slice(0, 8, None))
test_indexer(mdata, slice(('a', 1), ('b', 1)), slice(0, 6, None))
test_indexer(mdata, {'one': 'a', 'two': 1, 'three': -1}, 0)
test_indexer(mdata, {'one': 'a', 'two': 1},
[True, True, False, False, False, False, False, False],
[-1, -2])
test_indexer(mdata, {'one': 'a', 'three': -1},
[True, False, True, False, False, False, False, False],
[1, 2])
test_indexer(mdata, {'one': 'a'},
[True, True, True, True, False, False, False, False],
pd.MultiIndex.from_product([[1, 2], [-1, -2]]))
class TestLazyArray(TestCase):
def test_slice_slice(self):
I = ReturnItem() # noqa: E741 # allow ambiguous name
x = np.arange(100)
slices = [I[:3], I[:4], I[2:4], I[:1], I[:-1], I[5:-1], I[-5:-1],
I[::-1], I[5::-1], I[:3:-1], I[:30:-1], I[10:4:], I[::4],
I[4:4:4], I[:4:-4]]
for i in slices:
for j in slices:
expected = x[i][j]
new_slice = indexing.slice_slice(i, j, size=100)
actual = x[new_slice]
assert_array_equal(expected, actual)
def test_lazily_indexed_array(self):
original = np.random.rand(10, 20, 30)
x = indexing.NumpyIndexingAdapter(original)
v = Variable(['i', 'j', 'k'], original)
lazy = indexing.LazilyIndexedArray(x)
v_lazy = Variable(['i', 'j', 'k'], lazy)
I = ReturnItem() # noqa: E741 # allow ambiguous name
# test orthogonally applied indexers
indexers = [I[:], 0, -2, I[:3], [0, 1, 2, 3], [0], np.arange(10) < 5]
for i in indexers:
for j in indexers:
for k in indexers:
if isinstance(j, np.ndarray) and j.dtype.kind == 'b':
j = np.arange(20) < 5
if isinstance(k, np.ndarray) and k.dtype.kind == 'b':
k = np.arange(30) < 5
expected = np.asarray(v[i, j, k])
for actual in [v_lazy[i, j, k],
v_lazy[:, j, k][i],
v_lazy[:, :, k][:, j][i]]:
assert expected.shape == actual.shape
assert_array_equal(expected, actual)
assert isinstance(actual._data,
indexing.LazilyIndexedArray)
# make sure actual.key is appropriate type
if all(isinstance(k, native_int_types + (slice, ))
for k in v_lazy._data.key.tuple):
assert isinstance(v_lazy._data.key,
indexing.BasicIndexer)
else:
assert isinstance(v_lazy._data.key,
indexing.OuterIndexer)
# test sequentially applied indexers
indexers = [(3, 2), (I[:], 0), (I[:2], -1), (I[:4], [0]), ([4, 5], 0),
([0, 1, 2], [0, 1]), ([0, 3, 5], I[:2])]
for i, j in indexers:
expected = np.asarray(v[i][j])
actual = v_lazy[i][j]
assert expected.shape == actual.shape
assert_array_equal(expected, actual)
assert isinstance(actual._data, indexing.LazilyIndexedArray)
assert isinstance(actual._data.array,
indexing.NumpyIndexingAdapter)
class TestCopyOnWriteArray(TestCase):
def test_setitem(self):
original = np.arange(10)
wrapped = indexing.CopyOnWriteArray(original)
wrapped[B[:]] = 0
assert_array_equal(original, np.arange(10))
assert_array_equal(wrapped, np.zeros(10))
def test_sub_array(self):
original = np.arange(10)
wrapped = indexing.CopyOnWriteArray(original)
child = wrapped[B[:5]]
assert isinstance(child, indexing.CopyOnWriteArray)
child[B[:]] = 0
assert_array_equal(original, np.arange(10))
assert_array_equal(wrapped, np.arange(10))
assert_array_equal(child, np.zeros(5))
def test_index_scalar(self):
# regression test for GH1374
x = indexing.CopyOnWriteArray(np.array(['foo', 'bar']))
assert np.array(x[B[0]][B[()]]) == 'foo'
class TestMemoryCachedArray(TestCase):
def test_wrapper(self):
original = indexing.LazilyIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
assert_array_equal(wrapped, np.arange(10))
assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter)
def test_sub_array(self):
original = indexing.LazilyIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
child = wrapped[B[:5]]
assert isinstance(child, indexing.MemoryCachedArray)
assert_array_equal(child, np.arange(5))
assert isinstance(child.array, indexing.NumpyIndexingAdapter)
assert isinstance(wrapped.array, indexing.LazilyIndexedArray)
def test_setitem(self):
original = np.arange(10)
wrapped = indexing.MemoryCachedArray(original)
wrapped[B[:]] = 0
assert_array_equal(original, np.zeros(10))
def test_index_scalar(self):
# regression test for GH1374
x = indexing.MemoryCachedArray(np.array(['foo', 'bar']))
assert np.array(x[B[0]][B[()]]) == 'foo'
def test_base_explicit_indexer():
with pytest.raises(TypeError):
indexing.ExplicitIndexer(())
class Subclass(indexing.ExplicitIndexer):
pass
value = Subclass((1, 2, 3))
assert value.tuple == (1, 2, 3)
assert repr(value) == 'Subclass((1, 2, 3))'
@pytest.mark.parametrize('indexer_cls', [indexing.BasicIndexer,
indexing.OuterIndexer,
indexing.VectorizedIndexer])
def test_invalid_for_all(indexer_cls):
with pytest.raises(TypeError):
indexer_cls(None)
with pytest.raises(TypeError):
indexer_cls(([],))
with pytest.raises(TypeError):
indexer_cls((None,))
with pytest.raises(TypeError):
indexer_cls(('foo',))
with pytest.raises(TypeError):
indexer_cls((1.0,))
with pytest.raises(TypeError):
indexer_cls((slice('foo'),))
with pytest.raises(TypeError):
indexer_cls((np.array(['foo']),))
def check_integer(indexer_cls):
value = indexer_cls((1, np.uint64(2),)).tuple
assert all(isinstance(v, int) for v in value)
assert value == (1, 2)
def check_slice(indexer_cls):
(value,) = indexer_cls((slice(1, None, np.int64(2)),)).tuple
assert value == slice(1, None, 2)
assert isinstance(value.step, native_int_types)
def check_array1d(indexer_cls):
(value,) = indexer_cls((np.arange(3, dtype=np.int32),)).tuple
assert value.dtype == np.int64
np.testing.assert_array_equal(value, [0, 1, 2])
def check_array2d(indexer_cls):
array = np.array([[1, 2], [3, 4]], dtype=np.int64)
(value,) = indexer_cls((array,)).tuple
assert value.dtype == np.int64
np.testing.assert_array_equal(value, array)
def test_basic_indexer():
check_integer(indexing.BasicIndexer)
check_slice(indexing.BasicIndexer)
with pytest.raises(TypeError):
check_array1d(indexing.BasicIndexer)
with pytest.raises(TypeError):
check_array2d(indexing.BasicIndexer)
def test_outer_indexer():
check_integer(indexing.OuterIndexer)
check_slice(indexing.OuterIndexer)
check_array1d(indexing.OuterIndexer)
with pytest.raises(TypeError):
check_array2d(indexing.OuterIndexer)
def test_vectorized_indexer():
with pytest.raises(TypeError):
check_integer(indexing.VectorizedIndexer)
check_slice(indexing.VectorizedIndexer)
check_array1d(indexing.VectorizedIndexer)
check_array2d(indexing.VectorizedIndexer)
with raises_regex(ValueError, 'numbers of dimensions'):
indexing.VectorizedIndexer((np.array(1, dtype=np.int64),
np.arange(5, dtype=np.int64)))
def test_unwrap_explicit_indexer():
indexer = indexing.BasicIndexer((1, 2))
target = None
unwrapped = indexing.unwrap_explicit_indexer(
indexer, target, allow=indexing.BasicIndexer)
assert unwrapped == (1, 2)
with raises_regex(NotImplementedError, 'Load your data'):
indexing.unwrap_explicit_indexer(
indexer, target, allow=indexing.OuterIndexer)
with raises_regex(TypeError, 'unexpected key type'):
indexing.unwrap_explicit_indexer(
indexer.tuple, target, allow=indexing.OuterIndexer)
def test_implicit_indexing_adapter():
array = np.arange(10)
implicit = indexing.ImplicitToExplicitIndexingAdapter(
indexing.NumpyIndexingAdapter(array), indexing.BasicIndexer)
np.testing.assert_array_equal(array, np.asarray(implicit))
np.testing.assert_array_equal(array, implicit[:])
def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
def nonzero(x):
if isinstance(x, np.ndarray) and x.dtype.kind == 'b':
x = x.nonzero()[0]
return x
original = np.random.rand(10, 20, 30)
v = Variable(['i', 'j', 'k'], original)
I = ReturnItem() # noqa: E741 # allow ambiguous name
# test orthogonally applied indexers
indexers = [I[:], 0, -2, I[:3], np.array([0, 1, 2, 3]), np.array([0]),
np.arange(10) < 5]
for i, j, k in itertools.product(indexers, repeat=3):
if isinstance(j, np.ndarray) and j.dtype.kind == 'b': # match size
j = np.arange(20) < 4
if isinstance(k, np.ndarray) and k.dtype.kind == 'b':
k = np.arange(30) < 8
_, expected, new_order = v._broadcast_indexes_vectorized((i, j, k))
expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple]
if new_order:
old_order = range(len(new_order))
expected_data = np.moveaxis(expected_data, old_order,
new_order)
outer_index = (nonzero(i), nonzero(j), nonzero(k))
actual = indexing._outer_to_numpy_indexer(outer_index, v.shape)
actual_data = v.data[actual]
np.testing.assert_array_equal(actual_data, expected_data)
def test_create_mask_outer_indexer():
indexer = indexing.OuterIndexer((np.array([0, -1, 2]),))
expected = np.array([False, True, False])
actual = indexing.create_mask(indexer, (5,))
np.testing.assert_array_equal(expected, actual)
indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2]),))
expected = np.array(2 * [[False, True, False]])
actual = indexing.create_mask(indexer, (5, 5, 5,))
np.testing.assert_array_equal(expected, actual)
def test_create_mask_vectorized_indexer():
indexer = indexing.VectorizedIndexer(
(np.array([0, -1, 2]), np.array([0, 1, -1])))
expected = np.array([False, True, True])
actual = indexing.create_mask(indexer, (5,))
np.testing.assert_array_equal(expected, actual)
indexer = indexing.VectorizedIndexer(
(np.array([0, -1, 2]), slice(None), np.array([0, 1, -1])))
expected = np.array([[False, True, True]] * 2).T
actual = indexing.create_mask(indexer, (5, 2))
np.testing.assert_array_equal(expected, actual)
def test_create_mask_basic_indexer():
indexer = indexing.BasicIndexer((-1,))
actual = indexing.create_mask(indexer, (3,))
np.testing.assert_array_equal(True, actual)
indexer = indexing.BasicIndexer((0,))
actual = indexing.create_mask(indexer, (3,))
np.testing.assert_array_equal(False, actual)
def test_create_mask_dask():
da = pytest.importorskip('dask.array')
indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2]),))
expected = np.array(2 * [[False, True, False]])
actual = indexing.create_mask(indexer, (5, 5, 5,),
chunks_hint=((1, 1), (2, 1)))
assert actual.chunks == ((1, 1), (2, 1))
np.testing.assert_array_equal(expected, actual)
indexer = indexing.VectorizedIndexer(
(np.array([0, -1, 2]), slice(None), np.array([0, 1, -1])))
expected = np.array([[False, True, True]] * 2).T
actual = indexing.create_mask(indexer, (5, 2), chunks_hint=((3,), (2,)))
assert isinstance(actual, da.Array)
np.testing.assert_array_equal(expected, actual)
with pytest.raises(ValueError):
indexing.create_mask(indexer, (5, 2), chunks_hint=())
def test_create_mask_error():
with raises_regex(TypeError, 'unexpected key type'):
indexing.create_mask((1, 2), (3, 4))
@pytest.mark.parametrize('indices, expected', [
(np.arange(5), np.arange(5)),
(np.array([0, -1, -1]), np.array([0, 0, 0])),
(np.array([-1, 1, -1]), np.array([1, 1, 1])),
(np.array([-1, -1, 2]), np.array([2, 2, 2])),
(np.array([-1]), np.array([0])),
(np.array([0, -1, 1, -1, -1]), np.array([0, 0, 1, 1, 1])),
(np.array([0, -1, -1, -1, 1]), np.array([0, 0, 0, 0, 1])),
])
def test_posify_mask_subindexer(indices, expected):
actual = indexing._posify_mask_subindexer(indices)
np.testing.assert_array_equal(expected, actual)
| 39.71828
| 79
| 0.585468
|
7d743f6338afc9f7ba729e953bf8883f55bb8359
| 1,227
|
py
|
Python
|
pyranges/methods/call.py
|
iamjli/pyranges
|
52a0979db99c8b4d88e3e8a63558356e8baec70b
|
[
"MIT"
] | 299
|
2019-03-22T18:28:01.000Z
|
2022-03-11T16:14:19.000Z
|
pyranges/methods/call.py
|
iamjli/pyranges
|
52a0979db99c8b4d88e3e8a63558356e8baec70b
|
[
"MIT"
] | 157
|
2019-04-06T18:05:27.000Z
|
2022-03-07T14:50:10.000Z
|
pyranges/methods/call.py
|
iamjli/pyranges
|
52a0979db99c8b4d88e3e8a63558356e8baec70b
|
[
"MIT"
] | 33
|
2019-04-12T14:44:53.000Z
|
2022-03-16T16:58:06.000Z
|
import pandas as pd
import pyranges as pr
def _handle_eval_return(self, result, col, as_pyranges, subset):
"""Handle return from eval.
If col is set, add/update cols. If subset is True, use return series to subset PyRanges.
Otherwise return PyRanges or dict of data."""
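    # Illustrative behaviour (hypothetical example, inferred from the branches below): a boolean
    # Series with subset=True subsets the PyRanges; a non-bool Series with e.g. col="Score" is
    # attached as a column via __setattr__; otherwise a dict of DataFrames is wrapped in
    # pr.PyRanges(result).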
if as_pyranges:
if not result:
return pr.PyRanges()
first_hit = list(result.values())[0]
if isinstance(first_hit, pd.Series):
if first_hit.dtype == bool and subset:
return self[result]
elif col:
self.__setattr__(col, result)
return self
else:
raise Exception(
"Cannot return PyRanges when function returns a Series! Use as_pyranges=False."
)
return pr.PyRanges(result)
else:
return result
def _call(self, f, strand=None, as_pyranges=True, **kwargs):
if strand is None:
strand = self.stranded
if self.stranded and not strand:
self = self.unstrand()
result = self.apply(f, strand=strand, as_pyranges=False, **kwargs)
# result = _handle_eval_return(self, result, col, as_pyranges, subset)
return result
| 27.266667
| 99
| 0.607987
|
2a68170dcf2f846d4c5aba948b54702bb079da8c
| 15,503
|
py
|
Python
|
python/cuml/datasets/classification.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 2,743
|
2018-10-11T17:28:58.000Z
|
2022-03-31T19:20:50.000Z
|
python/cuml/datasets/classification.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 4,280
|
2018-10-11T22:29:57.000Z
|
2022-03-31T22:02:44.000Z
|
python/cuml/datasets/classification.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 454
|
2018-10-11T17:40:56.000Z
|
2022-03-25T17:07:09.000Z
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cuml.internals
from cuml.common.import_utils import has_sklearn
from cuml.datasets.utils import _create_rs_generator
import cupy as cp
import numpy as np
import nvtx
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if not has_sklearn():
raise RuntimeError("Scikit-learn is needed to run \
make_classification.")
from sklearn.utils.random import sample_without_replacement
if dimensions > 30:
return np.hstack([np.random.randint(2, size=(samples,
dimensions - 30)),
_generate_hypercube(samples, 30, rng)])
random_state = int(rng.randint(dimensions))
out = sample_without_replacement(2 ** dimensions, samples,
random_state=random_state).astype(
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
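# Illustration (hypothetical call): _generate_hypercube(4, 2, np.random.RandomState(0)) returns
# 4 distinct binary rows drawn from {0, 1}**2, e.g. something like [[0, 1], [1, 0], [1, 1], [0, 0]].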
@nvtx.annotate(message="datasets.make_classification", domain="cuml_python")
@cuml.internals.api_return_generic()
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None, order='F',
dtype='float32', _centroids=None,
_informative_covariance=None,
_redundant_covariance=None,
_repeated_indices=None):
"""
Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of an `n_informative`-dimensional hypercube with sides of
length :py:`2*class_sep` and assigns an equal number of clusters to each
class. It introduces interdependence between these features and adds
various types of further noise to the data.
Without shuffling, `X` horizontally stacks features in the following
order: the primary `n_informative` features, followed by `n_redundant`
linear combinations of the informative features, followed by `n_repeated`
duplicates, drawn randomly with replacement from the informative and
redundant features. The remaining features are filled with random noise.
Thus, without shuffling, all useful features are contained in the columns
:py:`X[:, :n_informative + n_redundant + n_repeated]`.
Examples
--------
.. code-block:: python
from cuml.datasets.classification import make_classification
X, y = make_classification(n_samples=10, n_features=4,
n_informative=2, n_classes=2)
print("X:")
print(X)
print("y:")
print(y)
Output:
.. code-block:: python
X:
[[-2.3249989 -0.8679415 -1.1511791 1.3525577 ]
[ 2.2933831 1.3743551 0.63128835 -0.84648645]
[ 1.6361488 -1.3233329 0.807027 -0.894092 ]
[-1.0093077 -0.9990691 -0.00808992 0.00950443]
[ 0.99803793 2.068382 0.49570698 -0.8462848 ]
[-1.2750955 -0.9725835 -0.2390058 0.28081596]
[-1.3635055 -0.9637669 -0.31582272 0.37106958]
[ 1.1893625 2.227583 0.48750278 -0.8737561 ]
[-0.05753583 -1.0939395 0.8188342 -0.9620734 ]
[ 0.47910076 0.7648213 -0.17165393 0.26144698]]
y:
[0 1 0 0 1 0 0 1 0 1]
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features,
`n_repeated` duplicated features and
:py:`n_features-n_informative-n_redundant-n_repeated` useless features
drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : array-like of shape (n_classes,) or (n_classes - 1,),\
(default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if :py:`len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of
`weights` exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class is assigned randomly. Larger
values introduce noise in the labels and make the classification
task harder.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube size. Larger values spread
out the clusters/classes and make the classification task easier.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
order: str, optional (default='F')
The order of the generated samples
dtype : str, optional (default='float32')
Dtype of the generated samples
_centroids: array of centroids of shape (n_clusters, n_informative)
_informative_covariance: array for covariance between informative features
of shape (n_clusters, n_informative, n_informative)
_redundant_covariance: array for covariance between redundant features
of shape (n_informative, n_redundant)
_repeated_indices: array of indices for the repeated features
of shape (n_repeated, )
Returns
-------
X : device array of shape [n_samples, n_features]
The generated samples.
y : device array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1]_ and was designed to generate
the "Madelon" dataset. How we optimized for GPUs:
1. Firstly, we generate X from a standard univariate instead of zeros.
This saves memory as we don't need to generate univariates each
time for each feature class (informative, repeated, etc.) while
also providing the added speedup of generating a big matrix
on GPU
2. We generate :py:`order=F` construction. We exploit the
fact that X is a generated from a univariate normal, and
covariance is introduced with matrix multiplications. Which means,
we can generate X as a 1D array and just reshape it to the
desired order, which only updates the metadata and eliminates
copies
3. Lastly, we also shuffle by construction. Centroid indices are
permuted for each sample, and then we construct the data for
each centroid. This shuffle works for both :py:`order=C` and
:py:`order=F` and eliminates any need for secondary copies
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
cuml.internals.set_api_output_type("cupy")
generator = _create_rs_generator(random_state)
np_seed = int(generator.randint(n_samples, size=1))
np.random.seed(np_seed)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
# Use log2 to avoid overflow errors
if n_informative < np.log2(n_classes * n_clusters_per_class):
msg = "n_classes({}) * n_clusters_per_class({}) must be"
msg += " smaller or equal 2**n_informative({})={}"
raise ValueError(msg.format(n_classes, n_clusters_per_class,
n_informative, 2**n_informative))
if weights is not None:
if len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
if len(weights) == n_classes - 1:
if isinstance(weights, list):
weights = weights + [1.0 - sum(weights)]
else:
weights = np.resize(weights, n_classes)
weights[-1] = 1.0 - sum(weights[:-1])
else:
weights = [1.0 / n_classes] * n_classes
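    # e.g. (illustration): weights=[0.7] with n_classes=2 is completed to [0.7, 0.3], while
    # weights=None with n_classes=4 yields [0.25, 0.25, 0.25, 0.25].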
n_clusters = n_classes * n_clusters_per_class
# Distribute samples among clusters by weight
n_samples_per_cluster = [
int(n_samples * weights[k % n_classes] / n_clusters_per_class)
for k in range(n_clusters)]
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
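    # e.g. (illustration): n_samples=10, n_classes=2, n_clusters_per_class=2 with balanced weights
    # gives [2, 2, 2, 2] before the remainder loop above and [3, 3, 2, 2] after it.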
# Initialize X and y
X = generator.randn(n_samples * n_features, dtype=dtype)
X = X.reshape((n_samples, n_features), order=order)
y = cp.zeros(n_samples, dtype=np.int64)
# Build the polytope whose vertices become cluster centroids
if _centroids is None:
centroids = cp.array(_generate_hypercube(n_clusters, n_informative,
generator)).astype(dtype, copy=False)
else:
centroids = _centroids
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1, dtype=dtype)
centroids *= generator.rand(1, n_informative, dtype=dtype)
# Create redundant features
if n_redundant > 0:
if _redundant_covariance is None:
B = 2 * generator.rand(n_informative, n_redundant, dtype=dtype) - 1
else:
B = _redundant_covariance
# Create each cluster; a variant of make_blobs
if shuffle:
proba_samples_per_cluster = np.array(n_samples_per_cluster) / np.sum(
n_samples_per_cluster)
shuffled_sample_indices = cp.array(np.random.choice(
n_clusters,
n_samples,
replace=True,
p=proba_samples_per_cluster
))
for k, centroid in enumerate(centroids):
centroid_indices = cp.where(shuffled_sample_indices == k)
y[centroid_indices[0]] = k % n_classes
X_k = X[centroid_indices[0], :n_informative]
if _informative_covariance is None:
A = 2 * generator.rand(n_informative, n_informative,
dtype=dtype) - 1
else:
A = _informative_covariance[k]
X_k = cp.dot(X_k, A)
# NOTE: This could be done outside the loop, but a current
# cupy bug does not allow that
# https://github.com/cupy/cupy/issues/3284
if n_redundant > 0:
X[centroid_indices[0], n_informative:n_informative
+ n_redundant] = cp.dot(X_k, B)
X_k += centroid # shift the cluster to a vertex
X[centroid_indices[0], :n_informative] = X_k
else:
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
if _informative_covariance is None:
A = 2 * generator.rand(n_informative, n_informative,
dtype=dtype) - 1
else:
A = _informative_covariance[k]
X_k = cp.dot(X_k, A) # introduce random covariance
if n_redundant > 0:
X[start:stop, n_informative:n_informative + n_redundant] = \
cp.dot(X_k, B)
X_k += centroid # shift the cluster to a vertex
X[start:stop, :n_informative] = X_k
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
if _repeated_indices is None:
indices = ((n - 1) * generator.rand(n_repeated,
dtype=dtype)
+ 0.5).astype(np.intp)
else:
indices = _repeated_indices
X[:, n:n + n_repeated] = X[:, indices]
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples, dtype=dtype) < flip_y
y[flip_mask] = generator.randint(n_classes, size=int(flip_mask.sum()))
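    # e.g. with flip_y=0.01 roughly 1% of samples receive a uniformly random label in range(n_classes).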
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features, dtype=dtype) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features, dtype=dtype)
X *= scale
return X, y
| 43.793785
| 79
| 0.629427
|
793b7ffea4bda20a637723fa1e85f66911d38c92
| 8,695
|
py
|
Python
|
google-cloud-sdk/lib/googlecloudsdk/api_lib/app/api/appengine_domains_api_client.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/api_lib/app/api/appengine_domains_api_client.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/api_lib/app/api/appengine_domains_api_client.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for creating a client to talk to the App Engine Admin API."""
from googlecloudsdk.api_lib.app import operations_util
from googlecloudsdk.api_lib.app.api import appengine_api_client_base as base
from googlecloudsdk.api_lib.app.api import requests
from googlecloudsdk.calliope import base as calliope_base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import resources
DOMAINS_VERSION_MAP = {
calliope_base.ReleaseTrack.GA: 'v1beta',
calliope_base.ReleaseTrack.ALPHA: 'v1alpha',
calliope_base.ReleaseTrack.BETA: 'v1beta'
}
def GetApiClientForTrack(release_track):
api_version = DOMAINS_VERSION_MAP[release_track]
if api_version == 'v1alpha':
return AppengineDomainsApiAlphaClient.GetApiClient('v1alpha')
else:
return AppengineDomainsApiClient.GetApiClient(api_version)
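# For illustration: GetApiClientForTrack(calliope_base.ReleaseTrack.BETA) maps to 'v1beta' and
# returns AppengineDomainsApiClient.GetApiClient('v1beta'); only the ALPHA track gets the alpha client.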
class AppengineDomainsApiClient(base.AppengineApiClientBase):
"""Client used by gcloud to communicate with the App Engine API."""
def __init__(self, client):
base.AppengineApiClientBase.__init__(self, client)
self._registry = resources.REGISTRY.Clone()
# pylint: disable=protected-access
self._registry.RegisterApiByName('appengine', client._VERSION)
def CreateDomainMapping(self, domain, certificate_id):
"""Creates a domain mapping for the given application.
Args:
domain: str, the custom domain string.
certificate_id: str, a certificate id for the new domain.
Returns:
The created DomainMapping object.
"""
ssl = self.messages.SslSettings(certificateId=certificate_id)
domain_mapping = self.messages.DomainMapping(id=domain, sslSettings=ssl)
request = self.messages.AppengineAppsDomainMappingsCreateRequest(
parent=self._FormatApp(), domainMapping=domain_mapping)
operation = requests.MakeRequest(self.client.apps_domainMappings.Create,
request)
return operations_util.WaitForOperation(self.client.apps_operations,
operation).response
def DeleteDomainMapping(self, domain):
"""Deletes a domain mapping for the given application.
Args:
domain: str, the domain to delete.
"""
request = self.messages.AppengineAppsDomainMappingsDeleteRequest(
name=self._FormatDomainMapping(domain))
operation = requests.MakeRequest(self.client.apps_domainMappings.Delete,
request)
operations_util.WaitForOperation(self.client.apps_operations, operation)
def GetDomainMapping(self, domain):
"""Gets a domain mapping for the given application.
Args:
domain: str, the domain to retrieve.
Returns:
The retrieved DomainMapping object.
"""
request = self.messages.AppengineAppsDomainMappingsGetRequest(
name=self._FormatDomainMapping(domain))
return requests.MakeRequest(self.client.apps_domainMappings.Get, request)
def ListDomainMappings(self):
"""Lists all domain mappings for the given application.
Returns:
A list of DomainMapping objects.
"""
request = self.messages.AppengineAppsDomainMappingsListRequest(
parent=self._FormatApp())
response = requests.MakeRequest(self.client.apps_domainMappings.List,
request)
return response.domainMappings
def UpdateDomainMapping(self, domain, certificate_id, no_certificate_id):
"""Updates a domain mapping for the given application.
Args:
domain: str, the custom domain string.
certificate_id: str, a certificate id for the domain.
no_certificate_id: bool, remove the certificate id from the domain.
Returns:
The updated DomainMapping object.
"""
mask_fields = []
if certificate_id or no_certificate_id:
mask_fields.append('sslSettings.certificateId')
ssl = self.messages.SslSettings(certificateId=certificate_id)
domain_mapping = self.messages.DomainMapping(id=domain, sslSettings=ssl)
if not mask_fields:
raise exceptions.MinimumArgumentException(
['--[no-]certificate-id'],
'Please specify at least one attribute to the domain-mapping update.')
request = self.messages.AppengineAppsDomainMappingsPatchRequest(
name=self._FormatDomainMapping(domain),
domainMapping=domain_mapping,
updateMask=','.join(mask_fields))
operation = requests.MakeRequest(self.client.apps_domainMappings.Patch,
request)
return operations_util.WaitForOperation(self.client.apps_operations,
operation).response
def ListVerifiedDomains(self):
"""Lists all domains verified by the current user.
Returns:
A list of AuthorizedDomain objects.
"""
request = self.messages.AppengineAppsAuthorizedDomainsListRequest(
parent=self._FormatApp())
response = requests.MakeRequest(self.client.apps_authorizedDomains.List,
request)
return response.domains
def _FormatDomainMapping(self, domain):
res = self._registry.Parse(
domain,
params={'appsId': self.project},
collection='appengine.apps.domainMappings')
return res.RelativeName()
class AppengineDomainsApiAlphaClient(AppengineDomainsApiClient):
"""Client used by gcloud to communicate with the App Engine API."""
def CreateDomainMapping(self, domain, certificate_id, no_managed_certificate):
"""Creates a domain mapping for the given application.
Args:
domain: str, the custom domain string.
certificate_id: str, a certificate id for the new domain.
no_managed_certificate: bool, don't automatically provision a certificate.
Returns:
The created DomainMapping object.
"""
ssl = self.messages.SslSettings(certificateId=certificate_id)
domain_mapping = self.messages.DomainMapping(id=domain, sslSettings=ssl)
request = self.messages.AppengineAppsDomainMappingsCreateRequest(
parent=self._FormatApp(),
domainMapping=domain_mapping,
noManagedCertificate=no_managed_certificate)
operation = requests.MakeRequest(self.client.apps_domainMappings.Create,
request)
return operations_util.WaitForOperation(self.client.apps_operations,
operation).response
def UpdateDomainMapping(self,
domain,
certificate_id,
no_certificate_id,
no_managed_certificate=None):
"""Updates a domain mapping for the given application.
Args:
domain: str, the custom domain string.
certificate_id: str, a certificate id for the domain.
no_certificate_id: bool, remove the certificate id from the domain.
no_managed_certificate: bool, don't automatically provision a certificate.
Returns:
The updated DomainMapping object.
"""
mask_fields = []
if certificate_id or no_certificate_id:
mask_fields.append('sslSettings.certificateId')
if no_managed_certificate:
mask_fields.append('noManagedCertificate')
ssl = self.messages.SslSettings(certificateId=certificate_id)
domain_mapping = self.messages.DomainMapping(id=domain, sslSettings=ssl)
if not mask_fields:
raise exceptions.MinimumArgumentException(
['--[no-]certificate-id', '--no_managed_certificate'],
'Please specify at least one attribute to the domain-mapping update.')
request = self.messages.AppengineAppsDomainMappingsPatchRequest(
name=self._FormatDomainMapping(domain),
noManagedCertificate=no_managed_certificate,
domainMapping=domain_mapping,
updateMask=','.join(mask_fields))
operation = requests.MakeRequest(self.client.apps_domainMappings.Patch,
request)
return operations_util.WaitForOperation(self.client.apps_operations,
operation).response
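# --- Illustrative sketch (not part of the original SDK module) ---
# Both Patch paths above only send the fields named in updateMask, a comma-joined
# list derived from the flags the caller passed. A minimal standalone example of
# that mask construction, with hypothetical flag values:
def _example_update_mask(certificate_id='cert-123', no_certificate_id=False,
                         no_managed_certificate=True):
    mask_fields = []
    if certificate_id or no_certificate_id:
        mask_fields.append('sslSettings.certificateId')
    if no_managed_certificate:
        mask_fields.append('noManagedCertificate')
    return ','.join(mask_fields)  # e.g. 'sslSettings.certificateId,noManagedCertificate'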
| 35.929752
| 80
| 0.704888
|
d3cb5a84f2b68cf8e5ad3c57c0b0491ab6937c33
| 5,899
|
py
|
Python
|
src/miscellaneous scripts/Final File to Run/cnn_with_classWeights.py
|
Tirth27/Detecting-diabetic-retinopathy
|
86ba4fe616e15f72f509f1ed17a5b2dae8c84b88
|
[
"MIT"
] | 22
|
2020-01-22T09:44:48.000Z
|
2022-03-12T19:02:46.000Z
|
src/miscellaneous scripts/Final File to Run/cnn_with_classWeights.py
|
Tirth27/Detecting-diabetic-retinopathy
|
86ba4fe616e15f72f509f1ed17a5b2dae8c84b88
|
[
"MIT"
] | 3
|
2022-01-11T07:09:48.000Z
|
2022-02-18T21:45:45.000Z
|
src/miscellaneous scripts/Final File to Run/cnn_with_classWeights.py
|
Tirth27/Detecting-diabetic-retinopathy
|
86ba4fe616e15f72f509f1ed17a5b2dae8c84b88
|
[
"MIT"
] | 15
|
2020-12-20T06:56:36.000Z
|
2022-02-22T03:22:08.000Z
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras.utils import normalize, np_utils, multi_gpu_model
from keras.callbacks import TensorBoard, EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, recall_score, f1_score, cohen_kappa_score
from sklearn.utils import class_weight
from skll.metrics import kappa
np.random.seed(1337)
def cnn_model(X_train, y_train, cl_weights, nb_filters, channels, nb_gpus, nb_classes, kernel_size, batch_size, nb_epoch):
print("X_train:- ", X_train.shape, "\ny_train:- ", y_train.shape, "\nnb_filters:- ", nb_filters)
print("channels:- ", channels, "\nnb_gpus:- ", nb_gpus, "\nkernel_size:- " , kernel_size)
print("batch_size:- ", batch_size, "\nnb_epoch:- ", nb_epoch, "\nweights:- ", cl_weights)
model = Sequential()
model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]), padding = 'valid', strides = 1,
input_shape = (img_rows, img_cols, channels),
activation = 'relu'))
model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]), activation = 'relu'))
model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Flatten())
#print('Model Flatten out to: ', model.output_shape)
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.25))
model.add(Dense(nb_classes, activation = 'softmax'))
#model = multi_gpu_model(model, gpus=nb_gpus)
model.summary()
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam',
metrics = ['accuracy'])
stop = EarlyStopping(monitor="val_acc", min_delta=0.001, patience=2, verbose=0,
mode="auto")
tensorboard = TensorBoard(log_dir='tensorboard_logs/', histogram_freq = 0,
write_graph = True, write_images = True)
model.fit(X_train, y_train, batch_size = batch_size,
epochs = nb_epoch,
verbose = 1,
validation_split = 0.2,
class_weight = cl_weights,
callbacks = [stop, tensorboard])
return model
def save_model(model, score, model_name):
if score >= 0.75:
print('Saving Model')
model.save('saved_model/' + model_name + '_recall_' + str(round(score, 4)) + '.h5')
else:
print("Model Not Saved. Score:-", score)
if __name__ == '__main__':
img_rows, img_cols = 256, 256
channels = 3
nb_classes = 5
nb_filters = 32
kernel_size = (8, 8)
    #kernel_size = (4, 4)
nb_gpus = 2
nb_epoch = 30
#nb_epoch = 50
batch_size = 512
#Data
labels = pd.read_csv("/Users/tirth/Documents/Diabetic Retinopathy/Model/sample.csv")
X = np.load("/Users/tirth/Documents/Diabetic Retinopathy/Model/sample.npy")
y = np.array(labels['level'])
#Class Weights (For imbalanced classes)
cl_weights = class_weight.compute_class_weight('balanced', np.unique(y), y)
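    # --- Illustrative sketch (not part of the original script) ---
    # 'balanced' weights follow n_samples / (n_classes * bincount(y)), so rarer classes
    # get proportionally larger weights. Tiny worked example with a hypothetical label
    # vector, using the same positional call style as above:
    y_demo = np.array([0, 0, 0, 1])
    demo_weights = class_weight.compute_class_weight('balanced', np.unique(y_demo), y_demo)
    # demo_weights is approximately [0.67, 2.0]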
#Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
#print(X_train.shape, X_test.shape, y_train, y_test)
#Reshape
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channels)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels)
print("X_train Shape: ", X_train.shape)
print("X_test Shape: ", X_test.shape)
#Normalize
X_train = X_train.astype('float32')
X_train /= 255
X_test = X_test.astype('float32')
X_test /=255
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
print("y_train Shape: ", y_train.shape)
print("y_test Shape: ", y_test.shape)
#Training
model = cnn_model(X_train = X_train, y_train = y_train, kernel_size = kernel_size,
nb_filters = nb_filters, channels = channels, nb_epoch = nb_epoch,
batch_size = batch_size, nb_classes = nb_classes, nb_gpus = nb_gpus,
cl_weights = cl_weights)
#Generates output predictions for input samples
y_pred = model.predict(X_test)
#Return loss value & metrics values for the model in test mode.
score = model.evaluate(X_test, y_test, verbose = 0)
print("Test score: ", score[0])
print("Test Accuracy: ", score[1])
y_test = np.argmax(y_test, axis = 1)
y_pred = np.argmax(y_pred, axis = 1)
##The best value is 1 and the worst value is 0
    #Precision is intuitively the ability of the classifier not to label as positive a sample that is negative.
    #(labels are multiclass here, so an explicit averaging mode is required)
    precision = precision_score(y_test, y_pred, average = 'weighted')
    #Recall is intuitively the ability of the classifier to find all positive samples.
    recall = recall_score(y_test, y_pred, average = 'weighted')
print("Precision: ", precision)
print("Recall: ", recall)
    #Complement: micro-averaged versions of the same metrics
#The best value is 1 and the worst value is 0
precision = precision_score(y_test, y_pred, average = 'micro')
recall_s = recall_score(y_test, y_pred, average = 'micro')
f1 = f1_score(y_test, y_pred, average = 'micro')
    #cohen_kappa is the level of agreement between two annotators on a classification problem.
cohen_kappa = cohen_kappa_score(y_test, y_pred)
quad_kappa = kappa(y_test, y_pred, weights='quadratic')
print("----------------Compliment----------------")
print("Precision: ", precision)
print("Recall: ", recall_s)
print("F1: ", f1)
print("Cohen Kappa Score", cohen_kappa)
print("Quadratic Kappa: ", quad_kappa)
save_model(model = model, score = recall, model_name = "DR_Class")
print("Completed")
| 37.573248
| 122
| 0.662994
|
2859439f3e87bfe1af4fd27cb032d192315fbdf5
| 650
|
py
|
Python
|
book_center/tests/utils/validators/test_validate_bot_catcher_empty.py
|
geodimitrov/Python-Web-Framework-SoftUni
|
06b7e11aee0024a564d1b266d5ed6271351ac116
|
[
"MIT"
] | null | null | null |
book_center/tests/utils/validators/test_validate_bot_catcher_empty.py
|
geodimitrov/Python-Web-Framework-SoftUni
|
06b7e11aee0024a564d1b266d5ed6271351ac116
|
[
"MIT"
] | null | null | null |
book_center/tests/utils/validators/test_validate_bot_catcher_empty.py
|
geodimitrov/Python-Web-Framework-SoftUni
|
06b7e11aee0024a564d1b266d5ed6271351ac116
|
[
"MIT"
] | null | null | null |
from book_center.utils.validators import validate_bot_catcher_empty
from django.core.exceptions import ValidationError
from django.test import TestCase
class ValidateBotCatcherTests(TestCase):
def test_when_bot_detected__expect_exception(self):
value = 'bot'
expected_validation_error_message = 'Bot detected'
with self.assertRaises(ValidationError) as context:
validate_bot_catcher_empty(value)
        self.assertEqual(expected_validation_error_message, context.exception.message)
def test_when_no_bot_detected__expect_execution(self):
value = ''
validate_bot_catcher_empty(value)
| 32.5
| 87
| 0.773846
|
14627cebad78eaf70a5fdf1c7c661dbc0832e71c
| 498
|
py
|
Python
|
test_tracker.py
|
dyboyce/capstone1
|
d6c2bb1246e346f3b03907bd0df44fe53d6082ea
|
[
"MIT"
] | null | null | null |
test_tracker.py
|
dyboyce/capstone1
|
d6c2bb1246e346f3b03907bd0df44fe53d6082ea
|
[
"MIT"
] | null | null | null |
test_tracker.py
|
dyboyce/capstone1
|
d6c2bb1246e346f3b03907bd0df44fe53d6082ea
|
[
"MIT"
] | null | null | null |
import unittest
import object_tracker
from yolov3.utils import Load_Yolo_model
import pandas as pd
class MyTestCase(unittest.TestCase):
def test_objecttracker(self):
yolo = Load_Yolo_model()
result = object_tracker.Object_tracking(yolo, "./IMAGES/test.mp4", "detection.avi", input_size=416, show=True, iou_threshold=0.1, rectangle_colors=(255,0,0), Track_only = ["person"])
self.assertIsInstance(result, pd.DataFrame)
if __name__ == '__main__':
unittest.main()
| 29.294118
| 190
| 0.732932
|
fe4a74e1d3479297058af73e1866f22c92bae111
| 7,731
|
py
|
Python
|
distrib/refresh_branch.py
|
jakuta-tech/Labtainers
|
f674204022ad5d13ad6bccaf02a14a283470d23f
|
[
"Apache-2.0"
] | null | null | null |
distrib/refresh_branch.py
|
jakuta-tech/Labtainers
|
f674204022ad5d13ad6bccaf02a14a283470d23f
|
[
"Apache-2.0"
] | null | null | null |
distrib/refresh_branch.py
|
jakuta-tech/Labtainers
|
f674204022ad5d13ad6bccaf02a14a283470d23f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''
This software was created by United States Government employees at
The Center for Cybersecurity and Cyber Operations (C3O)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import sys
import os
import argparse
import subprocess
sys.path.append('../scripts/labtainer-student/bin')
import LocalBase
import InspectLocalReg
import LabtainerLogging
import ParseLabtainerConfig
import registry
import LabtainerBase
'''
Force the registry associated with the current git branch (see config/registry.config)
to match the premaster registry. Intended to be called from scripts, e.g., to establish
a new branch.
'''
def pull_push(image, source_registry, dest_registry):
with_registry = '%s/%s' % (source_registry, image)
cmd = 'docker pull %s' % with_registry
print(cmd)
os.system(cmd)
cmd = 'docker tag %s/%s %s/%s' % (source_registry, image, dest_registry, image)
print(cmd)
os.system(cmd)
cmd = 'docker push %s/%s' % (dest_registry, image)
print(cmd)
os.system(cmd)
def checkDates(image, source_reg, dest_reg, no_copy, lab, logger):
dest_created, dest_user, dest_version, tag, base = InspectLocalReg.inspectLocal(image, logger, dest_reg, no_pull=True)
if dest_created is not None:
with_reg = '%s/%s' % (source_reg, image)
source_created, source_user, source_version, tag, base = InspectLocalReg.inspectLocal(image, logger, source_reg, no_pull=True)
if source_created != dest_created:
print('DIFFERENT: %s:%s source created/version %s/%s destination: %s/%s' % (lab, image, source_created,
source_version, dest_created, dest_version))
logger.debug('DIFFERENT: %s:%s source created/version %s/%s destination: %s/%s' % (lab, image, source_created,
source_version, dest_created, dest_version))
if not no_copy:
pull_push(image, source_reg, dest_reg)
else:
print('%s not in %s, would add it' % (image, dest_reg))
if not no_copy:
pull_push(image, source_reg, dest_reg)
def doLab(lab_dir, lab, role, source_reg, dest_reg, logger, no_copy):
''' use dockerfiles to determine the set of containers '''
print('Lab: %s No_copy %r' % (lab, no_copy))
docker_dir = os.path.join(lab_dir, lab, 'dockerfiles')
if not os.path.isdir(docker_dir):
return
df_list = [f for f in os.listdir(docker_dir) if os.path.isfile(os.path.join(docker_dir, f))]
for df in df_list:
if df.endswith('.swp'):
continue
try:
parts = df.split('.')
image = '%s.%s.%s' % (parts[1], parts[2], role)
container = parts[2]
except:
            print('could not get image from %s' % df)
continue
checkDates(image, source_reg, dest_reg, no_copy, lab, logger)
def doBases(source_registry, dest_registry, no_copy):
print('Comparing base images in %s to %s, and replacing content of %s if different' % (dest_registry, source_registry, dest_registry))
base_names = LabtainerBase.getBaseList()
for base in base_names:
with_registry = '%s/%s' % (source_registry, base)
print(base)
source_created, local_user = LocalBase.inspectLocal(base, lgr, source_registry)
dest_created, local_user = LocalBase.inspectLocal(base, lgr, dest_registry)
if source_created != dest_created:
print('Difference in %s, source: %s destination: %s' % (base, source_created, dest_created))
if not no_copy:
pull_push(base, source_registry, dest_registry)
def updateRegistry(source_registry, dest_registry, lgr, lab, no_copy, quiet=False):
labdir = os.path.join(os.getenv('LABTAINER_DIR'), 'labs')
if lab is not None:
doLab(labdir, lab, 'student', source_registry, dest_registry, lgr, no_copy)
else:
if not quiet:
msg = 'Will modify registry %s to match %s. Continue? (y/n)' % (dest_registry, source_registry)
response = input(msg)
if response.lower() != 'y':
print('Exiting')
exit(0)
grader = 'labtainer.grader'
checkDates(grader, source_registry, dest_registry, no_copy, 'grader', lgr)
doBases(source_registry, dest_registry, no_copy)
skip = []
with open('skip-labs') as fh:
for line in fh:
f = os.path.basename(line).strip()
#print('will skip [%s]' % f)
skip.append(f)
mycwd = os.getcwd()
os.chdir(labdir)
cmd = 'git ls-files ./ | cut -d/ -f1 | uniq'
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
output = child.communicate()
lab_list = output[0].decode('utf-8').strip().splitlines()
os.chdir(mycwd)
for lab in sorted(lab_list):
lab = lab.strip()
if lab not in skip:
doLab(labdir, lab, 'student', source_registry, dest_registry, lgr, no_copy)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare a source registry with a destination registry, and update the destination so they match')
parser.add_argument('-n', '--no_copy', action='store_true', default=False, help='Do not modify registry, just report differences')
parser.add_argument('-l', '--lab', action='store', help='only check this lab')
parser.add_argument('-q', '--quiet', action='store_true', default=False, help='Do not prompt for confirmation.')
args = parser.parse_args()
config_file = os.path.join(os.getenv('LABTAINER_DIR'), 'config', 'labtainer.config')
labtainer_config = ParseLabtainerConfig.ParseLabtainerConfig(config_file, None)
lgr = LabtainerLogging.LabtainerLogging("refresh_branch.log", 'none', config_file)
''' source is the premaster mirror '''
source_registry = labtainer_config.test_registry
branch, dest_registry = registry.getBranchRegistry()
if dest_registry is None:
print('No registry found for branch %s' % branch)
exit(1)
updateRegistry(source_registry, dest_registry, lgr, args.lab, args.no_copy, args.quiet)
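# --- Illustrative usage sketch (not part of the original script; the lab name is hypothetical) ---
# Report differences for a single lab without copying anything:
#   python3 refresh_branch.py --no_copy --lab somelab
# Mirror every lab into the current branch registry without a confirmation prompt:
#   python3 refresh_branch.py --quiet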
| 46.572289
| 147
| 0.676885
|
4bd523da3eb7ce7e5673411d45997ba29f30ce5d
| 2,590
|
py
|
Python
|
harness/determined/keras/_tf_keras_multi_gpu.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | 1
|
2020-09-23T12:02:32.000Z
|
2020-09-23T12:02:32.000Z
|
harness/determined/keras/_tf_keras_multi_gpu.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | null | null | null |
harness/determined/keras/_tf_keras_multi_gpu.py
|
ybt195/determined
|
913fdc3b81ef33c2760bdb128c8ce9179e4ab9b2
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
import determined as det
from determined import horovod
from determined.horovod import hvd
def get_horovod_config(
exp_config: Dict[str, Any],
hvd_config: horovod.HorovodContext,
profile_frequency: Optional[int],
profile_filename: str,
) -> Dict[str, Any]:
aggregation_frequency = hvd_config.aggregation_frequency
grad_updated_sizes_dict = None # type: Optional[Dict[int, List[int]]]
if aggregation_frequency > 1 and hvd_config.grad_updates_size_file:
grad_update_sizes_file_path = os.path.join(
exp_config.get("data", {}).get("data_dir", ""), hvd_config.grad_updates_size_file
)
if not os.path.isfile(grad_update_sizes_file_path):
raise AssertionError(
f"Please move {hvd_config.grad_updates_size_file} inside 'data_dir'."
)
with open(grad_update_sizes_file_path, "r") as json_file:
grad_updated_sizes_dict = json.load(json_file)
return {
"aggregation_frequency": aggregation_frequency,
"grad_updated_sizes_dict": grad_updated_sizes_dict,
"profile_frequency": profile_frequency,
"profile_filename": profile_filename,
"average_aggregated_gradients": hvd_config.average_aggregated_gradients,
}
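# --- Illustrative sketch (not part of the original module) ---
# Shape of the dict returned above, with hypothetical values; it is splatted as
# keyword arguments into hvd.DistributedOptimizer further down.
_EXAMPLE_HOROVOD_KWARGS = {
    "aggregation_frequency": 4,
    "grad_updated_sizes_dict": None,
    "profile_frequency": None,
    "profile_filename": "hvd_profile.json",
    "average_aggregated_gradients": True,
}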
def _get_multi_gpu_model_and_optimizer(
pre_compiled_model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
env: det.EnvContext,
hvd_config: horovod.HorovodContext,
profile_frequency: Optional[int],
profile_filename: str,
) -> Tuple[tf.keras.Model, tf.keras.optimizers.Optimizer]:
num_gpus = len(env.container_gpus)
new_model = pre_compiled_model
new_optimizer = optimizer
if num_gpus > 1 and not hvd_config.use:
new_model = tf.keras.utils.multi_gpu_model(pre_compiled_model, num_gpus)
# If using horovod, wrap the optimizer and check for an aggregation_frequency.
elif hvd_config.use:
# Horovod doesn't know how to handle string-based optimizers.
if isinstance(optimizer, str):
raise det.errors.InvalidExperimentException("string optimizers are not supported")
new_optimizer = hvd.DistributedOptimizer(
optimizer,
**get_horovod_config(
exp_config=env.experiment_config,
hvd_config=hvd_config,
profile_frequency=profile_frequency,
profile_filename=profile_filename,
),
)
return new_model, new_optimizer
| 37
| 94
| 0.702317
|
4c6e0b4b577636128c9e984af1bcff892f0c842e
| 182
|
py
|
Python
|
pyAudioAnalysis/data/Greenway/create_wav_file.py
|
asherif844/diarization
|
ee9c9a7d88def5110bed2a3696cc484d29def646
|
[
"Unlicense"
] | null | null | null |
pyAudioAnalysis/data/Greenway/create_wav_file.py
|
asherif844/diarization
|
ee9c9a7d88def5110bed2a3696cc484d29def646
|
[
"Unlicense"
] | null | null | null |
pyAudioAnalysis/data/Greenway/create_wav_file.py
|
asherif844/diarization
|
ee9c9a7d88def5110bed2a3696cc484d29def646
|
[
"Unlicense"
] | null | null | null |
from os import path
from pydub import AudioSegment
#source file
src="DrGCWConversation.mp3"
tgt="Conversation.wav"
sound = AudioSegment.from_mp3(src)
sound.export(tgt,format="wav")
| 20.222222
| 34
| 0.796703
|
1becdc5a7460719376e71261cca5190d16a439ba
| 36
|
py
|
Python
|
plusseg/modeling/backbone/__init__.py
|
tonysy/SegmentationToolbox.PyTorch
|
4d487dd81d0101bc5cdb7b2337776fdf1b5546ff
|
[
"MIT"
] | 465
|
2020-03-03T09:25:16.000Z
|
2022-03-30T09:34:34.000Z
|
plusseg/modeling/backbone/__init__.py
|
tonysy/SegmentationToolbox.PyTorch
|
4d487dd81d0101bc5cdb7b2337776fdf1b5546ff
|
[
"MIT"
] | 65
|
2020-03-13T12:45:29.000Z
|
2022-03-28T08:09:21.000Z
|
plusseg/modeling/backbone/__init__.py
|
tonysy/SegmentationToolbox.PyTorch
|
4d487dd81d0101bc5cdb7b2337776fdf1b5546ff
|
[
"MIT"
] | 160
|
2020-03-04T06:09:17.000Z
|
2022-03-30T02:31:38.000Z
|
from .backbone import build_backbone
| 36
| 36
| 0.888889
|
3821946a4ecc13719b3e45ff6312d8a13be30123
| 4,910
|
py
|
Python
|
transient/scan.py
|
ALSchwalm/transient
|
e3471a8c1b7e65bf29b2d1ee80c4634bc07b2088
|
[
"MIT"
] | 81
|
2020-05-16T15:42:41.000Z
|
2022-03-22T14:53:38.000Z
|
transient/scan.py
|
ALSchwalm/transient
|
e3471a8c1b7e65bf29b2d1ee80c4634bc07b2088
|
[
"MIT"
] | 146
|
2020-05-15T21:56:19.000Z
|
2022-01-14T02:29:49.000Z
|
transient/scan.py
|
ALSchwalm/transient
|
e3471a8c1b7e65bf29b2d1ee80c4634bc07b2088
|
[
"MIT"
] | 18
|
2020-05-16T16:43:01.000Z
|
2021-06-10T20:44:42.000Z
|
import base64
import datetime
import json
import logging
import os
import time
from typing import (
Optional,
List,
Dict,
Any,
)
from . import ssh
from . import utils
_PID_ROOT = "/proc"
SCAN_DATA_FD = "__TRANSIENT_DATA_FD"
SCAN_ENVIRON_SENTINEL = "__TRANSIENT_PROCESS"
class TransientInstance:
qemu_pid: int
transient_pid: int
start_time: datetime.datetime
primary_image: str
stateless: bool
name: Optional[str]
ssh_port: Optional[int]
def __init__(
self, qemu_pid: int, start_time: datetime.datetime, config: Dict[Any, Any]
):
self.name = None
self.ssh_port = None
self.__dict__.update(config)
self.start_time = start_time
self.qemu_pid = qemu_pid
def __repr__(self) -> str:
return f"TransientInstance(qemu_pid={self.qemu_pid}, start_time={self.start_time}, ...)"
def _read_pid_environ(pid_dir: str) -> Dict[str, str]:
raw_environ = open(os.path.join(pid_dir, "environ")).read()
variables = raw_environ.strip("\0").split("\0")
environ = {}
for variable in variables:
name, value = variable.split("=", maxsplit=1)
environ[name] = value
return environ
def _read_pid_start_time(pid_dir: str) -> datetime.datetime:
return datetime.datetime.fromtimestamp(os.stat(pid_dir).st_ctime)
def _read_pid_data(pid_dir: str, data_fd: int) -> Any:
with open(os.path.join(pid_dir, "fd", str(data_fd))) as f:
return json.loads(base64.b64decode(f.read()))
def find_transient_instances(
name: Optional[str] = None,
with_ssh: bool = False,
timeout: Optional[int] = None,
vmstore: Optional[str] = None,
) -> List[TransientInstance]:
"""Find running transient instances matching the given parameters
    If 'name' is specified, only instances started with an equivalent 'name'
argument will be returned. 'with_ssh' will filter for instances that
were started with '--ssh' (or other options that imply '--ssh'). If the
'timeout' option is passed, this function will block until at least one
instance matching the provided parameters is found, or a timeout occurs.
If 'vmstore' is passed, only VMs backed by the given store are considered.
Note that 'timeout' may not be passed by itself.
"""
if name is None and with_ssh is False and timeout is not None:
raise RuntimeError(
f"find_transient_instances: 'timeout' cannot be specified without either 'name' or 'with_ssh'"
)
search_start_time = time.time()
instances = []
while timeout is None or (time.time() - search_start_time < timeout):
for proc in os.listdir(_PID_ROOT):
pid_dir = os.path.join(_PID_ROOT, proc)
if os.path.isdir(pid_dir) is False:
continue
try:
environ = _read_pid_environ(pid_dir)
except:
continue
if SCAN_ENVIRON_SENTINEL not in environ:
continue
logging.debug("Found transient process with pid={}".format(proc))
start_time = _read_pid_start_time(pid_dir)
try:
data = _read_pid_data(pid_dir, int(environ[SCAN_DATA_FD]))
except json.decoder.JSONDecodeError:
# A decode error will happen if the entry is scanned between the
# time the transient instances starts and the data fd is filled
# with the actual data. Ignore the entry in this case.
logging.debug("Skipping process because data was not valid JSON")
continue
if vmstore is not None and (
"vmstore" not in data or not utils.paths_equal(data["vmstore"], vmstore)
):
logging.debug(
"Skipping process because it is not in the expected VM store ('{}' != '{}')".format(
data["vmstore"], vmstore
)
)
continue
if name is not None and ("name" not in data or data["name"] != name):
logging.debug(
"Skipping process because it is does not have the expected name ('{}' != '{}')".format(
data["name"], name
)
)
continue
if with_ssh is True and "ssh_port" not in data:
logging.debug("Skipping process because it does not have an SSH port")
continue
instances.append(TransientInstance(int(proc), start_time, data))
if timeout is None or len(instances) > 0:
break
else:
delay_between = ssh.SSH_CONNECTION_TIME_BETWEEN_TRIES
logging.info(f"Unable to locate VM. Waiting {delay_between}s before retrying")
time.sleep(delay_between)
return instances
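# --- Illustrative usage sketch (not part of the original module; the VM name is hypothetical) ---
def _example_wait_for_vm() -> None:
    # Block for up to 30 seconds until an SSH-enabled transient VM named 'my-test-vm' appears.
    for instance in find_transient_instances(name="my-test-vm", with_ssh=True, timeout=30):
        print(instance.qemu_pid, instance.start_time, instance.ssh_port)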
| 35.071429
| 107
| 0.613238
|
7f36fd509a15b1b80c1420e66f303d8fa24774e2
| 11,140
|
py
|
Python
|
homeassistant/generated/dhcp.py
|
MirkoLenz/hass-core
|
bda1f02371a70289c62576dcc7f18108c95cceb7
|
[
"Apache-2.0"
] | 1
|
2022-02-12T08:45:37.000Z
|
2022-02-12T08:45:37.000Z
|
homeassistant/generated/dhcp.py
|
MirkoLenz/hass-core
|
bda1f02371a70289c62576dcc7f18108c95cceb7
|
[
"Apache-2.0"
] | 83
|
2020-07-31T21:32:45.000Z
|
2022-03-31T10:00:54.000Z
|
homeassistant/generated/dhcp.py
|
MirkoLenz/hass-core
|
bda1f02371a70289c62576dcc7f18108c95cceb7
|
[
"Apache-2.0"
] | 4
|
2020-10-29T22:56:04.000Z
|
2020-12-12T21:51:40.000Z
|
"""Automatically generated by hassfest.
To update, run python3 -m script.hassfest
"""
# fmt: off
DHCP = [
{
"domain": "august",
"hostname": "connect",
"macaddress": "D86162*"
},
{
"domain": "august",
"hostname": "connect",
"macaddress": "B8B7F1*"
},
{
"domain": "august",
"hostname": "connect",
"macaddress": "2C9FFB*"
},
{
"domain": "august",
"hostname": "august*",
"macaddress": "E076D0*"
},
{
"domain": "axis",
"hostname": "axis-00408c*",
"macaddress": "00408C*"
},
{
"domain": "axis",
"hostname": "axis-accc8e*",
"macaddress": "ACCC8E*"
},
{
"domain": "axis",
"hostname": "axis-b8a44f*",
"macaddress": "B8A44F*"
},
{
"domain": "blink",
"hostname": "blink*",
"macaddress": "B85F98*"
},
{
"domain": "broadlink",
"macaddress": "34EA34*"
},
{
"domain": "broadlink",
"macaddress": "24DFA7*"
},
{
"domain": "broadlink",
"macaddress": "A043B0*"
},
{
"domain": "broadlink",
"macaddress": "B4430D*"
},
{
"domain": "emonitor",
"hostname": "emonitor*",
"macaddress": "0090C2*"
},
{
"domain": "flume",
"hostname": "flume-gw-*"
},
{
"domain": "flux_led",
"macaddress": "18B905*",
"hostname": "[ba][lk]*"
},
{
"domain": "flux_led",
"macaddress": "249494*",
"hostname": "[ba][lk]*"
},
{
"domain": "flux_led",
"macaddress": "7CB94C*",
"hostname": "[ba][lk]*"
},
{
"domain": "flux_led",
"macaddress": "ACCF23*",
"hostname": "[ba][lk]*"
},
{
"domain": "flux_led",
"macaddress": "B4E842*",
"hostname": "[ba][lk]*"
},
{
"domain": "flux_led",
"macaddress": "F0FE6B*",
"hostname": "[ba][lk]*"
},
{
"domain": "flux_led",
"macaddress": "8CCE4E*",
"hostname": "lwip*"
},
{
"domain": "flux_led",
"hostname": "zengge_[0-9a-f][0-9a-f]_*"
},
{
"domain": "flux_led",
"macaddress": "C82E47*",
"hostname": "sta*"
},
{
"domain": "fronius",
"macaddress": "0003AC*"
},
{
"domain": "goalzero",
"hostname": "yeti*"
},
{
"domain": "gogogate2",
"hostname": "ismartgate*"
},
{
"domain": "guardian",
"hostname": "gvc*",
"macaddress": "30AEA4*"
},
{
"domain": "guardian",
"hostname": "gvc*",
"macaddress": "B4E62D*"
},
{
"domain": "guardian",
"hostname": "guardian*",
"macaddress": "30AEA4*"
},
{
"domain": "hunterdouglas_powerview",
"hostname": "hunter*",
"macaddress": "002674*"
},
{
"domain": "isy994",
"hostname": "isy*",
"macaddress": "0021B9*"
},
{
"domain": "lyric",
"hostname": "lyric-*",
"macaddress": "48A2E6*"
},
{
"domain": "lyric",
"hostname": "lyric-*",
"macaddress": "B82CA0*"
},
{
"domain": "lyric",
"hostname": "lyric-*",
"macaddress": "00D02D*"
},
{
"domain": "myq",
"macaddress": "645299*"
},
{
"domain": "nest",
"macaddress": "18B430*"
},
{
"domain": "nest",
"macaddress": "641666*"
},
{
"domain": "nest",
"macaddress": "D8EB46*"
},
{
"domain": "nest",
"macaddress": "1C53F9*"
},
{
"domain": "nexia",
"hostname": "xl857-*",
"macaddress": "000231*"
},
{
"domain": "nuheat",
"hostname": "nuheat",
"macaddress": "002338*"
},
{
"domain": "nuki",
"hostname": "nuki_bridge_*"
},
{
"domain": "powerwall",
"hostname": "1118431-*",
"macaddress": "88DA1A*"
},
{
"domain": "powerwall",
"hostname": "1118431-*",
"macaddress": "000145*"
},
{
"domain": "rachio",
"hostname": "rachio-*",
"macaddress": "009D6B*"
},
{
"domain": "rachio",
"hostname": "rachio-*",
"macaddress": "F0038C*"
},
{
"domain": "rachio",
"hostname": "rachio-*",
"macaddress": "74C63B*"
},
{
"domain": "rainforest_eagle",
"macaddress": "D8D5B9*"
},
{
"domain": "ring",
"hostname": "ring*",
"macaddress": "0CAE7D*"
},
{
"domain": "roomba",
"hostname": "irobot-*",
"macaddress": "501479*"
},
{
"domain": "roomba",
"hostname": "roomba-*",
"macaddress": "80A589*"
},
{
"domain": "samsungtv",
"hostname": "tizen*"
},
{
"domain": "samsungtv",
"macaddress": "8CC8CD*"
},
{
"domain": "samsungtv",
"macaddress": "606BBD*"
},
{
"domain": "samsungtv",
"macaddress": "F47B5E*"
},
{
"domain": "samsungtv",
"macaddress": "4844F7*"
},
{
"domain": "screenlogic",
"hostname": "pentair: *",
"macaddress": "00C033*"
},
{
"domain": "sense",
"hostname": "sense-*",
"macaddress": "009D6B*"
},
{
"domain": "sense",
"hostname": "sense-*",
"macaddress": "DCEFCA*"
},
{
"domain": "sense",
"hostname": "sense-*",
"macaddress": "A4D578*"
},
{
"domain": "simplisafe",
"hostname": "simplisafe*",
"macaddress": "30AEA4*"
},
{
"domain": "smartthings",
"hostname": "st*",
"macaddress": "24FD5B*"
},
{
"domain": "smartthings",
"hostname": "smartthings*",
"macaddress": "24FD5B*"
},
{
"domain": "smartthings",
"hostname": "hub*",
"macaddress": "24FD5B*"
},
{
"domain": "smartthings",
"hostname": "hub*",
"macaddress": "D052A8*"
},
{
"domain": "smartthings",
"hostname": "hub*",
"macaddress": "286D97*"
},
{
"domain": "solaredge",
"hostname": "target",
"macaddress": "002702*"
},
{
"domain": "somfy_mylink",
"hostname": "somfy_*",
"macaddress": "B8B7F1*"
},
{
"domain": "squeezebox",
"hostname": "squeezebox*",
"macaddress": "000420*"
},
{
"domain": "tado",
"hostname": "tado*"
},
{
"domain": "tesla_wall_connector",
"hostname": "teslawallconnector_*",
"macaddress": "DC44271*"
},
{
"domain": "tesla_wall_connector",
"hostname": "teslawallconnector_*",
"macaddress": "98ED5C*"
},
{
"domain": "tesla_wall_connector",
"hostname": "teslawallconnector_*",
"macaddress": "4CFCAA*"
},
{
"domain": "tolo",
"hostname": "usr-tcp232-ed2"
},
{
"domain": "toon",
"hostname": "eneco-*",
"macaddress": "74C63B*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "60A4B7*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "005F67*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "1027F5*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "403F8C*"
},
{
"domain": "tplink",
"hostname": "ep*",
"macaddress": "E848B8*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "E848B8*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "909A4A*"
},
{
"domain": "tplink",
"hostname": "hs*",
"macaddress": "1C3BF3*"
},
{
"domain": "tplink",
"hostname": "hs*",
"macaddress": "50C7BF*"
},
{
"domain": "tplink",
"hostname": "hs*",
"macaddress": "68FF7B*"
},
{
"domain": "tplink",
"hostname": "hs*",
"macaddress": "98DAC4*"
},
{
"domain": "tplink",
"hostname": "hs*",
"macaddress": "B09575*"
},
{
"domain": "tplink",
"hostname": "hs*",
"macaddress": "C006C3*"
},
{
"domain": "tplink",
"hostname": "ep*",
"macaddress": "003192*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "003192*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "1C3BF3*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "50C7BF*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "68FF7B*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "98DAC4*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "B09575*"
},
{
"domain": "tplink",
"hostname": "k[lp]*",
"macaddress": "C006C3*"
},
{
"domain": "tplink",
"hostname": "lb*",
"macaddress": "1C3BF3*"
},
{
"domain": "tplink",
"hostname": "lb*",
"macaddress": "50C7BF*"
},
{
"domain": "tplink",
"hostname": "lb*",
"macaddress": "68FF7B*"
},
{
"domain": "tplink",
"hostname": "lb*",
"macaddress": "98DAC4*"
},
{
"domain": "tplink",
"hostname": "lb*",
"macaddress": "B09575*"
},
{
"domain": "tuya",
"macaddress": "105A17*"
},
{
"domain": "tuya",
"macaddress": "10D561*"
},
{
"domain": "tuya",
"macaddress": "1869D8*"
},
{
"domain": "tuya",
"macaddress": "381F8D*"
},
{
"domain": "tuya",
"macaddress": "508A06*"
},
{
"domain": "tuya",
"macaddress": "68572D*"
},
{
"domain": "tuya",
"macaddress": "708976*"
},
{
"domain": "tuya",
"macaddress": "7CF666*"
},
{
"domain": "tuya",
"macaddress": "84E342*"
},
{
"domain": "tuya",
"macaddress": "D4A651*"
},
{
"domain": "tuya",
"macaddress": "D81F12*"
},
{
"domain": "twinkly",
"hostname": "twinkly_*"
},
{
"domain": "verisure",
"macaddress": "0023C1*"
},
{
"domain": "vicare",
"macaddress": "B87424*"
},
{
"domain": "yeelight",
"hostname": "yeelink-*"
}
]
| 20.181159
| 47
| 0.402334
|
7d07eddcfbea1125404caf60a5f3e550775972bb
| 7,419
|
py
|
Python
|
reproducer/reproducer/pipeline/setup_repo.py
|
kingdido999/bugswarm
|
8ff2b3e71ca2598c354e8481c6b887cd5988816a
|
[
"BSD-3-Clause"
] | null | null | null |
reproducer/reproducer/pipeline/setup_repo.py
|
kingdido999/bugswarm
|
8ff2b3e71ca2598c354e8481c6b887cd5988816a
|
[
"BSD-3-Clause"
] | null | null | null |
reproducer/reproducer/pipeline/setup_repo.py
|
kingdido999/bugswarm
|
8ff2b3e71ca2598c354e8481c6b887cd5988816a
|
[
"BSD-3-Clause"
] | null | null | null |
import distutils.dir_util
import git
import os
import tarfile
import time
import urllib.request
import zipfile
from bugswarm.common import log
from reproducer.reproduce_exception import ReproduceError
def setup_repo(job, utils, job_dispatcher):
to_setup_repo = False
clone_repo = False
wait_for_repo_cloned = False
wait_for_repo_setup = False
build_id = job.build.build_id
if job.repo in job_dispatcher.cloned_repos and job_dispatcher.cloned_repos[job.repo] == -1:
# Already tried cloning this repository and failed. So skip it.
raise ReproduceError('Previously encountered an error while cloning a repository. Skipping.')
if build_id in job_dispatcher.workspace_locks and job_dispatcher.workspace_locks[build_id] == -1:
# Already tried setting up this repository and failed. So skip it.
raise ReproduceError('Previously encountered an error while setting up a repository. Skipping.')
# ------------ Clone repository -----------
job_dispatcher.lock.acquire()
if job.repo not in job_dispatcher.cloned_repos:
job_dispatcher.cloned_repos[job.repo] = 0
clone_repo = True
else:
if job_dispatcher.cloned_repos[job.repo] == 0:
wait_for_repo_cloned = True
job_dispatcher.lock.release()
if wait_for_repo_cloned:
while job_dispatcher.cloned_repos[job.repo] == 0:
time.sleep(3)
if job_dispatcher.cloned_repos[job.repo] == -1:
raise ReproduceError('already error in cloning repo')
if clone_repo:
try:
clone_project_repo_if_not_exists(utils, job)
except KeyboardInterrupt:
log.error('Caught a KeyboardInterrupt while cloning a repository.')
except Exception:
job_dispatcher.cloned_repos[job.repo] = -1
job_dispatcher.job_center.repos[job.repo].clone_error = True
job_dispatcher.job_center.repos[job.repo].set_all_jobs_in_repo_to_skip()
raise ReproduceError('Encountered an error while cloning a repository.')
else:
job_dispatcher.cloned_repos[job.repo] = 1
job_dispatcher.job_center.repos[job.repo].has_repo = True
# ------- setup_repo: Copy, reset, and tar -------
job_dispatcher.lock.acquire()
if build_id not in job_dispatcher.workspace_locks:
job_dispatcher.workspace_locks[build_id] = 0
to_setup_repo = True
else:
if job_dispatcher.workspace_locks[build_id] == 0:
wait_for_repo_setup = True
job_dispatcher.lock.release()
if wait_for_repo_setup:
while job_dispatcher.workspace_locks[build_id] == 0:
time.sleep(3)
if job_dispatcher.workspace_locks[build_id] == -1:
raise ReproduceError('already error in setup_repo')
if to_setup_repo:
try:
if job.resettable is False and job.github_archived is True:
download_repo(job, utils)
elif job.resettable is True:
copy_and_reset_repo(job, utils)
else:
raise ReproduceError('Job is neither resettable nor GitHub archived.')
except KeyboardInterrupt:
log.error('Caught a KeyboardInterrupt while setting up a repository.')
raise
except Exception as e:
job_dispatcher.workspace_locks[build_id] = -1
raise ReproduceError('Encountered an error while setting up a repository: {}'.format(e))
else:
job_dispatcher.workspace_locks[build_id] = 1
else:
log.debug('Job', job.job_id, 'is already set up.')
# Lastly, check if .travis.yml exists in the repository. If not, skip.
if not os.path.isfile(os.path.join(job_dispatcher.utils.get_reproducing_repo_dir(job), '.travis.yml')):
raise ReproduceError('Cannot find .travis.yml in repository. Skipping.')
def clone_project_repo_if_not_exists(utils, job):
if not utils.check_if_project_repo_exist(job.repo):
os.makedirs(utils.get_repo_storage_dir(job), exist_ok=True)
git.Repo.clone_from(utils.construct_github_repo_url(job.repo), utils.get_repo_storage_dir(job))
utils.fetch_pr_data(job)
def copy_and_reset_repo(job, utils):
log.info('Copying and resetting the repository.')
# Copy repository from stored project repositories to the workspace repository directory by untar-ing the storage
# repository tar file into the workspace directory.
with tarfile.open(utils.get_project_storage_repo_tar_path(job), 'w') as tar:
tar.add(utils.get_repo_storage_dir(job), arcname=job.repo)
repo_tar_obj = tarfile.TarFile(name=utils.get_project_storage_repo_tar_path(job))
utils.clean_workspace_job_dir(job)
repo_tar_obj.extractall(utils.get_workspace_sha_dir(job))
# git reset the workspace repository.
repo = git.Repo(utils.get_reproducing_repo_dir(job))
if job.is_pr:
repo.git.reset('--hard', job.base_sha)
repo.git.merge(job.sha)
else:
log.debug('Resetting repository to', job.sha, 'for job id', str(job.job_id) + '.')
repo.git.reset('--hard', job.sha)
def download_repo(job, utils):
# Make the workspace repository directory.
os.makedirs(utils.get_stored_repo_path(job), exist_ok=True)
# Download the repository.
if job.is_pr:
# Correct job sha is necessary for correct file path generation.
job.sha = job.travis_merge_sha
src = utils.construct_github_archive_repo_sha_url(job.repo, job.sha)
repo_unzip_name = job.repo.split('/')[1] + '-' + job.sha
log.info('Downloading the repository from the GitHub archive at {}.'.format(src))
urllib.request.urlretrieve(src, utils.get_project_storage_repo_zip_path(job))
    # Extract the downloaded GitHub archive zip into the stored repository path, then copy
    # its contents into the workspace repository directory.
repo_zip_obj = zipfile.ZipFile(utils.get_project_storage_repo_zip_path(job))
repo_zip_obj.extractall(utils.get_stored_repo_path(job))
distutils.dir_util.copy_tree(os.path.join(utils.get_stored_repo_path(job), repo_unzip_name),
utils.get_reproducing_repo_dir(job))
distutils.dir_util.copy_tree(os.path.join(utils.get_repo_storage_dir(job), '.git'),
os.path.join(utils.get_reproducing_repo_dir(job), '.git'))
def tar_repo(job, utils, dir_to_be_tar=None):
if not dir_to_be_tar:
dir_to_be_tar = utils.get_reproducing_repo_dir(job)
reproduce_tmp_path = utils.get_reproduce_tmp_dir(job)
else:
reproduce_tmp_path = os.path.join(dir_to_be_tar, 'reproduce_tmp')
tar_dst_path = os.path.join(reproduce_tmp_path, utils.config.tarfile_name)
# Archive the repository into a tar file.
tar_file_tmp_path = os.path.join(dir_to_be_tar, utils.config.tarfile_name)
with tarfile.open(tar_file_tmp_path, 'w') as tar:
tar.add(dir_to_be_tar, arcname=job.repo)
# Omitting arcname=os.path.basename(source_dir) will maintain the entire path structure of source_dir in the tar
# file. (In most situations, that's probably inconvenient.)
# Make reproduce_tmp folder in workspace repository directory.
os.makedirs(reproduce_tmp_path, exist_ok=True)
# Move the tar file into the reproduce_tmp directory.
os.rename(tar_file_tmp_path, tar_dst_path)
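# --- Illustrative sketch (not part of the original module) ---
# How the arcname= argument used in tar_repo() rewrites the member paths stored in the
# archive: members land under 'owner/repo/...' instead of the full source path.
def _example_arcname(src_dir, tar_path):
    with tarfile.open(tar_path, 'w') as tar:
        tar.add(src_dir, arcname='owner/repo')
    with tarfile.open(tar_path) as tar:
        return tar.getnames()  # e.g. ['owner/repo', 'owner/repo/README.md', ...]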
| 42.637931
| 120
| 0.701307
|
131e18a8d3c3feea90a79cd51d913551193f3196
| 3,044
|
py
|
Python
|
Spark/4.py
|
bcspragu/Machine-Learning-Projects
|
b6832cbb9bb27d7e8253300f97a3ab84b1a555dc
|
[
"MIT"
] | null | null | null |
Spark/4.py
|
bcspragu/Machine-Learning-Projects
|
b6832cbb9bb27d7e8253300f97a3ab84b1a555dc
|
[
"MIT"
] | null | null | null |
Spark/4.py
|
bcspragu/Machine-Learning-Projects
|
b6832cbb9bb27d7e8253300f97a3ab84b1a555dc
|
[
"MIT"
] | 1
|
2018-09-26T13:13:03.000Z
|
2018-09-26T13:13:03.000Z
|
import numpy as np
import time
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from operator import add
from math import sqrt
conf = SparkConf()
sc = SparkContext(conf=conf)
# Map the data to a tuple of (hour, (project code, page name), page views)
# We combine project code and page name with a delimeter of dash
def parse(i):
def result(r):
s = r.split()
# The minus 5 accounts for the fact that we want to index our array
# starting at one, not six
return (i-5, (s[0], s[1]), int(s[2]))
return result
def to_vector(r):
# Create a numpy array of 18 elements
n = np.zeros(18)
# Set the array at the index of the number of hours minus 5 to the number
    # of page views, unless it's the target value, which we store separately
target = 0
if r[0] != 18:
n[r[0]] = r[2]
else:
target = r[2]
# Our new tuple is ((project code, page name), (18-element array with
    # arr[hour-5] set to page views, target value))
# The feature vector and target value are wrapped in a tuple so they can be
# added with one call to np.add
return (r[1], (n.reshape(18,1), target))
def set_bias(r):
# r[1] is our inner tuple, r[1][0] is the feature vector, r[1][0][0] is the
# first term of the feature vector, which is the bias and should be 1
r[1][0][0] = 1
return r
def split_code_name(r):
s = r[0]
return (s[0], s[1], r[1][0], r[1][1])
def x_xtranspose(r):
two_dim = r[2]
return np.dot(two_dim, two_dim.T)
def xy_scale(r):
return np.dot(r[2].T, r[3])
def predict(w):
def result(r):
return (r[0],r[1], np.dot(w.T, r[2])[0][0], r[3])
return result
def even(r):
return len(r[1]) % 2 == 0
def odd(r):
return len(r[1]) % 2 == 1
def squared_diff(r):
return (r[2]-r[3])**2
t1 = time.time()
# This one is for the server
base = "/wikistats/{0}.txt"
# This one is for local testing
# base = "/home/bsprague/Downloads/HW3Data/{0}.txt"
rdds = []
for i in range(6,24):
f = base.format(i)
rdd = sc.textFile(f)
# We use our function-returing function to evade Spark's lazy evaluation
rdd = rdd.map(parse(i))
rdds.append(rdd)
# Combine all of our rdds
rdd = sc.union(rdds).filter(lambda r: r[1][0] == "en")
# We use our vector function from above
rdd = rdd.map(to_vector)
# We add all of the hours together, which is effectively adding a bunch of
# zeros and one page view count per column
rdd = rdd.reduceByKey(np.add)
# Set the bias term to 1
rdd = rdd.map(set_bias)
# Split the project code and project name out of the tuple we used earlier
rdd = rdd.map(split_code_name)
train = rdd.filter(even)
test = rdd.filter(odd)
nxxt = train.map(x_xtranspose)
nres = nxxt.reduce(np.add)
nxy = train.map(xy_scale)
nres2 = nxy.reduce(np.add)
nweights = np.dot(np.linalg.inv(nres), nres2.T)
pred = test.map(predict(nweights))
sq = pred.map(squared_diff)
summed = sq.reduce(add)
print(sqrt(summed/test.count()))
t2 = time.time()
print("Runtime:", t2-t1)
| 25.579832
| 79
| 0.648489
|
c903d375c56a960e6bfabd3231b47178f8431142
| 410
|
py
|
Python
|
complete/01 - 10/Problem7/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
complete/01 - 10/Problem7/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
complete/01 - 10/Problem7/main.py
|
this-jacob/project-euler
|
8f9e700e2875e84d081eade44fd2107db0a0ae12
|
[
"MIT"
] | null | null | null |
import math
def main():
primes = 1
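    # primes starts at 1 to pre-count 2: the trial-division range below also tests
    # divisibility of 2 by itself, so 2 is never printed by this loop.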
it = 2
while primes < 10001:
flag = True
for i in range(2, int(math.ceil( it ** 0.5)) + 1):
if it % i == 0:
flag = False
if it ** 0.5 == math.ceil(it ** 0.5):
flag = False
if flag:
print(it)
primes += 1
it += 1
if __name__ == '__main__':
main()
| 14.642857
| 58
| 0.404878
|
eaebbd05ca117bed5eef83bec2c032f4b3d8f2f6
| 15,752
|
py
|
Python
|
Model&Data/CVAE/main.py
|
LiangjunFeng/Generative-Any-Shot-Learning
|
693c4ab92f2eb04cc453c870782710a982f98e80
|
[
"Apache-2.0"
] | null | null | null |
Model&Data/CVAE/main.py
|
LiangjunFeng/Generative-Any-Shot-Learning
|
693c4ab92f2eb04cc453c870782710a982f98e80
|
[
"Apache-2.0"
] | null | null | null |
Model&Data/CVAE/main.py
|
LiangjunFeng/Generative-Any-Shot-Learning
|
693c4ab92f2eb04cc453c870782710a982f98e80
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import argparse
from cvae import run
# python main.py --dataset AWA1 --few_train False --generalized True --num_shots 0
# generalized ZSL
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 0 --generalized True > awa1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True > flo.log 2>&1 &
# naive feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_naive > apy.log 2>&1 &
# finetue feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_finetune > apy.log 2>&1 &
# reg feature
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > flo.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > cub.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > sun.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > awa2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset aPY --few_train False --num_shots 0 --generalized True --image_embedding res101_reg > apy.log 2>&1 &
# reg feature + att
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_GRU > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 0 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo3.log 2>&1 &
# few shot
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train False --num_shots 20 --generalized True --image_embedding res101_reg > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset CUB --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > cub0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset CUB --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > cub1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset CUB --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > cub2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset CUB --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > cub3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train False --num_shots 1 --generalized True --image_embedding res101_reg > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train False --num_shots 5 --generalized True --image_embedding res101_reg > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train False --num_shots 10 --generalized True --image_embedding res101_reg > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset SUN --few_train True --num_shots 1 --generalized True --image_embedding res101 > sun0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset SUN --few_train True --num_shots 5 --generalized True --image_embedding res101 > sun1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset SUN --few_train True --num_shots 10 --generalized True --image_embedding res101 > sun2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train False --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 1 --generalized True --image_embedding res101_naive > awa20.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 5 --generalized True --image_embedding res101_naive > awa21.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 10 --generalized True --image_embedding res101_naive > awa22.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA2 --few_train True --num_shots 20 --generalized True --image_embedding res101_naive > awa23.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train False --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 1 --generalized True --image_embedding res101 > awa10.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 5 --generalized True --image_embedding res101 > awa11.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 10 --generalized True --image_embedding res101 > awa12.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset AWA1 --few_train True --num_shots 20 --generalized True --image_embedding res101 > awa13.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive --class_embedding att > flo4.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive --class_embedding att > flo5.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive --class_embedding att > flo6.log 2>&1 &
# CUDA_VISIBLE_DEVICES=3 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive --class_embedding att > flo7.log 2>&1 &
# few shot + class
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 1 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train False --num_shots 5 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=2 nohup python -u main.py --dataset FLO --few_train False --num_shots 10 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train False --num_shots 20 --generalized True --image_embedding res101_reg --class_embedding att_GRU_biased > flo3.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 1 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo0.log 2>&1 &
# CUDA_VISIBLE_DEVICES=0 nohup python -u main.py --dataset FLO --few_train True --num_shots 5 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo1.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 10 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo2.log 2>&1 &
# CUDA_VISIBLE_DEVICES=1 nohup python -u main.py --dataset FLO --few_train True --num_shots 20 --generalized True --image_embedding res101_naive --class_embedding att_GRU_biased > flo3.log 2>&1 &
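# The one-off launches above all follow the same pattern; as a sketch (GPU index, shot count, log
# name, and embedding choices below are placeholders drawn from the runs above, not an exhaustive list):
#   CUDA_VISIBLE_DEVICES=<gpu> nohup python -u main.py --dataset <FLO|CUB|SUN|AWA1|AWA2|aPY> \
#       --few_train <True|False> --num_shots <1|5|10|20> --generalized True \
#       --image_embedding <res101|res101_naive|res101_reg> \
#       --class_embedding <att|att_GRU_biased> > <run>.log 2>&1 &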
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='FLO', help='dataset name: FLO, CUB, SUN, AWA1, AWA2, or aPY')
parser.add_argument('--few_train', default=False, type=str2bool, help='train with only a few samples (few-shot training)')
parser.add_argument('--num_shots', type=int, default=5, help='number of shots; applies to train classes when --few_train is set, otherwise to test classes')
parser.add_argument('--generalized', default=False, type=str2bool, help='enable generalized zero-shot learning')
parser.add_argument('--image_embedding', default='res101', help='image embedding, e.g. res101, res101_naive, res101_reg')
parser.add_argument('--class_embedding', default='att', help='class embedding, e.g. att, att_GRU_biased')
args = parser.parse_args()
class myArgs():
def __init__(self, args):
self.dataset = args.dataset
self.few_train = args.few_train
self.num_shots = args.num_shots
self.generalized = args.generalized
self.image_embedding = args.image_embedding
self.class_embedding = args.class_embedding
self.m = 64 # batch_size
self.n_x = 2048 # dim of feature embedding
self.n_y = 312 # dim of label embedding
        self.interNo = self.n_x // 2 # dim of hidden layer H1 (integer division keeps the dimension an int)
        self.n_epoch = 30 # number of training epochs
        self.path = '../data/' + self.dataset + '/' # dataset root path
        self.nSamples = 400 # number of generated samples per class
self.scale = 1
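        # Dataset-specific overrides (summarizing the branches below): each dataset sets its own
        # class-embedding dimension n_y, keeps the noise dimension n_z equal to it, and some also
        # adjust batch size, epoch count, generated samples per class, and the scale factor.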
if self.dataset == "FLO":
self.n_y = 1024
self.n_z = self.n_y
self.scale = 1e-9
elif self.dataset == "SUN":
self.n_y = 102
self.n_z = self.n_y
self.nSamples = 300
self.m = 256
self.n_epoch = 80
self.scale = 1e-9
elif self.dataset == "CUB":
self.n_y = 312
self.n_z = self.n_y
self.nSamples = 300
self.scale = 1e-5
elif self.dataset == "AWA1":
self.n_y = 85
self.n_z = self.n_y
elif self.dataset == "AWA2":
self.n_y = 85
self.n_z = self.n_y
elif self.dataset == "aPY":
self.n_y = 64
self.n_z = self.n_y
opt = myArgs(args)
run(opt)
| 86.076503
| 195
| 0.733875
|
a73570e78e3eab20acb36b54877978e396f033de
| 15,907
|
py
|
Python
|
test/cbapi/psc/test_devicev6_api.py
|
mtmcgrew/cbapi-python
|
6e81507ff30a57eb1f13ae829c28e6ee339d2ad1
|
[
"MIT"
] | null | null | null |
test/cbapi/psc/test_devicev6_api.py
|
mtmcgrew/cbapi-python
|
6e81507ff30a57eb1f13ae829c28e6ee339d2ad1
|
[
"MIT"
] | 1
|
2021-03-31T19:51:07.000Z
|
2021-03-31T19:51:07.000Z
|
test/cbapi/psc/test_devicev6_api.py
|
deepakmishraapi/cbresponse
|
420fa05d0f7b9d61e5682d7a69a4098f6c32e61c
|
[
"MIT"
] | null | null | null |
import pytest
from cbapi.errors import ApiError
from cbapi.psc.models import Device
from cbapi.psc.rest_api import CbPSCBaseAPI
from test.cbtest import StubResponse, patch_cbapi
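# Test pattern used throughout this module: patch_cbapi monkeypatches the API object's HTTP verbs
# (GET/POST/RAW_GET) with local stubs that assert on the requested URL and body, and StubResponse
# stands in for the server response, so no real Carbon Black PSC endpoint is ever contacted.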
def test_get_device(monkeypatch):
_was_called = False
def _get_device(url):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/devices/6023"
_was_called = True
return {"device_id": 6023, "organization_name": "thistestworks"}
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, GET=_get_device)
rc = api.select(Device, 6023)
assert _was_called
assert isinstance(rc, Device)
assert rc.device_id == 6023
assert rc.organization_name == "thistestworks"
def test_device_background_scan(monkeypatch):
_was_called = False
def _call_background_scan(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BACKGROUND_SCAN", "device_id": [6023], "options": {"toggle": "ON"}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_background_scan)
api.device_background_scan([6023], True)
assert _was_called
def test_device_bypass(monkeypatch):
_was_called = False
def _call_bypass(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BYPASS", "device_id": [6023], "options": {"toggle": "OFF"}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_bypass)
api.device_bypass([6023], False)
assert _was_called
def test_device_delete_sensor(monkeypatch):
_was_called = False
def _call_delete_sensor(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "DELETE_SENSOR", "device_id": [6023]}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_delete_sensor)
api.device_delete_sensor([6023])
assert _was_called
def test_device_uninstall_sensor(monkeypatch):
_was_called = False
def _call_uninstall_sensor(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "UNINSTALL_SENSOR", "device_id": [6023]}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_uninstall_sensor)
api.device_uninstall_sensor([6023])
assert _was_called
def test_device_quarantine(monkeypatch):
_was_called = False
def _call_quarantine(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "QUARANTINE", "device_id": [6023], "options": {"toggle": "ON"}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_quarantine)
api.device_quarantine([6023], True)
assert _was_called
def test_device_update_policy(monkeypatch):
_was_called = False
def _call_update_policy(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "UPDATE_POLICY", "device_id": [6023], "options": {"policy_id": 8675309}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_update_policy)
api.device_update_policy([6023], 8675309)
assert _was_called
def test_device_update_sensor_version(monkeypatch):
_was_called = False
def _call_update_sensor_version(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "UPDATE_SENSOR_VERSION", "device_id": [6023],
"options": {"sensor_version": {"RHEL": "2.3.4.5"}}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_call_update_sensor_version)
api.device_update_sensor_version([6023], {"RHEL": "2.3.4.5"})
assert _was_called
def test_query_device_with_all_bells_and_whistles(monkeypatch):
_was_called = False
def _run_query(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/devices/_search"
assert body == {"query": "foobar",
"criteria": {"ad_group_id": [14, 25], "os": ["LINUX"], "policy_id": [8675309],
"status": ["ALL"], "target_priority": ["HIGH"]},
"exclusions": {"sensor_version": ["0.1"]},
"sort": [{"field": "name", "order": "DESC"}]}
_was_called = True
return StubResponse({"results": [{"id": 6023, "organization_name": "thistestworks"}],
"num_found": 1})
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_run_query)
query = api.select(Device).where("foobar").set_ad_group_ids([14, 25]).set_os(["LINUX"]) \
.set_policy_ids([8675309]).set_status(["ALL"]).set_target_priorities(["HIGH"]) \
.set_exclude_sensor_versions(["0.1"]).sort_by("name", "DESC")
d = query.one()
assert _was_called
assert d.id == 6023
assert d.organization_name == "thistestworks"
def test_query_device_with_last_contact_time_as_start_end(monkeypatch):
_was_called = False
def _run_query(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/devices/_search"
assert body == {"query": "foobar",
"criteria": {"last_contact_time": {"start": "2019-09-30T12:34:56",
"end": "2019-10-01T12:00:12"}}, "exclusions": {}}
_was_called = True
return StubResponse({"results": [{"id": 6023, "organization_name": "thistestworks"}],
"num_found": 1})
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_run_query)
query = api.select(Device).where("foobar") \
.set_last_contact_time(start="2019-09-30T12:34:56", end="2019-10-01T12:00:12")
d = query.one()
assert _was_called
assert d.id == 6023
assert d.organization_name == "thistestworks"
def test_query_device_with_last_contact_time_as_range(monkeypatch):
_was_called = False
def _run_query(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/devices/_search"
assert body == {"query": "foobar", "criteria": {"last_contact_time": {"range": "-3w"}}, "exclusions": {}}
_was_called = True
return StubResponse({"results": [{"id": 6023, "organization_name": "thistestworks"}],
"num_found": 1})
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_run_query)
query = api.select(Device).where("foobar").set_last_contact_time(range="-3w")
d = query.one()
assert _was_called
assert d.id == 6023
assert d.organization_name == "thistestworks"
def test_query_device_invalid_last_contact_time_combinations():
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
with pytest.raises(ApiError):
api.select(Device).set_last_contact_time()
with pytest.raises(ApiError):
api.select(Device).set_last_contact_time(start="2019-09-30T12:34:56", end="2019-10-01T12:00:12",
range="-3w")
with pytest.raises(ApiError):
api.select(Device).set_last_contact_time(start="2019-09-30T12:34:56", range="-3w")
with pytest.raises(ApiError):
api.select(Device).set_last_contact_time(end="2019-10-01T12:00:12", range="-3w")
def test_query_device_invalid_criteria_values():
tests = [
{"method": "set_ad_group_ids", "arg": ["Bogus"]},
{"method": "set_policy_ids", "arg": ["Bogus"]},
{"method": "set_os", "arg": ["COMMODORE_64"]},
{"method": "set_status", "arg": ["Bogus"]},
{"method": "set_target_priorities", "arg": ["Bogus"]},
{"method": "set_exclude_sensor_versions", "arg": [12703]}
]
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
query = api.select(Device)
for t in tests:
meth = getattr(query, t["method"], None)
with pytest.raises(ApiError):
meth(t["arg"])
def test_query_device_invalid_sort_direction():
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
with pytest.raises(ApiError):
api.select(Device).sort_by("policy_name", "BOGUS")
def test_query_device_download(monkeypatch):
_was_called = False
def _run_download(url, query_params, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/devices/_search/download"
assert query_params == {"status": "ALL", "ad_group_id": "14,25", "policy_id": "8675309",
"target_priority": "HIGH", "query_string": "foobar", "sort_field": "name",
"sort_order": "DESC"}
_was_called = True
return "123456789,123456789,123456789"
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, RAW_GET=_run_download)
rc = api.select(Device).where("foobar").set_ad_group_ids([14, 25]).set_policy_ids([8675309]) \
.set_status(["ALL"]).set_target_priorities(["HIGH"]).sort_by("name", "DESC").download()
assert _was_called
assert rc == "123456789,123456789,123456789"
def test_query_device_do_background_scan(monkeypatch):
_was_called = False
def _background_scan(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BACKGROUND_SCAN",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}}, "options": {"toggle": "ON"}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_background_scan)
api.select(Device).where("foobar").background_scan(True)
assert _was_called
def test_query_device_do_bypass(monkeypatch):
_was_called = False
def _bypass(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BYPASS",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}}, "options": {"toggle": "OFF"}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_bypass)
api.select(Device).where("foobar").bypass(False)
assert _was_called
def test_query_device_do_delete_sensor(monkeypatch):
_was_called = False
def _delete_sensor(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "DELETE_SENSOR",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_delete_sensor)
api.select(Device).where("foobar").delete_sensor()
assert _was_called
def test_query_device_do_uninstall_sensor(monkeypatch):
_was_called = False
def _uninstall_sensor(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "UNINSTALL_SENSOR",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_uninstall_sensor)
api.select(Device).where("foobar").uninstall_sensor()
assert _was_called
def test_query_device_do_quarantine(monkeypatch):
_was_called = False
def _quarantine(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "QUARANTINE",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}}, "options": {"toggle": "ON"}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_quarantine)
api.select(Device).where("foobar").quarantine(True)
assert _was_called
def test_query_device_do_update_policy(monkeypatch):
_was_called = False
def _update_policy(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "UPDATE_POLICY",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}},
"options": {"policy_id": 8675309}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234", org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_update_policy)
api.select(Device).where("foobar").update_policy(8675309)
assert _was_called
def test_query_device_do_update_sensor_version(monkeypatch):
_was_called = False
def _update_sensor_version(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "UPDATE_SENSOR_VERSION",
"search": {"query": "foobar", "criteria": {}, "exclusions": {}},
"options": {"sensor_version": {"RHEL": "2.3.4.5"}}}
_was_called = True
return StubResponse(None, 204)
api = CbPSCBaseAPI(url="https://example.com", token="ABCD/1234",
org_key="Z100", ssl_verify=True)
patch_cbapi(monkeypatch, api, POST=_update_sensor_version)
api.select(Device).where("foobar").update_sensor_version({"RHEL": "2.3.4.5"})
assert _was_called
| 41.424479
| 118
| 0.649337
|