| hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
04877321d7346de18c008eda4da9ef7235b7af55
| 1,545
|
py
|
Python
|
setup.py
|
dyyni/pyVat
|
bb6aaeb4abcf1cfddbd5f7ea57521944b33fdac6
|
[
"MIT"
] | 3
|
2020-09-19T23:08:01.000Z
|
2022-02-07T16:53:31.000Z
|
setup.py
|
dyyni/pyVat
|
bb6aaeb4abcf1cfddbd5f7ea57521944b33fdac6
|
[
"MIT"
] | null | null | null |
setup.py
|
dyyni/pyVat
|
bb6aaeb4abcf1cfddbd5f7ea57521944b33fdac6
|
[
"MIT"
] | 4
|
2020-03-02T19:12:35.000Z
|
2020-10-02T10:42:59.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="vat-format-checker",
version="0.0.4",
author="Radu Boncea",
author_email="radu.boncea@gmail.com",
description="A library for checking on European VAT formats",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/agilegeeks/pyVat",
packages=setuptools.find_packages(exclude=['tests']),
entry_points={
},
install_requires=[
],
python_requires='>=2.6',
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Financial and Insurance Industry",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Database :: Front-Ends",
"Topic :: Office/Business :: Financial :: Accounting",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
)
| 35.113636
| 71
| 0.611003
|
c0408fef7e979c6afea86ff93f59d7bfa494e495
| 2,522
|
py
|
Python
|
python/src/nnabla/backward_function/log_sigmoid.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:49:19.000Z
|
2020-08-03T12:49:19.000Z
|
python/src/nnabla/backward_function/log_sigmoid.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | 1
|
2020-11-09T07:33:29.000Z
|
2020-11-09T07:33:29.000Z
|
python/src/nnabla/backward_function/log_sigmoid.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.functions as F
from .backward_function import BackwardFunction
class LogSigmoidBackward(BackwardFunction):
@property
def name(self):
return 'LogSigmoidBackward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
# Inputs
x0 = inputs[0].data
dy = inputs[1].data
# Outputs
dx0 = outputs[0].data
# Grads of inputs
g_x0 = inputs[0].grad
g_dy = inputs[1].grad
# Grads of outputs
g_dx0 = outputs[0].grad
# w.r.t. x0
sigmoid = F.sigmoid(x0)
if prop_down[0]:
if accum[0]:
g_x0 += g_dx0 * dy * sigmoid * (sigmoid - 1.0)
else:
g_x0.copy_from(g_dx0 * dy * sigmoid * (sigmoid - 1.0))
# w.r.t. dy
if prop_down[1]:
inp = nn.Variable(x0.shape).apply(
data=x0, grad=g_dy, need_grad=True)
out = nn.Variable(dy.shape).apply(grad=g_dx0)
self.forward_func.backward([inp], [out], accum=[accum[1]])
| 35.027778
| 74
| 0.615385
|
a5b8d04f0ba7c968777d09ec3967bffc158a06fa
| 6,988
|
py
|
Python
|
MidiController.py
|
mattipt/obs-nk
|
8bde655d1f92d1f0b528ade8b6ad5aadcd12ee5d
|
[
"MIT"
] | 3
|
2020-06-07T01:28:32.000Z
|
2020-09-24T22:51:09.000Z
|
MidiController.py
|
mattipt/obs-nk
|
8bde655d1f92d1f0b528ade8b6ad5aadcd12ee5d
|
[
"MIT"
] | null | null | null |
MidiController.py
|
mattipt/obs-nk
|
8bde655d1f92d1f0b528ade8b6ad5aadcd12ee5d
|
[
"MIT"
] | null | null | null |
import mido
import sys
import time
from collections import OrderedDict
class Controller:
def __init__(self, name='', channel=0):
"""
Initialise a connection to a MIDI device.
Makes a connection to the first MIDI device matching the substring.
Args:
name (str): Substring of the device name, to match.
channel (int): MIDI channel this controller uses.
Note that this requires all controls on this device to use the same channel.
"""
devices = [i for i in mido.get_ioport_names() if name in i]
if len(devices) == 0:
sys.exit('No controller devices found!')
if len(devices) > 1:
sys.stderr.write(
'Warning: multiple possible controller devices found: {}. Selecting first one.\n'.format(devices))
device = devices[0]
self.midi_port = mido.open_ioport(device, autoreset=True)
self.channel = channel
self.controls = dict()
self.leds = dict()
self.event_queue = OrderedDict()
def link(self, obs_connection):
self.obs_connection = obs_connection
self.obs_connection.link(self)
def add_fader(self, fader):
control = self.controls[fader['control']] = {'type': 'fader',
'min_value': fader.get('min_value', 0),
'max_value': fader.get('max_value', 127)}
if 'action' in fader.keys():
action_map = {
'volume': self.obs_connection.set_volume,
'sync': self.obs_connection.set_sync_offset
}
if fader['action'] in action_map.keys():
control['action'] = action_map[fader['action']]
if 'source' in fader.keys():
control['target'] = self.sources[fader['source']]
# Buttons are momentary and send an 'on' value when pressed and 'off' value when released
def add_button(self, button):
control = self.controls[button['control']] = {'type': 'button',
'min_value': button.get('off_value', 0),
'max_value': button.get('on_value', 127)}
if 'action' in button.keys():
action_map = {
'prev_scene': self.obs_connection.prev_scene,
'next_scene': self.obs_connection.next_scene,
'transition': self.obs_connection.transition,
'stream': self.obs_connection.set_stream,
'record': self.obs_connection.set_record,
'monitor': self.obs_connection.set_monitor
}
if button['action'] in action_map.keys():
control['action'] = action_map[button['action']]
if 'source' in button.keys():
control['target'] = self.sources[button['source']]
if 'led' in button.keys():
self.leds[button['led']] = button['control']
# Toggles are buttons with an internal on/off state. They send the on value and off value alternately
def add_toggle(self, toggle):
control = self.controls[toggle['control']] = {'type': 'toggle',
'min_value': toggle.get('off_value', 0),
'max_value': toggle.get('on_value', 127)}
if 'action' in toggle.keys():
action_map = {
'stream': self.obs_connection.set_stream,
'record': self.obs_connection.set_record,
'monitor': self.obs_connection.set_monitor
}
if toggle['action'] in action_map.keys():
control['action'] = action_map[toggle['action']]
if 'source' in toggle.keys():
control['target'] = self.sources[toggle['source']]
if 'led' in toggle.keys():
self.leds[toggle['led']] = toggle['control']
def configure(self, controller_config):
# print(controller_config)
self.sources = controller_config['sources']
print('Channel assignment:')
for num, name in self.sources.items():
print(' - {}: {}'.format(num, name))
for fader in controller_config['faders']:
self.add_fader(fader)
for button in controller_config['buttons']:
self.add_button(button)
for toggle in controller_config['toggles']:
self.add_toggle(toggle)
self.obs_connection.query_state()
def process_message(self, msg):
#print('Received message {}'.format(msg))
# Ignore message if the control has not been added:
if msg.control not in self.controls.keys():
return
# Buttons store the highest value and ignore release event; faders and toggles store the latest value
if self.controls[msg.control]['type'] == 'button':
if msg.value != self.controls[msg.control]['min_value']:
self.event_queue[msg.control] = max(
self.event_queue.get(msg.control, 0), msg.value)
else:
self.event_queue[msg.control] = msg.value
def set_state(self, control, state):
value = 127 if state == True else 0
msg = mido.Message(
'control_change', channel=self.channel, control=self.leds[control], value=value)
self.midi_port.send(msg)
def process_events(self):
# Wait until a message arrives
self.process_message(self.midi_port.receive())
# Process all remaining messages
for msg in self.midi_port.iter_pending():
self.process_message(msg)
def dispatch_commands(self):
for control, value in self.event_queue.items():
#print('{} => {}'.format(control, value))
ctl = self.controls[control]
if 'action' in ctl.keys():
if ctl['type'] == 'fader':
scaled_value = float(
value - ctl['min_value']) / (ctl['max_value'] - ctl['min_value'])
ctl['action'](ctl['target'], scaled_value)
elif ctl['type'] == 'toggle':
ctl_value = True if value == ctl['max_value'] else False
if 'target' in ctl.keys():
ctl['action'](ctl['target'], ctl_value)
else:
ctl['action'](ctl_value)
else: # Button
if 'target' in ctl.keys():
ctl['action'](ctl['target'])
else:
ctl['action']()
self.event_queue.clear()
def event_loop(self):
try:
while True:
self.process_events()
self.dispatch_commands()
except KeyboardInterrupt:
pass
def __del__(self):
self.midi_port.close()
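# --- Illustrative sketch, not part of the original file: the controller_config shape
# implied by configure()/add_fader()/add_button()/add_toggle() above. The device name,
# control numbers and source ids below are assumptions for the example only.
# example_config = {
#     'sources': {1: 'Mic/Aux', 2: 'Desktop Audio'},
#     'faders':  [{'control': 0, 'action': 'volume', 'source': 1}],
#     'buttons': [{'control': 32, 'action': 'transition'}],
#     'toggles': [{'control': 48, 'action': 'stream', 'led': 48}],
# }
# controller = Controller(name='nanoKONTROL', channel=0)
# controller.link(obs_connection)      # obs_connection must provide the mapped actions
# controller.configure(example_config)
# controller.event_loop()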
| 41.844311
| 114
| 0.543646
|
964af8e2168a9c6e35356fa640499fb7072c3b24
| 3,149
|
py
|
Python
|
implementations/PointNet/MLP_tf2.py
|
PointCloudYC/PointCloud-Architectures
|
ff38033401badf264573c01c9d836b148f7d6e4a
|
[
"MIT"
] | 5
|
2020-08-17T04:23:04.000Z
|
2021-10-02T03:30:08.000Z
|
implementations/PointNet/MLP_tf2.py
|
PointCloudYC/PointCloud-Architectures
|
ff38033401badf264573c01c9d836b148f7d6e4a
|
[
"MIT"
] | null | null | null |
implementations/PointNet/MLP_tf2.py
|
PointCloudYC/PointCloud-Architectures
|
ff38033401badf264573c01c9d836b148f7d6e4a
|
[
"MIT"
] | 1
|
2021-03-18T03:29:10.000Z
|
2021-03-18T03:29:10.000Z
|
'''
1. import modules
'''
import numpy as np
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Conv2D, Activation, BatchNormalization
import tensorflow as tf
np.random.seed(1234)
tf.random.set_seed(1234)
'''
2. load data
'''
(x_train,y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train=(x_train.reshape(-1,784)/255).astype(np.float32)
x_test=(x_test.reshape(-1,784)/255).astype(np.float32)
y_train = np.eye(10)[y_train].astype(np.float32)
y_test = np.eye(10)[y_test].astype(np.float32)
'''
3. build a model
'''
# method 1
model = Sequential([
Dense(200,activation='relu',input_shape=(784,)),
Dense(10,activation='softmax')
])
model.summary()
# method 2
input = Input(shape=(784,))
x=Dense(200,activation='relu')(input)
output=Dense(10,activation='softmax')(x)
model = Model(inputs=input, outputs=output)
model.summary()
# method 3
class MLP(Model):
def __init__(self):
super().__init__()
self.dense=Dense(200,activation='relu')
self.out=Dense(10,activation='softmax')
def call(self,x):
x=self.dense(x)
y=self.out(x)
return y
model = MLP()
# note: use (None, 784) rather than (784,), which is used in the layer (check method 1)
model.build(input_shape=(None,784))
model.summary()
'''
4. compile a model
'''
criterion = tf.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
model.compile(optimizer=optimizer,loss=criterion,metrics=['accuracy'])
'''
5. train and evaluate a model
'''
# method1 use built-in functions
model.fit(x_train,y_train,epochs=2,batch_size=100)
loss, accuracy =model.evaluate(x_test,y_test)
print("loss is {}, accuracy is {}".format(loss,accuracy))
# method2 write customized loop
# define some TF functions
@tf.function
def compute_loss(label, pred):
return criterion(label, pred)
@tf.function
def train_step(x, t):
with tf.GradientTape() as tape:
preds = model(x)
loss = compute_loss(t, preds)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss(loss)
train_acc(t, preds)
return preds
@tf.function
def test_step(x, t):
preds = model(x)
loss = compute_loss(t, preds)
test_loss(loss)
test_acc(t, preds)
return preds
epochs = 2
batch_size = 100
n_batches = x_train.shape[0] // batch_size
train_loss = tf.keras.metrics.Mean()
train_acc = tf.keras.metrics.CategoricalAccuracy()
test_loss = tf.keras.metrics.Mean()
test_acc = tf.keras.metrics.CategoricalAccuracy()
for epoch in range(epochs):
from sklearn.utils import shuffle
_x_train, _y_train = shuffle(x_train, y_train, random_state=42)
for batch in range(n_batches):
start = batch * batch_size
end = start + batch_size
train_step(_x_train[start:end], _y_train[start:end])
if epoch % 5 == 4 or epoch == epochs - 1:
preds = test_step(x_test, y_test)
print('Epoch: {}, Valid Cost: {:.3f}, Valid Acc: {:.3f}'.format(
epoch+1,
test_loss.result(),
test_acc.result()
))
| 26.024793
| 88
| 0.68625
|
cd105c9c8cab374e6086d28bf0a37d91765b3663
| 2,363
|
py
|
Python
|
sagas/nlu/uni_impl_knp.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 3
|
2020-01-11T13:55:38.000Z
|
2020-08-25T22:34:15.000Z
|
sagas/nlu/uni_impl_knp.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | null | null | null |
sagas/nlu/uni_impl_knp.py
|
samlet/stack
|
47db17fd4fdab264032f224dca31a4bb1d19b754
|
[
"Apache-2.0"
] | 1
|
2021-01-01T05:21:44.000Z
|
2021-01-01T05:21:44.000Z
|
from typing import Text, Any, Dict, List
from sagas.nlu.uni_intf import RootWordImpl, WordIntf, SentenceIntf
class KnpWordImpl(WordIntf):
def __init__(self, data, deps):
self.deps = deps
super().__init__(data)
def setup(self, tag):
from sagas.ja.knp_helper import get_by_keyset, tag_pos, pos_list, entity_list, get_segments
if tag.parent_id == -1:
governor = 0
else:
governor = tag.parent_id + 1
idx = tag.tag_id + 1 # start from 1
text = "".join(mrph.midasi for mrph in tag.mrph_list())
# lemma = tag.mrph_list()[0].midasi
repname = tag.normalized_repname.split('/')
predict_lemma = repname[0]
predict_phonetic = repname[1] if len(repname) > 1 else predict_lemma
rel = get_by_keyset(self.deps, {tag.tag_id, tag.parent_id})
if rel is None:
rel = tag.dpndtype if governor!=0 else 'root'
features = {'index': idx, 'text': text, 'lemma': predict_lemma, 'phonetic':predict_phonetic,
'upos': tag_pos(tag), 'xpos': '_'.join(pos_list(tag)),
'feats': tag.fstring, 'governor': governor,
'dependency_relation': rel,
'entity': entity_list(tag),
'segments': get_segments(tag)
}
return features
class KnpSentImpl(SentenceIntf):
def __init__(self, sent:Any, text:Text, predicts, dep_sets):
self.dep_sets = dep_sets
super(KnpSentImpl, self).__init__(sent, text, predicts)
def setup(self, sent):
words = []
for tag in sent.tag_list():
words.append(KnpWordImpl(tag, self.dep_sets))
deps = []
return words, deps
class KnpParserImpl(object):
"""
>>> from sagas.nlu.uni_viz_checker import *
>>> viz_check(KnpParserImpl, 'ja', '私の趣味は、多くの小旅行をすることです。')
"""
def __init__(self, lang):
self.lang = lang
def __call__(self, sents):
import sagas.ja.knp_helper as kh
from sagas.ja.knp_helper import extract_predicates
result = kh.knp.parse(sents)
dep_sets, _, _, predict_tuples = extract_predicates(result, verbose=False)
return KnpSentImpl(result, text=sents,
predicts=predict_tuples,
dep_sets=dep_sets)
| 36.921875
| 100
| 0.595853
|
8d0e47d4ce33cf7b9c2f1613e397913ea17b9e86
| 2,736
|
py
|
Python
|
saleor/api/customer/views.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/api/customer/views.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/api/customer/views.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db.models import Q
from rest_framework import pagination
from rest_framework import generics
from django.contrib.auth import get_user_model
from .pagination import CustomPagination
from .serializers import (
CustomerListSerializer,
CreditWorthyCustomerSerializer,
CustomerUpdateSerializer,
)
from ...customer.models import Customer as Table
import logging
User = get_user_model()
debug_logger = logging.getLogger('debug_logger')
info_logger = logging.getLogger('info_logger')
error_logger = logging.getLogger('error_logger')
class CreditWorthyCustomerListAPIView(generics.ListAPIView):
serializer_class = CreditWorthyCustomerSerializer
def get_queryset(self, *args, **kwargs):
queryset_list = Table.objects.filter(creditable=True)
query = self.request.GET.get('q')
if query:
queryset_list = queryset_list.filter(
Q(name__icontains=query)
).distinct()
return queryset_list
class CustomerListAPIView(generics.ListAPIView):
serializer_class = CustomerListSerializer
def get_queryset(self, *args, **kwargs):
queryset_list = Table.objects.all()
query = self.request.GET.get('q')
if query:
queryset_list = queryset_list.filter(
Q(name__icontains=query)|
Q(mobile__icontains=query)
).distinct()
return queryset_list
class CustomerDetailAPIView(generics.RetrieveAPIView):
queryset = Table.objects.all()
serializer_class = CustomerListSerializer
class CustomerUpdateAPIView(generics.RetrieveUpdateAPIView):
queryset = Table.objects.all()
serializer_class = CustomerUpdateSerializer
class CustomerPagListAPIView(generics.ListAPIView):
serializer_class = CustomerListSerializer
pagination_class = CustomPagination
queryset = Table.objects.all()
def get_queryset(self, *args, **kwargs):
queryset_list = Table.objects.all().select_related()
query = self.request.GET.get('q')
page_size = 'page_size'
if self.request.GET.get(page_size):
pagination.PageNumberPagination.page_size = self.request.GET.get(page_size)
else:
pagination.PageNumberPagination.page_size = 10
if self.request.GET.get('date'):
queryset_list = queryset_list.filter(date_joined__icontains=self.request.GET.get('date'))
if query:
queryset_list = queryset_list.filter(
Q(name__icontains=query) |
Q(mobile__icontains=query) |
Q(email__icontains=query)
).distinct()
return queryset_list
| 33.777778
| 101
| 0.678363
|
8d7f4aa61304e71154004d3be42322d4c015e5c8
| 8,086
|
py
|
Python
|
aiochannels/channel.py
|
isanich/aiochannels
|
0f2fc1466ade008bf9c470b3e681412ddbb01a73
|
[
"MIT"
] | null | null | null |
aiochannels/channel.py
|
isanich/aiochannels
|
0f2fc1466ade008bf9c470b3e681412ddbb01a73
|
[
"MIT"
] | null | null | null |
aiochannels/channel.py
|
isanich/aiochannels
|
0f2fc1466ade008bf9c470b3e681412ddbb01a73
|
[
"MIT"
] | null | null | null |
import asyncio
import weakref
import logging
from collections import deque
from collections.abc import AsyncIterable
from .aiterable_deque import AiterableDeque
from .utils import asyncinit, asynclshift
log = logging.getLogger(__name__)
class ChannelError(Exception):
pass
@asyncinit
class Channel(AsyncIterable):
@asynclshift
class Sender:
__slots__ = ('channel', '_send_queue', '_pending_channel_task', '_pending_self_task')
def __init__(self, channel, bs):
self.channel = channel
self._send_queue = asyncio.Queue(maxsize=bs)
self._pending_channel_task = None
self._pending_self_task = None
async def send(self, data):
self._pending_self_task = self.channel.loop.create_task(self._send_queue.put(data))
await self._pending_self_task
if self.channel._run_channel_task.done():
                raise ChannelError('Channel loop stopped with error!') from self.channel._loop_task_exception
@property
def is_attached(self):
return self in self.channel._senders
async def detach(self):
if self.is_attached:
if self._pending_self_task and not self._pending_self_task.done():
self._pending_self_task.cancel()
if self._pending_channel_task and not self._pending_channel_task.done():
self._pending_channel_task.cancel()
await self.channel._senders.remove(self)
async def attach(self):
if not self.is_attached:
await self.channel._senders.append(self)
async def __alshift__(self, data):
await self.send(data)
class Getter(AsyncIterable):
__slots__ = ('channel', '_received_queue', '_pending_channel_task',
'_pending_self_task', '_callbacks', '_silent_task')
def __init__(self, channel, bs, silent):
self.channel = channel
self._received_queue = asyncio.Queue(maxsize=bs)
self._callbacks = []
self._pending_channel_task = None
self._pending_self_task = None
self._silent_task = channel.loop.create_task(self._get_silently()) if silent else None
async def _get_silently(self):
            await asyncio.sleep(0) # let the getter finish init and attach
while self.is_attached:
await self.get()
async def get_forever(self):
while self.is_attached:
yield await self.get()
async def get(self):
self.channel._getters_awaiting.set()
self._pending_self_task = self.channel.loop.create_task(self._received_queue.get())
for cb in self._callbacks:
self._pending_self_task.add_done_callback(cb)
data = await self._pending_self_task
self._received_queue.task_done()
if self.channel._run_channel_task.done():
                raise ChannelError('Channel loop stopped with error!') from self.channel._loop_task_exception
else:
return data
@property
def is_attached(self):
return self in self.channel._getters
async def attach(self):
if not self.is_attached:
await self.channel._getters.append(self)
if self._silent_task:
self._silent_task = self.channel.loop.create_task(self._get_silently())
async def detach(self):
if self.is_attached:
if self._pending_self_task and not self._pending_self_task.done():
self._pending_self_task.cancel()
if self._pending_channel_task and not self._pending_channel_task.done():
self._pending_channel_task.cancel()
if self._silent_task:
self._silent_task.cancel()
await self.channel._getters.remove(self)
def add_callback(self, callback):
def get_wrapper(cb):
def wrapper(task):
try:
res = task.result()
if asyncio.iscoroutinefunction(cb):
self.channel.loop.create_task(cb(res))
else:
cb(res)
except asyncio.CancelledError: # getter is detached
pass
except Exception:
raise
wrapper.cb = cb
return wrapper
self._callbacks.append(get_wrapper(callback))
def remove_callback(self, callback):
for cb_wrapper in self._callbacks:
if cb_wrapper.cb is callback:
self._callbacks.remove(cb_wrapper)
break
async def __aiter__(self):
while not self._received_queue.empty():
data = await self._received_queue.get()
self._received_queue.task_done()
yield data
async def __ainit__(self, buffer_size=1):
        # AiterableDeque can be edited during async iteration, so no additional mutex is needed
self._senders = await AiterableDeque()
self._getters = await AiterableDeque()
self._getters_awaiting = asyncio.Event()
self.buffer_size = buffer_size
self.loop = asyncio.get_event_loop()
self._run_channel_task = self.loop.create_task(self._run_channel())
self._run_channel_task.add_done_callback(self._handle_channel_loop_stop)
self._loop_task_exception = ChannelError()
self._finalizer = weakref.finalize(self, self._cancel_pipe_task)
async def new_sender(self):
sender = Channel.Sender(self, self.buffer_size)
await self._senders.append(sender)
return sender
async def new_getter(self, *, silent=False):
getter = Channel.Getter(self, self.buffer_size, silent)
await self._getters.append(getter)
return getter
def close(self):
self._cancel_pipe_task()
async def _run_channel(self):
while await self._getters_awaiting.wait():
async for sender in self._senders:
if sender._send_queue.empty():
if sender._pending_self_task and not sender._pending_self_task.done():
await sender._pending_self_task
else:
continue
sender._pending_channel_task = self.loop.create_task(sender._send_queue.get())
data = await sender._pending_channel_task
sender._send_queue.task_done()
self._getters_awaiting.clear()
async for getter in self._getters:
getter._pending_channel_task = self.loop.create_task(getter._received_queue.put(data))
await getter._pending_channel_task
await asyncio.sleep(0)
def _handle_channel_loop_stop(self, future):
async def detach_all():
async for node in self._senders:
await node.detach()
async for node in self._getters:
await node.detach()
try:
future.result()
except Exception as e:
self._loop_task_exception = e
trace = getattr(future, '_source_traceback', None)
full_trace = ''.join(trace.format()) if trace else 'Not available.'
log.error(f'Channel loop error!\nFull traceback:\n{full_trace}\n'
f'Exc info:\n', exc_info=e)
self.loop.create_task(detach_all())
def _cancel_pipe_task(self):
if not self.loop.is_closed():
self._run_channel_task.cancel()
async def __aiter__(self):
async for sender in self._senders:
if sender._send_queue.empty():
continue
data = await sender._send_queue.get()
sender._send_queue.task_done()
yield data
| 36.26009
| 108
| 0.596463
|
666d6ca0ce71cb2078dee1048f68dfc1438c039c
| 1,008
|
py
|
Python
|
ocdskit/cli/commands/package_records.py
|
mariob0y/ocdskit
|
be377c35644d93b037e7ff7a6ad3014ccb20747f
|
[
"BSD-3-Clause"
] | 12
|
2018-12-04T08:53:33.000Z
|
2022-01-17T18:23:45.000Z
|
ocdskit/cli/commands/package_records.py
|
mariob0y/ocdskit
|
be377c35644d93b037e7ff7a6ad3014ccb20747f
|
[
"BSD-3-Clause"
] | 166
|
2017-11-19T01:42:19.000Z
|
2022-02-10T22:04:31.000Z
|
ocdskit/cli/commands/package_records.py
|
mariob0y/ocdskit
|
be377c35644d93b037e7ff7a6ad3014ccb20747f
|
[
"BSD-3-Clause"
] | 5
|
2018-08-01T16:59:08.000Z
|
2021-06-16T14:01:36.000Z
|
from ocdskit.cli.commands.base import OCDSCommand
from ocdskit.combine import package_records
from ocdskit.util import grouper
class Command(OCDSCommand):
name = 'package-records'
help = 'reads records from standard input, and prints one record package'
def add_arguments(self):
self.add_argument('extension', help='add this extension to the package', nargs='*')
self.add_argument('--size', type=int, help='the maximum number of records per package')
self.add_package_arguments('record')
def handle(self):
kwargs = self.parse_package_arguments()
kwargs['extensions'] = self.args.extension
if self.args.size: # assume `--size` is reasonable
for data in grouper(self.items(), self.args.size):
output = package_records(list(filter(None, data)), **kwargs)
self.print(output)
else:
output = package_records(self.items(), **kwargs)
self.print(output, streaming=True)
| 37.333333
| 95
| 0.662698
|
968ac7c6a3b3a0c38878e8754f080a2c04690651
| 4,120
|
py
|
Python
|
pytorch3d/io/pluggable_formats.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | 5
|
2021-01-26T11:57:26.000Z
|
2021-06-24T14:56:07.000Z
|
pytorch3d/io/pluggable_formats.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch3d/io/pluggable_formats.py
|
shubham-goel/pytorch3d
|
e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21
|
[
"BSD-3-Clause"
] | 1
|
2021-07-29T12:11:44.000Z
|
2021-07-29T12:11:44.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Optional, Tuple, Union
from iopath.common.file_io import PathManager
from pytorch3d.structures import Meshes, Pointclouds
"""
This module has the base classes which must be extended to define
an interpreter for loading and saving data in a particular format.
These can be registered on an IO object so that they can be used in
its load_* and save_* functions.
"""
def endswith(path, suffixes: Tuple[str, ...]) -> bool:
"""
Returns whether the path ends with one of the given suffixes.
If `path` is not actually a path, returns True. This is useful
for allowing interpreters to bypass inappropriate paths, but
always accepting streams.
"""
if isinstance(path, Path):
return path.suffix.lower() in suffixes
if isinstance(path, str):
return path.lower().endswith(suffixes)
return True
class MeshFormatInterpreter:
"""
This is a base class for an interpreter which can read or write
a mesh in a particular format.
"""
def read(
self,
path: Union[str, Path],
include_textures: bool,
device,
path_manager: PathManager,
**kwargs,
) -> Optional[Meshes]:
"""
Read the data from the specified file and return it as
a Meshes object.
Args:
path: path to load.
include_textures: whether to try to load texture information.
device: torch.device to load data on to.
path_manager: PathManager to interpret the path.
Returns:
None if self is not the appropriate object to interpret the given
path.
Otherwise, the read Meshes object.
"""
raise NotImplementedError()
def save(
self,
data: Meshes,
path: Union[str, Path],
path_manager: PathManager,
binary: Optional[bool],
**kwargs,
) -> bool:
"""
Save the given Meshes object to the given path.
Args:
data: mesh to save
path: path to save to, which may be overwritten.
path_manager: PathManager to interpret the path.
binary: If there is a choice, whether to save in a binary format.
Returns:
False: if self is not the appropriate object to write to the given path.
True: on success.
"""
raise NotImplementedError()
class PointcloudFormatInterpreter:
"""
This is a base class for an interpreter which can read or write
a point cloud in a particular format.
"""
def read(
self, path: Union[str, Path], device, path_manager: PathManager, **kwargs
) -> Optional[Pointclouds]:
"""
Read the data from the specified file and return it as
a Pointclouds object.
Args:
path: path to load.
device: torch.device to load data on to.
path_manager: PathManager to interpret the path.
Returns:
None if self is not the appropriate object to interpret the given
path.
Otherwise, the read Pointclouds object.
"""
raise NotImplementedError()
def save(
self,
data: Pointclouds,
path: Union[str, Path],
path_manager: PathManager,
binary: Optional[bool],
**kwargs,
) -> bool:
"""
Save the given Pointclouds object to the given path.
Args:
data: point cloud object to save
path: path to save to, which may be overwritten.
path_manager: PathManager to interpret the path.
binary: If there is a choice, whether to save in a binary format.
Returns:
False: if self is not the appropriate object to write to the given path.
True: on success.
"""
raise NotImplementedError()
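# --- Illustrative sketch, not part of the original file: a minimal interpreter for a
# hypothetical ".xyz" mesh format, showing how the endswith() helper above is meant to
# gate paths so other interpreters can handle everything else. All names are assumptions.
# class XyzMeshFormat(MeshFormatInterpreter):
#     def read(self, path, include_textures, device, path_manager, **kwargs):
#         if not endswith(path, (".xyz",)):
#             return None                 # not ours; let another interpreter try
#         ...                             # parse the file and build a Meshes object
#     def save(self, data, path, path_manager, binary, **kwargs):
#         if not endswith(path, (".xyz",)):
#             return False                # signal "wrong interpreter" to the caller
#         ...                             # write the mesh, then return True
# The instance would then be registered on an IO object so that its load_*/save_*
# functions can dispatch to it, as described in the module docstring.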
| 30.072993
| 84
| 0.61699
|
e8e88d278aabe48db5017ec27947f7c45cd73213
| 71
|
py
|
Python
|
tinysched/__init__.py
|
dbjohnson/tinysched
|
f44957fa45aabeb046ca330fdc7f7becc4171f81
|
[
"MIT"
] | null | null | null |
tinysched/__init__.py
|
dbjohnson/tinysched
|
f44957fa45aabeb046ca330fdc7f7becc4171f81
|
[
"MIT"
] | null | null | null |
tinysched/__init__.py
|
dbjohnson/tinysched
|
f44957fa45aabeb046ca330fdc7f7becc4171f81
|
[
"MIT"
] | 3
|
2018-10-06T14:47:42.000Z
|
2018-10-30T23:42:22.000Z
|
__all__ = ['tinysched']
__version__ = '0.1.2'
from .scheduler import *
| 17.75
| 24
| 0.690141
|
c9c085ccf514fd0ed0e5739f3123e46e4883fd6c
| 363
|
py
|
Python
|
trade_remedies_public/config/constants.py
|
uktrade/trade-remedies-public
|
bf2984c49c445bc5f8c5087e37a3cf59d06a5a0f
|
[
"MIT"
] | 1
|
2020-08-27T09:50:42.000Z
|
2020-08-27T09:50:42.000Z
|
trade_remedies_public/config/constants.py
|
uktrade/trade-remedies-public
|
bf2984c49c445bc5f8c5087e37a3cf59d06a5a0f
|
[
"MIT"
] | 5
|
2020-08-27T22:19:06.000Z
|
2021-11-10T11:29:45.000Z
|
trade_remedies_public/config/constants.py
|
uktrade/trade-remedies-public
|
bf2984c49c445bc5f8c5087e37a3cf59d06a5a0f
|
[
"MIT"
] | null | null | null |
SECURITY_GROUP_ORGANISATION_OWNER = "Organisation Owner"
SECURITY_GROUP_ORGANISATION_USER = "Organisation User"
SECURITY_GROUP_THIRD_PARTY_USER = "Third Party User"
SECURITY_GROUP_TRA_INVESTIGATOR = "TRA Investigator"
SECURITY_GROUP_TRA_ADMINISTRATOR = "TRA Administrator"
ROLE_APPLICANT = 1
CASE_ROLE_APPLICANT = "Applicant"
CASE_ROLE_RESPONDENT = "Respondent"
| 36.3
| 56
| 0.853994
|
4984b4b4709fc4846e30f4b0d704176f336c6ad1
| 6,270
|
py
|
Python
|
fluent.runtime/tests/format/test_attributes.py
|
shlomyb-di/python-fluent
|
284507d5aed60a2d4bc9b4433ff7fef121529d6f
|
[
"Apache-2.0"
] | 155
|
2017-02-15T11:39:45.000Z
|
2022-03-15T19:06:58.000Z
|
fluent.runtime/tests/format/test_attributes.py
|
shlomyb-di/python-fluent
|
284507d5aed60a2d4bc9b4433ff7fef121529d6f
|
[
"Apache-2.0"
] | 113
|
2017-03-14T16:47:57.000Z
|
2022-02-03T20:53:07.000Z
|
fluent.runtime/tests/format/test_attributes.py
|
shlomyb-di/python-fluent
|
284507d5aed60a2d4bc9b4433ff7fef121529d6f
|
[
"Apache-2.0"
] | 18
|
2017-02-08T01:22:51.000Z
|
2021-12-21T03:07:34.000Z
|
import unittest
from fluent.runtime import FluentBundle, FluentResource
from fluent.runtime.errors import FluentReferenceError
from ..utils import dedent_ftl
class TestAttributesWithStringValues(unittest.TestCase):
def setUp(self):
self.bundle = FluentBundle(['en-US'], use_isolating=False)
self.bundle.add_resource(FluentResource(dedent_ftl("""
foo = Foo
.attr = Foo Attribute
bar = { foo } Bar
.attr = Bar Attribute
ref-foo = { foo.attr }
ref-bar = { bar.attr }
""")))
def test_can_be_referenced_for_entities_with_string_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-foo').value, {})
self.assertEqual(val, 'Foo Attribute')
self.assertEqual(len(errs), 0)
def test_can_be_referenced_for_entities_with_pattern_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-bar').value, {})
self.assertEqual(val, 'Bar Attribute')
self.assertEqual(len(errs), 0)
def test_can_be_formatted_directly_for_entities_with_string_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('foo').attributes['attr'], {})
self.assertEqual(val, 'Foo Attribute')
self.assertEqual(len(errs), 0)
def test_can_be_formatted_directly_for_entities_with_pattern_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('bar').attributes['attr'], {})
self.assertEqual(val, 'Bar Attribute')
self.assertEqual(len(errs), 0)
class TestAttributesWithSimplePatternValues(unittest.TestCase):
def setUp(self):
self.bundle = FluentBundle(['en-US'], use_isolating=False)
self.bundle.add_resource(FluentResource(dedent_ftl("""
foo = Foo
bar = Bar
.attr = { foo } Attribute
baz = { foo } Baz
.attr = { foo } Attribute
qux = Qux
.attr = { qux } Attribute
ref-bar = { bar.attr }
ref-baz = { baz.attr }
ref-qux = { qux.attr }
""")))
def test_can_be_referenced_for_entities_with_string_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-bar').value, {})
self.assertEqual(val, 'Foo Attribute')
self.assertEqual(len(errs), 0)
def test_can_be_formatted_directly_for_entities_with_string_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('bar').attributes['attr'], {})
self.assertEqual(val, 'Foo Attribute')
self.assertEqual(len(errs), 0)
def test_can_be_referenced_for_entities_with_pattern_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-baz').value, {})
self.assertEqual(val, 'Foo Attribute')
self.assertEqual(len(errs), 0)
def test_can_be_formatted_directly_for_entities_with_pattern_values(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('baz').attributes['attr'], {})
self.assertEqual(val, 'Foo Attribute')
self.assertEqual(len(errs), 0)
def test_works_with_self_references(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-qux').value, {})
self.assertEqual(val, 'Qux Attribute')
self.assertEqual(len(errs), 0)
def test_works_with_self_references_direct(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('qux').attributes['attr'], {})
self.assertEqual(val, 'Qux Attribute')
self.assertEqual(len(errs), 0)
class TestMissing(unittest.TestCase):
def setUp(self):
self.bundle = FluentBundle(['en-US'], use_isolating=False)
self.bundle.add_resource(FluentResource(dedent_ftl("""
foo = Foo
bar = Bar
.attr = Bar Attribute
baz = { foo } Baz
qux = { foo } Qux
.attr = Qux Attribute
ref-foo = { foo.missing }
ref-bar = { bar.missing }
ref-baz = { baz.missing }
ref-qux = { qux.missing }
attr-only =
.attr = Attr Only Attribute
ref-double-missing = { missing.attr }
""")))
def test_msg_with_string_value_and_no_attributes(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-foo').value, {})
self.assertEqual(val, '{foo.missing}')
self.assertEqual(errs,
[FluentReferenceError(
'Unknown attribute: foo.missing')])
def test_msg_with_string_value_and_other_attributes(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-bar').value, {})
self.assertEqual(val, '{bar.missing}')
self.assertEqual(errs,
[FluentReferenceError(
'Unknown attribute: bar.missing')])
def test_msg_with_pattern_value_and_no_attributes(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-baz').value, {})
self.assertEqual(val, '{baz.missing}')
self.assertEqual(errs,
[FluentReferenceError(
'Unknown attribute: baz.missing')])
def test_msg_with_pattern_value_and_other_attributes(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-qux').value, {})
self.assertEqual(val, '{qux.missing}')
self.assertEqual(errs,
[FluentReferenceError(
'Unknown attribute: qux.missing')])
def test_attr_only_attribute(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('attr-only').attributes['attr'], {})
self.assertEqual(val, 'Attr Only Attribute')
self.assertEqual(len(errs), 0)
def test_missing_message_and_attribute(self):
val, errs = self.bundle.format_pattern(self.bundle.get_message('ref-double-missing').value, {})
self.assertEqual(val, '{missing.attr}')
self.assertEqual(errs, [FluentReferenceError('Unknown attribute: missing.attr')])
| 42.653061
| 107
| 0.63126
|
4f8452cc71735a6e39cb67e1ae71c16e2722fed8
| 513
|
py
|
Python
|
gen-demoh5.py
|
cdluminate/leicht
|
1ef84f1ad70539b64a2c0972a67086c31d035969
|
[
"MIT"
] | 7
|
2019-02-22T16:45:18.000Z
|
2022-01-07T18:12:07.000Z
|
gen-demoh5.py
|
cdluminate/leicht
|
1ef84f1ad70539b64a2c0972a67086c31d035969
|
[
"MIT"
] | null | null | null |
gen-demoh5.py
|
cdluminate/leicht
|
1ef84f1ad70539b64a2c0972a67086c31d035969
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import h5py
import numpy as np
import sys
if len(sys.argv)==1: # demo.h5
f = h5py.File('demo.h5', 'w')
f['data'] = np.random.rand(10, 784)
f['label'] = np.arange(10)
else:
f = h5py.File('mnist.fake.h5', 'w') # fake mnist for benchmarking
f['/train/images'] = np.random.rand(37800, 784)*255
f['/train/labels'] = np.random.rand(37800, 1 )* 10
f['/val/images'] = np.random.rand(4200, 784)*255
f['/val/labels'] = np.random.rand(4200, 1 )* 10
f.flush()
f.close()
| 27
| 69
| 0.602339
|
efcf95fca4fe255bd9623db6f238a6e0245386a3
| 251
|
py
|
Python
|
manage.py
|
yeaske/picscope
|
efb38459631b7aee8b2db4f38da1f437c2d96ad8
|
[
"MIT"
] | null | null | null |
manage.py
|
yeaske/picscope
|
efb38459631b7aee8b2db4f38da1f437c2d96ad8
|
[
"MIT"
] | null | null | null |
manage.py
|
yeaske/picscope
|
efb38459631b7aee8b2db4f38da1f437c2d96ad8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "picscope.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.818182
| 72
| 0.772908
|
4465088d2a3ea3b9d9523e7e8074e22b0301fa10
| 209
|
py
|
Python
|
test/scripts/quit.py
|
dbgate/datadmin
|
3c86ec23c2adea8fb88985b8d7a9e5ce8f9eaa6f
|
[
"MIT"
] | null | null | null |
test/scripts/quit.py
|
dbgate/datadmin
|
3c86ec23c2adea8fb88985b8d7a9e5ce8f9eaa6f
|
[
"MIT"
] | null | null | null |
test/scripts/quit.py
|
dbgate/datadmin
|
3c86ec23c2adea8fb88985b8d7a9e5ce8f9eaa6f
|
[
"MIT"
] | null | null | null |
def main():
pass
menu1 = root.MainMenuStrip.Items['mnuFile']
menu9 = menu1.DropDownItems['mnuQuit']
menu9.PerformClick();DoEvents()
if procedure is None: main()
if procedure == 'main': main()
| 23.222222
| 47
| 0.669856
|
762fd0490c450afce51b268197373f3f7e5766b0
| 633
|
py
|
Python
|
Documentation/Max Documentation/_create_tmp.py
|
AlexHarker/FrameLib
|
04b9882561c83d3240c6cb07f14861244d1d6272
|
[
"BSD-3-Clause"
] | 33
|
2017-08-13T00:02:41.000Z
|
2022-03-10T23:02:17.000Z
|
Documentation/Max Documentation/_create_tmp.py
|
AlexHarker/FrameLib
|
04b9882561c83d3240c6cb07f14861244d1d6272
|
[
"BSD-3-Clause"
] | 60
|
2018-02-01T23:33:36.000Z
|
2022-03-23T23:25:13.000Z
|
Documentation/Max Documentation/_create_tmp.py
|
AlexHarker/FrameLib
|
04b9882561c83d3240c6cb07f14861244d1d6272
|
[
"BSD-3-Clause"
] | 8
|
2018-02-01T20:18:46.000Z
|
2020-07-03T12:53:04.000Z
|
import os
from shutil import rmtree
from FrameLibDocs.classes import Documentation
def main(docs: Documentation):
docs.temporary_dir.mkdir(exist_ok=True)
docs.databases_dir.mkdir(exist_ok=True)
docs.raw_xml_dir.mkdir(exist_ok=True)
# Interfaces
for files in docs.interfaces_dir.iterdir():
files.unlink()
# Refpages
for files in docs.refpages_dir.iterdir():
if files.is_dir():
try:
rmtree(files.resolve())
except OSError:
print("Error cleaning out existing docs directories")
if __name__ == "__main__":
main(Documentation())
| 25.32
| 69
| 0.661927
|
e7cd2ee3036fe509e6df0aafe252b57dc4919ad8
| 8,702
|
py
|
Python
|
aurora/synth_diags.py
|
ToFuProject/Aurora
|
27556691e3094a3711fedbe1277bac67c6197600
|
[
"MIT"
] | null | null | null |
aurora/synth_diags.py
|
ToFuProject/Aurora
|
27556691e3094a3711fedbe1277bac67c6197600
|
[
"MIT"
] | null | null | null |
aurora/synth_diags.py
|
ToFuProject/Aurora
|
27556691e3094a3711fedbe1277bac67c6197600
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2021 Francesco Sciortino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import RectBivariateSpline, interp1d
from scipy.integrate import trapz
import matplotlib.tri as tri
plt.ion()
from scipy.constants import e as q_electron, m_p
def line_int_weights(
R_path, Z_path, rhop_path, dist_path, R_axis=None, rhop_out=None, CF_lam=None
):
"""Obtain weights for line integration on a rhop grid, given the 3D path of line integration in the (R,Z,Phi)
coordinates, as well as the value of sqrt of normalized poloidal flux at each point along the path.
Parameters
----------
R_path : array (np,)
Values of the R coordinate [m] along the line integration path.
Z_path : array (np,)
Values of the Z coordinate [m] along the line integration path.
rhop_path : array (np,)
Values of the rhop coordinate along the line integration path.
dist_path : array (np,)
Vector starting from 0 to the maximum distance [m] considered along the line integration.
R_axis : float
R value at the magnetic axis [m]. Only used for centrifugal asymmetry effects if CF_lam is not None.
rhop_out : array (nr,)
The sqrt of normalized poloidal flux grid on which weights should be computed. If left to None, an
equally-spaced grid with 201 points from the magnetic axis to the LCFS is used.
CF_lam : array (nr,)
Centrifugal (CF) asymmetry exponential factor, returned by the :py:func:`~aurora.synth_diags.centrifugal_asym`
function. If provided, this is taken to be on an input rhop_out grid. If left to None, no CF asymmetry is considered.
"""
if rhop_out is None:
rhop_out = np.linspace(0, 1, 201)
# response matrix for effective line integration
response = interp1d(
rhop_out,
np.eye(len(rhop_out)),
axis=1,
bounds_error=False,
copy=False,
fill_value=0,
assume_sorted=True,
kind="linear",
)(rhop_path)
if CF_lam is not None:
# interpolate CF lambda to ray/beam path
interp_lam = interp1d(
rhop_out,
CF_lam,
copy=False,
assume_sorted=True,
bounds_error=False,
fill_value=0,
)(rhop_path)
asym = np.exp(interp_lam * (R_path ** 2 - R_axis ** 2))
else:
asym = 1.0
# compute weights by summing over beam/ray path
weights = trapz(response[None] * asym, dist_path, axis=2)
return weights
def centrifugal_asym(
rhop,
Rlfs,
omega,
Zeff,
A_imp,
Z_imp,
Te,
Ti,
main_ion_A=2,
plot=False,
nz=None,
geqdsk=None,
):
r"""Estimate impurity poloidal asymmetry effects from centrifugal forces.
The result of this function is :math:`\lambda`, defined such that
.. math::
n(r,\theta) = n_0(r) \times \exp\left(\lambda(\rho) (R(r,\theta)^2- R_0^2)\right)
See Odstrcil et al. 2018 Plasma Phys. Control. Fusion 60 014003 for details on centrifugal asymmetries.
    Also see Appendix A of Angioni et al 2014 Nucl. Fusion 54 083028 for details on how these should also be
accounted for when comparing transport coefficients used in Aurora (on a rvol grid) to coefficients used
in codes that use other coordinate systems (e.g. based on rmid).
Parameters
----------
rhop : array (nr,)
Sqrt of normalized poloidal flux grid.
Rlfs : array (nr,)
Major radius on the Low Field Side (LFS), at points corresponding to rhop values
omega : array (nt,nr) or (nr,) [ rad/s ]
Toroidal rotation on Aurora temporal time_grid and radial rhop_grid (or, equivalently, rvol_grid) grids.
Zeff : array (nt,nr), (nr,) or float
Effective plasma charge on Aurora temporal time_grid and radial rhop_grid (or, equivalently, rvol_grid) grids.
Alternatively, users may give Zeff as a float (taken constant over time and space).
A_imp : float
Impurity ion atomic mass number (e.g. 40 for Ca)
Z_imp : array (nr, ) or int
Charge state of the impurity of interest. This can be an array, giving the expected charge state at every
radial position, or just a float.
Te : array (nr,nt)
Electron temperature (eV)
Ti : array (nr, nt)
Background ion temperature (eV)
main_ion_A : int, optional
Background ion atomic mass number. Default is 2 for D.
plot : bool
If True, plot asymmetry factor :math:`\lambda` vs. radius and show the predicted 2D impurity density distribution
at the last time point.
nz : array (nr,nZ)
Impurity charge state densities (output of Aurora at a specific time slice), only used for 2D plotting.
geqdsk : dict
Dictionary containing the `omfit_classes.omfit_eqdsk` reading of the EFIT g-file.
Returns
-------
CF_lam : array (nr,)
Asymmetry factor, defined as :math:`\lambda` in the expression above.
"""
if omega.ndim == 1:
omega = omega[None, :] # take constant in time
if isinstance(Zeff, (int, float)):
Zeff = np.array(Zeff) * np.ones_like(Ti)
if Zeff.ndim == 1:
Zeff = Zeff[None, :] # take constant in time
# deuterium mach number
mach = np.sqrt(2.0 * m_p / q_electron * (omega * Rlfs[None, :]) ** 2 / (2.0 * Ti))
# valid for deuterium plasma with Zeff almost constants on flux surfaces
CF_lam = (
A_imp
/ 2.0
* (mach / Rlfs[None, :]) ** 2
* (1.0 - Z_imp * main_ion_A / A_imp * Zeff * Te / (Ti + Zeff * Te))
)
# centrifugal asymmetry is only relevant on closed flux surfaces
CF_lam[:, rhop > 1.0] = 0
if plot:
# show centrifugal asymmetry lambda as a function of radius
fig, ax = plt.subplots()
ax.plot(rhop, CF_lam.T)
ax.set_xlabel(r"$\rho_p$")
ax.set_ylabel(r"$\lambda$")
# plot expected radial impurity density over the poloidal cross section
fig, ax = plt.subplots()
if isinstance(Z_imp, (int, float)):
# select charge state of interest
nz_sel = nz[:, int(Z_imp) - 1]
else:
# use total impurity density if Z_imp was given as a vector
nz_sel = nz.sum(1)
rhop_surfs = np.sqrt(geqdsk["fluxSurfaces"]["geo"]["psin"])
Rs = []
Zs = []
vals = []
for ii, surf in enumerate(geqdsk["fluxSurfaces"]["flux"]):
# FSA nz on this flux surface at the last time point
nz_sel_i = interp1d(rhop, nz_sel)(rhop_surfs[ii])
CF_lam_i = interp1d(rhop, CF_lam[-1, :])(rhop_surfs[ii])
Rs = np.concatenate((Rs, geqdsk["fluxSurfaces"]["flux"][ii]["R"]))
Zs = np.concatenate((Zs, geqdsk["fluxSurfaces"]["flux"][ii]["Z"]))
vals = np.concatenate(
(
vals,
nz_sel_i
* np.exp(
CF_lam_i
* (
geqdsk["fluxSurfaces"]["flux"][ii]["R"] ** 2
- geqdsk["RMAXIS"] ** 2
)
),
)
)
triang = tri.Triangulation(Rs, Zs)
cntr1 = ax.tricontourf(triang, vals, levels=300)
ax.plot(geqdsk["RBBBS"], geqdsk["ZBBBS"], c="k")
ax.scatter(geqdsk["RMAXIS"], geqdsk["ZMAXIS"], marker="x", c="k")
ax.axis("equal")
ax.set_xlabel("R [m]")
ax.set_ylabel("Z [m]")
plt.tight_layout()
return CF_lam
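# --- Illustrative sketch, not part of the original file: feeding the asymmetry factor
# from centrifugal_asym() into line_int_weights(), per the docstrings above. The profiles
# and the line-of-sight arrays below are synthetic placeholders, not a validated case.
# rhop_grid = np.linspace(0, 1, 50)                        # sqrt norm. poloidal flux
# Rlfs = 1.7 + 0.5 * rhop_grid                             # LFS major radius [m]
# omega = 5e4 * (1.0 - rhop_grid**2)                       # toroidal rotation [rad/s]
# Te = Ti = 100.0 + 3e3 * (1.0 - rhop_grid**2)             # temperatures [eV]
# lam = centrifugal_asym(rhop_grid, Rlfs, omega, Zeff=1.5,
#                        A_imp=40, Z_imp=18, Te=Te[None, :], Ti=Ti[None, :])
# weights = line_int_weights(R_path, Z_path, rhop_path, dist_path,  # path arrays from a
#                            R_axis=1.7, rhop_out=rhop_grid,        # diagnostic geometry
#                            CF_lam=lam[-1])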
| 37.670996
| 126
| 0.626638
|
fc18f57d56aadba5b91b407093304ce933611bd6
| 36,219
|
py
|
Python
|
docker/docker-airflow/spark_files/spark-2.4.5-bin-hadoop2.6/python/pyspark/sql/session.py
|
saurabhkhandelwal15/airflow_data_pipeline
|
f1832ef5edaae95c144a15012b6c301c55394b1b
|
[
"MIT"
] | 2,327
|
2020-03-01T09:47:34.000Z
|
2021-11-25T12:38:42.000Z
|
docker/docker-airflow/spark_files/spark-2.4.5-bin-hadoop2.6/python/pyspark/sql/session.py
|
saurabhkhandelwal15/airflow_data_pipeline
|
f1832ef5edaae95c144a15012b6c301c55394b1b
|
[
"MIT"
] | 209
|
2020-03-01T17:14:12.000Z
|
2021-11-08T20:35:42.000Z
|
docker/docker-airflow/spark_files/spark-2.4.5-bin-hadoop2.6/python/pyspark/sql/session.py
|
saurabhkhandelwal15/airflow_data_pipeline
|
f1832ef5edaae95c144a15012b6c301c55394b1b
|
[
"MIT"
] | 686
|
2020-03-03T17:24:51.000Z
|
2021-11-25T23:39:12.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive SerDes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances."""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions, etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
        :return: corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
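    # Illustrative, comment-only sketch (not part of PySpark): what the correction above does
    # for a plain pandas DataFrame, independent of Spark. The values are example assumptions.
    #   import numpy as np, pandas as pd
    #   pdf = pd.DataFrame({"ts": pd.to_datetime(["2014-08-01 14:01:05"])})
    #   rec = pdf.to_records(index=False)[0]
    #   rec.tolist()    # datetime64[ns] field comes back as a raw long (nanoseconds)
    #   fixed = rec.astype(np.dtype([("ts", "datetime64[us]")]))
    #   fixed.tolist()  # -> (datetime.datetime(2014, 8, 1, 14, 1, 5),)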
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowStreamSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
jsqlContext = self._wrapped._jsqlContext
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(batches, ArrowStreamSerializer(), reader_func,
create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of either :class:`Row`,
:class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 41.53555
| 100
| 0.595958
|
a7fed2cb718b5b1d9deebae46bd96b47be23a42c
| 2,418
|
py
|
Python
|
apps/mac/iterm/iterm.py
|
dmartzol/knausj_talon
|
97972d938647c38dd330f681264a756d2dfe24d3
|
[
"MIT"
] | null | null | null |
apps/mac/iterm/iterm.py
|
dmartzol/knausj_talon
|
97972d938647c38dd330f681264a756d2dfe24d3
|
[
"MIT"
] | null | null | null |
apps/mac/iterm/iterm.py
|
dmartzol/knausj_talon
|
97972d938647c38dd330f681264a756d2dfe24d3
|
[
"MIT"
] | null | null | null |
from talon import Context, Module, actions, imgui, settings, ui
import os
ctx = Context()
ctx.matches = r"""
app: iterm2
"""
directories_to_remap = {}
directories_to_exclude = {}
@ctx.action_class("user")
class user_actions:
# def file_manager_current_path():
# title = ui.active_window().title
# if "~" in title:
# title = os.path.expanduser(title)
# if title in directories_to_remap:
# title = directories_to_remap[title]
# if title in directories_to_exclude:
# title = None
# return title
# def file_manager_show_properties():
# """Shows the properties for the file"""
# def file_manager_open_directory(path: str):
# """opens the directory that's already visible in the view"""
# actions.insert("cd ")
# path = '"{}"'.format(path)
# actions.insert(path)
# actions.key("enter")
# actions.user.file_manager_refresh_title()
# def file_manager_select_directory(path: str):
# """selects the directory"""
# actions.insert(path)
# def file_manager_new_folder(name: str):
# """Creates a new folder in a gui filemanager or inserts the command to do so for terminals"""
# name = '"{}"'.format(name)
# actions.insert("mkdir " + name)
# def file_manager_open_file(path: str):
# """opens the file"""
# actions.insert(path)
# actions.key("enter")
# def file_manager_select_file(path: str):
# """selects the file"""
# actions.insert(path)
def terminal_list_directories():
actions.insert("ls")
actions.key("enter")
def terminal_list_all_directories():
actions.insert("ls -a")
actions.key("enter")
def terminal_change_directory(path: str):
actions.insert("cd {}".format(path))
if path:
actions.key("enter")
def terminal_change_directory_root():
"""Root of current drive"""
actions.insert("cd /")
actions.key("enter")
def terminal_clear_screen():
"""Clear screen"""
actions.key("ctrl-l")
def terminal_run_last():
actions.key("up enter")
def terminal_kill_all():
actions.key("ctrl-c")
actions.insert("y")
actions.key("enter")
def terminal_clear_line():
"""Clear line"""
actions.key("ctrl-u")
| 26.571429
| 103
| 0.59512
|
ce731b3e3c85135e73dd038c713a6f3449178c96
| 680
|
py
|
Python
|
example/protocolfeed.py
|
zepheira/amara3-xml
|
5d9646c372607f4b25598f3d4c9c94548e2a1093
|
[
"Apache-2.0"
] | 7
|
2015-02-03T05:03:42.000Z
|
2021-04-17T15:39:39.000Z
|
example/protocolfeed.py
|
zepheira/amara3-xml
|
5d9646c372607f4b25598f3d4c9c94548e2a1093
|
[
"Apache-2.0"
] | 4
|
2015-05-08T15:36:55.000Z
|
2021-02-03T17:24:50.000Z
|
example/protocolfeed.py
|
zepheira/amara3-xml
|
5d9646c372607f4b25598f3d4c9c94548e2a1093
|
[
"Apache-2.0"
] | 2
|
2018-11-11T03:08:54.000Z
|
2019-07-24T06:02:35.000Z
|
#Sample usage, from project root dir:
#python example/protocolfeed.py README.md
import sys
from collections import deque
from amara3.uxml.parser import parsefrags, event
docfragments = deque()
with open(sys.argv[1]) as f:
for line in f.readlines():
docfragments.append(line)
for ev in parsefrags(docfragments):
print (ev)
from amara3.util import coroutine
from amara3.uxml.parser import parser, event
@coroutine
def handler():
while True:
ev = yield
print(ev)
return
h = handler()
p = parser(h)
p.send(('<hello id', False))
p.send(('="12"', False))
p.send(('>', False))
p.send(('world', False))
p.send(('</hello>', True))
| 17.894737
| 48
| 0.673529
|
5cf637fdbaf1d44c430549a4d9e18ed34e64d29e
| 1,456
|
py
|
Python
|
blog/models.py
|
Yubisel/webempresa
|
a96086a225946aff0c79c537da6d6f11a852f4ad
|
[
"MIT"
] | null | null | null |
blog/models.py
|
Yubisel/webempresa
|
a96086a225946aff0c79c537da6d6f11a852f4ad
|
[
"MIT"
] | null | null | null |
blog/models.py
|
Yubisel/webempresa
|
a96086a225946aff0c79c537da6d6f11a852f4ad
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.timezone import now
from django.contrib.auth.models import User
# Create your models here.
class Category(models.Model):
name = models.CharField(max_length=100, verbose_name="nombre")
created = models.DateTimeField(auto_now_add=True, verbose_name="fecha de creacion")
updated = models.DateTimeField(auto_now=True, verbose_name="fecha de modificacion")
class Meta:
verbose_name = "categoria"
verbose_name_plural = "categorias"
ordering = ["-created"]
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length=200, verbose_name="titulo")
content = models.TextField(verbose_name="contenido")
published = models.DateTimeField(verbose_name="fecha de publicacion", default=now)
image = models.ImageField(verbose_name="imagen", upload_to="blog", null=True, blank=True)
autor = models.ForeignKey(User, verbose_name="autor", on_delete=models.CASCADE)
categories = models.ManyToManyField(Category, verbose_name="categorias", related_name="get_posts")
created = models.DateTimeField(auto_now_add=True, verbose_name="fecha de creacion")
updated = models.DateTimeField(auto_now=True, verbose_name="fecha de modificacion")
class Meta:
verbose_name = "entrada"
verbose_name_plural = "entradas"
ordering = ["-created"]
def __str__(self):
return self.title
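# Illustrative sketch (not part of the original app): how the related_name="get_posts"
# reverse accessor and Meta.ordering are typically used; the category name below is an
# example assumption and the query assumes the objects already exist in the database.
def _demo_category_posts(category_name="tutorials"):
    category = Category.objects.get(name=category_name)
    # Posts come back newest-first because Post.Meta.ordering = ["-created"]
    return category.get_posts.all()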
| 41.6
| 102
| 0.728709
|
8662836b1aa3ad20180a142fb5f2602add09994f
| 218
|
py
|
Python
|
LeetCode/35.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | 1
|
2020-12-08T02:01:18.000Z
|
2020-12-08T02:01:18.000Z
|
LeetCode/35.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | null | null | null |
LeetCode/35.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | null | null | null |
#
# LeetCode
#
# Problem - 35
# URL - https://leetcode.com/problems/search-insert-position/
#
import bisect
from typing import List
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
return bisect.bisect_left(nums, target)
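# Illustrative local run (not part of the original submission); it relies on the imports
# added above, and the inputs/expected outputs are the classic LeetCode examples.
if __name__ == "__main__":
    s = Solution()
    print(s.searchInsert([1, 3, 5, 6], 5))  # 2
    print(s.searchInsert([1, 3, 5, 6], 2))  # 1
    print(s.searchInsert([1, 3, 5, 6], 7))  # 4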
| 19.818182
| 62
| 0.697248
|
caa5657f0210495a77c325ebec767124a834e68c
| 564
|
py
|
Python
|
WebMirror/management/rss_parser_funcs/feed_parse_extractCultureasiatlBlogspotCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 193
|
2016-08-02T22:04:35.000Z
|
2022-03-09T20:45:41.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractCultureasiatlBlogspotCom.py
|
fake-name/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 533
|
2016-08-23T20:48:23.000Z
|
2022-03-28T15:55:13.000Z
|
WebMirror/management/rss_parser_funcs/feed_parse_extractCultureasiatlBlogspotCom.py
|
rrosajp/ReadableWebProxy
|
ed5c7abe38706acc2684a1e6cd80242a03c5f010
|
[
"BSD-3-Clause"
] | 19
|
2015-08-13T18:01:08.000Z
|
2021-07-12T17:13:09.000Z
|
def extractCultureasiatlBlogspotCom(item):
'''
Parser for 'cultureasiatl.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
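# Illustrative, self-contained sketch (not part of the original parser): how the tagmap
# lookup above resolves an item's tags to a name and translation type. The tags below are
# example assumptions; the real helpers (extractVolChapterFragmentPostfix,
# buildReleaseMessageWithType) are supplied by the surrounding WebMirror framework.
def _demo_tagmap_lookup(tags=("PRC", "chapter")):
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in tags:
            return name, tl_type  # ('PRC', 'translated') for the default tags
    return None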
| 25.636364
| 104
| 0.641844
|
b50987bb565d363477b1914de084caed168290d3
| 8,751
|
py
|
Python
|
pele_platform/Frag/libraries.py
|
cescgina/pele_platform
|
9db1f69c35bb2b188047ddcdb4b3280964f86b77
|
[
"Apache-2.0"
] | null | null | null |
pele_platform/Frag/libraries.py
|
cescgina/pele_platform
|
9db1f69c35bb2b188047ddcdb4b3280964f86b77
|
[
"Apache-2.0"
] | null | null | null |
pele_platform/Frag/libraries.py
|
cescgina/pele_platform
|
9db1f69c35bb2b188047ddcdb4b3280964f86b77
|
[
"Apache-2.0"
] | null | null | null |
import glob
import os
import subprocess
import shutil
from rdkit import Chem
from pele_platform.constants import constants as cs
OUTPUT = "input.conf"
def get_symmetry_groups(mol):
"""
Computes the symmetry class for each atom and returns a list with the idx of non-symmetric atoms.
Parameters
----------
mol : rdkit molecule object.
Fragment from custom-made library.
Returns
-------
symmetry_list : list
List with atom indices.
"""
rank = {}
symmetry_list = []
symmetry_rank_list = []
counter = 0
for counter, atom in enumerate(mol.GetAtoms()):
rank[atom.GetIdx()] = list(Chem.CanonicalRankAtoms(mol, breakTies=False))[counter]
for idx, symmetry_rank in rank.items():
if symmetry_rank not in symmetry_rank_list:
symmetry_rank_list.append(symmetry_rank)
symmetry_list.append(idx)
return symmetry_list
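# Illustrative sketch (not part of the original library): a minimal standalone check of
# get_symmetry_groups() on benzene, where all carbons (and all hydrogens) are symmetry
# equivalent, so only one atom index per equivalence class is kept. The SMILES string and
# helper name are example assumptions, not pele_platform API.
def _demo_symmetry_groups():
    demo_mol = Chem.AddHs(Chem.MolFromSmiles("c1ccccc1"))  # benzene with explicit hydrogens
    unique_atoms = get_symmetry_groups(demo_mol)
    return unique_atoms  # expected: one carbon index and one hydrogen index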
def growing_sites(fragment,
user_bond):
"""
Retrieves all possible growing sites (hydrogens) on the fragment. Takes PDB fragment file as input.
Parameters
----------
fragment : string
Path to fragment pdb file.
user_bond : string
Connection point from which the user wants to grow the fragments.
Returns
-------
bonds : list
List of strings representing sites, e.g. "benzene.pdb C6-H6 C1-H2"
"""
bonds = []
mol = Chem.MolFromPDBFile(fragment, removeHs=False)
symmetry_list = get_symmetry_groups(mol)
if mol:
heavy_atoms = [a for a in mol.GetAtoms() if a.GetSymbol() != "H"]
for a in heavy_atoms:
hydrogens = [n for n in a.GetNeighbors() if n.GetSymbol() == "H" and n.GetIdx() in symmetry_list]
at_name = a.GetMonomerInfo().GetName().strip()
for h in hydrogens:
h_name = h.GetMonomerInfo().GetName().strip()
bonds.append("{} {} {}-{}".format(fragment, user_bond, at_name, h_name))
return bonds
def sdf_to_pdb(file_list,
logger, tmpdirname):
"""
Converts sdf files to pdb.
Parameters
----------
file_list : list
List of paths of fragments in sdf format.
logger : string
File with status messages
tmpdirname : string
Path of temporary directory.
Returns
-------
out : list
List of paths with converted sdf files.
"""
out = []
if file_list:
converted_mae = []
# convert all SDF to MAE
schrodinger_path = os.path.join(cs.SCHRODINGER, "utilities/structconvert")
command_mae = "{} -isd {} -omae {}"
command_pdb = "{} -imae {} -opdb {}"
for file in file_list:
shutil.copy(file, tmpdirname)
fout = os.path.splitext(os.path.basename(file))[0] + ".mae"
fout_path = os.path.join(tmpdirname, os.path.basename(file))
try:
                cmd_mae = command_mae.format(schrodinger_path, fout_path, fout)  # format into a fresh variable so the template stays reusable
                subprocess.call(cmd_mae.split())
converted_mae.append(fout)
except Exception as e:
logger.info("Error occured while converting SD files to mae.", e)
# convert all MAE to PDB, it will result in a lot of numbered pdb files
for c in converted_mae:
shutil.move(c, tmpdirname)
c = os.path.join(tmpdirname, c)
fout = c.replace(".mae", ".pdb")
try:
                cmd_pdb = command_pdb.format(schrodinger_path, c, fout)  # keep the template intact for the next file
                subprocess.call(cmd_pdb.split())
os.remove(c)
except Exception as e:
logger.info("Error occured while converting mae to PDB.", e)
pdb_pattern = os.path.join(tmpdirname, converted_mae[0])
converted_pdb = glob.glob(pdb_pattern[:-4]+"*"+".pdb")
# ~~~ If it's stupid but it works (?), it isn't stupid. ~~~
# read in PDB file created by Schrodinger, substitute residue name and add chain ID
for c in converted_pdb:
with open(c, "r") as fin:
lines = fin.readlines()
new_lines = []
for line in lines:
if line.startswith("HETATM") or line.startswith("ATOM"):
new_lines.append(line)
new_lines = [l.replace("UNK", "GRW") for l in new_lines if "UNK" in l]
new_lines = [l[:21]+"L"+l[22:] for l in new_lines]
with open(c, "w") as fout:
for line in new_lines:
fout.write(line)
out = converted_pdb
return out
def get_library(frag_library):
"""
    Checks the path of the fragment library provided in the input.yaml file.
Parameters
----------
frag_library : string
Path to fragment library.
Returns
-------
path : string
Path to the fragment library.
"""
directory = os.path.dirname(os.path.abspath(__file__))
path = frag_library if os.path.exists(frag_library) else os.path.join(directory, "Libraries", frag_library.strip())
if not os.path.exists(path):
raise OSError(f"File {frag_library} doesn't exist and is not one of our internal libraries. Please check the frag_library flag in input.yaml.")
return path
def get_fragment_files(path,
logger, tmpdirname):
"""
Gets all pdb and sdf files of each fragment in the library.
Parameters
----------
path : string
Path to the fragment library.
logger : string
File with status messages.
tmpdirname : string
Path of temporary directory.
Returns
-------
all_files : list
List of paths of the fragments in the fragment library.
"""
fragment_files = []
extensions = ['*.pdb', '*.sdf']
for e in extensions:
fragment_files.extend(glob.glob(os.path.join(path, e.upper())))
fragment_files.extend(glob.glob(os.path.join(path, e.lower())))
# convert SDF to PDB, if necessary
sdf_files = [elem for elem in fragment_files if ".sdf" in elem.lower()]
pdb_files = [elem for elem in fragment_files if ".pdb" in elem.lower()]
all_files = pdb_files + sdf_to_pdb(sdf_files, logger, tmpdirname)
return all_files
def write_config_file(output_name,
bond_list):
"""
Generates the configuration file.
"""
with open(output_name, "w+") as conf_file:
for line in bond_list:
conf_file.write(line+"\n")
def main(user_bond,
frag_library, logger, tmpdirname):
# find the library and extract fragments
path = get_library(frag_library)
all_files = get_fragment_files(path, logger, tmpdirname)
# get all possible growing sites
bond_list = []
for file in all_files:
bond_list.extend(growing_sites(file, user_bond))
# write input.conf
write_config_file(OUTPUT, bond_list)
return OUTPUT
| 37.883117
| 184
| 0.481774
|
78a4063468db5a5bb35cea977fd2edfa12865d0f
| 619
|
py
|
Python
|
HumanPose/dataset/mpii.py
|
harrisonford/mybabybrain-model
|
06cce4e1aada0fb7b25616869bd17de164995c21
|
[
"MIT"
] | null | null | null |
HumanPose/dataset/mpii.py
|
harrisonford/mybabybrain-model
|
06cce4e1aada0fb7b25616869bd17de164995c21
|
[
"MIT"
] | 14
|
2020-03-24T18:14:17.000Z
|
2022-02-10T01:36:17.000Z
|
HumanPose/dataset/mpii.py
|
harrisonford/mybabybrain-model
|
06cce4e1aada0fb7b25616869bd17de164995c21
|
[
"MIT"
] | null | null | null |
from HumanPose.dataset.pose_dataset import PoseDataset
class MPII(PoseDataset):
def __init__(self, cfg):
cfg.all_joints = [[0, 5], [1, 4], [2, 3], [6, 11], [7, 10], [8, 9], [12], [13]]
cfg.all_joints_names = ['ankle', 'knee', 'hip', 'wrist', 'elbow', 'shoulder', 'chin', 'forehead']
cfg.num_joints = 14
super().__init__(cfg)
def mirror_joint_coords(self, joints, image_width):
joints[:, 1] = image_width - joints[:, 1]
return joints
def get_pose_segments(self):
return [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13]]
| 36.411765
| 105
| 0.55412
|
adfe5abe5f228b833be7d715e0b746d6ed7bc74b
| 3,976
|
py
|
Python
|
urlfix/dirurlfix.py
|
prabhupad26/urlfix
|
9dac12f0e7c4513a964bf9b2407f0095e7dacfd6
|
[
"MIT"
] | 1
|
2021-07-05T05:04:24.000Z
|
2021-07-05T05:04:24.000Z
|
urlfix/dirurlfix.py
|
prabhupad26/urlfix
|
9dac12f0e7c4513a964bf9b2407f0095e7dacfd6
|
[
"MIT"
] | null | null | null |
urlfix/dirurlfix.py
|
prabhupad26/urlfix
|
9dac12f0e7c4513a964bf9b2407f0095e7dacfd6
|
[
"MIT"
] | null | null | null |
from .urlfix import URLFix, file_format
import os
from warnings import warn
def replace_urls_root(in_dir, recursive=False, sub_recursive=False, **kwargs):
"""
:param in_dir: Input directory
:param recursive: Bool, should URLs be replaced in sub-directories if they exist?
:param kwargs: Other arguments to URLFix.replace_urls
:param sub_recursive: Bool, should URLs be replaced sub-recursively? Defaults to False.
:return: Files with outdated links validated/replaced, as requested.
"""
for root, sub_dirs, root_files in os.walk(in_dir):
number_moved = [] # Hold results
if root_files:
# sort root files such that changes are OS independent
root_files = sorted(root_files)
for root_file in root_files:
root_file = os.path.join(in_dir, root_file)
if file_format(root_file) not in ["md", "txt"]:
print(f"{root_file} is of an unsupported file format, skipping...")
continue
if '_output' in root_file:
print(f"File {root_file} is a fix of another file")
continue # skip output files
if "inplace" in kwargs and kwargs["inplace"]:
number_moved.append(URLFix(root_file).replace_urls(**kwargs))
else:
output_file = root_file.replace(f'.{file_format(root_file)}',
f'_output.{file_format(root_file)}')
if os.path.exists(output_file):
print(f"File already fixed: {root_file}")
continue # skip file that's already been fixed
with open(output_file, 'w'):
pass # create an empty output file
number_moved.append(URLFix(root_file, output_file).replace_urls(**kwargs))
if sub_dirs:
if not recursive:
use_grammar = "sub-directory" if len(sub_dirs) == 1 else "sub-directories"
warn(f"Found {use_grammar} {','.join(sub_dirs)} but recursion was set to False, exiting..")
else:
for sub_dir in sub_dirs:
# Create full paths to sub directories
full_sub_dir_path = os.path.join(in_dir, sub_dir)
# Add verbosity
print(f"Now updating files in {full_sub_dir_path}")
# Create new dirurlfix object and recurse
# If sub directories, sub-recurse in this sub directory, currently set to one level
number_moved.append(replace_urls_root(full_sub_dir_path, recursive=sub_recursive, **kwargs))
print('All files have been updated, thank you for using urlfix.')
    # To flatten or not? For now, do not flatten so we know that the second and subsequent entries are non-root replacements
return number_moved
class DirURLFix(object):
"""
Replace Outdated URLs given a directory of files.
"""
def __init__(self, input_dir, recursive=False, sub_recursive=False):
"""
:param input_dir: Path to input_dir.
:param recursive: Should links be replaced in sub directories? defaults to False
:param sub_recursive: Bool, should URLs be replaced sub-recursively? Defaults to False
"""
self.input_dir = input_dir
self.recursive = recursive
self.sub_recursive = sub_recursive
def replace_urls(self, **kwargs):
if not os.path.exists(self.input_dir):
raise OSError("Path does not exist!")
if not os.path.isdir(self.input_dir):
raise NotADirectoryError("Input path must be a directory!")
return replace_urls_root(in_dir=self.input_dir, recursive=self.recursive, sub_recursive=self.sub_recursive,
**kwargs)
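# Illustrative usage sketch (not part of the original module): driving DirURLFix from a
# script. The directory name is an example assumption; inplace=False is the code path above
# that writes *_output.<ext> files next to the originals instead of editing them in place.
def _demo_dir_url_fix(input_dir="docs"):
    fixer = DirURLFix(input_dir, recursive=True, sub_recursive=False)
    # Returns a (possibly nested) list with the number of URLs replaced per file/sub-directory
    return fixer.replace_urls(inplace=False)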
| 47.903614
| 116
| 0.595322
|
6cd2239c74a7136d1ed9c68eff9a221afda452dc
| 2,147
|
py
|
Python
|
aplpy/slicer.py
|
teuben/aplpy
|
5d8176cd550bd119af7055aa66f57abc06c86cdb
|
[
"MIT"
] | 98
|
2015-02-16T19:25:18.000Z
|
2022-03-19T13:41:45.000Z
|
aplpy/slicer.py
|
teuben/aplpy
|
5d8176cd550bd119af7055aa66f57abc06c86cdb
|
[
"MIT"
] | 244
|
2015-01-12T10:40:59.000Z
|
2022-03-25T05:43:38.000Z
|
aplpy/slicer.py
|
teuben/aplpy
|
5d8176cd550bd119af7055aa66f57abc06c86cdb
|
[
"MIT"
] | 57
|
2015-01-26T16:38:54.000Z
|
2022-03-30T15:09:00.000Z
|
def slice_hypercube(data, header, dimensions=[0, 1], slices=[]):
"""
Extract a slice from an n-dimensional HDU data/header pair, and return the
new data (without changing the header).
"""
if type(slices) == int:
slices = (slices, )
else:
slices = slices[:]
shape = data.shape
if len(shape) < 2:
raise Exception("FITS file does not have enough dimensions")
elif len(shape) == 2:
wcsaxes_slices = ('x', 'y')
if dimensions[1] < dimensions[0]:
data = data.transpose()
wcsaxes_slices = ('y', 'x')
return data, wcsaxes_slices
else:
if slices:
wcsaxes_slices = slices[:]
if dimensions[0] < dimensions[1]:
slices.insert(dimensions[0], slice(None, None, None))
slices.insert(dimensions[1], slice(None, None, None))
wcsaxes_slices.insert(dimensions[0], 'x')
wcsaxes_slices.insert(dimensions[1], 'y')
else:
slices.insert(dimensions[1], slice(None, None, None))
slices.insert(dimensions[0], slice(None, None, None))
wcsaxes_slices.insert(dimensions[1], 'y')
wcsaxes_slices.insert(dimensions[0], 'x')
if type(slices) == list:
slices = tuple(slices)
wcsaxes_slices = tuple(wcsaxes_slices)
data = data[slices[::-1]]
if dimensions[1] < dimensions[0]:
data = data.transpose()
else:
message = """
Attempted to read in %i-dimensional FITS cube, but
dimensions and slices were not specified. Please specify these
using the dimensions= and slices= argument. The cube dimensions
are:\n\n""" % len(shape)
for i in range(1, len(shape) + 1):
message += " " * 10
message += " %i %s %i\n" % (i - 1,
header["CTYPE%i" % i],
header["NAXIS%i" % i])
raise Exception(message)
return data, wcsaxes_slices
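# Illustrative sketch (not part of the original module): slicing a fake 3-d cube down to
# its two sky dimensions. The array shape and header values are example assumptions; a real
# call would pass FITS HDU data and header objects.
def _demo_slice_hypercube():
    import numpy as np
    data = np.zeros((4, 32, 64))  # (velocity, y, x) in numpy axis order
    header = {"CTYPE1": "RA---TAN", "NAXIS1": 64,
              "CTYPE2": "DEC--TAN", "NAXIS2": 32,
              "CTYPE3": "VELO-LSR", "NAXIS3": 4}
    # keep FITS axes 0 (x) and 1 (y), take the first plane of the remaining axis
    sliced, wcsaxes_slices = slice_hypercube(data, header, dimensions=[0, 1], slices=[0])
    return sliced.shape, wcsaxes_slices  # ((32, 64), ('x', 'y', 0))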
| 30.239437
| 78
| 0.520261
|
66cf6f3b745f56dd84cd172f94cca90cf137bdd8
| 2,019
|
py
|
Python
|
test/filters/test_unsharp_mask.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/filters/test_unsharp_mask.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/filters/test_unsharp_mask.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import pytest
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import kornia
import kornia.testing as utils # test utils
class Testunsharp:
@pytest.mark.parametrize("batch_shape", [(1, 4, 8, 15), (2, 3, 11, 7)])
def test_cardinality(self, batch_shape, device, dtype):
kernel_size = (5, 7)
sigma = (1.5, 2.1)
input = torch.rand(batch_shape, device=device, dtype=dtype)
actual = kornia.filters.unsharp_mask(input, kernel_size, sigma, "replicate")
assert actual.shape == batch_shape
def test_noncontiguous(self, device, dtype):
batch_size = 3
input = torch.rand(3, 5, 5, device=device, dtype=dtype).expand(batch_size, -1, -1, -1)
kernel_size = (3, 3)
sigma = (1.5, 2.1)
actual = kornia.filters.unsharp_mask(input, kernel_size, sigma, "replicate")
assert_allclose(actual, actual)
def test_gradcheck(self, device, dtype):
# test parameters
batch_shape = (1, 3, 5, 5)
kernel_size = (3, 3)
sigma = (1.5, 2.1)
# evaluate function gradient
input = torch.rand(batch_shape, device=device, dtype=dtype)
input = utils.tensor_to_gradcheck_var(input) # to var
assert gradcheck(
kornia.filters.unsharp_mask,
(input, kernel_size, sigma, "replicate"),
raise_exception=True,
)
def test_jit(self, device, dtype):
op = kornia.filters.unsharp_mask
op_script = torch.jit.script(op)
params = [(3, 3), (1.5, 1.5)]
img = torch.ones(1, 3, 5, 5, device=device, dtype=dtype)
assert_allclose(op(img, *params), op_script(img, *params))
def test_module(self, device, dtype):
params = [(3, 3), (1.5, 1.5)]
op = kornia.filters.unsharp_mask
op_module = kornia.filters.UnsharpMask(*params)
img = torch.ones(1, 3, 5, 5, device=device, dtype=dtype)
assert_allclose(op(img, *params), op_module(img))
| 33.65
| 94
| 0.62209
|
b1918bd4ad21c48f34fd9e90f8b0a311969ce6eb
| 18,358
|
py
|
Python
|
notebook/utils.py
|
ryy1221/MutSigCVsyn
|
2a0a404bdc96044e825e93b76df230b613c52adc
|
[
"MIT"
] | null | null | null |
notebook/utils.py
|
ryy1221/MutSigCVsyn
|
2a0a404bdc96044e825e93b76df230b613c52adc
|
[
"MIT"
] | null | null | null |
notebook/utils.py
|
ryy1221/MutSigCVsyn
|
2a0a404bdc96044e825e93b76df230b613c52adc
|
[
"MIT"
] | null | null | null |
from Bio import SeqIO
from Bio.Seq import Seq
from bisect import bisect_left
import mmap
import re
nuc = ["A", "T", "C", "G"]
table = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',
'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',
}
gc_di_nuc = "CG" # definition of CG dinucleotide, from 5' to 3'
stop_aa = ["TAA", "TAG", "TGA"]
start_aa = "ATG"
# Error definition
class FracError(Exception):
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = None
def __str__(self):
if self.message:
return 'FracError, {0} '.format(self.message)
else:
return 'FracError has been raised'
class CategError(Exception):
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = None
def __str__(self):
if self.message:
return 'CategError, {0} '.format(self.message)
else:
return 'CategError has been raised'
class AnnotationError(Exception):
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = None
def __str__(self):
if self.message:
return 'AnnotationError, {0} '.format(self.message)
else:
return 'AnnotationError has been raised'
# Determine if the mutation is a transition mutation.
def is_transition(ori_allele, alt_allele):
p = False
transition = [("A", "G"), ("G", "A"), ("T", "C"), ("C", "T")]
transversion = [("C", "A"), ("C", "G"), ("G", "T"), ("T", "G"), ("G", "C"), ("A", "C"), ("A", "T"), ("T", "A")]
if (ori_allele, alt_allele) in transition:
p = True
elif (ori_allele, alt_allele) in transversion:
p = False
else:
print(ori_allele, alt_allele)
raise CategError("NEITHER TRANSITION NOR TRANSVERSION MUTATION")
return p
# this function calculates the contribution of a given base's coverage (in a CpG context or not) to the different categs
def validate_categ(original_allele, altered_allele,
                   tri_nucleotide):  # The before/after bases come from the genomic tri-nucleotide context, not from the codon context.
base_before = tri_nucleotide[0]
base_after = tri_nucleotide[2]
# Initialize the categ fractions
categ = 0
    # A:T mutations fall into categories 5/6; C:G mutations are split below
    # by whether the base sits in a CpG dinucleotide
if original_allele == "A" or original_allele == "T":
if is_transition(original_allele, altered_allele):
categ = 5
else:
categ = 6
elif (original_allele + base_after == gc_di_nuc) or (
base_before + original_allele == gc_di_nuc): # This is a CG dinucleotide
if is_transition(original_allele, altered_allele):
categ = 1
else:
categ = 2
else:
if is_transition(original_allele, altered_allele):
categ = 3
else:
categ = 4
return categ
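# Illustrative sketch (not part of the original module): category assignment for a few
# hand-made mutations; the tri-nucleotide contexts below (5'->3') are example assumptions.
def _demo_validate_categ():
    examples = [
        ("C", "T", "ACG"),  # C in a CpG dinucleotide, transition   -> categ 1
        ("C", "A", "ACG"),  # C in a CpG dinucleotide, transversion -> categ 2
        ("C", "T", "ACT"),  # C:G pair outside CpG, transition      -> categ 3
        ("A", "G", "TAT"),  # A:T pair, transition                  -> categ 5
    ]
    return [validate_categ(ref, alt, ctx) for ref, alt, ctx in examples]  # [1, 2, 3, 5]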
# check whether the base is part of a CpG dinucleotide
def is_cpg(base, base_before, base_after):
flag = False
if (base + base_after == gc_di_nuc) or (base_before + base == gc_di_nuc):
flag = True
return flag
# check whether the base is part of an A:T pair
def is_at(base):
flag = False
if base == "A" or base == "T":
flag = True
return flag
# check whether a mutation causes a silent (synonymous) codon change
def is_silent(before_context, after_context):
before_aa = table[before_context]
after_aa = table[after_context]
flag = False
if before_aa == after_aa:
flag = True
return flag
# add a count to whichever coverage dictionary is passed in
def add_count(base, base_before, base_after, mut_bases, dict_cov):
dict_cov[6] += float(1) / 3
if is_at(base):
if is_transition(base, mut_bases):
dict_cov[4] += float(1) / 3
else:
dict_cov[5] += float(1) / 3
elif is_cpg(base, base_before, base_after): # This is a CG dinucleotide
if is_transition(base, mut_bases):
dict_cov[0] += float(1) / 3
else:
dict_cov[1] += float(1) / 3
else:
if is_transition(base, mut_bases):
dict_cov[2] += float(1) / 3
else:
dict_cov[3] += float(1) / 3
return dict_cov
# Convert all the elements in the input list to integers.
def list_tonumeric(test_list):
test_list = list(map(int, test_list))
return test_list
# Group the positions in a list into position start and end pairs in a gene.
def list_to_pairs(list_old):
new_list = []
for i in range(0, len(list_old), 2):
new_list.append(list_old[i:i + 2])
return new_list
# Process sequences on the negative strand: take the reverse complement of each sequence
# in the list, then reverse the order of the sequences.
def negative_process(list_sequence):
new_list = []
for sequences in list_sequence:
sequences = sequences.reverse_complement()
new_list.append(sequences)
new_list = new_list[::-1]
return new_list
# Find out if the base falls into a non-coding or coding region.
def get_region(region_list, base_pos, region_f):
r_flag = "nc"
j_flag = 0
for regions in region_list:
if regions[1] - 1 >= base_pos >= regions[0]:
r_flag = region_f
if r_flag == "coding":
if base_pos == regions[1] - 1 or base_pos == regions[1] - 2 or base_pos == regions[0] or base_pos == \
regions[0] + 1:
j_flag = 1
break
return r_flag, j_flag
def get_annotation(annotation_file_path):
name_dict = {} # {transcript:[gene_name]}
transcript_dict = {} # {'gene_name':['ENST00000']}
transcript_info_dict = {} # {transcriptID:'exon'[]; 'CDS'[];'gene'[];'UTR
with open(annotation_file_path, 'r') as annotation_f:
for lines in annotation_f:
if lines.startswith("chr"):
line_split = lines.split('\t')
chr_n, source, feature, start_pos, end_pos = line_split[0:5]
strand, frame, attribute = line_split[6:9]
position_list = [int(start_pos), int(end_pos)]
# feature = line_split[2] # 'exon' / 'CDS'/ 'UTR'/ 'start_codon'/ 'stop_codon'/ 'transcript'/ 'gene'
# if this is a protein coding gene and is the primary transcript:
if 'protein_coding' in attribute and "KNOWN" in attribute and 'appris_principal' in attribute:
# parse the attributes
attribute_split = attribute.split(';')
gene_id_col, transcript_id_col, gene_type_col, gene_status_col, gene_name_col, transcript_type_col, \
transcript_status_col, transcript_name_col = attribute_split[0:8]
                    # do not include the version number
gene_id = re.findall(r'(ENSG\d+|ENSGR\d+)', gene_id_col)[0]
transcript_id = re.findall(r'(ENST\d+|ENSTR\d+)', transcript_id_col)[0]
gene_status = re.findall(r'\"(.*?)\"', gene_status_col)[0] # KNOWN or not
gene_name = re.findall(r'\"(.*?)\"', gene_name_col)[0]
transcript_status = re.findall(r'\"(.*?)\"', transcript_status_col)[0] # KNOWN or not
if gene_name not in transcript_dict:
transcript_dict[gene_name] = []
if transcript_id not in transcript_dict[gene_name]:
transcript_dict[gene_name].append(transcript_id)
if transcript_id not in transcript_info_dict:
transcript_info_dict[transcript_id] = {}
transcript_info_dict[transcript_id]['strand'] = strand
transcript_info_dict[transcript_id]['chr'] = chr_n
transcript_info_dict[transcript_id]['exon'] = []
transcript_info_dict[transcript_id]['CDS'] = []
transcript_info_dict[transcript_id]['UTR'] = []
transcript_info_dict[transcript_id]['transcript'] = []
# if the transcript and genes are known, parse according to feature type
if feature == 'exon':
# exon_n = re.findall(r'\d+', attribute_split[8])
transcript_info_dict[transcript_id]['exon'].append(position_list)
elif feature == 'CDS':
transcript_info_dict[transcript_id]['CDS'].append(position_list)
elif feature == 'UTR':
transcript_info_dict[transcript_id]['UTR'].append(position_list)
elif feature == 'transcript':
transcript_info_dict[transcript_id]['transcript'] = position_list
if transcript_id not in name_dict:
name_dict[transcript_id] = gene_name
    # delete transcript records for genes that have 2 or more principal transcripts; keep only the longest transcript
for names in transcript_dict:
if len(transcript_dict[names]) > 1:
store_max_t = 0
for t in transcript_dict[names]:
len_t = abs(transcript_info_dict[t]['transcript'][1] - transcript_info_dict[t]['transcript'][0])
if len_t >= store_max_t:
store_max_t = len_t
else:
del transcript_info_dict[t]
return name_dict, transcript_info_dict
# get mrna positions in gene, then delete the utr positions
# the returned result is the cds positions
def get_mrna_position(transcript, info_dict, strand_gene):
list_all_pos = info_dict[transcript]['UTR'] + info_dict[transcript]['CDS']
list_all_pos.sort()
# get the index of utr pair
utr_index_list = []
for pairs in info_dict[transcript]['UTR']:
utr_idx = list_all_pos.index(pairs)
utr_index_list.append(utr_idx)
# if negative strand, the start position is the biggest position
list_gene_position = []
if strand_gene == '-':
transcript_start = list_all_pos[-1][1]
for pairs in list_all_pos:
for positions in pairs:
gene_position = -(positions - transcript_start)
list_gene_position.append(gene_position)
else:
transcript_start = list_all_pos[0][0]
for pairs in list_all_pos:
for positions in pairs:
gene_position = positions - transcript_start
list_gene_position.append(gene_position)
# put the mrna positions into pairs
list_gene_position.sort()
mrna_list = list_to_pairs(list_gene_position)
    # on the negative strand, the UTR index list needs to be reversed
if strand_gene == '-':
exon_n = len(mrna_list) - 1
utr_index_list = [abs(i - exon_n) for i in utr_index_list]
# remove utr positions according to the utr index
for utr_index in sorted(utr_index_list,
reverse=True): # reverse the list so that index in list won't change after deletion
del mrna_list[utr_index]
return mrna_list
# get the transcript sequence
def get_transcript_sequence(transcript, info_dict, fasta_dict, strand_gene):
chromosome = info_dict[transcript]['chr']
transcript_start_pos = info_dict[transcript]['transcript'][0]
transcript_end_pos = info_dict[transcript]['transcript'][1]
if strand_gene == '-':
transcript_seq = fasta_dict[chromosome][transcript_start_pos - 1:transcript_end_pos].reverse_complement()
else:
transcript_seq = fasta_dict[chromosome][transcript_start_pos - 1:transcript_end_pos]
return transcript_seq
# get the cdna sequence from the transcript sequence, inputs are cdna positions within the gene
def get_cdna_sequence(cds_list, strand_gene, sequence_transcript):
list_cds_seq = []
for cds_pairs in cds_list:
cds_start = cds_pairs[0]
cds_end = cds_pairs[1]
if strand_gene == '-':
list_cds_seq.append(sequence_transcript[cds_start:cds_end + 1])
else:
list_cds_seq.append(sequence_transcript[cds_start:cds_end + 1])
concatenated_cds = Seq("")
for s in list_cds_seq:
concatenated_cds += s
return concatenated_cds.seq
# this function uses bisect to find the leftmost value exactly equal to x. It can only be used on a sorted list.
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect_left(a, x)
if i != len(a) and a[i] == x:
return True
else:
return False
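# Quick illustrative check of the helper above (toy list, not from the pipeline;
# kept in comments so importing the script stays side-effect free):
#   index([1, 3, 5, 7], 5) -> True   (bisect_left lands exactly on the 5)
#   index([1, 3, 5, 7], 4) -> False  (the insertion point holds a different value)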
# this function loops through all the bases and gets the coverage.
def calculate_coverage(transcript_seq, cds_seq, mrna_list, start_pos):
# create dictionary to store position values, use genome positions as key
cov_dict = {'nonsilent': {}, 'flank': {}, 'silent': {}}
# Initialize the base value
before_base = "N"
after_base = "N"
base_context = None
context_position = None
lp_flag = 0
region_flag = 'nc' # 'nc','coding'
    base_transcript_pos = 0  # position in transcript
    base_genome_pos = start_pos - 1  # position in gene, use genome position as the key
    n_context = 0
    for bases in transcript_seq:
        base_genome_pos += 1
        base_transcript_pos += 1
        for idx in cov_dict:
            if base_genome_pos not in cov_dict[idx]:
                cov_dict[idx][base_genome_pos] = [0, 0, 0, 0, 0, 0, 0]
        # skip the base if it's N, and give a low quality flag to the gene
        if bases == "N":
            lp_flag = 1
            continue
        # Determine before base and after base
        if base_transcript_pos == 1:
            after_base = transcript_seq[base_transcript_pos]
        elif base_transcript_pos == len(transcript_seq):
            before_base = transcript_seq[base_transcript_pos - 2]
        else:
            before_base = transcript_seq[base_transcript_pos - 2]
            after_base = transcript_seq[base_transcript_pos]
        # Determine regions and if the base is at junction position
        for regions in mrna_list:
            if regions[1] + 1 >= base_transcript_pos >= regions[0] + 1:
region_flag = 'coding'
if region_flag == "nc":
other_nuc = filter(lambda i: i != bases, nuc)
for mut_bases in other_nuc:
cov_dict['flank'][base_genome_pos] = add_count(bases, before_base, after_base, mut_bases,
cov_dict['flank'][base_genome_pos])
elif region_flag == "coding":
n_context += 1
if n_context <= 3:
junction_flag = 1
if n_context % 3 != 0:
context_position = n_context % 3 - 1
codon_n = (n_context // 3) * 3
base_context = cds_seq[codon_n:codon_n + 3]
else:
context_position = 2
codon_n = n_context
base_context = cds_seq[codon_n - 3:codon_n]
other_nuc = filter(lambda i: i != bases, nuc)
for mut_bases in other_nuc:
context_before = base_context # This is the original tri-nucleotide context
context_after = base_context[:context_position] + mut_bases + base_context[context_position + 1:]
try:
if is_silent(context_before, context_after):
cov_dict['silent'][base_genome_pos] = add_count(bases, before_base, after_base, mut_bases,
cov_dict['silent'][base_genome_pos])
else:
cov_dict['nonsilent'][base_genome_pos] = add_count(bases, before_base, after_base, mut_bases,
cov_dict['nonsilent'][base_genome_pos])
except KeyError:
lp_flag = 1
continue
region_flag = "nc"
return cov_dict, lp_flag
# this function is used when parallelizing the combination of transcripts and patients
def calculate_patient(position_dict_func, patient_dict, chr):
patient_categ_list = [0, 0, 0, 0, 0, 0, 0]
for positions, categs in position_dict_func.iteritems():
try:
if not index(patient_dict[chr], positions):
for i in range(0, 7):
patient_categ_list[i] += categs[i]
except KeyError:
for i in range(0, 7):
patient_categ_list[i] += categs[i]
round_patient_list = [round(num) for num in patient_categ_list]
return round_patient_list
def get_zero_position(coverage_file):
id_aliquot = coverage_file.split('.')[0].split('/')[-1]
dict_patient = {}
dict_patient[id_aliquot] = {}
with open(coverage_file, 'r+b') as wig_f:
mwig = mmap.mmap(wig_f.fileno(), 0)
itmwig = iter(mwig.readline, "")
next(itmwig)
for lines in itmwig:
if lines.startswith('fixed'):
line_list = re.findall(r'\d+', lines)
chr_n = line_list[0]
start_pos = int(line_list[1])
position = start_pos - 1
else:
position += 1
if chr_n not in dict_patient[id_aliquot]:
dict_patient[id_aliquot][chr_n] = []
if lines == '0\n':
dict_patient[id_aliquot][chr_n].append(position)
mwig.close()
return dict_patient
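# Sketch of the wiggle-style input expected above (toy lines; the very first line
# of the file is skipped by next(itmwig)):
#   fixedStep chrom=1 start=1000 step=1   -> chr '1', next value is position 1000
#   1                                     -> coverage 1 at 1000, not recorded
#   0                                     -> zero coverage, so position 1001 is stored
# giving dict_patient[aliquot_id]['1'] == [1001] for this toy example.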
| 38.16632
| 133
| 0.584595
|
a4e36afedb855c732a590b976951d0bf2358e426
| 9,589
|
py
|
Python
|
singer_sdk/helpers/_state.py
|
meltano/sdk
|
83dde4fe922f9f91bd3c57277849a2a2daa8f09a
|
[
"Apache-2.0"
] | 13
|
2021-06-21T17:30:32.000Z
|
2021-12-06T18:45:34.000Z
|
singer_sdk/helpers/_state.py
|
meltano/sdk
|
83dde4fe922f9f91bd3c57277849a2a2daa8f09a
|
[
"Apache-2.0"
] | null | null | null |
singer_sdk/helpers/_state.py
|
meltano/sdk
|
83dde4fe922f9f91bd3c57277849a2a2daa8f09a
|
[
"Apache-2.0"
] | null | null | null |
"""Helper functions for state and bookmark management."""
import datetime
from typing import Any, Callable, List, Optional, Union, cast
from singer_sdk.exceptions import InvalidStreamSortException
from singer_sdk.helpers._typing import to_json_compatible
PROGRESS_MARKERS = "progress_markers"
PROGRESS_MARKER_NOTE = "Note"
SIGNPOST_MARKER = "replication_key_signpost"
STARTING_MARKER = "starting_replication_value"
def get_state_if_exists(
tap_state: dict,
tap_stream_id: str,
state_partition_context: Optional[dict] = None,
key: Optional[str] = None,
) -> Optional[Any]:
"""Return the stream or partition state, creating a new one if it does not exist.
Parameters
----------
tap_state : dict
the existing state dict which contains all streams.
tap_stream_id : str
the id of the stream
state_partition_context : Optional[dict], optional
keys which identify the partition context, by default None (not partitioned)
key : Optional[str], optional
name of the key searched for, by default None (return entire state if found)
Returns
-------
Optional[Any]
        Returns the state if it exists, otherwise None
Raises
------
ValueError
Raised if state is invalid or cannot be parsed.
"""
if "bookmarks" not in tap_state:
return None
if tap_stream_id not in tap_state["bookmarks"]:
return None
stream_state = tap_state["bookmarks"][tap_stream_id]
if not state_partition_context:
if key:
return stream_state.get(key, None)
return stream_state
if "partitions" not in stream_state:
return None # No partitions defined
matched_partition = _find_in_partitions_list(
stream_state["partitions"], state_partition_context
)
if matched_partition is None:
return None # Partition definition not present
if key:
return matched_partition.get(key, None)
return matched_partition
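# Minimal usage sketch of the lookup above (hypothetical stream name and values):
#   state = {"bookmarks": {"users": {"replication_key_value": "2021-01-01"}}}
#   get_state_if_exists(state, "users", key="replication_key_value")  # -> "2021-01-01"
#   get_state_if_exists(state, "orders")                              # -> None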
def get_state_partitions_list(
tap_state: dict, tap_stream_id: str
) -> Optional[List[dict]]:
"""Return a list of partitions defined in the state, or None if not defined."""
return (get_state_if_exists(tap_state, tap_stream_id) or {}).get("partitions", None)
def _find_in_partitions_list(
partitions: List[dict], state_partition_context: dict
) -> Optional[dict]:
found = [
partition_state
for partition_state in partitions
if partition_state["context"] == state_partition_context
]
if len(found) > 1:
raise ValueError(
f"State file contains duplicate entries for partition: "
"{state_partition_context}.\n"
f"Matching state values were: {str(found)}"
)
if found:
return cast(dict, found[0])
return None
def _create_in_partitions_list(
partitions: List[dict], state_partition_context: dict
) -> dict:
# Existing partition not found. Creating new state entry in partitions list...
new_partition_state = {"context": state_partition_context}
partitions.append(new_partition_state)
return new_partition_state
def get_writeable_state_dict(
tap_state: dict, tap_stream_id: str, state_partition_context: Optional[dict] = None
) -> dict:
"""Return the stream or partition state, creating a new one if it does not exist.
Parameters
----------
tap_state : dict
the existing state dict which contains all streams.
tap_stream_id : str
the id of the stream
state_partition_context : Optional[dict], optional
keys which identify the partition context, by default None (not partitioned)
Returns
-------
dict
Returns a writeable dict at the stream or partition level.
Raises
------
ValueError
Raise an error if duplicate entries are found.
"""
if tap_state is None:
raise ValueError("Cannot write state to missing state dictionary.")
if "bookmarks" not in tap_state:
tap_state["bookmarks"] = {}
if tap_stream_id not in tap_state["bookmarks"]:
tap_state["bookmarks"][tap_stream_id] = {}
stream_state = cast(dict, tap_state["bookmarks"][tap_stream_id])
if not state_partition_context:
return stream_state
if "partitions" not in stream_state:
stream_state["partitions"] = []
stream_state_partitions: List[dict] = stream_state["partitions"]
found = _find_in_partitions_list(stream_state_partitions, state_partition_context)
if found:
return found
return _create_in_partitions_list(stream_state_partitions, state_partition_context)
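# Hypothetical sketch of the structure built above: starting from state = {},
#   get_writeable_state_dict(state, "users", state_partition_context={"shard": 1})
# leaves state == {"bookmarks": {"users": {"partitions": [{"context": {"shard": 1}}]}}}
# and returns the inner partition dict so callers can write bookmarks into it.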
def write_stream_state(
tap_state,
tap_stream_id: str,
key,
val,
*,
state_partition_context: Optional[dict] = None,
) -> None:
"""Write stream state."""
state_dict = get_writeable_state_dict(
tap_state, tap_stream_id, state_partition_context=state_partition_context
)
state_dict[key] = val
def reset_state_progress_markers(stream_or_partition_state: dict) -> Optional[dict]:
"""Wipe the state once sync is complete.
For logging purposes, return the wiped 'progress_markers' object if it existed.
"""
progress_markers = stream_or_partition_state.pop(PROGRESS_MARKERS, {})
# Remove auto-generated human-readable note:
progress_markers.pop(PROGRESS_MARKER_NOTE, None)
# Return remaining 'progress_markers' if any:
return progress_markers or None
def write_replication_key_signpost(
stream_or_partition_state: dict,
new_signpost_value: Any,
) -> None:
"""Write signpost value."""
stream_or_partition_state[SIGNPOST_MARKER] = to_json_compatible(new_signpost_value)
def write_starting_replication_value(
stream_or_partition_state: dict,
initial_value: Any,
) -> None:
"""Write initial replication value to state."""
stream_or_partition_state[STARTING_MARKER] = to_json_compatible(initial_value)
def get_starting_replication_value(stream_or_partition_state: dict):
"""Retrieve initial replication marker value from state."""
if not stream_or_partition_state:
return None
return stream_or_partition_state.get(STARTING_MARKER)
def increment_state(
stream_or_partition_state: dict,
latest_record: dict,
replication_key: str,
is_sorted: bool,
) -> None:
"""Update the state using data from the latest record.
Raises InvalidStreamSortException if is_sorted=True and unsorted
data is detected in the stream.
"""
progress_dict = stream_or_partition_state
if not is_sorted:
if PROGRESS_MARKERS not in stream_or_partition_state:
stream_or_partition_state[PROGRESS_MARKERS] = {
PROGRESS_MARKER_NOTE: "Progress is not resumable if interrupted."
}
progress_dict = stream_or_partition_state[PROGRESS_MARKERS]
old_rk_value = to_json_compatible(progress_dict.get("replication_key_value"))
new_rk_value = to_json_compatible(latest_record[replication_key])
if old_rk_value is None or new_rk_value >= old_rk_value:
progress_dict["replication_key"] = replication_key
progress_dict["replication_key_value"] = new_rk_value
return
if is_sorted:
raise InvalidStreamSortException(
f"Unsorted data detected in stream. Latest value '{new_rk_value}' is "
f"smaller than previous max '{old_rk_value}'."
)
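# Illustrative sketch (hypothetical record and replication key): with is_sorted=False,
#   state = {}
#   increment_state(state, {"updated_at": "2021-02-01"}, "updated_at", False)
# leaves state["progress_markers"]["replication_key_value"] == "2021-02-01", whereas
# with is_sorted=True the same keys are written at the top level of the state dict.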
def _greater_than_signpost(
signpost: Union[datetime.datetime, str, int, float],
new_value: Union[datetime.datetime, str, int, float],
) -> bool:
"""Compare and return True if new_value is greater than signpost."""
return ( # fails if signpost and bookmark are incompatible types
new_value > signpost # type: ignore
)
def finalize_state_progress_markers(stream_or_partition_state: dict) -> Optional[dict]:
"""Promote or wipe progress markers once sync is complete."""
signpost_value = stream_or_partition_state.pop(SIGNPOST_MARKER, None)
stream_or_partition_state.pop(STARTING_MARKER, None)
if PROGRESS_MARKERS in stream_or_partition_state:
if "replication_key" in stream_or_partition_state[PROGRESS_MARKERS]:
# Replication keys valid (only) after sync is complete
progress_markers = stream_or_partition_state[PROGRESS_MARKERS]
stream_or_partition_state["replication_key"] = progress_markers.pop(
"replication_key"
)
new_rk_value = progress_markers.pop("replication_key_value")
if signpost_value and _greater_than_signpost(signpost_value, new_rk_value):
new_rk_value = signpost_value
stream_or_partition_state["replication_key_value"] = new_rk_value
# Wipe and return any markers that have not been promoted
return reset_state_progress_markers(stream_or_partition_state)
def log_sort_error(
ex: Exception,
log_fn: Callable,
stream_name: str,
current_context: Optional[dict],
state_partition_context: Optional[dict],
record_count: int,
partition_record_count: int,
) -> None:
"""Log a sort error."""
msg = f"Sorting error detected in '{stream_name}'." f"on record #{record_count}. "
if partition_record_count != record_count:
msg += (
f"Record was partition record "
f"#{partition_record_count} with"
f" state partition context {state_partition_context}. "
)
if current_context:
msg += f"Context was {str(current_context)}. "
msg += str(ex)
log_fn(msg)
| 34.003546
| 88
| 0.704453
|
1d4aa100d5d89f61b407d9e16a196bf34fd07164
| 1,381
|
py
|
Python
|
pano/views/node_facts.py
|
jeroenzeegers/panopuppet
|
5024bf3a8b87a54aade197385438cec80120fac6
|
[
"Apache-2.0"
] | null | null | null |
pano/views/node_facts.py
|
jeroenzeegers/panopuppet
|
5024bf3a8b87a54aade197385438cec80120fac6
|
[
"Apache-2.0"
] | 1
|
2021-06-11T00:04:03.000Z
|
2021-06-11T00:04:03.000Z
|
pano/views/node_facts.py
|
jeroenzeegers/panopuppet
|
5024bf3a8b87a54aade197385438cec80120fac6
|
[
"Apache-2.0"
] | null | null | null |
import pytz
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.views.decorators.cache import cache_page
from pano.puppetdb import puppetdb
from pano.puppetdb.puppetdb import set_server, get_server
from pano.settings import AVAILABLE_SOURCES, CACHE_TIME
__author__ = 'etaklar'
@login_required
@cache_page(CACHE_TIME)
def facts(request, certname=None):
context = {'timezones': pytz.common_timezones,
'SOURCES': AVAILABLE_SOURCES}
if request.method == 'GET':
if 'source' in request.GET:
source = request.GET.get('source')
set_server(request, source)
if request.method == 'POST':
request.session['django_timezone'] = request.POST['timezone']
return redirect(request.POST['return_url'])
source_url, source_certs, source_verify = get_server(request)
facts_params = {
'query':
{
1: '["=","certname","' + certname + '"]'
},
}
facts_list = puppetdb.api_get(
api_url=source_url,
cert=source_certs,
verify=source_verify,
path='facts',
params=puppetdb.mk_puppetdb_query(
facts_params, request),
)
context['certname'] = certname
context['facts_list'] = facts_list
return render(request, 'pano/facts.html', context)
| 30.021739
| 69
| 0.660391
|
576ca13a0bda6fe20295c589c04d9d97965401b0
| 6,890
|
py
|
Python
|
tests/test_stac_validator.py
|
vincentsarago/stac-validator
|
2bc527cdf5b05bab499100e88254a3bdd3d65fe1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_stac_validator.py
|
vincentsarago/stac-validator
|
2bc527cdf5b05bab499100e88254a3bdd3d65fe1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_stac_validator.py
|
vincentsarago/stac-validator
|
2bc527cdf5b05bab499100e88254a3bdd3d65fe1
|
[
"Apache-2.0"
] | null | null | null |
"""
Description: Test the validator
"""
__author__ = "James Banting"
import stac_validator
import trio
import pytest
def _run_validate(url, version="master"):
stac = stac_validator.StacValidate(url, version)
trio.run(stac.run)
return stac
def test_good_item_validation_v052():
stac = _run_validate("tests/test_data/good_item_v052.json", "v0.5.2")
assert stac.message == {
"asset_type": "item",
"path": "tests/test_data/good_item_v052.json",
"valid_stac": True,
}
def test_good_item_validation_v060():
stac = _run_validate("tests/test_data/good_item_v060.json")
assert stac.message == {
"asset_type": "item",
"path": "tests/test_data/good_item_v060.json",
"valid_stac": True,
}
def test_good_catalog_validation_v052():
stac = _run_validate("tests/test_data/good_catalog_v052.json", "v0.5.2")
assert stac.message == {
"asset_type": "catalog",
"path": "tests/test_data/good_catalog_v052.json",
"valid_stac": True,
"children": [],
}
# Need to fix test around async return - output is valid, but dict is out of order
# def test_nested_catalog_v052():
# stac = _run_validate(
# "tests/test_data/nested_catalogs/parent_catalog.json", "v0.5.2"
# )
# truth = {
# "asset_type": "catalog",
# "valid_stac": True,
# "children": [
# {
# "asset_type": "catalog",
# "valid_stac": False,
# "error": "'name' is a required property of []",
# "children": [],
# "path": "tests/test_data/nested_catalogs/999/invalid_catalog.json",
# },
# {
# "asset_type": "catalog",
# "valid_stac": True,
# "children": [
# {
# "asset_type": "item",
# "valid_stac": False,
# "error": "'type' is a required property of []",
# "path": "tests/test_data/nested_catalogs/105/INVALID_CBERS_4_MUX_20180808_057_105_L2.json",
# },
# {
# "asset_type": "item",
# "valid_stac": True,
# "path": "tests/test_data/nested_catalogs/105/CBERS_4_MUX_20180713_057_105_L2.json",
# },
# {
# "asset_type": "item",
# "valid_stac": True,
# "path": "tests/test_data/nested_catalogs/105/CBERS_4_MUX_20180808_057_105_L2.json",
# },
# ],
# "path": "tests/test_data/nested_catalogs/105/catalog.json",
# },
# {
# "asset_type": "catalog",
# "valid_stac": True,
# "children": [
# {
# "asset_type": "item",
# "valid_stac": True,
# "path": "tests/test_data/nested_catalogs/122/CBERS_4_MUX_20180713_057_122_L2.json",
# },
# {
# "asset_type": "item",
# "valid_stac": True,
# "path": "tests/test_data/nested_catalogs/122/CBERS_4_MUX_20180808_057_122_L2.json",
# },
# {
# "asset_type": "catalog",
# "valid_stac": True,
# "children": [
# {
# "asset_type": "item",
# "valid_stac": True,
# "path": "tests/test_data/nested_catalogs/122/130/CBERS_4_MUX_20180713_098_122_L2.json",
# },
# {
# "asset_type": "item",
# "valid_stac": True,
# "path": "tests/test_data/nested_catalogs/122/130/CBERS_4_MUX_20180808_099_122_L2.json",
# },
# ],
# "path": "tests/test_data/nested_catalogs/122/130/catalog.json",
# },
# ],
# "path": "tests/test_data/nested_catalogs/122/catalog.json",
# },
# ],
# "path": "tests/test_data/nested_catalogs/parent_catalog.json",
# }
# assert stac.message == truth
def test_verbose_v052():
stac = _run_validate(
"tests/test_data/nested_catalogs/parent_catalog.json", "v0.5.2"
)
assert stac.status == {
"catalogs": {"valid": 4, "invalid": 1},
"collections": {"valid": 0, "invalid": 0},
"items": {"valid": 6, "invalid": 1},
}
def test_bad_url():
stac = _run_validate(
"https://s3.amazonaws.com/spacenet-stac/spacenet-dataset/AOI_4_Shanghai_MUL-PanSharpen_Cloud",
"v0.5.2",
)
assert stac.status == {
"valid_stac": False,
"error_type": "InvalidJSON",
"error_message": "https://s3.amazonaws.com/spacenet-stac/spacenet-dataset/AOI_4_Shanghai_MUL-PanSharpen_Cloud is not Valid JSON",
"path": "https:/s3.amazonaws.com/spacenet-stac/spacenet-dataset/AOI_4_Shanghai_MUL-PanSharpen_Cloud",
}
assert stac.message == {
"valid_stac": False,
"error_type": "InvalidJSON",
"error_message": "https://s3.amazonaws.com/spacenet-stac/spacenet-dataset/AOI_4_Shanghai_MUL-PanSharpen_Cloud is not Valid JSON",
"path": "https:/s3.amazonaws.com/spacenet-stac/spacenet-dataset/AOI_4_Shanghai_MUL-PanSharpen_Cloud",
}
@pytest.mark.stac_spec
def test_catalog_master():
stac = _run_validate(
"https://raw.githubusercontent.com/radiantearth/stac-spec/master/catalog-spec/examples/catalog.json"
)
assert stac.status == {
"catalogs": {"valid": 1, "invalid": 0},
"collections": {"valid": 0, "invalid": 0},
"items": {"valid": 0, "invalid": 0},
}
@pytest.mark.stac_spec
def test_collection_master():
stac = _run_validate(
"https://raw.githubusercontent.com/radiantearth/stac-spec/master/collection-spec/examples/sentinel2.json"
)
assert stac.status == {
"catalogs": {"valid": 0, "invalid": 0},
"collections": {"valid": 1, "invalid": 0},
"items": {"valid": 0, "invalid": 0},
}
@pytest.mark.item_spec
@pytest.mark.stac_spec
def test_item_master():
stac = _run_validate(
"https://raw.githubusercontent.com/radiantearth/stac-spec/master/item-spec/examples/sample-full.json"
)
assert stac.status == {
"catalogs": {"valid": 0, "invalid": 0},
"collections": {"valid": 0, "invalid": 0},
"items": {"valid": 1, "invalid": 0},
}
| 36.648936
| 137
| 0.518287
|
1917c4a87b2d058193e502480071adb4746b89ad
| 509
|
py
|
Python
|
benchmarks/DNNF-CIFAR-EQ/properties/local_targeted_diff_6_9_property_1.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | 1
|
2022-03-01T08:59:32.000Z
|
2022-03-01T08:59:32.000Z
|
benchmarks/DNNF-CIFAR-EQ/properties/local_targeted_diff_6_9_property_1.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | null | null | null |
benchmarks/DNNF-CIFAR-EQ/properties/local_targeted_diff_6_9_property_1.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | null | null | null |
from dnnv.properties import *
import numpy as np
N1 = Network("N1")
N2 = Network("N2")
x = Image(__path__.parent / "input_1.npy")
epsilon = Parameter("epsilon", type=float)
true_class = 6
other_class = 9
Forall(
x_,
Implies(
((x - epsilon) < x_ < (x + epsilon)) & (0 <= x_ <= 1),
And(
Implies(np.argmax(N1(x_)) == true_class, np.argmax(N2(x_)) != other_class),
Implies(np.argmax(N2(x_)) == true_class, np.argmax(N1(x_)) != other_class),
),
),
)
| 22.130435
| 87
| 0.569745
|
7b443950d263df10de98be9cd5f77447a0620b0a
| 11,037
|
py
|
Python
|
astropy/__init__.py
|
ananyashreyjain/astropy
|
18c4c13fcc5d2907c236ff3a3a9875c78e5a69c4
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/__init__.py
|
ananyashreyjain/astropy
|
18c4c13fcc5d2907c236ff3a3a9875c78e5a69c4
|
[
"BSD-3-Clause"
] | null | null | null |
astropy/__init__.py
|
ananyashreyjain/astropy
|
18c4c13fcc5d2907c236ff3a3a9875c78e5a69c4
|
[
"BSD-3-Clause"
] | 1
|
2018-08-02T09:33:21.000Z
|
2018-08-02T09:33:21.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import sys
import os
from warnings import warn
__minimum_python_version__ = '3.5'
__minimum_numpy_version__ = '1.13.0'
class UnsupportedPythonError(Exception):
pass
# This is the same check as the one at the top of setup.py
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError("Astropy does not support Python < {}".format(__minimum_python_version__))
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
    # If this __init__.py file is in ./astropy/ then the import is within a source
    # dir. .astropy-root is a file distributed with the source, but that should
    # not be installed
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
def _is_astropy_setup():
"""
Returns whether we are currently being imported in the context of running
Astropy's setup.py.
"""
main_mod = sys.modules.get('__main__')
if not main_mod:
return False
return (getattr(main_mod, '__file__', False) and
os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and
_is_astropy_source(main_mod.__file__))
# this indicates whether or not we are in astropy's setup.py
try:
_ASTROPY_SETUP_
except NameError:
from sys import version_info
import builtins
# This will set the _ASTROPY_SETUP_ to True by default if
# we are running Astropy's setup.py
builtins._ASTROPY_SETUP_ = _is_astropy_setup()
try:
from .version import version as __version__
except ImportError:
# TODO: Issue a warning using the logging framework
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
# TODO: Issue a warning using the logging framework
__githash__ = ''
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'http://docs.astropy.org/en/latest/'
else:
online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__)
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
try:
import numpy
except ImportError:
pass
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy".format(__minimum_numpy_version__))
raise ImportError(msg)
return numpy
if not _ASTROPY_SETUP_:
_check_numpy()
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
def _rollback_import(message):
log.error(message)
# Now disable exception logging to avoid an annoying error in the
# exception logger before we raise the import error:
_teardown_log()
# Roll back any astropy sub-modules that have been imported thus
# far
for key in list(sys.modules):
if key.startswith('astropy.'):
del sys.modules[key]
raise ImportError('astropy')
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
log.warning('You appear to be trying to import astropy from '
'within a source checkout without building the '
'extension modules first. Attempting to (re)build '
'extension modules:')
try:
_rebuild_extensions()
except BaseException as exc:
_rollback_import(
'An error occurred while attempting to rebuild the '
'extension modules. Please try manually running '
'`./setup.py develop` or `./setup.py build_ext '
'--inplace` to see what the issue was. Extension '
'modules must be successfully compiled and importable '
'in order to import astropy.')
# Reraise the Exception only in case it wasn't an Exception,
# for example if a "SystemExit" or "KeyboardInterrupt" was
# invoked.
if not isinstance(exc, Exception):
raise
else:
# Outright broken installation; don't be nice.
raise
# add these here so we only need to cleanup the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
def _rebuild_extensions():
global __version__
global __githash__
import subprocess
import time
from .utils.console import Spinner
devnull = open(os.devnull, 'w')
old_cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
try:
sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
'--inplace'], stdout=devnull,
stderr=devnull)
with Spinner('Rebuilding extension modules') as spinner:
while sp.poll() is None:
next(spinner)
time.sleep(0.05)
finally:
os.chdir(old_cwd)
devnull.close()
if sp.returncode != 0:
raise OSError('Running setup.py build_ext --inplace failed '
'with error code {0}: try rerunning this command '
'manually to check what the error was.'.format(
sp.returncode))
# Try re-loading module-level globals from the astropy.version module,
# which may not have existed before this function ran
try:
from .version import version as __version__
except ImportError:
pass
try:
from .version import githash as __githash__
except ImportError:
pass
# Set the bibtex entry to the article referenced in CITATION
def _get_bibtex():
import re
if os.path.exists('CITATION'):
with open('CITATION', 'r') as citation:
refs = re.findall(r'\{[^()]*\}', citation.read())
if len(refs) == 0: return ''
bibtexreference = "@ARTICLE{0}".format(refs[0])
return bibtexreference
else:
return ''
__bibtex__ = _get_bibtex()
import logging
# Use the root logger as a dummy log before initializing Astropy's logger
log = logging.getLogger()
if not _ASTROPY_SETUP_:
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
version, urlencode({'q': query}))
webbrowser.open(url)
__dir_inc__ = ['__version__', '__githash__', '__minimum_numpy_version__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf']
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
| 32.084302
| 107
| 0.650267
|
3a1117d9a235ba1a67d06d0301f9d7adae52f3e5
| 409
|
py
|
Python
|
iso10g.py
|
brunston/nextTwilight
|
51b32cb211bdb7c7bddcfb98adceaa388fcfce33
|
[
"MIT"
] | 2
|
2015-04-17T08:54:42.000Z
|
2015-11-27T01:37:39.000Z
|
iso10g.py
|
brupoon/nextTwilight
|
51b32cb211bdb7c7bddcfb98adceaa388fcfce33
|
[
"MIT"
] | null | null | null |
iso10g.py
|
brupoon/nextTwilight
|
51b32cb211bdb7c7bddcfb98adceaa388fcfce33
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 17 10:19:03 2014
@author: demure
"""
import numpy as np
from matplotlib import pyplot as plt
data = np.loadtxt(r"C:\pyf\ast\iso10G.txt")
logteff = data[:,0]
mbol = data[:,1]
#logteff_inv = list(reversed(logteff))
plt.plot(logteff, mbol, 'g.',markersize = 5)
plt.title("Isochrone, 10Gyr")
plt.xlabel("logTeff")
plt.ylabel("Mbol")
plt.xlim(4.3,3.3)
plt.ylim(8,-8)
| 22.722222
| 44
| 0.677262
|
2783a9a3ee4791471b79df05e98f2f03818663ee
| 3,124
|
py
|
Python
|
src/pyff/test/__init__.py
|
Razumain/pyFF
|
55c51e0f3e64aef09ccf76bd42f0429d451d2428
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/pyff/test/__init__.py
|
Razumain/pyFF
|
55c51e0f3e64aef09ccf76bd42f0429d451d2428
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/pyff/test/__init__.py
|
Razumain/pyFF
|
55c51e0f3e64aef09ccf76bd42f0429d451d2428
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import logging
import subprocess
import sys
import tempfile
from unittest import TestCase
import os
import pkg_resources
from pyff import __version__ as pyffversion
class ExitException(Exception):
def __init__(self, code):
self.code = code
def __str__(self):
return "would have exited with %d" % self.code
def run_pyff(*args):
return run_cmdline('pyff', args)
def run_pyffd(*args):
return run_cmdline('pyffd', args)
def run_cmdline(script, *args):
argv = list(*args)
starter = tempfile.NamedTemporaryFile('w').name
print "starting %s using %s" % (script, starter)
with open(starter, 'w') as fd:
fd.write("""#!%s
import sys
import coverage
import os
from pkg_resources import load_entry_point
if __name__ == '__main__':
cov = coverage.coverage(cover_pylib=False, source=['pyff'], omit=['test'], include=['*.py'])
cov.start()
rv = 0
try:
rv = load_entry_point('pyFF==%s', 'console_scripts', '%s')()
except Exception, ex:
raise ex
finally:
cov.stop()
cov.save()
os.rename('.coverage','.coverage.%%d' %% os.getpid())
sys.exit(rv)
""" % (sys.executable, pyffversion, script))
os.chmod(starter, 0700)
argv.insert(0, starter)
proc = _pstart(argv)
out, err = proc.communicate()
rv = proc.wait()
os.unlink(starter)
print "---"
print out
print err
print "rv=%d" % rv
print "---"
return out, err, rv
def _pstart(args, outf=None, ignore_exit=False):
env = {}
logging.debug(" ".join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
return proc
def _p(args, outf=None, ignore_exit=False):
proc = _pstart(args)
out, err = proc.communicate()
if err is not None and len(err) > 0:
logging.error(err)
if outf is not None:
with open(outf, "w") as fd:
fd.write(out)
else:
if out is not None and len(out) > 0:
logging.debug(out)
rv = proc.wait()
if rv and not ignore_exit:
raise RuntimeError("command exited with code != 0: %d" % rv)
class SignerTestCase(TestCase):
datadir = None
private_keyspec = None
public_keyspec = None
def sys_exit(self, code):
raise ExitException(code)
@classmethod
def setUpClass(cls):
cls.datadir = pkg_resources.resource_filename(__name__, 'data')
cls.private_keyspec = tempfile.NamedTemporaryFile('w').name
cls.public_keyspec = tempfile.NamedTemporaryFile('w').name
_p(['openssl', 'genrsa',
'2048',
'-nodes'], outf=cls.private_keyspec, ignore_exit=True)
_p(['openssl', 'req',
'-x509',
'-sha1',
'-new',
'-subj', '/CN=Signer',
'-key', cls.private_keyspec,
'-out', cls.public_keyspec])
@classmethod
def tearDownClass(cls):
if os.path.exists(cls.private_keyspec):
os.unlink(cls.private_keyspec)
if os.path.exists(cls.public_keyspec):
os.unlink(cls.public_keyspec)
| 25.193548
| 96
| 0.612036
|
86847de30892ca3aa04564b6a80f7771ea6e7d5a
| 12,547
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_waf_profile_constraint_urlparamlength.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_waf_profile_constraint_urlparamlength.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_waf_profile_constraint_urlparamlength.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_waf_profile_constraint_urlparamlength
short_description: Maximum length of parameter in URL.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
        description: only set to True when the module schema differs from the FortiManager API structure; the module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
profile:
description: the parameter (profile) in requested url
type: str
required: true
waf_profile_constraint_urlparamlength:
description: the top level parameters set
required: false
type: dict
suboptions:
action:
type: str
description: 'Action.'
choices:
- 'allow'
- 'block'
length:
type: int
description: 'Maximum length of URL parameter in bytes (0 to 2147483647).'
log:
type: str
description: 'Enable/disable logging.'
choices:
- 'disable'
- 'enable'
severity:
type: str
description: 'Severity.'
choices:
- 'low'
- 'medium'
- 'high'
status:
type: str
description: 'Enable/disable the constraint.'
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Maximum length of parameter in URL.
fmgr_waf_profile_constraint_urlparamlength:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
profile: <your own value>
waf_profile_constraint_urlparamlength:
action: <value in [allow, block]>
length: <value of integer>
log: <value in [disable, enable]>
severity: <value in [low, medium, high]>
status: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/waf/profile/{profile}/constraint/url-param-length',
'/pm/config/global/obj/waf/profile/{profile}/constraint/url-param-length'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/waf/profile/{profile}/constraint/url-param-length/{url-param-length}',
'/pm/config/global/obj/waf/profile/{profile}/constraint/url-param-length/{url-param-length}'
]
url_params = ['adom', 'profile']
module_primary_key = None
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'adom': {
'required': True,
'type': 'str'
},
'profile': {
'required': True,
'type': 'str'
},
'waf_profile_constraint_urlparamlength': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'action': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'block'
],
'type': 'str'
},
'length': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'log': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'severity': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'low',
'medium',
'high'
],
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'waf_profile_constraint_urlparamlength'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 33.369681
| 153
| 0.498924
|
e4b885ba6fa1ad6b27c27beeb384aaf6aa7b6eb9
| 6,477
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ospfsralgorithmlist_6d5b092154ba202ff142c9a80bdf1352.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ospfsralgorithmlist_6d5b092154ba202ff142c9a80bdf1352.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ospfsralgorithmlist_6d5b092154ba202ff142c9a80bdf1352.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class OspfSRAlgorithmList(Base):
"""Ospf SR Algorithms
The OspfSRAlgorithmList class encapsulates a list of ospfSRAlgorithmList resources that are managed by the system.
A list of resources can be retrieved from the server using the OspfSRAlgorithmList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'ospfSRAlgorithmList'
_SDM_ATT_MAP = {
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'OspfSrAlgorithm': 'ospfSrAlgorithm',
}
def __init__(self, parent):
super(OspfSRAlgorithmList, self).__init__(parent)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def OspfSrAlgorithm(self):
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): SR Algorithm
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OspfSrAlgorithm']))
def update(self, Name=None):
"""Updates ospfSRAlgorithmList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
"""Finds and retrieves ospfSRAlgorithmList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ospfSRAlgorithmList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ospfSRAlgorithmList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching ospfSRAlgorithmList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ospfSRAlgorithmList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ospfSRAlgorithmList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, OspfSrAlgorithm=None):
"""Base class infrastructure that gets a list of ospfSRAlgorithmList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- OspfSrAlgorithm (str): optional regex of ospfSrAlgorithm
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| 40.48125
| 174
| 0.662807
|
5445b65464ba6f0c591a9517c06632c89a9f22d7
| 25,399
|
py
|
Python
|
meta/train/env.py
|
mtcrawshaw/meta-world
|
b511885af4405715c7b35f8295cef88021a926be
|
[
"MIT"
] | 4
|
2021-09-21T07:24:26.000Z
|
2022-03-25T00:28:33.000Z
|
meta/train/env.py
|
mtcrawshaw/meta
|
b511885af4405715c7b35f8295cef88021a926be
|
[
"MIT"
] | null | null | null |
meta/train/env.py
|
mtcrawshaw/meta
|
b511885af4405715c7b35f8295cef88021a926be
|
[
"MIT"
] | null | null | null |
""" Environment wrappers + functionality. """
import pickle
from typing import Dict, Tuple, List, Any, Callable
import numpy as np
import torch
import gym
from gym import Env
from gym.spaces import Box, Discrete, Space
from baselines import bench
from baselines.common.running_mean_std import RunningMeanStd
from baselines.common.vec_env import (
ShmemVecEnv,
DummyVecEnv,
VecEnvWrapper,
VecNormalize,
)
def get_env(
env_name: str,
num_processes: int = 1,
seed: int = 1,
time_limit: int = None,
normalize_transition: bool = True,
normalize_first_n: int = None,
allow_early_resets: bool = False,
same_np_seed: bool = False,
**kwargs: Dict[str, Any],
) -> Env:
"""
Return environment object from environment name, with wrappers for added
functionality, such as multiprocessing and observation/reward normalization. Extra
arguments are passed to environment constructor.
Parameters
----------
env_name : str
Name of environment to create.
num_processes: int
Number of asynchronous copies of the environment to run simultaneously.
seed : int
Random seed for environment.
time_limit : int
Limit on number of steps for environment.
normalize_transition : bool
Whether or not to add environment wrapper to normalize observations and rewards.
normalize_first_n: int
If not equal to None, only normalize the first ``normalize_first_n`` elements of
the observation. If ``normalize_transition`` is False then this value is
ignored.
allow_early_resets: bool
        Whether or not to allow environments to be reset before done=True is returned.
same_np_seed : bool
Whether or not to use the same numpy random seed across each process. This
should really only be used when training on MetaWorld, as it allows for multiple
processes to generate/act over the same set of goals.
Returns
-------
env : Env
Environment object.
"""
# Create vectorized environment.
seeds = [seed + i for i in range(num_processes)]
if same_np_seed:
np_seeds = [seed] * num_processes
else:
np_seeds = list(seeds)
env_creators = [
get_single_env_creator(
env_name, seeds[i], np_seeds[i], time_limit, allow_early_resets, **kwargs
)
for i in range(num_processes)
]
if num_processes > 1:
env = ShmemVecEnv(env_creators, context="fork")
elif num_processes == 1:
# Use DummyVecEnv if num_processes is 1 to avoid multiprocessing overhead.
env = DummyVecEnv(env_creators)
else:
raise ValueError("Invalid num_processes value: %s" % num_processes)
# Add environment wrappers to normalize observations/rewards and convert between
# numpy arrays and torch.Tensors.
if normalize_transition:
env = VecNormalizeEnv(env, first_n=normalize_first_n)
env = VecPyTorchEnv(env)
return env
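# Hypothetical usage sketch (environment name and argument values are illustrative,
# not taken from this project; kept in comments to avoid side effects on import):
#   env = get_env("CartPole-v1", num_processes=4, seed=0, time_limit=200)
#   obs = env.reset()                    # torch.Tensor of shape (4, obs_dim)
#   obs, rew, done, info = env.step(torch.zeros(4, dtype=torch.long))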
def get_single_env_creator(
env_name: str,
seed: int = 1,
np_seed: int = 1,
time_limit: int = None,
allow_early_resets: bool = False,
**kwargs: Dict[str, Any],
) -> Callable[..., Env]:
"""
Return a function that returns environment object with given env name. Used to
create a vectorized environment i.e. an environment object holding multiple
asynchronous copies of the same envrionment. Extra arguments are passed to
environment constructor.
Parameters
----------
env_name : str
Name of environment to create.
seed : int
Random seed for environment.
np_seed : int
Random seed for numpy. We provide separate seeds in order to sidestep an issue
with goal generation in MetaWorld.
time_limit : int
Limit on number of steps for environment.
allow_early_resets : bool
        Whether or not to allow environments to be reset before done=True is returned.
Returns
-------
env_creator : Callable[..., Env]
Function that returns environment object.
"""
def env_creator() -> Env:
# Set random seed. Note that we have to set seeds here despite having already
# set them in main.py, so that the seeds are different between child processes.
np.random.seed(np_seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Make environment object from either MetaWorld or Gym.
if is_metaworld_env_name(env_name):
env = MetaWorldEnv(env_name, **kwargs)
elif env_name == "unique-env":
env = UniqueEnv(**kwargs)
elif env_name == "parity-env":
env = ParityEnv(**kwargs)
else:
env = gym.make(env_name, **kwargs)
# Set environment seed.
env.seed(seed)
# Add environment wrapper to reset at time limit.
if time_limit is not None:
env = TimeLimitEnv(env, time_limit)
# Add environment wrapper to monitor rewards.
env = bench.Monitor(env, None, allow_early_resets=allow_early_resets)
# Add environment wrapper to compute success/failure for some environments. Note
# that this wrapper must be wrapped around a bench.Monitor instance.
if env_name in REWARD_THRESHOLDS:
reward_threshold = REWARD_THRESHOLDS[env_name]
env = SuccessEnv(env, reward_threshold)
return env
return env_creator
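# Hypothetical sketch: the creator is invoked lazily by the vectorized wrappers above,
#   creator = get_single_env_creator("CartPole-v1", seed=3, time_limit=200)
#   env = creator()   # a bench.Monitor-wrapped gym env (plus SuccessEnv if thresholded)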
def get_num_tasks(env_name: str) -> int:
"""
Compute number of tasks to simultaneously handle. This will be 1 unless we are
training on a multi-task benchmark such as MetaWorld's MT10.
"""
num_tasks = 1
metaworld_benchmark_names = get_metaworld_benchmark_names()
if env_name in metaworld_benchmark_names:
if env_name == "MT10":
num_tasks = 10
elif env_name == "MT50":
num_tasks = 50
elif env_name == "ML10_train":
num_tasks = 10
elif env_name == "ML45_train":
num_tasks = 45
elif env_name == "ML10_test":
num_tasks = 5
elif env_name == "ML45_test":
num_tasks = 5
else:
raise NotImplementedError
return num_tasks
class VecNormalizeEnv(VecNormalize):
"""
Environment wrapper to normalize observations and rewards. We modify VecNormalize
from baselines in order to implement a key change: We want to be able to normalize
only a part of the observation. This is because in multi-task environments, the
"observation" is really a concatenation of an environment observation with a one-hot
vector which denotes the task-index. When normalizing the observation, we want to be
able to leave the one-hot vector as is. Note that this is only supported for
environments with observations that are flat vectors.
"""
def __init__(
self,
venv: Env,
ob: bool = True,
ret: bool = True,
clipob: float = 10.0,
cliprew: float = 10.0,
gamma: float = 0.99,
epsilon: float = 1e-8,
first_n: int = None,
) -> None:
"""
Modified init function of VecNormalize. The only change here is in modifying the
shape of self.ob_rms. The argument ``first_n`` controls how much of the
observation we want to normalize: for an observation ``obs``, we normalize the
vector ``obs[:first_n]``.
"""
VecEnvWrapper.__init__(self, venv)
if ob is not None:
if first_n is None:
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape)
else:
if len(self.observation_space.shape) == 1:
self.ob_rms = RunningMeanStd(shape=(first_n,))
else:
raise NotImplementedError
else:
self.ob_rms = None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
self.first_n = first_n
def _obfilt(self, obs: np.ndarray) -> np.ndarray:
# Take portion of observation to normalize, if necessary.
if self.first_n is None:
update_obs = obs
else:
update_obs = obs[:, : self.first_n]
# Normalize obs.
if self.ob_rms:
self.ob_rms.update(update_obs)
update_obs = np.clip(
(update_obs - self.ob_rms.mean)
/ np.sqrt(self.ob_rms.var + self.epsilon),
-self.clipob,
self.clipob,
)
# Reconstruct observation, if necessary.
if self.first_n is None:
obs = update_obs
else:
obs[:, : self.first_n] = update_obs
return obs
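# Sketch of the partial normalization described above (values are illustrative):
# with a flat observation of length 5 whose last 2 entries are a one-hot task index,
#   venv = VecNormalizeEnv(venv, first_n=3)
# whitens and clips only obs[:, :3]; obs[:, 3:] (the one-hot part) passes through
# untouched.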
class VecPyTorchEnv(VecEnvWrapper):
"""
Environment wrapper to convert observations, actions and rewards to torch.Tensors,
given a vectorized environment.
"""
def reset(self) -> torch.Tensor:
""" Environment reset function. """
obs = self.venv.reset()
obs = torch.from_numpy(obs).float()
return obs
def step_async(self, actions: torch.Tensor) -> None:
""" Asynchronous portion of step. """
actions = actions.cpu().numpy()
self.venv.step_async(actions)
def step_wait(self) -> Tuple[torch.Tensor, torch.Tensor, bool, Dict[str, Any]]:
""" Synchronous portion of step. """
obs, reward, done, info = self.venv.step_wait()
obs = torch.from_numpy(obs).float()
reward = torch.Tensor(reward).float().unsqueeze(-1)
return obs, reward, done, info
class TimeLimitEnv(gym.Wrapper):
""" Environment wrapper to reset environment when it hits time limit. """
def __init__(self, env: Env, time_limit: int) -> None:
""" Init function for TimeLimitEnv. """
super(TimeLimitEnv, self).__init__(env)
self._time_limit = time_limit
self._elapsed_steps = None
def step(self, action: Any) -> Any:
""" Step function for environment wrapper. """
assert self._elapsed_steps is not None
observation, reward, done, info = self.env.step(action)
self._elapsed_steps += 1
if self._elapsed_steps >= self._time_limit:
info["time_limit_hit"] = True
done = True
return observation, reward, done, info
def reset(self, **kwargs: Dict[str, Any]) -> Any:
""" Reset function for environment wrapper. """
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class MetaWorldEnv(Env):
""" Environment to wrap MetaWorld environments. """
def __init__(
self,
benchmark_name: str,
save_memory: bool = False,
add_observability: bool = False,
) -> None:
""" Init function for environment wrapper. """
# We import here so that we avoid importing metaworld if possible, since it is
# dependent on mujoco.
import metaworld
from metaworld import Task
# Set config for each benchmark.
if benchmark_name.startswith("MT1_"):
env_name = benchmark_name[4:]
benchmark = metaworld.MT1(env_name)
env_dict = {env_name: benchmark.train_classes[env_name]}
tasks = benchmark.train_tasks
resample_tasks = False
self.augment_obs = False
elif benchmark_name == "MT10":
benchmark = metaworld.MT10()
env_dict = benchmark.train_classes
tasks = benchmark.train_tasks
resample_tasks = False
self.augment_obs = True
elif benchmark_name == "MT50":
benchmark = metaworld.MT50()
env_dict = benchmark.train_classes
tasks = benchmark.train_tasks
resample_tasks = False
self.augment_obs = True
elif benchmark_name.startswith("ML1_train_"):
env_name = benchmark_name[10:]
benchmark = metaworld.ML1(env_name)
env_dict = {env_name: benchmark.train_classes[env_name]}
tasks = benchmark.train_tasks
resample_tasks = True
self.augment_obs = False
elif benchmark_name == "ML10_train":
benchmark = metaworld.ML10()
env_dict = benchmark.train_classes
tasks = benchmark.train_tasks
resample_tasks = True
self.augment_obs = True
elif benchmark_name == "ML45_train":
benchmark = metaworld.ML45()
env_dict = benchmark.train_classes
tasks = benchmark.train_tasks
resample_tasks = True
self.augment_obs = True
elif benchmark_name.startswith("ML1_test_"):
env_name = benchmark_name[9:]
benchmark = metaworld.ML1(env_name)
env_dict = {env_name: benchmark.test_classes[env_name]}
tasks = benchmark.test_tasks
resample_tasks = True
self.augment_obs = False
elif benchmark_name == "ML10_test":
benchmark = metaworld.ML10()
env_dict = benchmark.test_classes
tasks = benchmark.test_tasks
resample_tasks = True
self.augment_obs = True
elif benchmark_name == "ML45_test":
benchmark = metaworld.ML45()
env_dict = benchmark.test_classes
tasks = benchmark.test_tasks
resample_tasks = True
self.augment_obs = True
else:
raise NotImplementedError
# Construct list of tasks for each environment, adding observability to tasks if
# necessary.
env_tasks = {}
for task in tasks:
if add_observability:
task_data = dict(pickle.loads(task.data))
task_data["partially_observable"] = False
task = Task(env_name=task.env_name, data=pickle.dumps(task_data))
if task.env_name in env_tasks:
if resample_tasks:
env_tasks[task.env_name].append(task)
else:
env_tasks[task.env_name] = [task]
# Construct list of environment classes or class instances.
self.save_memory = save_memory
if self.save_memory:
self.envs_info = [
{"env_name": env_name, "env_cls": env_cls, "tasks": env_tasks[env_name]}
for (env_name, env_cls) in env_dict.items()
]
else:
self.envs_info = [
{"env_name": env_name, "env": env_cls(), "tasks": env_tasks[env_name]}
for (env_name, env_cls) in env_dict.items()
]
self.num_tasks = len(self.envs_info)
# Sample environment.
self._sample_environment()
def _sample_environment(self) -> None:
""" Sample a new environment and possibly a task for that environment. """
# Sample environment. We use torch here to get random choices in order to handle
# seeding separately from other parts of the code. We can't use numpy here
# because with multi-task benchmarks, each process is given the same numpy seed.
# We can't use random here because tune() uses it, and using it in both creates
# reproducibility issues with saving/loading during tuning.
self.active_task = torch.randint(high=self.num_tasks, size=(1,)).item()
if self.save_memory:
self.active_env = self.envs_info[self.active_task]["env_cls"]()
else:
self.active_env = self.envs_info[self.active_task]["env"]
# Sample task for environment.
env_tasks = self.envs_info[self.active_task]["tasks"]
task_choice = torch.randint(high=len(env_tasks), size=(1,)).item()
task = env_tasks[task_choice]
self.active_env.set_task(task)
def step(self, action: Any) -> Tuple[Any, Any, Any, Any]:
""" Run one timestep of environment dynamics. """
# Step active environment and add task index to observation if necessary.
obs, reward, done, info = self.active_env.step(action)
if self.augment_obs:
obs = self.add_task_to_obs(obs)
return obs, reward, done, info
def reset(self, **kwargs: Dict[str, Any]) -> Any:
""" Resets environment to initial state and returns observation. """
# Choose a new environment and sample a new task for it.
self._sample_environment()
# Reset environment and return observation.
obs = self.active_env.reset(**kwargs)
if self.augment_obs:
obs = self.add_task_to_obs(obs)
return obs
def add_task_to_obs(self, obs: Any) -> Any:
""" Augment an observation with the one-hot task index. """
assert len(obs.shape) == 1
one_hot = np.zeros(self.num_tasks)
one_hot[self.active_task] = 1.0
new_obs = np.concatenate([obs, one_hot])
return new_obs
@property
def observation_space(self) -> Space:
if self.augment_obs:
env_space = self.active_env.observation_space
assert len(env_space.shape) == 1
obs_dim = env_space.shape[0] + self.num_tasks
return Box(low=-np.inf, high=np.inf, shape=(obs_dim,))
else:
return self.active_env.observation_space
@property
def action_space(self) -> Space:
return self.active_env.action_space
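# Minimal instantiation sketch (hedged: requires metaworld/mujoco to be installed;
# the benchmark name is illustrative):
#   env = MetaWorldEnv("MT1_reach-v2")
#   obs = env.reset()          # samples an environment + task, then resets it
# Observation augmentation example: with num_tasks=3 and active_task=1,
#   add_task_to_obs(np.array([0.5, -0.2])) -> array([0.5, -0.2, 0.0, 1.0, 0.0])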
class SuccessEnv(gym.Wrapper):
"""
Environment wrapper to compute success/failure for each episode.
"""
def __init__(self, env: Env, reward_threshold: int) -> None:
""" Init function for SuccessEnv. """
super(SuccessEnv, self).__init__(env)
self._reward_threshold = reward_threshold
def step(self, action: Any) -> Any:
""" Step function for environment wrapper. """
# Add success to info if done=True, with success=1.0 when the reward over the
# episode is greater than the given reward threshold, and 0.0 otherwise.
observation, reward, done, info = self.env.step(action)
if done:
info["success"] = float(info["episode"]["r"] >= self._reward_threshold)
else:
info["success"] = 0.0
return observation, reward, done, info
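# Example of the threshold logic (sketch): with reward_threshold=195 (CartPole-v1),
# an episode return of 200.0 yields info["success"] == 1.0 when done=True, while a
# return of 180.0 yields info["success"] == 0.0.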
def get_metaworld_benchmark_names() -> List[str]:
""" Returns a list of Metaworld benchmark names. """
return [
"MT1",
"MT10",
"MT50",
"ML1_train",
"ML1_test",
"ML10_train",
"ML45_train",
"ML10_test",
"ML45_test",
]
def get_metaworld_mt_benchmark_names() -> List[str]:
""" Returns a list of Metaworld multi-task benchmark names. """
return ["MT1", "MT10", "MT50"]
def get_metaworld_ml_benchmark_names() -> List[str]:
""" Returns a list of Metaworld meta-learning benchmark names. """
return [
"ML1_train",
"ML1_test",
"ML10_train",
"ML45_train",
"ML10_test",
"ML45_test",
]
def get_metaworld_single_benchmark_names() -> List[str]:
""" Returns a list of Metaworld single-task benchmark names. """
return ["MT1", "ML1_train", "ML1_test"]
def get_metaworld_env_names() -> List[str]:
""" Returns a list of Metaworld environment names. """
return MT50_V2
def is_metaworld_env_name(env_name: str) -> bool:
"""
Whether or not `env_name` specifies a MetaWorld benchmark. This is the case when
`env_name` is equal to a benchmark with multiple tasks (such as "MT10" or
"ML10_train") or when `env_name` is a single task benchmark name (such as "MT1" or
"ML1_train") concatenated with "_" followed by a MetaWorld environment name.
"""
is_mw = False
env_names = get_metaworld_env_names()
benchmark_names = get_metaworld_benchmark_names()
single_benchmark_names = get_metaworld_single_benchmark_names()
    # Check if `env_name` specifies a benchmark with multiple tasks.
if env_name in benchmark_names and env_name not in single_benchmark_names:
is_mw = True
# Check if `env_name` specifies a single task benchmark.
for single_benchmark_name in single_benchmark_names:
prefix = single_benchmark_name + "_"
if env_name.startswith(prefix):
remainder = env_name[len(prefix) :]
if remainder in env_names:
is_mw = True
return is_mw
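# Examples (sketch): is_metaworld_env_name("MT10") -> True,
# is_metaworld_env_name("MT1_reach-v2") -> True (single-task prefix + env name),
# is_metaworld_env_name("CartPole-v1") -> False.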
class ParityEnv(Env):
""" Environment for testing. Only has two states, and two actions. """
def __init__(self) -> None:
""" Init function for ParityEnv. """
self.states = [np.array([1, 0]), np.array([0, 1])]
self.observation_space = Discrete(len(self.states))
self.action_space = Discrete(len(self.states))
self.initial_state_index = 0
self.state_index = self.initial_state_index
self.state = self.states[self.state_index]
def reset(self) -> np.ndarray:
""" Reset environment to initial state. """
self.state_index = self.initial_state_index
self.state = self.states[self.state_index]
return self.state
def step(self, action: int) -> Tuple[int, float, bool, dict]:
"""
Step function for environment. Returns an observation, a reward,
whether or not the environment is done, and an info dictionary, as is
the standard for OpenAI gym environments.
"""
reward = 1 if action == self.state_index else -1
self.state_index = (self.state_index + 1) % len(self.states)
self.state = self.states[self.state_index]
done = False
info: Dict = {}
return self.state, reward, done, info
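# Reward sketch: from the initial state (index 0), step(0) returns reward +1 and
# advances to index 1; calling step(0) again then returns reward -1.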
class UniqueEnv(Env):
""" Environment for testing. Each step returns a unique observation and reward. """
def __init__(self) -> None:
""" Init function for UniqueEnv. """
self.observation_space = Box(low=0.0, high=np.inf, shape=(1,))
self.action_space = Discrete(2)
self.timestep = 1
def reset(self) -> np.ndarray:
""" Reset environment to initial state. """
self.timestep = 1
return np.array(float(self.timestep))
def step(self, action: float) -> Tuple[float, float, bool, dict]:
"""
Step function for environment. Returns an observation, a reward,
whether or not the environment is done, and an info dictionary, as is
the standard for OpenAI gym environments.
"""
reward = float(self.timestep)
done = False
self.timestep += 1
obs = float(self.timestep)
info: Dict = {}
return np.array(obs), reward, done, info
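# Trace sketch: reset() -> array(1.0); step(a) -> (array(2.0), 1.0, False, {});
# the next step(a) -> (array(3.0), 2.0, False, {}), and so on.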
def get_base_env(env: Env) -> Env:
"""
Very hacky way to return a reference to the base environment underneath a series of
environment wrappers. In the case that an environment wrapper is vectorized (i.e.
    wraps around multiple environments), we return a reference to the first environment
in the list.
"""
wrapped_names = ["env", "envs", "venv", "active_env"]
is_wrapper = lambda e: any(hasattr(e, name) for name in wrapped_names)
while is_wrapper(env):
if hasattr(env, "env"):
env = env.env
elif hasattr(env, "envs"):
env = env.envs[0]
elif hasattr(env, "venv"):
env = env.venv
elif hasattr(env, "active_env"):
env = env.active_env
else:
raise ValueError
return env
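# Unwrapping sketch (hedged; the wrapper stack is illustrative): for
#   env = VecPyTorchEnv(VecNormalizeEnv(DummyVecEnv([creator])))
# get_base_env(env) follows .venv -> .venv -> .envs[0] (and .env / .active_env where
# present) until no wrapper attribute remains, returning the innermost environment.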
# HARDCODE. This is a hard-coding of a reward threshold for some environments. An
# episode is considered a success when the reward over that episode is greater than the
# corresponding threshold.
REWARD_THRESHOLDS = {
"CartPole-v1": 195,
"LunarLanderContinuous-v2": 200,
"Hopper-v2": 3800,
"Hopper-v3": 3800,
"HalfCheetah-v2": 4800,
"HalfCheetah-v3": 4800,
"Ant-v2": 6000,
"Ant-v3": 6000,
}
# HARDCODE. This is copied from the metaworld repo to avoid the need to import metaworld
# unnecessarily. Since metaworld relies on mujoco, we don't want to import it if we
# don't have to.
MT50_V2 = [
"assembly-v2",
"basketball-v2",
"bin-picking-v2",
"box-close-v2",
"button-press-topdown-v2",
"button-press-topdown-wall-v2",
"button-press-v2",
"button-press-wall-v2",
"coffee-button-v2",
"coffee-pull-v2",
"coffee-push-v2",
"dial-turn-v2",
"disassemble-v2",
"door-close-v2",
"door-lock-v2",
"door-open-v2",
"door-unlock-v2",
"hand-insert-v2",
"drawer-close-v2",
"drawer-open-v2",
"faucet-open-v2",
"faucet-close-v2",
"hammer-v2",
"handle-press-side-v2",
"handle-press-v2",
"handle-pull-side-v2",
"handle-pull-v2",
"lever-pull-v2",
"pick-place-wall-v2",
"pick-out-of-hole-v2",
"push-back-v2",
"pick-place-v2",
"plate-slide-v2",
"plate-slide-side-v2",
"plate-slide-back-v2",
"plate-slide-back-side-v2",
"peg-insert-side-v2",
"peg-unplug-side-v2",
"soccer-v2",
"stick-push-v2",
"stick-pull-v2",
"push-wall-v2",
"push-v2",
"reach-wall-v2",
"reach-v2",
"shelf-place-v2",
"sweep-into-v2",
"sweep-v2",
"window-open-v2",
"window-close-v2",
]
| 32.942931 | 88 | 0.622347 |
24e27c8dc73f2fc8560cce58427a4fcfda454f63 | 29,042 | py | Python | Kai/run/testEntireChain.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | ["Apache-2.0"] | 1 | 2022-01-17T17:29:38.000Z | 2022-01-17T17:29:38.000Z | Kai/run/testEntireChain.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | ["Apache-2.0"] | null | null | null | Kai/run/testEntireChain.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | ["Apache-2.0"] | 1 | 2021-12-15T10:56:50.000Z | 2021-12-15T10:56:50.000Z |
from __future__ import division, print_function
import os, sys, subprocess
import ROOT
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetRecalib import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetUncertainties import *
from FourTopNAOD.Kai.modules.LeptonLogic import TriggerAndLeptonLogic
from FourTopNAOD.Kai.modules.JetMETLogic import JetMETLogic
from FourTopNAOD.Kai.modules.Stitcher import Stitcher
from FourTopNAOD.Kai.modules.HistCloser import HistCloser
import argparse
# import collections, copy, json, math
# from array import array
# import multiprocessing
# import inspect
####Old list from Nanovisor V1 baseline generation
# import os, time, collections, copy, json, multiprocessing
# from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
# from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
# from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
# from PhysicsTools.NanoAODTools.postprocessing.modules.common.PrefireCorr import *
# from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
# from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetRecalib import *
# from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetUncertainties import *
# from FourTopNAOD.Kai.modules.BaselineSelector import BaselineSelector
# from FourTopNAOD.Kai.modules.trigger import Trigger
# from FourTopNAOD.Kai.modules.MCTreeDev import MCTrees
parser = argparse.ArgumentParser(description='Test of entire chain of production')
parser.add_argument('--stage', dest='stage', action='store', type=str,
                    help='Stage to be processed: test, cache, correct, cutstring, process, subprocess, combined, or hist')
parser.add_argument('--era', dest='era', action='store', type=str, default=None,
help='Era to be processed: 2017 or 2018')
parser.add_argument('--subera', dest='subera', action='store', type=str, default=None,
help='Subera to be processed: A, B, C, D, E, F (year dependant)')
parser.add_argument('--rmin', dest='rmin', action='store', type=int, default=0,
help='inclusive range minimum (samples start counting at 0; default is 0)')
parser.add_argument('--rmax', dest='rmax', action='store', type=int, default=99999,
help='inclusive range maximum (samples start counting at 0; default is 99999)')
args = parser.parse_args()
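# Example invocations (sketch; stage/era/range values are illustrative):
#   python testEntireChain.py --stage test --era 2017
#   python testEntireChain.py --stage process --era 2017 --rmin 0 --rmax 3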
#### Trigger tuples, not strictly necessary in this file
Tuples = []
Tuples.append(("2017", None, False, None))
Tuples.append(("2017", "B", True, "ElMu"))
Tuples.append(("2017", "C", True, "ElMu"))
Tuples.append(("2017", "D", True, "ElMu"))
Tuples.append(("2017", "E", True, "ElMu"))
Tuples.append(("2017", "F", True, "ElMu"))
Tuples.append(("2017", "B", True, "MuMu"))
Tuples.append(("2017", "C", True, "MuMu"))
Tuples.append(("2017", "D", True, "MuMu"))
Tuples.append(("2017", "E", True, "MuMu"))
Tuples.append(("2017", "F", True, "MuMu"))
Tuples.append(("2017", "B", True, "ElEl"))
Tuples.append(("2017", "C", True, "ElEl"))
Tuples.append(("2017", "D", True, "ElEl"))
Tuples.append(("2017", "E", True, "ElEl"))
Tuples.append(("2017", "F", True, "ElEl"))
Tuples.append(("2017", "B", True, "Mu"))
Tuples.append(("2017", "C", True, "Mu"))
Tuples.append(("2017", "D", True, "Mu"))
Tuples.append(("2017", "E", True, "Mu"))
Tuples.append(("2017", "F", True, "Mu"))
Tuples.append(("2017", "B", True, "El"))
Tuples.append(("2017", "C", True, "El"))
Tuples.append(("2017", "D", True, "El"))
Tuples.append(("2017", "E", True, "El"))
Tuples.append(("2017", "F", True, "El"))
Tuples.append(("2018", None, False, None))
Tuples.append(("2018", "A", True, "ElMu"))
Tuples.append(("2018", "B", True, "ElMu"))
Tuples.append(("2018", "C", True, "ElMu"))
Tuples.append(("2018", "D", True, "ElMu"))
Tuples.append(("2018", "A", True, "MuMu"))
Tuples.append(("2018", "B", True, "MuMu"))
Tuples.append(("2018", "C", True, "MuMu"))
Tuples.append(("2018", "D", True, "MuMu"))
Tuples.append(("2018", "A", True, "ElEl"))
Tuples.append(("2018", "B", True, "ElEl"))
Tuples.append(("2018", "C", True, "ElEl"))
Tuples.append(("2018", "D", True, "ElEl"))
Tuples.append(("2018", "A", True, "Mu"))
Tuples.append(("2018", "B", True, "Mu"))
Tuples.append(("2018", "C", True, "Mu"))
Tuples.append(("2018", "D", True, "Mu"))
Tuples.append(("2018", "A", True, "El"))
Tuples.append(("2018", "B", True, "El"))
Tuples.append(("2018", "C", True, "El"))
Tuples.append(("2018", "D", True, "El"))
if args.era:
Tuples = [tup for tup in Tuples if tup[0] == args.era]
if args.subera:
Tuples = [tup for tup in Tuples if tup[1] == args.subera]
Tuples.sort(key=lambda j : j[1]) #sort by subera
Tuples.sort(key=lambda j : j[0]) #then by era (the stable sort preserves the subera ordering)
#### Dictionary for the data jetrecalibrator
dataRecalib = {"2017": {"B": jetRecalib("Fall17_17Nov2017B_V32_DATA","Fall17_17Nov2017_V32_DATA"),
"C": jetRecalib("Fall17_17Nov2017C_V32_DATA","Fall17_17Nov2017_V32_DATA"),
"D": jetRecalib("Fall17_17Nov2017DE_V32_DATA","Fall17_17Nov2017_V32_DATA"),
"E": jetRecalib("Fall17_17Nov2017DE_V32_DATA","Fall17_17Nov2017_V32_DATA"),
"F": jetRecalib("Fall17_17Nov2017F_V32_DATA","Fall17_17Nov2017_V32_DATA"),
"NONE": "NothingToSeeHere"
}
}
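# Lookup sketch: dataRecalib[era][subera] returns the jetRecalib module for that
# data-taking period, e.g. dataRecalib["2017"]["F"]; see the commented-out
# "if isData: modules.append(dataRecalib[era][subera])" usage further below.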
#Test the module initialization and cutstrings produced
if args.stage == 'test':
Mods = []
for tup in Tuples:
Mods.append(JetMETLogic('baseline', era=tup[0], subera=tup[1], isData=tup[2], TriggerChannel=tup[3]))
for mod in Mods:
print(mod.getCutString())
#Cache files locally from a variety_pack_tuple, because remote files are wasting my time
elif args.stage == 'cache':
tuple_file = "dirtestTriggerLogic/variety_pack_tuples_2017_NANOv5.txt"
local_tuple_file = "dirtestTriggerLogic/local_variety_pack_tuples_2017_NANOv5.txt"
files_to_cache = []
file_names = []
local_tuple_lines = []
with open(tuple_file, "r") as in_f:
for l, line in enumerate(in_f):
#Don't clean line, going to write back with file name replacement
tup = line.split(",")
files_to_cache.append(tup[0])
            #Not worth spending more time on this; just number the cached files sequentially
file_names.append("dirtestTriggerLogic/file_{}.root".format(str(l)))
local_tuple_lines.append(line.replace(files_to_cache[-1], file_names[-1]))
xrd_list = zip(files_to_cache, file_names)
with open(local_tuple_file, "w") as out_f:
for line in local_tuple_lines:
out_f.write(line)
# subprocess.Popen(args="voms-proxy-info", shell=True, executable="/bin/zsh", env=dict(os.environ))
# subprocess.Popen(args="print $PWD", shell=True, executable="/bin/zsh", env=dict(os.environ))
for file_original, file_local in xrd_list:
pass
# os.system("echo xrdcp {} {}".format(file_original, file_local))
# subprocess.Popen(args="xrdcp {} {}".format(file_original, file_local), shell=True, executable="/bin/zsh", env=dict(os.environ))
#Call subprocess such that the script waits for it to finish. Otherwise, the script continues and can finish while subprocesses run in the background, and output will get muxed a bit. Useful, however, for interacting with shell while something runs!
subprocess.Popen(args="xrdcp {} {}".format(file_original, file_local), shell=True, executable="/bin/zsh", env=dict(os.environ)).wait()
#Test the counts from using the cutstrings
elif args.stage == 'cutstring':
local_tuple_file = "dirtestTriggerLogic/local_variety_pack_tuples_2017_NANOv5.txt"
with open(local_tuple_file, "r") as in_f:
for l, line in enumerate(in_f):
# if l > 0:
# continue
            cline = line.rstrip()  # strip the trailing newline/whitespace
tup = cline.split(",") #0 filename, 1 era, 2 subera, 3 isData, 4 isSignal, 5 nEvents, 6 nEvents+, 7 nEvents-, 8 crossSection, 9 channel
# for t in tup: print(t)
files = [tup[0]]
era = tup[1]
subera = tup[2]
isData = tup[3]
isSignal = tup[4]
nEvents = int(tup[5])
try:
nEventsPositive = int(tup[6])
nEventsNegative = int(tup[7])
            except (IndexError, ValueError):  # field missing or not an integer
nEventsPositive = 0
nEventsNegative = 0
crossSection = float(tup[8])
channel = tup[9]
#Skip choices if requested, only with explicit parameter choice
if args.era and era != args.era:
continue
if args.subera and subera != args.subera:
continue
if isData in ["True", "TRUE", "true"]:
isData = True
else:
isData = False
if isSignal in ["True", "TRUE", "true"]:
isSignal = True
else:
isSignal = False
if channel not in ["ElMu", "MuMu", "ElEl", "Mu", "El"]:
# print("converting channel {} to None".format(channel))
channel = None
if era == "2017":
lumi = 41.53
elif era == "2018":
lumi = 60 #rough estimate
else:
lumi = 1
if not isData:
if nEventsNegative > 0 and nEventsPositive > 0:
weight = lumi * 1000 * crossSection / (nEventsPositive - nEventsNegative)
else:
weight = lumi * 1000 * crossSection / nEvents
subera = None
else:
weight = 1
# print("era= {}\t subera={}\t isData={}\t TriggerChannel={}\t weight={}".format(era, subera, str(isData), channel, weight))
modules = [JetMETLogic('baseline', era=era, subera=subera, isData=isData, weightMagnitude=weight)]
# print(modules[0].getCutString())
p = PostProcessor(".",
files,
cut=modules[0].getCutString(),
# cut=None,
branchsel=None,
modules=modules,
compression="LZMA:9",
friend=False,
postfix="_Chain",
jsonInput=None,
noOut=True,
# noOut=False,
justcount=True,
# justcount=False,
provenance=False,
haddFileName=None,
fwkJobReport=False,
histFileName=None,
histDirName=None,
outputbranchsel=None,
maxEntries=None,
firstEntry=0,
# prefetch=False,
prefetch=True,
longTermCache=False
)
p.run()
#Get the real number of events, +, - from the files, to do quicker studies
elif args.stage == 'correct':
local_tuple_file = "dirtestTriggerLogic/local_variety_pack_tuples_2017_NANOv5.txt"
corrected_lines = []
with open(local_tuple_file, "r") as in_f:
for l, line in enumerate(in_f):
# if l > 0:
# continue
            cline = line.rstrip()  # strip the trailing newline/whitespace
tup = cline.split(",") #0 filename, 1 era, 2 subera, 3 isData, 4 isSignal, 5 nEvents, 6 nEvents+, 7 nEvents-, 8 crossSection, 9 channel
# for t in tup: print(t)
files = [tup[0]]
era = tup[1]
subera = tup[2]
isData = tup[3]
isSignal = tup[4]
nEvents = int(tup[5])
try:
nEventsPositive = int(tup[6])
nEventsNegative = int(tup[7])
            except (IndexError, ValueError):  # field missing or not an integer
nEventsPositive = 0
nEventsNegative = 0
crossSection = float(tup[8])
channel = tup[9]
#Skip choices if requested, only with explicit parameter choice
if args.era and era != args.era:
continue
if args.subera and subera != args.subera:
continue
if isData in ["True", "TRUE", "true"]:
isData = True
else:
isData = False
if isSignal in ["True", "TRUE", "true"]:
isSignal = True
else:
isSignal = False
if channel not in ["ElMu", "MuMu", "ElEl", "Mu", "El"]:
# print("converting channel {} to None".format(channel))
channel = None
if era == "2017":
lumi = 41.53
elif era == "2018":
lumi = 60 #rough estimate
else:
lumi = 1
root_file = ROOT.TFile.Open(files[0], 'r')
root_tree = root_file.Get('Events')
nEvents = int(root_tree.GetEntries())
if not isData:
nEventsPositive = int(root_tree.GetEntries('genWeight > 0'))
nEventsNegative = int(root_tree.GetEntries('genWeight < 0'))
else:
nEventsPositive = 0
nEventsNegative = 0
tup[5] = str(nEvents)
tup[6] = str(nEventsPositive)
tup[7] = str(nEventsNegative)
line_corrected = ",".join(tup) + "\n"
# print("line vs corrected line:")
# print(line + line_corrected)
corrected_lines.append(line_corrected)
with open(local_tuple_file, "w") as out_f:
for corrected_line in corrected_lines:
print(corrected_line)
out_f.write(corrected_line)
#Just run the module without the cutstring, for comparing counts
elif args.stage == 'process':
local_tuple_file = "dirtestTriggerLogic/local_variety_pack_tuples_2017_NANOv5.txt"
with open(local_tuple_file, "r") as in_f:
for l, line in enumerate(in_f):
# if l > 0:
# continue
if l < args.rmin or l > args.rmax:
continue
            cline = line.rstrip()  # strip the trailing newline/whitespace
tup = cline.split(",") #0 filename, 1 era, 2 subera, 3 isData, 4 isSignal, 5 nEvents, 6 nEvents+, 7 nEvents-, 8 crossSection, 9 channel
# for t in tup: print(t)
files = [tup[0]]
era = tup[1]
subera = tup[2]
isData = tup[3]
isSignal = tup[4]
nEvents = int(tup[5])
try:
nEventsPositive = int(tup[6])
nEventsNegative = int(tup[7])
            except (IndexError, ValueError):  # field missing or not an integer
nEventsPositive = 0
nEventsNegative = 0
crossSection = float(tup[8])
channel = tup[9]
#Skip choices if requested, only with explicit parameter choice
if args.era and era != args.era:
continue
if args.subera and subera != args.subera:
continue
if isData in ["True", "TRUE", "true"]:
isData = True
else:
isData = False
if isSignal in ["True", "TRUE", "true"]:
isSignal = True
else:
isSignal = False
if channel not in ["ElMu", "MuMu", "ElEl", "Mu", "El"]:
# print("converting channel {} to None".format(channel))
channel = None
if era == "2017":
lumi = 41.53
elif era == "2018":
lumi = 60 #rough estimate
else:
lumi = 1
if not isData:
if nEvents == (nEventsNegative + nEventsPositive):
weight = lumi * 1000 * crossSection / (nEventsPositive - nEventsNegative)
else:
weight = lumi * 1000 * crossSection / nEvents
subera = None
else:
weight = 1
# print("era= {}\t subera={}\t isData={}\t TriggerChannel={}\t weight={}".format(era, subera, str(isData), channel, weight))
modules = []
if not isData:
if era == "2017":
modules.append(puWeightProducer(pufile_mc2017, pufile_data2017, "pu_mc", "pileup", verbose=False, doSysVar=True))
elif era == "2018":
modules.append(puWeightProducer(pufile_mc2018, pufile_data2018, "pu_mc", "pileup", verbose=False, doSysVar=True))
modules.append(TriggerAndLeptonLogic(passLevel='baseline',era=era, subera=subera, isData=isData, TriggerChannel=channel,
weightMagnitude=weight, fillHists=True, mode="Flag"))
if not isData:
if l == 2:
modules.append(Stitcher(mode="Pass", era=era, channel="DL", condition="Pass", weightMagnitude=weight, fillHists=True, HTBinWidth=50, desiredHTMin=200, desiredHTMax=800))
elif l == 4:
modules.append(Stitcher(mode="Fail", era=era, channel="DL", condition="Fail", weightMagnitude=weight, fillHists=True, HTBinWidth=50, desiredHTMin=200, desiredHTMax=800))
elif l == 1: #Try to fix on-the-fly the SL channel which still has the wrong weight...
modules.append(Stitcher(mode="Pass", era=era, channel="SL", condition="Pass", weightMagnitude=weight*0.15, fillHists=True, HTBinWidth=50, desiredHTMin=200, desiredHTMax=800))
elif l == 3:
modules.append(Stitcher(mode="Fail", era=era, channel="SL", condition="Fail", weightMagnitude=weight, fillHists=True, HTBinWidth=50, desiredHTMin=200, desiredHTMax=800))
# if isData: modules.append(dataRecalib[era][subera])
# if not isData: modules.append(jetmetUncertaintiesProducer("2017", "Fall17_17Nov2017_V32_MC", [ "All" ], redoJEC=True)) #Broken to hell because of their shenanigans in jetSmearer.py
# modules.append(JetMETLogic(passLevel='baseline',era=era, subera=subera, isData=isData, weightMagnitude=weight, fillHists=True, mode="Flag",
# jetPtVar = "pt_nom", jetMVar = "mass_nom", debug=True))
modules.append(JetMETLogic(passLevel='baseline',era=era, subera=subera, isData=isData, weightMagnitude=weight, fillHists=True, mode="Flag",
jetPtVar = "pt", jetMVar = "mass", debug=True)) #without _nom, since no newer JECs
modules.append(HistCloser())
# print(modules[0].getCutString())
print(modules)
p = PostProcessor("/eos/user/n/nmangane/SWAN_projects/TriggerLogicRDF",
files,
# cut=modules[0].getCutString(),
cut=None,
branchsel=None,
modules=modules,
compression="LZMA:9",
friend=False,
postfix="_Chain",
jsonInput=None,
# noOut=True,
noOut=False,
# justcount=True,
justcount=False,
provenance=False,
haddFileName=None,
fwkJobReport=False,
# histFileName="/eos/user/n/nmangane/SWAN_projects/" + files[0].replace("file", "hist").replace(".root", "_Chain.root"),
histFileName=files[0].replace("file", "hist").replace(".root", "_Chain.root"),
histDirName="plots",
outputbranchsel=None,
maxEntries=None,
# maxEntries=20001,
firstEntry=0,
# prefetch=False,
prefetch=True,
longTermCache=False
)
# print(files[0].replace("file", "hist"))
p.run()
elif args.stage == 'subprocess':
#tailor code to also handle ranges, to break things up more
for x in xrange(args.rmin, args.rmax+1):
print(x)
#Decide to wait, or let multiple subprocesses start in sequence but run in parallel, using .wait() or not
subprocess.Popen(args="python testEntireChain.py --stage process --era {0} --rmin {1} --rmax {1} > /tmp/nmangane/subprocess_{1}.o".format(args.era, x), shell=True, executable="/bin/zsh", env=dict(os.environ))#.wait()
#Simultaneously run the module and the cutstring
elif args.stage == 'combined':
local_tuple_file = "dirtestTriggerLogic/local_variety_pack_tuples_2017_NANOv5.txt"
with open(local_tuple_file, "r") as in_f:
for l, line in enumerate(in_f):
# if l > 0:
# continue
            cline = line.rstrip()  # strip the trailing newline/whitespace
tup = cline.split(",") #0 filename, 1 era, 2 subera, 3 isData, 4 isSignal, 5 nEvents, 6 nEvents+, 7 nEvents-, 8 crossSection, 9 channel
# for t in tup: print(t)
files = [tup[0]]
era = tup[1]
subera = tup[2]
isData = tup[3]
isSignal = tup[4]
nEvents = int(tup[5])
try:
nEventsPositive = int(tup[6])
nEventsNegative = int(tup[7])
            except (IndexError, ValueError):  # field missing or not an integer
nEventsPositive = 0
nEventsNegative = 0
crossSection = float(tup[8])
channel = tup[9]
#Skip choices if requested, only with explicit parameter choice
if args.era and era != args.era:
continue
if args.subera and subera != args.subera:
continue
if isData in ["True", "TRUE", "true"]:
isData = True
else:
isData = False
if isSignal in ["True", "TRUE", "true"]:
isSignal = True
else:
isSignal = False
if channel not in ["ElMu", "MuMu", "ElEl", "Mu", "El"]:
# print("converting channel {} to None".format(channel))
channel = None
if era == "2017":
lumi = 41.53
elif era == "2018":
lumi = 60 #rough estimate
else:
lumi = 1
if not isData:
if nEvents == (nEventsNegative + nEventsPositive):
weight = lumi * 1000 * crossSection / (nEventsPositive - nEventsNegative)
else:
weight = lumi * 1000 * crossSection / nEvents
subera = None
else:
weight = 1
# print("era= {}\t subera={}\t isData={}\t TriggerChannel={}\t weight={}".format(era, subera, str(isData), channel, weight))
modules = [JetMETLogic('baseline', era=era, subera=subera, isData=isData, weightMagnitude=weight, fillHists=False)]
# print(modules[0].getCutString())
p = PostProcessor(".",
files,
cut=modules[0].getCutString(),
# cut=None,
branchsel=None,
modules=modules,
compression="LZMA:9",
friend=False,
postfix="_JetMET",
jsonInput=None,
noOut=True,
# noOut=False,
# justcount=True,
justcount=False,
provenance=False,
haddFileName=None,
fwkJobReport=False,
histFileName=None,
histDirName=None,
outputbranchsel=None,
maxEntries=None,
firstEntry=0,
# prefetch=False,
prefetch=True,
longTermCache=False
)
p.run()
elif args.stage == 'hist':
local_tuple_file = "dirtestTriggerLogic/local_variety_pack_tuples_2017_NANOv5.txt"
with open(local_tuple_file, "r") as in_f:
for l, line in enumerate(in_f):
# if l > 0:
# continue
            cline = line.rstrip()  # strip the trailing newline/whitespace
tup = cline.split(",") #0 filename, 1 era, 2 subera, 3 isData, 4 isSignal, 5 nEvents, 6 nEvents+, 7 nEvents-, 8 crossSection, 9 channel
# for t in tup: print(t)
files = [tup[0]]
era = tup[1]
subera = tup[2]
isData = tup[3]
isSignal = tup[4]
nEvents = int(tup[5])
try:
nEventsPositive = int(tup[6])
nEventsNegative = int(tup[7])
            except (IndexError, ValueError):  # field missing or not an integer
nEventsPositive = 0
nEventsNegative = 0
crossSection = float(tup[8])
channel = tup[9]
#Skip choices if requested, only with explicit parameter choice
if args.era and era != args.era:
continue
if args.subera and subera != args.subera:
continue
if isData in ["True", "TRUE", "true"]:
isData = True
else:
isData = False
if isSignal in ["True", "TRUE", "true"]:
isSignal = True
else:
isSignal = False
if channel not in ["ElMu", "MuMu", "ElEl", "Mu", "El"]:
# print("converting channel {} to None".format(channel))
channel = None
if era == "2017":
lumi = 41.53
elif era == "2018":
lumi = 60 #rough estimate
else:
lumi = 1
if not isData:
if nEvents == (nEventsNegative + nEventsPositive):
weight = lumi * 1000 * crossSection / (nEventsPositive - nEventsNegative)
else:
weight = lumi * 1000 * crossSection / nEvents
subera = None
theHistName = files[0].replace("file","hist_"+era)
else:
theHistName = files[0].replace("file","hist_"+era+subera+"_"+channel)
weight = 1
# print("era= {}\t subera={}\t isData={}\t TriggerChannel={}\t weight={}".format(era, subera, str(isData), channel, weight))
modules = [JetMETLogic('baseline', era=era, subera=subera, isData=isData, weightMagnitude=weight, fillHists=True)]
# print(modules[0].getCutString())
p = PostProcessor("dirtestTriggerLogic",
files,
# cut=modules[0].getCutString(),
cut=None,
branchsel=None,
modules=modules,
compression="LZMA:9",
friend=False,
postfix="_JetMET",
jsonInput=None,
noOut=True,
# noOut=False,
# justcount=True,
justcount=False,
provenance=False,
haddFileName=None,
fwkJobReport=False,
histFileName=theHistName,
histDirName="plots",
outputbranchsel=None,
maxEntries=None,
firstEntry=0,
# prefetch=False,
prefetch=True,
longTermCache=False
)
p.run()
| 46.4672 | 257 | 0.52851 |
b2778ff253236fbfc44dd305804ba4a992cc3ce4 | 837 | py | Python | Problems/Euler Project 95.py | vishwas-21/Project-Euler | ecc6cd843425647582488bcaaaa1815439251d56 | ["MIT"] | null | null | null | Problems/Euler Project 95.py | vishwas-21/Project-Euler | ecc6cd843425647582488bcaaaa1815439251d56 | ["MIT"] | null | null | null | Problems/Euler Project 95.py | vishwas-21/Project-Euler | ecc6cd843425647582488bcaaaa1815439251d56 | ["MIT"] | null | null | null |
import time
import math
start = time.time()
def sum_prop_div(n):
sumProp = 1
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
if i != n // i:
sumProp += i + n // i
else:
sumProp += i
return sumProp
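# Worked examples (proper-divisor sums): sum_prop_div(220) == 284 and
# sum_prop_div(284) == 220 (the classic amicable pair), while sum_prop_div(28) == 28
# (a perfect number).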
def length_chain(n):
ans = 1
dic = {n : True}
temp = n
while True:
if temp >= 1000000:
return 0
temp = sum_prop_div(temp)
try:
_ = dic[temp]
if temp == n:
return ans
return 0
        except KeyError:  # first time this value appears in the chain
dic[temp] = True
ans += 1
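# Worked examples (hedged sketch): length_chain(28) == 1 (perfect number),
# length_chain(220) == 2 (amicable pair 220 <-> 284), and length_chain(12496) == 5,
# the chain 12496 -> 14288 -> 15472 -> 14536 -> 14264 -> 12496 cited in Project Euler 95.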
ma = 0
ans = 0
for i in range(10, 1000000):
temp = length_chain(i)
if temp > ma:
ans = i
ma = temp
if i % 25000 == 0:
print(i, ans, ma, "Time -", time.time() - start)
| 18.6 | 56 | 0.433692 |
951062f2b612221df9b887fa8975ee39eba5e471 | 250,219 | py | Python | python/cudf/cudf/tests/test_dataframe.py | Quansight-Labs/cudf | d05de978f2d1f34b7629bd54ab9485df1f9949ef | ["Apache-2.0"] | null | null | null | python/cudf/cudf/tests/test_dataframe.py | Quansight-Labs/cudf | d05de978f2d1f34b7629bd54ab9485df1f9949ef | ["Apache-2.0"] | 1 | 2020-04-03T20:46:09.000Z | 2020-04-20T15:22:10.000Z | python/cudf/cudf/tests/test_dataframe.py | Quansight-Labs/cudf | d05de978f2d1f34b7629bd54ab9485df1f9949ef | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.testing import _utils as utils
from cudf.testing._utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
@pytest.mark.parametrize("columns", [["a", "b"], pd.Series(["a", "b"])])
def test_init_via_list_of_series(columns):
data = [pd.Series([1, 2]), pd.Series([3, 4])]
pdf = cudf.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(pdf, gdf)
@pytest.mark.parametrize("index", [None, [0, 1, 2]])
def test_init_with_missing_columns(index):
"""Test initialization when columns and data keys are disjoint."""
data = {"a": [1, 2, 3], "b": [2, 3, 4]}
columns = ["c", "d"]
pdf = cudf.DataFrame(data, columns=columns, index=index)
gdf = cudf.DataFrame(data, columns=columns, index=index)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
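# Alignment sketch (standard pandas semantics, shown for reference): for
#   {"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
#    "b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3])}
# the resulting frame has index [1, 2, 3, 4, 5, 6], with "a" null on 1-3 and "b"
# null on 4-6; the test checks that cudf reproduces this alignment.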
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
    # Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
def test_astype_dict():
gdf = cudf.DataFrame({"a": [1, 2, 3], "b": ["1", "2", "3"]})
pdf = gdf.to_pandas()
assert_eq(pdf.astype({"a": "str"}), gdf.astype({"a": "str"}))
assert_eq(
pdf.astype({"a": "str", "b": np.int64}),
gdf.astype({"a": "str", "b": np.int64}),
)
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
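    # 0b00101101 has bits 0, 2, 3 and 5 set, so rows 0, 2, 3, 5 are valid and
    # rows 1 and 4 are null (matching `validids` below).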
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid positions are correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
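    # The dtypes used to construct the frame should be reported back by .dtypes.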
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
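    # nan_as_null controls whether NaNs become nulls when constructing from
    # host or device data; lists with None always produce nulls.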
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
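    # assign returns a new frame with the added column and leaves the original unchanged.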
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
    # Equivalent to cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
            # Take rows of the key columns and build a set of the key values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
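    # keep_index controls whether the original index values survive partitioning.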
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # Numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # and nan is cast to 0 in non-float numerical columns, so normalize the
    # numeric columns before comparing.
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
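    # Assigning a plain array to a new column should adopt the frame's
    # existing (non-default) index.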
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
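    # Constructing from a 2D device array should mirror pandas construction
    # from the equivalent host array.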
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
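    # to_arrow output for the frame, a column, and the index should match
    # pyarrow's own conversion of the pandas equivalents.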
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
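    # Broadcasting a scalar into a column should preserve the scalar's dtype.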
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
@pytest.mark.parametrize("q", [0.2, 1, 0.001, [0.5], [], [0.005, 0.8, 0.03]])
@pytest.mark.parametrize("interpolation", ["higher", "lower", "nearest"])
def test_decimal_quantile(q, interpolation):
data = ["244.8", "32.24", "2.22", "98.14", "453.23", "5.45"]
gdf = cudf.DataFrame(
{"id": np.random.randint(0, 10, size=len(data)), "val": data}
)
gdf["id"] = gdf["id"].astype("float64")
gdf["val"] = gdf["val"].astype(cudf.Decimal64Dtype(7, 2))
pdf = gdf.to_pandas()
got = gdf.quantile(q, numeric_only=False, interpolation=interpolation)
expected = pdf.quantile(
q if isinstance(q, list) else [q],
numeric_only=False,
interpolation=interpolation,
)
assert_eq(got, expected)
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's string hashing can vary between runs, which sometimes makes
    # enc_with_name_arr and enc_arr identical, and there is no reliable way
    # to force hash to return the same value. Use an integer name so the
    # hashed name is constant.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
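    # Series.digitize should agree with numpy.digitize for both array and Series bins.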
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Explicitly typecast the pandas Series because None values would
    # otherwise produce `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
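    # set_index should accept column labels, array-likes, and Index objects,
    # matching pandas for both inplace and non-inplace calls.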
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # sort_index's ignore_index argument is only supported in pandas >= 1.0,
    # so it is applied manually to the expected result below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
# Bug in Arrow 0.14.1 where NaNs aren't handled
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
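    # select_dtypes should honor include/exclude filters the same way pandas does.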
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
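    # Both libraries fill shifted-in positions with missing values; fill with 0
    # and cast so the dtypes line up for comparison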
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True, so replace it with
    # `False` to keep the comparison meaningful
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
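    # gdf.values is a CuPy ndarray; .get() copies it to host memory for comparison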
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
            if str(e) == "Lengths must match." and not PANDAS_GE_120:
                # pandas < 1.1 raises here; see pandas GH 34256
                pytest.xfail(
                    "https://github.com/pandas-dev/pandas/issues/34256"
                )
            raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
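    # For the non-raising error modes the original series is expected back unchanged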
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
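    # var/std receive ddof explicitly so pandas and cuDF use the same normalization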
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
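    # The expected values in the parametrization above assume ddof=0 for var/std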
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
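    # Compare via pandas nullable dtypes so cuDF nulls survive the round trip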
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
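    # Reindex the cuDF result to pandas' column order before comparing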
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
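    # A categorical column's footprint is its categories plus its codes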
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
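    # A list column's footprint is its offsets child plus its elements child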
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
    expect = rows * 16  # Source columns: "B" (int64) + "C" (float64), 8 bytes per row each
    expect += rows * 16  # Codes: 8 bytes per row for each of the two levels
    expect += 3 * 8  # Level 0: three unique int64 values
    expect += 3 * 8  # Level 1: three unique float64 values
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas fills misaligned positions with NaN and upcasts to float64, so cast
    # both sides to float64 for the equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
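    # Normalize the condition/other operands: host (NumPy/pandas) copies feed the
    # pandas path, while device objects are passed straight to cuDF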
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
if pd.api.types.is_categorical_dtype(expect_where):
np.testing.assert_array_equal(
expect_where.cat.codes,
got_where.cat.codes.astype(expect_where.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_where.cat.categories, got_where.cat.categories)
np.testing.assert_array_equal(
expect_mask.cat.codes,
got_mask.cat.codes.astype(expect_mask.cat.codes.dtype)
.fillna(-1)
.to_array(),
)
assert_eq(expect_mask.cat.categories, got_mask.cat.categories)
else:
assert_eq(
expect_where.fillna(-1),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1), got_mask.fillna(-1), check_dtype=False
)
else:
assert_exceptions_equal(
lfunc=ps_where.where,
rfunc=gs_where.where,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False
if error is NotImplementedError
else True,
)
assert_exceptions_equal(
lfunc=ps_mask.mask,
rfunc=gs_mask.mask,
lfunc_args_and_kwargs=(
[ps_condition],
{"other": ps_other, "inplace": inplace},
),
rfunc_args_and_kwargs=(
[gs_condition],
{"other": gs_other, "inplace": inplace},
),
compare_error_message=False,
)
@pytest.mark.parametrize(
"data,condition,other,has_cat",
[
(
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
),
pd.DataFrame(
{
"a": pd.Series(["a", "a", "b", "c", "a", "d", "d", "a"]),
"b": pd.Series(["o", "p", "q", "e", "p", "p", "a", "a"]),
}
)
!= "a",
None,
None,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
None,
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
!= "a",
"a",
True,
),
(
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
["a", "a", "b", "c", "a", "d", "d", "a"],
dtype="category",
),
"b": pd.Series(
["o", "p", "q", "e", "p", "p", "a", "a"],
dtype="category",
),
}
)
== "a",
"a",
True,
),
],
)
def test_df_string_cat_types_mask_where(data, condition, other, has_cat):
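    # ``where``/``mask`` on string and categorical columns: plain string frames
    # are compared after a fillna/astype round-trip, categorical frames
    # (``has_cat`` is not None) are compared directly.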
ps = data
gs = cudf.from_pandas(data)
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
expect_where = ps.where(ps_condition, other=ps_other)
got_where = gs.where(gs_condition, other=gs_other)
expect_mask = ps.mask(ps_condition, other=ps_other)
got_mask = gs.mask(gs_condition, other=gs_other)
if has_cat is None:
assert_eq(
expect_where.fillna(-1).astype("str"),
got_where.fillna(-1),
check_dtype=False,
)
assert_eq(
expect_mask.fillna(-1).astype("str"),
got_mask.fillna(-1),
check_dtype=False,
)
else:
assert_eq(expect_where, got_where, check_dtype=False)
assert_eq(expect_mask, got_mask, check_dtype=False)
@pytest.mark.parametrize(
"data,expected_upcast_type,error",
[
(
pd.Series([random.random() for _ in range(10)], dtype="float32"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float16"),
np.dtype("float32"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float64"),
np.dtype("float64"),
None,
),
(
pd.Series([random.random() for _ in range(10)], dtype="float128"),
None,
NotImplementedError,
),
],
)
def test_from_pandas_unsupported_types(data, expected_upcast_type, error):
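    # float16 input is expected to be upcast to float32, while float128 is
    # unsupported and should raise NotImplementedError from every constructor.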
pdf = pd.DataFrame({"one_col": data})
if error == NotImplementedError:
with pytest.raises(error):
cudf.from_pandas(data)
with pytest.raises(error):
cudf.Series(data)
with pytest.raises(error):
cudf.from_pandas(pdf)
with pytest.raises(error):
cudf.DataFrame(pdf)
else:
df = cudf.from_pandas(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.Series(data)
assert_eq(data, df, check_dtype=False)
assert df.dtype == expected_upcast_type
df = cudf.from_pandas(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
df = cudf.DataFrame(pdf)
assert_eq(pdf, df, check_dtype=False)
assert df["one_col"].dtype == expected_upcast_type
@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("index", [None, "a", ["a", "b"]])
def test_from_pandas_nan_as_null(nan_as_null, index):
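    # ``from_pandas`` should honour ``nan_as_null`` both with and without
    # columns being promoted to the index.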
data = [np.nan, 2.0, 3.0]
if index is None:
pdf = pd.DataFrame({"a": data, "b": data})
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
else:
pdf = pd.DataFrame({"a": data, "b": data}).set_index(index)
expected = cudf.DataFrame(
{
"a": column.as_column(data, nan_as_null=nan_as_null),
"b": column.as_column(data, nan_as_null=nan_as_null),
}
)
expected = expected.set_index(index)
got = cudf.from_pandas(pdf, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_from_pandas_for_series_nan_as_null(nan_as_null):
data = [np.nan, 2.0, 3.0]
psr = pd.Series(data)
expected = cudf.Series(column.as_column(data, nan_as_null=nan_as_null))
got = cudf.from_pandas(psr, nan_as_null=nan_as_null)
assert_eq(expected, got)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_copy(copy):
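    # ``astype`` with copy=True/False should match pandas, and mutating the
    # result of a same-dtype astype must affect the original (or not) exactly
    # as it does in pandas.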
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype="float", copy=copy),
pdf.astype(dtype="float", copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype="float", copy=copy),
psr.astype(dtype="float", copy=copy),
)
assert_eq(gsr, psr)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype(dtype="int64", copy=copy)
expected = psr.astype(dtype="int64", copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize("copy", [True, False])
def test_df_series_dataframe_astype_dtype_dict(copy):
gdf = cudf.DataFrame({"col1": [1, 2], "col2": [3, 4]})
pdf = gdf.to_pandas()
assert_eq(
gdf.astype(dtype={"col1": "float"}, copy=copy),
pdf.astype(dtype={"col1": "float"}, copy=copy),
)
assert_eq(gdf, pdf)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
assert_eq(
gsr.astype(dtype={None: "float"}, copy=copy),
psr.astype(dtype={None: "float"}, copy=copy),
)
assert_eq(gsr, psr)
assert_exceptions_equal(
lfunc=psr.astype,
rfunc=gsr.astype,
lfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
rfunc_args_and_kwargs=([], {"dtype": {"a": "float"}, "copy": copy}),
)
gsr = cudf.Series([1, 2])
psr = gsr.to_pandas()
actual = gsr.astype({None: "int64"}, copy=copy)
expected = psr.astype({None: "int64"}, copy=copy)
assert_eq(expected, actual)
assert_eq(gsr, psr)
actual[0] = 3
expected[0] = 3
assert_eq(gsr, psr)
@pytest.mark.parametrize(
"data,columns",
[
([1, 2, 3, 100, 112, 35464], ["a"]),
(range(100), None),
([], None),
((-10, 21, 32, 32, 1, 2, 3), ["p"]),
((), None),
([[1, 2, 3], [1, 2, 3]], ["col1", "col2", "col3"]),
([range(100), range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3), (1, 2, 3)), ["tuple0", "tuple1", "tuple2"]),
([[1, 2, 3]], ["list col1", "list col2", "list col3"]),
([range(100)], ["range" + str(i) for i in range(100)]),
(((1, 2, 3),), ["k1", "k2", "k3"]),
],
)
def test_dataframe_init_1d_list(data, columns):
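    # Building a DataFrame from 1D/2D lists, ranges and tuples should match
    # pandas both with explicit column names and with columns=None.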
expect = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(data, columns=columns)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
expect = pd.DataFrame(data, columns=None)
actual = cudf.DataFrame(data, columns=None)
assert_eq(
expect, actual, check_index_type=False if len(data) == 0 else True
)
@pytest.mark.parametrize(
"data,cols,index",
[
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
["a", "b", "c", "d"],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 20, 30, 10],
),
(
np.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "b"],
[0, 1, 2, 3],
),
(np.array([11, 123, -2342, 232]), ["a"], [1, 2, 11, 12]),
(np.array([11, 123, -2342, 232]), ["a"], ["khsdjk", "a", "z", "kk"]),
(
cupy.ndarray(shape=(4, 2), dtype=float, order="F"),
["a", "z"],
["a", "z", "a", "z"],
),
(cupy.array([11, 123, -2342, 232]), ["z"], [0, 1, 1, 0]),
(cupy.array([11, 123, -2342, 232]), ["z"], [1, 2, 3, 4]),
(cupy.array([11, 123, -2342, 232]), ["z"], ["a", "z", "d", "e"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(np.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], ["a", "b"]),
(cupy.random.randn(2, 4), ["a", "b", "c", "d"], [1, 0]),
],
)
def test_dataframe_init_from_arrays_cols(data, cols, index):
gd_data = data
if isinstance(data, cupy.ndarray):
# pandas can't handle cupy arrays in general
pd_data = data.get()
        # Also cover building a DataFrame from a GPU array whose
        # __cuda_array_interface__ lacks the `descr` attribute
numba_data = cuda.as_cuda_array(data)
else:
pd_data = data
numba_data = None
# verify with columns & index
pdf = pd.DataFrame(pd_data, columns=cols, index=index)
gdf = cudf.DataFrame(gd_data, columns=cols, index=index)
assert_eq(pdf, gdf, check_dtype=False)
# verify with columns
pdf = pd.DataFrame(pd_data, columns=cols)
gdf = cudf.DataFrame(gd_data, columns=cols)
assert_eq(pdf, gdf, check_dtype=False)
pdf = pd.DataFrame(pd_data)
gdf = cudf.DataFrame(gd_data)
assert_eq(pdf, gdf, check_dtype=False)
if numba_data is not None:
gdf = cudf.DataFrame(numba_data)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"col_data",
[
range(5),
["a", "b", "x", "y", "z"],
[1.0, 0.213, 0.34332],
["a"],
[1],
[0.2323],
[],
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar(col_data, assign_val):
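    # Assigning a scalar (Python, numpy or cupy, including None and strings)
    # as a new column should broadcast exactly as it does in pandas.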
pdf = pd.DataFrame({"a": col_data})
gdf = cudf.DataFrame({"a": col_data})
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"col_data",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
@pytest.mark.parametrize(
"assign_val",
[
1,
2,
np.array(2),
cupy.array(2),
0.32324,
np.array(0.34248),
cupy.array(0.34248),
"abc",
np.array("abc", dtype="object"),
np.array("abc", dtype="str"),
np.array("abc"),
None,
],
)
def test_dataframe_assign_scalar_with_scalar_cols(col_data, assign_val):
pdf = pd.DataFrame(
{
"a": cupy.asnumpy(col_data)
if isinstance(col_data, cupy.ndarray)
else col_data
},
index=["dummy_mandatory_index"],
)
gdf = cudf.DataFrame({"a": col_data}, index=["dummy_mandatory_index"])
pdf["b"] = (
cupy.asnumpy(assign_val)
if isinstance(assign_val, cupy.ndarray)
else assign_val
)
gdf["b"] = assign_val
assert_eq(pdf, gdf)
def test_dataframe_info_basic():
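    # ``info()`` for a string-indexed frame must reproduce the expected
    # pandas-style summary verbatim, including the memory-usage line.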
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 10 entries, a to 1111
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 0 10 non-null float64
1 1 10 non-null float64
2 2 10 non-null float64
3 3 10 non-null float64
4 4 10 non-null float64
5 5 10 non-null float64
6 6 10 non-null float64
7 7 10 non-null float64
8 8 10 non-null float64
9 9 10 non-null float64
dtypes: float64(10)
memory usage: 859.0+ bytes
"""
)
df = pd.DataFrame(
np.random.randn(10, 10),
index=["a", "2", "3", "4", "5", "6", "7", "8", "100", "1111"],
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
def test_dataframe_info_verbose_mem_usage():
buffer = io.StringIO()
df = pd.DataFrame({"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]})
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 3 entries, 0 to 2
Columns: 2 entries, a to b
dtypes: int64(1), object(1)
memory usage: 56.0+ bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=False)
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["safdas", "assa", "asdasd"]},
index=["sdfdsf", "sdfsdfds", "dsfdf"],
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
StringIndex: 3 entries, sdfdsf to dsfdf
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 3 non-null int64
1 b 3 non-null object
dtypes: int64(1), object(1)
memory usage: 91.0 bytes
"""
)
cudf.from_pandas(df).info(buf=buffer, verbose=True, memory_usage="deep")
s = buffer.getvalue()
assert str_cmp == s
buffer.truncate(0)
buffer.seek(0)
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0 bytes
"""
)
df.info(buf=buffer, verbose=True, memory_usage="deep")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
def test_dataframe_info_null_counts():
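    # Exercise the null-count related branches of ``info()``: null_counts
    # disabled, max_cols handling, an empty frame, and the
    # display.max_info_rows threshold.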
int_values = [1, 2, 3, 4, 5]
text_values = ["alpha", "beta", "gamma", "delta", "epsilon"]
float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
df = cudf.DataFrame(
{
"int_col": int_values,
"text_col": text_values,
"float_col": float_values,
}
)
buffer = io.StringIO()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Dtype
--- ------ -----
0 int_col int64
1 text_col object
2 float_col float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
"""
)
df.info(buf=buffer, verbose=True, null_counts=False)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, verbose=True, max_cols=0)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame()
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 0 entries
Empty DataFrame"""
)
df.info(buf=buffer, verbose=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df = cudf.DataFrame(
{
"a": [1, 2, 3, None, 10, 11, 12, None],
"b": ["a", "b", "c", "sd", "sdf", "sd", None, None],
}
)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Dtype
--- ------ -----
0 a int64
1 b object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
pd.options.display.max_info_rows = 2
df.info(buf=buffer, max_cols=2, null_counts=None)
pd.reset_option("display.max_info_rows")
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
str_cmp = textwrap.dedent(
"""\
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 8 entries, 0 to 7
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 a 6 non-null int64
1 b 6 non-null object
dtypes: int64(1), object(1)
memory usage: 238.0+ bytes
"""
)
df.info(buf=buffer, max_cols=2, null_counts=None)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
buffer.truncate(0)
buffer.seek(0)
df.info(buf=buffer, null_counts=True)
actual_string = buffer.getvalue()
assert str_cmp == actual_string
@pytest.mark.parametrize(
"data1",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[1, 2, 3, 4, 5, 6, 7],
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[
1.9876543,
2.9876654,
3.9876543,
4.1234587,
5.23,
6.88918237,
7.00001,
],
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
-0.1221,
-2.1221,
-0.112121,
21.1212,
],
],
)
@pytest.mark.parametrize("rtol", [0, 0.01, 1e-05, 1e-08, 5e-1, 50.12])
@pytest.mark.parametrize("atol", [0, 0.01, 1e-05, 1e-08, 50.12])
def test_cudf_isclose(data1, data2, rtol, atol):
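    # ``cudf.isclose`` should agree with ``cupy.isclose`` whether the inputs
    # are cudf Series, host lists, cupy/numpy arrays or pandas Series.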
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, rtol=rtol, atol=atol))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(data1, data2, rtol=rtol, atol=atol)
assert_eq(expected, actual)
actual = cudf.isclose(
cupy.array(data1), cupy.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
np.array(data1), np.array(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
actual = cudf.isclose(
pd.Series(data1), pd.Series(data2), rtol=rtol, atol=atol
)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data1",
[
[
-1.9876543,
-2.9876654,
np.nan,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
np.nan,
-21.1212,
],
],
)
@pytest.mark.parametrize(
"data2",
[
[
-1.9876543,
-2.9876654,
-3.9876543,
-4.1234587,
-5.23,
-6.88918237,
-7.00001,
],
[
1.987654321,
2.987654321,
3.987654321,
0.1221,
2.1221,
0.112121,
-21.1212,
],
[
-1.987654321,
-2.987654321,
-3.987654321,
np.nan,
np.nan,
np.nan,
21.1212,
],
],
)
@pytest.mark.parametrize("equal_nan", [True, False])
def test_cudf_isclose_nulls(data1, data2, equal_nan):
array1 = cupy.array(data1)
array2 = cupy.array(data2)
expected = cudf.Series(cupy.isclose(array1, array2, equal_nan=equal_nan))
actual = cudf.isclose(
cudf.Series(data1), cudf.Series(data2), equal_nan=equal_nan
)
assert_eq(expected, actual, check_dtype=False)
actual = cudf.isclose(data1, data2, equal_nan=equal_nan)
assert_eq(expected, actual, check_dtype=False)
def test_cudf_isclose_different_index():
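    # ``cudf.isclose`` aligns on the index before comparing, so reordered but
    # matching labels are close while labels missing from the other operand
    # (or matching labels with different values) compare as False.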
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 3, 4, 2],
)
expected = cudf.Series([True] * 6, index=s1.index)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[0, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 5, 10, 4, 2],
)
expected = cudf.Series(
[True, True, True, False, True, True], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
s1 = cudf.Series(
[-1.9876543, -2.9876654, -3.9876543, -4.1234587, -5.23, -7.00001],
index=[100, 1, 2, 3, 4, 5],
)
s2 = cudf.Series(
[-1.9876543, -2.9876654, -7.00001, -4.1234587, -5.23, -3.9876543],
index=[0, 1, 100, 10, 4, 2],
)
expected = cudf.Series(
[False, True, True, False, True, False], index=s1.index
)
assert_eq(expected, cudf.isclose(s1, s2))
def test_dataframe_to_dict_error():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [9, 5, 3]})
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df.to_dict()
with pytest.raises(
TypeError,
match=re.escape(
r"cuDF does not support conversion to host memory "
r"via `to_dict()` method. Consider using "
r"`.to_pandas().to_dict()` to construct a Python dictionary."
),
):
df["a"].to_dict()
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": [1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]}),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
}
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=[10, 20, 30, 40, 50, 60],
),
pd.DataFrame(
{
"one": [1, 2, 3, 4, 5, 10],
"two": ["abc", "def", "ghi", "xyz", "pqr", "abc"],
},
index=["a", "b", "c", "d", "e", "f"],
),
pd.DataFrame(index=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(columns=["a", "b", "c", "d", "e", "f"]),
pd.DataFrame(index=[10, 11, 12]),
pd.DataFrame(columns=[10, 11, 12]),
pd.DataFrame(),
pd.DataFrame({"one": [], "two": []}),
pd.DataFrame({2: [], 1: []}),
pd.DataFrame(
{
0: [1, 2, 3, 4, 5, 10],
1: ["abc", "def", "ghi", "xyz", "pqr", "abc"],
100: ["a", "b", "b", "x", "z", "a"],
},
index=[10, 20, 30, 40, 50, 60],
),
],
)
def test_dataframe_keys(df):
gdf = cudf.from_pandas(df)
assert_eq(df.keys(), gdf.keys())
@pytest.mark.parametrize(
"ps",
[
pd.Series([1, 2, 3, 4, 5, 10, 11, 12, 33, 55, 19]),
pd.Series(["abc", "def", "ghi", "xyz", "pqr", "abc"]),
pd.Series(
[1, 2, 3, 4, 5, 10],
index=["abc", "def", "ghi", "xyz", "pqr", "abc"],
),
pd.Series(
["abc", "def", "ghi", "xyz", "pqr", "abc"],
index=[1, 2, 3, 4, 5, 10],
),
pd.Series(index=["a", "b", "c", "d", "e", "f"], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
pd.Series(dtype="float64"),
pd.Series([], dtype="float64"),
],
)
def test_series_keys(ps):
gds = cudf.from_pandas(ps)
if len(ps) == 0 and not isinstance(ps.index, pd.RangeIndex):
assert_eq(ps.keys().astype("float64"), gds.keys())
else:
assert_eq(ps.keys(), gds.keys())
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
pd.DataFrame(),
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
pd.DataFrame([]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([], index=[100]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe(df, other, sort, ignore_index):
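    # ``append`` should match pandas; when the shape changes, frames are
    # compared after ``fillna(-1)`` so nulls introduced by the append do not
    # trip the dtype check.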
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = cudf.from_pandas(other)
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({12: [], 22: []}),
pd.DataFrame([[1, 2], [3, 4]], columns=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[0, 1], index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=[1, 0], index=[7, 8]),
pd.DataFrame(
{
23: [315.3324, 3243.32432, 3232.332, -100.32],
33: [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
0: [315.3324, 3243.32432, 3232.332, -100.32],
1: [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
],
)
@pytest.mark.parametrize(
"other",
[
pd.Series([10, 11, 23, 234, 13]),
pytest.param(
pd.Series([10, 11, 23, 234, 13], index=[11, 12, 13, 44, 33]),
marks=pytest.mark.xfail(
reason="pandas bug: "
"https://github.com/pandas-dev/pandas/issues/35092"
),
),
{1: 1},
{0: 10, 1: 100, 2: 102},
],
)
@pytest.mark.parametrize("sort", [False, True])
def test_dataframe_append_series_dict(df, other, sort):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
if isinstance(other, pd.Series):
other_gd = cudf.from_pandas(other)
else:
other_gd = other
expected = pdf.append(other_pd, ignore_index=True, sort=sort)
actual = gdf.append(other_gd, ignore_index=True, sort=sort)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_series_mixed_index():
df = cudf.DataFrame({"first": [], "d": []})
sr = cudf.Series([1, 2, 3, 4])
with pytest.raises(
TypeError,
match=re.escape(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
),
):
df.append(sr, ignore_index=True)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[10, 20, 30]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
],
)
@pytest.mark.parametrize(
"other",
[
[pd.DataFrame([[5, 6], [7, 8]], columns=list("AB"))],
[
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("BD")),
pd.DataFrame([[5, 6], [7, 8]], columns=list("DE")),
],
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()],
[
pd.DataFrame(
{"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20]
),
pd.DataFrame(),
pd.DataFrame(),
pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")),
],
[
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[200]),
],
[pd.DataFrame([]), pd.DataFrame([], index=[100])],
[
pd.DataFrame([]),
pd.DataFrame([], index=[100]),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
],
[
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[0, 100, 200, 300],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_dataframe_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB")),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[10, 20]),
pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"), index=[7, 8]),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
}
),
pd.DataFrame(
{
"a": [315.3324, 3243.32432, 3232.332, -100.32],
"z": [0.3223, 0.32, 0.0000232, 0.32224],
},
index=[7, 20, 11, 9],
),
pd.DataFrame({"l": [10]}),
pd.DataFrame({"l": [10]}, index=[100]),
pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}),
pd.DataFrame(
{"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]},
index=[100, 200, 300, 400, 500, 0],
),
pd.DataFrame({"first_col": [], "second_col": [], "third_col": []}),
],
)
@pytest.mark.parametrize(
"other",
[
[[1, 2], [10, 100]],
[[1, 2, 10, 100, 0.1, 0.2, 0.0021]],
[[]],
[[], [], [], []],
[[0.23, 0.00023, -10.00, 100, 200, 1000232, 1232.32323]],
],
)
@pytest.mark.parametrize("sort", [False, True])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_dataframe_append_lists(df, other, sort, ignore_index):
pdf = df
other_pd = other
gdf = cudf.from_pandas(df)
other_gd = [
cudf.from_pandas(o) if isinstance(o, pd.DataFrame) else o
for o in other
]
expected = pdf.append(other_pd, sort=sort, ignore_index=ignore_index)
actual = gdf.append(other_gd, sort=sort, ignore_index=ignore_index)
if expected.shape != df.shape:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(
expected, actual, check_index_type=False if gdf.empty else True
)
def test_dataframe_append_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
ps = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Can only append a Series if ignore_index=True "
"or if the Series has a name",
):
df.append(ps)
def test_cudf_arrow_array_error():
df = cudf.DataFrame({"a": [1, 2, 3]})
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Table via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Table, consider "
"using .to_arrow()",
):
df.__arrow_array__()
sr = cudf.Series([1, 2, 3])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
sr = cudf.Series(["a", "b", "c"])
with pytest.raises(
TypeError,
match="Implicit conversion to a host PyArrow Array via __arrow_array__"
" is not allowed, To explicitly construct a PyArrow Array, consider "
"using .to_arrow()",
):
sr.__arrow_array__()
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_dataframe_sample_basic(n, frac, replace, axis):
    # Sampling along axis=1 with replace=True would require duplicate column
    # names, which cuDF does not currently support, so skip that combination.
    if axis == 1 and replace:
        return
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5],
"float": [0.05, 0.2, 0.3, 0.2, 0.25],
"int": [1, 3, 5, 4, 2],
},
index=[1, 2, 3, 4, 5],
)
df = cudf.DataFrame.from_pandas(pdf)
random_state = 0
try:
pout = pdf.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
except BaseException:
assert_exceptions_equal(
lfunc=pdf.sample,
rfunc=df.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
"axis": axis,
},
),
)
else:
gout = df.sample(
n=n,
frac=frac,
replace=replace,
random_state=random_state,
axis=axis,
)
assert pout.shape == gout.shape
@pytest.mark.parametrize("replace", [True, False])
@pytest.mark.parametrize("random_state", [1, np.random.mtrand.RandomState(10)])
def test_dataframe_reproducibility(replace, random_state):
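    # Sampling twice with the same random_state must return identical results.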
df = cudf.DataFrame({"a": cupy.arange(0, 1024)})
expected = df.sample(1024, replace=replace, random_state=random_state)
out = df.sample(1024, replace=replace, random_state=random_state)
assert_eq(expected, out)
@pytest.mark.parametrize("n", [0, 2, 5, 10, None])
@pytest.mark.parametrize("frac", [0.1, 0.5, 1, 2, None])
@pytest.mark.parametrize("replace", [True, False])
def test_series_sample_basic(n, frac, replace):
psr = pd.Series([1, 2, 3, 4, 5])
sr = cudf.Series.from_pandas(psr)
random_state = 0
try:
pout = psr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
except BaseException:
assert_exceptions_equal(
lfunc=psr.sample,
rfunc=sr.sample,
lfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
rfunc_args_and_kwargs=(
[],
{
"n": n,
"frac": frac,
"replace": replace,
"random_state": random_state,
},
),
)
else:
gout = sr.sample(
n=n, frac=frac, replace=replace, random_state=random_state
)
assert pout.shape == gout.shape
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_empty(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.empty, gdf.empty)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(),
pd.DataFrame(index=[100, 10, 1, 0]),
pd.DataFrame(columns=["a", "b", "c", "d"]),
pd.DataFrame(columns=["a", "b", "c", "d"], index=[100]),
pd.DataFrame(
columns=["a", "b", "c", "d"], index=[100, 10000, 2131, 133]
),
pd.DataFrame({"a": [1, 2, 3], "b": ["abc", "xyz", "klm"]}),
],
)
def test_dataframe_size(df):
pdf = df
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.size, gdf.size)
@pytest.mark.parametrize(
"ps",
[
pd.Series(dtype="float64"),
pd.Series(index=[100, 10, 1, 0], dtype="float64"),
pd.Series([], dtype="float64"),
pd.Series(["a", "b", "c", "d"]),
pd.Series(["a", "b", "c", "d"], index=[0, 1, 10, 11]),
],
)
def test_series_empty(ps):
ps = ps
gs = cudf.from_pandas(ps)
assert_eq(ps.empty, gs.empty)
@pytest.mark.parametrize(
"data",
[
[],
[1],
{"a": [10, 11, 12]},
{
"a": [10, 11, 12],
"another column name": [12, 22, 34],
"xyz": [0, 10, 11],
},
],
)
@pytest.mark.parametrize("columns", [["a"], ["another column name"], None])
def test_dataframe_init_with_columns(data, columns):
pdf = pd.DataFrame(data, columns=columns)
gdf = cudf.DataFrame(data, columns=columns)
assert_eq(
pdf,
gdf,
check_index_type=False if len(pdf.index) == 0 else True,
check_dtype=False if pdf.empty and len(pdf.columns) else True,
)
@pytest.mark.parametrize(
"data, ignore_dtype",
[
([pd.Series([1, 2, 3])], False),
([pd.Series(index=[1, 2, 3], dtype="float64")], False),
([pd.Series(name="empty series name", dtype="float64")], False),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
),
([pd.Series([1, 2, 3], name="hi")] * 10, False),
([pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10, False),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list(data, ignore_dtype, columns):
gd_data = [cudf.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns)
actual = cudf.DataFrame(gd_data, columns=columns)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, ignore_dtype, index",
[
([pd.Series([1, 2, 3])], False, ["a", "b", "c"]),
([pd.Series(index=[1, 2, 3], dtype="float64")], False, ["a", "b"]),
(
[pd.Series(name="empty series name", dtype="float64")],
False,
["index1"],
),
(
[pd.Series([1]), pd.Series([], dtype="float64"), pd.Series([3])],
False,
["0", "2", "1"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
False,
["_", "+", "*"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, False, ["mean"] * 10),
(
[pd.Series([1, 2, 3], name=None, index=[10, 11, 12])] * 10,
False,
["abc"] * 10,
),
(
[
pd.Series([1, 2, 3], name=None, index=[10, 11, 12]),
pd.Series([1, 2, 30], name=None, index=[13, 144, 15]),
],
True,
["set_index_a", "set_index_b"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
["a", "b", "c"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], name="abc", dtype="float64"),
pd.Series(index=[10, 11, 12], dtype="float64"),
],
False,
["a", "v", "z"],
),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([1, -100, 200, -399, 400], name="abc"),
pd.Series([111, 222, 333], index=[10, 11, 12]),
],
False,
["a", "v", "z"],
),
],
)
@pytest.mark.parametrize(
"columns", [None, ["0"], [0], ["abc"], [144, 13], [2, 1, 0]]
)
def test_dataframe_init_from_series_list_with_index(
data, ignore_dtype, index, columns
):
gd_data = [cudf.from_pandas(obj) for obj in data]
expected = pd.DataFrame(data, columns=columns, index=index)
actual = cudf.DataFrame(gd_data, columns=columns, index=index)
if ignore_dtype:
assert_eq(expected.fillna(-1), actual.fillna(-1), check_dtype=False)
else:
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data, index",
[
([pd.Series([1, 2]), pd.Series([1, 2])], ["a", "b", "c"]),
(
[
pd.Series([1, 0.324234, 32424.323, -1233, 34242]),
pd.Series([], dtype="float64"),
pd.Series([3], name="series that is named"),
],
["_", "+"],
),
([pd.Series([1, 2, 3], name="hi")] * 10, ["mean"] * 9),
],
)
def test_dataframe_init_from_series_list_with_index_error(data, index):
gd_data = [cudf.from_pandas(obj) for obj in data]
assert_exceptions_equal(
pd.DataFrame,
cudf.DataFrame,
([data], {"index": index}),
([gd_data], {"index": index}),
)
@pytest.mark.parametrize(
"data",
[
[pd.Series([1, 2, 3], index=["a", "a", "a"])],
[pd.Series([1, 2, 3], index=["a", "a", "a"])] * 4,
[
pd.Series([1, 2, 3], index=["a", "b", "a"]),
pd.Series([1, 2, 3], index=["b", "b", "a"]),
],
[
pd.Series([1, 2, 3], index=["a", "b", "z"]),
pd.Series([1, 2, 3], index=["u", "b", "a"]),
pd.Series([1, 2, 3], index=["u", "b", "u"]),
],
],
)
def test_dataframe_init_from_series_list_duplicate_index_error(data):
gd_data = [cudf.from_pandas(obj) for obj in data]
assert_exceptions_equal(
lfunc=pd.DataFrame,
rfunc=cudf.DataFrame,
lfunc_args_and_kwargs=([], {"data": data}),
rfunc_args_and_kwargs=([], {"data": gd_data}),
check_exception_type=False,
)
def test_dataframe_iterrows_itertuples():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via itertuples. Consider using "
"`.to_pandas().itertuples()` "
"if you wish to iterate over namedtuples."
),
):
df.itertuples()
with pytest.raises(
TypeError,
match=re.escape(
"cuDF does not support iteration of DataFrame "
"via iterrows. Consider using "
"`.to_pandas().iterrows()` "
"if you wish to iterate over each row."
),
):
df.iterrows()
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [10, 22, 33],
"c": [0.3234, 0.23432, 0.0],
"d": ["hello", "world", "hello"],
}
),
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": ["hello", "world", "hello"],
"c": [0.3234, 0.23432, 0.0],
}
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
"category_data": cudf.Series(
["a", "a", "b"], dtype="category"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
],
)
@pytest.mark.parametrize(
"include",
[None, "all", ["object"], ["int"], ["object", "int", "category"]],
)
def test_describe_misc_include(df, include):
pdf = df.to_pandas()
expected = pdf.describe(include=include, datetime_is_numeric=True)
actual = df.describe(include=include, datetime_is_numeric=True)
for col in expected.columns:
if expected[col].dtype == np.dtype("object"):
expected[col] = expected[col].fillna(-1).astype("str")
actual[col] = actual[col].fillna(-1).astype("str")
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [10, 22, 33],
"c": [0.3234, 0.23432, 0.0],
"d": ["hello", "world", "hello"],
}
),
cudf.DataFrame(
{
"a": [1, 2, 3],
"b": ["hello", "world", "hello"],
"c": [0.3234, 0.23432, 0.0],
}
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
pytest.param(
cudf.DataFrame(
{
"int_data": [1, 2, 3],
"str_data": ["hello", "world", "hello"],
"float_data": [0.3234, 0.23432, 0.0],
"timedelta_data": cudf.Series(
[1, 2, 1], dtype="timedelta64[ns]"
),
"datetime_data": cudf.Series(
[1, 2, 1], dtype="datetime64[ns]"
),
"category_data": cudf.Series(
["a", "a", "b"], dtype="category"
),
}
),
marks=pytest.mark.xfail(
reason="https://github.com/rapidsai/cudf/issues/6219"
),
),
],
)
@pytest.mark.parametrize(
"exclude", [None, ["object"], ["int"], ["object", "int", "category"]]
)
def test_describe_misc_exclude(df, exclude):
pdf = df.to_pandas()
expected = pdf.describe(exclude=exclude, datetime_is_numeric=True)
actual = df.describe(exclude=exclude, datetime_is_numeric=True)
for col in expected.columns:
if expected[col].dtype == np.dtype("object"):
expected[col] = expected[col].fillna(-1).astype("str")
actual[col] = actual[col].fillna(-1).astype("str")
assert_eq(expected, actual)
@pytest.mark.parametrize(
"df",
[
cudf.DataFrame({"a": [1, 2, 3]}),
cudf.DataFrame(
{"a": [1, 2, 3], "b": ["a", "z", "c"]}, index=["a", "z", "x"]
),
cudf.DataFrame(
{
"a": [1, 2, 3, None, 2, 1, None],
"b": ["a", "z", "c", "a", "v", "z", "z"],
}
),
cudf.DataFrame({"a": [], "b": []}),
cudf.DataFrame({"a": [None, None], "b": [None, None]}),
cudf.DataFrame(
{
"a": ["hello", "world", "rapids", "ai", "nvidia"],
"b": cudf.Series([1, 21, 21, 11, 11], dtype="timedelta64[s]"),
}
),
cudf.DataFrame(
{
"a": ["hello", None, "world", "rapids", None, "ai", "nvidia"],
"b": cudf.Series(
[1, 21, None, 11, None, 11, None], dtype="datetime64[s]"
),
}
),
],
)
@pytest.mark.parametrize("numeric_only", [True, False])
@pytest.mark.parametrize("dropna", [True, False])
def test_dataframe_mode(df, numeric_only, dropna):
pdf = df.to_pandas()
expected = pdf.mode(numeric_only=numeric_only, dropna=dropna)
actual = df.mode(numeric_only=numeric_only, dropna=dropna)
assert_eq(expected, actual, check_dtype=False)
@pytest.mark.parametrize("lhs, rhs", [("a", "a"), ("a", "b"), (1, 1.0)])
def test_equals_names(lhs, rhs):
lhs = cudf.DataFrame({lhs: [1, 2]})
rhs = cudf.DataFrame({rhs: [1, 2]})
got = lhs.equals(rhs)
expect = lhs.to_pandas().equals(rhs.to_pandas())
assert_eq(expect, got)
def test_equals_dtypes():
lhs = cudf.DataFrame({"a": [1, 2.0]})
rhs = cudf.DataFrame({"a": [1, 2]})
got = lhs.equals(rhs)
expect = lhs.to_pandas().equals(rhs.to_pandas())
assert_eq(expect, got)
@pytest.mark.parametrize(
"df1",
[
pd.DataFrame({"a": [10, 11, 12]}, index=["a", "b", "z"]),
pd.DataFrame({"z": ["a"]}),
pd.DataFrame({"a": [], "b": []}),
],
)
@pytest.mark.parametrize(
"df2",
[
pd.DataFrame(),
pd.DataFrame({"a": ["a", "a", "c", "z", "A"], "z": [1, 2, 3, 4, 5]}),
],
)
@pytest.mark.parametrize(
"op",
[
operator.eq,
operator.ne,
operator.lt,
operator.gt,
operator.le,
operator.ge,
],
)
def test_dataframe_error_equality(df1, df2, op):
gdf1 = cudf.from_pandas(df1)
gdf2 = cudf.from_pandas(df2)
assert_exceptions_equal(op, op, ([df1, df2],), ([gdf1, gdf2],))
@pytest.mark.parametrize(
"df,expected_pdf",
[
(
cudf.DataFrame(
{
"a": cudf.Series([1, 2, None, 3], dtype="uint8"),
"b": cudf.Series([23, None, None, 32], dtype="uint16"),
}
),
pd.DataFrame(
{
"a": pd.Series([1, 2, None, 3], dtype=pd.UInt8Dtype()),
"b": pd.Series(
[23, None, None, 32], dtype=pd.UInt16Dtype()
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series([None, 123, None, 1], dtype="uint32"),
"b": cudf.Series(
[234, 2323, 23432, None, None, 224], dtype="uint64"
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[None, 123, None, 1], dtype=pd.UInt32Dtype()
),
"b": pd.Series(
[234, 2323, 23432, None, None, 224],
dtype=pd.UInt64Dtype(),
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[-10, 1, None, -1, None, 3], dtype="int8"
),
"b": cudf.Series(
[111, None, 222, None, 13], dtype="int16"
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[-10, 1, None, -1, None, 3], dtype=pd.Int8Dtype()
),
"b": pd.Series(
[111, None, 222, None, 13], dtype=pd.Int16Dtype()
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[11, None, 22, 33, None, 2, None, 3], dtype="int32"
),
"b": cudf.Series(
[32431, None, None, 32322, 0, 10, -32324, None],
dtype="int64",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[11, None, 22, 33, None, 2, None, 3],
dtype=pd.Int32Dtype(),
),
"b": pd.Series(
[32431, None, None, 32322, 0, 10, -32324, None],
dtype=pd.Int64Dtype(),
),
}
),
),
(
cudf.DataFrame(
{
"a": cudf.Series(
[True, None, False, None, False, True, True, False],
dtype="bool_",
),
"b": cudf.Series(
[
"abc",
"a",
None,
"hello world",
"foo buzz",
"",
None,
"rapids ai",
],
dtype="object",
),
"c": cudf.Series(
[0.1, None, 0.2, None, 3, 4, 1000, None],
dtype="float64",
),
}
),
pd.DataFrame(
{
"a": pd.Series(
[True, None, False, None, False, True, True, False],
dtype=pd.BooleanDtype(),
),
"b": pd.Series(
[
"abc",
"a",
None,
"hello world",
"foo buzz",
"",
None,
"rapids ai",
],
dtype=pd.StringDtype(),
),
"c": pd.Series(
[0.1, None, 0.2, None, 3, 4, 1000, None],
dtype=pd.Float64Dtype(),
),
}
),
),
],
)
def test_dataframe_to_pandas_nullable_dtypes(df, expected_pdf):
actual_pdf = df.to_pandas(nullable=True)
assert_eq(actual_pdf, expected_pdf)
@pytest.mark.parametrize(
"data",
[
[{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}],
[{"a": 1, "b": 2, "c": None}, {"a": None, "b": 5, "c": 6}],
[{"a": 1, "b": 2}, {"a": 1, "b": 5, "c": 6}],
[{"a": 1, "b": 2}, {"b": 5, "c": 6}],
[{}, {"a": 1, "b": 5, "c": 6}],
[{"a": 1, "b": 2, "c": 3}, {"a": 4.5, "b": 5.5, "c": 6.5}],
],
)
def test_dataframe_init_from_list_of_dicts(data):
expect = pd.DataFrame(data)
got = cudf.DataFrame(data)
assert_eq(expect, got)
def test_dataframe_pipe():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
def add_int_col(df, column):
df[column] = df._constructor_sliced([10, 20, 30, 40])
return df
def add_str_col(df, column):
df[column] = df._constructor_sliced(["a", "b", "xyz", "ai"])
return df
expected = (
pdf.pipe(add_int_col, "one")
.pipe(add_int_col, column="two")
.pipe(add_str_col, "three")
)
actual = (
gdf.pipe(add_int_col, "one")
.pipe(add_int_col, column="two")
.pipe(add_str_col, "three")
)
assert_eq(expected, actual)
expected = (
pdf.pipe((add_str_col, "df"), column="one")
.pipe(add_str_col, column="two")
.pipe(add_int_col, "three")
)
actual = (
gdf.pipe((add_str_col, "df"), column="one")
.pipe(add_str_col, column="two")
.pipe(add_int_col, "three")
)
assert_eq(expected, actual)
def test_dataframe_pipe_error():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
def custom_func(df, column):
df[column] = df._constructor_sliced([10, 20, 30, 40])
return df
assert_exceptions_equal(
lfunc=pdf.pipe,
rfunc=gdf.pipe,
lfunc_args_and_kwargs=([(custom_func, "columns")], {"columns": "d"}),
rfunc_args_and_kwargs=([(custom_func, "columns")], {"columns": "d"}),
)
@pytest.mark.parametrize(
"op",
[
"count",
"cummin",
"cummax",
"cummax",
"cumprod",
"kurt",
"kurtosis",
"skew",
],
)
def test_dataframe_axis1_unsupported_ops(op):
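    # These reductions/scans only support axis=0 in cuDF and must raise a
    # clear NotImplementedError when called with axis=1.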
df = cudf.DataFrame({"a": [1, 2, 3], "b": [8, 9, 10]})
with pytest.raises(
NotImplementedError, match="Only axis=0 is currently supported."
):
getattr(df, op)(axis=1)
def test_dataframe_from_pandas_duplicate_columns():
pdf = pd.DataFrame(columns=["a", "b", "c", "a"])
pdf["a"] = [1, 2, 3]
with pytest.raises(
ValueError, match="Duplicate column names are not allowed"
):
cudf.from_pandas(pdf)
@pytest.mark.parametrize(
"df",
[
pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 11, 20], "c": ["a", "bcd", "xyz"]}
),
pd.DataFrame(),
],
)
@pytest.mark.parametrize(
"columns",
[
None,
["a"],
["c", "a"],
["b", "a", "c"],
[],
pd.Index(["c", "a"]),
cudf.Index(["c", "a"]),
["abc", "a"],
["column_not_exists1", "column_not_exists2"],
],
)
@pytest.mark.parametrize("index", [["abc", "def", "ghi"]])
def test_dataframe_constructor_columns(df, columns, index):
def assert_local_eq(actual, df, expected, host_columns):
check_index_type = False if expected.empty else True
if host_columns is not None and any(
col not in df.columns for col in host_columns
):
assert_eq(
expected,
actual,
check_dtype=False,
check_index_type=check_index_type,
)
else:
assert_eq(expected, actual, check_index_type=check_index_type)
gdf = cudf.from_pandas(df)
host_columns = (
columns.to_pandas() if isinstance(columns, cudf.BaseIndex) else columns
)
expected = pd.DataFrame(df, columns=host_columns, index=index)
actual = cudf.DataFrame(gdf, columns=columns, index=index)
assert_local_eq(actual, df, expected, host_columns)
def test_dataframe_constructor_column_index_only():
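    # A frame built from only ``index`` and ``columns`` must give each column
    # its own backing column object rather than sharing a single one.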
columns = ["a", "b", "c"]
index = ["r1", "r2", "r3"]
gdf = cudf.DataFrame(index=index, columns=columns)
    assert gdf["a"]._column is not gdf["b"]._column
    assert gdf["b"]._column is not gdf["c"]._column
@pytest.mark.parametrize(
"data",
[
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]},
{"a": [1.0, 2.0, 3.0], "b": [3.0, 4.0, 5.0], "c": [True, True, False]},
{"a": [1, 2, 3], "b": [3, 4, 5], "c": [True, True, False]},
{"a": [1, 2, 3], "b": [True, True, False], "c": [False, True, False]},
{
"a": [1.0, 2.0, 3.0],
"b": [True, True, False],
"c": [False, True, False],
},
{"a": [1, 2, 3], "b": [3, 4, 5], "c": [2.0, 3.0, 4.0]},
{"a": [1, 2, 3], "b": [2.0, 3.0, 4.0], "c": [5.0, 6.0, 4.0]},
],
)
@pytest.mark.parametrize(
"aggs",
[
["min", "sum", "max"],
("min", "sum", "max"),
{"min", "sum", "max"},
"sum",
{"a": "sum", "b": "min", "c": "max"},
{"a": ["sum"], "b": ["min"], "c": ["max"]},
{"a": ("sum"), "b": ("min"), "c": ("max")},
{"a": {"sum"}, "b": {"min"}, "c": {"max"}},
{"a": ["sum", "min"], "b": ["sum", "max"], "c": ["min", "max"]},
{"a": ("sum", "min"), "b": ("sum", "max"), "c": ("min", "max")},
{"a": {"sum", "min"}, "b": {"sum", "max"}, "c": {"min", "max"}},
],
)
def test_agg_for_dataframes(data, aggs):
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
expect = pdf.agg(aggs).sort_index()
got = gdf.agg(aggs).sort_index()
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize("aggs", [{"a": np.sum, "b": np.min, "c": np.max}])
def test_agg_for_unsupported_function(aggs):
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]}
)
with pytest.raises(NotImplementedError):
gdf.agg(aggs)
@pytest.mark.parametrize("aggs", ["asdf"])
def test_agg_for_dataframe_with_invalid_function(aggs):
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]}
)
with pytest.raises(
AttributeError,
match=f"{aggs} is not a valid function for 'DataFrame' object",
):
gdf.agg(aggs)
@pytest.mark.parametrize("aggs", [{"a": "asdf"}])
def test_agg_for_series_with_invalid_function(aggs):
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [3.0, 4.0, 5.0], "c": [True, True, False]}
)
with pytest.raises(
AttributeError,
match=f"{aggs['a']} is not a valid function for 'Series' object",
):
gdf.agg(aggs)
@pytest.mark.parametrize(
"aggs",
[
"sum",
["min", "sum", "max"],
{"a": {"sum", "min"}, "b": {"sum", "max"}, "c": {"min", "max"}},
],
)
def test_agg_for_dataframe_with_string_columns(aggs):
gdf = cudf.DataFrame(
{"a": ["m", "n", "o"], "b": ["t", "u", "v"], "c": ["x", "y", "z"]},
index=["a", "b", "c"],
)
with pytest.raises(
NotImplementedError,
match=re.escape(
"DataFrame.agg() is not supported for "
"frames containing string columns"
),
):
gdf.agg(aggs)
@pytest.mark.parametrize(
"join", ["left"],
)
@pytest.mark.parametrize(
"overwrite", [True, False],
)
@pytest.mark.parametrize(
"errors", ["ignore"],
)
@pytest.mark.parametrize(
"data",
[
{"a": [1, 2, 3], "b": [3, 4, 5]},
{"e": [1.0, 2.0, 3.0], "d": [3.0, 4.0, 5.0]},
{"c": [True, False, False], "d": [False, True, True]},
{"g": [2.0, np.nan, 4.0], "n": [np.nan, np.nan, np.nan]},
{"d": [np.nan, np.nan, np.nan], "e": [np.nan, np.nan, np.nan]},
{"a": [1.0, 2, 3], "b": pd.Series([4.0, 8.0, 3.0], index=[1, 2, 3])},
{
"d": [1.0, 2.0, 3.0],
"c": pd.Series([np.nan, np.nan, np.nan], index=[1, 2, 3]),
},
{
"a": [False, True, False],
"b": pd.Series([1.0, 2.0, np.nan], index=[1, 2, 3]),
},
{
"a": [np.nan, np.nan, np.nan],
"e": pd.Series([np.nan, np.nan, np.nan], index=[1, 2, 3]),
},
],
)
@pytest.mark.parametrize(
"data2",
[
{"b": [3, 5, 6], "e": [8, 2, 1]},
{"c": [True, False, True], "d": [3.0, 4.0, 5.0]},
{"e": [False, False, True], "g": [True, True, False]},
{"g": [np.nan, np.nan, np.nan], "c": [np.nan, np.nan, np.nan]},
{"a": [7, 5, 8], "b": pd.Series([2.0, 7.0, 9.0], index=[0, 1, 2])},
{
"b": [np.nan, 2.0, np.nan],
"c": pd.Series([2, np.nan, 5.0], index=[2, 3, 4]),
},
{
"a": [True, np.nan, True],
"d": pd.Series([False, True, np.nan], index=[0, 1, 3]),
},
],
)
def test_update_for_dataframes(data, data2, join, overwrite, errors):
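    # ``DataFrame.update`` with a left join should match pandas for
    # overlapping and non-overlapping columns, NaN handling and the
    # ``overwrite`` flag.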
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
other_pd = pd.DataFrame(data2)
other_gd = cudf.DataFrame(data2)
pdf.update(other=other_pd, join=join, overwrite=overwrite, errors=errors)
gdf.update(other=other_gd, join=join, overwrite=overwrite, errors=errors)
assert_eq(pdf, gdf, check_dtype=False)
@pytest.mark.parametrize(
"join", ["right"],
)
def test_update_for_right_join(join):
gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [3.0, 4.0, 5.0]})
other_gd = cudf.DataFrame({"a": [1, np.nan, 3], "b": [np.nan, 2.0, 5.0]})
with pytest.raises(
NotImplementedError, match="Only left join is supported"
):
gdf.update(other_gd, join)
@pytest.mark.parametrize(
"errors", ["raise"],
)
def test_update_for_data_overlap(errors):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3.0, 4.0, 5.0]})
gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [3.0, 4.0, 5.0]})
other_pd = pd.DataFrame({"a": [1, np.nan, 3], "b": [np.nan, 2.0, 5.0]})
other_gd = cudf.DataFrame({"a": [1, np.nan, 3], "b": [np.nan, 2.0, 5.0]})
assert_exceptions_equal(
lfunc=pdf.update,
rfunc=gdf.update,
lfunc_args_and_kwargs=([other_pd, errors], {}),
rfunc_args_and_kwargs=([other_gd, errors], {}),
)
@pytest.mark.parametrize(
"gdf",
[
cudf.DataFrame({"a": [[1], [2], [3]]}),
cudf.DataFrame(
{
"left-a": [0, 1, 2],
"a": [[1], None, [3]],
"right-a": ["abc", "def", "ghi"],
}
),
cudf.DataFrame(
{
"left-a": [[], None, None],
"a": [[1], None, [3]],
"right-a": ["abc", "def", "ghi"],
}
),
],
)
def test_dataframe_roundtrip_arrow_list_dtype(gdf):
table = gdf.to_arrow()
expected = cudf.DataFrame.from_arrow(table)
assert_eq(gdf, expected)
@pytest.mark.parametrize(
"gdf",
[
cudf.DataFrame({"a": [{"one": 3, "two": 4, "three": 10}]}),
cudf.DataFrame(
{
"left-a": [0, 1, 2],
"a": [{"x": 0.23, "y": 43}, None, {"x": 23.9, "y": 4.3}],
"right-a": ["abc", "def", "ghi"],
}
),
cudf.DataFrame(
{
"left-a": [{"a": 1}, None, None],
"a": [
{"one": 324, "two": 23432, "three": 324},
None,
{"one": 3.24, "two": 1, "three": 324},
],
"right-a": ["abc", "def", "ghi"],
}
),
],
)
def test_dataframe_roundtrip_arrow_struct_dtype(gdf):
table = gdf.to_arrow()
expected = cudf.DataFrame.from_arrow(table)
assert_eq(gdf, expected)
def test_dataframe_setitem_cupy_array():
np.random.seed(0)
pdf = pd.DataFrame(np.random.randn(10, 2))
gdf = cudf.from_pandas(pdf)
gpu_array = cupy.array([True, False] * 5)
pdf[gpu_array.get()] = 1.5
gdf[gpu_array] = 1.5
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data", [{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}]
)
@pytest.mark.parametrize(
"index", [{0: 123, 1: 4, 2: 6}],
)
@pytest.mark.parametrize(
"level", ["x", 0],
)
def test_rename_for_level_MultiIndex_dataframe(data, index, level):
pdf = pd.DataFrame(
data,
index=pd.MultiIndex.from_tuples([(0, 1, 2), (1, 2, 3), (2, 3, 4)]),
)
pdf.index.names = ["x", "y", "z"]
gdf = cudf.from_pandas(pdf)
expect = pdf.rename(index=index, level=level)
got = gdf.rename(index=index, level=level)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data", [{"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}]
)
@pytest.mark.parametrize(
"columns", [{"a": "f", "b": "g"}, {1: 3, 2: 4}, lambda s: 2 * s],
)
@pytest.mark.parametrize(
"level", [0, 1],
)
def test_rename_for_level_MultiColumn_dataframe(data, columns, level):
gdf = cudf.DataFrame(data)
gdf.columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
pdf = gdf.to_pandas()
expect = pdf.rename(columns=columns, level=level)
got = gdf.rename(columns=columns, level=level)
assert_eq(expect, got)
def test_rename_for_level_RangeIndex_dataframe():
gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
pdf = gdf.to_pandas()
expect = pdf.rename(columns={"a": "f"}, index={0: 3, 1: 4}, level=0)
got = gdf.rename(columns={"a": "f"}, index={0: 3, 1: 4}, level=0)
assert_eq(expect, got)
@pytest.mark.xfail(reason="level=None not implemented yet")
def test_rename_for_level_is_None_MC():
gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf.columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
pdf = gdf.to_pandas()
expect = pdf.rename(columns={"a": "f"}, level=None)
got = gdf.rename(columns={"a": "f"}, level=None)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
[
[[1, 2, 3], 11, "a"],
[None, 22, "e"],
[[4], 33, "i"],
[[], 44, "o"],
[[5, 6], 55, "u"],
], # nested
[
[1, 11, "a"],
[2, 22, "e"],
[3, 33, "i"],
[4, 44, "o"],
[5, 55, "u"],
], # non-nested
],
)
@pytest.mark.parametrize(
("labels", "label_to_explode"),
[
(None, 0),
(pd.Index(["a", "b", "c"]), "a"),
(
pd.MultiIndex.from_tuples(
[(0, "a"), (0, "b"), (1, "a")], names=["l0", "l1"]
),
(0, "a"),
),
],
)
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize(
"p_index",
[
None,
["ia", "ib", "ic", "id", "ie"],
pd.MultiIndex.from_tuples(
[(0, "a"), (0, "b"), (0, "c"), (1, "a"), (1, "b")]
),
],
)
def test_explode(data, labels, ignore_index, p_index, label_to_explode):
pdf = pd.DataFrame(data, index=p_index, columns=labels)
gdf = cudf.from_pandas(pdf)
expect = pdf.explode(label_to_explode, ignore_index)
got = gdf.explode(label_to_explode, ignore_index)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"df,ascending,expected",
[
(
cudf.DataFrame({"a": [10, 0, 2], "b": [-10, 10, 1]}),
True,
cudf.Series([1, 2, 0], dtype="int32"),
),
(
cudf.DataFrame({"a": [10, 0, 2], "b": [-10, 10, 1]}),
False,
cudf.Series([0, 2, 1], dtype="int32"),
),
],
)
def test_dataframe_argsort(df, ascending, expected):
actual = df.argsort(ascending=ascending)
assert_eq(actual, expected)
@pytest.mark.parametrize(
"data,columns,index",
[
(pd.Series([1, 2, 3]), None, None),
(pd.Series(["a", "b", None, "c"], name="abc"), None, None),
(
pd.Series(["a", "b", None, "c"], name="abc"),
["abc", "b"],
[1, 2, 3],
),
],
)
def test_dataframe_init_from_series(data, columns, index):
expected = pd.DataFrame(data, columns=columns, index=index)
actual = cudf.DataFrame(data, columns=columns, index=index)
assert_eq(
expected,
actual,
check_index_type=False if len(expected) == 0 else True,
)
@pytest.mark.parametrize(
"data, expected",
[
({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "c": [1.2, 1, 2, 3]}, False),
({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, True),
({"a": ["a", "b", "c"], "b": [4, 5, 6], "c": [7, 8, 9]}, False),
({"a": [True, False, False], "b": [False, False, True]}, True),
({"a": [True, False, False]}, True),
({"a": [[1, 2], [3, 4]]}, True),
({"a": [[1, 2], [3, 4]], "b": ["a", "b"]}, False),
({"a": [{"c": 5}, {"e": 5}], "b": [{"c": 5}, {"g": 7}]}, True),
({}, True),
],
)
def test_is_homogeneous_dataframe(data, expected):
actual = cudf.DataFrame(data)._is_homogeneous
assert actual == expected
@pytest.mark.parametrize(
"data, indexes, expected",
[
(
{"a": [1, 2, 3, 4], "b": [5, 6, 7, 8], "c": [1.2, 1, 2, 3]},
["a", "b"],
True,
),
(
{
"a": [1, 2, 3, 4],
"b": [5, 6, 7, 8],
"c": [1.2, 1, 2, 3],
"d": ["hello", "world", "cudf", "rapids"],
},
["a", "b"],
False,
),
(
{
"a": ["a", "b", "c"],
"b": [4, 5, 6],
"c": [7, 8, 9],
"d": [1, 2, 3],
},
["a", "b"],
True,
),
],
)
def test_is_homogeneous_multiIndex_dataframe(data, indexes, expected):
test_dataframe = cudf.DataFrame(data).set_index(indexes)
actual = cudf.DataFrame(test_dataframe)._is_homogeneous
assert actual == expected
@pytest.mark.parametrize(
"data, expected", [([1, 2, 3, 4], True), ([True, False], True)]
)
def test_is_homogeneous_series(data, expected):
actual = cudf.Series(data)._is_homogeneous
assert actual == expected
@pytest.mark.parametrize(
"levels, codes, expected",
[
(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
True,
),
(
[[1, 2, 3], [True, False, True]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
False,
),
],
)
def test_is_homogeneous_multiIndex(levels, codes, expected):
actual = cudf.MultiIndex(levels=levels, codes=codes)._is_homogeneous
assert actual == expected
@pytest.mark.parametrize(
"data, expected",
[([1, 2, 3], True), (["Hello", "World"], True), ([True, False], True)],
)
def test_is_homogeneous_index(data, expected):
actual = cudf.Index(data)._is_homogeneous
assert actual == expected
| 28.760805
| 79
| 0.524357
|
ad1cb0cd1cde8868d6fcf102bba0b10b331df33d
| 783
|
py
|
Python
|
13_Day_List_comprehension/lc.py
|
diegofregolente/30-Days-Of-Python
|
e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267
|
[
"Apache-2.0"
] | null | null | null |
13_Day_List_comprehension/lc.py
|
diegofregolente/30-Days-Of-Python
|
e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267
|
[
"Apache-2.0"
] | null | null | null |
13_Day_List_comprehension/lc.py
|
diegofregolente/30-Days-Of-Python
|
e0cad31f6d5ab1384ad6fa5a5d24a84771d6c267
|
[
"Apache-2.0"
] | null | null | null |
# List Comprehension Basic
language = 'Python'
lst = [i for i in language]
print(lst)
# Generating Numbers
numbers = [i for i in range(11)]
print(numbers)
# Square with LC
squares = [i * i for i in range(11)]
print(squares)
# List of Tuples with LC
numbers = [(i, i * i) for i in range(11)]
print(numbers)
# Lists of Even with LC
even_numbers = [i for i in range(11) if i % 2 == 0]
print(even_numbers)
# Lists of Odd with LC
odd_numbers = [i for i in range(11) if i % 2 != 0]
print(odd_numbers)
numbers = [-8, -7, -6, -5, 0, 2, 3, 7, 8, 9, 10]
positive_even_numbers = [i for i in numbers if i % 2 == 0 and i >= 0]
print(positive_even_numbers)
# Flattening a nested (two-dimensional) list with a nested LC
two_dimen_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
flattened_list = [n for r in two_dimen_list for n in r]
print(flattened_list)
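# A further illustrative sketch (not part of the original exercise): a list
# comprehension can also use a conditional expression to map values instead of
# filtering them.
parity_labels = ['even' if i % 2 == 0 else 'odd' for i in range(5)]
print(parity_labels)  # ['even', 'odd', 'even', 'odd', 'even']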
| 24.46875
| 69
| 0.659004
|
41b7fd276571f517b7df8ec4ce99b6b5b30213c6
| 1,441
|
py
|
Python
|
containsDuplicates.py
|
anilgitme/py
|
6739f2ccfadc339dcfdd6118f4f2ad97f4452300
|
[
"MIT"
] | null | null | null |
containsDuplicates.py
|
anilgitme/py
|
6739f2ccfadc339dcfdd6118f4f2ad97f4452300
|
[
"MIT"
] | null | null | null |
containsDuplicates.py
|
anilgitme/py
|
6739f2ccfadc339dcfdd6118f4f2ad97f4452300
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
noDups = {}
for num in nums:
if num not in noDups:
noDups[num] = 1
else:
return True
return False
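# The dictionary-based lookup above runs in O(n) time and uses O(n) extra space.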
# brute force
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
size = len(nums)
if size <= 1:
return False
for i in range(0, size):
for j in range(i + 1, size):
if nums[i] == nums[j]:
return True
return False
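# The nested loops above run in O(n^2) time but need only O(1) extra space.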
A = Solution()
print(A.containsDuplicate([3,0,1,4,1,5]))
print(A.containsDuplicate([0,1,2,3,4,4]))
print(A.containsDuplicate([0,1,2,3,4]))
print(A.containsDuplicate([-6,5,6,1,8,-3,0]))
print(A.containsDuplicate([]))
print(A.containsDuplicate([-1]))
#2
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
if len(nums) <= 1:
return False
noDuplicates = {}
for num in nums:
if num not in noDuplicates:
noDuplicates[num] = 1
else:
return True
return False
obj = Solution()
print(obj.containsDuplicate([0,1,3,9,0]))
print(obj.containsDuplicate([]))
print(obj.containsDuplicate([1]))
print(obj.containsDuplicate([9,99,999]))
print(obj.containsDuplicate([1,1,1,1,1]))
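# Illustrative alternative (not one of the original solutions): comparing the
# length of a set against the original list gives the same answer in O(n) time.
class SetSolution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        # A set drops repeats, so a shorter set means at least one duplicate.
        return len(set(nums)) != len(nums)
print(SetSolution().containsDuplicate([3, 0, 1, 4, 1, 5]))  # True
print(SetSolution().containsDuplicate([0, 1, 2, 3, 4]))     # False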
| 24.423729
| 59
| 0.530187
|
3d75bfdaa4fb099695ac1c3c050f8eb4070c7cb4
| 38,265
|
py
|
Python
|
zarr/tests/test_hierarchy.py
|
raphaeldussin/zarr-python
|
be29d3659daf83b8ff2b119ac98d97c47324706c
|
[
"MIT"
] | null | null | null |
zarr/tests/test_hierarchy.py
|
raphaeldussin/zarr-python
|
be29d3659daf83b8ff2b119ac98d97c47324706c
|
[
"MIT"
] | null | null | null |
zarr/tests/test_hierarchy.py
|
raphaeldussin/zarr-python
|
be29d3659daf83b8ff2b119ac98d97c47324706c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import atexit
import os
import pickle
import shutil
import tempfile
import textwrap
import unittest
import numpy as np
import pytest
from numcodecs import Zlib
from numpy.testing import assert_array_equal
from zarr.attrs import Attributes
from zarr.core import Array
from zarr.creation import open_array
from zarr.hierarchy import Group, group, open_group
from zarr.storage import (ABSStore, DBMStore, DirectoryStore, LMDBStore,
LRUStoreCache, MemoryStore, NestedDirectoryStore,
SQLiteStore, ZipStore, array_meta_key, atexit_rmglob,
atexit_rmtree, group_meta_key, init_array,
init_group)
from zarr.util import InfoReporter
from zarr.tests.util import skip_test_env_var
# noinspection PyStatementEffect
class TestGroup(unittest.TestCase):
@staticmethod
def create_store():
# can be overridden in sub-classes
return dict(), None
def create_group(self, store=None, path=None, read_only=False,
chunk_store=None, synchronizer=None):
# can be overridden in sub-classes
if store is None:
store, chunk_store = self.create_store()
init_group(store, path=path, chunk_store=chunk_store)
g = Group(store, path=path, read_only=read_only,
chunk_store=chunk_store, synchronizer=synchronizer)
return g
def test_group_init_1(self):
store, chunk_store = self.create_store()
g = self.create_group(store, chunk_store=chunk_store)
assert store is g.store
if chunk_store is None:
assert store is g.chunk_store
else:
assert chunk_store is g.chunk_store
assert not g.read_only
assert '' == g.path
assert '/' == g.name
assert '' == g.basename
assert isinstance(g.attrs, Attributes)
g.attrs['foo'] = 'bar'
assert g.attrs['foo'] == 'bar'
assert isinstance(g.info, InfoReporter)
assert isinstance(repr(g.info), str)
assert isinstance(g.info._repr_html_(), str)
def test_group_init_2(self):
store, chunk_store = self.create_store()
g = self.create_group(store, chunk_store=chunk_store,
path='/foo/bar/', read_only=True)
assert store is g.store
assert g.read_only
assert 'foo/bar' == g.path
assert '/foo/bar' == g.name
assert 'bar' == g.basename
assert isinstance(g.attrs, Attributes)
def test_group_init_errors_1(self):
store, chunk_store = self.create_store()
# group metadata not initialized
with pytest.raises(ValueError):
Group(store, chunk_store=chunk_store)
def test_group_init_errors_2(self):
store, chunk_store = self.create_store()
init_array(store, shape=1000, chunks=100, chunk_store=chunk_store)
# array blocks group
with pytest.raises(ValueError):
Group(store, chunk_store=chunk_store)
def test_create_group(self):
g1 = self.create_group()
# check root group
assert '' == g1.path
assert '/' == g1.name
# create level 1 child group
g2 = g1.create_group('foo')
assert isinstance(g2, Group)
assert 'foo' == g2.path
assert '/foo' == g2.name
# create level 2 child group
g3 = g2.create_group('bar')
assert isinstance(g3, Group)
assert 'foo/bar' == g3.path
assert '/foo/bar' == g3.name
# create level 3 child group
g4 = g1.create_group('foo/bar/baz')
assert isinstance(g4, Group)
assert 'foo/bar/baz' == g4.path
assert '/foo/bar/baz' == g4.name
# create level 3 group via root
g5 = g4.create_group('/a/b/c/')
assert isinstance(g5, Group)
assert 'a/b/c' == g5.path
assert '/a/b/c' == g5.name
# test non-str keys
class Foo(object):
def __init__(self, s):
self.s = s
def __str__(self):
return self.s
o = Foo('test/object')
go = g1.create_group(o)
assert isinstance(go, Group)
assert 'test/object' == go.path
go = g1.create_group(b'test/bytes')
assert isinstance(go, Group)
assert 'test/bytes' == go.path
# test bad keys
with pytest.raises(ValueError):
g1.create_group('foo') # already exists
with pytest.raises(ValueError):
g1.create_group('a/b/c') # already exists
with pytest.raises(ValueError):
g4.create_group('/a/b/c') # already exists
with pytest.raises(ValueError):
g1.create_group('')
with pytest.raises(ValueError):
g1.create_group('/')
with pytest.raises(ValueError):
g1.create_group('//')
# multi
g6, g7 = g1.create_groups('y', 'z')
assert isinstance(g6, Group)
assert g6.path == 'y'
assert isinstance(g7, Group)
assert g7.path == 'z'
def test_require_group(self):
g1 = self.create_group()
# test creation
g2 = g1.require_group('foo')
assert isinstance(g2, Group)
assert 'foo' == g2.path
g3 = g2.require_group('bar')
assert isinstance(g3, Group)
assert 'foo/bar' == g3.path
g4 = g1.require_group('foo/bar/baz')
assert isinstance(g4, Group)
assert 'foo/bar/baz' == g4.path
g5 = g4.require_group('/a/b/c/')
assert isinstance(g5, Group)
assert 'a/b/c' == g5.path
# test when already created
g2a = g1.require_group('foo')
assert g2 == g2a
assert g2.store is g2a.store
g3a = g2a.require_group('bar')
assert g3 == g3a
assert g3.store is g3a.store
g4a = g1.require_group('foo/bar/baz')
assert g4 == g4a
assert g4.store is g4a.store
g5a = g4a.require_group('/a/b/c/')
assert g5 == g5a
assert g5.store is g5a.store
# test path normalization
assert g1.require_group('quux') == g1.require_group('/quux/')
# multi
g6, g7 = g1.require_groups('y', 'z')
assert isinstance(g6, Group)
assert g6.path == 'y'
assert isinstance(g7, Group)
assert g7.path == 'z'
def test_create_dataset(self):
g = self.create_group()
# create as immediate child
d1 = g.create_dataset('foo', shape=1000, chunks=100)
assert isinstance(d1, Array)
assert (1000,) == d1.shape
assert (100,) == d1.chunks
assert 'foo' == d1.path
assert '/foo' == d1.name
assert g.store is d1.store
# create as descendant
d2 = g.create_dataset('/a/b/c/', shape=2000, chunks=200, dtype='i1',
compression='zlib', compression_opts=9,
fill_value=42, order='F')
assert isinstance(d2, Array)
assert (2000,) == d2.shape
assert (200,) == d2.chunks
assert np.dtype('i1') == d2.dtype
assert 'zlib' == d2.compressor.codec_id
assert 9 == d2.compressor.level
assert 42 == d2.fill_value
assert 'F' == d2.order
assert 'a/b/c' == d2.path
assert '/a/b/c' == d2.name
assert g.store is d2.store
# create with data
data = np.arange(3000, dtype='u2')
d3 = g.create_dataset('bar', data=data, chunks=300)
assert isinstance(d3, Array)
assert (3000,) == d3.shape
assert (300,) == d3.chunks
assert np.dtype('u2') == d3.dtype
assert_array_equal(data, d3[:])
assert 'bar' == d3.path
assert '/bar' == d3.name
assert g.store is d3.store
# compression arguments handling follows...
# compression_opts as dict
d = g.create_dataset('aaa', shape=1000, dtype='u1',
compression='blosc',
compression_opts=dict(cname='zstd', clevel=1, shuffle=2))
assert d.compressor.codec_id == 'blosc'
assert 'zstd' == d.compressor.cname
assert 1 == d.compressor.clevel
assert 2 == d.compressor.shuffle
# compression_opts as sequence
d = g.create_dataset('bbb', shape=1000, dtype='u1',
compression='blosc',
compression_opts=('zstd', 1, 2))
assert d.compressor.codec_id == 'blosc'
assert 'zstd' == d.compressor.cname
assert 1 == d.compressor.clevel
assert 2 == d.compressor.shuffle
# None compression_opts
d = g.create_dataset('ccc', shape=1000, dtype='u1', compression='zlib')
assert d.compressor.codec_id == 'zlib'
assert 1 == d.compressor.level
# None compression
d = g.create_dataset('ddd', shape=1000, dtype='u1', compression=None)
assert d.compressor is None
# compressor as compression
d = g.create_dataset('eee', shape=1000, dtype='u1', compression=Zlib(1))
assert d.compressor.codec_id == 'zlib'
assert 1 == d.compressor.level
def test_require_dataset(self):
g = self.create_group()
# create
d1 = g.require_dataset('foo', shape=1000, chunks=100, dtype='f4')
d1[:] = np.arange(1000)
assert isinstance(d1, Array)
assert (1000,) == d1.shape
assert (100,) == d1.chunks
assert np.dtype('f4') == d1.dtype
assert 'foo' == d1.path
assert '/foo' == d1.name
assert g.store is d1.store
assert_array_equal(np.arange(1000), d1[:])
# require
d2 = g.require_dataset('foo', shape=1000, chunks=100, dtype='f4')
assert isinstance(d2, Array)
assert (1000,) == d2.shape
assert (100,) == d2.chunks
assert np.dtype('f4') == d2.dtype
assert 'foo' == d2.path
assert '/foo' == d2.name
assert g.store is d2.store
assert_array_equal(np.arange(1000), d2[:])
assert d1 == d2
# bad shape - use TypeError for h5py compatibility
with pytest.raises(TypeError):
g.require_dataset('foo', shape=2000, chunks=100, dtype='f4')
# dtype matching
# can cast
d3 = g.require_dataset('foo', shape=1000, chunks=100, dtype='i2')
assert np.dtype('f4') == d3.dtype
assert d1 == d3
with pytest.raises(TypeError):
# cannot cast
g.require_dataset('foo', shape=1000, chunks=100, dtype='i4')
with pytest.raises(TypeError):
# can cast but not exact match
g.require_dataset('foo', shape=1000, chunks=100, dtype='i2',
exact=True)
def test_create_errors(self):
g = self.create_group()
# array obstructs group, array
g.create_dataset('foo', shape=100, chunks=10)
with pytest.raises(ValueError):
g.create_group('foo/bar')
with pytest.raises(ValueError):
g.require_group('foo/bar')
with pytest.raises(ValueError):
g.create_dataset('foo/bar', shape=100, chunks=10)
with pytest.raises(ValueError):
g.require_dataset('foo/bar', shape=100, chunks=10)
# array obstructs group, array
g.create_dataset('a/b', shape=100, chunks=10)
with pytest.raises(ValueError):
g.create_group('a/b')
with pytest.raises(ValueError):
g.require_group('a/b')
with pytest.raises(ValueError):
g.create_dataset('a/b', shape=100, chunks=10)
# group obstructs array
g.create_group('c/d')
with pytest.raises(ValueError):
g.create_dataset('c', shape=100, chunks=10)
with pytest.raises(ValueError):
g.require_dataset('c', shape=100, chunks=10)
with pytest.raises(ValueError):
g.create_dataset('c/d', shape=100, chunks=10)
with pytest.raises(ValueError):
g.require_dataset('c/d', shape=100, chunks=10)
# h5py compatibility, accept 'fillvalue'
d = g.create_dataset('x', shape=100, chunks=10, fillvalue=42)
assert 42 == d.fill_value
# h5py compatibility, ignore 'shuffle'
with pytest.warns(UserWarning, match="ignoring keyword argument 'shuffle'"):
g.create_dataset('y', shape=100, chunks=10, shuffle=True)
# read-only
g = self.create_group(read_only=True)
with pytest.raises(PermissionError):
g.create_group('zzz')
with pytest.raises(PermissionError):
g.require_group('zzz')
with pytest.raises(PermissionError):
g.create_dataset('zzz', shape=100, chunks=10)
with pytest.raises(PermissionError):
g.require_dataset('zzz', shape=100, chunks=10)
def test_create_overwrite(self):
try:
for method_name in 'create_dataset', 'create', 'empty', 'zeros', \
'ones':
g = self.create_group()
getattr(g, method_name)('foo', shape=100, chunks=10)
# overwrite array with array
d = getattr(g, method_name)('foo', shape=200, chunks=20,
overwrite=True)
assert (200,) == d.shape
# overwrite array with group
g2 = g.create_group('foo', overwrite=True)
assert 0 == len(g2)
# overwrite group with array
d = getattr(g, method_name)('foo', shape=300, chunks=30,
overwrite=True)
assert (300,) == d.shape
# overwrite array with group
d = getattr(g, method_name)('foo/bar', shape=400, chunks=40,
overwrite=True)
assert (400,) == d.shape
assert isinstance(g['foo'], Group)
except NotImplementedError:
pass
def test_getitem_contains_iterators(self):
# setup
g1 = self.create_group()
g2 = g1.create_group('foo/bar')
d1 = g2.create_dataset('/a/b/c', shape=1000, chunks=100)
d1[:] = np.arange(1000)
d2 = g1.create_dataset('foo/baz', shape=3000, chunks=300)
d2[:] = np.arange(3000)
# test __getitem__
assert isinstance(g1['foo'], Group)
assert isinstance(g1['foo']['bar'], Group)
assert isinstance(g1['foo/bar'], Group)
assert isinstance(g1['/foo/bar/'], Group)
assert isinstance(g1['foo/baz'], Array)
assert g2 == g1['foo/bar']
assert g1['foo']['bar'] == g1['foo/bar']
assert d2 == g1['foo/baz']
assert_array_equal(d2[:], g1['foo/baz'])
assert isinstance(g1['a'], Group)
assert isinstance(g1['a']['b'], Group)
assert isinstance(g1['a/b'], Group)
assert isinstance(g1['a']['b']['c'], Array)
assert isinstance(g1['a/b/c'], Array)
assert d1 == g1['a/b/c']
assert g1['a']['b']['c'] == g1['a/b/c']
assert_array_equal(d1[:], g1['a/b/c'][:])
# test __contains__
assert 'foo' in g1
assert 'foo/bar' in g1
assert 'foo/baz' in g1
assert 'bar' in g1['foo']
assert 'a' in g1
assert 'a/b' in g1
assert 'a/b/c' in g1
assert 'baz' not in g1
assert 'a/b/c/d' not in g1
assert 'a/z' not in g1
assert 'quux' not in g1['foo']
# test key errors
with pytest.raises(KeyError):
g1['baz']
with pytest.raises(KeyError):
g1['x/y/z']
# test __len__
assert 2 == len(g1)
assert 2 == len(g1['foo'])
assert 0 == len(g1['foo/bar'])
assert 1 == len(g1['a'])
assert 1 == len(g1['a/b'])
# test __iter__, keys()
# currently assumes sorted by key
assert ['a', 'foo'] == list(g1)
assert ['a', 'foo'] == list(g1.keys())
assert ['bar', 'baz'] == list(g1['foo'])
assert ['bar', 'baz'] == list(g1['foo'].keys())
assert [] == sorted(g1['foo/bar'])
assert [] == sorted(g1['foo/bar'].keys())
# test items(), values()
# currently assumes sorted by key
items = list(g1.items())
values = list(g1.values())
assert 'a' == items[0][0]
assert g1['a'] == items[0][1]
assert g1['a'] == values[0]
assert 'foo' == items[1][0]
assert g1['foo'] == items[1][1]
assert g1['foo'] == values[1]
items = list(g1['foo'].items())
values = list(g1['foo'].values())
assert 'bar' == items[0][0]
assert g1['foo']['bar'] == items[0][1]
assert g1['foo']['bar'] == values[0]
assert 'baz' == items[1][0]
assert g1['foo']['baz'] == items[1][1]
assert g1['foo']['baz'] == values[1]
# test array_keys(), arrays(), group_keys(), groups()
# currently assumes sorted by key
assert ['a', 'foo'] == list(g1.group_keys())
groups = list(g1.groups())
arrays = list(g1.arrays())
assert 'a' == groups[0][0]
assert g1['a'] == groups[0][1]
assert 'foo' == groups[1][0]
assert g1['foo'] == groups[1][1]
assert [] == list(g1.array_keys())
assert [] == arrays
assert ['bar'] == list(g1['foo'].group_keys())
assert ['baz'] == list(g1['foo'].array_keys())
groups = list(g1['foo'].groups())
arrays = list(g1['foo'].arrays())
assert 'bar' == groups[0][0]
assert g1['foo']['bar'] == groups[0][1]
assert 'baz' == arrays[0][0]
assert g1['foo']['baz'] == arrays[0][1]
# visitor collection tests
items = []
def visitor2(obj):
items.append(obj.path)
# noinspection PyUnusedLocal
def visitor3(name, obj=None):
items.append(name)
def visitor4(name, obj):
items.append((name, obj))
del items[:]
g1.visitvalues(visitor2)
assert [
"a",
"a/b",
"a/b/c",
"foo",
"foo/bar",
"foo/baz",
] == items
del items[:]
g1["foo"].visitvalues(visitor2)
assert [
"foo/bar",
"foo/baz",
] == items
del items[:]
g1.visit(visitor3)
assert [
"a",
"a/b",
"a/b/c",
"foo",
"foo/bar",
"foo/baz",
] == items
del items[:]
g1["foo"].visit(visitor3)
assert [
"bar",
"baz",
] == items
del items[:]
g1.visitkeys(visitor3)
assert [
"a",
"a/b",
"a/b/c",
"foo",
"foo/bar",
"foo/baz",
] == items
del items[:]
g1["foo"].visitkeys(visitor3)
assert [
"bar",
"baz",
] == items
del items[:]
g1.visititems(visitor3)
assert [
"a",
"a/b",
"a/b/c",
"foo",
"foo/bar",
"foo/baz",
] == items
del items[:]
g1["foo"].visititems(visitor3)
assert [
"bar",
"baz",
] == items
del items[:]
g1.visititems(visitor4)
for n, o in items:
assert g1[n] == o
del items[:]
g1["foo"].visititems(visitor4)
for n, o in items:
assert g1["foo"][n] == o
# visitor filter tests
# noinspection PyUnusedLocal
def visitor0(val, *args):
name = getattr(val, "path", val)
if name == "a/b/c/d":
return True # pragma: no cover
# noinspection PyUnusedLocal
def visitor1(val, *args):
name = getattr(val, "path", val)
if name == "a/b/c":
return True
assert g1.visit(visitor0) is None
assert g1.visitkeys(visitor0) is None
assert g1.visitvalues(visitor0) is None
assert g1.visititems(visitor0) is None
assert g1.visit(visitor1) is True
assert g1.visitkeys(visitor1) is True
assert g1.visitvalues(visitor1) is True
assert g1.visititems(visitor1) is True
def test_empty_getitem_contains_iterators(self):
# setup
g = self.create_group()
# test
assert [] == list(g)
assert [] == list(g.keys())
assert 0 == len(g)
assert 'foo' not in g
def test_iterators_recurse(self):
# setup
g1 = self.create_group()
g2 = g1.create_group('foo/bar')
d1 = g2.create_dataset('/a/b/c', shape=1000, chunks=100)
d1[:] = np.arange(1000)
d2 = g1.create_dataset('foo/baz', shape=3000, chunks=300)
d2[:] = np.arange(3000)
d3 = g2.create_dataset('zab', shape=2000, chunks=200)
d3[:] = np.arange(2000)
# test recursive array_keys
array_keys = list(g1['foo'].array_keys(recurse=False))
array_keys_recurse = list(g1['foo'].array_keys(recurse=True))
assert len(array_keys_recurse) > len(array_keys)
assert sorted(array_keys_recurse) == ['baz', 'zab']
# test recursive arrays
arrays = list(g1['foo'].arrays(recurse=False))
arrays_recurse = list(g1['foo'].arrays(recurse=True))
assert len(arrays_recurse) > len(arrays)
assert 'zab' == arrays_recurse[0][0]
assert g1['foo']['bar']['zab'] == arrays_recurse[0][1]
def test_getattr(self):
# setup
g1 = self.create_group()
g2 = g1.create_group('foo')
g2.create_dataset('bar', shape=100)
# test
assert g1['foo'] == g1.foo
assert g2['bar'] == g2.bar
# test that hasattr returns False instead of an exception (issue #88)
assert not hasattr(g1, 'unexistingattribute')
def test_setitem(self):
g = self.create_group()
try:
data = np.arange(100)
g['foo'] = data
assert_array_equal(data, g['foo'])
data = np.arange(200)
g['foo'] = data
assert_array_equal(data, g['foo'])
# 0d array
g['foo'] = 42
assert () == g['foo'].shape
assert 42 == g['foo'][()]
except NotImplementedError:
pass
def test_delitem(self):
g = self.create_group()
g.create_group('foo')
g.create_dataset('bar/baz', shape=100, chunks=10)
assert 'foo' in g
assert 'bar' in g
assert 'bar/baz' in g
try:
del g['bar']
with pytest.raises(KeyError):
del g['xxx']
except NotImplementedError:
pass
else:
assert 'foo' in g
assert 'bar' not in g
assert 'bar/baz' not in g
def test_move(self):
g = self.create_group()
data = np.arange(100)
g['boo'] = data
data = np.arange(100)
g['foo'] = data
try:
g.move('foo', 'bar')
assert 'foo' not in g
assert 'bar' in g
assert_array_equal(data, g['bar'])
g.move('bar', 'foo/bar')
assert 'bar' not in g
assert 'foo' in g
assert 'foo/bar' in g
assert isinstance(g['foo'], Group)
assert_array_equal(data, g['foo/bar'])
g.move('foo', 'foo2')
assert 'foo' not in g
assert 'foo/bar' not in g
assert 'foo2' in g
assert 'foo2/bar' in g
assert isinstance(g['foo2'], Group)
assert_array_equal(data, g['foo2/bar'])
g2 = g['foo2']
g2.move('bar', '/bar')
assert 'foo2' in g
assert 'foo2/bar' not in g
assert 'bar' in g
assert isinstance(g['foo2'], Group)
assert_array_equal(data, g['bar'])
with pytest.raises(ValueError):
g2.move('bar', 'bar2')
with pytest.raises(ValueError):
g.move('bar', 'boo')
except NotImplementedError:
pass
def test_array_creation(self):
grp = self.create_group()
a = grp.create('a', shape=100, chunks=10)
assert isinstance(a, Array)
b = grp.empty('b', shape=100, chunks=10)
assert isinstance(b, Array)
assert b.fill_value is None
c = grp.zeros('c', shape=100, chunks=10)
assert isinstance(c, Array)
assert 0 == c.fill_value
d = grp.ones('d', shape=100, chunks=10)
assert isinstance(d, Array)
assert 1 == d.fill_value
e = grp.full('e', shape=100, chunks=10, fill_value=42)
assert isinstance(e, Array)
assert 42 == e.fill_value
f = grp.empty_like('f', a)
assert isinstance(f, Array)
assert f.fill_value is None
g = grp.zeros_like('g', a)
assert isinstance(g, Array)
assert 0 == g.fill_value
h = grp.ones_like('h', a)
assert isinstance(h, Array)
assert 1 == h.fill_value
i = grp.full_like('i', e)
assert isinstance(i, Array)
assert 42 == i.fill_value
j = grp.array('j', data=np.arange(100), chunks=10)
assert isinstance(j, Array)
assert_array_equal(np.arange(100), j[:])
grp = self.create_group(read_only=True)
with pytest.raises(PermissionError):
grp.create('aa', shape=100, chunks=10)
with pytest.raises(PermissionError):
grp.empty('aa', shape=100, chunks=10)
with pytest.raises(PermissionError):
grp.zeros('aa', shape=100, chunks=10)
with pytest.raises(PermissionError):
grp.ones('aa', shape=100, chunks=10)
with pytest.raises(PermissionError):
grp.full('aa', shape=100, chunks=10, fill_value=42)
with pytest.raises(PermissionError):
grp.array('aa', data=np.arange(100), chunks=10)
with pytest.raises(PermissionError):
grp.create('aa', shape=100, chunks=10)
with pytest.raises(PermissionError):
grp.empty_like('aa', a)
with pytest.raises(PermissionError):
grp.zeros_like('aa', a)
with pytest.raises(PermissionError):
grp.ones_like('aa', a)
with pytest.raises(PermissionError):
grp.full_like('aa', a)
def test_paths(self):
g1 = self.create_group()
g2 = g1.create_group('foo/bar')
assert g1 == g1['/']
assert g1 == g1['//']
assert g1 == g1['///']
assert g1 == g2['/']
assert g1 == g2['//']
assert g1 == g2['///']
assert g2 == g1['foo/bar']
assert g2 == g1['/foo/bar']
assert g2 == g1['foo/bar/']
assert g2 == g1['//foo/bar']
assert g2 == g1['//foo//bar//']
assert g2 == g1['///foo///bar///']
assert g2 == g2['/foo/bar']
with pytest.raises(ValueError):
g1['.']
with pytest.raises(ValueError):
g1['..']
with pytest.raises(ValueError):
g1['foo/.']
with pytest.raises(ValueError):
g1['foo/..']
with pytest.raises(ValueError):
g1['foo/./bar']
with pytest.raises(ValueError):
g1['foo/../bar']
def test_pickle(self):
# setup group
g = self.create_group()
d = g.create_dataset('foo/bar', shape=100, chunks=10)
d[:] = np.arange(100)
path = g.path
name = g.name
n = len(g)
keys = list(g)
# round-trip through pickle
dump = pickle.dumps(g)
# some stores cannot be opened twice at the same time, need to close
# store before can round-trip through pickle
if hasattr(g.store, 'close'):
g.store.close()
g2 = pickle.loads(dump)
# verify
assert path == g2.path
assert name == g2.name
assert n == len(g2)
assert keys == list(g2)
assert isinstance(g2['foo'], Group)
assert isinstance(g2['foo/bar'], Array)
class TestGroupWithMemoryStore(TestGroup):
@staticmethod
def create_store():
return MemoryStore(), None
class TestGroupWithDirectoryStore(TestGroup):
@staticmethod
def create_store():
path = tempfile.mkdtemp()
atexit.register(atexit_rmtree, path)
store = DirectoryStore(path)
return store, None
@skip_test_env_var("ZARR_TEST_ABS")
class TestGroupWithABSStore(TestGroup):
@staticmethod
def create_store():
asb = pytest.importorskip("azure.storage.blob")
blob_client = asb.BlockBlobService(is_emulated=True)
blob_client.delete_container('test')
blob_client.create_container('test')
store = ABSStore(container='test', prefix='zarrtesting/', account_name='foo',
account_key='bar', blob_service_kwargs={'is_emulated': True})
store.rmdir()
return store, None
class TestGroupWithNestedDirectoryStore(TestGroup):
@staticmethod
def create_store():
path = tempfile.mkdtemp()
atexit.register(atexit_rmtree, path)
store = NestedDirectoryStore(path)
return store, None
class TestGroupWithZipStore(TestGroup):
@staticmethod
def create_store():
path = tempfile.mktemp(suffix='.zip')
atexit.register(os.remove, path)
store = ZipStore(path)
return store, None
class TestGroupWithDBMStore(TestGroup):
@staticmethod
def create_store():
path = tempfile.mktemp(suffix='.anydbm')
atexit.register(atexit_rmglob, path + '*')
store = DBMStore(path, flag='n')
return store, None
class TestGroupWithDBMStoreBerkeleyDB(TestGroup):
@staticmethod
def create_store():
bsddb3 = pytest.importorskip("bsddb3")
path = tempfile.mktemp(suffix='.dbm')
atexit.register(os.remove, path)
store = DBMStore(path, flag='n', open=bsddb3.btopen)
return store, None
class TestGroupWithLMDBStore(TestGroup):
@staticmethod
def create_store():
pytest.importorskip("lmdb")
path = tempfile.mktemp(suffix='.lmdb')
atexit.register(atexit_rmtree, path)
store = LMDBStore(path)
return store, None
class TestGroupWithSQLiteStore(TestGroup):
def create_store(self):
pytest.importorskip("sqlite3")
path = tempfile.mktemp(suffix='.db')
atexit.register(atexit_rmtree, path)
store = SQLiteStore(path)
return store, None
class TestGroupWithChunkStore(TestGroup):
@staticmethod
def create_store():
return dict(), dict()
def test_chunk_store(self):
# setup
store, chunk_store = self.create_store()
g = self.create_group(store, chunk_store=chunk_store)
# check attributes
assert store is g.store
assert chunk_store is g.chunk_store
# create array
a = g.zeros('foo', shape=100, chunks=10)
assert store is a.store
assert chunk_store is a.chunk_store
a[:] = np.arange(100)
assert_array_equal(np.arange(100), a[:])
# check store keys
expect = sorted([group_meta_key, 'foo/' + array_meta_key])
actual = sorted(store.keys())
assert expect == actual
expect = ['foo/' + str(i) for i in range(10)]
actual = sorted(chunk_store.keys())
assert expect == actual
class TestGroupWithStoreCache(TestGroup):
@staticmethod
def create_store():
store = LRUStoreCache(dict(), max_size=None)
return store, None
def test_group():
# test the group() convenience function
# basic usage
g = group()
assert isinstance(g, Group)
assert '' == g.path
assert '/' == g.name
# usage with custom store
store = dict()
g = group(store=store)
assert isinstance(g, Group)
assert store is g.store
# overwrite behaviour
store = dict()
init_array(store, shape=100, chunks=10)
with pytest.raises(ValueError):
group(store)
g = group(store, overwrite=True)
assert isinstance(g, Group)
assert store is g.store
def test_open_group():
# test the open_group() convenience function
store = 'data/group.zarr'
# mode == 'w'
g = open_group(store, mode='w')
assert isinstance(g, Group)
assert isinstance(g.store, DirectoryStore)
assert 0 == len(g)
g.create_groups('foo', 'bar')
assert 2 == len(g)
# mode in 'r', 'r+'
open_array('data/array.zarr', shape=100, chunks=10, mode='w')
for mode in 'r', 'r+':
with pytest.raises(ValueError):
open_group('doesnotexist', mode=mode)
with pytest.raises(ValueError):
open_group('data/array.zarr', mode=mode)
g = open_group(store, mode='r')
assert isinstance(g, Group)
assert 2 == len(g)
with pytest.raises(PermissionError):
g.create_group('baz')
g = open_group(store, mode='r+')
assert isinstance(g, Group)
assert 2 == len(g)
g.create_groups('baz', 'quux')
assert 4 == len(g)
# mode == 'a'
shutil.rmtree(store)
g = open_group(store, mode='a')
assert isinstance(g, Group)
assert isinstance(g.store, DirectoryStore)
assert 0 == len(g)
g.create_groups('foo', 'bar')
assert 2 == len(g)
with pytest.raises(ValueError):
open_group('data/array.zarr', mode='a')
# mode in 'w-', 'x'
for mode in 'w-', 'x':
shutil.rmtree(store)
g = open_group(store, mode=mode)
assert isinstance(g, Group)
assert isinstance(g.store, DirectoryStore)
assert 0 == len(g)
g.create_groups('foo', 'bar')
assert 2 == len(g)
with pytest.raises(ValueError):
open_group(store, mode=mode)
with pytest.raises(ValueError):
open_group('data/array.zarr', mode=mode)
# open with path
g = open_group(store, path='foo/bar')
assert isinstance(g, Group)
assert 'foo/bar' == g.path
def test_group_completions():
g = group()
d = dir(g)
assert 'foo' not in d
assert 'bar' not in d
assert 'baz' not in d
assert 'qux' not in d
assert 'xxx' not in d
assert 'yyy' not in d
assert 'zzz' not in d
assert '123' not in d
assert '456' not in d
g.create_groups('foo', 'bar', 'baz/qux', '123')
g.zeros('xxx', shape=100)
g.zeros('yyy', shape=100)
g.zeros('zzz', shape=100)
g.zeros('456', shape=100)
d = dir(g)
assert 'foo' in d
assert 'bar' in d
assert 'baz' in d
assert 'qux' not in d
assert 'xxx' in d
assert 'yyy' in d
assert 'zzz' in d
assert '123' not in d # not valid identifier
assert '456' not in d # not valid identifier
def test_group_key_completions():
g = group()
d = dir(g)
# noinspection PyProtectedMember
k = g._ipython_key_completions_()
# none of these names should be an attribute
assert 'foo' not in d
assert 'bar' not in d
assert 'baz' not in d
assert 'qux' not in d
assert 'xxx' not in d
assert 'yyy' not in d
assert 'zzz' not in d
assert '123' not in d
assert '456' not in d
assert 'asdf;' not in d
# none of these names should be an item
assert 'foo' not in k
assert 'bar' not in k
assert 'baz' not in k
assert 'qux' not in k
assert 'xxx' not in k
assert 'yyy' not in k
assert 'zzz' not in k
assert '123' not in k
assert '456' not in k
assert 'asdf;' not in k
g.create_groups('foo', 'bar', 'baz/qux', '123')
g.zeros('xxx', shape=100)
g.zeros('yyy', shape=100)
g.zeros('zzz', shape=100)
g.zeros('456', shape=100)
g.zeros('asdf;', shape=100)
d = dir(g)
# noinspection PyProtectedMember
k = g._ipython_key_completions_()
assert 'foo' in d
assert 'bar' in d
assert 'baz' in d
assert 'qux' not in d
assert 'xxx' in d
assert 'yyy' in d
assert 'zzz' in d
assert '123' not in d # not valid identifier
assert '456' not in d # not valid identifier
assert 'asdf;' not in d # not valid identifier
assert 'foo' in k
assert 'bar' in k
assert 'baz' in k
assert 'qux' not in k
assert 'xxx' in k
assert 'yyy' in k
assert 'zzz' in k
assert '123' in k
assert '456' in k
assert 'asdf;' in k
def _check_tree(g, expect_bytes, expect_text):
assert expect_bytes == bytes(g.tree())
assert expect_text == str(g.tree())
expect_repr = expect_text
assert expect_repr == repr(g.tree())
# test _repr_html_ lightly
# noinspection PyProtectedMember
html = g.tree()._repr_html_().strip()
assert html.startswith('<link')
assert html.endswith('</script>')
def test_tree():
# setup
g1 = group()
g2 = g1.create_group('foo')
g3 = g1.create_group('bar')
g3.create_group('baz')
g5 = g3.create_group('quux')
g5.create_dataset('baz', shape=100, chunks=10)
# test root group
expect_bytes = textwrap.dedent(u"""\
/
+-- bar
| +-- baz
| +-- quux
| +-- baz (100,) float64
+-- foo""").encode()
expect_text = textwrap.dedent(u"""\
/
├── bar
│ ├── baz
│ └── quux
│ └── baz (100,) float64
└── foo""")
_check_tree(g1, expect_bytes, expect_text)
# test different group
expect_bytes = textwrap.dedent(u"""\
foo""").encode()
expect_text = textwrap.dedent(u"""\
foo""")
_check_tree(g2, expect_bytes, expect_text)
# test different group
expect_bytes = textwrap.dedent(u"""\
bar
+-- baz
+-- quux
+-- baz (100,) float64""").encode()
expect_text = textwrap.dedent(u"""\
bar
├── baz
└── quux
└── baz (100,) float64""")
_check_tree(g3, expect_bytes, expect_text)
| 31.008914
| 86
| 0.553273
|
bf35440be20ead0a87f87a94cfe6fae4ac50a720
| 1,903
|
py
|
Python
|
eggs/Cheetah-2.2.2-py2.7-linux-x86_64-ucs4.egg/Cheetah/Tests/Filters.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/Cheetah-2.2.2-py2.7-linux-x86_64-ucs4.egg/Cheetah/Tests/Filters.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/Cheetah-2.2.2-py2.7-linux-x86_64-ucs4.egg/Cheetah/Tests/Filters.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 1
|
2020-07-25T21:03:18.000Z
|
2020-07-25T21:03:18.000Z
|
#!/usr/bin/env python
import sys
import Cheetah.Template
import Cheetah.Filters
import unittest_local_copy as unittest
majorVer, minorVer = sys.version_info[0], sys.version_info[1]
versionTuple = (majorVer, minorVer)
class BasicMarkdownFilterTest(unittest.TestCase):
'''
Test that our markdown filter works
'''
def test_BasicHeader(self):
template = '''
#from Cheetah.Filters import Markdown
#transform Markdown
$foo
Header
======
'''
expected = '''<p>bar</p>
<h1>Header</h1>'''
try:
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template == expected
except Exception, ex:
if ex.__class__.__name__ == 'MarkdownException' and majorVer == 2 and minorVer < 5:
print '>>> NOTE: Support for the Markdown filter will be broken for you. Markdown says: %s' % ex
return
raise
class BasicCodeHighlighterFilterTest(unittest.TestCase):
'''
Test that our code highlighter filter works
'''
def test_Python(self):
template = '''
#from Cheetah.Filters import CodeHighlighter
#transform CodeHighlighter
def foo(self):
return '$foo'
'''
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template, (template, 'We should have some content here...')
def test_Html(self):
template = '''
#from Cheetah.Filters import CodeHighlighter
#transform CodeHighlighter
<html><head></head><body>$foo</body></html>
'''
template = Cheetah.Template.Template(template, searchList=[{'foo' : 'bar'}])
template = str(template)
assert template, (template, 'We should have some content here...')
if __name__ == '__main__':
unittest.main()
| 27.57971
| 112
| 0.633736
|
bd0b53385438ef4852e5fad4322aa35a3ac43406
| 1,868
|
py
|
Python
|
setup.py
|
nicc777/flask-prod-docker
|
b345b92f9fdf4c06e72bb852e9dc5478f29019e0
|
[
"MIT"
] | null | null | null |
setup.py
|
nicc777/flask-prod-docker
|
b345b92f9fdf4c06e72bb852e9dc5478f29019e0
|
[
"MIT"
] | 8
|
2020-03-21T06:13:47.000Z
|
2021-04-25T04:59:43.000Z
|
setup.py
|
nicc777/flask-prod-docker
|
b345b92f9fdf4c06e72bb852e9dc5478f29019e0
|
[
"MIT"
] | 1
|
2017-05-22T18:13:17.000Z
|
2017-05-22T18:13:17.000Z
|
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='example',
version='0.0.2',
description='An example Flask project, deployed in Docker and with AWS Cognito integration',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/nicc777/flask-prod-docker',
author='Nico Coetzee',
author_email='nicc777@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords='flask cognito docker',
#package_dir={'': 'src'},
#packages=find_packages(where='src'),
packages=find_packages(),
include_package_data=True,
install_requires=['Flask', 'cognitojwt', 'Flask-Cognito', 'gunicorn'],
    python_requires='>=3.5, <4',
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
project_urls={
'Bug Reports': 'https://github.com/nicc777/flask-prod-docker/issues',
'Source': 'https://github.com/nicc777/flask-prod-docker',
},
)
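# Illustrative usage (assumed, not from the original project docs): the optional
# dependency groups declared in extras_require above are installed with
#   pip install .[dev,test]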
| 32.77193
| 96
| 0.631692
|
31be14e735c5a362dcf3f59025265b6b80432445
| 7,640
|
py
|
Python
|
interact.py
|
echodarkstar/transfer-learning-conv-ai
|
b8fcf26055af6fcbb3c2b8e63d631f13e450afbf
|
[
"MIT"
] | null | null | null |
interact.py
|
echodarkstar/transfer-learning-conv-ai
|
b8fcf26055af6fcbb3c2b8e63d631f13e450afbf
|
[
"MIT"
] | null | null | null |
interact.py
|
echodarkstar/transfer-learning-conv-ai
|
b8fcf26055af6fcbb3c2b8e63d631f13e450afbf
|
[
"MIT"
] | null | null | null |
# # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import warnings
import torch
import torch.nn.functional as F
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from .train import SPECIAL_TOKENS, build_input_from_segments, add_special_tokens_
from .utils import get_dataset, download_pretrained_model
def top_filtering(logits, top_k=0., top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
    assert logits.dim() == 1  # Only works for batch size 1 for now - could update, but it would obfuscate the code a bit
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
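# Illustrative sketch (not part of the original script): how top_filtering is
# typically combined with softmax and multinomial sampling. The logits below are
# made-up values and this helper is never called at import time.
def _example_top_filtering():
    logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
    filtered = top_filtering(logits.clone(), top_k=0, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)  # tokens outside the nucleus end up with ~0 probability
    return torch.multinomial(probs, 1)   # sample one token id from the kept candidates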
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(args.max_length):
instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
logits = model(input_ids, token_type_ids=token_type_ids)
if isinstance(logits, tuple): # for gpt2 and maybe others
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
if probs.max().item() == 1:
warnings.warn("Warning: model generating special token with probability 1.")
break # avoid infinitely looping over special token
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
def run():
parser = ArgumentParser()
parser.add_argument("--dataset_path", type=str, default="", help="Path or url of the dataset. If empty download from S3.")
parser.add_argument("--dataset_cache", type=str, default='./dataset_cache', help="Path or url of the dataset cache")
parser.add_argument("--model", type=str, default="openai-gpt", help="Model type (openai-gpt or gpt2)", choices=['openai-gpt', 'gpt2']) # anything besides gpt2 will load openai-gpt
parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
parser.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=20, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=42, help="Seed")
    parser.add_argument("--temperature", type=float, default=0.7, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
if args.model_checkpoint == "":
if args.model == 'gpt2':
raise ValueError("Interacting with GPT2 requires passing a finetuned model_checkpoint")
else:
args.model_checkpoint = download_pretrained_model()
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class, model_class = (GPT2Tokenizer, GPT2LMHeadModel) if args.model == 'gpt2' else (OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
add_special_tokens_(model, tokenizer)
logger.info("Sample a personality")
dataset = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)
personalities = [dialog["personality"] for dataset in dataset.values() for dialog in dataset]
personality = random.choice(personalities)
logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))
history = []
while True:
raw_text = input(">>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input(">>> ")
history.append(tokenizer.encode(raw_text))
with torch.no_grad():
out_ids = sample_sequence(personality, history, tokenizer, model, args)
history.append(out_ids)
history = history[-(2*args.max_history+1):]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(out_text)
if __name__ == "__main__":
run()
| 50.263158
| 184
| 0.699738
|
29dc75e44fa531da0eec3ca22654aa2803fd933d
| 5,078
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/tests/utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/tests/utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/tests/utils.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""Common utility for testing third party oauth2 features."""
import json
from base64 import b64encode
from unittest import skip
import httpretty
from onelogin.saml2.utils import OneLogin_Saml2_Utils
from oauth2_provider.models import Application
from social_core.backends.facebook import API_VERSION as FACEBOOK_API_VERSION
from social_core.backends.facebook import FacebookOAuth2
from social_django.models import Partial, UserSocialAuth
from common.djangoapps.student.tests.factories import UserFactory
from .testutil import ThirdPartyAuthTestMixin, AUTH_FEATURE_ENABLED, AUTH_FEATURES_KEY
@httpretty.activate
class ThirdPartyOAuthTestMixin(ThirdPartyAuthTestMixin):
"""
Mixin with tests for third party oauth views. A TestCase that includes
this must define the following:
BACKEND: The name of the backend from python-social-auth
USER_URL: The URL of the endpoint that the backend retrieves user data from
UID_FIELD: The field in the user data that the backend uses as the user id
"""
social_uid = "test_social_uid"
access_token = "test_access_token"
client_id = "test_client_id"
CREATE_USER = True
def setUp(self): # lint-amnesty, pylint: disable=arguments-differ
super().setUp()
if self.CREATE_USER:
self.user = UserFactory.create(password='secret')
UserSocialAuth.objects.create(user=self.user, provider=self.BACKEND, uid=self.social_uid)
self.oauth_client = self._create_client()
if self.BACKEND == 'google-oauth2':
self.configure_google_provider(enabled=True, visible=True)
elif self.BACKEND == 'facebook':
self.configure_facebook_provider(enabled=True, visible=True)
def tearDown(self):
super().tearDown()
Partial.objects.all().delete()
def _create_client(self):
"""
Create an OAuth2 client application
"""
return Application.objects.create(
client_id=self.client_id,
client_type=Application.CLIENT_PUBLIC,
)
def _setup_provider_response(self, success=False, email=''):
"""
Register a mock response for the third party user information endpoint;
success indicates whether the response status code should be 200 or 400
"""
if success:
status = 200
response = {self.UID_FIELD: self.social_uid}
if email:
response.update({'email': email})
body = json.dumps(response)
else:
status = 400
body = json.dumps({})
self._setup_provider_response_with_body(status, body)
def _setup_provider_response_with_body(self, status, body):
"""
Register a mock response for the third party user information endpoint with given status and body.
"""
httpretty.register_uri(
httpretty.GET,
self.USER_URL,
body=body,
status=status,
content_type="application/json",
)
class ThirdPartyOAuthTestMixinFacebook:
"""Tests oauth with the Facebook backend"""
BACKEND = "facebook"
USER_URL = FacebookOAuth2.USER_DATA_URL.format(version=FACEBOOK_API_VERSION)
# In facebook responses, the "id" field is used as the user's identifier
UID_FIELD = "id"
class ThirdPartyOAuthTestMixinGoogle:
"""Tests oauth with the Google backend"""
BACKEND = "google-oauth2"
USER_URL = "https://www.googleapis.com/oauth2/v3/userinfo"
# In google-oauth2 responses, the "email" field is used as the user's identifier
UID_FIELD = "email"
def read_and_pre_process_xml(file_name):
"""
    Read the XML file with the name specified in the argument and pre-process the XML so that it can be parsed.
    Pre-processing removes line return (newline) characters (i.e. "\n").
Arguments:
file_name (str): Name of the XML file.
Returns:
(str): Pre Processed contents of the file.
"""
with open(file_name) as xml_file:
return xml_file.read().replace('\n', '')
def prepare_saml_response_from_xml(xml, relay_state='testshib'):
"""
    Pre-process XML so that it can be used as a SAML Response coming from a SAML IdP.
    This method will perform the following operations on the XML in the given order
1. base64 encode XML.
2. URL encode the base64 encoded data.
Arguments:
xml (string): XML data
relay_state (string): Relay State of the SAML Response
Returns:
(str): Base64 and URL encoded XML.
"""
b64encoded_xml = b64encode(xml.encode())
return 'RelayState={relay_state}&SAMLResponse={saml_response}'.format(
relay_state=OneLogin_Saml2_Utils.escape_url(relay_state),
saml_response=OneLogin_Saml2_Utils.escape_url(b64encoded_xml)
)
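# Hypothetical usage note (not part of the original module): tests typically chain the two
# helpers above, e.g. prepare_saml_response_from_xml(read_and_pre_process_xml('response.xml')),
# and POST the resulting 'RelayState=...&SAMLResponse=...' body to the SAML endpoint under test
# ('response.xml' is only an illustrative file name).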
def skip_unless_thirdpartyauth():
"""
Wraps unittest.skip in consistent logic to skip certain third_party_auth tests in CMS.
"""
if AUTH_FEATURE_ENABLED:
return lambda func: func
return skip("%s not enabled" % AUTH_FEATURES_KEY)
| 33.407895
| 107
| 0.687082
|
fadb86777297abab5f529ff5ff4b201ec2850a5f
| 1,304
|
py
|
Python
|
exercises/ex28.py
|
jinkyukim-me/StudyPython
|
6c98598c23c506101882392645fbb14d4aa998d4
|
[
"MIT"
] | null | null | null |
exercises/ex28.py
|
jinkyukim-me/StudyPython
|
6c98598c23c506101882392645fbb14d4aa998d4
|
[
"MIT"
] | null | null | null |
exercises/ex28.py
|
jinkyukim-me/StudyPython
|
6c98598c23c506101882392645fbb14d4aa998d4
|
[
"MIT"
] | null | null | null |
# Exercise 28. Boolean Practice
print("True and True : ",True and True)
print("False and True : ", False and True)
print("1 == 1 and 2 == 1 : ", 1 == 1 and 2 == 1)
print('"test" == "test" : ', "test" == "test")
print("1 == 1 or 2 != 1 : ", 1 == 1 or 2 != 1)
print("True and 1 == 1 : ", True and 1 == 1)
print("False and 0 != 0 : ", False and 0 != 0)
print("True or 1 == 1 : ", True or 1 == 1)
print('"test" == "testing" : ', "test" == "testing")
print("1 != 0 and 2 == 1 : ", 1 != 0 and 2 == 1)
print('"test" != "testing" : ', "test" != "testing")
print('"test" == 1 : ', "test" == 1)
print("not (True and False) :", not (True and False))
print("not (1 == 1 and 0 != 1) :", not (1 == 1 and 0 != 1))
print("not (10 == 1 or 1000 == 1000) :", not (10 == 1 or 1000 == 1000))
print("not (1 != 10 or 3 == 4) :", not (1 != 10 or 3 == 4))
print('not ("testing" == "testing" and "Zed" == "Cool Guy") : ', not ("testing" == "testing" and "Zed" == "Cool Guy"))
print('1 == 1 and (not ("testing" == 1 or 1 == 0)) : ', 1 == 1 and (not ("testing" == 1 or 1 == 0)))
print('"chunky" == "bacon" and (not (3 == 4 or 3 == 3)) : ', "chunky" == "bacon" and (not (3 == 4 or 3 == 3)))
print('3 == 3 and (not ("testing" == "testing" or "Python" == "Fun")) : ', 3 == 3 and (not ("testing" == "testing" or "Python" == "Fun")))
| 54.333333
| 138
| 0.497699
|
344913a1ad6e42887dd1e91e8bc968120cdf3f90
| 21,096
|
py
|
Python
|
examples/dmm/dmm.py
|
DEVESHTARASIA/pyro
|
7fce5508fe4f15a1a65a267e8d6df3aeead1a3ec
|
[
"MIT"
] | null | null | null |
examples/dmm/dmm.py
|
DEVESHTARASIA/pyro
|
7fce5508fe4f15a1a65a267e8d6df3aeead1a3ec
|
[
"MIT"
] | null | null | null |
examples/dmm/dmm.py
|
DEVESHTARASIA/pyro
|
7fce5508fe4f15a1a65a267e8d6df3aeead1a3ec
|
[
"MIT"
] | null | null | null |
"""
An implementation of a Deep Markov Model in Pyro based on reference [1].
This is essentially the DKS variant outlined in the paper. The primary difference
between this implementation and theirs is that in our version any KL divergence terms
in the ELBO are estimated via sampling, while they make use of the analytic formulae.
We also illustrate the use of normalizing flows in the variational distribution (in which
case analytic formulae for the KL divergences are in any case unavailable).
Reference:
[1] Structured Inference Networks for Nonlinear State Space Models [arXiv:1609.09869]
Rahul G. Krishnan, Uri Shalit, David Sontag
"""
import torch
import torch.nn as nn
import numpy as np
import pyro
from pyro.infer import SVI
from pyro.optim import ClippedAdam
import pyro.distributions as dist
from pyro.util import ng_ones
from pyro.distributions.transformed_distribution import InverseAutoregressiveFlow
from pyro.distributions.transformed_distribution import TransformedDistribution
import six.moves.cPickle as pickle
import polyphonic_data_loader as poly
from os.path import exists
import argparse
import time
from util import get_logger
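# In symbols (matching the modules defined below, added for clarity): for t = 1, ..., T the
# generative model draws
#   z_t ~ Normal(mu_theta(z_{t-1}), sigma_theta(z_{t-1}))    (GatedTransition)
#   x_t ~ Bernoulli(p_theta(z_t))                            (Emitter)
# while the guide factorizes as q(z_t | z_{t-1}, x_{t:T}), a diagonal Normal whose parameters
# are produced by Combiner from z_{t-1} and the hidden state of an RNN run over x in reverse.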
class Emitter(nn.Module):
"""
Parameterizes the bernoulli observation likelihood `p(x_t | z_t)`
"""
def __init__(self, input_dim, z_dim, emission_dim):
super(Emitter, self).__init__()
# initialize the three linear transformations used in the neural network
self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim)
self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim)
self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim)
# initialize the two non-linearities used in the neural network
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, z_t):
"""
Given the latent z at a particular time step t we return the vector of
probabilities `ps` that parameterizes the bernoulli distribution `p(x_t|z_t)`
"""
h1 = self.relu(self.lin_z_to_hidden(z_t))
h2 = self.relu(self.lin_hidden_to_hidden(h1))
ps = self.sigmoid(self.lin_hidden_to_input(h2))
return ps
class GatedTransition(nn.Module):
"""
Parameterizes the gaussian latent transition probability `p(z_t | z_{t-1})`
See section 5 in the reference for comparison.
"""
def __init__(self, z_dim, transition_dim):
super(GatedTransition, self).__init__()
# initialize the six linear transformations used in the neural network
self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim)
self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)
self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim)
self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)
self.lin_sig = nn.Linear(z_dim, z_dim)
self.lin_z_to_mu = nn.Linear(z_dim, z_dim)
# modify the default initialization of lin_z_to_mu
        # so that it starts out as the identity function
self.lin_z_to_mu.weight.data = torch.eye(z_dim)
self.lin_z_to_mu.bias.data = torch.zeros(z_dim)
# initialize the three non-linearities used in the neural network
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.softplus = nn.Softplus()
def forward(self, z_t_1):
"""
Given the latent `z_{t-1}` corresponding to the time step t-1
we return the mean and sigma vectors that parameterize the
(diagonal) gaussian distribution `p(z_t | z_{t-1})`
"""
# compute the gating function and one minus the gating function
gate_intermediate = self.relu(self.lin_gate_z_to_hidden(z_t_1))
gate = self.sigmoid(self.lin_gate_hidden_to_z(gate_intermediate))
one_minus_gate = ng_ones(gate.size()).type_as(gate) - gate
# compute the 'proposed mean'
proposed_mean_intermediate = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1))
proposed_mean = self.lin_proposed_mean_hidden_to_z(proposed_mean_intermediate)
# assemble the actual mean used to sample z_t, which mixes a linear transformation
# of z_{t-1} with the proposed mean modulated by the gating function
mu = one_minus_gate * self.lin_z_to_mu(z_t_1) + gate * proposed_mean
# compute the sigma used to sample z_t, using the proposed mean from above as input
# the softplus ensures that sigma is positive
sigma = self.softplus(self.lin_sig(self.relu(proposed_mean)))
# return mu, sigma which can be fed into Normal
return mu, sigma
class Combiner(nn.Module):
"""
Parameterizes `q(z_t | z_{t-1}, x_{t:T})`, which is the basic building block
of the guide (i.e. the variational distribution). The dependence on `x_{t:T}` is
through the hidden state of the RNN (see the PyTorch module `rnn` below)
"""
def __init__(self, z_dim, rnn_dim):
super(Combiner, self).__init__()
# initialize the three linear transformations used in the neural network
self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim)
self.lin_hidden_to_mu = nn.Linear(rnn_dim, z_dim)
self.lin_hidden_to_sigma = nn.Linear(rnn_dim, z_dim)
# initialize the two non-linearities used in the neural network
self.tanh = nn.Tanh()
self.softplus = nn.Softplus()
def forward(self, z_t_1, h_rnn):
"""
        Given the latent z at a particular time step t-1 as well as the hidden
state of the RNN `h(x_{t:T})` we return the mean and sigma vectors that
parameterize the (diagonal) gaussian distribution `q(z_t | z_{t-1}, x_{t:T})`
"""
# combine the rnn hidden state with a transformed version of z_t_1
h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn)
# use the combined hidden state to compute the mean used to sample z_t
mu = self.lin_hidden_to_mu(h_combined)
# use the combined hidden state to compute the sigma used to sample z_t
sigma = self.softplus(self.lin_hidden_to_sigma(h_combined))
# return mu, sigma which can be fed into Normal
return mu, sigma
class DMM(nn.Module):
"""
This PyTorch Module encapsulates the model as well as the
variational distribution (the guide) for the Deep Markov Model
"""
def __init__(self, input_dim=88, z_dim=100, emission_dim=100,
transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0,
num_iafs=0, iaf_dim=50, use_cuda=False):
super(DMM, self).__init__()
# instantiate PyTorch modules used in the model and guide below
self.emitter = Emitter(input_dim, z_dim, emission_dim)
self.trans = GatedTransition(z_dim, transition_dim)
self.combiner = Combiner(z_dim, rnn_dim)
self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu',
batch_first=True, bidirectional=False, num_layers=1,
dropout=rnn_dropout_rate)
# if we're using normalizing flows, instantiate those too
iafs = [InverseAutoregressiveFlow(z_dim, iaf_dim) for _ in range(num_iafs)]
self.iafs = nn.ModuleList(iafs)
        # define (trainable) parameters z_0 and z_q_0 that help define the probability
# distributions p(z_1) and q(z_1)
# (since for t = 1 there are no previous latents to condition on)
self.z_0 = nn.Parameter(torch.zeros(z_dim))
self.z_q_0 = nn.Parameter(torch.zeros(z_dim))
# define a (trainable) parameter for the initial hidden state of the rnn
self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))
self.use_cuda = use_cuda
# if on gpu cuda-ize all PyTorch (sub)modules
if use_cuda:
self.cuda()
# the model p(x_{1:T} | z_{1:T}) p(z_{1:T})
def model(self, mini_batch, mini_batch_reversed, mini_batch_mask,
mini_batch_seq_lengths, annealing_factor=1.0):
# this is the number of time steps we need to process in the mini-batch
T_max = mini_batch.size(1)
# register all PyTorch (sub)modules with pyro
# this needs to happen in both the model and guide
pyro.module("dmm", self)
# set z_prev = z_0 to setup the recursive conditioning in p(z_t | z_{t-1})
z_prev = self.z_0.expand(mini_batch.size(0), self.z_0.size(0))
# sample the latents z and observed x's one time step at a time
for t in range(1, T_max + 1):
# the next three lines of code sample z_t ~ p(z_t | z_{t-1})
# note that (both here and elsewhere) log_pdf_mask takes care of both
# (i) KL annealing; and
# (ii) raggedness in the observed data (i.e. different sequences
# in the mini-batch have different lengths)
# first compute the parameters of the diagonal gaussian distribution p(z_t | z_{t-1})
z_mu, z_sigma = self.trans(z_prev)
# then sample z_t according to dist.Normal(z_mu, z_sigma)
z_t = pyro.sample("z_%d" % t,
dist.normal,
z_mu,
z_sigma,
log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])
# compute the probabilities that parameterize the bernoulli likelihood
emission_probs_t = self.emitter(z_t)
# the next statement instructs pyro to observe x_t according to the
# bernoulli distribution p(x_t|z_t)
pyro.sample("obs_x_%d" % t,
dist.bernoulli,
emission_probs_t,
log_pdf_mask=mini_batch_mask[:, t - 1:t],
obs=mini_batch[:, t - 1, :])
# the latent sampled at this time step will be conditioned upon
# in the next time step so keep track of it
z_prev = z_t
# the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution)
def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask,
mini_batch_seq_lengths, annealing_factor=1.0):
# this is the number of time steps we need to process in the mini-batch
T_max = mini_batch.size(1)
# register all PyTorch (sub)modules with pyro
pyro.module("dmm", self)
# if on gpu we need the fully broadcast view of the rnn initial state
# to be in contiguous gpu memory
h_0_contig = self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous()
# push the observed x's through the rnn;
# rnn_output contains the hidden state at each time step
rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig)
# reverse the time-ordering in the hidden state and un-pack it
rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths)
# set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...)
z_prev = self.z_q_0
# sample the latents z one time step at a time
for t in range(1, T_max + 1):
# the next two lines assemble the distribution q(z_t | z_{t-1}, x_{t:T})
z_mu, z_sigma = self.combiner(z_prev, rnn_output[:, t - 1, :])
z_dist = dist.normal
# if we are using normalizing flows, we apply the sequence of transformations
# parameterized by self.iafs to the base distribution defined in the previous line
# to yield a transformed distribution that we use for q(z_t|...)
            if len(self.iafs) > 0:
z_dist = TransformedDistribution(z_dist, self.iafs)
# sample z_t from the distribution z_dist
z_t = pyro.sample("z_%d" % t,
z_dist,
z_mu,
z_sigma,
log_pdf_mask=annealing_factor * mini_batch_mask[:, t - 1:t])
# the latent sampled at this time step will be conditioned upon in the next time step
# so keep track of it
z_prev = z_t
# setup, training, and evaluation
def main(args):
# setup logging
log = get_logger(args.log)
log(args)
jsb_file_loc = "./data/jsb_processed.pkl"
# ingest training/validation/test data from disk
data = pickle.load(open(jsb_file_loc, "rb"))
training_seq_lengths = data['train']['sequence_lengths']
training_data_sequences = data['train']['sequences']
test_seq_lengths = data['test']['sequence_lengths']
test_data_sequences = data['test']['sequences']
val_seq_lengths = data['valid']['sequence_lengths']
val_data_sequences = data['valid']['sequences']
N_train_data = len(training_seq_lengths)
N_train_time_slices = np.sum(training_seq_lengths)
N_mini_batches = int(N_train_data / args.mini_batch_size +
int(N_train_data % args.mini_batch_size > 0))
log("N_train_data: %d avg. training seq. length: %.2f N_mini_batches: %d" %
(N_train_data, np.mean(training_seq_lengths), N_mini_batches))
# how often we do validation/test evaluation during training
val_test_frequency = 50
# the number of samples we use to do the evaluation
n_eval_samples = 1
# package repeated copies of val/test data for faster evaluation
# (i.e. set us up for vectorization)
def rep(x):
y = np.repeat(x, n_eval_samples, axis=0)
return y
# get the validation/test data ready for the dmm: pack into sequences, etc.
val_seq_lengths = rep(val_seq_lengths)
test_seq_lengths = rep(test_seq_lengths)
val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch(
np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences),
val_seq_lengths, cuda=args.cuda)
test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = poly.get_mini_batch(
np.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences),
test_seq_lengths, cuda=args.cuda)
# instantiate the dmm
dmm = DMM(rnn_dropout_rate=args.rnn_dropout_rate, num_iafs=args.num_iafs,
iaf_dim=args.iaf_dim, use_cuda=args.cuda)
# setup optimizer
adam_params = {"lr": args.learning_rate, "betas": (args.beta1, args.beta2),
"clip_norm": args.clip_norm, "lrd": args.lr_decay,
"weight_decay": args.weight_decay}
adam = ClippedAdam(adam_params)
# setup inference algorithm
elbo = SVI(dmm.model, dmm.guide, adam, "ELBO", trace_graph=False)
# now we're going to define some functions we need to form the main training loop
# saves the model and optimizer states to disk
def save_checkpoint():
log("saving model to %s..." % args.save_model)
torch.save(dmm.state_dict(), args.save_model)
log("saving optimizer states to %s..." % args.save_opt)
adam.save(args.save_opt)
log("done saving model and optimizer checkpoints to disk.")
# loads the model and optimizer states from disk
def load_checkpoint():
assert exists(args.load_opt) and exists(args.load_model), \
"--load-model and/or --load-opt misspecified"
log("loading model from %s..." % args.load_model)
dmm.load_state_dict(torch.load(args.load_model))
log("loading optimizer states from %s..." % args.load_opt)
adam.load(args.load_opt)
log("done loading model and optimizer states.")
# prepare a mini-batch and take a gradient step to minimize -elbo
def process_minibatch(epoch, which_mini_batch, shuffled_indices):
if args.annealing_epochs > 0 and epoch < args.annealing_epochs:
            # compute the KL annealing factor appropriate for the current mini-batch in the current epoch
min_af = args.minimum_annealing_factor
annealing_factor = min_af + (1.0 - min_af) * \
(float(which_mini_batch + epoch * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
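        # Worked example (illustrative numbers, not tied to any dataset): with the default
        # min_af = 0.1 and annealing_epochs = 1000, and assuming N_mini_batches = 100, the
        # factor starts at roughly 0.1 on the very first mini-batch and ramps linearly to 1.0
        # once epoch * N_mini_batches + which_mini_batch + 1 reaches 100,000.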
# compute which sequences in the training set we should grab
mini_batch_start = (which_mini_batch * args.mini_batch_size)
mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data])
mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end]
# grab a fully prepped mini-batch using the helper function in the data loader
mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \
= poly.get_mini_batch(mini_batch_indices, training_data_sequences,
training_seq_lengths, cuda=args.cuda)
# do an actual gradient step
loss = elbo.step(mini_batch, mini_batch_reversed, mini_batch_mask,
mini_batch_seq_lengths, annealing_factor)
# keep track of the training loss
return loss
# helper function for doing evaluation
def do_evaluation():
# put the RNN into evaluation mode (i.e. turn off drop-out if applicable)
dmm.rnn.eval()
# compute the validation and test loss n_samples many times
val_nll = elbo.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask,
val_seq_lengths) / np.sum(val_seq_lengths)
test_nll = elbo.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask,
test_seq_lengths) / np.sum(test_seq_lengths)
# put the RNN back into training mode (i.e. turn on drop-out if applicable)
dmm.rnn.train()
return val_nll, test_nll
# if checkpoint files provided, load model and optimizer states from disk before we start training
if args.load_opt != '' and args.load_model != '':
load_checkpoint()
#################
# TRAINING LOOP #
#################
times = [time.time()]
for epoch in range(args.num_epochs):
# if specified, save model and optimizer states to disk every checkpoint_freq epochs
if args.checkpoint_freq > 0 and epoch > 0 and epoch % args.checkpoint_freq == 0:
save_checkpoint()
# accumulator for our estimate of the negative log likelihood (or rather -elbo) for this epoch
epoch_nll = 0.0
# prepare mini-batch subsampling indices for this epoch
shuffled_indices = np.arange(N_train_data)
np.random.shuffle(shuffled_indices)
# process each mini-batch; this is where we take gradient steps
for which_mini_batch in range(N_mini_batches):
epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices)
# report training diagnostics
times.append(time.time())
epoch_time = times[-1] - times[-2]
log("[training epoch %04d] %.4f \t\t\t\t(dt = %.3f sec)" %
(epoch, epoch_nll / N_train_time_slices, epoch_time))
# do evaluation on test and validation data and report results
if val_test_frequency > 0 and epoch > 0 and epoch % val_test_frequency == 0:
val_nll, test_nll = do_evaluation()
log("[val/test epoch %04d] %.4f %.4f" % (epoch, val_nll, test_nll))
# parse command-line arguments and execute the main method
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument('-n', '--num-epochs', type=int, default=5000)
parser.add_argument('-lr', '--learning-rate', type=float, default=0.0004)
parser.add_argument('-b1', '--beta1', type=float, default=0.96)
parser.add_argument('-b2', '--beta2', type=float, default=0.999)
parser.add_argument('-cn', '--clip-norm', type=float, default=20.0)
parser.add_argument('-lrd', '--lr-decay', type=float, default=0.99996)
parser.add_argument('-wd', '--weight-decay', type=float, default=0.6)
parser.add_argument('-mbs', '--mini-batch-size', type=int, default=20)
parser.add_argument('-ae', '--annealing-epochs', type=int, default=1000)
parser.add_argument('-maf', '--minimum-annealing-factor', type=float, default=0.1)
parser.add_argument('-rdr', '--rnn-dropout-rate', type=float, default=0.1)
parser.add_argument('-iafs', '--num-iafs', type=int, default=0)
parser.add_argument('-id', '--iaf-dim', type=int, default=100)
parser.add_argument('-cf', '--checkpoint-freq', type=int, default=0)
parser.add_argument('-lopt', '--load-opt', type=str, default='')
parser.add_argument('-lmod', '--load-model', type=str, default='')
parser.add_argument('-sopt', '--save-opt', type=str, default='')
parser.add_argument('-smod', '--save-model', type=str, default='')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('-l', '--log', type=str, default='dmm.log')
args = parser.parse_args()
main(args)
| 48.385321
| 104
| 0.659699
|
547e75370413b6895362924e1b549e2995a10400
| 6,528
|
py
|
Python
|
c11_keras_dl_selu.py
|
jamielapointe/jl_tensorflow_tutorial
|
f8f3751454d1727face4385a0b98a8ebbd47d5f6
|
[
"MIT"
] | null | null | null |
c11_keras_dl_selu.py
|
jamielapointe/jl_tensorflow_tutorial
|
f8f3751454d1727face4385a0b98a8ebbd47d5f6
|
[
"MIT"
] | null | null | null |
c11_keras_dl_selu.py
|
jamielapointe/jl_tensorflow_tutorial
|
f8f3751454d1727face4385a0b98a8ebbd47d5f6
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.ops import math_ops
from tensorflow.python.keras import backend as K
import numpy as np
from sklearn.metrics import (precision_score,
recall_score,
confusion_matrix)
def sparse_categorical_accuracy(y_true, y_pred):
y_true = math_ops.reduce_max(y_true, axis=-1)
y_pred = math_ops.argmax(y_pred, axis=-1)
# If the expected labels are float, we need to cast the int returned by
# argmax to compare.
if K.dtype(y_true) == K.floatx():
y_pred = math_ops.cast(y_pred, K.floatx())
return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
def as_keras_metric(method):
import functools
@functools.wraps(method)
def wrapper(self, args, **kwargs):
""" Wrapper for turning tensorflow metrics into keras metrics """
value, update_op = method(self, args, **kwargs)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([update_op]):
value = tf.identity(value)
return value
return wrapper
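# Note on usage (added for clarity, not in the original file): tf.metrics functions such as
# tf.metrics.precision return a (value, update_op) pair; the wrapper above forces update_op to
# run and then returns the value, which is what `precision = as_keras_metric(tf.metrics.precision)`
# relies on further down in run_training().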
class Keras_DL_SELU(object):
width = 28
height = 28
channels = 1
def __init__(self):
self.X_train = None
self.X_test = None
self.X_valid = None
self.y_train = None
self.y_test = None
self.y_valid = None
self.X_train_scaled = None
self.X_valid_scaled = None
self.X_test_scaled = None
self.model = None
def run(self):
self.preprocess_data()
self.run_training()
def preprocess_data(self):
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
self.X_valid = train_images[55000:,:,:] / 255.
self.y_valid = train_labels[55000:]
self.X_train = train_images[0:55000,:,:] / 255.
self.y_train = train_labels[0:55000]
self.X_test = test_images / 255.
self.y_test = test_labels
self.X_valid = self.X_valid.reshape(-1, self.width*self.height*self.channels)
self.X_train = self.X_train.reshape(-1, self.width*self.height*self.channels)
self.X_test = self.X_test.reshape(-1, self.width*self.height*self.channels)
means = self.X_train.mean(axis=0, keepdims=True)
stds = self.X_train.std (axis=0, keepdims=True)
self.X_train_scaled = (self.X_train - means) / stds
self.X_valid_scaled = (self.X_valid - means) / stds
self.X_test_scaled = (self.X_test - means) / stds
def setup_sim_layer(self):
self.model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense( 10, activation=tf.nn.softmax)
])
def setup_mnist_relu_layer(self):
data_format = 'channels_first'
self.model = keras.Sequential([
keras.layers.Reshape(
target_shape = (1, 28, 28),
input_shape = (28*28,)
),
keras.layers.Conv2D(
32,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu
),
keras.layers.MaxPooling2D(
(2,2),
(2,2),
padding='same',
data_format = data_format
),
keras.layers.Conv2D(
64,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu
),
keras.layers.MaxPooling2D(
(2,2),
(2,2),
padding='same',
data_format = data_format
),
keras.layers.Flatten(),
keras.layers.Dense(1024, activation=tf.nn.relu),
keras.layers.Dropout(0.4),
keras.layers.Dense(10)
])
def setup_simple_selu_layer(self):
self.model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(128, activation=tf.nn.selu,
kernel_initializer='lecun_normal'),
keras.layers.Dense( 10)
])
self.X_train = self.X_train_scaled
self.X_valid = self.X_valid_scaled
self.X_test = self.X_test_scaled
def run_training(self):
precision = as_keras_metric(tf.metrics.precision)
recall = as_keras_metric(tf.metrics.recall)
# self.setup_sim_layer()
# self.setup_simple_selu_layer()
self.setup_mnist_relu_layer()
self.model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])#, precision, recall])
callbacks = [
# Interrupt training if `val_loss` stops improving for over 5 epochs
tf.keras.callbacks.EarlyStopping(patience=5, monitor='val_loss'),
# Write TensorBoard logs to `./logs` directory
tf.keras.callbacks.TensorBoard(log_dir='./logs'),
]
self.model.fit(self.X_train, self.y_train, epochs=400, batch_size=50,
callbacks=callbacks,
validation_data=(self.X_valid, self.y_valid))
test_loss, test_acc = \
self.model.evaluate(self.X_test, self.y_test, batch_size=50)
y_pred = self.model.predict(self.X_test, batch_size=50)
conf_matrix = confusion_matrix(np.asarray(self.y_test),
np.asarray(y_pred.argmax(axis=1)))
test_prec = precision_score(np.asarray(self.y_test),
np.asarray(y_pred.argmax(axis=1)),
average=None)
test_recall = recall_score (np.asarray(self.y_test),
np.asarray(y_pred.argmax(axis=1)),
average=None)
print('')
print('Test Loss :', test_loss)
print('Test accuracy :', test_acc)
print('Test precision:', test_prec)
print('Test recall :', test_recall)
print('')
print('Confusion Matrix:')
print(conf_matrix)
if __name__ == '__main__':
keras_dl_selu = Keras_DL_SELU()
keras_dl_selu.run()
| 35.478261
| 89
| 0.575827
|
6f7863c71802dd8723aa784df6ae95e6af36f22d
| 322
|
py
|
Python
|
Estudos/Python_Exercicios_Curso_Em_Video/ex085.py
|
wiltonjr4/Python_Language
|
4433b86b19c7d90bb6cb3a921a3ee396f1cd5b5d
|
[
"MIT"
] | null | null | null |
Estudos/Python_Exercicios_Curso_Em_Video/ex085.py
|
wiltonjr4/Python_Language
|
4433b86b19c7d90bb6cb3a921a3ee396f1cd5b5d
|
[
"MIT"
] | null | null | null |
Estudos/Python_Exercicios_Curso_Em_Video/ex085.py
|
wiltonjr4/Python_Language
|
4433b86b19c7d90bb6cb3a921a3ee396f1cd5b5d
|
[
"MIT"
] | null | null | null |
total = [[], []]
for p, c in enumerate(range(0, 7)):
    num = int(input(f'Enter value {p+1}: '))
if num % 2 == 0:
total[0].append(num)
else:
total[1].append(num)
print(f'The even values entered were: {sorted(total[0])}')
print(f'The odd values entered were: {sorted(total[1])}')
| 24.769231
| 64
| 0.586957
|
6fa1d3555523eb1d2d8507765361a052f3edbc6b
| 2,243
|
py
|
Python
|
carball/analysis/stats/possession/turnovers.py
|
unitedroguegg/carball
|
4767f2c5d195b7d5d60e6a5575415262803acef7
|
[
"Apache-2.0"
] | 119
|
2018-09-14T02:14:19.000Z
|
2022-03-06T05:06:54.000Z
|
carball/analysis/stats/possession/turnovers.py
|
unitedroguegg/carball
|
4767f2c5d195b7d5d60e6a5575415262803acef7
|
[
"Apache-2.0"
] | 207
|
2018-09-06T18:53:06.000Z
|
2022-02-12T22:39:36.000Z
|
carball/analysis/stats/possession/turnovers.py
|
unitedroguegg/carball
|
4767f2c5d195b7d5d60e6a5575415262803acef7
|
[
"Apache-2.0"
] | 44
|
2018-09-10T16:54:13.000Z
|
2022-02-19T03:07:50.000Z
|
from typing import Dict
import pandas as pd
from carball.generated.api.stats.events_pb2 import Hit
from ....analysis.constants.field_constants import FieldConstants
from ....analysis.stats.stats import HitStat
from ....generated.api import game_pb2
from ....generated.api.player_pb2 import Player
from ....json_parser.game import Game
class TurnoverStat(HitStat):
field_constants = FieldConstants()
def initialize_hit_stat(self, game: Game, player_map: Dict[str, Player], data_frame: pd.DataFrame):
pass
def calculate_next_hit_stat(self, game: Game, proto_game: game_pb2.Game, saltie_hit: Hit, next_saltie_hit: Hit,
player_map: Dict[str, Player], hit_index: int):
hits = proto_game.game_stats.hits
hit_player = player_map[saltie_hit.player_id.id]
second_hit_player = player_map[next_saltie_hit.player_id.id]
# If there is a goal between 2nd hit and 3rd hit abort check
if not next_saltie_hit.HasField("next_hit_frame_number") or hit_index + 2 >= len(hits):
return
third_hit_player = player_map[hits[hit_index + 2].player_id.id]
if hit_player.is_orange != second_hit_player.is_orange and hit_player.is_orange != third_hit_player.is_orange:
# this is a turnover!
            # if the hit occurred on the same half as my team
my_half = (saltie_hit.ball_data.pos_y > 0) == hit_player.is_orange
neutral_zone = self.field_constants.get_neutral_zone(saltie_hit.ball_data)
self.assign_turnover(hit_player.stats.possession, my_half, neutral_zone)
self.assign_turnover(proto_game.teams[hit_player.is_orange].stats.possession,
my_half, neutral_zone)
second_hit_player.stats.possession.won_turnovers += 1
proto_game.teams[second_hit_player.is_orange].stats.possession.won_turnovers += 1
def assign_turnover(self, possession_proto, is_turnover_my_half, is_neutral):
possession_proto.turnovers += 1
if is_turnover_my_half and not is_neutral:
possession_proto.turnovers_on_my_half += 1
elif not is_neutral:
possession_proto.turnovers_on_their_half += 1
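# Illustrative walk-through (added for clarity, not in the original file): if hits i, i+1 and
# i+2 come from players A (blue), B (orange) and C (orange), then A's team differs from both
# followers, so assign_turnover() increments A's and the blue team's turnover counters (split
# into the my-half / their-half buckets unless the ball was in the neutral zone), while B and
# the orange team each get won_turnovers += 1.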
| 47.723404
| 118
| 0.705751
|
fa48cdafa26e7a81b114a2e288e5d0be5de34aeb
| 2,156
|
py
|
Python
|
2020_July_Leetcode_30_days_challenge/Week_3_Remove Linked List Elements/by_iteration.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 32
|
2020-01-05T13:37:16.000Z
|
2022-03-26T07:27:09.000Z
|
2020_July_Leetcode_30_days_challenge/Week_3_Remove Linked List Elements/by_iteration.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | null | null | null |
2020_July_Leetcode_30_days_challenge/Week_3_Remove Linked List Elements/by_iteration.py
|
coderMaruf/leetcode-1
|
20ffe26e43999e44c8acf9800acb371a49bb5853
|
[
"MIT"
] | 8
|
2020-06-18T16:17:27.000Z
|
2022-03-15T23:58:18.000Z
|
'''
Description:
Remove all elements from a linked list of integers that have value val.
Example:
Input: 1->2->6->3->4->5->6, val = 6
Output: 1->2->3->4->5
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
        # Use a dummy head to make it easier to handle the case of head-node removal
dummy_head = ListNode(-1)
dummy_head.next = head
prev, cur = dummy_head, head
while cur:
if cur.val == val:
# current node is target node
# update linkage, and remove target node with val
target = cur
prev.next = target.next
del target
# move to next position
cur = prev.next
else:
# move to next position
prev, cur = cur, cur.next
return dummy_head.next
# n : the length of linked list
## Time Complexity: O( n )
#
# The major overhead in time is the while loop iterating on cur, which is of O( n ).
## Space Complexity: O( 1 )
#
# The major overhead in space is the storage for two-pointers, which is of O( 1 ).
def linked_list_factory( elements ):
last_node = None
for element in reversed( elements ):
cur_node = ListNode( element )
cur_node.next = last_node
last_node = cur_node
return last_node
def linked_list_print( head: ListNode ):
cur = head
while cur:
print( cur.val, end = '->' )
cur = cur.next
print('None\n')
def test_bench():
head = linked_list_factory([1, 2, 6, 3, 4, 5, 6])
# before:
# 1->2->6->3->4->5->6->None
linked_list_print(head)
head = Solution().removeElements( head, 6)
# after:
# 1->2->3->4->5->None
linked_list_print(head)
if __name__ == '__main__':
test_bench()
| 18.912281
| 84
| 0.534323
|
6ca06c064637545e351302d4dcb7349c4b1b3388
| 3,085
|
py
|
Python
|
infrastructure-provisioning/src/general/scripts/os/ipynb_output_filter.py
|
roolrd/incubator-datalab
|
2045207ecd1b381193f1a1ec143cc968716ad989
|
[
"Apache-2.0"
] | 66
|
2020-10-03T08:36:48.000Z
|
2022-03-20T23:16:20.000Z
|
infrastructure-provisioning/src/general/scripts/os/ipynb_output_filter.py
|
roolrd/incubator-datalab
|
2045207ecd1b381193f1a1ec143cc968716ad989
|
[
"Apache-2.0"
] | 48
|
2019-02-28T12:11:33.000Z
|
2020-09-15T08:27:08.000Z
|
infrastructure-provisioning/src/general/scripts/os/ipynb_output_filter.py
|
roolrd/incubator-datalab
|
2045207ecd1b381193f1a1ec143cc968716ad989
|
[
"Apache-2.0"
] | 44
|
2019-01-14T10:31:55.000Z
|
2020-09-22T17:53:33.000Z
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import sys
import json
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('utf8')
if __name__ == "__main__":
version = None
try:
from jupyter_nbformat import reads, write
except ImportError:
try:
# New IPython
from nbformat import reads, write
except ImportError:
# Deprecated since IPython 4.0
from IPython.nbformat.current import reads, write
version = 'json'
input_file = sys.stdin.read()
if not version:
data = json.loads(input_file)
version = data['nbformat']
data = reads(input_file, version)
try:
# IPython
sheets = data.worksheets
except AttributeError:
# Jupyter
sheets = [data]
for sheet in sheets:
for cell in sheet.cells:
# Uncomment next 2 lines and comment next section to clear all output in notebook
# if "outputs" in cell:
# cell.outputs = []
if hasattr(cell, "outputs") and len(cell.outputs) >= 1:
for field in cell.outputs[0]:
if field == "execution_count":
cell.outputs[0].execution_count = None
elif field == "metadata":
cell.outputs[0].metadata = dict()
for field in ("execution_count",):
if field in cell:
cell[field] = None
for field in ("prompt_number", "execution_number"):
if field in cell:
del cell[field]
if "metadata" in cell:
for field in ("collapsed", "scrolled", "ExecuteTime"):
if field in cell.metadata:
del cell.metadata[field]
if hasattr(sheet.metadata, "widgets"):
del sheet.metadata["widgets"]
if hasattr(sheet.metadata.language_info, "version"):
del sheet.metadata.language_info["version"]
if 'signature' in data.metadata:
data.metadata['signature'] = ""
write(data, sys.stdout, version)
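# Hypothetical usage note (not part of the original script): the script reads a notebook on
# stdin and writes the stripped notebook to stdout, so it can be run ad hoc as
#   python3 ipynb_output_filter.py < input.ipynb > output.ipynb
# or wired up as a git clean filter (the filter name "clearoutput" is an illustrative choice):
#   git config filter.clearoutput.clean "python3 ipynb_output_filter.py"
#   echo "*.ipynb filter=clearoutput" >> .gitattributes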
| 33.532609
| 93
| 0.573096
|
6c941c5ed70c6470c40c476cb2fcc35fbeea2f04
| 1,177
|
py
|
Python
|
backend/utils/logging.py
|
kurisu-gh/DistributedReplays
|
7f9f15805bda2f7789162fb16cd856913f80b279
|
[
"Apache-2.0"
] | null | null | null |
backend/utils/logging.py
|
kurisu-gh/DistributedReplays
|
7f9f15805bda2f7789162fb16cd856913f80b279
|
[
"Apache-2.0"
] | null | null | null |
backend/utils/logging.py
|
kurisu-gh/DistributedReplays
|
7f9f15805bda2f7789162fb16cd856913f80b279
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Callable
backup_logger = logging.getLogger(__name__)
logger_callbacks = []
class ErrorLogger:
@staticmethod
def add_logging_callback(callback: Callable):
"""
Adds a callback for logging purposes.
:param callback: A function that takes in an exception
"""
logger_callbacks.append(callback)
@staticmethod
def log_error(exception: Exception, message: str = None, logger: logging.Logger = backup_logger):
"""
        Logs an exception in cases where we cannot raise an error.
        This will show the stack trace along with the exception.
        Uses a default logger if none is provided.
        :param exception: The exception that occurred.
        :param message: An optional message.
        :param logger: A logger to use; a default is provided if none is given.
:return:
"""
if message is None:
message = str(exception)
logger.exception(message)
try:
for callback in logger_callbacks:
callback(exception)
except Exception as e:
backup_logger.exception(e)
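# Hypothetical usage sketch (not part of the original module): register a callback and route an
# exception through it. The callback, guard and message below are illustrative only.
if __name__ == '__main__':
    def _report(exc: Exception) -> None:
        backup_logger.warning('callback saw: %s', exc)

    ErrorLogger.add_logging_callback(_report)
    try:
        1 / 0
    except ZeroDivisionError as err:
        ErrorLogger.log_error(err, message='division failed in the usage sketch')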
| 30.179487
| 101
| 0.643161
|
38644170dd42a901f91c97273b9598d5f75a6243
| 4,571
|
py
|
Python
|
ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/classifiers/elasticnet_classifier.py
|
blocktorch/blocktorch
|
044aa269813ab22c5fd27f84272e5fb540fc522b
|
[
"MIT"
] | 1
|
2021-09-23T12:23:02.000Z
|
2021-09-23T12:23:02.000Z
|
ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/classifiers/elasticnet_classifier.py
|
blocktorch/blocktorch
|
044aa269813ab22c5fd27f84272e5fb540fc522b
|
[
"MIT"
] | null | null | null |
ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/classifiers/elasticnet_classifier.py
|
blocktorch/blocktorch
|
044aa269813ab22c5fd27f84272e5fb540fc522b
|
[
"MIT"
] | null | null | null |
"""Elastic Net Classifier. Uses Logistic Regression with elasticnet penalty as the base estimator."""
import warnings
import numpy as np
from sklearn.linear_model import LogisticRegression
from skopt.space import Real
from blocktorch.model_family import ModelFamily
from blocktorch.pipelines.components.estimators import Estimator
from blocktorch.problem_types import ProblemTypes
class ElasticNetClassifier(Estimator):
"""Elastic Net Classifier. Uses Logistic Regression with elasticnet penalty as the base estimator.
Args:
penalty ({"l1", "l2", "elasticnet", "none"}): The norm used in penalization. Defaults to "elasticnet".
C (float): Inverse of regularization strength. Must be a positive float. Defaults to 1.0.
l1_ratio (float): The mixing parameter, with 0 <= l1_ratio <= 1. Only used if penalty='elasticnet'. Setting l1_ratio=0 is equivalent to using penalty='l2',
while setting l1_ratio=1 is equivalent to using penalty='l1'. For 0 < l1_ratio <1, the penalty is a combination of L1 and L2. Defaults to 0.15.
multi_class ({"auto", "ovr", "multinomial"}): If the option chosen is "ovr", then a binary problem is fit for each label.
For "multinomial" the loss minimised is the multinomial loss fit across the entire probability distribution,
even when the data is binary. "multinomial" is unavailable when solver="liblinear".
"auto" selects "ovr" if the data is binary, or if solver="liblinear", and otherwise selects "multinomial". Defaults to "auto".
solver ({"newton-cg", "lbfgs", "liblinear", "sag", "saga"}): Algorithm to use in the optimization problem.
For small datasets, "liblinear" is a good choice, whereas "sag" and "saga" are faster for large ones.
For multiclass problems, only "newton-cg", "sag", "saga" and "lbfgs" handle multinomial loss; "liblinear" is limited to one-versus-rest schemes.
- "newton-cg", "lbfgs", "sag" and "saga" handle L2 or no penalty
- "liblinear" and "saga" also handle L1 penalty
- "saga" also supports "elasticnet" penalty
- "liblinear" does not support setting penalty='none'
Defaults to "saga".
        n_jobs (int): Number of parallel threads used when fitting the model. Note that creating thread contention will significantly slow down the algorithm. Defaults to -1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Elastic Net Classifier"
hyperparameter_ranges = {"C": Real(0.01, 10), "l1_ratio": Real(0, 1)}
"""{
"C": Real(0.01, 10),
"l1_ratio": Real(0, 1)
}"""
model_family = ModelFamily.LINEAR_MODEL
"""ModelFamily.LINEAR_MODEL"""
supported_problem_types = [
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def __init__(
self,
penalty="elasticnet",
C=1.0,
l1_ratio=0.15,
multi_class="auto",
solver="saga",
n_jobs=-1,
random_seed=0,
**kwargs,
):
parameters = {
"penalty": penalty,
"C": C,
"l1_ratio": l1_ratio,
"n_jobs": n_jobs,
"multi_class": multi_class,
"solver": solver,
}
parameters.update(kwargs)
lr_classifier = LogisticRegression(random_state=random_seed, **parameters)
super().__init__(
parameters=parameters, component_obj=lr_classifier, random_seed=random_seed
)
def fit(self, X, y):
"""Fits ElasticNet classifier component to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series): The target training data of length [n_samples].
Returns:
self
"""
warnings.filterwarnings("ignore", message="The max_iter was reached")
return super().fit(X, y)
@property
def feature_importance(self):
"""Feature importance for fitted ElasticNet classifier."""
coef_ = self._component_obj.coef_
# binary classification case
if len(coef_) <= 2:
return coef_[0]
else:
# multiclass classification case
return np.linalg.norm(coef_, axis=0, ord=2)
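# Hypothetical usage sketch (not part of the original module). The dataset and parameter values
# below are illustrative assumptions rather than blocktorch defaults:
#
#   from sklearn.datasets import load_breast_cancer
#   X, y = load_breast_cancer(return_X_y=True)
#   clf = ElasticNetClassifier(C=0.5, l1_ratio=0.3)
#   clf.fit(X, y)
#   print(clf.feature_importance[:5])  # coefficients of the fitted penalized logistic regression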
| 42.324074
| 166
| 0.644717
|
924807c060f5c37dc3578a8e46a41b1fce7cd4bb
| 2,864
|
py
|
Python
|
examples/misc/svg_filter_pie.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
examples/misc/svg_filter_pie.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
examples/misc/svg_filter_pie.py
|
jbbrokaw/matplotlib
|
86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
Demonstrate SVG filtering effects which might be used with mpl.
The pie chart drawing code is borrowed from pie_demo.py
Note that the filtering effects are only effective if your SVG renderer
supports them.
"""
import matplotlib
matplotlib.use("Svg")
import matplotlib.pyplot as plt
from matplotlib.patches import Shadow
# make a square figure and axes
fig1 = plt.figure(1, figsize=(6, 6))
ax = fig1.add_axes([0.1, 0.1, 0.8, 0.8])
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15, 30, 45, 10]
explode = (0, 0.05, 0, 0)
# We want to draw the shadow for each pie, but we will not use the "shadow"
# option as it doesn't save the references to the shadow patches.
pies = ax.pie(fracs, explode=explode, labels=labels, autopct='%1.1f%%')
for w in pies[0]:
# set the id with the label.
w.set_gid(w.get_label())
# we don't want to draw the edge of the pie
w.set_ec("none")
for w in pies[0]:
# create shadow patch
s = Shadow(w, -0.01, -0.01)
s.set_gid(w.get_gid()+"_shadow")
s.set_zorder(w.get_zorder() - 0.1)
ax.add_patch(s)
# save
from StringIO import StringIO
f = StringIO()
plt.savefig(f, format="svg")
import xml.etree.cElementTree as ET
# filter definition for shadow using a gaussian blur
# and lightening effect.
# The lightening filter is copied from http://www.w3.org/TR/SVG/filters.html
# I tested it with Inkscape and Firefox3. "Gaussian blur" is supported
# in both, but the lightening effect only in Inkscape. Also note
# that Inkscape's export may not support it.
filter_def = """
<defs xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
<filter id='dropshadow' height='1.2' width='1.2'>
<feGaussianBlur result='blur' stdDeviation='2'/>
</filter>
<filter id='MyFilter' filterUnits='objectBoundingBox' x='0' y='0' width='1' height='1'>
<feGaussianBlur in='SourceAlpha' stdDeviation='4%' result='blur'/>
<feOffset in='blur' dx='4%' dy='4%' result='offsetBlur'/>
<feSpecularLighting in='blur' surfaceScale='5' specularConstant='.75'
specularExponent='20' lighting-color='#bbbbbb' result='specOut'>
<fePointLight x='-5000%' y='-10000%' z='20000%'/>
</feSpecularLighting>
<feComposite in='specOut' in2='SourceAlpha' operator='in' result='specOut'/>
<feComposite in='SourceGraphic' in2='specOut' operator='arithmetic'
k1='0' k2='1' k3='1' k4='0'/>
</filter>
</defs>
"""
tree, xmlid = ET.XMLID(f.getvalue())
# insert the filter definition in the svg dom tree.
tree.insert(0, ET.XML(filter_def))
for i, pie_name in enumerate(labels):
pie = xmlid[pie_name]
pie.set("filter", 'url(#MyFilter)')
shadow = xmlid[pie_name + "_shadow"]
shadow.set("filter", 'url(#dropshadow)')
fn = "svg_filter_pie.svg"
print("Saving '%s'" % fn)
ET.ElementTree(tree).write(fn)
| 29.833333
| 91
| 0.673883
|
ea93824bae4f42a53dbd5b018734f006402d15ef
| 3,090
|
py
|
Python
|
models/pointnet2_sem_seg_msg.py
|
yufeiwang63/Pointnet_Pointnet2_pytorch
|
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
|
[
"MIT"
] | null | null | null |
models/pointnet2_sem_seg_msg.py
|
yufeiwang63/Pointnet_Pointnet2_pytorch
|
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
|
[
"MIT"
] | null | null | null |
models/pointnet2_sem_seg_msg.py
|
yufeiwang63/Pointnet_Pointnet2_pytorch
|
f9078a71b973c13ae7ffa897e142dc7b1e8e88be
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
from haptic.Pointnet_Pointnet2_pytorch.models.pointnet2_utils import PointNetSetAbstractionMsg,PointNetFeaturePropagation
class get_model(nn.Module):
def __init__(self, use_batch_norm, num_classes):
super(get_model, self).__init__()
self.sa1 = PointNetSetAbstractionMsg(1024, [0.05, 0.1], [16, 32], 7, [[16, 16, 32], [32, 32, 64]], use_batch_norm=use_batch_norm)
self.sa2 = PointNetSetAbstractionMsg(256, [0.1, 0.2], [16, 32], 32+64, [[64, 64, 128], [64, 96, 128]], use_batch_norm=use_batch_norm)
self.sa3 = PointNetSetAbstractionMsg(64, [0.2, 0.4], [16, 32], 128+128, [[128, 196, 256], [128, 196, 256]], use_batch_norm=use_batch_norm)
self.sa4 = PointNetSetAbstractionMsg(16, [0.4, 0.8], [16, 32], 256+256, [[256, 256, 512], [256, 384, 512]], use_batch_norm=use_batch_norm)
self.fp4 = PointNetFeaturePropagation(512+512+256+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp3 = PointNetFeaturePropagation(128+128+256, [256, 256], use_batch_norm=use_batch_norm)
self.fp2 = PointNetFeaturePropagation(32+64+256, [256, 128], use_batch_norm=use_batch_norm)
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 128], use_batch_norm=use_batch_norm)
self.conv1 = nn.Conv1d(128, 128, 1)
if use_batch_norm:
self.bn1 = nn.BatchNorm1d(128)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(128, num_classes, 1)
self.use_batch_norm = use_batch_norm
def forward(self, xyz):
l0_points = xyz
l0_xyz = xyz[:,:3,:]
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
if self.use_batch_norm:
x = self.drop1(F.relu(self.bn1(self.conv1(l0_points))))
else:
x = F.relu(self.conv1(l0_points))
x = self.conv2(x)
# this is not needed with BCElogit loss
# x = F.log_softmax(x, dim=1)
x = x.permute(0, 2, 1)
return x, l4_points
class get_loss_original(nn.Module):
def __init__(self):
super(get_loss_original, self).__init__()
def forward(self, pred, target, trans_feat, weight):
total_loss = F.nll_loss(pred, target, weight=weight)
return total_loss
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
self.loss = nn.BCEWithLogitsLoss()
def forward(self, pred, target, trans_feat, weight):
total_loss = self.loss(pred, target)
return total_loss
if __name__ == '__main__':
import torch
    model = get_model(use_batch_norm=True, num_classes=13)
    xyz = torch.rand(6, 9, 2048)
    print(model(xyz))
| 42.916667
| 146
| 0.653074
|
d9c564f8709f94560a0f634fb56021f4170deca8
| 3,053
|
py
|
Python
|
core/python/kungfu/practice/master.py
|
Grass-CLP/kungfu
|
6085a9e642c092c97af0df849fb18ceeb8896bba
|
[
"Apache-2.0"
] | null | null | null |
core/python/kungfu/practice/master.py
|
Grass-CLP/kungfu
|
6085a9e642c092c97af0df849fb18ceeb8896bba
|
[
"Apache-2.0"
] | null | null | null |
core/python/kungfu/practice/master.py
|
Grass-CLP/kungfu
|
6085a9e642c092c97af0df849fb18ceeb8896bba
|
[
"Apache-2.0"
] | null | null | null |
import sys
import traceback
import json
import time
import functools
import pyyjj
import pywingchun
from . import os_signal
from kungfu import nanomsg
from kungfu.log import create_logger
import kungfu.service.kfs as kfs
from kungfu.service.kfs import system
from kungfu.service.kfs import calendar
import kungfu.yijinjing.journal as kfj
import kungfu.yijinjing.time as kft
from kungfu.wingchun.calendar import Calendar
import kungfu.yijinjing.msg as yjj_msg
SECOND_IN_NANO = int(1e9)
class Master(pyyjj.master):
def __init__(self, ctx):
pyyjj.master.__init__(self, pyyjj.location(kfj.MODES['live'], kfj.CATEGORIES['system'], 'master', 'master', ctx.locator), ctx.low_latency)
self.ctx = ctx
self.ctx.master = self
self.ctx.logger = create_logger("watcher", ctx.log_level, self.io_device.home)
self.ctx.apprentices = {}
ctx.calendar = Calendar(ctx)
ctx.trading_day = ctx.calendar.trading_day
self.publish_time(yjj_msg.TradingDay, ctx.calendar.trading_day_ns)
ctx.master = self
def on_notice(self, event):
try:
kfs.handle(event.msg_type, self.ctx, json.loads(event.to_string()))
except Exception as err:
exc_type, exc_obj, exc_tb = sys.exc_info()
self.ctx.logger.error('Invalid passive notice %s, [%s] %s', event.to_string(), exc_type, traceback.format_exception(exc_type, exc_obj, exc_tb))
def on_interval_check(self, nanotime):
kfs.run_tasks(self.ctx)
def on_register(self, event):
self.send_time(event.source, yjj_msg.TradingDay, self.ctx.calendar.trading_day_ns)
def on_exit(self):
self.ctx.logger.info('kungfu master stopping')
for pid in self.ctx.apprentices:
apprentice = self.ctx.apprentices[pid]['process']
if apprentice.is_running():
self.ctx.logger.info('terminating apprentice %s pid %d', self.ctx.apprentices[pid]['location'].uname, pid)
apprentice.terminate()
count = 0
time_to_wait = 10
while count < time_to_wait:
remaining = list(
map(lambda pid: [self.ctx.apprentices[pid]['location'].uname] if self.ctx.apprentices[pid]['process'].is_running() else [],
self.ctx.apprentices))
remaining = functools.reduce(lambda x, y: x + y, remaining) if remaining else []
if remaining:
self.ctx.logger.info('terminating apprentices, remaining %s, count down %ds', remaining, time_to_wait - count)
time.sleep(1)
count = count + 1
else:
break
for pid in self.ctx.apprentices:
apprentice = self.ctx.apprentices[pid]['process']
if apprentice.is_running():
self.ctx.logger.warn('killing apprentice %s pid %d', self.ctx.apprentices[pid]['location'].uname, pid)
apprentice.kill()
self.ctx.logger.info('kungfu master stopped')
| 35.917647
| 155
| 0.654111
|
e1f0685f08772361aa627b99a31decab8fd7fb79
| 2,156
|
py
|
Python
|
items/__init__.py
|
Belvarm/roguelike-tutorial
|
ea989c080b0f7dd61c38b5719ab8e502a45a0489
|
[
"MIT"
] | null | null | null |
items/__init__.py
|
Belvarm/roguelike-tutorial
|
ea989c080b0f7dd61c38b5719ab8e502a45a0489
|
[
"MIT"
] | null | null | null |
items/__init__.py
|
Belvarm/roguelike-tutorial
|
ea989c080b0f7dd61c38b5719ab8e502a45a0489
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
import graphic
from actions import Impossible
if TYPE_CHECKING:
from actions import ActionWithItem
from location import Location
from inventory import Inventory
class Item(graphic.Graphic):
render_order = 1
def __init__(self) -> None:
self.owner: Optional[Inventory] = None
self.location: Optional[Location] = None
def lift(self) -> None:
"""Remove this item from any of its containers."""
if self.owner:
self.owner.contents.remove(self)
self.owner = None
if self.location:
item_list = self.location.map.items[self.location.xy]
item_list.remove(self)
if not item_list:
del self.location.map.items[self.location.xy]
self.location = None
def place(self, location: Location) -> None:
"""Place this item on the floor at the given location."""
assert not self.location, "This item already has a location."
assert not self.owner, "Can't be placed because this item is currently owned."
self.location = location
items = location.map.items
try:
items[location.xy].append(self)
except KeyError:
items[location.xy] = [self]
def plan_activate(self, action: ActionWithItem) -> ActionWithItem:
"""Item activated as part of an action.
Assume that action has an actor which is holding this items entity.
"""
return action
def action_activate(self, action: ActionWithItem) -> None:
raise Impossible(f"You can do nothing with the {self.name}.")
    def consume(self, action: ActionWithItem) -> None:
        """Remove this item from the actor's inventory."""
assert action.item is self
action.item.lift()
def action_drink(self, action: ActionWithItem) -> None:
"""Drink this item."""
raise Impossible("You can't drink that.")
def action_eat(self, action: ActionWithItem) -> None:
"""Eat this item."""
raise Impossible("You can't eat that.")
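# Illustrative sketch (not part of the original module): concrete items subclass Item and
# override the action_* hooks, e.g.
#
#   class HealingPotion(Item):
#       name = "healing potion"  # assumes a `name` attribute is settable via graphic.Graphic
#       def action_drink(self, action: ActionWithItem) -> None:
#           self.consume(action)  # removes the potion from the actor's inventory
#
# place() and lift() keep at most one of `owner` / `location` set at any time.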
| 32.179104
| 86
| 0.637291
|
4eb7a6965e734a14df75736d41bb8050fd850cb0
| 7,470
|
py
|
Python
|
tests/test_utils/test_misc.py
|
mrzhuzhe/mmdetection
|
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils/test_misc.py
|
mrzhuzhe/mmdetection
|
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils/test_misc.py
|
mrzhuzhe/mmdetection
|
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import tempfile
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import (center_of_mass, filter_scores_and_topk,
flip_tensor, mask2ndarray, select_single_mlvl)
from mmdet.utils import find_latest_checkpoint
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Return:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
@pytest.mark.parametrize('mask', [
torch.ones((28, 28)),
torch.zeros((28, 28)),
torch.rand(28, 28) > 0.5,
torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
])
def test_center_of_mass(mask):
center_h, center_w = center_of_mass(mask)
if mask.shape[0] == 4:
assert center_h == 1.5
assert center_w == 1.5
assert isinstance(center_h, torch.Tensor) \
and isinstance(center_w, torch.Tensor)
assert 0 <= center_h <= 28 \
and 0 <= center_w <= 28
def test_flip_tensor():
img = np.random.random((1, 3, 10, 10))
src_tensor = torch.from_numpy(img)
# test flip_direction parameter error
with pytest.raises(AssertionError):
flip_tensor(src_tensor, 'flip')
# test tensor dimension
with pytest.raises(AssertionError):
flip_tensor(src_tensor[0], 'vertical')
hfilp_tensor = flip_tensor(src_tensor, 'horizontal')
expected_hflip_tensor = torch.from_numpy(img[..., ::-1, :].copy())
expected_hflip_tensor.allclose(hfilp_tensor)
vfilp_tensor = flip_tensor(src_tensor, 'vertical')
expected_vflip_tensor = torch.from_numpy(img[..., ::-1].copy())
expected_vflip_tensor.allclose(vfilp_tensor)
diag_filp_tensor = flip_tensor(src_tensor, 'diagonal')
expected_diag_filp_tensor = torch.from_numpy(img[..., ::-1, ::-1].copy())
expected_diag_filp_tensor.allclose(diag_filp_tensor)
def test_select_single_mlvl():
mlvl_tensors = [torch.rand(2, 1, 10, 10)] * 5
mlvl_tensor_list = select_single_mlvl(mlvl_tensors, 1)
assert len(mlvl_tensor_list) == 5 and mlvl_tensor_list[0].ndim == 3
def test_filter_scores_and_topk():
score = torch.tensor([[0.1, 0.3, 0.2], [0.12, 0.7, 0.9], [0.02, 0.8, 0.08],
[0.4, 0.1, 0.08]])
bbox_pred = torch.tensor([[0.2, 0.3], [0.4, 0.7], [0.1, 0.1], [0.5, 0.1]])
score_thr = 0.15
nms_pre = 4
# test results type error
with pytest.raises(NotImplementedError):
filter_scores_and_topk(score, score_thr, nms_pre, (score, ))
filtered_results = filter_scores_and_topk(
score, score_thr, nms_pre, results=dict(bbox_pred=bbox_pred))
filtered_score, labels, keep_idxs, results = filtered_results
assert filtered_score.allclose(torch.tensor([0.9, 0.8, 0.7, 0.4]))
assert labels.allclose(torch.tensor([2, 1, 1, 0]))
assert keep_idxs.allclose(torch.tensor([1, 2, 1, 3]))
assert results['bbox_pred'].allclose(
torch.tensor([[0.4, 0.7], [0.1, 0.1], [0.4, 0.7], [0.5, 0.1]]))
def test_find_latest_checkpoint():
with tempfile.TemporaryDirectory() as tmpdir:
path = tmpdir
latest = find_latest_checkpoint(path)
# There are no checkpoints in the path.
assert latest is None
path = tmpdir + '/none'
latest = find_latest_checkpoint(path)
# The path does not exist.
assert latest is None
with tempfile.TemporaryDirectory() as tmpdir:
with open(tmpdir + '/latest.pth', 'w') as f:
f.write('latest')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == tmpdir + '/latest.pth'
with tempfile.TemporaryDirectory() as tmpdir:
with open(tmpdir + '/iter_4000.pth', 'w') as f:
f.write('iter_4000')
with open(tmpdir + '/iter_8000.pth', 'w') as f:
f.write('iter_8000')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == tmpdir + '/iter_8000.pth'
with tempfile.TemporaryDirectory() as tmpdir:
with open(tmpdir + '/epoch_1.pth', 'w') as f:
f.write('epoch_1')
with open(tmpdir + '/epoch_2.pth', 'w') as f:
f.write('epoch_2')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == tmpdir + '/epoch_2.pth'
| 36.617647
| 80
| 0.608701
|
7ae23317b4d1495a492890e0843cb0f425d8e89b
| 42,794
|
py
|
Python
|
Docker-Compose2UML_nonokawa2019/docker-compose2uml/src/main/resources/Lib/site-packages/docker/models/containers.py
|
kisolab/astah_plugins_released
|
e1ed674e998f953ab9becefc9bd9c474b8c569b7
|
[
"Apache-2.0"
] | null | null | null |
Docker-Compose2UML_nonokawa2019/docker-compose2uml/src/main/resources/Lib/site-packages/docker/models/containers.py
|
kisolab/astah_plugins_released
|
e1ed674e998f953ab9becefc9bd9c474b8c569b7
|
[
"Apache-2.0"
] | null | null | null |
Docker-Compose2UML_nonokawa2019/docker-compose2uml/src/main/resources/Lib/site-packages/docker/models/containers.py
|
kisolab/astah_plugins_released
|
e1ed674e998f953ab9becefc9bd9c474b8c569b7
|
[
"Apache-2.0"
] | 4
|
2021-02-11T03:51:39.000Z
|
2021-02-12T05:10:43.000Z
|
import copy
import ntpath
from collections import namedtuple
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import (
ContainerError, DockerException, ImageNotFound,
NotFound, create_unexpected_kwargs_error
)
from ..types import HostConfig
from ..utils import version_gte
from .images import Image
from .resource import Collection, Model
class Container(Model):
""" Local representation of a container object. Detailed configuration may
be accessed through the :py:attr:`attrs` attribute. Note that local
attributes are cached; users may call :py:meth:`reload` to
query the Docker daemon for the current properties, causing
:py:attr:`attrs` to be refreshed.
"""
@property
def name(self):
"""
The name of the container.
"""
if self.attrs.get('Name') is not None:
return self.attrs['Name'].lstrip('/')
@property
def image(self):
"""
The image of the container.
"""
image_id = self.attrs.get('ImageID', self.attrs['Image'])
if image_id is None:
return None
return self.client.images.get(image_id.split(':')[1])
@property
def labels(self):
"""
The labels of a container as dictionary.
"""
try:
result = self.attrs['Config'].get('Labels')
return result or {}
except KeyError:
raise DockerException(
'Label data is not available for sparse objects. Call reload()'
' to retrieve all information'
)
@property
def status(self):
"""
The status of the container. For example, ``running``, or ``exited``.
"""
if isinstance(self.attrs['State'], dict):
return self.attrs['State']['Status']
return self.attrs['State']
@property
def ports(self):
"""
The ports that the container exposes as a dictionary.
"""
return self.attrs.get('NetworkSettings', {}).get('Ports', {})
def attach(self, **kwargs):
"""
Attach to this container.
:py:meth:`logs` is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
Returns:
By default, the container's output as a single string.
If ``stream=True``, an iterator of output strings.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach(self.id, **kwargs)
def attach_socket(self, **kwargs):
"""
Like :py:meth:`attach`, but returns the underlying socket-like object
for the HTTP request.
Args:
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach_socket(self.id, **kwargs)
def commit(self, repository=None, tag=None, **kwargs):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
<https://docs.docker.com/reference/api/docker_remote_api/>`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.commit(self.id, repository=repository, tag=tag,
**kwargs)
return self.client.images.get(resp['Id'])
def diff(self):
"""
Inspect changes on a container's filesystem.
Returns:
(str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.diff(self.id)
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
socket=False, environment=None, workdir=None, demux=False):
"""
Run a command inside this container. Similar to
``docker exec``.
Args:
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
detach (bool): If true, detach from the exec command.
Default: False
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations. Default: False
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
demux (bool): Return stdout and stderr separately
Returns:
(ExecResult): A tuple of (exit_code, output)
exit_code: (int):
Exit code for the executed command or ``None`` if
either ``stream`` or ``socket`` is ``True``.
output: (generator, bytes, or tuple):
If ``stream=True``, a generator yielding response chunks.
If ``socket=True``, a socket object for the connection.
If ``demux=True``, a tuple of two bytes: stdout and stderr.
A bytestring containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment,
workdir=workdir,
)
exec_output = self.client.api.exec_start(
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket,
demux=demux
)
if socket or stream:
return ExecResult(None, exec_output)
return ExecResult(
self.client.api.exec_inspect(resp['Id'])['ExitCode'],
exec_output
)
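# A minimal usage sketch for exec_run (container and command names below are
# hypothetical, not part of this module):
#
#     exit_code, output = container.exec_run('ls /tmp')
#     result = container.exec_run('tail -f /var/log/app.log', stream=True)
#     for chunk in result.output:
#         ...
#
# When stream=True or socket=True the exit_code field of ExecResult is None,
# as described in the docstring above.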
def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Export the contents of the container's filesystem as a tar archive.
Args:
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(str): The filesystem tar archive
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.export(self.id, chunk_size)
def get_archive(self, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
Args:
path (str): Path to the file or folder to retrieve
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(tuple): First element is a raw tar data stream. Second element is
a dict containing ``stat`` information on the specified ``path``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> f = open('./sh_bin.tar', 'wb')
>>> bits, stat = container.get_archive('/bin/sh')
>>> print(stat)
{'name': 'sh', 'size': 1075464, 'mode': 493,
'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
>>> for chunk in bits:
... f.write(chunk)
>>> f.close()
"""
return self.client.api.get_archive(self.id, path, chunk_size)
def kill(self, signal=None):
"""
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.kill(self.id, signal=signal)
def logs(self, **kwargs):
"""
Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.logs(self.id, **kwargs)
def pause(self):
"""
Pauses all processes within this container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.pause(self.id)
def put_archive(self, path, data):
"""
Insert a file or folder in this container using a tar archive as
source.
Args:
path (str): Path inside the container where the file(s) will be
extracted. Must exist.
data (bytes): tar data to be extracted
Returns:
(bool): True if the call succeeds.
Raises:
:py:class:`~docker.errors.APIError` If an error occurs.
"""
return self.client.api.put_archive(self.id, path, data)
def remove(self, **kwargs):
"""
Remove this container. Similar to the ``docker rm`` command.
Args:
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_container(self.id, **kwargs)
def rename(self, name):
"""
Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.rename(self.id, name)
def resize(self, height, width):
"""
Resize the tty session.
Args:
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.resize(self.id, height, width)
def restart(self, **kwargs):
"""
Restart this container. Similar to the ``docker restart`` command.
Args:
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.restart(self.id, **kwargs)
def start(self, **kwargs):
"""
Start this container. Similar to the ``docker start`` command, but
doesn't support attach options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.start(self.id, **kwargs)
def stats(self, **kwargs):
"""
Stream statistics for this container. Similar to the
``docker stats`` command.
Args:
decode (bool): If set to true, stream will be decoded into dicts
on the fly. Only applicable if ``stream`` is True.
False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.stats(self.id, **kwargs)
def stop(self, **kwargs):
"""
Stops a container. Similar to the ``docker stop`` command.
Args:
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.stop(self.id, **kwargs)
def top(self, **kwargs):
"""
Display the running processes of the container.
Args:
ps_args (str): An optional arguments passed to ps (e.g. ``aux``)
Returns:
(str): The output of the top
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.top(self.id, **kwargs)
def unpause(self):
"""
Unpause all processes within the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.unpause(self.id)
def update(self, **kwargs):
"""
Update resource configuration of the containers.
Args:
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.update_container(self.id, **kwargs)
def wait(self, **kwargs):
"""
Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.wait(self.id, **kwargs)
class ContainerCollection(Collection):
model = Container
def run(self, image, command=None, stdout=True, stderr=False,
remove=False, **kwargs):
"""
Run a container. By default, it will wait for the container to finish
and return its logs, similar to ``docker run``.
If the ``detach`` argument is ``True``, it will start the container
and immediately return a :py:class:`Container` object, similar to
``docker run -d``.
Example:
Run a container and get its output:
>>> import docker
>>> client = docker.from_env()
>>> client.containers.run('alpine', 'echo hello world')
b'hello world\\n'
Run a container and detach:
>>> container = client.containers.run('bfirsh/reticulate-splines',
detach=True)
>>> container.logs()
'Reticulating spline 1...\\nReticulating spline 2...\\n'
Args:
image (str): The image to run.
command (str or list): The command to run in the container.
auto_remove (bool): enable auto-removal of the container on daemon
side when the container's process exits.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
value between 10 and 1000.
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cgroup_parent (str): Override the default parent cgroup.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
cpu_rt_period (int): Limit CPU real-time period in microseconds.
cpu_rt_runtime (int): Limit CPU real-time runtime in microseconds.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
device_cgroup_rules (:py:class:`list`): A list of cgroup rules to
apply to the container.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a
device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (:py:class:`list`): Expose host devices to the container,
as a list of strings in the form
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
dns_search (:py:class:`list`): DNS search domains.
domainname (str or list): Set custom DNS search domains.
entrypoint (str or list): The entrypoint for the container.
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
healthcheck (dict): Specify a test to perform to confirm that the
container is healthy.
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards
signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
kernel_memory (int or str): Kernel memory limit
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
links (dict): Mapping of links using the
``{'container': 'alias'}`` format. The alias is optional.
Containers declared in this dict will be linked to the new
container using the provided alias. Default: ``None``.
log_config (LogConfig): Logging configuration.
lxc_conf (dict): LXC config.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
mem_reservation (int or str): Memory soft limit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
mounts (:py:class:`list`): Specification for mounts to be added to
the container. More powerful alternative to ``volumes``. Each
item in the list is expected to be a
:py:class:`docker.types.Mount` object.
name (str): The name for this container.
nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected
to at creation time. You can connect to additional networks
using :py:meth:`Network.connect`. Incompatible with
``network_mode``.
network_disabled (bool): Disable networking.
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container
on the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
pid_mode (str): If set to ``host``, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
platform (str): Platform in the format ``os[/arch[/variant]]``.
Only used if the method needs to pull the requested image.
ports (dict): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
``port/protocol``, where the protocol is either ``tcp``,
``udp``, or ``sctp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
- The port number, as an integer. For example,
``{'2222/tcp': 3333}`` will expose port 2222 inside the
container as port 3333 on the host.
- ``None``, to assign a random host port. For example,
``{'2222/tcp': None}``.
- A tuple of ``(address, port)`` if you want to specify the
host interface. For example,
``{'1111/tcp': ('127.0.0.1', 1111)}``.
- A list of integers, if you want to bind multiple host ports
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
only.
remove (bool): Remove the container when it has finished running.
Default: ``False``.
restart_policy (dict): Restart the container when it exits.
Configured as a dictionary with keys:
- ``Name`` One of ``on-failure``, or ``always``.
- ``MaximumRetryCount`` Number of times to restart the
container on failure.
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
runtime (str): Runtime to use with this container.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
stdin_open (bool): Keep ``STDIN`` open even if not attached.
stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
Default: ``True``.
stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
Default: ``False``.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
stream (bool): If true and ``detach`` is false, return a log
generator instead of a string. Ignored if ``detach`` is true.
Default: ``False``.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
For example:
.. code-block:: python
{
'/mnt/vol2': '',
'/mnt/vol1': 'size=3G,uid=1000'
}
tty (bool): Allocate a pseudo-TTY.
ulimits (:py:class:`list`): Ulimits to set inside the container,
as a list of :py:class:`docker.types.Ulimit` instances.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
uts_mode (str): Sets the UTS namespace mode for the container.
Supported values are: ``host``
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
volume name, and the value is a dictionary with the keys:
- ``bind`` The path to mount the volume inside the container
- ``mode`` Either ``rw`` to mount the volume read/write, or
``ro`` to mount it read-only.
For example:
.. code-block:: python
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
depending on the value of the ``stdout`` and ``stderr`` arguments.
``STDOUT`` and ``STDERR`` may be read only if either the ``json-file``
or ``journald`` logging driver is used. Thus, if you are using neither of
these drivers, a ``None`` object is returned instead. See the
`Engine API documentation
<https://docs.docker.com/engine/api/v1.30/#operation/ContainerLogs/>`_
for full details.
If ``detach`` is ``True``, a :py:class:`Container` object is
returned instead.
Raises:
:py:class:`docker.errors.ContainerError`
If the container exits with a non-zero exit code and
``detach`` is ``False``.
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
stream = kwargs.pop('stream', False)
detach = kwargs.pop('detach', False)
platform = kwargs.pop('platform', None)
if detach and remove:
if version_gte(self.client.api._version, '1.25'):
kwargs["auto_remove"] = True
else:
raise RuntimeError("The options 'detach' and 'remove' cannot "
"be used together in api versions < 1.25.")
if kwargs.get('network') and kwargs.get('network_mode'):
raise RuntimeError(
'The options "network" and "network_mode" can not be used '
'together.'
)
try:
container = self.create(image=image, command=command,
detach=detach, **kwargs)
except ImageNotFound:
self.client.images.pull(image, platform=platform)
container = self.create(image=image, command=command,
detach=detach, **kwargs)
container.start()
if detach:
return container
logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
out = None
if logging_driver == 'json-file' or logging_driver == 'journald':
out = container.logs(
stdout=stdout, stderr=stderr, stream=True, follow=True
)
exit_status = container.wait()['StatusCode']
if exit_status != 0:
out = None
if not kwargs.get('auto_remove'):
out = container.logs(stdout=False, stderr=True)
if remove:
container.remove()
if exit_status != 0:
raise ContainerError(
container, exit_status, command, image, out
)
return out if stream or out is None else b''.join(
[line for line in out]
)
def create(self, image, command=None, **kwargs):
"""
Create a container without starting it. Similar to ``docker create``.
Takes the same arguments as :py:meth:`run`, except for ``stdout``,
``stderr``, and ``remove``.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
kwargs['image'] = image
kwargs['command'] = command
kwargs['version'] = self.client.api._version
create_kwargs = _create_container_args(kwargs)
resp = self.client.api.create_container(**create_kwargs)
return self.get(resp['Id'])
def get(self, container_id):
"""
Get a container by name or ID.
Args:
container_id (str): Container name or ID.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.NotFound`
If the container does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.inspect_container(container_id)
return self.prepare_model(resp)
def list(self, all=False, before=None, filters=None, limit=-1, since=None,
sparse=False, ignore_removed=False):
"""
List containers. Similar to the ``docker ps`` command.
Args:
all (bool): Show all containers. Only running containers are shown
by default
since (str): Show only containers created since Id or Name, include
non-running ones
before (str): Show only container created before Id or Name,
include non-running ones
limit (int): Show `limit` last created containers, include
non-running ones
filters (dict): Filters to be processed on the image list.
Available filters:
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
``<image-name>[:tag]``, ``<image-id>``, or
``<image@digest>``.
- `before` (str): Only containers created before a particular
container. Give the container name or id.
- `since` (str): Only containers created after a particular
container. Give container name or id.
A comprehensive list can be found in the documentation for
`docker ps
<https://docs.docker.com/engine/reference/commandline/ps>`_.
sparse (bool): Do not inspect containers. Returns partial
information, but guaranteed not to block. Use
:py:meth:`Container.reload` on resulting objects to retrieve
all attributes. Default: ``False``
ignore_removed (bool): Ignore failures due to missing containers
when attempting to inspect containers from the original list.
Set to ``True`` if race conditions are likely. Has no effect
if ``sparse=True``. Default: ``False``
Returns:
(list of :py:class:`Container`)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.containers(all=all, before=before,
filters=filters, limit=limit,
since=since)
if sparse:
return [self.prepare_model(r) for r in resp]
else:
containers = []
for r in resp:
try:
containers.append(self.get(r['Id']))
# a container may have been removed while iterating
except NotFound:
if not ignore_removed:
raise
return containers
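# A minimal usage sketch (assumes a client created with docker.from_env()):
#
#     exited = client.containers.list(all=True, filters={'status': 'exited'})
#     sparse = client.containers.list(sparse=True)  # partial attrs; call reload() for full data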
def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters)
prune.__doc__ = APIClient.prune_containers.__doc__
# kwargs to copy straight from run to create
RUN_CREATE_KWARGS = [
'command',
'detach',
'domainname',
'entrypoint',
'environment',
'healthcheck',
'hostname',
'image',
'labels',
'mac_address',
'name',
'network_disabled',
'stdin_open',
'stop_signal',
'tty',
'use_config_proxy',
'user',
'working_dir',
]
# kwargs to copy straight from run to host_config
RUN_HOST_CONFIG_KWARGS = [
'auto_remove',
'blkio_weight_device',
'blkio_weight',
'cap_add',
'cap_drop',
'cgroup_parent',
'cpu_count',
'cpu_percent',
'cpu_period',
'cpu_quota',
'cpu_shares',
'cpuset_cpus',
'cpuset_mems',
'cpu_rt_period',
'cpu_rt_runtime',
'device_cgroup_rules',
'device_read_bps',
'device_read_iops',
'device_write_bps',
'device_write_iops',
'devices',
'dns_opt',
'dns_search',
'dns',
'extra_hosts',
'group_add',
'init',
'init_path',
'ipc_mode',
'isolation',
'kernel_memory',
'links',
'log_config',
'lxc_conf',
'mem_limit',
'mem_reservation',
'mem_swappiness',
'memswap_limit',
'mounts',
'nano_cpus',
'network_mode',
'oom_kill_disable',
'oom_score_adj',
'pid_mode',
'pids_limit',
'privileged',
'publish_all_ports',
'read_only',
'restart_policy',
'security_opt',
'shm_size',
'storage_opt',
'sysctls',
'tmpfs',
'ulimits',
'userns_mode',
'uts_mode',
'version',
'volume_driver',
'volumes_from',
'runtime'
]
def _create_container_args(kwargs):
"""
Convert arguments to create() to arguments to create_container().
"""
# Copy over kwargs which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in RUN_CREATE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
host_config_kwargs = {}
for key in copy.copy(kwargs):
if key in RUN_HOST_CONFIG_KWARGS:
host_config_kwargs[key] = kwargs.pop(key)
# Process kwargs which are split over both create and host_config
ports = kwargs.pop('ports', {})
if ports:
host_config_kwargs['port_bindings'] = ports
volumes = kwargs.pop('volumes', {})
if volumes:
host_config_kwargs['binds'] = volumes
network = kwargs.pop('network', None)
if network:
create_kwargs['networking_config'] = {network: None}
host_config_kwargs['network_mode'] = network
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error('run', kwargs)
create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
# Fill in any kwargs which need processing by create_host_config first
port_bindings = create_kwargs['host_config'].get('PortBindings')
if port_bindings:
# sort to make consistent for tests
create_kwargs['ports'] = [tuple(p.split('/', 1))
for p in sorted(port_bindings.keys())]
if volumes:
if isinstance(volumes, dict):
create_kwargs['volumes'] = [
v.get('bind') for v in volumes.values()
]
else:
create_kwargs['volumes'] = [
_host_volume_from_bind(v) for v in volumes
]
return create_kwargs
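# Illustrative sketch of the split performed above (values are hypothetical):
#
#     run(image='alpine', ports={'2222/tcp': 3333}, mem_limit='128m', ...)
#     -> create kwargs:      image, command, ports=[('2222', 'tcp')], ...
#     -> host_config kwargs: port_bindings={'2222/tcp': 3333}, mem_limit='128m', ...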
def _host_volume_from_bind(bind):
drive, rest = ntpath.splitdrive(bind)
bits = rest.split(':', 1)
if len(bits) == 1 or bits[1] in ('ro', 'rw'):
return drive + bits[0]
else:
return bits[1].rstrip(':ro').rstrip(':rw')
ExecResult = namedtuple('ExecResult', 'exit_code,output')
""" A result of Container.exec_run with the properties ``exit_code`` and
``output``. """
| 38.587917
| 82
| 0.564682
|
5b7eef74c9e604a935d921b723e9dd99f1c3dcce
| 2,654
|
py
|
Python
|
pyaz/network/application_gateway/client_cert/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/application_gateway/client_cert/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/network/application_gateway/client_cert/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from .... pyaz_utils import _call_az
def add(data, gateway_name, name, resource_group):
'''
Add trusted client certificate of the application gateway.
Required Parameters:
- data -- Certificate public data.
- gateway_name -- Name of the application gateway.
- name -- Name of the trusted client certificate that is unique within an Application Gateway
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network application-gateway client-cert add", locals())
def remove(gateway_name, name, resource_group):
'''
Remove an existing trusted client certificate of the application gateway.
Required Parameters:
- gateway_name -- Name of the application gateway.
- name -- Name of the trusted client certificate that is unique within an Application Gateway
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network application-gateway client-cert remove", locals())
def list(gateway_name, resource_group):
'''
List the existing trusted client certificate of the application gateway.
Required Parameters:
- gateway_name -- Name of the application gateway.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network application-gateway client-cert list", locals())
def show(gateway_name, name, resource_group):
'''
Show an existing trusted client certificate of the application gateway.
Required Parameters:
- gateway_name -- Name of the application gateway.
- name -- Name of the trusted client certificate that is unique within an Application Gateway
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network application-gateway client-cert show", locals())
def update(data, gateway_name, name, resource_group):
'''
Update trusted client certificate of the application gateway.
Required Parameters:
- data -- Certificate public data.
- gateway_name -- Name of the application gateway.
- name -- Name of the trusted client certificate that is unique within an Application Gateway
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az network application-gateway client-cert update", locals())
| 42.126984
| 128
| 0.727958
|
ac6d719eda3254140de2766460aeb488f02a6549
| 2,762
|
py
|
Python
|
server/auvsi_suas/models/distance.py
|
RMMichael/interop
|
b68a1b0b2324b5a1d9b2683b97299cb6f214cdb9
|
[
"Apache-2.0"
] | 175
|
2015-09-15T15:37:06.000Z
|
2022-02-14T23:21:48.000Z
|
server/auvsi_suas/models/distance.py
|
RMMichael/interop
|
b68a1b0b2324b5a1d9b2683b97299cb6f214cdb9
|
[
"Apache-2.0"
] | 376
|
2015-09-16T19:34:15.000Z
|
2022-02-19T12:55:55.000Z
|
server/auvsi_suas/models/distance.py
|
RMMichael/interop
|
b68a1b0b2324b5a1d9b2683b97299cb6f214cdb9
|
[
"Apache-2.0"
] | 109
|
2015-09-16T17:05:14.000Z
|
2022-01-26T12:49:38.000Z
|
"""Functions for computing distance."""
import logging
import math
import numpy as np
import pyproj
from auvsi_suas.models import units
logger = logging.getLogger(__name__)
proj_wgs84 = pyproj.Proj(init="epsg:4326")
proj_web_mercator = pyproj.Proj(init="epsg:3857")
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees).
Reference:
http://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points
Args:
lon1, lat1: The latitude and longitude of position 1
lon2, lat2: The latitude and longitude of position 2
Returns:
The distance in kilometers
"""
# convert decimal degrees to radians
lon1 = math.radians(lon1)
lat1 = math.radians(lat1)
lon2 = math.radians(lon2)
lat2 = math.radians(lat2)
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
hav_a = (math.sin(dlat / 2)**2 +
math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2)
hav_c = 2 * math.asin(math.sqrt(hav_a))
# 6371 km is the mean radius of the Earth
dist_km = 6371 * hav_c
return dist_km
def distance_to(latitude_1, longitude_1, altitude_1, latitude_2, longitude_2,
altitude_2):
"""Get the distance in feet between the two positions.
Args:
latitude_1: The latitude of the first position.
longitude_1: The longitude of the first position.
altitude_1: The altitude in feet of the first position.
latitude_2: The latitude of the second position.
longitude_2: The longitude of the second position.
altitude_2: The altitude in feet of the second position.
"""
gps_dist_km = haversine(longitude_1, latitude_1, longitude_2, latitude_2)
gps_dist_ft = units.kilometers_to_feet(gps_dist_km)
alt_dist_ft = abs(altitude_1 - altitude_2)
return math.hypot(gps_dist_ft, alt_dist_ft)
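# Quick sanity checks for the two helpers above:
#
#     haversine(0, 0, 0, 0)                      # -> 0.0 (identical points)
#     distance_to(10.0, 20.0, 100.0,
#                 10.0, 20.0, 0.0)               # -> 100.0 ft (altitude-only offset)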
def proj_utm(lat, lon):
"""Proj instance for the given zone.
Args:
lat: Latitude
lon: Longitude
Returns:
pyproj.Proj instance for the given zone
"""
zone = math.floor((lon + 180) / 6.0) + 1
# Special cases for Norway and Svalbard
if lat >= 56 and lat < 64 and lon >= 3 and lon < 12:
zone = 32
if lat >= 72 and lat < 84:
if lon >= 0 and lon < 9:
zone = 31
elif lon >= 9 and lon < 21:
zone = 33
elif lon >= 21 and lon < 33:
zone = 35
elif lon >= 33 and lon < 42:
zone = 37
north = (lat > 0)
ref = "+proj=utm +zone=%d +ellps=WGS84" % zone
if not north:
ref += " +south"
return pyproj.Proj(ref)
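# Worked example for the zone formula above (outside the special-case regions):
#     lon = -122.4  ->  floor((-122.4 + 180) / 6) + 1 = floor(9.6) + 1 = 10
# so proj_utm(37.77, -122.4) builds "+proj=utm +zone=10 +ellps=WGS84"; the
# latitude is positive, hence no "+south" suffix.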
| 28.770833
| 118
| 0.636495
|
684ba354c0b6f9c716d0e6ec020c4744135585db
| 3,386
|
py
|
Python
|
src/encode_task_spp.py
|
CollinsLabBioComp/chip-seq-pipeline2
|
61f77dd94b3afe39bd718b30f1a9a6a7b9676c30
|
[
"MIT"
] | 1
|
2019-12-08T08:04:15.000Z
|
2019-12-08T08:04:15.000Z
|
src/encode_task_spp.py
|
kundajelab/cut-n-run-pipeline
|
0f9cf7870288d462f69449cb2b99faa9292af3bc
|
[
"MIT"
] | null | null | null |
src/encode_task_spp.py
|
kundajelab/cut-n-run-pipeline
|
0f9cf7870288d462f69449cb2b99faa9292af3bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# ENCODE DCC spp call peak wrapper
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
from encode_lib_common import (
assert_file_not_empty, human_readable_number, log,
ls_l, mkdir_p, rm_f, run_shell_cmd, strip_ext_ta)
def parse_arguments():
parser = argparse.ArgumentParser(
prog='ENCODE spp call_peak')
parser.add_argument(
'tas', type=str, nargs=2,
help='Path for TAGALIGN file and control TAGALIGN file.')
parser.add_argument('--chrsz', type=str,
help='2-col chromosome sizes file.')
parser.add_argument('--fraglen', type=int, required=True,
help='Fragment length.')
parser.add_argument('--cap-num-peak', default=300000, type=int,
help='Capping number of peaks by taking top N peaks.')
parser.add_argument('--nth', type=int, default=1,
help='Number of threads to parallelize.')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO',
choices=['NOTSET', 'DEBUG', 'INFO',
'WARNING', 'ERROR', 'CRITICAL'],
help='Log level')
args = parser.parse_args()
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def spp(ta, ctl_ta, fraglen, cap_num_peak, nth, out_dir):
basename_ta = os.path.basename(strip_ext_ta(ta))
basename_ctl_ta = os.path.basename(strip_ext_ta(ctl_ta))
basename_prefix = '{}_x_{}'.format(basename_ta, basename_ctl_ta)
if len(basename_prefix) > 200: # UNIX cannot have filename > 255
basename_prefix = '{}_x_control'.format(basename_ta)
nth_param = '-p={}'.format(nth) if nth > 1 else ''  # pass spp's -p only when actually parallelizing
prefix = os.path.join(out_dir, basename_prefix)
rpeak = '{}.{}.regionPeak.gz'.format(
prefix,
human_readable_number(cap_num_peak))
rpeak_tmp = '{}.tmp'.format(rpeak)
rpeak_tmp_gz = '{}.tmp.gz'.format(rpeak)
cmd0 = 'Rscript --max-ppsize=500000 $(which run_spp.R) -c={} -i={} '
cmd0 += '-npeak={} -odir={} -speak={} -savr={} -rf {}'
cmd0 = cmd0.format(
ta,
ctl_ta,
cap_num_peak,
os.path.abspath(out_dir),
fraglen,
rpeak_tmp,
nth_param)
run_shell_cmd(cmd0)
# if we have scientific representation of chr coord. then convert it to int
cmd1 = 'zcat -f {} | awk \'BEGIN{{OFS="\\t"}}'
cmd1 += '{{if ($2<0) $2=0; '
cmd1 += 'print $1,int($2),int($3),$4,$5,$6,$7,$8,$9,$10;}}\' | '
cmd1 += 'gzip -f -nc > {}'
cmd1 = cmd1.format(
rpeak_tmp,
rpeak)
run_shell_cmd(cmd1)
rm_f([rpeak_tmp, rpeak_tmp_gz])
return rpeak
def main():
# read params
args = parse_arguments()
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir)
log.info('Calling peaks with spp...')
rpeak = spp(args.tas[0], args.tas[1],
args.fraglen, args.cap_num_peak, args.nth, args.out_dir)
log.info('Checking if output is empty...')
assert_file_not_empty(rpeak)
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
| 32.557692
| 79
| 0.594507
|
7f6b75e00f6de67cd9129bcf13b50967d7126b2e
| 59
|
py
|
Python
|
tests/urlpatterns_reverse/erroneous_views_module.py
|
webjunkie/django
|
5dbca13f3baa2e1bafd77e84a80ad6d8a074712e
|
[
"BSD-3-Clause"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-1.5/tests/regressiontests/urlpatterns_reverse/erroneous_views_module.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-1.5/tests/regressiontests/urlpatterns_reverse/erroneous_views_module.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
import non_existent
def erroneous_view(request):
pass
| 11.8
| 28
| 0.779661
|
45eddffd0285fbccf3d6b3c9c35142ec3031c52d
| 4,982
|
py
|
Python
|
src/pyrin/caching/remote/decorators.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/caching/remote/decorators.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/caching/remote/decorators.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
caching remote decorators module.
"""
from functools import update_wrapper
import pyrin.caching.services as caching_services
def memcached(*old_method, **options):
"""
decorator to convert a method or function into a lazy one.
note that this cache type supports expire time and will consider method inputs
in caching. the result will be calculated once and then it will be cached.
each result will be cached using a tuple of class type, method name, inputs,
current user and component key as a key in the cache.
note that this decorator can be used on instance or class level methods and
properties, as well as on stand-alone functions.
to be able to use this decorator you must install memcached client dependency
using `pip install pyrin[memcached]` and also remove
`pyrin.caching.remote.handlers.memcached` from `ignored_modules` of
`packaging.ini` file.
:param function | property old_method: the original decorated method or function.
:keyword bool consider_user: specifies that current user must be included in
key generation. if not provided, it will be taken
from `caching` config store.
:keyword int expire: expire time for given key in seconds.
if not provided, it will be taken from `caching`
config store.
:returns: method or function result.
"""
def decorator(method):
"""
decorates the given method or function and makes it a lazy one.
:param function | property method: decorated method or function.
:returns: method or function result.
"""
def wrapper(*args, **kwargs):
"""
decorates the given method or function and makes it a lazy one.
:param object args: function positional arguments.
:param object kwargs: function keyword arguments.
:returns: method or function result.
"""
result = caching_services.try_get('memcached', method, args,
kwargs, **options)
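# note: try_get returning None is treated as a cache miss, so methods whose
# legitimate result is None are recomputed (and re-cached) on every call.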
if result is not None:
return result
result = method(*args, **kwargs)
caching_services.try_set('memcached', result, method,
args, kwargs, **options)
return result
return update_wrapper(wrapper, method)
if len(old_method) > 0:
return decorator(old_method[0])
return decorator
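# Minimal usage sketch (function and option values below are hypothetical):
#
#     @memcached
#     def load_settings():
#         ...
#
#     @memcached(expire=60, consider_user=False)
#     def expensive_report(year):
#         ...
#
# Both forms work because the decorator accepts an optional *old_method, so it
# can be applied with or without call parentheses.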
def redis(*old_method, **options):
"""
decorator to convert a method or function into a lazy one.
note that this cache type supports expire time and will consider method inputs
in caching. the result will be calculated once and then it will be cached.
each result will be cached using a tuple of class type, method name, inputs,
current user and component key as a key in the cache.
note that this decorator can be used on instance or class level methods and
properties, as well as on stand-alone functions.
to be able to use this decorator you must install redis client dependency
using `pip install pyrin[redis]` and also remove
`pyrin.caching.remote.handlers.redis` from `ignored_modules` of
`packaging.ini` file.
:param function | property old_method: the original decorated method or function.
:keyword bool consider_user: specifies that current user must be included in
key generation. if not provided, it will be taken
from `caching` config store.
:keyword int expire: expire time for given key in milliseconds.
if not provided, it will be taken from `caching`
config store.
:returns: method or function result.
"""
def decorator(method):
"""
decorates the given method or function and makes it a lazy one.
:param function | property method: decorated method or function.
:returns: method or function result.
"""
def wrapper(*args, **kwargs):
"""
decorates the given method or function and makes it a lazy one.
:param object args: function positional arguments.
:param object kwargs: function keyword arguments.
:returns: method or function result.
"""
result = caching_services.try_get('redis', method, args,
kwargs, **options)
if result is not None:
return result
result = method(*args, **kwargs)
caching_services.try_set('redis', result, method,
args, kwargs, **options)
return result
return update_wrapper(wrapper, method)
if len(old_method) > 0:
return decorator(old_method[0])
return decorator
| 34.839161
| 85
| 0.620032
|
4a13f300deb89c54eed42acfd7d0c86232c6b40d
| 8,234
|
py
|
Python
|
userena/tests/tests_forms.py
|
bsavelev/django-userena
|
1b841560ceef95c3f4dfd8f7e2bdef9f845bc417
|
[
"BSD-3-Clause"
] | null | null | null |
userena/tests/tests_forms.py
|
bsavelev/django-userena
|
1b841560ceef95c3f4dfd8f7e2bdef9f845bc417
|
[
"BSD-3-Clause"
] | null | null | null |
userena/tests/tests_forms.py
|
bsavelev/django-userena
|
1b841560ceef95c3f4dfd8f7e2bdef9f845bc417
|
[
"BSD-3-Clause"
] | 1
|
2019-07-27T19:23:35.000Z
|
2019-07-27T19:23:35.000Z
|
# encoding: utf-8
from __future__ import unicode_literals
from django.test import TestCase
from django.utils.translation import ugettext_lazy as _, override
from userena import forms
from userena import settings as userena_settings
from userena.utils import get_user_model
class SignupFormTests(TestCase):
""" Test the signup form. """
fixtures = ['users']
def test_signup_form(self):
"""
Test that the ``SignupForm`` checks for unique usernames and unique
e-mail addresses.
"""
invalid_data_dicts = [
# Non-alphanumeric username.
{'data': {'username': 'foo@bar',
'email': 'foo@example.com',
'password': 'foo',
'password2': 'foo',
'tos': 'on'},
'error': ('username', [_('Username must contain only letters, numbers, dots and underscores.')])},
# Password is not the same
{'data': {'username': 'katy-',
'email': 'katy@newexample.com',
'password1': 'foo',
'password2': 'foo2',
'tos': 'on'},
'error': ('__all__', [_('The two password fields didn\'t match.')])},
# Already taken username
{'data': {'username': 'john',
'email': 'john@newexample.com',
'password1': 'foo',
'password2': 'foo',
'tos': 'on'},
'error': ('username', [_('This username is already taken.')])},
# Forbidden username
{'data': {'username': 'SignUp',
'email': 'foo@example.com',
'password': 'foo',
'password2': 'foo2',
'tos': 'on'},
'error': ('username', [_('This username is not allowed.')])},
# Already taken email
{'data': {'username': 'alice',
'email': 'john@example.com',
'password': 'foo',
'password2': 'foo',
'tos': 'on'},
'error': ('email', [_('This email is already in use. Please supply a different email.')])},
]
# Override locale settings since we are checking for existence of error
# messages written in English. Note: it should not be necessary, but
# we have experienced such locale issues during tests on Travis builds.
# See: https://github.com/bread-and-pepper/django-userena/issues/446
with override('en'):
for invalid_dict in invalid_data_dicts:
form = forms.SignupForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
# And finally, a valid form.
form = forms.SignupForm(data={'username': 'foo.bla',
'email': 'foo@example.com',
'password1': 'foo',
'password2': 'foo',
'tos': 'on'})
self.failUnless(form.is_valid())
class AuthenticationFormTests(TestCase):
""" Test the ``AuthenticationForm`` """
fixtures = ['users',]
def test_signin_form(self):
"""
Check that the ``SigninForm`` requires both identification and password
"""
invalid_data_dicts = [
{'data': {'identification': '',
'password': 'inhalefish'},
'error': ('identification', ['Either supply us with your email or username.'])},
{'data': {'identification': 'john',
'password': 'inhalefish'},
'error': ('__all__', ['Please enter a correct username or email and password. Note that both fields are case-sensitive.'])}
]
# Override locale settings since we are checking for existence of error
# messages written in English. Note: it should not be necessary, but
# we have experienced such locale issues during tests on Travis builds.
# See: https://github.com/bread-and-pepper/django-userena/issues/446
with override('en'):
for invalid_dict in invalid_data_dicts:
form = forms.AuthenticationForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
valid_data_dicts = [
{'identification': 'john',
'password': 'blowfish'},
{'identification': 'john@example.com',
'password': 'blowfish'}
]
for valid_dict in valid_data_dicts:
form = forms.AuthenticationForm(valid_dict)
self.failUnless(form.is_valid())
def test_signin_form_email(self):
"""
Test that the signin form has a different label if
``USERENA_WITHOUT_USERNAMES`` is set to ``True``
"""
userena_settings.USERENA_WITHOUT_USERNAMES = True
form = forms.AuthenticationForm(data={'identification': "john",
'password': "blowfish"})
correct_label = "Email"
self.assertEqual(form.fields['identification'].label,
correct_label)
# Restore default settings
userena_settings.USERENA_WITHOUT_USERNAMES = False
class SignupFormOnlyEmailTests(TestCase):
"""
Test the :class:`SignupFormOnlyEmail`.
This is the same form as :class:`SignupForm` but doesn't require an
username for a successful signup.
"""
fixtures = ['users']
def test_signup_form_only_email(self):
"""
Test that the form has no username field. And that the username is
generated in the save method
"""
valid_data = {'email': 'hans@gretel.com',
'password1': 'blowfish',
'password2': 'blowfish'}
form = forms.SignupFormOnlyEmail(data=valid_data)
# Should have no username field
self.failIf(form.fields.get('username', False))
# Form should be valid.
self.failUnless(form.is_valid())
        # Creates a unique username
user = form.save()
self.failUnless(len(user.username), 5)
class ChangeEmailFormTests(TestCase):
""" Test the ``ChangeEmailForm`` """
fixtures = ['users']
def test_change_email_form(self):
user = get_user_model().objects.get(pk=1)
invalid_data_dicts = [
# No change in e-mail address
{'data': {'email': 'john@example.com'},
'error': ('email', ['You\'re already known under this email.'])},
# An e-mail address used by another
{'data': {'email': 'jane@example.com'},
'error': ('email', ['This email is already in use. Please supply a different email.'])},
]
# Override locale settings since we are checking for existence of error
        # messages written in English. Note: it should not be necessary, but
# we have experienced such locale issues during tests on Travis builds.
# See: https://github.com/bread-and-pepper/django-userena/issues/446
with override('en'):
for invalid_dict in invalid_data_dicts:
form = forms.ChangeEmailForm(user, data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
# Test a valid post
form = forms.ChangeEmailForm(user,
data={'email': 'john@newexample.com'})
self.failUnless(form.is_valid())
def test_form_init(self):
""" The form must be initialized with a ``User`` instance. """
self.assertRaises(TypeError, forms.ChangeEmailForm, None)
class EditAccountFormTest(TestCase):
""" Test the ``EditAccountForm`` """
pass
| 37.9447
| 136
| 0.546393
|
50ed09f4520aa4f2cf7555a9b4e50a474d33e658
| 26,637
|
py
|
Python
|
source/deepsecurity/api/policy_firewall_rule_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/api/policy_firewall_rule_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/api/policy_firewall_rule_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class PolicyFirewallRuleDetailsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def describe_firewall_rule_on_policy(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a firewall rule # noqa: E501
Describe a firewall rule including policy-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_firewall_rule_on_policy(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
return data
def describe_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Describe a firewall rule # noqa: E501
Describe a firewall rule including policy-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'firewall_rule_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_firewall_rule_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `describe_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule_id' is set
if ('firewall_rule_id' not in params or
params['firewall_rule_id'] is None):
raise ValueError("Missing the required parameter `firewall_rule_id` when calling `describe_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_firewall_rule_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `describe_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `describe_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'firewall_rule_id' in params:
path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules/{firewallRuleID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRule', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_firewall_rules_on_policy(self, policy_id, api_version, **kwargs): # noqa: E501
"""List firewall rules # noqa: E501
Lists all firewall rules assigned to a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_firewall_rules_on_policy(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only rules assigned to the current policy.
:return: FirewallRules
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, **kwargs) # noqa: E501
return data
def list_firewall_rules_on_policy_with_http_info(self, policy_id, api_version, **kwargs): # noqa: E501
"""List firewall rules # noqa: E501
Lists all firewall rules assigned to a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_firewall_rules_on_policy_with_http_info(policy_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only rules assigned to the current policy.
:return: FirewallRules
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_firewall_rules_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `list_firewall_rules_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_firewall_rules_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `list_firewall_rules_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRules', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_firewall_rule_on_policy(self, policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs): # noqa: E501
"""Modify a firewall rule # noqa: E501
Modify a firewall rule assigned to a policy. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_firewall_rule_on_policy(policy_id, firewall_rule_id, firewall_rule, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to modify. (required)
:param FirewallRule firewall_rule: The settings of the firewall rule to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs) # noqa: E501
return data
def modify_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, firewall_rule, api_version, **kwargs): # noqa: E501
"""Modify a firewall rule # noqa: E501
Modify a firewall rule assigned to a policy. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, firewall_rule, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to modify. (required)
:param FirewallRule firewall_rule: The settings of the firewall rule to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'firewall_rule_id', 'firewall_rule', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_firewall_rule_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `modify_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule_id' is set
if ('firewall_rule_id' not in params or
params['firewall_rule_id'] is None):
raise ValueError("Missing the required parameter `firewall_rule_id` when calling `modify_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule' is set
if ('firewall_rule' not in params or
params['firewall_rule'] is None):
raise ValueError("Missing the required parameter `firewall_rule` when calling `modify_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_firewall_rule_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `modify_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `modify_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'firewall_rule_id' in params:
path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'firewall_rule' in params:
body_params = params['firewall_rule']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules/{firewallRuleID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRule', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def reset_firewall_rule_on_policy(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Reset firewall rule overrides # noqa: E501
Remove all overrides for a firewall rule from a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_firewall_rule_on_policy(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, **kwargs) # noqa: E501
return data
def reset_firewall_rule_on_policy_with_http_info(self, policy_id, firewall_rule_id, api_version, **kwargs): # noqa: E501
"""Reset firewall rule overrides # noqa: E501
Remove all overrides for a firewall rule from a policy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_firewall_rule_on_policy_with_http_info(policy_id, firewall_rule_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int policy_id: The ID number of the policy. (required)
:param int firewall_rule_id: The ID number of the firewall rule to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current policy.
:return: FirewallRule
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'firewall_rule_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method reset_firewall_rule_on_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params or
params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `reset_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'firewall_rule_id' is set
if ('firewall_rule_id' not in params or
params['firewall_rule_id'] is None):
raise ValueError("Missing the required parameter `firewall_rule_id` when calling `reset_firewall_rule_on_policy`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `reset_firewall_rule_on_policy`") # noqa: E501
if 'policy_id' in params and not re.search('\\d+', str(params['policy_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `policy_id` when calling `reset_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'firewall_rule_id' in params and not re.search('\\d+', str(params['firewall_rule_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `firewall_rule_id` when calling `reset_firewall_rule_on_policy`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id'] # noqa: E501
if 'firewall_rule_id' in params:
path_params['firewallRuleID'] = params['firewall_rule_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/policies/{policyID}/firewall/rules/{firewallRuleID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FirewallRule', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
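# --- Illustrative usage sketch (not part of the generated module) ---
# A minimal, hedged example of calling one of the endpoints defined above.
# The policy/rule ID numbers and the "v1" api-version string are placeholders,
# and `api_client` is assumed to be an already-configured ApiClient (host and
# authentication set up elsewhere).
def _example_describe_firewall_rule(api_client, policy_id=1, firewall_rule_id=1):
    """Fetch a single firewall rule, including policy-level overrides."""
    api = PolicyFirewallRuleDetailsApi(api_client)
    return api.describe_firewall_rule_on_policy(
        policy_id, firewall_rule_id, api_version="v1", overrides=False)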
| 50.737143
| 311
| 0.637497
|
81cc1d1a52c5407940c3efe37272c346d5e582cd
| 1,242
|
py
|
Python
|
profiles_api/serializers.py
|
geonsangyoo/django-rest-api-beginner
|
935a83975a86005972cffc8fb8cc201cfb2976b2
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
geonsangyoo/django-rest-api-beginner
|
935a83975a86005972cffc8fb8cc201cfb2976b2
|
[
"MIT"
] | null | null | null |
profiles_api/serializers.py
|
geonsangyoo/django-rest-api-beginner
|
935a83975a86005972cffc8fb8cc201cfb2976b2
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
"""Serializes a name field for testing our APIView"""
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
"""Serializes a user profile object"""
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
"""Create and return a new user"""
user = models.UserProfile.objects.create_user(
email = validated_data['email'],
name = validated_data['name'],
password = validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
"""Serializes profile feed items"""
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on')
extra_kwargs = {
'user_profile': {
'read_only': True
}
}
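# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how UserProfileSerializer is typically exercised, e.g.
# from a test or the Django shell with the project settings loaded. The field
# values below are placeholders.
def _example_create_user():
    serializer = UserProfileSerializer(data={
        'email': 'demo@example.com',
        'name': 'Demo User',
        'password': 'correct-horse-battery',
    })
    serializer.is_valid(raise_exception=True)
    # save() routes through create() above, which hashes the password
    # via create_user().
    return serializer.save()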
| 29.571429
| 68
| 0.598229
|
2c6c30de6408a10765e16116fe181f68179cdb30
| 2,021
|
py
|
Python
|
scripts/create_librispeech_trans.py
|
takasyo/TensorFlowASR
|
b518ea2aa68b3307ffe9dfd675f84e3ef38894a4
|
[
"Apache-2.0"
] | 1
|
2021-01-21T09:04:26.000Z
|
2021-01-21T09:04:26.000Z
|
scripts/create_librispeech_trans.py
|
takasyo/TensorFlowASR
|
b518ea2aa68b3307ffe9dfd675f84e3ef38894a4
|
[
"Apache-2.0"
] | null | null | null |
scripts/create_librispeech_trans.py
|
takasyo/TensorFlowASR
|
b518ea2aa68b3307ffe9dfd675f84e3ef38894a4
|
[
"Apache-2.0"
] | 1
|
2021-10-16T22:40:42.000Z
|
2021-10-16T22:40:42.000Z
|
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import argparse
import librosa
from tqdm.auto import tqdm
import unicodedata
from tensorflow_asr.utils.utils import preprocess_paths
parser = argparse.ArgumentParser(prog="Setup LibriSpeech Transcripts")
parser.add_argument("--dir", "-d", type=str,
default=None, help="Directory of dataset")
parser.add_argument("output", type=str,
default=None, help="The output .tsv transcript file path")
args = parser.parse_args()
assert args.dir and args.output
args.dir = preprocess_paths(args.dir)
args.output = preprocess_paths(args.output)
transcripts = []
text_files = glob.glob(os.path.join(args.dir, "**", "*.txt"), recursive=True)
for text_file in tqdm(text_files, desc="[Loading]"):
current_dir = os.path.dirname(text_file)
with open(text_file, "r", encoding="utf-8") as txt:
lines = txt.read().splitlines()
for line in lines:
line = line.split(" ", maxsplit=1)
audio_file = os.path.join(current_dir, line[0] + ".flac")
y, sr = librosa.load(audio_file, sr=None)
duration = librosa.get_duration(y, sr)
text = unicodedata.normalize("NFC", line[1].lower())
transcripts.append(f"{audio_file}\t{duration:.2f}\t{text}\n")
with open(args.output, "w", encoding="utf-8") as out:
out.write("PATH\tDURATION\tTRANSCRIPT\n")
for line in tqdm(transcripts, desc="[Writing]"):
out.write(line)
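# Example invocation (illustrative; the dataset path and output name below are
# placeholders, not part of the original script):
#   python create_librispeech_trans.py --dir /data/LibriSpeech/train-clean-100 train-clean-100.tsv
# The output is a tab-separated file with a "PATH\tDURATION\tTRANSCRIPT" header
# followed by one line per .flac utterance found under --dir.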
| 34.254237
| 78
| 0.700643
|
da1703f39aaf4e535d355f1336eee360486e1adb
| 11,091
|
py
|
Python
|
gslab_make/private/runprogramdirective.py
|
lboxell/gslab_python
|
0e6d687962146d8745cd80d5c888c69647863d2d
|
[
"MIT"
] | 12
|
2017-03-03T20:48:50.000Z
|
2020-11-27T23:37:15.000Z
|
gslab_make/private/runprogramdirective.py
|
lboxell/gslab_python
|
0e6d687962146d8745cd80d5c888c69647863d2d
|
[
"MIT"
] | 132
|
2017-01-11T23:32:01.000Z
|
2022-03-31T17:00:06.000Z
|
gslab_make/private/runprogramdirective.py
|
lboxell/gslab_python
|
0e6d687962146d8745cd80d5c888c69647863d2d
|
[
"MIT"
] | 10
|
2017-07-22T02:35:29.000Z
|
2021-02-16T00:09:44.000Z
|
#! /usr/bin/env python
import os
import re
import shutil
import subprocess
from exceptionclasses import CustomError, CritError, SyntaxError, LogicError
import messages as messages
import metadata as metadata
class RunProgramDirective(object):
def __init__(self, kwargs, program_bool = True):
        kwargs = dict((k.lower(), v) for k, v in kwargs.iteritems())  # normalise keyword names to lower case
if program_bool:
if 'program' in kwargs.keys():
program_input = kwargs['program']
self.program_path = os.path.dirname(program_input)
program_base = os.path.basename(program_input)
self.program_name, self.program_ext = os.path.splitext(program_base)
else:
raise SyntaxError(messages.syn_error_noprogram)
if self.program_path == '':
self.program_path = './'
else:
self.program_ext = ''
if 'makelog' in kwargs.keys():
self.makelog = kwargs['makelog']
if self.makelog:
self.makelog = os.path.abspath( self.makelog )
else:
self.makelog = os.path.abspath( metadata.settings['makelog_file'] )
if 'option' in kwargs.keys():
self.option = kwargs['option']
self.option_dict = self.parse_options()
self.option_overlap_error_check(kwargs)
else:
self.option = '';
self.option_dict = {}
if 'log' in kwargs.keys():
self.log = os.path.abspath( kwargs['log'] )
else:
self.log = self.option_assigned('log', '')
if 'lst' in kwargs.keys():
self.lst = os.path.abspath( kwargs['lst'] )
else:
self.lst = self.option_assigned('lst', metadata.settings['output_dir'])
if 'changedir' in kwargs.keys():
self.changedir = kwargs['changedir']
else:
self.changedir = False
if 'executable' in kwargs.keys():
self.executable = kwargs['executable']
else:
self.executable = '';
if 'args' in kwargs.keys():
self.args = kwargs['args']
else:
self.args = '';
if 'handout' in kwargs.keys():
self.handout = kwargs['handout']
else:
self.handout = False
if 'comments' in kwargs.keys():
self.comments = kwargs['comments']
else:
self.comments = False
if 'pdfout' in kwargs.keys():
self.pdfout = os.path.abspath( kwargs['pdfout'] )
else:
pdfout_dir = ''
if self.handout or self.comments:
pdfout_dir = metadata.settings['temp_dir']
else:
pdfout_dir = metadata.settings['output_dir']
self.pdfout = self.option_assigned('pdfout', pdfout_dir)
self.osname = os.name
self.option = self.update_option()
def parse_options(self):
option_list = self.option.strip().replace("=", " ").split()
option_dict = {}
for opt in option_list:
if opt:
if opt[0] in metadata.option_start_chars:
option = opt
option_dict[option] = ''
else:
option_dict[option] += opt + ' '
return option_dict
def option_overlap_error_check(self, kwargs):
prog = [prog for prog, ext in metadata.extensions.iteritems() if ext == self.program_ext][0]
option_overlaps = metadata.option_overlaps.get(prog)
if not option_overlaps: return
for opt in option_overlaps:
if self.option_dict.has_key(option_overlaps[opt]) and kwargs.has_key(opt):
raise CritError(messages.crit_error_option_overlap
% (opt, option_overlaps[opt]))
def option_assigned(self, option, default):
assigned_value = default
prog = [prog for prog, ext in metadata.extensions.iteritems() if ext == self.program_ext][0]
option_overlaps = metadata.option_overlaps.get(prog)
if option_overlaps:
replace_option = option_overlaps.get(option)
if replace_option:
value = self.option_dict.get(replace_option)
if value:
print messages.note_option_replaced % (replace_option, option)
del self.option_dict[replace_option]
assigned_value = value
return assigned_value
def update_option(self):
prog = [prog for prog, ext in metadata.extensions.iteritems() if ext == self.program_ext][0]
if prog in metadata.option_overlaps.keys():
option = ''
for opt, arg in self.option_dict.iteritems():
option += str(opt + ' ' + arg + ' ')
return option
else:
return self.option
def error_check(self, prog):
if (self.osname != 'posix') & (self.osname != 'nt'):
raise CritError(messages.crit_error_unknown_system % self.osname)
ext = metadata.extensions[prog]
if self.program_ext == '':
self.program_ext = ext
if self.program_ext:
self.program = self.program_name + self.program_ext
self.program_full = os.path.join(self.program_path, self.program)
if not os.path.isfile(self.program_full):
raise CritError(messages.crit_error_no_file % self.program_full)
if self.program_ext != ext:
raise CritError(messages.crit_error_extension % self.program_full)
def execute_run(self, command):
print '\n'
current_directory = os.getcwd()
if self.changedir:
os.chdir(self.program_path)
if not self.log:
tempname = current_directory + '/make-templog.txt'
else:
tempname = os.path.abspath(self.log)
TEMPFILE = open(tempname, 'wb')
if self.makelog:
if not (metadata.makelog_started and os.path.isfile(self.makelog)):
raise CritError(messages.crit_error_nomakelog % self.makelog)
# Open main log file
try:
LOGFILE = open(self.makelog, 'ab')
except Exception as errmsg:
print errmsg
raise CritError(messages.crit_error_log % self.makelog)
try:
# Execute command and print content to LOGFILE
print 'Executing: ', command
print >>LOGFILE, '\n\nExecute: ', command
subprocess.check_call(command, shell = True, stdout = TEMPFILE, stderr = TEMPFILE)
TEMPFILE.close()
LOGFILE.write(open(tempname, 'rU').read())
LOGFILE.close()
except Exception as errmsg:
# If fails then print errors to LOGFILE
TEMPFILE.close()
LOGFILE.write(open(tempname, 'rU').read())
print messages.crit_error_bad_command % command, '\n', str(errmsg)
print >> LOGFILE, messages.crit_error_bad_command % command, '\n', str(errmsg)
LOGFILE.close()
else:
try:
# Execute command
print 'Executing: ', command
subprocess.check_call(command, shell = True, stdout = TEMPFILE, stderr = TEMPFILE)
TEMPFILE.close()
except Exception as errmsg:
# If fails then print errors
TEMPFILE.close()
print messages.crit_error_bad_command % command, '\n', str(errmsg)
print >> TEMPFILE, messages.crit_error_bad_command % command, '\n', str(errmsg)
if not self.log:
os.remove(tempname)
if self.changedir:
os.chdir(current_directory)
def move_log(self, default_log):
if self.makelog:
if not (metadata.makelog_started and os.path.isfile(self.makelog)):
raise CritError(messages.crit_error_nomakelog % self.makelog)
if os.path.abspath(default_log) != os.path.abspath(self.log):
# Append default_log to main log
LOGFILE = open(self.makelog, 'ab')
try:
LOGFILE.write(open(default_log, 'rU').read())
except Exception as errmsg:
print errmsg
raise CritError(messages.crit_error_no_file % default_log)
LOGFILE.close()
# Save default_log as self.log
if self.log:
shutil.copy2(default_log, self.log)
os.remove(default_log)
def move_lst(self, default_lst):
if not (os.path.isfile(default_lst)): return
if self.makelog:
if not (metadata.makelog_started and os.path.isfile(self.makelog)):
raise CritError(messages.crit_error_nomakelog % self.makelog)
if os.path.abspath(default_lst) != os.path.abspath(self.lst):
# Append default_lst to main log
LOGFILE = open(self.makelog, 'ab')
try:
LOGFILE.write(open(default_lst, 'rU').read())
except Exception as errmsg:
print errmsg
raise CritError(messages.crit_error_no_file % default_lst)
LOGFILE.close()
# Save default_lst as self.lst
if self.lst:
shutil.copy2(default_lst, self.lst)
os.remove(default_lst)
class RunRPackageDirective(RunProgramDirective):
def __init__(self, kwargs, program_bool = False):
RunProgramDirective.__init__(self, kwargs, program_bool)
if 'package' in kwargs.keys():
self.package = kwargs['package']
self.package = re.sub('\\\\', '/', self.package)
else:
raise SyntaxError(messages.syn_error_nopackage)
if 'lib' in kwargs.keys():
self.lib = '-l ' + kwargs['lib']
else:
self.lib = ''
def error_check(self, prog):
if (self.osname != 'posix') & (self.osname != 'nt'):
raise CritError(messages.crit_error_unknown_system % self.osname)
if self.package and not os.path.isfile(self.package):
raise CritError(messages.crit_error_no_package % self.package)
class RunCommandDirective(RunProgramDirective):
def __init__(self, kwargs, program_bool = False):
RunProgramDirective.__init__(self, kwargs, program_bool)
if 'command' in kwargs.keys():
self.command = kwargs['command']
self.command = re.sub('\\\\', '/', self.command)
else:
raise SyntaxError(messages.syn_error_nocommand)
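# --- Illustrative note (not part of the original module) ---
# A hedged sketch of what RunProgramDirective.parse_options() produces,
# assuming '-' is listed in metadata.option_start_chars (an assumption; the
# real set is defined in metadata.py):
#
#   self.option = '-log mymake.log -lst ./output'
#   self.parse_options()  ==>  {'-log': 'mymake.log ', '-lst': './output '}
#
# Every token that begins with an option-start character opens a new key, and
# each following token is appended to that key's value with a trailing space.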
| 36.483553
| 101
| 0.553692
|
e1d84bc6f5c9d001ff60c86fdd025558cbffaa85
| 21,858
|
py
|
Python
|
simulators/scasim/detector_properties.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | null | null | null |
simulators/scasim/detector_properties.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | 24
|
2019-08-09T15:03:20.000Z
|
2022-03-04T10:04:48.000Z
|
simulators/scasim/detector_properties.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | 4
|
2019-06-16T15:03:23.000Z
|
2020-12-02T19:51:52.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Module detector_properties - Defines the properties of the MIRI SCA
detector.
NOTE: Other JWST detectors can be simulated as long as their
properties can be described by the parameters contained here.
The main differences are the material, pixel size and detector
thickness.
NOTE: These properties have been defined to match the data obtained
during FM testing of the detectors. For the best simulation, the
parameters should be kept up to date as detector knowledge improves.
Each set of properties is stored in a Python dictionary and the
properties belonging to a particular focal plane module can be
looked up using a dictionary of dictionaries.
Sources:
(1) JPL D-25632, MIRI Operational Concept Document, 4 March 2010.
(2) JPL D-46944, MIRI Flight Focal Plane Module End Item Data Package
(FPM EIDP), 10 May 2010.
(3) UA_MIRI_006, MIRI Test Report, Description of Bad Pixel Mask,
Version 4, 28 August 2011.
(4) The Mid-Infrared Instrument for the James Webb Space Telescope,
VIII: The MIRI Focal Plane System; M. E. Ressler et al.,
Publications of the Astronomical Society of Pacific, Volume 127,
Issue 953, pp. 675 (2015)
(5) JPL MIRI DFM 478 04.02, MIRI FPS Exposure Time Calculations (SCE FPGA2),
M. E. Ressler, October 2014.
:History:
15 Jul 2010: Created
22 Jul 2010: Pixel size and thickness added.
25 Jul 2010: Well depth increased to 100000 (ref 2).
27 Jul 2010: Subarray options added.
01 Aug 2010: Added CHIP_ID and DEFAULTs.
09 Aug 2010: Renamed. QE data file renamed to qe_measurement.txt
16 Aug 2010: Detector properties compiled into a list, since
MIRI contains three focal plane modules which can behave
slightly differently. There are now separate dark current
and QE measurements for each FPM.
24 Aug 2010: QE and dark current measurements now read from FITS files.
Detector thickness increased to 470 microns.
30 Aug 2010: Locate the configuration files using an absolute file path
so that scasim may be run from any directory.
03 Sep 2010: Added COSMIC_RAY_LEAKAGE_FRACTION. According to Mike Ressler,
a small fraction of cosmic ray events can cause charge
leakage rather than a charge jump.
Bad pixel maps added.
27 Sep 2010: Python environment for windows verified.
01 Oct 2010: Dropped groups added to readout modes.
12 Oct 2010: Number of frames per group added to readout modes (always 1
for MIRI but will allow SCAsim to be extended for other JWST
detectors).
18 Oct 2010: Group gap for GAP modes changed from 4 to 8 to increase the
exposure time that can be fitted into a reasonable sized
output file.
11 Nov 2010: DARK_MAP parameters added.
15 Nov 2010: Mistake in CHIP_ID corrected.
22 Nov 2010: Corrections to FPM and SCA IDs in FITS header.
24 Nov 2010: Added test Subarray modes.
15 Dec 2010: SCA_ID values changed from 104,105,106 to 493,494,495
(as expected by miri_cube)
07 Jan 2011: ID, SCA_ID and DETECTOR values updated to reflect the
values reported by Tim Grundy (RAL) on 20 Dec 2010.
Detector properties classified and looked up by SCA_ID
rather than by FPM_ID.
10 Mar 2011: Subarray modes MASK1065, MASK1550 and SLITLESSPRISM
corrected to match definitions in OCD Revision C
(4 March 2010). Subarray modes LRS3, AXIS64, AXIS128 and
AXIS256 added for test purposes. Subarray modes TEST32
and TEST64 removed.
25 Mar 2011: PERSISTENCE parameter added, which can be a linear factor
or a set of polynomial coefficients.
05 Apr 2011: Pixel response function added (not used yet).
21 Sep 2011: The bad pixel masks and dark maps derived from FM tests (3)
are now used by default.
05 Oct 2011: PERSISTENCE altered for more realistic simulation of
persistence effects.
NOTE: Should FRAME_RESETS be increased to simulate the
post-FM testing adjustment of detector controller parameters?
NOTE: Which is the correct detector naming convention:
Tim Grundy's or Jane Morrison's?
24 Oct 2011: Modified to use new "filesearching" module, which should
             make it easier for users to substitute their own configuration
             files. USE_FM_MEASUREMENTS flag removed (too complicated).
             Pixel response function removed (never used).
13 Jun 2013: Changed detector and subarray names to match new data model.
25 Oct 2013: Make the primary keyword for determining the detector ID
'DETECTOR' rather than 'SCA_ID'. Detector properties are
looked up by detector name rather than sca_id.
24 Feb 2014: Use find_simulator_file function to find auxiliary data files
rather than searching PYTHONPATH (which takes a long time when
the software is installed using MIRICLE/ureka).
07 May 2014: Added zeropoint drift, linearity and latency factors.
Added charge trapping and slope drift factors.
08 May 2014: Charge trapping parameter added.
05 Jun 2014: Removed charge trapping parameters and added slow and fast
latency parameters. Decay parameters now given as a timescale.
19 Jun 2014: Slow and fast zeropoint drift parameters included.
27 Feb 2015: Subarray parameters brought up to date.
09 Mar 2015: Switch over to CDP-3 bad pixel masks. Well depth adjusted to
match CDP-3 pixel saturation results (approximately).
11 Mar 2015: Switch over to CDP-3 dark masks. Local detector naming
convention updated from 493,494,495 to IM,LW,SW.
21 May 2015: Amplifier level removed and gain and read noise defined by
calibration data products.
06 Aug 2015: Noise calibration factor added.
08 Sep 2015: Made compatible with Python 3.
02 Oct 2015: Removed readout modes which are no longer available.
08 Dec 2015: Correction to subarray parameters to match Mike Ressler's PASP
paper.
20 Mar 2016: Pedestal values added, to simulate electronics bias and keep
the final simulated DN values away from zero. Bad pixels can
be given a different pedestal value.
23 Mar 2016: Obtain MASK, PIXELFLAT and GAIN calibration from CDP files
found by searching the CDP repository rather than by
looking up a reference defined here. (The DARK map needs to
remain here because it doesn't directly match any CDP file.)
05 May 2016: Detector sensitivity coefficients updated.
14 Jul 2016: Pedestal values adjusted.
02 Nov 2016: Legacy PERSISTENCE coefficients zeroed. Legacy DARK and bad
pixel masks also removed.
14 Feb 2017: Zeropoint, pedestal level and noise factor adjusted.
20 Jun 2017: Mean gain is (e/DN), not (DN/e).
26 Jun 2017: DARK_CURRENT parameter added, to define the expected level of
dark current in e/s.
19 Jul 2017: Well depth adjusted to match expected saturation levels.
Nominal dark current changed from 0.12 to 0.21 e/s.
13 Oct 2017: New frame time calculation from Mike Ressler.
SLOW mode now uses 8 out of 9 samples. READOUT_MODE now defines
samplesum, sampleskip and refpixsampleskip parameters separately.
01 Nov 2017: Note that the detector drift and latent coefficients are only
valid for FAST mode.
13 Dec 2017: DARK_MAP property removed.
29 Jun 2018: Global parameters moved to miri.parameters.
@author: Steven Beard (UKATC)
"""
# Import global properties
from miri.parameters import READOUT_MODE
from miri.parameters import SUBARRAY as MIRI_SUBARRAY
from miri.simulators.find_simulator_file import find_simulator_file
#
# MIRI contains three focal plane modules, each of which contains a
# detector with a 1024 x 1024 zone illuminated by the instrument. Each
# focal plane module is identified by a unique Sensor Chip Assembly ID
# and a unique Focal Plane Module ID defined as follows. The SCA ID is
# used in MIRI documentation and the FPM ID is used in the FPM EIDP
# (see email from Tim Grundy, RAL):
# * For the MIRI imager:
# SCA 493 containing FPM S/N 106.
# * For the long wavelength arm of the MIRI MRS:
# SCA 494 containing FPM S/N 104.
# * For the short wavelength arm of the MIRI MRS:
# SCA 495 containing FPM S/N 105.
# Each 1024x1024 pixel detector has 4 extra non-illuminated reference
# columns just off the left and right edges of the illuminated zone.
# In addition, there is a separate bank of non-illuminated reference
# pixels ganged together known as reference outputs. These reference
# outputs are not contiguous with the illuminated zone, but the data
# is rearranged in level 1 FITS files so these reference outputs appear
# as extra rows on top of the normal detector image.
#
# Note that DARK_CURRENT_FILE describes how the dark current varies
# with detector temperature. A 2-D DARK_MAP describes how the dark current
# varies over the detector surface (including hot pixels which have
# excessive dark current). A 3 or 4-D DARK_MAP also describes how the
# dark current changes with group and integration.
#
# Note that detector drifts and latency have only been modelled in FAST
# mode. The parameters defined here are not valid in SLOW mode.
#
# The find_simulator_file function searches for a named file within a
# search path of simulator data files(starting with the current working
# directory) and returns the absolute path.
#
# TODO: Is there a better model to describe detector drifts?
_sca493 = {}
_sca493['SCA_ID'] = 493 # Numerical SCA ID
_sca493['FPM_ID'] = "FPMSN106" # Unique FPM ID
_sca493['NAME'] = "Sensor Chip Assembly 493 with Focal Plane Module 106"
_sca493['DETECTOR'] = "MIRIMAGE" # ASCII detector ID (previously "IM")
_sca493['CHIP'] = 'SiAs' # Type of detector chip
_sca493['COMMENTS'] = "Describes MIRI FPM S/N 106 detector data with ref pixels"
_sca493['ILLUMINATED_ROWS'] = 1024
_sca493['ILLUMINATED_COLUMNS'] = 1024
_sca493['LEFT_COLUMNS'] = 4 # Reference columns on detector
_sca493['RIGHT_COLUMNS'] = 4 # Reference columns on detector
_sca493['BOTTOM_ROWS'] = 0 # There are no extra rows at the bottom
_sca493['TOP_ROWS'] = 256 # Reference rows in level 1 FITS image
_sca493['PIXEL_SIZE'] = 25.0 # Pixel size in microns
_sca493['THICKNESS'] = 470.0 # Detector thickness in microns
_sca493['WELL_DEPTH'] = 354720 # Well depth in electrons
_sca493['PEDESTAL'] = 10000 # Pedestal value in electrons
_sca493['BAD_PEDESTAL'] = 1000 # Pedestal value for bad pixels in electrons
_sca493['MEAN_GAIN'] = 5.5 # Mean gain (e/DN) (Superceded by GAIN CDP)
_sca493['PERSISTENCE'] = 0.0 # Linear persistence factor (0.0 to 1.0)
# _sca493['PERSISTENCE'] = [1.0e-8, 0.03, 0.0] # Persistence coefficients [2nd,1st,0th]
# NOTE: The following 4 parameters are valid for FAST mode only.
_sca493['LATENCY_SLOW'] = [1.67e-9, 136000.0] # Slow latency parameters [gain(1/e),decay]
_sca493['LATENCY_FAST'] = [0.002, 300.0] # Fast latency parameters [gain,decay]
_sca493['ZP_SLOW'] = [45000.0, 0.0084] # Slow zeropoint drift [const(e),scale(e/s)]
# Fast zeropoint jumps as a function of integration number and flux [[const(e),scale(s)], ...]
_sca493['ZP_FAST'] = [[0.0, -2.917], [0.0, -2.292], [0.0, -2.396], [0.0, -2.408]]
# [1.0, 0.0] means the integrator is perfectly linear
# [1.0, -1.0] means the counter starts at 100% sensitivity and reduces to 0% at full well.
# _sca493['SENSITIVITY'] = [1.0, 0.0] # Linearity sensitivity coeffs [const,slope]
_sca493['SENSITIVITY'] = [1.1, -0.4] # Linearity sensitivity coeffs [const,slope]
_sca493['CLOCK_TIME'] = 1.0e-5 # Detector clock time in seconds
_sca493['RESET_WIDTH'] = 4 # The width of the reset pulse in clock cycles
_sca493['RESET_OVERHEAD'] = 3 # Number of clock cycles per reset
_sca493['FRAME_RESETS'] = 0 # Extra resets between integrations
_sca493['TARGET_TEMPERATURE'] = 6.7 # Target temperature in K
_sca493['DARK_CURRENT_FILE'] = find_simulator_file('dark_currentIM.fits')
_sca493['DARK_CURRENT'] = 0.21 # Nominal dark current level (electrons/s)
_sca493['QE_FILE'] = find_simulator_file('qe_measurementIM.fits')
_sca493['NOISEFACTOR'] = 1.0 # Noise adjustment factor
_sca494 = {}
_sca494['SCA_ID'] = 494 # Numerical SCA ID
_sca494['FPM_ID'] = "FPMSN104" # Unique FPM ID
_sca494['NAME'] = "Sensor Chip Assembly 494 with Focal Plane Module 104"
_sca494['DETECTOR'] = "MIRIFULONG" # ASCII detector ID (previously "LW")
_sca494['CHIP'] = 'SiAs' # Type of detector chip
_sca494['COMMENTS'] = "Describes MIRI FPM S/N 104 detector data with ref pixels"
_sca494['ILLUMINATED_ROWS'] = 1024
_sca494['ILLUMINATED_COLUMNS'] = 1024
_sca494['LEFT_COLUMNS'] = 4 # Reference columns on detector
_sca494['RIGHT_COLUMNS'] = 4 # Reference columns on detector
_sca494['BOTTOM_ROWS'] = 0 # There are no extra rows at the bottom
_sca494['TOP_ROWS'] = 256 # Reference rows in level 1 FITS image
_sca494['PIXEL_SIZE'] = 25.0 # Pixel size in microns
_sca494['THICKNESS'] = 470.0 # Detector thickness in microns
_sca494['WELL_DEPTH'] = 359950 # Well depth in electrons
_sca494['PEDESTAL'] = 10000 # Pedestal value in electrons
_sca494['BAD_PEDESTAL'] = 1000 # Pedestal value for bad pixels in electrons
_sca494['MEAN_GAIN'] = 5.5 # Mean gain (e/DN) (Superceded by GAIN CDP)
_sca494['PERSISTENCE'] = 0.0 # Linear persistence factor (0.0 to 1.0)
# _sca494['PERSISTENCE'] = [1.0e-8, 0.03, 0.0] # Persistence coefficients [2nd,1st,0th]
# NOTE: The following 4 parameters are valid for FAST mode only.
_sca494['LATENCY_SLOW'] = [1.67e-9, 136000.0] # Slow latency parameters [gain(1/e),decay(s)]
_sca494['LATENCY_FAST'] = [0.002, 300.0] # Fast latency parameters [gain,decay(s)]
_sca494['ZP_SLOW'] = [45000.0, 0.0084] # Slow zeropoint drift [const(e),scale(e/s)]
# Fast zeropoint jumps as a function of integration number and flux [[const(e),scale(s)], ...]
_sca494['ZP_FAST'] = [[0.0, -2.917], [0.0, -2.292], [0.0, -2.396], [0.0, -2.408]]
# [1.0, 0.0] means the integrator is perfectly linear
# [1.0, -1.0] means the counter starts at 100% sensitivity and reduces to 0% at full well.
# _sca494['SENSITIVITY'] = [1.0, 0.0] # Linearity sensitivity coeffs [const,slope]
_sca494['SENSITIVITY'] = [1.1, -0.4] # Linearity sensitivity coeffs [const,slope]
_sca494['CLOCK_TIME'] = 1.0e-5 # Detector clock time in seconds
_sca494['RESET_WIDTH'] = 4 # The width of the reset pulse in clock cycles
_sca494['RESET_OVERHEAD'] = 3 # Number of clock cycles per reset
_sca494['FRAME_RESETS'] = 0 # Extra resets between integrations
_sca494['TARGET_TEMPERATURE'] = 6.7 # Target temperature in K
_sca494['DARK_CURRENT_FILE'] = find_simulator_file('dark_currentLW.fits')
_sca494['DARK_CURRENT'] = 0.21 # Nominal dark current level (electrons/s)
_sca494['QE_FILE'] = find_simulator_file('qe_measurementLW.fits')
_sca494['NOISEFACTOR'] = 1.0 # Noise adjustment factor
_sca495 = {}
_sca495['SCA_ID'] = 495 # Numerical SCA ID
_sca495['FPM_ID'] = "FPMSN105" # Unique FPM ID
_sca495['NAME'] = "Sensor Chip Assembly 495 with Focal Plane Module 105"
_sca495['DETECTOR'] = "MIRIFUSHORT" # ASCII detector ID (previously "SW")
_sca495['CHIP'] = 'SiAs' # Type of detector chip
_sca495['COMMENTS'] = "Describes MIRI FPM S/N 105 detector data with ref pixels"
_sca495['ILLUMINATED_ROWS'] = 1024
_sca495['ILLUMINATED_COLUMNS'] = 1024
_sca495['LEFT_COLUMNS'] = 4 # Reference columns on detector
_sca495['RIGHT_COLUMNS'] = 4 # Reference columns on detector
_sca495['BOTTOM_ROWS'] = 0 # There are no extra rows at the bottom
_sca495['TOP_ROWS'] = 256 # Reference rows in level 1 FITS image
_sca495['PIXEL_SIZE'] = 25.0 # Pixel size in microns
_sca495['THICKNESS'] = 470.0 # Detector thickness in microns
_sca495['WELL_DEPTH'] = 358190 # Well depth in electrons
_sca495['PEDESTAL'] = 10000 # Pedestal value in electrons
_sca495['BAD_PEDESTAL'] = 1000 # Pedestal value for bad pixels in electrons
_sca495['MEAN_GAIN'] = 5.5 # Mean gain (e/DN) (Superceded by GAIN CDP)
_sca495['PERSISTENCE'] = 0.0 # Linear persistence factor (0.0 to 1.0)
# _sca495['PERSISTENCE'] = [1.0e-8, 0.03, 0.0] # Persistence coefficients [2nd,1st,0th]
# NOTE: The following 4 parameters are valid for FAST mode only.
_sca495['LATENCY_SLOW'] = [1.67e-9, 136000.0] # Slow latency parameters [gain(1/e),decay]
_sca495['LATENCY_FAST'] = [0.002, 300.0] # Fast latency parameters [gain,decay]
_sca495['ZP_SLOW'] = [45000.0, 0.0084] # Slow zeropoint drift [const(e),scale(e/s)]
# Fast zeropoint jumps as a function of integration number and flux [[const(e),scale(s)], ...]
_sca495['ZP_FAST'] = [[0.0, -2.917], [0.0, -2.292], [0.0, -2.396], [0.0, -2.408]]
# [1.0, 0.0] means the integrator is perfectly linear
# [1.0, -1.0] means the counter starts at 100% sensitivity and reduces to 0% at full well.
# _sca495['SENSITIVITY'] = [1.0, 0.0] # Linearity sensitivity coeffs [const,slope]
_sca495['SENSITIVITY'] = [1.1, -0.4] # Linearity sensitivity coeffs [const,slope]
_sca495['CLOCK_TIME'] = 1.0e-5 # Detector clock time in seconds
_sca495['RESET_WIDTH'] = 4 # The width of the reset pulse in clock cycles
_sca495['RESET_OVERHEAD'] = 3 # Number of clock cycles per reset
_sca495['FRAME_RESETS'] = 0 # Extra resets between integrations
_sca495['TARGET_TEMPERATURE'] = 6.7 # Target temperature in K
# The pixel response function is the sensitivity variation across the
# surface of each individual pixel.
_sca495['PIXEL_RESPONSE'] = None # No pixel response function
_sca495['DARK_CURRENT_FILE'] = find_simulator_file('dark_currentSW.fits')
_sca495['DARK_CURRENT'] = 0.21 # Nominal dark current level (electrons/s)
_sca495['QE_FILE'] = find_simulator_file('qe_measurementSW.fits')
_sca495['NOISEFACTOR'] = 1.0 # Noise adjustment factor
# Other detector descriptions (e.g. for other JWST instruments) could be
# added here.
# The following constants may be imported by SCA simulator software modules.
#
# Dictionary of known focal plane modules. The detector properties for
# each FPM can be obtained by looking up its unique detector name in this
# dictionary and using the result as another dictionary (i.e. the overall
# data structure is a dictionary of dictionaries).
#
DETECTORS_DICT = {'MIRIMAGE' : _sca493,
'MIRIFULONG' : _sca494,
'MIRIFUSHORT' : _sca495}
# The readout modes have been obtained from miri.parameters.
# This is the default mode.
DEFAULT_READOUT_MODE = 'FAST'
def flip_subarray_params(input_params):
"""
Helper function to switch the row and column entries in a SUBARRAY tuple
"""
assert isinstance(input_params, (tuple,list))
assert len(input_params) == 4
output_params = [input_params[1],
input_params[0],
input_params[3],
input_params[2]]
return output_params
#
# Convert subarray parameters to the ordering needed by SCASim.
# TODO: Change SCASim to use the same ordering as the MIRI CDP software?
# Row and column numbers start at 1.
# The tuple contains (firstrow, firstcol, subrows, subcolumns)
#
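# Illustrative example (not part of the original module): flip_subarray_params
# simply swaps the row/column entries. Assuming the miri.parameters tuples are
# ordered (firstcol, firstrow, subcolumns, subrows), an input such as
# (1, 529, 72, 416) becomes the SCASim-style
# (firstrow, firstcol, subrows, subcolumns) = (529, 1, 416, 72); the numbers
# are placeholders chosen only to show the swap.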
SUBARRAY = {}
SUBARRAY['FULL'] = None
SUBARRAY['MASK1140'] = flip_subarray_params( MIRI_SUBARRAY['MASK1140'] )
SUBARRAY['MASK1550'] = flip_subarray_params( MIRI_SUBARRAY['MASK1550'] )
SUBARRAY['MASK1065'] = flip_subarray_params( MIRI_SUBARRAY['MASK1065'] )
SUBARRAY['MASKLYOT'] = flip_subarray_params( MIRI_SUBARRAY['MASKLYOT'] )
SUBARRAY['BRIGHTSKY'] = flip_subarray_params( MIRI_SUBARRAY['BRIGHTSKY'] )
SUBARRAY['SUB256'] = flip_subarray_params( MIRI_SUBARRAY['SUB256'] )
SUBARRAY['SUB128'] = flip_subarray_params( MIRI_SUBARRAY['SUB128'] )
SUBARRAY['SUB64'] = flip_subarray_params( MIRI_SUBARRAY['SUB64'] )
SUBARRAY['SLITLESSPRISM'] = flip_subarray_params( MIRI_SUBARRAY['SLITLESSPRISM'] )
# The following additional subarray options can be uncommented for testing
# special cases. The detectors are never actually read out using these modes.
# SUBARRAY['LRS1'] = ( 1, 1, 1024, 420)
# SUBARRAY['LRS2'] = ( 1, 292, 1024, 128)
# SUBARRAY['LRS3'] = ( 1, 292, 512, 64)
# SUBARRAY['AXIS256'] = ( 384, 388, 256, 256)
# SUBARRAY['AXIS128'] = ( 448, 452, 128, 128)
# SUBARRAY['AXIS64'] = ( 480, 484, 64, 64)
# SUBARRAY['TEST64'] = ( 128, 128, 64, 80)
# SUBARRAY['TEST32'] = ( 8, 8, 32, 32)
# SUBARRAY['RHS256'] = ( 1, 776, 256, 256)
STANDARD_SUBARRAYS = ('FULL', 'MASK1065', 'MASK1140', 'MASK1550', 'MASKLYOT',
'BRIGHTSKY', 'SUB256', 'SUB128', 'SUB64', 'SLITLESSPRISM')
DEFAULT_SUBARRAY = 'FULL'
#
# This fraction of cosmic ray events will cause charge leakage (negative jump)
# rather than a charge increase.
# NOTE: The negative jumps are more likely to be caused by a cosmic ray strike
# on the readout electronics rather than a true charge leakage.
#
COSMIC_RAY_LEAKAGE_FRACTION = 0.002
if __name__ == '__main__':
print( "NOTE: The DetectorProperties module is supposed to be " \
"imported by another module, not run as a main program." )
print( "The following detector properties are defined:" )
for detid in DETECTORS_DICT:
print( "DETECTOR %s\n--------------------" % detid )
detector = DETECTORS_DICT[detid]
for key in detector:
print( "%24s = %s" % (key, detector[key]) )
print( "READOUT_MODE\n------------" )
for key in READOUT_MODE:
print( "%24s = %s" % (key, READOUT_MODE[key]) )
print( "SUBARRAY\n--------" )
for key in SUBARRAY:
print( "%24s = %s" % (key, SUBARRAY[key]) )
print( "Finished." )
| 54.10396
| 92
| 0.707247
|
25e37d117f588df7bd2e63c33965a7a0f19bf7c7
| 1,091
|
py
|
Python
|
fc_sql/SQLModifier.py
|
fangqk1991/py-sql
|
61acf9c4ca8fe3544a9d6b79fd338f3548f2f838
|
[
"MIT"
] | null | null | null |
fc_sql/SQLModifier.py
|
fangqk1991/py-sql
|
61acf9c4ca8fe3544a9d6b79fd338f3548f2f838
|
[
"MIT"
] | null | null | null |
fc_sql/SQLModifier.py
|
fangqk1991/py-sql
|
61acf9c4ca8fe3544a9d6b79fd338f3548f2f838
|
[
"MIT"
] | null | null | null |
from .SQLException import SQLException
from .BuilderBase import BuilderBase
from .FCDatabase import FCDatabase
class SQLModifier(BuilderBase):
__updateColumns: list = None
__updateValues: list = None
def __init__(self, db: FCDatabase):
super().__init__(db)
self.__updateColumns = []
self.__updateValues = []
def update_kv(self, key, value):
self.__updateColumns.append('%s = ?' % key)
self.__updateValues.append(value)
def execute(self):
self._check_table_valid()
if len(self.__updateColumns) <= 0:
return
if len(self._conditionColumns) <= 0:
raise SQLException('%s: conditionColumns missing.' % __class__)
query = 'UPDATE %s SET %s WHERE %s' % (self._table,
', '.join(self.__updateColumns),
' AND '.join(self._conditions()))
self._database.update(query, self._stmt_values())
def _stmt_values(self):
return self.__updateValues + self._conditionValues
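# Minimal sketch (illustration only) of the statement shape that execute()
# assembles above. The column and condition lists here are hand-written
# stand-ins for what update_kv() and the BuilderBase condition helpers collect;
# the table and column names are arbitrary.
def _example_update_statement():
    update_columns = ['name = ?', 'age = ?']
    conditions = ['uid = ?']
    return 'UPDATE %s SET %s WHERE %s' % (
        'demo_table', ', '.join(update_columns), ' AND '.join(conditions))
    # -> "UPDATE demo_table SET name = ?, age = ? WHERE uid = ?"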
| 30.305556
| 80
| 0.598533
|
93652bf0fc81defd6635deceb62496df4a4526c5
| 27,251
|
py
|
Python
|
reco_utils/recommender/deeprec/models/base_model.py
|
rafsanulhasan/recommenders
|
cbc863f320624fc87d6d008a244fe55fcf954856
|
[
"MIT"
] | 2
|
2021-04-14T02:33:03.000Z
|
2021-05-13T03:15:44.000Z
|
reco_utils/recommender/deeprec/models/base_model.py
|
likebupt/recommenders
|
6815e5663ef87da1d0b9029bc9a8a367dc3d33a7
|
[
"MIT"
] | 1
|
2021-05-10T01:11:41.000Z
|
2021-05-10T01:11:41.000Z
|
reco_utils/recommender/deeprec/models/base_model.py
|
likebupt/recommenders
|
6815e5663ef87da1d0b9029bc9a8a367dc3d33a7
|
[
"MIT"
] | 1
|
2021-08-28T18:05:09.000Z
|
2021-08-28T18:05:09.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from os.path import join
import abc
import time
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from reco_utils.recommender.deeprec.deeprec_utils import cal_metric
__all__ = ["BaseModel"]
class BaseModel:
def __init__(self, hparams, iterator_creator, graph=None, seed=None):
"""Initializing the model. Create common logics which are needed by all deeprec models, such as loss function,
parameter set.
Args:
            hparams (obj): A tf.contrib.training.HParams object, holding the entire set of hyperparameters.
iterator_creator (obj): An iterator to load the data.
graph (obj): An optional graph.
seed (int): Random seed.
"""
self.seed = seed
tf.compat.v1.set_random_seed(seed)
np.random.seed(seed)
self.graph = graph if graph is not None else tf.Graph()
self.iterator = iterator_creator(hparams, self.graph)
self.train_num_ngs = (
hparams.train_num_ngs if "train_num_ngs" in hparams else None
)
with self.graph.as_default():
self.hparams = hparams
self.layer_params = []
self.embed_params = []
self.cross_params = []
self.layer_keeps = tf.compat.v1.placeholder(tf.float32, name="layer_keeps")
self.keep_prob_train = None
self.keep_prob_test = None
self.is_train_stage = tf.compat.v1.placeholder(
tf.bool, shape=(), name="is_training"
)
self.group = tf.compat.v1.placeholder(tf.int32, shape=(), name="group")
self.initializer = self._get_initializer()
self.logit = self._build_graph()
self.pred = self._get_pred(self.logit, self.hparams.method)
self.loss = self._get_loss()
self.saver = tf.compat.v1.train.Saver(max_to_keep=self.hparams.epochs)
self.update = self._build_train_opt()
self.extra_update_ops = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.UPDATE_OPS
)
self.init_op = tf.compat.v1.global_variables_initializer()
self.merged = self._add_summaries()
# set GPU use with on demand growth
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
self.sess = tf.compat.v1.Session(
graph=self.graph, config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)
)
self.sess.run(self.init_op)
@abc.abstractmethod
def _build_graph(self):
"""Subclass will implement this."""
pass
def _get_loss(self):
"""Make loss function, consists of data loss and regularization loss
Returns:
obj: Loss value
"""
self.data_loss = self._compute_data_loss()
self.regular_loss = self._compute_regular_loss()
self.loss = tf.add(self.data_loss, self.regular_loss)
return self.loss
def _get_pred(self, logit, task):
"""Make final output as prediction score, according to different tasks.
Args:
logit (obj): Base prediction value.
task (str): A task (values: regression/classification)
Returns:
obj: Transformed score
"""
if task == "regression":
pred = tf.identity(logit)
elif task == "classification":
pred = tf.sigmoid(logit)
else:
raise ValueError(
"method must be regression or classification, but now is {0}".format(
task
)
)
pred = tf.identity(pred, name='pred')
return pred
def _add_summaries(self):
tf.compat.v1.summary.scalar("data_loss", self.data_loss)
tf.compat.v1.summary.scalar("regular_loss", self.regular_loss)
tf.compat.v1.summary.scalar("loss", self.loss)
merged = tf.compat.v1.summary.merge_all()
return merged
def _l2_loss(self):
l2_loss = tf.zeros([1], dtype=tf.float32)
# embedding_layer l2 loss
for param in self.embed_params:
l2_loss = tf.add(
l2_loss, tf.multiply(self.hparams.embed_l2, tf.nn.l2_loss(param))
)
params = self.layer_params
for param in params:
l2_loss = tf.add(
l2_loss, tf.multiply(self.hparams.layer_l2, tf.nn.l2_loss(param))
)
return l2_loss
def _l1_loss(self):
l1_loss = tf.zeros([1], dtype=tf.float32)
        # embedding_layer l1 loss
for param in self.embed_params:
l1_loss = tf.add(
l1_loss, tf.multiply(self.hparams.embed_l1, tf.norm(param, ord=1))
)
params = self.layer_params
for param in params:
l1_loss = tf.add(
l1_loss, tf.multiply(self.hparams.layer_l1, tf.norm(param, ord=1))
)
return l1_loss
def _cross_l_loss(self):
"""Construct L1-norm and L2-norm on cross network parameters for loss function.
Returns:
obj: Regular loss value on cross network parameters.
"""
cross_l_loss = tf.zeros([1], dtype=tf.float32)
for param in self.cross_params:
cross_l_loss = tf.add(
cross_l_loss, tf.multiply(self.hparams.cross_l1, tf.norm(param, ord=1))
)
cross_l_loss = tf.add(
cross_l_loss, tf.multiply(self.hparams.cross_l2, tf.norm(param, ord=2))
)
return cross_l_loss
def _get_initializer(self):
if self.hparams.init_method == "tnormal":
return tf.truncated_normal_initializer(
stddev=self.hparams.init_value, seed=self.seed
)
elif self.hparams.init_method == "uniform":
return tf.random_uniform_initializer(
-self.hparams.init_value, self.hparams.init_value, seed=self.seed
)
elif self.hparams.init_method == "normal":
return tf.random_normal_initializer(
stddev=self.hparams.init_value, seed=self.seed
)
elif self.hparams.init_method == "xavier_normal":
return tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed)
elif self.hparams.init_method == "xavier_uniform":
return tf.contrib.layers.xavier_initializer(uniform=True, seed=self.seed)
elif self.hparams.init_method == "he_normal":
return tf.contrib.layers.variance_scaling_initializer(
factor=2.0, mode="FAN_IN", uniform=False, seed=self.seed
)
elif self.hparams.init_method == "he_uniform":
return tf.contrib.layers.variance_scaling_initializer(
factor=2.0, mode="FAN_IN", uniform=True, seed=self.seed
)
else:
return tf.truncated_normal_initializer(
stddev=self.hparams.init_value, seed=self.seed
)
def _compute_data_loss(self):
if self.hparams.loss == "cross_entropy_loss":
data_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.reshape(self.logit, [-1]),
labels=tf.reshape(self.iterator.labels, [-1]),
)
)
elif self.hparams.loss == "square_loss":
data_loss = tf.sqrt(
tf.reduce_mean(
tf.squared_difference(
tf.reshape(self.pred, [-1]),
tf.reshape(self.iterator.labels, [-1]),
)
)
)
elif self.hparams.loss == "log_loss":
data_loss = tf.reduce_mean(
tf.compat.v1.losses.log_loss(
predictions=tf.reshape(self.pred, [-1]),
labels=tf.reshape(self.iterator.labels, [-1]),
)
)
elif self.hparams.loss == "softmax":
group = self.train_num_ngs + 1
logits = tf.reshape(self.logit, (-1, group))
if self.hparams.model_type == "NextItNet":
labels = (
tf.transpose(
tf.reshape(
self.iterator.labels,
(-1, group, self.hparams.max_seq_length),
),
[0, 2, 1],
),
)
labels = tf.reshape(labels, (-1, group))
else:
labels = tf.reshape(self.iterator.labels, (-1, group))
softmax_pred = tf.nn.softmax(logits, axis=-1)
boolean_mask = tf.equal(labels, tf.ones_like(labels))
mask_paddings = tf.ones_like(softmax_pred)
pos_softmax = tf.where(boolean_mask, softmax_pred, mask_paddings)
data_loss = -group * tf.reduce_mean(tf.math.log(pos_softmax))
else:
raise ValueError("this loss not defined {0}".format(self.hparams.loss))
return data_loss
def _compute_regular_loss(self):
"""Construct regular loss. Usually it's comprised of l1 and l2 norm.
Users can designate which norm to be included via config file.
Returns:
obj: Regular loss.
"""
regular_loss = self._l2_loss() + self._l1_loss() + self._cross_l_loss()
return tf.reduce_sum(regular_loss)
def _train_opt(self):
"""Get the optimizer according to configuration. Usually we will use Adam.
Returns:
obj: An optimizer.
"""
lr = self.hparams.learning_rate
optimizer = self.hparams.optimizer
if optimizer == "adadelta":
train_step = tf.train.AdadeltaOptimizer(lr)
elif optimizer == "adagrad":
train_step = tf.train.AdagradOptimizer(lr)
elif optimizer == "sgd":
train_step = tf.train.GradientDescentOptimizer(lr)
elif optimizer == "adam":
train_step = tf.compat.v1.train.AdamOptimizer(lr)
elif optimizer == "ftrl":
train_step = tf.train.FtrlOptimizer(lr)
elif optimizer == "gd":
train_step = tf.train.GradientDescentOptimizer(lr)
elif optimizer == "padagrad":
train_step = tf.train.ProximalAdagradOptimizer(lr)
elif optimizer == "pgd":
train_step = tf.train.ProximalGradientDescentOptimizer(lr)
elif optimizer == "rmsprop":
train_step = tf.train.RMSPropOptimizer(lr)
elif optimizer == "lazyadam":
train_step = tf.contrib.opt.LazyAdamOptimizer(lr)
else:
train_step = tf.train.GradientDescentOptimizer(lr)
return train_step
def _build_train_opt(self):
"""Construct gradient descent based optimization step
In this step, we provide gradient clipping option. Sometimes we what to clip the gradients
when their absolute values are too large to avoid gradient explosion.
Returns:
obj: An operation that applies the specified optimization step.
"""
train_step = self._train_opt()
gradients, variables = zip(*train_step.compute_gradients(self.loss))
if self.hparams.is_clip_norm:
gradients = [
None
if gradient is None
else tf.clip_by_norm(gradient, self.hparams.max_grad_norm)
for gradient in gradients
]
return train_step.apply_gradients(zip(gradients, variables))
def _active_layer(self, logit, activation, layer_idx=-1):
"""Transform the input value with an activation. May use dropout.
Args:
logit (obj): Input value.
activation (str): A string indicating the type of activation function.
layer_idx (int): Index of current layer. Used to retrieve corresponding parameters
Returns:
obj: A tensor after applying activation function on logit.
"""
if layer_idx >= 0 and self.hparams.user_dropout:
logit = self._dropout(logit, self.layer_keeps[layer_idx])
return self._activate(logit, activation)
def _activate(self, logit, activation):
if activation == "sigmoid":
return tf.nn.sigmoid(logit)
elif activation == "softmax":
return tf.nn.softmax(logit)
elif activation == "relu":
return tf.nn.relu(logit)
elif activation == "tanh":
return tf.nn.tanh(logit)
elif activation == "elu":
return tf.nn.elu(logit)
elif activation == "identity":
return tf.identity(logit)
else:
raise ValueError("this activations not defined {0}".format(activation))
def _dropout(self, logit, keep_prob):
"""Apply drops upon the input value.
Args:
logit (obj): The input value.
keep_prob (float): The probability of keeping each element.
Returns:
obj: A tensor of the same shape of logit.
"""
return tf.nn.dropout(x=logit, keep_prob=keep_prob)
def train(self, sess, feed_dict):
"""Go through the optimization step once with training data in feed_dict.
Args:
sess (obj): The model session object.
feed_dict (dict): Feed values to train the model. This is a dictionary that maps graph elements to values.
Returns:
list: A list of values, including update operation, total loss, data loss, and merged summary.
"""
feed_dict[self.layer_keeps] = self.keep_prob_train
feed_dict[self.is_train_stage] = True
return sess.run(
[
self.update,
self.extra_update_ops,
self.loss,
self.data_loss,
self.merged,
],
feed_dict=feed_dict,
)
def eval(self, sess, feed_dict):
"""Evaluate the data in feed_dict with current model.
Args:
sess (obj): The model session object.
feed_dict (dict): Feed values for evaluation. This is a dictionary that maps graph elements to values.
Returns:
list: A list of evaluated results, including total loss value, data loss value,
predicted scores, and ground-truth labels.
"""
feed_dict[self.layer_keeps] = self.keep_prob_test
feed_dict[self.is_train_stage] = False
return sess.run([self.pred, self.iterator.labels], feed_dict=feed_dict)
def infer(self, sess, feed_dict):
"""Given feature data (in feed_dict), get predicted scores with current model.
Args:
sess (obj): The model session object.
feed_dict (dict): Instances to predict. This is a dictionary that maps graph elements to values.
Returns:
list: Predicted scores for the given instances.
"""
feed_dict[self.layer_keeps] = self.keep_prob_test
feed_dict[self.is_train_stage] = False
return sess.run([self.pred], feed_dict=feed_dict)
def load_model(self, model_path=None):
"""Load an existing model.
Args:
model_path: model path.
Raises:
IOError: if the restore operation failed.
"""
act_path = self.hparams.load_saved_model
if model_path is not None:
act_path = model_path
try:
self.saver.restore(self.sess, act_path)
except:
raise IOError("Failed to find any matching files for {0}".format(act_path))
def fit(self, train_file, valid_file, test_file=None):
"""Fit the model with train_file. Evaluate the model on valid_file per epoch to observe the training status.
If test_file is not None, evaluate it too.
Args:
train_file (str): training data set.
valid_file (str): validation set.
test_file (str): test set.
Returns:
obj: An instance of self.
"""
if self.hparams.write_tfevents:
self.writer = tf.summary.FileWriter(
self.hparams.SUMMARIES_DIR, self.sess.graph
)
train_sess = self.sess
for epoch in range(1, self.hparams.epochs + 1):
step = 0
self.hparams.current_epoch = epoch
epoch_loss = 0
train_start = time.time()
for (
batch_data_input,
impression,
data_size,
) in self.iterator.load_data_from_file(train_file):
step_result = self.train(train_sess, batch_data_input)
(_, _, step_loss, step_data_loss, summary) = step_result
if self.hparams.write_tfevents:
self.writer.add_summary(summary, step)
epoch_loss += step_loss
step += 1
if step % self.hparams.show_step == 0:
print(
"step {0:d} , total_loss: {1:.4f}, data_loss: {2:.4f}".format(
step, step_loss, step_data_loss
)
)
train_end = time.time()
train_time = train_end - train_start
if self.hparams.save_model:
if not os.path.exists(self.hparams.MODEL_DIR):
os.makedirs(self.hparams.MODEL_DIR)
if epoch % self.hparams.save_epoch == 0:
save_path_str = join(self.hparams.MODEL_DIR, "epoch_" + str(epoch))
checkpoint_path = self.saver.save(
sess=train_sess, save_path=save_path_str
)
eval_start = time.time()
eval_res = self.run_eval(valid_file)
train_info = ",".join(
[
str(item[0]) + ":" + str(item[1])
for item in [("logloss loss", epoch_loss / step)]
]
)
eval_info = ", ".join(
[
str(item[0]) + ":" + str(item[1])
for item in sorted(eval_res.items(), key=lambda x: x[0])
]
)
if test_file is not None:
test_res = self.run_eval(test_file)
test_info = ", ".join(
[
str(item[0]) + ":" + str(item[1])
for item in sorted(test_res.items(), key=lambda x: x[0])
]
)
eval_end = time.time()
eval_time = eval_end - eval_start
if test_file is not None:
print(
"at epoch {0:d}".format(epoch)
+ "\ntrain info: "
+ train_info
+ "\neval info: "
+ eval_info
+ "\ntest info: "
+ test_info
)
else:
print(
"at epoch {0:d}".format(epoch)
+ "\ntrain info: "
+ train_info
+ "\neval info: "
+ eval_info
)
print(
"at epoch {0:d} , train time: {1:.1f} eval time: {2:.1f}".format(
epoch, train_time, eval_time
)
)
if self.hparams.write_tfevents:
self.writer.close()
return self
def group_labels(self, labels, preds, group_keys):
"""Devide labels and preds into several group according to values in group keys.
Args:
labels (list): ground truth label list.
preds (list): prediction score list.
group_keys (list): group key list.
Returns:
            all_labels: labels after grouping.
            all_preds: preds after grouping.
"""
all_keys = list(set(group_keys))
group_labels = {k: [] for k in all_keys}
group_preds = {k: [] for k in all_keys}
for l, p, k in zip(labels, preds, group_keys):
group_labels[k].append(l)
group_preds[k].append(p)
all_labels = []
all_preds = []
for k in all_keys:
all_labels.append(group_labels[k])
all_preds.append(group_preds[k])
return all_labels, all_preds
def run_eval(self, filename):
"""Evaluate the given file and returns some evaluation metrics.
Args:
filename (str): A file name that will be evaluated.
Returns:
            dict: A dictionary containing evaluation metrics.
"""
load_sess = self.sess
preds = []
labels = []
imp_indexs = []
for batch_data_input, imp_index, data_size in self.iterator.load_data_from_file(
filename
):
step_pred, step_labels = self.eval(load_sess, batch_data_input)
preds.extend(np.reshape(step_pred, -1))
labels.extend(np.reshape(step_labels, -1))
imp_indexs.extend(np.reshape(imp_index, -1))
res = cal_metric(labels, preds, self.hparams.metrics)
if self.hparams.pairwise_metrics is not None:
group_labels, group_preds = self.group_labels(labels, preds, imp_indexs)
res_pairwise = cal_metric(
group_labels, group_preds, self.hparams.pairwise_metrics
)
res.update(res_pairwise)
return res
def predict(self, infile_name, outfile_name):
"""Make predictions on the given data, and output predicted scores to a file.
Args:
infile_name (str): Input file name, format is same as train/val/test file.
            outfile_name (str): Output file name; each line is a predicted score.
Returns:
obj: An instance of self.
"""
load_sess = self.sess
with tf.gfile.GFile(outfile_name, "w") as wt:
for batch_data_input, _, data_size in self.iterator.load_data_from_file(
infile_name
):
step_pred = self.infer(load_sess, batch_data_input)
step_pred = step_pred[0][:data_size]
step_pred = np.reshape(step_pred, -1)
wt.write("\n".join(map(str, step_pred)))
# line break after each batch.
wt.write("\n")
return self
def _attention(self, inputs, attention_size):
"""Soft alignment attention implement.
Args:
inputs (obj): Sequences ready to apply attention.
attention_size (int): The dimension of attention operation.
Returns:
            obj: Attention-weighted sequence (the inputs scaled by their attention weights).
"""
hidden_size = inputs.shape[2].value
if not attention_size:
attention_size = hidden_size
attention_mat = tf.get_variable(
name="attention_mat",
shape=[inputs.shape[-1].value, hidden_size],
initializer=self.initializer,
)
att_inputs = tf.tensordot(inputs, attention_mat, [[2], [0]])
query = tf.get_variable(
name="query",
shape=[attention_size],
dtype=tf.float32,
initializer=self.initializer,
)
att_logits = tf.tensordot(att_inputs, query, axes=1, name="att_logits")
att_weights = tf.nn.softmax(att_logits, name="att_weights")
output = inputs * tf.expand_dims(att_weights, -1)
return output
def _fcn_net(self, model_output, layer_sizes, scope):
"""Construct the MLP part for the model.
Args:
model_output (obj): The output of upper layers, input of MLP part
layer_sizes (list): The shape of each layer of MLP part
scope (obj): The scope of MLP part
        Returns:
obj: prediction logit after fully connected layer
"""
hparams = self.hparams
with tf.variable_scope(scope):
last_layer_size = model_output.shape[-1]
layer_idx = 0
hidden_nn_layers = []
hidden_nn_layers.append(model_output)
with tf.variable_scope("nn_part", initializer=self.initializer) as scope:
for idx, layer_size in enumerate(layer_sizes):
curr_w_nn_layer = tf.get_variable(
name="w_nn_layer" + str(layer_idx),
shape=[last_layer_size, layer_size],
dtype=tf.float32,
)
curr_b_nn_layer = tf.get_variable(
name="b_nn_layer" + str(layer_idx),
shape=[layer_size],
dtype=tf.float32,
initializer=tf.zeros_initializer(),
)
tf.summary.histogram(
"nn_part/" + "w_nn_layer" + str(layer_idx), curr_w_nn_layer
)
tf.summary.histogram(
"nn_part/" + "b_nn_layer" + str(layer_idx), curr_b_nn_layer
)
curr_hidden_nn_layer = (
tf.tensordot(
hidden_nn_layers[layer_idx], curr_w_nn_layer, axes=1
)
+ curr_b_nn_layer
)
scope = "nn_part" + str(idx)
activation = hparams.activation[idx]
if hparams.enable_BN is True:
curr_hidden_nn_layer = tf.layers.batch_normalization(
curr_hidden_nn_layer,
momentum=0.95,
epsilon=0.0001,
training=self.is_train_stage,
)
curr_hidden_nn_layer = self._active_layer(
logit=curr_hidden_nn_layer, activation=activation, layer_idx=idx
)
hidden_nn_layers.append(curr_hidden_nn_layer)
layer_idx += 1
last_layer_size = layer_size
w_nn_output = tf.get_variable(
name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32
)
b_nn_output = tf.get_variable(
name="b_nn_output",
shape=[1],
dtype=tf.float32,
initializer=tf.zeros_initializer(),
)
tf.summary.histogram(
"nn_part/" + "w_nn_output" + str(layer_idx), w_nn_output
)
tf.summary.histogram(
"nn_part/" + "b_nn_output" + str(layer_idx), b_nn_output
)
nn_output = (
tf.tensordot(hidden_nn_layers[-1], w_nn_output, axes=1)
+ b_nn_output
)
self.logit = nn_output
return nn_output
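# Minimal sketch (illustration only) of the grouping performed by
# BaseModel.group_labels() above, using plain Python lists. Note that the
# method iterates over set(group_keys), so the order of the returned groups is
# not guaranteed; the labels, scores and keys below are arbitrary examples.
def _example_group_labels():
    labels = [1, 0, 0, 1]
    preds = [0.9, 0.2, 0.4, 0.7]
    group_keys = ['imp1', 'imp1', 'imp2', 'imp2']
    grouped_labels = {k: [] for k in set(group_keys)}
    grouped_preds = {k: [] for k in set(group_keys)}
    for l, p, k in zip(labels, preds, group_keys):
        grouped_labels[k].append(l)
        grouped_preds[k].append(p)
    # e.g. grouped_labels == {'imp1': [1, 0], 'imp2': [0, 1]}
    return grouped_labels, grouped_preds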
| 38.490113
| 118
| 0.551319
|
ebf1dc53aa648ac3a1bb11e3f73666244aecc09d
| 1,652
|
py
|
Python
|
python/datasources/county_adjacency.py
|
adams314/health-equity-tracker
|
2c6b63381a79227009376a255325d43300dda7cf
|
[
"MIT"
] | null | null | null |
python/datasources/county_adjacency.py
|
adams314/health-equity-tracker
|
2c6b63381a79227009376a255325d43300dda7cf
|
[
"MIT"
] | null | null | null |
python/datasources/county_adjacency.py
|
adams314/health-equity-tracker
|
2c6b63381a79227009376a255325d43300dda7cf
|
[
"MIT"
] | null | null | null |
from ingestion import gcs_to_bq_util
from datasources.data_source import DataSource
# Adjacent counties for each county in the United States from US Census data
class CountyAdjacency(DataSource):
@staticmethod
def get_id():
"""Returns the data source's unique id. """
return 'COUNTY_ADJACENCY'
@staticmethod
def get_table_name():
"""Returns the BigQuery table name where the data source's data will
stored. """
return 'county_adjacency'
def write_to_bq(self, dataset, gcs_bucket, filename):
"""Writes county adjacencies to BigQuery from the provided GCS bucket
dataset: The BigQuery dataset to write to
table_name: The name of the biquery table to write to
gcs_bucket: The name of the gcs bucket to read the data from
filename: The name of the file in the gcs bucket to read from"""
frame = gcs_to_bq_util.load_csv_as_dataframe(gcs_bucket, filename, dtype={
'fipscounty': 'string',
'fipsneighbor': 'string'
})
frame = frame[['fipscounty', 'fipsneighbor']]
frame = frame.rename(columns={
'fipscounty': 'county_geoid',
'fipsneighbor': 'neighbor_geoids'
})
frame = frame.groupby('county_geoid', as_index=False).agg(list)
column_types = {
'county_geoid': 'STRING',
'neighbor_geoids': 'STRING'
}
col_modes = {'neighbor_geoids': 'REPEATED'}
gcs_to_bq_util.append_dataframe_to_bq(
frame, dataset, self.get_staging_table_name(), column_types=column_types,
col_modes=col_modes)
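# Minimal sketch (illustration only) of the reshaping done in write_to_bq()
# above: rename the FIPS columns and collapse each county's neighbors into a
# single repeated (list-valued) column. The FIPS codes are arbitrary examples.
def _example_adjacency_reshape():
    import pandas as pd
    frame = pd.DataFrame({
        'fipscounty': ['01001', '01001', '01003'],
        'fipsneighbor': ['01021', '01047', '01003']})
    frame = frame.rename(columns={
        'fipscounty': 'county_geoid',
        'fipsneighbor': 'neighbor_geoids'})
    return frame.groupby('county_geoid', as_index=False).agg(list)
    # county_geoid   neighbor_geoids
    # 01001          ['01021', '01047']
    # 01003          ['01003']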
| 36.711111
| 85
| 0.64891
|
2084a3facb71d7ee0fdfb2fff9b2376583773716
| 5,795
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/models/site_extension_info.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-web/azure/mgmt/web/models/site_extension_info.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-web/azure/mgmt/web/models/site_extension_info.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class SiteExtensionInfo(ProxyOnlyResource):
"""Site Extension Information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param extension_id: Site extension ID.
:type extension_id: str
:param title:
:type title: str
:param extension_type: Site extension type. Possible values include:
'Gallery', 'WebRoot'
:type extension_type: str or ~azure.mgmt.web.models.SiteExtensionType
:param summary: Summary description.
:type summary: str
:param description: Detailed description.
:type description: str
:param version: Version information.
:type version: str
:param extension_url: Extension URL.
:type extension_url: str
:param project_url: Project URL.
:type project_url: str
:param icon_url: Icon URL.
:type icon_url: str
:param license_url: License URL.
:type license_url: str
:param feed_url: Feed URL.
:type feed_url: str
:param authors: List of authors.
:type authors: list[str]
:param installer_command_line_params: Installer command line parameters.
:type installer_command_line_params: str
:param published_date_time: Published timestamp.
:type published_date_time: datetime
:param download_count: Count of downloads.
:type download_count: int
:param local_is_latest_version: <code>true</code> if the local version is
the latest version; <code>false</code> otherwise.
:type local_is_latest_version: bool
:param local_path: Local path.
:type local_path: str
:param installed_date_time: Installed timestamp.
:type installed_date_time: datetime
:param provisioning_state: Provisioning state.
:type provisioning_state: str
:param comment: Site Extension comment.
:type comment: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'extension_id': {'key': 'properties.extension_id', 'type': 'str'},
'title': {'key': 'properties.title', 'type': 'str'},
'extension_type': {'key': 'properties.extension_type', 'type': 'SiteExtensionType'},
'summary': {'key': 'properties.summary', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'extension_url': {'key': 'properties.extension_url', 'type': 'str'},
'project_url': {'key': 'properties.project_url', 'type': 'str'},
'icon_url': {'key': 'properties.icon_url', 'type': 'str'},
'license_url': {'key': 'properties.license_url', 'type': 'str'},
'feed_url': {'key': 'properties.feed_url', 'type': 'str'},
'authors': {'key': 'properties.authors', 'type': '[str]'},
'installer_command_line_params': {'key': 'properties.installer_command_line_params', 'type': 'str'},
'published_date_time': {'key': 'properties.published_date_time', 'type': 'iso-8601'},
'download_count': {'key': 'properties.download_count', 'type': 'int'},
'local_is_latest_version': {'key': 'properties.local_is_latest_version', 'type': 'bool'},
'local_path': {'key': 'properties.local_path', 'type': 'str'},
'installed_date_time': {'key': 'properties.installed_date_time', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'comment': {'key': 'properties.comment', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SiteExtensionInfo, self).__init__(**kwargs)
self.extension_id = kwargs.get('extension_id', None)
self.title = kwargs.get('title', None)
self.extension_type = kwargs.get('extension_type', None)
self.summary = kwargs.get('summary', None)
self.description = kwargs.get('description', None)
self.version = kwargs.get('version', None)
self.extension_url = kwargs.get('extension_url', None)
self.project_url = kwargs.get('project_url', None)
self.icon_url = kwargs.get('icon_url', None)
self.license_url = kwargs.get('license_url', None)
self.feed_url = kwargs.get('feed_url', None)
self.authors = kwargs.get('authors', None)
self.installer_command_line_params = kwargs.get('installer_command_line_params', None)
self.published_date_time = kwargs.get('published_date_time', None)
self.download_count = kwargs.get('download_count', None)
self.local_is_latest_version = kwargs.get('local_is_latest_version', None)
self.local_path = kwargs.get('local_path', None)
self.installed_date_time = kwargs.get('installed_date_time', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.comment = kwargs.get('comment', None)
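# Minimal usage sketch (illustration only): the generated model is populated
# through keyword arguments matching the parameters documented above; the
# values shown here are arbitrary placeholders.
def _example_site_extension_info():
    info = SiteExtensionInfo(
        extension_id='python231',
        title='Python 2.3.1 x86',
        summary='Installs Python as a site extension.')
    return info.extension_id, info.title, info.summary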
| 45.273438
| 108
| 0.639517
|
fc160bcea27c34433d77fd970a778e1b9415f68d
| 17,834
|
py
|
Python
|
pypeit/scripts/coadd_datacube.py
|
NathanSandford/PypeIt
|
89470d27422b7f8662642060b5687a5b2fda27ed
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/scripts/coadd_datacube.py
|
NathanSandford/PypeIt
|
89470d27422b7f8662642060b5687a5b2fda27ed
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/scripts/coadd_datacube.py
|
NathanSandford/PypeIt
|
89470d27422b7f8662642060b5687a5b2fda27ed
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
"""
This script enables the user to convert spec2D FITS files
from IFU instruments into a 3D cube with a defined WCS.
"""
import argparse
from astropy import units
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
import numpy as np
import copy, os
from pypeit import msgs, par, io, spec2dobj
from pypeit.spectrographs.util import load_spectrograph
from pypeit.core import datacube as dc_utils
from pypeit.core.flux_calib import load_extinction_data, extinction_correction
from pypeit.core.flexure import calculate_image_offset
from pypeit.core import parse
from IPython import embed
def parse_args(options=None, return_parser=False):
parser = argparse.ArgumentParser(description='Read in an array of spec2D files and convert them into a datacube',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file', type = str, default=None, help='filename.coadd3d file')
parser.add_argument('--det', default=1, type=int, help="Detector")
parser.add_argument('-o', '--overwrite', default=False, action='store_true',
help='Overwrite any existing files/directories')
if return_parser:
return parser
return parser.parse_args() if options is None else parser.parse_args(options)
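# Minimal usage sketch (illustration only): parsing an explicit argument list
# instead of sys.argv; the .coadd3d file name is a hypothetical placeholder.
def _example_parse_args():
    args = parse_args(['my_reduction.coadd3d', '--det', '2', '--overwrite'])
    return args.file, args.det, args.overwrite   # -> ('my_reduction.coadd3d', 2, True)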
def coadd_cube(files, parset, overwrite=False):
""" Main routine to coadd spec2D files into a 3D datacube
Args:
files (list):
List of all spec2D files
parset (:class:`pypeit.par.core.PypeItPar`):
An instance of the parameter set.
overwrite (bool):
Overwrite the output file, if it exists?
"""
# Get the detector number
det = 1 if parset is None else parset['rdx']['detnum']
# Load the spectrograph
spec2DObj = spec2dobj.Spec2DObj.from_file(files[0], det)
specname = spec2DObj.head0['PYP_SPEC']
spec = load_spectrograph(specname)
# Grab the parset, if not provided
if parset is None: parset = spec.default_pypeit_par()
cubepar = parset['reduce']['cube']
# Check the output file
outfile = cubepar['output_filename'] if ".fits" in cubepar['output_filename'] else cubepar['output_filename']+".fits"
out_whitelight = outfile.replace(".fits", "_whitelight.fits")
if os.path.exists(outfile) and not overwrite:
msgs.error("Output filename already exists:"+msgs.newline()+outfile)
elif os.path.exists(out_whitelight) and cubepar['save_whitelight'] and not overwrite:
msgs.error("Output filename already exists:"+msgs.newline()+out_whitelight)
# Check the reference cube and image exist, if requested
ref_scale = None # This will be used to correct relative scaling among the various input frames
if cubepar['standard_cube'] is not None:
if not os.path.exists(cubepar['standard_cube']):
msgs.error("Standard cube does not exist:" + msgs.newline() + cubepar['reference_cube'])
cube = fits.open(cubepar['standard_cube'])
ref_scale = cube['REFSCALE'].data
if cubepar['reference_image'] is not None:
if not os.path.exists(cubepar['reference_image']):
msgs.error("Reference cube does not exist:" + msgs.newline() + cubepar['reference_image'])
if cubepar['flux_calibrate']:
msgs.error("Flux calibration is not currently implemented" + msgs.newline() +
"Please set 'flux_calibrate = False'")
# prep
numfiles = len(files)
combine = cubepar['combine']
all_ra, all_dec, all_wave = np.array([]), np.array([]), np.array([])
all_sci, all_ivar, all_idx, all_wghts = np.array([]), np.array([]), np.array([]), np.array([])
all_wcs = []
dspat = None if cubepar['spatial_delta'] is None else cubepar['spatial_delta']/3600.0 # binning size on the sky (/3600 to convert to degrees)
dwv = cubepar['wave_delta'] # binning size in wavelength direction (in Angstroms)
wave_ref = None
whitelight_img = None # This is the whitelight image based on all input spec2d frames
weights = np.ones(numfiles) # Weights to use when combining cubes
for ff, fil in enumerate(files):
# Load it up
spec2DObj = spec2dobj.Spec2DObj.from_file(fil, det)
detector = spec2DObj.detector
# Setup for PypeIt imports
msgs.reset(verbosity=2)
if ref_scale is None:
ref_scale = spec2DObj.scaleimg.copy()
# Extract the information
        sciimg = (spec2DObj.sciimg-spec2DObj.skymodel) * (ref_scale/spec2DObj.scaleimg)  # Subtract sky and apply the relative scale correction
ivar = spec2DObj.ivarraw / (ref_scale/spec2DObj.scaleimg)**2
waveimg = spec2DObj.waveimg
bpmmask = spec2DObj.bpmmask
# Grab the slit edges
slits = spec2DObj.slits
wave0 = waveimg[waveimg != 0.0].min()
diff = waveimg[1:, :] - waveimg[:-1, :]
dwv = float(np.median(diff[diff != 0.0]))
msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv))
msgs.info("Constructing slit image")
slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spec2DObj.sci_spat_flexure)
onslit_gpm = (slitid_img_init > 0) & (bpmmask == 0)
# Grab the WCS of this frame
wcs = spec.get_wcs(spec2DObj.head0, slits, detector.platescale, wave0, dwv)
all_wcs.append(copy.deepcopy(wcs))
# Find the largest spatial scale of all images being combined
# TODO :: probably need to put this in the DetectorContainer
pxscl = detector.platescale * parse.parse_binning(detector.binning)[1] / 3600.0 # This should be degrees/pixel
slscl = spec.get_meta_value([spec2DObj.head0], 'slitwid')
if dspat is None:
dspat = max(pxscl, slscl)
elif max(pxscl, slscl) > dspat:
dspat = max(pxscl, slscl)
# Generate an RA/DEC image
msgs.info("Generating RA/DEC image")
raimg, decimg, minmax = slits.get_radec_image(wcs, initial=True, flexure=spec2DObj.sci_spat_flexure)
# Perform the DAR correction
if wave_ref is None:
wave_ref = 0.5*(np.min(waveimg[onslit_gpm]) + np.max(waveimg[onslit_gpm]))
# Get DAR parameters
raval = spec.get_meta_value([spec2DObj.head0], 'ra')
decval = spec.get_meta_value([spec2DObj.head0], 'dec')
obstime = spec.get_meta_value([spec2DObj.head0], 'obstime')
pressure = spec.get_meta_value([spec2DObj.head0], 'pressure')
temperature = spec.get_meta_value([spec2DObj.head0], 'temperature')
rel_humidity = spec.get_meta_value([spec2DObj.head0], 'humidity')
coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
location = spec.location # TODO :: spec.location should probably end up in the TelescopePar (spec.telescope.location)
ra_corr, dec_corr = dc_utils.dar_correction(waveimg[onslit_gpm], coord, obstime, location,
pressure, temperature, rel_humidity, wave_ref=wave_ref)
raimg[onslit_gpm] += ra_corr
decimg[onslit_gpm] += dec_corr
# Get copies of arrays to be saved
wave_ext = waveimg[onslit_gpm].copy()
flux_ext = sciimg[onslit_gpm].copy()
ivar_ext = ivar[onslit_gpm].copy()
# Perform extinction correction
msgs.info("Applying extinction correction")
longitude = spec.telescope['longitude']
latitude = spec.telescope['latitude']
airmass = spec2DObj.head0[spec.meta['airmass']['card']]
extinct = load_extinction_data(longitude, latitude)
# extinction_correction requires the wavelength is sorted
wvsrt = np.argsort(wave_ext)
ext_corr = extinction_correction(wave_ext[wvsrt] * units.AA, airmass, extinct)
# Correct for extinction
flux_sav = flux_ext[wvsrt] * ext_corr
ivar_sav = ivar_ext[wvsrt] / ext_corr ** 2
# sort back to the original ordering
resrt = np.argsort(wvsrt)
# Calculate the weights relative to the zeroth cube
if ff != 0:
weights[ff] = np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2
# Store the information
numpix = raimg[onslit_gpm].size
all_ra = np.append(all_ra, raimg[onslit_gpm].copy())
all_dec = np.append(all_dec, decimg[onslit_gpm].copy())
all_wave = np.append(all_wave, wave_ext.copy())
all_sci = np.append(all_sci, flux_sav[resrt].copy())
all_ivar = np.append(all_ivar, ivar_sav[resrt].copy())
all_idx = np.append(all_idx, ff*np.ones(numpix))
all_wghts = np.append(all_wghts, weights[ff]*np.ones(numpix))
# Grab cos(dec) for convenience
cosdec = np.cos(np.mean(all_dec) * np.pi / 180.0)
# Register spatial offsets between all frames if several frames are being combined
if combine:
# Check if a reference whitelight image should be used to register the offsets
if cubepar["reference_image"] is None:
# Generate white light images
whitelight_imgs, _, _ = dc_utils.make_whitelight(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx,
dspat)
# ref_idx will be the index of the cube with the highest S/N
ref_idx = np.argmax(weights)
reference_image = whitelight_imgs[:, :, ref_idx].copy()
msgs.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1))
else:
ref_idx = -1 # Don't use an index
# Load reference information
reference_image, whitelight_imgs, wlwcs = \
dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat,
cubepar['reference_image'])
msgs.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'")
# Calculate the image offsets - check the reference is a zero shift
ra_shift_ref, dec_shift_ref = calculate_image_offset(reference_image.copy(), reference_image.copy())
for ff in range(numfiles):
# Don't correlate the reference image with itself
if ff == ref_idx:
continue
# Calculate the shift
ra_shift, dec_shift = calculate_image_offset(whitelight_imgs[:, :, ff], reference_image.copy())
# Convert to reference
ra_shift -= ra_shift_ref
dec_shift -= dec_shift_ref
            # Convert the pixel shift to a shift in degrees
ra_shift *= dspat/cosdec
dec_shift *= dspat
msgs.info("Spatial shift of cube #{0:d}: RA, DEC (arcsec) = {1:+0.3f}, {2:+0.3f}".format(ff+1, ra_shift*3600.0, dec_shift*3600.0))
# Apply the shift
all_ra[all_idx == ff] += ra_shift
all_dec[all_idx == ff] += dec_shift
# Generate a white light image of *all* data
msgs.info("Generating global white light image")
if cubepar["reference_image"] is None:
whitelight_img, _, wlwcs = dc_utils.make_whitelight(all_ra, all_dec, all_wave, all_sci, all_wghts,
np.zeros(all_ra.size), dspat)
else:
_, whitelight_img, wlwcs = \
dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, np.zeros(all_ra.size),
dspat, cubepar['reference_image'])
# Calculate the relative spectral weights of all pixels
all_wghts = dc_utils.compute_weights(all_ra, all_dec, all_wave, all_sci, all_ivar, all_idx,
whitelight_img[:, :, 0], dspat, dwv,
relative_weights=cubepar['relative_weights'])
# Check if a whitelight image should be saved
if cubepar['save_whitelight']:
# Check if the white light image still needs to be generated - if so, generate it now
if whitelight_img is None:
msgs.info("Generating global white light image")
if cubepar["reference_image"] is None:
whitelight_img, _, wlwcs = dc_utils.make_whitelight(all_ra, all_dec, all_wave, all_sci, all_wghts,
np.zeros(all_ra.size), dspat)
else:
_, whitelight_img, wlwcs = \
dc_utils.make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts,
np.zeros(all_ra.size),
dspat, cubepar['reference_image'])
# Prepare and save the fits file
msgs.info("Saving white light image as: {0:s}".format(out_whitelight))
img_hdu = fits.PrimaryHDU(whitelight_img.T, header=wlwcs.to_header())
img_hdu.writeto(out_whitelight, overwrite=overwrite)
# Setup the cube ranges
ra_min = cubepar['ra_min'] if cubepar['ra_min'] is not None else np.min(all_ra)
ra_max = cubepar['ra_max'] if cubepar['ra_max'] is not None else np.max(all_ra)
dec_min = cubepar['dec_min'] if cubepar['dec_min'] is not None else np.min(all_dec)
dec_max = cubepar['dec_max'] if cubepar['dec_max'] is not None else np.max(all_dec)
wav_min = cubepar['wave_min'] if cubepar['wave_min'] is not None else np.min(all_wave)
wav_max = cubepar['wave_max'] if cubepar['wave_max'] is not None else np.max(all_wave)
if cubepar['wave_delta'] is not None: dwv = cubepar['wave_delta']
# Generate a master WCS to register all frames
coord_min = [ra_min, dec_min, wav_min]
coord_dlt = [dspat, dspat, dwv]
masterwcs = dc_utils.generate_masterWCS(coord_min, coord_dlt, name=specname)
msgs.info(msgs.newline()+"-"*40 +
msgs.newline() + "Parameters of the WCS:" +
msgs.newline() + "RA min, max = {0:f}, {1:f}".format(ra_min, ra_max) +
msgs.newline() + "DEC min, max = {0:f}, {1:f}".format(dec_min, dec_max) +
msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(wav_min, wav_max) +
msgs.newline() + "Spaxel size = {0:f}''".format(3600.0*dspat) +
msgs.newline() + "Wavelength step = {0:f} A".format(dwv) +
msgs.newline() + "-" * 40)
# Generate the output binning
if combine:
numra = int((ra_max-ra_min) * cosdec / dspat)
numdec = int((dec_max-dec_min)/dspat)
numwav = int((wav_max-wav_min)/dwv)
xbins = np.arange(1+numra)-0.5
ybins = np.arange(1+numdec)-0.5
spec_bins = np.arange(1+numwav)-0.5
else:
slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))
numwav = int((np.max(waveimg) - wave0) / dwv)
xbins, ybins, spec_bins = spec.get_datacube_bins(slitlength, minmax, numwav)
# Make the cube
msgs.info("Generating pixel coordinates")
if combine:
pix_coord = masterwcs.wcs_world2pix(all_ra, all_dec, all_wave * 1.0E-10, 0)
hdr = masterwcs.to_header()
else:
pix_coord = wcs.wcs_world2pix(np.vstack((all_ra, all_dec, all_wave*1.0E-10)).T, 0)
hdr = wcs.to_header()
# Find the NGP coordinates for all input pixels
msgs.info("Generating data cube")
bins = (xbins, ybins, spec_bins)
datacube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_sci*all_wghts)
norm, edges = np.histogramdd(pix_coord, bins=bins, weights=all_wghts)
norm_cube = (norm > 0) / (norm + (norm == 0))
datacube *= norm_cube
# Create the variance cube, including weights
msgs.info("Generating variance cube")
all_var = (all_ivar > 0) / (all_ivar + (all_ivar == 0))
var_cube, edges = np.histogramdd(pix_coord, bins=bins, weights=all_var * all_wghts**2)
var_cube *= norm_cube**2
# Save the datacube
debug = False
if debug:
datacube_resid, edges = np.histogramdd(pix_coord, bins=(xbins, ybins, spec_bins), weights=all_sci*np.sqrt(all_ivar))
norm, edges = np.histogramdd(pix_coord, bins=(xbins, ybins, spec_bins))
norm_cube = (norm > 0) / (norm + (norm == 0))
outfile = "datacube_resid.fits"
msgs.info("Saving datacube as: {0:s}".format(outfile))
hdu = fits.PrimaryHDU((datacube_resid*norm_cube).T, header=masterwcs.to_header())
hdu.writeto(outfile, overwrite=overwrite)
msgs.info("Saving datacube as: {0:s}".format(outfile))
final_cube = dc_utils.DataCube(datacube.T, var_cube.T, specname,
refscale=ref_scale, fluxed=cubepar['flux_calibrate'])
final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite)
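# Minimal sketch (illustration only) of the weighted nearest-grid-point
# binning used in coadd_cube() above: one histogramdd call accumulates the
# weighted flux, a second accumulates the weights, and the (norm > 0) guard
# avoids dividing empty voxels by zero. Coordinates, fluxes and bin edges here
# are arbitrary stand-ins.
def _example_ngp_binning():
    coords = np.array([[0.2, 0.3, 0.1], [0.2, 0.3, 0.1], [0.8, 0.7, 0.9]])
    flux = np.array([1.0, 3.0, 2.0])
    wghts = np.array([1.0, 1.0, 2.0])
    bins = (np.array([0.0, 0.5, 1.0]),) * 3
    cube, _ = np.histogramdd(coords, bins=bins, weights=flux * wghts)
    norm, _ = np.histogramdd(coords, bins=bins, weights=wghts)
    return cube * ((norm > 0) / (norm + (norm == 0)))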
def main(args):
if args.file is None:
msgs.error('You must input a coadd3d file')
else:
spectrograph_name, config_lines, spec2d_files = io.read_spec2d_file(args.file, filetype="coadd3d")
spectrograph = load_spectrograph(spectrograph_name)
# Parameters
spectrograph_def_par = spectrograph.default_pypeit_par()
parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(),
merge_with=config_lines)
# If detector was passed as an argument override whatever was in the coadd3d file
if args.det is not None:
msgs.info("Restricting to detector={}".format(args.det))
parset['rdx']['detnum'] = int(args.det)
# Coadd the files
coadd_cube(spec2d_files, parset, overwrite=args.overwrite)
| 49.265193
| 147
| 0.639172
|
59b40d5df2810c48036390ec2c4d829d5b3e23f0
| 58,538
|
py
|
Python
|
resources/WPy32/python-3.10.2/Lib/threading.py
|
eladkarako/yt-dlp_kit
|
6365651111ef4d2f94335cf38bf4d9b0136d42d2
|
[
"Unlicense"
] | 1
|
2022-03-26T15:43:50.000Z
|
2022-03-26T15:43:50.000Z
|
resources/WPy32/python-3.10.2/Lib/threading.py
|
eladkarako/yt-dlp_kit
|
6365651111ef4d2f94335cf38bf4d9b0136d42d2
|
[
"Unlicense"
] | null | null | null |
resources/WPy32/python-3.10.2/Lib/threading.py
|
eladkarako/yt-dlp_kit
|
6365651111ef4d2f94335cf38bf4d9b0136d42d2
|
[
"Unlicense"
] | 1
|
2022-03-28T19:28:45.000Z
|
2022-03-28T19:28:45.000Z
|
"""Thread module emulating a subset of Java's threading model."""
import os as _os
import sys as _sys
import _thread
import functools
from time import monotonic as _time
from _weakrefset import WeakSet
from itertools import islice as _islice, count as _count
try:
from _collections import deque as _deque
except ImportError:
from collections import deque as _deque
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k), so this module provides them as
# aliases for the PEP 8 compliant names.
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['get_ident', 'active_count', 'Condition', 'current_thread',
'enumerate', 'main_thread', 'TIMEOUT_MAX',
'Event', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Barrier', 'BrokenBarrierError', 'Timer', 'ThreadError',
'setprofile', 'settrace', 'local', 'stack_size',
'excepthook', 'ExceptHookArgs', 'gettrace', 'getprofile']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
_set_sentinel = _thread._set_sentinel
get_ident = _thread.get_ident
try:
get_native_id = _thread.get_native_id
_HAVE_THREAD_NATIVE_ID = True
__all__.append('get_native_id')
except AttributeError:
_HAVE_THREAD_NATIVE_ID = False
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def getprofile():
"""Get the profiler function as set by threading.setprofile()."""
return _profile_hook
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
def gettrace():
"""Get the trace function as set by threading.settrace()."""
return _trace_hook
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
if _CRLock is None:
return _PyRLock(*args, **kwargs)
return _CRLock(*args, **kwargs)
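# Minimal usage sketch (illustration only): a reentrant lock may be acquired
# again by the thread that already owns it, so code holding the lock can call
# other code that also takes it without blocking.
def _example_rlock_reentry():
    lock = RLock()
    with lock:          # first acquisition
        with lock:      # same thread re-acquires without blocking
            reentered = True
    return reentered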
class _RLock:
"""This class implements reentrant lock objects.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self):
self._block = _allocate_lock()
self._owner = None
self._count = 0
def __repr__(self):
owner = self._owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s %s.%s object owner=%r count=%d at %s>" % (
"locked" if self._block.locked() else "unlocked",
self.__class__.__module__,
self.__class__.__qualname__,
owner,
self._count,
hex(id(self))
)
def _at_fork_reinit(self):
self._block._at_fork_reinit()
self._owner = None
self._count = 0
def acquire(self, blocking=True, timeout=-1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
When invoked with the floating-point timeout argument set to a positive
value, block for at most the number of seconds specified by timeout
and as long as the lock cannot be acquired. Return true if the lock has
been acquired, false if the timeout has elapsed.
"""
me = get_ident()
if self._owner == me:
self._count += 1
return 1
rc = self._block.acquire(blocking, timeout)
if rc:
self._owner = me
self._count = 1
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self._owner != get_ident():
raise RuntimeError("cannot release un-acquired lock")
self._count = count = self._count - 1
if not count:
self._owner = None
self._block.release()
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, state):
self._block.acquire()
self._count, self._owner = state
def _release_save(self):
if self._count == 0:
raise RuntimeError("cannot release un-acquired lock")
count = self._count
self._count = 0
owner = self._owner
self._owner = None
self._block.release()
return (count, owner)
def _is_owned(self):
return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
"""Class that implements a condition variable.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
def __init__(self, lock=None):
if lock is None:
lock = RLock()
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self._waiters = _deque()
def _at_fork_reinit(self):
self._lock._at_fork_reinit()
self._waiters.clear()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
def _release_save(self):
self._lock.release() # No state to save
def _acquire_restore(self, x):
self._lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if _lock doesn't have _is_owned().
if self._lock.acquire(False):
self._lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notify_all() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self._waiters.append(waiter)
saved_state = self._release_save()
gotit = False
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
gotit = True
else:
if timeout > 0:
gotit = waiter.acquire(True, timeout)
else:
gotit = waiter.acquire(False)
return gotit
finally:
self._acquire_restore(saved_state)
if not gotit:
try:
self._waiters.remove(waiter)
except ValueError:
pass
def wait_for(self, predicate, timeout=None):
"""Wait until a condition evaluates to True.
        predicate should be a callable whose result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
all_waiters = self._waiters
waiters_to_notify = _deque(_islice(all_waiters, n))
if not waiters_to_notify:
return
for waiter in waiters_to_notify:
waiter.release()
try:
all_waiters.remove(waiter)
except ValueError:
pass
def notify_all(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self._waiters))
def notifyAll(self):
"""Wake up all threads waiting on this condition.
This method is deprecated, use notify_all() instead.
"""
import warnings
warnings.warn('notifyAll() is deprecated, use notify_all() instead',
DeprecationWarning, stacklevel=2)
self.notify_all()
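# --- Editor's illustrative sketch (not part of the original module): a minimal
# producer/consumer pattern using the wait()/notify() protocol documented
# above. `queue_items` and `produce_item` are hypothetical placeholder names.
#
#   cond = Condition()
#   queue_items = []
#
#   def consume_one():
#       with cond:
#           while not queue_items:      # re-check: wakeups may be spurious
#               cond.wait()
#           return queue_items.pop(0)
#
#   def produce_one():
#       with cond:
#           queue_items.append(produce_item())
#           cond.notify()               # wake a single waiting consumer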
class Semaphore:
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = Condition(Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= 1
rc = True
return rc
__enter__ = acquire
def release(self, n=1):
"""Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
if n < 1:
raise ValueError('n must be one or more')
with self._cond:
self._value += n
for i in range(n):
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
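# --- Editor's illustrative sketch (not part of the original module): a
# Semaphore used as a context manager to cap concurrency, relying on the
# __enter__/__exit__ defined above. `fetch` and `urls` are hypothetical.
#
#   pool = Semaphore(4)                 # allow at most 4 workers at once
#
#   def worker(url):
#       with pool:                      # acquire on entry, release on exit
#           fetch(url)
#
#   for u in urls:
#       Thread(target=worker, args=(u,)).start()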
class BoundedSemaphore(Semaphore):
"""Implements a bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self, n=1):
"""Release a semaphore, incrementing the internal counter by one or more.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
if n < 1:
raise ValueError('n must be one or more')
with self._cond:
if self._value + n > self._initial_value:
raise ValueError("Semaphore released too many times")
self._value += n
for i in range(n):
self._cond.notify()
class Event:
"""Class implementing event objects.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true. The flag is initially false.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self):
self._cond = Condition(Lock())
self._flag = False
def _at_fork_reinit(self):
# Private method called by Thread._reset_internal_locks()
self._cond._at_fork_reinit()
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
def isSet(self):
"""Return true if and only if the internal flag is true.
        This method is deprecated, use is_set() instead.
"""
import warnings
warnings.warn('isSet() is deprecated, use is_set() instead',
DeprecationWarning, stacklevel=2)
return self.is_set()
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
with self._cond:
self._flag = True
self._cond.notify_all()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
with self._cond:
self._flag = False
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
with self._cond:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
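# --- Editor's illustrative sketch (not part of the original module): an Event
# used as a one-shot start signal, following the set()/wait() semantics above.
#
#   go = Event()
#
#   def worker():
#       go.wait()                       # block until the flag becomes true
#       ...                             # placeholder for real work
#
#   Thread(target=worker).start()
#   go.set()                            # release every waiting worker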
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
"""Implements a Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and are simultaneously awoken once they
have all made that call.
"""
def __init__(self, parties, action=None, timeout=None):
"""Create a barrier, initialised to 'parties' threads.
'action' is a callable which, when supplied, will be called by one of
the threads after they have all entered the barrier and just prior to
releasing them all. If a 'timeout' is provided, it is used as the
default for all subsequent 'wait()' calls.
"""
self._cond = Condition(Lock())
self._action = action
self._timeout = timeout
self._parties = parties
self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
self._count = 0
def wait(self, timeout=None):
"""Wait for the barrier.
When the specified number of threads have started waiting, they are all
simultaneously awoken. If an 'action' was provided for the barrier, one
of the threads will have executed that callback prior to returning.
Returns an individual index number from 0 to 'parties-1'.
"""
if timeout is None:
timeout = self._timeout
with self._cond:
self._enter() # Block while the barrier drains.
index = self._count
self._count += 1
try:
if index + 1 == self._parties:
# We release the barrier
self._release()
else:
# We wait until someone releases us
self._wait(timeout)
return index
finally:
self._count -= 1
# Wake up any threads waiting for barrier to drain.
self._exit()
# Block until the barrier is ready for us, or raise an exception
# if it is broken.
def _enter(self):
while self._state in (-1, 1):
# It is draining or resetting, wait until done
self._cond.wait()
#see if the barrier is in a broken state
if self._state < 0:
raise BrokenBarrierError
assert self._state == 0
# Optionally run the 'action' and release the threads waiting
# in the barrier.
def _release(self):
try:
if self._action:
self._action()
# enter draining state
self._state = 1
self._cond.notify_all()
except:
#an exception during the _action handler. Break and reraise
self._break()
raise
# Wait in the barrier until we are released. Raise an exception
# if the barrier is reset or broken.
def _wait(self, timeout):
if not self._cond.wait_for(lambda : self._state != 0, timeout):
#timed out. Break the barrier
self._break()
raise BrokenBarrierError
if self._state < 0:
raise BrokenBarrierError
assert self._state == 1
# If we are the last thread to exit the barrier, signal any threads
# waiting for the barrier to drain.
def _exit(self):
if self._count == 0:
if self._state in (-1, 1):
#resetting or draining
self._state = 0
self._cond.notify_all()
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all()
def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break()
def _break(self):
# An internal error was detected. The barrier is set to
# a broken state all parties awakened.
self._state = -2
self._cond.notify_all()
@property
def parties(self):
"""Return the number of threads required to trip the barrier."""
return self._parties
@property
def n_waiting(self):
"""Return the number of threads currently waiting at the barrier."""
# We don't need synchronization here since this is an ephemeral result
# anyway. It returns the correct value in the steady state.
if self._state == 0:
return self._count
return 0
@property
def broken(self):
"""Return True if the barrier is in a broken state."""
return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
pass
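# --- Editor's illustrative sketch (not part of the original module): three
# threads meeting at a Barrier; wait() returns a distinct index per thread.
# `prepare` is a hypothetical placeholder.
#
#   barrier = Barrier(3)
#
#   def phase_worker():
#       prepare()
#       index = barrier.wait()          # blocks until all 3 have arrived
#       if index == 0:
#           print("all three released together")
#
#   for _ in range(3):
#       Thread(target=phase_worker).start()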
# Helper to generate new thread names
_counter = _count(1).__next__
def _newname(name_template):
return name_template % _counter()
# Active thread administration.
#
# bpo-44422: Use a reentrant lock to allow reentrant calls to functions like
# threading.enumerate().
_active_limbo_lock = RLock()
_active = {} # maps thread id to Thread object
_limbo = {}
_dangling = WeakSet()
# Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()
# to wait until all Python thread states get deleted:
# see Thread._set_tstate_lock().
_shutdown_locks_lock = _allocate_lock()
_shutdown_locks = set()
def _maintain_shutdown_locks():
"""
Drop any shutdown locks that don't correspond to running threads anymore.
Calling this from time to time avoids an ever-growing _shutdown_locks
set when Thread objects are not joined explicitly. See bpo-37788.
This must be called with _shutdown_locks_lock acquired.
"""
# If a lock was released, the corresponding thread has exited
to_remove = [lock for lock in _shutdown_locks if not lock.locked()]
_shutdown_locks.difference_update(to_remove)
# Main class for threads
class Thread:
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion. There are two ways
to specify the activity: by passing a callable object to the constructor, or
by overriding the run() method in a subclass.
"""
_initialized = False
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, *, daemon=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
if kwargs is None:
kwargs = {}
if name:
name = str(name)
else:
name = _newname("Thread-%d")
if target is not None:
try:
target_name = target.__name__
name += f" ({target_name})"
except AttributeError:
pass
self._target = target
self._name = name
self._args = args
self._kwargs = kwargs
if daemon is not None:
self._daemonic = daemon
else:
self._daemonic = current_thread().daemon
self._ident = None
if _HAVE_THREAD_NATIVE_ID:
self._native_id = None
self._tstate_lock = None
self._started = Event()
self._is_stopped = False
self._initialized = True
# Copy of sys.stderr used by self._invoke_excepthook()
self._stderr = _sys.stderr
self._invoke_excepthook = _make_invoke_excepthook()
# For debugging and _after_fork()
_dangling.add(self)
def _reset_internal_locks(self, is_alive):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
self._started._at_fork_reinit()
if is_alive:
# bpo-42350: If the fork happens when the thread is already stopped
# (ex: after threading._shutdown() has been called), _tstate_lock
# is None. Do nothing in this case.
if self._tstate_lock is not None:
self._tstate_lock._at_fork_reinit()
self._tstate_lock.acquire()
else:
# The thread isn't alive after fork: it doesn't have a tstate
# anymore.
self._is_stopped = True
self._tstate_lock = None
def __repr__(self):
assert self._initialized, "Thread.__init__() was not called"
status = "initial"
if self._started.is_set():
status = "started"
self.is_alive() # easy way to get ._is_stopped set when appropriate
if self._is_stopped:
status = "stopped"
if self._daemonic:
status += " daemon"
if self._ident is not None:
status += " %s" % self._ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self._initialized:
raise RuntimeError("thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("threads can only be started once")
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self._bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self._started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self._target is not None:
self._target(*self._args, **self._kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self._target, self._args, self._kwargs
def _bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# _bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# _bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self._bootstrap_inner()
except:
if self._daemonic and _sys is None:
return
raise
def _set_ident(self):
self._ident = get_ident()
if _HAVE_THREAD_NATIVE_ID:
def _set_native_id(self):
self._native_id = get_native_id()
def _set_tstate_lock(self):
"""
Set a lock object which will be released by the interpreter when
the underlying thread state (see pystate.h) gets deleted.
"""
self._tstate_lock = _set_sentinel()
self._tstate_lock.acquire()
if not self.daemon:
with _shutdown_locks_lock:
_maintain_shutdown_locks()
_shutdown_locks.add(self._tstate_lock)
def _bootstrap_inner(self):
try:
self._set_ident()
self._set_tstate_lock()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
self._started.set()
with _active_limbo_lock:
_active[self._ident] = self
del _limbo[self]
if _trace_hook:
_sys.settrace(_trace_hook)
if _profile_hook:
_sys.setprofile(_profile_hook)
try:
self.run()
except:
self._invoke_excepthook(self)
finally:
with _active_limbo_lock:
try:
# We don't call self._delete() because it also
# grabs _active_limbo_lock.
del _active[get_ident()]
except:
pass
def _stop(self):
# After calling ._stop(), .is_alive() returns False and .join() returns
# immediately. ._tstate_lock must be released before calling ._stop().
#
# Normal case: C code at the end of the thread's life
# (release_sentinel in _threadmodule.c) releases ._tstate_lock, and
# that's detected by our ._wait_for_tstate_lock(), called by .join()
# and .is_alive(). Any number of threads _may_ call ._stop()
# simultaneously (for example, if multiple threads are blocked in
# .join() calls), and they're not serialized. That's harmless -
# they'll just make redundant rebindings of ._is_stopped and
# ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the
# "assert self._is_stopped" in ._wait_for_tstate_lock() always works
# (the assert is executed only if ._tstate_lock is None).
#
# Special case: _main_thread releases ._tstate_lock via this
# module's _shutdown() function.
lock = self._tstate_lock
if lock is not None:
assert not lock.locked()
self._is_stopped = True
self._tstate_lock = None
if not self.daemon:
with _shutdown_locks_lock:
# Remove our lock and other released locks from _shutdown_locks
_maintain_shutdown_locks()
def _delete(self):
"Remove current thread from the dict of currently running threads."
with _active_limbo_lock:
del _active[get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
is_alive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if not self._started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if timeout is None:
self._wait_for_tstate_lock()
else:
# the behavior of a negative timeout isn't documented, but
# historically .join(timeout=x) for x<0 has acted as if timeout=0
self._wait_for_tstate_lock(timeout=max(timeout, 0))
def _wait_for_tstate_lock(self, block=True, timeout=-1):
# Issue #18808: wait for the thread state to be gone.
# At the end of the thread's life, after all knowledge of the thread
# is removed from C data structures, C code releases our _tstate_lock.
# This method passes its arguments to _tstate_lock.acquire().
# If the lock is acquired, the C code is done, and self._stop() is
# called. That sets ._is_stopped to True, and ._tstate_lock to None.
lock = self._tstate_lock
if lock is None:
# already determined that the C code is done
assert self._is_stopped
return
try:
if lock.acquire(block, timeout):
lock.release()
self._stop()
except:
if lock.locked():
# bpo-45274: lock.acquire() acquired the lock, but the function
# was interrupted with an exception before reaching the
# lock.release(). It can happen if a signal handler raises an
# exception, like CTRL+C which raises KeyboardInterrupt.
lock.release()
self._stop()
raise
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self._initialized, "Thread.__init__() not called"
return self._name
@name.setter
def name(self, name):
assert self._initialized, "Thread.__init__() not called"
self._name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self._initialized, "Thread.__init__() not called"
return self._ident
if _HAVE_THREAD_NATIVE_ID:
@property
def native_id(self):
"""Native integral thread ID of this thread, or None if it has not been started.
This is a non-negative integer. See the get_native_id() function.
This represents the Thread ID as reported by the kernel.
"""
assert self._initialized, "Thread.__init__() not called"
return self._native_id
def is_alive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. See also the module function
enumerate().
"""
assert self._initialized, "Thread.__init__() not called"
if self._is_stopped or not self._started.is_set():
return False
self._wait_for_tstate_lock(False)
return not self._is_stopped
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread.
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when only daemon threads are left.
"""
assert self._initialized, "Thread.__init__() not called"
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
if not self._initialized:
raise RuntimeError("Thread.__init__() not called")
if self._started.is_set():
raise RuntimeError("cannot set daemon status of active thread")
self._daemonic = daemonic
def isDaemon(self):
"""Return whether this thread is a daemon.
This method is deprecated, use the daemon attribute instead.
"""
import warnings
warnings.warn('isDaemon() is deprecated, get the daemon attribute instead',
DeprecationWarning, stacklevel=2)
return self.daemon
def setDaemon(self, daemonic):
"""Set whether this thread is a daemon.
This method is deprecated, use the .daemon property instead.
"""
import warnings
warnings.warn('setDaemon() is deprecated, set the daemon attribute instead',
DeprecationWarning, stacklevel=2)
self.daemon = daemonic
def getName(self):
"""Return a string used for identification purposes only.
This method is deprecated, use the name attribute instead.
"""
import warnings
warnings.warn('getName() is deprecated, get the name attribute instead',
DeprecationWarning, stacklevel=2)
return self.name
def setName(self, name):
"""Set the name string for this thread.
This method is deprecated, use the name attribute instead.
"""
import warnings
warnings.warn('setName() is deprecated, set the name attribute instead',
DeprecationWarning, stacklevel=2)
self.name = name
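# --- Editor's illustrative sketch (not part of the original module): the two
# ways of specifying a thread's activity described in the class docstring --
# passing a target callable, or subclassing and overriding run().
#
#   def work(n):
#       print("working on", n)
#
#   t = Thread(target=work, args=(42,))
#   t.start()
#   t.join()
#
#   class Worker(Thread):
#       def run(self):
#           print("running in", self.name)
#
#   Worker().start()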
try:
from _thread import (_excepthook as excepthook,
_ExceptHookArgs as ExceptHookArgs)
except ImportError:
# Simple Python implementation if _thread._excepthook() is not available
from traceback import print_exception as _print_exception
from collections import namedtuple
_ExceptHookArgs = namedtuple(
'ExceptHookArgs',
'exc_type exc_value exc_traceback thread')
def ExceptHookArgs(args):
return _ExceptHookArgs(*args)
def excepthook(args, /):
"""
Handle uncaught Thread.run() exception.
"""
if args.exc_type == SystemExit:
# silently ignore SystemExit
return
if _sys is not None and _sys.stderr is not None:
stderr = _sys.stderr
elif args.thread is not None:
stderr = args.thread._stderr
if stderr is None:
# do nothing if sys.stderr is None and sys.stderr was None
# when the thread was created
return
else:
# do nothing if sys.stderr is None and args.thread is None
return
if args.thread is not None:
name = args.thread.name
else:
name = get_ident()
print(f"Exception in thread {name}:",
file=stderr, flush=True)
_print_exception(args.exc_type, args.exc_value, args.exc_traceback,
file=stderr)
stderr.flush()
# Original value of threading.excepthook
__excepthook__ = excepthook
def _make_invoke_excepthook():
# Create a local namespace to ensure that variables remain alive
# when _invoke_excepthook() is called, even if it is called late during
# Python shutdown. It is mostly needed for daemon threads.
old_excepthook = excepthook
old_sys_excepthook = _sys.excepthook
if old_excepthook is None:
raise RuntimeError("threading.excepthook is None")
if old_sys_excepthook is None:
raise RuntimeError("sys.excepthook is None")
sys_exc_info = _sys.exc_info
local_print = print
local_sys = _sys
def invoke_excepthook(thread):
global excepthook
try:
hook = excepthook
if hook is None:
hook = old_excepthook
args = ExceptHookArgs([*sys_exc_info(), thread])
hook(args)
except Exception as exc:
exc.__suppress_context__ = True
del exc
if local_sys is not None and local_sys.stderr is not None:
stderr = local_sys.stderr
else:
stderr = thread._stderr
local_print("Exception in threading.excepthook:",
file=stderr, flush=True)
if local_sys is not None and local_sys.excepthook is not None:
sys_excepthook = local_sys.excepthook
else:
sys_excepthook = old_sys_excepthook
sys_excepthook(*sys_exc_info())
finally:
# Break reference cycle (exception stored in a variable)
args = None
return invoke_excepthook
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=None, kwargs=None)
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=None, kwargs=None):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args if args is not None else []
self.kwargs = kwargs if kwargs is not None else {}
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet."""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
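# --- Editor's illustrative sketch (not part of the original module), mirroring
# the Timer docstring above:
#
#   def hello():
#       print("hello, world")
#
#   t = Timer(30.0, hello)
#   t.start()                           # hello() fires after ~30 seconds
#   t.cancel()                          # or stop it while still waiting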
# Special thread class to represent the main thread
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread", daemon=False)
self._set_tstate_lock()
self._started.set()
self._set_ident()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
with _active_limbo_lock:
_active[self._ident] = self
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conforming to the previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
self._started.set()
self._set_ident()
if _HAVE_THREAD_NATIVE_ID:
self._set_native_id()
with _active_limbo_lock:
_active[self._ident] = self
def _stop(self):
pass
def is_alive(self):
assert not self._is_stopped and self._started.is_set()
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[get_ident()]
except KeyError:
return _DummyThread()
def currentThread():
"""Return the current Thread object, corresponding to the caller's thread of control.
This function is deprecated, use current_thread() instead.
"""
import warnings
warnings.warn('currentThread() is deprecated, use current_thread() instead',
DeprecationWarning, stacklevel=2)
return current_thread()
def active_count():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
def activeCount():
"""Return the number of Thread objects currently alive.
This function is deprecated, use active_count() instead.
"""
import warnings
warnings.warn('activeCount() is deprecated, use active_count() instead',
DeprecationWarning, stacklevel=2)
return active_count()
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return list(_active.values()) + list(_limbo.values())
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return list(_active.values()) + list(_limbo.values())
_threading_atexits = []
_SHUTTING_DOWN = False
def _register_atexit(func, *arg, **kwargs):
"""CPython internal: register *func* to be called before joining threads.
The registered *func* is called with its arguments just before all
    non-daemon threads are joined in `_shutdown()`. It serves a similar
purpose to `atexit.register()`, but its functions are called prior to
threading shutdown instead of interpreter shutdown.
For similarity to atexit, the registered functions are called in reverse.
"""
if _SHUTTING_DOWN:
raise RuntimeError("can't register atexit after shutdown")
call = functools.partial(func, *arg, **kwargs)
_threading_atexits.append(call)
from _thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_main_thread = _MainThread()
def _shutdown():
"""
Wait until the Python thread state of all non-daemon threads get deleted.
"""
# Obscure: other threads may be waiting to join _main_thread. That's
# dubious, but some code does it. We can't wait for C code to release
# the main thread's tstate_lock - that won't happen until the interpreter
# is nearly dead. So we release it here. Note that just calling _stop()
# isn't enough: other threads may already be waiting on _tstate_lock.
if _main_thread._is_stopped:
# _shutdown() was already called
return
global _SHUTTING_DOWN
_SHUTTING_DOWN = True
# Call registered threading atexit functions before threads are joined.
# Order is reversed, similar to atexit.
for atexit_call in reversed(_threading_atexits):
atexit_call()
# Main thread
if _main_thread.ident == get_ident():
tlock = _main_thread._tstate_lock
# The main thread isn't finished yet, so its thread state lock can't
# have been released.
assert tlock is not None
assert tlock.locked()
tlock.release()
_main_thread._stop()
else:
# bpo-1596321: _shutdown() must be called in the main thread.
# If the threading module was not imported by the main thread,
# _main_thread is the thread which imported the threading module.
        # In this case, ignore _main_thread, similar behavior to threads
# spawned by C libraries or using _thread.start_new_thread().
pass
    # Join all non-daemon threads
while True:
with _shutdown_locks_lock:
locks = list(_shutdown_locks)
_shutdown_locks.clear()
if not locks:
break
for lock in locks:
# mimic Thread.join()
lock.acquire()
lock.release()
# new threads can be spawned while we were waiting for the other
# threads to complete
def main_thread():
"""Return the main thread object.
In normal conditions, the main thread is the thread from which the
Python interpreter was started.
"""
return _main_thread
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
"""
Cleanup threading module state that should not exist after a fork.
"""
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock, _main_thread
global _shutdown_locks_lock, _shutdown_locks
_active_limbo_lock = RLock()
# fork() only copied the current thread; clear references to others.
new_active = {}
try:
current = _active[get_ident()]
except KeyError:
# fork() was called in a thread which was not spawned
# by threading.Thread. For example, a thread spawned
# by thread.start_new_thread().
current = _MainThread()
_main_thread = current
# reset _shutdown() locks: threads re-register their _tstate_lock below
_shutdown_locks_lock = _allocate_lock()
_shutdown_locks = set()
with _active_limbo_lock:
# Dangling thread instances must still have their locks reset,
# because someone may join() them.
threads = set(_enumerate())
threads.update(_dangling)
for thread in threads:
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
thread._reset_internal_locks(True)
ident = get_ident()
thread._ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._reset_internal_locks(False)
thread._stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
if hasattr(_os, "register_at_fork"):
_os.register_at_fork(after_in_child=_after_fork)
| 35.71568
| 93
| 0.612594
|
eca8d98364b0e3c8d120dd66c07005a1980de419
| 17,630
|
py
|
Python
|
keras_retinanet/bin/train.py
|
Muhannes/keras-retinanet
|
e8d6418a9a7170cb2d6355bbcb62b29cf7cded99
|
[
"Apache-2.0"
] | 1
|
2019-01-03T08:48:15.000Z
|
2019-01-03T08:48:15.000Z
|
keras_retinanet/bin/train.py
|
Muhannes/keras-retinanet
|
e8d6418a9a7170cb2d6355bbcb62b29cf7cded99
|
[
"Apache-2.0"
] | null | null | null |
keras_retinanet/bin/train.py
|
Muhannes/keras-retinanet
|
e8d6418a9a7170cb2d6355bbcb62b29cf7cded99
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import functools
import os
import sys
import warnings
import keras
import keras.preprocessing.image
from keras.utils import multi_gpu_model
import tensorflow as tf
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet.bin
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import layers
from .. import losses
from .. import models
from ..callbacks import RedirectModel
from ..callbacks.eval import Evaluate
from ..models.retinanet import retinanet_bbox
from ..preprocessing.csv_generator import CSVGenerator
from ..preprocessing.kitti import KittiGenerator
from ..preprocessing.open_images import OpenImagesGenerator
from ..preprocessing.pascal_voc import PascalVocGenerator
from ..utils.anchors import make_shapes_callback, anchor_targets_bbox
from ..utils.keras_version import check_keras_version
from ..utils.model import freeze as freeze_model
from ..utils.transform import random_transform_generator
def makedirs(path):
# Intended behavior: try to create the directory,
    # pass if the directory exists already, fail otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def model_with_weights(model, weights, skip_mismatch):
if weights is not None:
model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
return model
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0, freeze_backbone=False):
modifier = freeze_model if freeze_backbone else None
# Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
# optionally wrap in a parallel model
if multi_gpu > 1:
with tf.device('/cpu:0'):
model = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = multi_gpu_model(model, gpus=multi_gpu)
else:
model = model_with_weights(backbone_retinanet(num_classes, modifier=modifier), weights=weights, skip_mismatch=True)
training_model = model
# make prediction model
prediction_model = retinanet_bbox(model=model)
# compile model
training_model.compile(
loss={
'regression' : losses.smooth_l1(),
'classification': losses.focal()
},
optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
)
return model, training_model, prediction_model
def create_callbacks(model, training_model, prediction_model, validation_generator, args):
callbacks = []
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
'{backbone}_{dataset_type}_{{epoch:02d}}.h5'.format(backbone=args.backbone, dataset_type=args.dataset_type)
),
verbose=1
)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir = args.tensorboard_dir,
histogram_freq = 0,
batch_size = args.batch_size,
write_graph = True,
write_grads = False,
write_images = False,
embeddings_freq = 0,
embeddings_layer_names = None,
embeddings_metadata = None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from ..callbacks.coco import CocoEval
# use prediction model for evaluation
evaluation = CocoEval(validation_generator, tensorboard=tensorboard_callback)
else:
evaluation = Evaluate(validation_generator, tensorboard=tensorboard_callback)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
callbacks.append(keras.callbacks.ReduceLROnPlateau(
monitor = 'loss',
factor = 0.1,
patience = 2,
verbose = 1,
mode = 'auto',
epsilon = 0.0001,
cooldown = 0,
min_lr = 0
))
return callbacks
def create_generators(args):
# create random transform generator for augmenting training data
if args.random_transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.5,
)
else:
transform_generator = random_transform_generator(flip_x_chance=0.5)
if args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from ..preprocessing.coco import CocoGenerator
train_generator = CocoGenerator(
args.coco_path,
'train2017',
transform_generator=transform_generator,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
elif args.dataset_type == 'pascal':
train_generator = PascalVocGenerator(
args.pascal_path,
'trainval',
transform_generator=transform_generator,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
validation_generator = PascalVocGenerator(
args.pascal_path,
'test',
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
elif args.dataset_type == 'csv':
train_generator = CSVGenerator(
args.annotations,
args.classes,
transform_generator=transform_generator,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
if args.val_annotations:
validation_generator = CSVGenerator(
args.val_annotations,
args.classes,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
else:
validation_generator = None
elif args.dataset_type == 'oid':
train_generator = OpenImagesGenerator(
args.main_dir,
subset='train',
version=args.version,
labels_filter=args.labels_filter,
annotation_cache_dir=args.annotation_cache_dir,
fixed_labels=args.fixed_labels,
transform_generator=transform_generator,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
validation_generator = OpenImagesGenerator(
args.main_dir,
subset='validation',
version=args.version,
labels_filter=args.labels_filter,
annotation_cache_dir=args.annotation_cache_dir,
fixed_labels=args.fixed_labels,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
elif args.dataset_type == 'kitti':
train_generator = KittiGenerator(
args.kitti_path,
subset='train',
transform_generator=transform_generator,
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
validation_generator = KittiGenerator(
args.kitti_path,
subset='val',
batch_size=args.batch_size,
image_min_side=args.image_min_side,
image_max_side=args.image_max_side
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def check_args(parsed_args):
"""
Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
:param parsed_args: parser.parse_args()
:return: parsed_args
"""
if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:
raise ValueError(
"Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
parsed_args.multi_gpu))
if parsed_args.multi_gpu > 1 and parsed_args.snapshot:
raise ValueError(
"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.".format(parsed_args.multi_gpu,
parsed_args.snapshot))
if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:
raise ValueError("Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.")
if 'resnet' not in parsed_args.backbone:
warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))
return parsed_args
def parse_args(args):
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
kitti_parser = subparsers.add_parser('kitti')
kitti_parser.add_argument('kitti_path', help='Path to dataset directory (ie. /tmp/kitti).')
def csv_list(string):
return string.split(',')
oid_parser = subparsers.add_parser('oid')
oid_parser.add_argument('main_dir', help='Path to dataset directory.')
oid_parser.add_argument('--version', help='The current dataset version is V3.', default='2017_11')
oid_parser.add_argument('--labels-filter', help='A list of labels to filter.', type=csv_list, default=None)
oid_parser.add_argument('--annotation-cache-dir', help='Path to store annotation cache.', default='.')
oid_parser.add_argument('--fixed-labels', help='Use the exact specified labels.', default=False)
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).')
group = parser.add_mutually_exclusive_group()
group.add_argument('--snapshot', help='Resume training from a snapshot.')
group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
group.add_argument('--weights', help='Initialize the model with weights from a file.')
group.add_argument('--no-weights', help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)
parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots')
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333)
return check_args(parser.parse_args(args))
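# --- Editor's illustrative sketch (not part of the original script): how the
# parser above is typically invoked. Global options precede the dataset
# subcommand; subcommand-specific options follow it. Paths are placeholders.
#
#   # Pascal VOC with the default resnet50 backbone:
#   python keras_retinanet/bin/train.py pascal /path/to/VOCdevkit
#
#   # CSV annotations with a frozen backbone and a shorter run:
#   python keras_retinanet/bin/train.py --freeze-backbone --epochs 20 \
#       csv /path/to/annotations.csv /path/to/classes.csv --val-annotations /path/to/val.csv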
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create object that stores backbone information
backbone = models.backbone(args.backbone)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
# create the generators
train_generator, validation_generator = create_generators(args)
# create the model
if args.snapshot is not None:
print('Loading model, this may take a second...')
model = models.load_model(args.snapshot, backbone=args.backbone)
training_model = model
prediction_model = retinanet_bbox(model=model)
else:
weights = args.weights
# default to imagenet if nothing else is specified
if weights is None and args.imagenet_weights:
weights = backbone.download_imagenet()
print('Creating model, this may take a second...')
model, training_model, prediction_model = create_models(
backbone_retinanet=backbone.retinanet,
num_classes=train_generator.num_classes(),
weights=weights,
multi_gpu=args.multi_gpu,
freeze_backbone=args.freeze_backbone
)
# print model summary
print(model.summary())
# this lets the generator compute backbone layer shapes using the actual backbone model
if 'vgg' in args.backbone or 'densenet' in args.backbone:
compute_anchor_targets = functools.partial(anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
train_generator.compute_anchor_targets = compute_anchor_targets
if validation_generator is not None:
validation_generator.compute_anchor_targets = compute_anchor_targets
# create the callbacks
callbacks = create_callbacks(
model,
training_model,
prediction_model,
validation_generator,
args,
)
# start training
training_model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
epochs=args.epochs,
verbose=1,
callbacks=callbacks,
)
if __name__ == '__main__':
main()
| 40.159453
| 187
| 0.66937
|
2a681ae39ca6fa62be09d20c3970640a1c569924
| 5,640
|
py
|
Python
|
grafana_backup/grafanaSettings.py
|
bilalbayasut/grafana-backup-tool
|
442b62e3e95302dce6efba6d6c6ddbd99fa1fa22
|
[
"MIT"
] | null | null | null |
grafana_backup/grafanaSettings.py
|
bilalbayasut/grafana-backup-tool
|
442b62e3e95302dce6efba6d6c6ddbd99fa1fa22
|
[
"MIT"
] | null | null | null |
grafana_backup/grafanaSettings.py
|
bilalbayasut/grafana-backup-tool
|
442b62e3e95302dce6efba6d6c6ddbd99fa1fa22
|
[
"MIT"
] | null | null | null |
import base64
import json
import os
from datetime import datetime
from grafana_backup.commons import (load_config,
to_python2_and_3_compatible_string)
def main(config_path):
# Load config from optional configuration file located at ~/.grafana-backup.json
# or load defaults from example config stored in grafanaSettings.json
# environment variables can override settings as well and are top of the hierarchy
config_dict = {}
config = load_config(config_path)
grafana_url = config.get('grafana', {}).get('url', '')
grafana_token = config.get('grafana', {}).get('token', '')
grafana_search_api_limit = config.get('grafana', {}).get('search_api_limit', 5000)
debug = config.get('general', {}).get('debug', True)
verify_ssl = config.get('general', {}).get('verify_ssl', False)
client_cert = config.get('general', {}).get('client_cert', None)
backup_dir = config.get('general', {}).get('backup_dir', '_OUTPUT_')
pretty_print = config.get('general', {}).get('pretty_print', False)
aws_s3_bucket_name = config.get('aws', {}).get('s3_bucket_name', '')
aws_s3_bucket_key = config.get('aws', {}).get('s3_bucket_key', '')
aws_default_region = config.get('aws', {}).get('default_region', '')
aws_access_key_id = config.get('aws', {}).get('access_key_id', '')
aws_secret_access_key = config.get('aws', {}).get('secret_access_key', '')
aws_endpoint_url = config.get('aws', {}).get('endpoint_url', None)
admin_account = config.get('grafana', {}).get('admin_account', '')
admin_password = config.get('grafana', {}).get('admin_password', '')
GRAFANA_URL = os.getenv('GRAFANA_URL', grafana_url)
TOKEN = os.getenv('GRAFANA_TOKEN', grafana_token)
SEARCH_API_LIMIT = os.getenv('SEARCH_API_LIMIT', grafana_search_api_limit)
AWS_S3_BUCKET_NAME = os.getenv('AWS_S3_BUCKET_NAME', aws_s3_bucket_name)
AWS_S3_BUCKET_KEY = os.getenv('AWS_S3_BUCKET_KEY', aws_s3_bucket_key)
AWS_DEFAULT_REGION = os.getenv('AWS_DEFAULT_REGION', aws_default_region)
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', aws_access_key_id)
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', aws_secret_access_key)
AWS_ENDPOINT_URL = os.getenv('AWS_ENDPOINT_URL', aws_endpoint_url)
ADMIN_ACCOUNT = os.getenv('GRAFANA_ADMIN_ACCOUNT', admin_account)
ADMIN_PASSWORD = os.getenv('GRAFANA_ADMIN_PASSWORD', admin_password)
GRAFANA_BASIC_AUTH = os.getenv('GRAFANA_BASIC_AUTH', None)
DEBUG = os.getenv('DEBUG', debug)
if isinstance(DEBUG, str):
DEBUG = json.loads(DEBUG.lower()) # convert environment variable string to bool
VERIFY_SSL = os.getenv('VERIFY_SSL', verify_ssl)
if isinstance(VERIFY_SSL, str):
VERIFY_SSL = json.loads(VERIFY_SSL.lower()) # convert environment variable string to bool
CLIENT_CERT = os.getenv('CLIENT_CERT', client_cert)
BACKUP_DIR = os.getenv('BACKUP_DIR', backup_dir)
PRETTY_PRINT = os.getenv('PRETTY_PRINT', pretty_print)
if isinstance(PRETTY_PRINT, str):
PRETTY_PRINT = json.loads(PRETTY_PRINT.lower()) # convert environment variable string to bool
EXTRA_HEADERS = dict(
h.split(':') for h in os.getenv('GRAFANA_HEADERS', '').split(',') if 'GRAFANA_HEADERS' in os.environ)
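    # Editor's note (illustrative, values are hypothetical): GRAFANA_HEADERS is
    # parsed above as comma-separated "Name:Value" pairs, e.g.
    #
    #   GRAFANA_HEADERS="X-Scope-OrgID:1,X-Custom-Header:abc"
    #
    # which yields {'X-Scope-OrgID': '1', 'X-Custom-Header': 'abc'}.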
if TOKEN:
HTTP_GET_HEADERS = {'Authorization': 'Bearer ' + TOKEN}
HTTP_POST_HEADERS = {'Authorization': 'Bearer ' + TOKEN, 'Content-Type': 'application/json'}
else:
HTTP_GET_HEADERS = {}
HTTP_POST_HEADERS = {'Content-Type': 'application/json'}
for k, v in EXTRA_HEADERS.items():
HTTP_GET_HEADERS.update({k: v})
HTTP_POST_HEADERS.update({k: v})
TIMESTAMP = datetime.today().strftime('%Y%m%d%H%M')
config_dict['GRAFANA_URL'] = GRAFANA_URL
config_dict['GRAFANA_ADMIN_ACCOUNT'] = ADMIN_ACCOUNT
config_dict['GRAFANA_ADMIN_PASSWORD'] = ADMIN_PASSWORD
if not GRAFANA_BASIC_AUTH and (ADMIN_ACCOUNT and ADMIN_PASSWORD):
GRAFANA_BASIC_AUTH = base64.b64encode(
"{0}:{1}".format(ADMIN_ACCOUNT, ADMIN_PASSWORD).encode('utf8')
).decode('utf8')
if GRAFANA_BASIC_AUTH:
HTTP_GET_HEADERS_BASIC_AUTH = HTTP_GET_HEADERS.copy()
HTTP_GET_HEADERS_BASIC_AUTH.update({'Authorization': 'Basic {0}'.format(GRAFANA_BASIC_AUTH)})
HTTP_POST_HEADERS_BASIC_AUTH = HTTP_POST_HEADERS.copy()
HTTP_POST_HEADERS_BASIC_AUTH.update({'Authorization': 'Basic {0}'.format(GRAFANA_BASIC_AUTH)})
else:
HTTP_GET_HEADERS_BASIC_AUTH = None
HTTP_POST_HEADERS_BASIC_AUTH = None
config_dict['TOKEN'] = TOKEN
config_dict['SEARCH_API_LIMIT'] = SEARCH_API_LIMIT
config_dict['DEBUG'] = DEBUG
config_dict['VERIFY_SSL'] = VERIFY_SSL
config_dict['CLIENT_CERT'] = CLIENT_CERT
config_dict['BACKUP_DIR'] = BACKUP_DIR
config_dict['PRETTY_PRINT'] = PRETTY_PRINT
config_dict['EXTRA_HEADERS'] = EXTRA_HEADERS
config_dict['HTTP_GET_HEADERS'] = HTTP_GET_HEADERS
config_dict['HTTP_POST_HEADERS'] = HTTP_POST_HEADERS
config_dict['HTTP_GET_HEADERS_BASIC_AUTH'] = HTTP_GET_HEADERS_BASIC_AUTH
config_dict['HTTP_POST_HEADERS_BASIC_AUTH'] = HTTP_POST_HEADERS_BASIC_AUTH
config_dict['TIMESTAMP'] = TIMESTAMP
config_dict['AWS_S3_BUCKET_NAME'] = AWS_S3_BUCKET_NAME
config_dict['AWS_S3_BUCKET_KEY'] = AWS_S3_BUCKET_KEY
config_dict['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION
config_dict['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY_ID
config_dict['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_ACCESS_KEY
config_dict['AWS_ENDPOINT_URL'] = AWS_ENDPOINT_URL
return config_dict
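# --- Editor's illustrative sketch (not part of the original module): building
# the settings dictionary from the optional config file; the path is a
# placeholder.
#
#   settings = main(os.path.expanduser('~/.grafana-backup.json'))
#   print(settings['GRAFANA_URL'], settings['BACKUP_DIR'])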
| 45.483871
| 109
| 0.712766
|
1627b37a4682efc46d3bbc8355fd52814105e8cd
| 12,461
|
py
|
Python
|
rpython/jit/backend/llsupport/asmmemmgr.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
rpython/jit/backend/llsupport/asmmemmgr.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | null | null | null |
rpython/jit/backend/llsupport/asmmemmgr.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
import sys
from rpython.rlib.rarithmetic import intmask, r_uint, LONG_BIT
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib import rmmap
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.debug import have_debug_prints
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
class AsmMemoryManager(object):
LARGE_ALLOC_SIZE = 1024 * 1024 # 1MB
MIN_FRAGMENT = 64
NUM_INDICES = 32 # good for all sizes between 64 bytes and ~490 KB
_allocated = None
def __init__(self, large_alloc_size = LARGE_ALLOC_SIZE,
min_fragment = MIN_FRAGMENT,
num_indices = NUM_INDICES):
self.total_memory_allocated = r_uint(0)
self.total_mallocs = r_uint(0)
self.large_alloc_size = large_alloc_size
self.min_fragment = min_fragment
self.num_indices = num_indices
self.free_blocks = {} # map {start: stop}
self.free_blocks_end = {} # map {stop: start}
self.blocks_by_size = [[] for i in range(self.num_indices)]
def malloc(self, minsize, maxsize):
"""Allocate executable memory, between minsize and maxsize bytes,
and return a pair (start, stop). Does not perform any rounding
of minsize and maxsize.
"""
result = self._allocate_block(minsize)
(start, stop) = result
smaller_stop = start + maxsize
if smaller_stop + self.min_fragment <= stop:
self._add_free_block(smaller_stop, stop)
stop = smaller_stop
result = (start, stop)
self.total_mallocs += r_uint(stop - start)
return result # pair (start, stop)
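    # Hypothetical usage sketch for malloc()/free() (added; names are illustrative only):
    #   mgr = AsmMemoryManager()
    #   start, stop = mgr.malloc(256, 512)   # at least 256, at most roughly 512 usable bytes
    #   ... emit machine code into [start, stop) ...
    #   mgr.free(start, stop)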
def free(self, start, stop):
"""Free a block (start, stop) returned by a previous malloc()."""
self.total_mallocs -= r_uint(stop - start)
self._add_free_block(start, stop)
def open_malloc(self, minsize):
"""Allocate at least minsize bytes. Returns (start, stop)."""
result = self._allocate_block(minsize)
(start, stop) = result
self.total_mallocs += r_uint(stop - start)
return result
def open_free(self, middle, stop):
"""Used for freeing the end of an open-allocated block of memory."""
if stop - middle >= self.min_fragment:
self.total_mallocs -= r_uint(stop - middle)
self._add_free_block(middle, stop)
return True
else:
return False # too small to record
def _allocate_large_block(self, minsize):
# Compute 'size' from 'minsize': it must be rounded up to
# 'large_alloc_size'. Additionally, we use the following line
# to limit how many mmap() requests the OS will see in total:
minsize = max(minsize, intmask(self.total_memory_allocated >> 4))
size = minsize + self.large_alloc_size - 1
size = (size // self.large_alloc_size) * self.large_alloc_size
data = rmmap.alloc(size)
if not we_are_translated():
if self._allocated is None:
self._allocated = []
self._allocated.append((data, size))
if sys.maxint > 2147483647:
# Hack to make sure that mcs are not within 32-bits of one
# another for testing purposes
rmmap.hint.pos += 0x80000000 - size
self.total_memory_allocated += r_uint(size)
data = rffi.cast(lltype.Signed, data)
return self._add_free_block(data, data + size)
def _get_index(self, length):
i = 0
while length > self.min_fragment:
length = (length * 3) >> 2
i += 1
if i == self.num_indices - 1:
break
return i
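    # Illustrative example for _get_index (added), assuming the default min_fragment of 64:
    # a length of 64 stays in bucket 0, while a length of 100 is shrunk 100 -> 75 -> 56 over
    # two iterations and lands in bucket 2; larger lengths map to higher buckets, capped at
    # num_indices - 1.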
def _add_free_block(self, start, stop):
# Merge with the block on the left
if start in self.free_blocks_end:
left_start = self.free_blocks_end[start]
self._del_free_block(left_start, start)
start = left_start
# Merge with the block on the right
if stop in self.free_blocks:
right_stop = self.free_blocks[stop]
self._del_free_block(stop, right_stop)
stop = right_stop
# Add it to the dicts
self.free_blocks[start] = stop
self.free_blocks_end[stop] = start
i = self._get_index(stop - start)
self.blocks_by_size[i].append(start)
return start
def _del_free_block(self, start, stop):
del self.free_blocks[start]
del self.free_blocks_end[stop]
i = self._get_index(stop - start)
self.blocks_by_size[i].remove(start)
def _allocate_block(self, length):
# First look in the group of index i0 if there is a block that is
# big enough. Following an idea found in the Linux malloc.c, we
# prefer the oldest entries rather than the newest one, to let
# them have enough time to coalesce into bigger blocks. It makes
# a big difference on the purely random test (30% of total usage).
i0 = self._get_index(length)
bbs = self.blocks_by_size[i0]
for j in range(len(bbs)):
start = bbs[j]
stop = self.free_blocks[start]
if start + length <= stop:
del bbs[j]
break # found a block big enough
else:
# Then look in the larger groups
i = i0 + 1
while i < self.num_indices:
if len(self.blocks_by_size[i]) > 0:
# any block found in a larger group is big enough
start = self.blocks_by_size[i].pop(0)
stop = self.free_blocks[start]
break
i += 1
else:
# Exhausted the memory. Allocate the resulting block.
start = self._allocate_large_block(length)
stop = self.free_blocks[start]
i = self._get_index(stop - start)
assert self.blocks_by_size[i][-1] == start
self.blocks_by_size[i].pop()
#
del self.free_blocks[start]
del self.free_blocks_end[stop]
return (start, stop)
def _delete(self):
"NOT_RPYTHON"
if self._allocated:
for data, size in self._allocated:
rmmap.free(data, size)
self._allocated = None
class MachineDataBlockWrapper(object):
def __init__(self, asmmemmgr, allblocks):
self.asmmemmgr = asmmemmgr
self.allblocks = allblocks
self.rawstart = 0
self.rawposition = 0
self.rawstop = 0
def done(self):
if self.rawstart != 0:
if self.asmmemmgr.open_free(self.rawposition, self.rawstop):
self.rawstop = self.rawposition
self.allblocks.append((self.rawstart, self.rawstop))
self.rawstart = 0
self.rawposition = 0
self.rawstop = 0
def _allocate_next_block(self, minsize):
self.done()
self.rawstart, self.rawstop = self.asmmemmgr.open_malloc(minsize)
self.rawposition = self.rawstart
def malloc_aligned(self, size, alignment):
p = self.rawposition
p = (p + alignment - 1) & (-alignment)
if p + size > self.rawstop:
self._allocate_next_block(size + alignment - 1)
p = self.rawposition
p = (p + alignment - 1) & (-alignment)
assert p + size <= self.rawstop
self.rawposition = p + size
return p
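    # Worked example for malloc_aligned (added for illustration): with rawposition p = 13 and
    # alignment = 8, (13 + 8 - 1) & -8 == 20 & -8 == 16, i.e. p is rounded up to the next
    # multiple of 8 before the requested size is reserved.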
class BlockBuilderMixin(object):
_mixin_ = True
# A base class to generate assembler. It is equivalent to just a list
# of chars, but it is potentially more efficient for that usage.
# It works by allocating the assembler SUBBLOCK_SIZE bytes at a time.
# Ideally, this number should be a power of two that fits the GC's most
# compact allocation scheme (which is so far 35 * WORD for minimark.py).
WORD = LONG_BIT // 8
SUBBLOCK_SIZE = 32 * WORD
SUBBLOCK_PTR = lltype.Ptr(lltype.GcForwardReference())
SUBBLOCK = lltype.GcStruct('SUBBLOCK',
('prev', SUBBLOCK_PTR),
('data', lltype.FixedSizeArray(lltype.Char, SUBBLOCK_SIZE)))
SUBBLOCK_PTR.TO.become(SUBBLOCK)
gcroot_markers = None
def __init__(self, translated=None):
if translated is None:
translated = we_are_translated()
if translated:
self.init_block_builder()
else:
self._become_a_plain_block_builder()
def init_block_builder(self):
self._cursubblock = lltype.nullptr(self.SUBBLOCK)
self._baserelpos = -self.SUBBLOCK_SIZE
self._make_new_subblock()
def _make_new_subblock(self):
nextsubblock = lltype.malloc(self.SUBBLOCK)
nextsubblock.prev = self._cursubblock
self._cursubblock = nextsubblock
self._cursubindex = 0
self._baserelpos += self.SUBBLOCK_SIZE
_make_new_subblock._dont_inline_ = True
def writechar(self, char):
index = self._cursubindex
if index == self.SUBBLOCK_SIZE:
self._make_new_subblock()
index = 0
self._cursubblock.data[index] = char
self._cursubindex = index + 1
def overwrite(self, index, char):
assert 0 <= index < self.get_relative_pos()
block = self._cursubblock
index -= self._baserelpos
while index < 0:
block = block.prev
index += self.SUBBLOCK_SIZE
block.data[index] = char
def get_relative_pos(self):
return self._baserelpos + self._cursubindex
def copy_to_raw_memory(self, addr):
# indirection for _become_a_plain_block_builder() and for subclasses
self._copy_to_raw_memory(addr)
def _copy_to_raw_memory(self, addr):
block = self._cursubblock
blocksize = self._cursubindex
targetindex = self._baserelpos
while targetindex >= 0:
dst = rffi.cast(rffi.CCHARP, addr + targetindex)
for j in range(blocksize):
dst[j] = block.data[j]
block = block.prev
blocksize = self.SUBBLOCK_SIZE
targetindex -= self.SUBBLOCK_SIZE
assert not block
def _dump(self, addr, logname, backend=None):
debug_start(logname)
if have_debug_prints():
#
if backend is not None:
debug_print('BACKEND', backend)
#
from rpython.jit.backend.hlinfo import highleveljitinfo
if highleveljitinfo.sys_executable:
debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
else:
debug_print('SYS_EXECUTABLE', '??')
#
HEX = '0123456789ABCDEF'
dump = []
src = rffi.cast(rffi.CCHARP, addr)
for p in range(self.get_relative_pos()):
o = ord(src[p])
dump.append(HEX[o >> 4])
dump.append(HEX[o & 15])
debug_print('CODE_DUMP',
'@%x' % addr,
'+0 ', # backwards compatibility
''.join(dump))
#
debug_stop(logname)
def materialize(self, asmmemmgr, allblocks, gcrootmap=None):
size = self.get_relative_pos()
malloced = asmmemmgr.malloc(size, size)
allblocks.append(malloced)
rawstart = malloced[0]
self.copy_to_raw_memory(rawstart)
if self.gcroot_markers is not None:
assert gcrootmap is not None
for pos, mark in self.gcroot_markers:
gcrootmap.put(rawstart + pos, mark)
return rawstart
def _become_a_plain_block_builder(self):
# hack purely for speed of tests
self._data = []
self.writechar = self._data.append
self.overwrite = self._data.__setitem__
self.get_relative_pos = self._data.__len__
def plain_copy_to_raw_memory(addr):
dst = rffi.cast(rffi.CCHARP, addr)
for i, c in enumerate(self._data):
dst[i] = c
self._copy_to_raw_memory = plain_copy_to_raw_memory
def insert_gcroot_marker(self, mark):
if self.gcroot_markers is None:
self.gcroot_markers = []
self.gcroot_markers.append((self.get_relative_pos(), mark))
| 38.341538
| 79
| 0.599149
|
7fe38f91bad4cbd0fd97bfbf8448d5d06f624e98
| 2,502
|
py
|
Python
|
denver/_colorama/ansi.py
|
AdityaJ7/denver
|
b17e46b7d68980ce695f15bc078b05916dd1705a
|
[
"MIT"
] | null | null | null |
denver/_colorama/ansi.py
|
AdityaJ7/denver
|
b17e46b7d68980ce695f15bc078b05916dd1705a
|
[
"MIT"
] | null | null | null |
denver/_colorama/ansi.py
|
AdityaJ7/denver
|
b17e46b7d68980ce695f15bc078b05916dd1705a
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Xcodz.
# All Rights Reserved.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
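# Illustrative usage (added; not part of the original module), assuming an ANSI-capable terminal:
if __name__ == '__main__':
    print(Fore.RED + 'error' + Style.RESET_ALL)
    print(Back.GREEN + Fore.BLACK + 'ok' + Style.RESET_ALL)
    print(Cursor.UP(1) + 'one line up')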
| 24.057692
| 78
| 0.548361
|
54500b4818e5c6b90bfd28406619f1fe33aece74
| 2,818
|
py
|
Python
|
analytics/tests/stats/test_cluster.py
|
Genometric/ToolVisibilityQuantifier
|
82572a678c27820ec1a8dbbc54dcee18ee601096
|
[
"MIT"
] | 3
|
2020-04-03T02:00:10.000Z
|
2020-06-18T01:39:22.000Z
|
analytics/tests/stats/test_cluster.py
|
Genometric/ToolVisibilityQuantifier
|
82572a678c27820ec1a8dbbc54dcee18ee601096
|
[
"MIT"
] | 1
|
2020-07-14T06:39:02.000Z
|
2020-07-14T06:39:02.000Z
|
analytics/tests/stats/test_cluster.py
|
Genometric/ToolVisibilityQuantifier
|
82572a678c27820ec1a8dbbc54dcee18ee601096
|
[
"MIT"
] | 1
|
2020-05-22T20:12:47.000Z
|
2020-05-22T20:12:47.000Z
|
"""
"""
import os
from ..base_test_case import BaseTestCase
from lib.stats.cluster import Cluster
from lib.base import Base, CLUSTER_NAME_COLUMN_LABEL
import math
from os import listdir
class TestCluster(BaseTestCase):
"""
"""
def test_if_all_expected_files_written(self, tmp_clustered_files):
# Arrange
tmpdir = tmp_clustered_files[0]
repos = tmp_clustered_files[1]
clustered_files = len(repos)
# Act
Cluster().run(tmpdir)
# Assert
# There should be one file for each repository, and
        # another file that contains the clustering stats.
assert len(listdir(tmpdir)) == clustered_files + 1
def test_cluster_numbers(self, tmp_clustered_files):
# Arrange
tmpdir = tmp_clustered_files[0]
repos = tmp_clustered_files[1]
test_pubs = BaseTestCase.get_test_publications()
# Act
Cluster().run(tmpdir)
files = Base.get_files(tmpdir, include_clustered_files=True)
# Assert
        # TODO: This assertion is an anti-pattern; it must be re-implemented in a much better way.
for file in files:
publications = Base.get_publications(file)
checked = False
for idx, row in publications.iterrows():
for test_pub in test_pubs:
for idx2, row2 in test_pub[0].iterrows():
if row.get("Tools") == row2.get("Tools"):
assert row.get(CLUSTER_NAME_COLUMN_LABEL) == row2.get(CLUSTER_NAME_COLUMN_LABEL)
checked = True
assert checked == True
def test_cluster_stats_file(self, tmp_clustered_files):
"""
        This test asserts that the file containing the statistics of
        the clustering operation is created and contains the expected
        content.
        The test does not assert that the content is accurate (e.g.,
        that the value of the silhouette score is correct); rather, it
        checks that the expected number of lines and columns exist in the file.
"""
# Arrange
tmpdir = tmp_clustered_files[0]
repos = tmp_clustered_files[1]
test_pubs = BaseTestCase.get_test_publications()
cluster = Cluster()
stats_filename = os.path.join(tmpdir, cluster.clustering_stats_filename)
# Act
cluster.run(tmpdir)
# Assert
with open(stats_filename) as f:
lines = f.readlines()
# One header line plus one line per repository.
assert len(lines) == 1 + len(repos)
            # Check that each line contains the 7 expected columns.
for line in lines:
columns = line.split("\t")
assert len(columns) == 7
| 32.767442
| 108
| 0.606458
|
28bda633611d6f1b7263e1d3b195c46eba4497d3
| 4,794
|
py
|
Python
|
airbyte-integrations/connectors/source-zendesk-support/source_zendesk_support/source.py
|
heap/airbyte
|
35e65f444f38b617a12e12d425da2115f08b5f14
|
[
"MIT"
] | 22
|
2020-08-27T00:47:20.000Z
|
2020-09-17T15:39:39.000Z
|
airbyte-integrations/connectors/source-zendesk-support/source_zendesk_support/source.py
|
heap/airbyte
|
35e65f444f38b617a12e12d425da2115f08b5f14
|
[
"MIT"
] | 116
|
2020-08-27T01:11:27.000Z
|
2020-09-19T02:47:52.000Z
|
airbyte-integrations/connectors/source-zendesk-support/source_zendesk_support/source.py
|
heap/airbyte
|
35e65f444f38b617a12e12d425da2115f08b5f14
|
[
"MIT"
] | 1
|
2020-09-15T06:10:01.000Z
|
2020-09-15T06:10:01.000Z
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import base64
from typing import Any, List, Mapping, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http.requests_native_auth import TokenAuthenticator
from source_zendesk_support.streams import SourceZendeskException
from .streams import (
Brands,
CustomRoles,
GroupMemberships,
Groups,
Macros,
Organizations,
SatisfactionRatings,
Schedules,
SlaPolicies,
Tags,
TicketAudits,
TicketComments,
TicketFields,
TicketForms,
TicketMetricEvents,
TicketMetrics,
Tickets,
Users,
UserSettingsStream,
)
class BasicApiTokenAuthenticator(TokenAuthenticator):
"""basic Authorization header"""
def __init__(self, email: str, password: str):
# for API token auth we need to add the suffix '/token' in the end of email value
email_login = email + "/token"
token = base64.b64encode(f"{email_login}:{password}".encode("utf-8"))
super().__init__(token.decode("utf-8"), auth_method="Basic")
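# Illustrative note (added): for a hypothetical email 'agent@example.com' and API token 'abc123',
# the authenticator above produces the header
#   Authorization: Basic base64('agent@example.com/token:abc123')
# which is the email-plus-'/token' form used for Zendesk API-token authentication.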
class SourceZendeskSupport(AbstractSource):
"""Source Zendesk Support fetch data from Zendesk CRM that builds customer
support and sales software which aims for quick implementation and adaptation at scale.
"""
@classmethod
def get_authenticator(cls, config: Mapping[str, Any]) -> BasicApiTokenAuthenticator:
# old authentication flow support
auth_old = config.get("auth_method")
if auth_old:
if auth_old.get("auth_method") == "api_token":
return BasicApiTokenAuthenticator(config["auth_method"]["email"], config["auth_method"]["api_token"])
# new authentication flow
auth = config.get("credentials")
if auth:
if auth.get("credentials") == "oauth2.0":
return TokenAuthenticator(token=config["credentials"]["access_token"])
elif auth.get("credentials") == "api_token":
return BasicApiTokenAuthenticator(config["credentials"]["email"], config["credentials"]["api_token"])
else:
raise SourceZendeskException(f"Not implemented authorization method: {config['credentials']}")
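    # Hypothetical config sketch (added for illustration) matching the branches above:
    #   {"subdomain": "acme", "start_date": "2021-01-01T00:00:00Z",
    #    "credentials": {"credentials": "api_token",
    #                    "email": "agent@example.com", "api_token": "abc123"}}
    # An oauth2.0 variant would instead carry {"credentials": "oauth2.0", "access_token": "..."}.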
def check_connection(self, logger, config) -> Tuple[bool, any]:
"""Connection check to validate that the user-provided config can be used to connect to the underlying API
:param config: the user-input config object conforming to the connector's spec.json
:param logger: logger object
:return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully,
(False, error) otherwise.
"""
auth = self.get_authenticator(config)
settings = None
try:
settings = UserSettingsStream(config["subdomain"], authenticator=auth, start_date=None).get_settings()
except requests.exceptions.RequestException as e:
return False, e
active_features = [k for k, v in settings.get("active_features", {}).items() if v]
# logger.info("available features: %s" % active_features)
if "organization_access_enabled" not in active_features:
return False, "Organization access is not enabled. Please check admin permission of the current account"
return True, None
@classmethod
def convert_config2stream_args(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
"""Convert input configs to parameters of the future streams
This function is used by unit tests too
"""
return {
"subdomain": config["subdomain"],
"start_date": config["start_date"],
"authenticator": cls.get_authenticator(config),
}
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""Returns relevant a list of available streams
:param config: A Mapping of the user input configuration as defined in the connector spec.
"""
args = self.convert_config2stream_args(config)
# sorted in alphabet order
return [
GroupMemberships(**args),
Groups(**args),
Macros(**args),
Organizations(**args),
SatisfactionRatings(**args),
SlaPolicies(**args),
Tags(**args),
TicketAudits(**args),
TicketComments(**args),
TicketFields(**args),
TicketForms(**args),
TicketMetrics(**args),
TicketMetricEvents(**args),
Tickets(**args),
Users(**args),
Brands(**args),
CustomRoles(**args),
Schedules(**args),
]
| 37.453125
| 117
| 0.644139
|
a777279e25e5f54d2f1baa3f5a903f9db98549ea
| 305
|
py
|
Python
|
scripts/find_unmatched.py
|
dyelax/selfie2bitmoji
|
1aa292800827a250494f43246b65812cff96b921
|
[
"MIT"
] | 3
|
2017-12-21T05:15:12.000Z
|
2018-01-26T16:36:06.000Z
|
scripts/find_unmatched.py
|
dyelax/selfie2bitmoji
|
1aa292800827a250494f43246b65812cff96b921
|
[
"MIT"
] | 1
|
2018-11-30T12:02:01.000Z
|
2018-12-04T12:01:19.000Z
|
scripts/find_unmatched.py
|
dyelax/selfie2bitmoji
|
1aa292800827a250494f43246b65812cff96b921
|
[
"MIT"
] | 2
|
2018-09-21T16:52:29.000Z
|
2019-12-02T08:15:46.000Z
|
from glob import glob
import os
png_paths = glob('../data/bitmoji/*/*.png')
npy_paths = glob('../data/bitmoji/*/*.npy')
png_set = set([os.path.splitext(p)[0] for p in png_paths])
npy_set = set([os.path.splitext(p)[0] for p in npy_paths])
sym_diff = png_set ^ npy_set
print len(sym_diff)
print sym_diff
| 23.461538
| 58
| 0.698361
|
924a7519f61f4047837e62e04bf45c90cc2a16bd
| 13,311
|
py
|
Python
|
darling_ansible/python_venv/lib/python3.7/site-packages/oci/data_catalog/models/work_request.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/data_catalog/models/work_request.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/data_catalog/models/work_request.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | 1
|
2020-06-25T03:12:58.000Z
|
2020-06-25T03:12:58.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequest(object):
"""
A description of workrequest status.
"""
#: A constant which can be used with the operation_type property of a WorkRequest.
#: This constant has a value of "CREATE_CATALOG"
OPERATION_TYPE_CREATE_CATALOG = "CREATE_CATALOG"
#: A constant which can be used with the operation_type property of a WorkRequest.
#: This constant has a value of "UPDATE_CATALOG"
OPERATION_TYPE_UPDATE_CATALOG = "UPDATE_CATALOG"
#: A constant which can be used with the operation_type property of a WorkRequest.
#: This constant has a value of "DELETE_CATALOG"
OPERATION_TYPE_DELETE_CATALOG = "DELETE_CATALOG"
#: A constant which can be used with the operation_type property of a WorkRequest.
#: This constant has a value of "MOVE_CATALOG"
OPERATION_TYPE_MOVE_CATALOG = "MOVE_CATALOG"
#: A constant which can be used with the status property of a WorkRequest.
#: This constant has a value of "ACCEPTED"
STATUS_ACCEPTED = "ACCEPTED"
#: A constant which can be used with the status property of a WorkRequest.
#: This constant has a value of "IN_PROGRESS"
STATUS_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the status property of a WorkRequest.
#: This constant has a value of "FAILED"
STATUS_FAILED = "FAILED"
#: A constant which can be used with the status property of a WorkRequest.
#: This constant has a value of "SUCCEEDED"
STATUS_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the status property of a WorkRequest.
#: This constant has a value of "CANCELING"
STATUS_CANCELING = "CANCELING"
#: A constant which can be used with the status property of a WorkRequest.
#: This constant has a value of "CANCELED"
STATUS_CANCELED = "CANCELED"
def __init__(self, **kwargs):
"""
Initializes a new WorkRequest object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param operation_type:
The value to assign to the operation_type property of this WorkRequest.
Allowed values for this property are: "CREATE_CATALOG", "UPDATE_CATALOG", "DELETE_CATALOG", "MOVE_CATALOG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type operation_type: str
:param status:
The value to assign to the status property of this WorkRequest.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param id:
The value to assign to the id property of this WorkRequest.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this WorkRequest.
:type compartment_id: str
:param resources:
The value to assign to the resources property of this WorkRequest.
:type resources: list[WorkRequestResource]
:param percent_complete:
The value to assign to the percent_complete property of this WorkRequest.
:type percent_complete: float
:param time_accepted:
The value to assign to the time_accepted property of this WorkRequest.
:type time_accepted: datetime
:param time_started:
The value to assign to the time_started property of this WorkRequest.
:type time_started: datetime
:param time_finished:
The value to assign to the time_finished property of this WorkRequest.
:type time_finished: datetime
"""
self.swagger_types = {
'operation_type': 'str',
'status': 'str',
'id': 'str',
'compartment_id': 'str',
'resources': 'list[WorkRequestResource]',
'percent_complete': 'float',
'time_accepted': 'datetime',
'time_started': 'datetime',
'time_finished': 'datetime'
}
self.attribute_map = {
'operation_type': 'operationType',
'status': 'status',
'id': 'id',
'compartment_id': 'compartmentId',
'resources': 'resources',
'percent_complete': 'percentComplete',
'time_accepted': 'timeAccepted',
'time_started': 'timeStarted',
'time_finished': 'timeFinished'
}
self._operation_type = None
self._status = None
self._id = None
self._compartment_id = None
self._resources = None
self._percent_complete = None
self._time_accepted = None
self._time_started = None
self._time_finished = None
@property
def operation_type(self):
"""
**[Required]** Gets the operation_type of this WorkRequest.
Type of the work request.
Allowed values for this property are: "CREATE_CATALOG", "UPDATE_CATALOG", "DELETE_CATALOG", "MOVE_CATALOG", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The operation_type of this WorkRequest.
:rtype: str
"""
return self._operation_type
@operation_type.setter
def operation_type(self, operation_type):
"""
Sets the operation_type of this WorkRequest.
Type of the work request.
:param operation_type: The operation_type of this WorkRequest.
:type: str
"""
allowed_values = ["CREATE_CATALOG", "UPDATE_CATALOG", "DELETE_CATALOG", "MOVE_CATALOG"]
if not value_allowed_none_or_none_sentinel(operation_type, allowed_values):
operation_type = 'UNKNOWN_ENUM_VALUE'
self._operation_type = operation_type
@property
def status(self):
"""
**[Required]** Gets the status of this WorkRequest.
Status of current work request.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The status of this WorkRequest.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this WorkRequest.
Status of current work request.
:param status: The status of this WorkRequest.
:type: str
"""
allowed_values = ["ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def id(self):
"""
**[Required]** Gets the id of this WorkRequest.
The id of the work request.
:return: The id of this WorkRequest.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this WorkRequest.
The id of the work request.
:param id: The id of this WorkRequest.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this WorkRequest.
The ocid of the compartment that contains the work request. Work requests should be scoped to
the same compartment as the resource the work request affects. If the work request affects multiple resources,
and those resources are not in the same compartment, it is up to the service team to pick the primary
resource whose compartment should be used.
:return: The compartment_id of this WorkRequest.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this WorkRequest.
The ocid of the compartment that contains the work request. Work requests should be scoped to
the same compartment as the resource the work request affects. If the work request affects multiple resources,
and those resources are not in the same compartment, it is up to the service team to pick the primary
resource whose compartment should be used.
:param compartment_id: The compartment_id of this WorkRequest.
:type: str
"""
self._compartment_id = compartment_id
@property
def resources(self):
"""
**[Required]** Gets the resources of this WorkRequest.
The resources affected by this work request.
:return: The resources of this WorkRequest.
:rtype: list[WorkRequestResource]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this WorkRequest.
The resources affected by this work request.
:param resources: The resources of this WorkRequest.
:type: list[WorkRequestResource]
"""
self._resources = resources
@property
def percent_complete(self):
"""
**[Required]** Gets the percent_complete of this WorkRequest.
Percentage of the request completed.
:return: The percent_complete of this WorkRequest.
:rtype: float
"""
return self._percent_complete
@percent_complete.setter
def percent_complete(self, percent_complete):
"""
Sets the percent_complete of this WorkRequest.
Percentage of the request completed.
:param percent_complete: The percent_complete of this WorkRequest.
:type: float
"""
self._percent_complete = percent_complete
@property
def time_accepted(self):
"""
**[Required]** Gets the time_accepted of this WorkRequest.
The date and time the request was created, as described in
`RFC 3339`__, section 14.29.
__ https://tools.ietf.org/rfc/rfc3339
:return: The time_accepted of this WorkRequest.
:rtype: datetime
"""
return self._time_accepted
@time_accepted.setter
def time_accepted(self, time_accepted):
"""
Sets the time_accepted of this WorkRequest.
The date and time the request was created, as described in
`RFC 3339`__, section 14.29.
__ https://tools.ietf.org/rfc/rfc3339
:param time_accepted: The time_accepted of this WorkRequest.
:type: datetime
"""
self._time_accepted = time_accepted
@property
def time_started(self):
"""
Gets the time_started of this WorkRequest.
The date and time the request was started, as described in `RFC 3339`__,
section 14.29.
__ https://tools.ietf.org/rfc/rfc3339
:return: The time_started of this WorkRequest.
:rtype: datetime
"""
return self._time_started
@time_started.setter
def time_started(self, time_started):
"""
Sets the time_started of this WorkRequest.
The date and time the request was started, as described in `RFC 3339`__,
section 14.29.
__ https://tools.ietf.org/rfc/rfc3339
:param time_started: The time_started of this WorkRequest.
:type: datetime
"""
self._time_started = time_started
@property
def time_finished(self):
"""
Gets the time_finished of this WorkRequest.
The date and time the object was finished, as described in `RFC 3339`__.
__ https://tools.ietf.org/rfc/rfc3339
:return: The time_finished of this WorkRequest.
:rtype: datetime
"""
return self._time_finished
@time_finished.setter
def time_finished(self, time_finished):
"""
Sets the time_finished of this WorkRequest.
The date and time the object was finished, as described in `RFC 3339`__.
__ https://tools.ietf.org/rfc/rfc3339
:param time_finished: The time_finished of this WorkRequest.
:type: datetime
"""
self._time_finished = time_finished
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 33.528967
| 245
| 0.648336
|
a8173161eb19f96e96b3ea300c2dd2b83f831312
| 8,727
|
py
|
Python
|
code/utils/loss.py
|
JJBUP/yolov3_pytorch
|
448ff1de08c321bf14fa24eb38b5fc19482a338b
|
[
"MIT"
] | 1
|
2021-12-17T14:34:21.000Z
|
2021-12-17T14:34:21.000Z
|
code/utils/loss.py
|
JJBUP/yolov3_pytorch
|
448ff1de08c321bf14fa24eb38b5fc19482a338b
|
[
"MIT"
] | null | null | null |
code/utils/loss.py
|
JJBUP/yolov3_pytorch
|
448ff1de08c321bf14fa24eb38b5fc19482a338b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
from .util import to_cpu
def compute_loss(predictions, targets, model):
"""
    :param predictions: outputs at the three scales: (b,3,13,13,85), (b,3,26,26,85), (b,3,52,52,85)
    :param targets: ground-truth boxes
:param model:
:return:
"""
device = targets.device
    # accumulators for the three loss terms: class, box and objectness
    lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
    # build the training targets from the ground-truth labels
    tcls, tbox, indices, anchors = build_targets(predictions, targets, model) # targets
    # BCEWithLogitsLoss combines a sigmoid with the binary cross-entropy
BCEcls = nn.BCEWithLogitsLoss(
pos_weight=torch.tensor([1.0], device=device))
BCEobj = nn.BCEWithLogitsLoss(
pos_weight=torch.tensor([1.0], device=device))
    # compute the loss contributed by each feature-map scale
for layer_index, layer_predictions in enumerate(predictions):
# image ids, anchors, grid index i and j for each target in the current yolo layer
b, anchor, grid_j, grid_i = indices[layer_index]
        # objectness targets, initialised to 0, e.g. shape [1,3,13,13]
tobj = torch.zeros_like(layer_predictions[..., 0], device=device) # target obj
num_targets = b.shape[0]
        # is there any target on this scale?
if num_targets:
# (num,85)
ps = layer_predictions[b, anchor, grid_j, grid_i]
            # box regression for the cells that contain a target
            # xy offsets, kept in (0,1) by the sigmoid, shape (num,2)
            pxy = ps[:, :2].sigmoid()
            # anchors[layer_index]: the anchors of this layer selected earlier in build_targets
            # predicted scale * size of the anchor chosen for this target, shape (num,2)
pwh = torch.exp(ps[:, 2:4]) * anchors[layer_index]
# (num,4)
pbox = torch.cat((pxy, pwh), 1)
# Calculate CIoU or GIoU for each target with the predicted box for its cell + anchor
iou = bbox_iou(pbox.T, tbox[layer_index], x1y1x2y2=False, CIoU=True)
            # iou is a tensor of length num; the box loss is the mean of 1 - iou
lbox += (1.0 - iou).mean() # iou loss
            # objectness target
            # set the probability that this cell contains an object
            # use the IoU between the matched ground-truth box and the anchor-based prediction as the objectness target ??? why not simply use 1
tobj[b, anchor, grid_j, grid_i] = iou.detach().clamp(0).type(
tobj.dtype) # Use cells with iou > 0 as object targets
            # if there is more than one class, add the classification loss
if ps.size(1) - 5 > 1:
# (num,classnum)
t = torch.zeros_like(ps[:, 5:], device=device)
                t[range(num_targets), tcls[layer_index]] = 1 # one-hot: set the target class to 1
lcls += BCEcls(ps[:, 5:], t) # BCE
        # objectness (confidence) loss, computed over positive and negative samples together
lobj += BCEobj(layer_predictions[..., 4], tobj) # obj loss
lbox *= 0.05
    lobj *= 1.0 # whether an object is present matters most
lcls *= 0.5
# Merge losses
loss = lbox + lobj + lcls
return loss, to_cpu(torch.cat((lbox, lobj, lcls, loss)))
def build_targets(p, targets, model):
"""
:param p:
    :param targets: # (batch_id, class_id, x, y, w, h); batch_id is the sample's index within the batch
:param model:
:return:
"""
    na, nt = 3, targets.shape[0] # number of anchors, number of ground-truth targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
    # [0, 1, 2] labels the three anchors
    ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)
    # with 1 ground-truth box: tensor([[0.],[1.],[2.]])
    # with 2 ground-truth boxes: tensor([[0., 0.],[1., 1.],[2., 2.]])
    # replicate the targets 3 times and append the matching anchor index, so each copy is paired with one anchor
# (img id, class, x, y, w, h, anchor id)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)
    # with one ground-truth box: tensor([[[0.0000, 0.0000, 0.5422, 0.5520, 0.1203, 0.1513, 0.0000]],
# [[0.0000, 0.0000, 0.5422, 0.5520, 0.1203, 0.1513, 1.0000]],
# [[0.0000, 0.0000, 0.5422, 0.5520, 0.1203, 0.1513, 2.0000]]])
    # with two ground-truth boxes: tensor([[[0.0000, 0.0000, 0.4662, 0.5317, 0.1663, 0.2092, 0.0000],
# [0.0000, 2.0000, 0.3129, 0.6461, 0.2430, 0.2092, 0.0000]],
# [[0.0000, 0.0000, 0.4662, 0.5317, 0.1663, 0.2092, 1.0000],
# [0.0000, 2.0000, 0.3129, 0.6461, 0.2430, 0.2092, 1.0000]],
# [[0.0000, 0.0000, 0.4662, 0.5317, 0.1663, 0.2092, 2.0000],
# [0.0000, 2.0000, 0.3129, 0.6461, 0.2430, 0.2092, 2.0000]]])
for i, yolo_layer in enumerate(model.yolo_out_layer):
anchors = yolo_layer.anchors.to(targets.device)
        anchors = anchors / yolo_layer.stride # express the anchors in grid-cell units
        # gain[2:6] = feature_w, feature_h, feature_w, feature_h
        gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain[2:6] tensor([13, 13, 13, 13])
        # scale the box coordinates to feature-map units; the other fields are multiplied by 1 and stay unchanged
        t = targets * gain # xywh * feature_w,feature_h,feature_w,feature_h
        # if there are ground-truth boxes
        if nt:
            # ratio between ground-truth w/h and anchor w/h
            r = t[:, :, 4:6] / anchors[:, None] # w,h
            # keep anchors whose shape is close enough to the box; max(r, 1/r) is used because either side can be the larger one
            j = torch.max(r, 1. / r).max(2)[0] < 4 # compare
            # i.e. only the reasonably matching anchors keep responsibility for prediction; the rest are dropped
            t = t[j]
            # t after filtering, shape (num, 7)
# tensor([[0.0000, 0.0000, 6.6443, 6.5083, 2.1944, 2.7607, 0.0000],
# [0.0000, 2.0000, 8.6672, 8.0180, 3.2058, 2.7607, 0.0000],
# [0.0000, 0.0000, 6.6443, 6.5083, 2.1944, 2.7607, 1.0000],
# [0.0000, 2.0000, 8.6672, 8.0180, 3.2058, 2.7607, 1.0000],
# [0.0000, 2.0000, 8.6672, 8.0180, 3.2058, 2.7607, 2.0000]])
else:
t = targets[0]
        # extract the image id within the batch and the class id; their length equals the number of kept targets
        b, c = t[:, :2].long().T
        # We isolate the target cell associations.
        # x, y, w, h are already in the cell coordinate system, meaning an x = 1.2 is 1.2 cell widths
        gxy = t[:, 2:4]
        gwh = t[:, 4:6] # grid wh
        # truncating the cell-unit xy values gives the index of the containing grid cell
        gij = gxy.long()
        # split into i and j
        gi, gj = gij.T # grid xy indices
        # anchor indices as ints as well
        a = t[:, 6].long()
        # clamp so the indices stay inside the grid
        indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))
        # gxy - gij is the offset inside the cell; concatenate it with wh to form the box target
        tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
        # record the anchor responsible for predicting each target
        anch.append(anchors[a])
        # record the class id of each target
        tcls.append(c)
    # all returned values are lists of length 3, one entry per output scale
    # tcls[i]    class ids of the targets assigned to this scale
    # tbox[i]    xywh of those targets, where xy is the offset inside the grid cell
    # indices[i] image ids, anchor ids and grid-cell positions of those targets
    # anch[i]    the anchors used for those targets
return tcls, tbox, indices, anch
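# Worked example of the ratio test in build_targets (added for illustration): for a ground-truth
# box of w, h = (2.2, 2.8) cells and an anchor of (3.6, 2.8) cells, r = (0.61, 1.0),
# max(r, 1/r) = (1.64, 1.0), and its maximum 1.64 < 4, so the anchor keeps the target; an anchor
# of (0.4, 0.5) cells would give max(r, 1/r) of about (5.5, 5.6) and be discarded.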
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
# Get the coordinates of bounding boxes
if x1y1x2y2: # x1, y1, x2, y2 = box1
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
else: # transform from xywh to xyxy
b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
# Intersection area
inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
(torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
# Union Area
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
union = w1 * h1 + w2 * h2 - inter + eps
iou = inter / union
if GIoU or DIoU or CIoU:
# convex (smallest enclosing box) width
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
(b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
if DIoU:
return iou - rho2 / c2 # DIoU
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * \
torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = v / ((1 + eps) - iou + v)
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU
else:
return iou # IoU
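# Reference note (added): with the names used above, the complete-IoU value returned by bbox_iou is
#   CIoU = IoU - rho2 / c2 - alpha * v,  where v = (4 / pi**2) * (atan(w2/h2) - atan(w1/h1))**2,
# rho2 is the squared distance between box centres and c2 the squared diagonal of the smallest
# enclosing box (see the DIoU/CIoU paper referenced in the comments, arXiv:1911.08287).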
| 42.570732
| 114
| 0.561247
|
a6ca425d4935104b85eda28b7c891a01353aaeda
| 97
|
py
|
Python
|
tests/io/file_close.py
|
Euromance/pycopy
|
540cfcc52d17667a5f6b2a176427cc031029b78f
|
[
"MIT"
] | 663
|
2018-12-30T00:17:59.000Z
|
2022-03-14T05:03:41.000Z
|
tests/io/file_close.py
|
Euromance/pycopy
|
540cfcc52d17667a5f6b2a176427cc031029b78f
|
[
"MIT"
] | 41
|
2019-06-06T08:31:19.000Z
|
2022-02-13T16:53:41.000Z
|
tests/io/file_close.py
|
Euromance/pycopy
|
540cfcc52d17667a5f6b2a176427cc031029b78f
|
[
"MIT"
] | 60
|
2019-06-01T04:25:00.000Z
|
2022-02-25T01:47:31.000Z
|
f = open("io/data/file1")
f.close()
# Should be possible to close again without error.
f.close()
| 19.4
| 50
| 0.701031
|
484c31a9eb20f196a06d1b004d2d233220e9669e
| 939
|
py
|
Python
|
awx/main/migrations/0056_v350_custom_venv_history.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/main/migrations/0056_v350_custom_venv_history.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/main/migrations/0056_v350_custom_venv_history.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-22 22:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0055_v340_add_grafana_notification'),
]
operations = [
migrations.AddField(
model_name='inventoryupdate',
name='custom_virtualenv',
field=models.CharField(
blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True
),
),
migrations.AddField(
model_name='job',
name='custom_virtualenv',
field=models.CharField(
blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True
),
),
]
| 31.3
| 150
| 0.620873
|
02d5414a8a046ddef8a33b6b5f07a3b51f609ffb
| 6,384
|
py
|
Python
|
worker/LinkedIn/service1/Extractor.py
|
LamriAli/remote-extraction-proxy-and-worker
|
6e402efbd94298b015797183934fc10744ea0ded
|
[
"MIT"
] | null | null | null |
worker/LinkedIn/service1/Extractor.py
|
LamriAli/remote-extraction-proxy-and-worker
|
6e402efbd94298b015797183934fc10744ea0ded
|
[
"MIT"
] | null | null | null |
worker/LinkedIn/service1/Extractor.py
|
LamriAli/remote-extraction-proxy-and-worker
|
6e402efbd94298b015797183934fc10744ea0ded
|
[
"MIT"
] | null | null | null |
from asyncio.windows_events import NULL
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import pandas as pd
from bot_studio import *
import re as re
from selenium.webdriver.common.by import By
import win32clipboard
import os
from bs4 import BeautifulSoup as bs
from connect import connect
from context import Context
import networkx as nx
from get_data import get_post_url
from Add_data import Add_comment_user,Add_Posts_nodes
import json
from networkx.readwrite import json_graph
#from API_ExtractionService.Network_Extractor import Network_Extractor
from Zos_context import Zos_Context
class Extractor():#NetworkExtractor
context=None
Schema=[]
context= None
graphe=NULL
linkedin=NULL
def __init__(self,Schema):#,context,Schema,publisher,roadmap,Graphe
print("Extractors")
### ZOS CONTEXT
zos_context=Zos_Context()
keys={'username':zos_context.get("LI_USERNAME"),'password':zos_context.get("LI_PASSWORD")}
#self.super().__init__("LinkedIn",context,Schema,publisher,roadmap)
self.context=Context(zos_context.get("LI_ACCOUNT"),keys,zos_context.get("LIMIT_POSTS"),zos_context.get("LIMIT_FRIENDS"),zos_context.get("LIMIT_COMMENTS"),zos_context.get("USER_COMMENT"),zos_context.get("ADD_USER"),zos_context.get("ADD_COM"),zos_context.get("POST"))
self.graphe=nx.DiGraph()
####Linkedin instance
self.linkedin=bot_studio.linkedin()
self.linkedin.login(zos_context.get("LI_USERNAME") ,zos_context.get("LI_PASSWORD") )
self.create_graphe(Schema)
def get_graph(self):
return self.graphe
def set_graph(self,g):
self.graphe=g
def save_json(self,filename,graph):
g = graph
g_json = json_graph.node_link_data(g)
json.dump(g_json,open(filename,'w'),indent=2)
def Scraper_page(self,username,password,page_url):
driver=connect().connection(username,password)
data={}
driver.get(page_url)
time.sleep(3)
src = driver.page_source
soup = BeautifulSoup(src, 'lxml')
time.sleep(5)
intro = soup.find('div', {'class': 'block mt2'})
name_loc = intro.find("h1")
data["name"]=name_loc.get_text().strip()
#print(name_loc.get_text().strip())
works_at_loc = intro.find("div", {'class': 'inline-block'}).find_all('div')
loc=works_at_loc[0].get_text().strip()
data['localisation']=loc
#print(loc)
abonnee=works_at_loc[1].get_text().strip()
data['abonnee']=abonnee
#print(abonnee)
description= soup.find('div', {'class': 't-14 t-black--light full-width break-words ember-view'})
data['description']=description.get_text().strip()
driver.close()
return data
#@NetworkExtractor.data_publisher
def create_graphe(self,Schema):
print("-----create graphe--------")
context=self.context
username=context.keys['username']
password=context.keys['password']
page=context.account
limit_comment= context.limit_comments
Graphe=self.graphe
if Graphe.number_of_nodes() ==0:
Graphe.add_nodes_from([(page, {'id':page,
'checked' :0 ,
'type':'page'
} )])
page_inf=self.Scraper_page(username,password,page)
for attr in Schema['page']:
nx.set_node_attributes(Graphe, name=attr, values=str(page_inf[attr]))
try:
Nodeslist = [v for v in Graphe.nodes()]
for v in Nodeslist:
if Graphe.nodes[v]['checked']==0 :
Graphe.nodes[v]['checked']=1
if Graphe.nodes[v]['type']=='page':
#Add Postprint()
limit_posts=context.limit_posts
list_url=get_post_url(username,password,context.limit_posts,v)
time.sleep(4)
if len(list_url)==0:
print("no url selected")
break
Add_Posts_nodes(Graphe,context,Schema,list_url,v)
#Add Comment
user_comment=context.user_comment
if(user_comment=='True'):
add_comm=context.add_comm
add_user=context.add_user
Add_comment_user(self.linkedin,Graphe,context,username , password ,list_url,limit_comment,Schema,add_user,add_comm)
Nodeslist = [v for v in Graphe.nodes()]
print("Extraction complete.")
# Get Graph
# self.graphe=Graphe
self.set_graph(context.graph)
final_graph=self.get_graph()
self.save_json("file_graphe.json",final_graph)
loaded_json = json.loads("file_graphe.json")
#dateien = json_graph(Graphe)
print("dateeien")
print(loaded_json)
payload = loaded_json
payload["road_map"] = []
# delivering payload
# locator.getPublisher().publish("Twitter",json.dumps(payload))
except Exception as ex:
self.save_json("file_graphe.json",context.graph)
print(ex)
| 30.84058
| 273
| 0.509085
|
2068c1721d3e8596dfeea10e64a4a7ae531587c7
| 2,539
|
py
|
Python
|
MiDaS/run.py
|
shatgupt/3d-photo-inpainting
|
08e9bf70e39bf9f1d5a187bee9e70ea81d35fd48
|
[
"MIT"
] | null | null | null |
MiDaS/run.py
|
shatgupt/3d-photo-inpainting
|
08e9bf70e39bf9f1d5a187bee9e70ea81d35fd48
|
[
"MIT"
] | null | null | null |
MiDaS/run.py
|
shatgupt/3d-photo-inpainting
|
08e9bf70e39bf9f1d5a187bee9e70ea81d35fd48
|
[
"MIT"
] | null | null | null |
"""Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
# from monodepth_net import MonoDepthNet
# import utils
import matplotlib.pyplot as plt
import numpy as np
import cv2
import imageio
def run_depth(img_names, input_path, output_path, model_path, Net, utils, target_w=None):
"""Run MonoDepthNN to compute depth maps.
Args:
input_path (str): path to input folder
output_path (str): path to output folder
model_path (str): path to saved model
"""
print("initialize")
# select device
# device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda:0")
elif isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
device = torch.device(config["gpu_ids"])
else:
device = torch.device("cpu")
print("device: %s" % device)
# load network
model = Net(model_path)
model.to(device)
model.eval()
# get input
# img_names = glob.glob(os.path.join(input_path, "*"))
num_images = len(img_names)
# create output folder
os.makedirs(output_path, exist_ok=True)
print("start processing")
for ind, img_name in enumerate(img_names):
print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))
# input
img = utils.read_image(img_name)
w = img.shape[1]
scale = 640. / max(img.shape[0], img.shape[1])
target_height, target_width = int(round(img.shape[0] * scale)), int(round(img.shape[1] * scale))
img_input = utils.resize_image(img)
print(img_input.shape)
img_input = img_input.to(device)
# compute
with torch.no_grad():
out = model.forward(img_input)
depth = utils.resize_depth(out, target_width, target_height)
img = cv2.resize((img * 255).astype(np.uint8), (target_width, target_height), interpolation=cv2.INTER_AREA)
filename = os.path.join(
output_path, os.path.splitext(os.path.basename(img_name))[0]
)
np.save(filename + '.npy', depth)
# utils.write_depth(filename, depth, img, bits=2)
print("finished")
# if __name__ == "__main__":
# # set paths
# INPUT_PATH = "image"
# OUTPUT_PATH = "output"
# MODEL_PATH = "model.pt"
# # set torch options
# torch.backends.cudnn.enabled = True
# torch.backends.cudnn.benchmark = True
# # compute depth maps
# run_depth(INPUT_PATH, OUTPUT_PATH, MODEL_PATH, Net, target_w=640)
| 28.852273
| 115
| 0.632532
|
97a28d3e4a5fd3b20ef1db4e64b1406b012dd69e
| 962
|
py
|
Python
|
py/02_home/backward_string_by_word.py
|
myConsciousness/CheckIO
|
fb5336a5d0e1d7c5c6ecc4bba25805547425afe7
|
[
"Apache-2.0"
] | 2
|
2020-09-14T16:02:54.000Z
|
2022-01-24T19:55:05.000Z
|
py/02_home/backward_string_by_word.py
|
myConsciousness/checkio-history
|
fb5336a5d0e1d7c5c6ecc4bba25805547425afe7
|
[
"Apache-2.0"
] | null | null | null |
py/02_home/backward_string_by_word.py
|
myConsciousness/checkio-history
|
fb5336a5d0e1d7c5c6ecc4bba25805547425afe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
In a given string you should reverse every word, but the words should stay in their places.
Input: A string.
Output: A string.
'''
import re
def backward_string_by_word(text):
# your code here
splittedText = re.split("( )", text)
reversedWords = []
for word in splittedText:
reversedWords.append(word[::-1])
return ''.join(reversedWords)
if __name__ == '__main__':
print("Example:")
print(backward_string_by_word('hello world'))
# These "asserts" are used for self-checking and not for an auto-testing
assert backward_string_by_word('') == ''
assert backward_string_by_word('world') == 'dlrow'
assert backward_string_by_word('hello world') == 'olleh dlrow'
assert backward_string_by_word('hello world') == 'olleh dlrow'
assert backward_string_by_word('welcome to a game') == 'emoclew ot a emag'
print("Coding complete? Click 'Check' to earn cool rewards!")
| 28.294118
| 91
| 0.680873
|
50a40ed26b485bd03cc4b59057cb170e67537cac
| 306
|
py
|
Python
|
S3_to_Local.py
|
MoSaidi/Real-Time-Data-Streaming-Analytics-System-for-tracking-Pedestrians
|
5fed44509422a169d614da2694c9b3e64900fc4c
|
[
"MIT"
] | 1
|
2021-11-21T22:42:48.000Z
|
2021-11-21T22:42:48.000Z
|
S3_to_Local.py
|
MoSaidi/Real-Time-Data-Streaming-Analytics-System-for-tracking-Pedestrians
|
5fed44509422a169d614da2694c9b3e64900fc4c
|
[
"MIT"
] | 12
|
2021-11-21T11:05:48.000Z
|
2022-01-10T20:41:05.000Z
|
S3_to_Local.py
|
MoSaidi/Real-Time-Data-Streaming-Analytics-System-for-tracking-Pedestrians
|
5fed44509422a169d614da2694c9b3e64900fc4c
|
[
"MIT"
] | 6
|
2021-11-24T18:41:46.000Z
|
2022-01-09T10:17:46.000Z
|
import boto3
import os
s3 = boto3.client('s3', aws_access_key_id='&YOUR_ACCESS_KEY' , aws_secret_access_key='&YOUR_SECRET_KEY')
s3.download_file('projectbigdata2022', 'positions.csv', os.path.join('.','positions.csv'))
s3.download_file('projectbigdata2022', 'devices.csv', os.path.join('.','devices.csv'))
| 51
| 104
| 0.75817
|
8a131f288c13f2e43687299aea4eb9aec71c817d
| 2,201
|
py
|
Python
|
tests/models/symbol/scsi_target_port_id_test.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 5
|
2016-08-23T17:52:22.000Z
|
2019-05-16T08:45:30.000Z
|
tests/models/symbol/scsi_target_port_id_test.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 2
|
2016-11-10T05:30:21.000Z
|
2019-04-05T15:03:37.000Z
|
tests/models/symbol/scsi_target_port_id_test.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 7
|
2016-08-25T16:11:44.000Z
|
2021-02-22T05:31:25.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.symbol.scsi_target_port_id import SCSITargetPortId
class SCSITargetPortIdTest(unittest.TestCase):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
# Try instantiating the model
def test_scsi_target_port_id(self):
scsi_target_port_id_obj = SCSITargetPortId()
self.assertNotEqual(scsi_target_port_id_obj, None)
| 57.921053
| 845
| 0.775557
|
561dab8baf1eb15f6a57803e2067df6c3db4008d
| 1,169
|
py
|
Python
|
data_converter/drf_yasg_inspectors.py
|
Hedgehog18/Data_converter
|
2672910d605ca77662a629b587f6ae792c393204
|
[
"MIT"
] | null | null | null |
data_converter/drf_yasg_inspectors.py
|
Hedgehog18/Data_converter
|
2672910d605ca77662a629b587f6ae792c393204
|
[
"MIT"
] | null | null | null |
data_converter/drf_yasg_inspectors.py
|
Hedgehog18/Data_converter
|
2672910d605ca77662a629b587f6ae792c393204
|
[
"MIT"
] | null | null | null |
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg.inspectors import DjangoRestResponsePagination, CoreAPICompatInspector
from rest_framework.filters import SearchFilter
class DODjangoRestResponsePagination(DjangoRestResponsePagination):
def get_paginator_parameters(self, paginator):
return paginator.get_schema_fields(self.view)
def get_paginated_response(self, paginator, response_schema):
return paginator.get_paginated_response_schema(response_schema)
class DjangoFilterDescriptionInspector(CoreAPICompatInspector):
def get_filter_parameters(self, filter_backend):
params = super().get_filter_parameters(filter_backend)
if isinstance(filter_backend, DjangoFilterBackend):
for param in params:
if not param.get('description', ''):
param.description = f'Filter the returned list by {param.name}.'
if isinstance(filter_backend, SearchFilter):
fields = getattr(self.view, 'search_fields')
fields = ', '.join(fields)
params[0].description = f'Search by {fields.split("__")[0]}.'
return params
| 41.75
| 84
| 0.731394
|
e8b5883079c528e6884888d6cf76df3c8a6537c1
| 453
|
py
|
Python
|
video/rooms/participants/kick-remove-participant/remove-participant.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
video/rooms/participants/kick-remove-participant/remove-participant.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
video/rooms/participants/kick-remove-participant/remove-participant.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
api_key_sid = "SKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
api_key_secret = "your_api_key_secret"
client = Client(api_key_sid, api_key_secret)
participant = client.video.rooms('RMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').participants.get('Alice')
print(participant.update(status='disconnected').status)
| 37.75
| 96
| 0.825607
|
ccd5ab42b286e9d8c02d8ba3d44e10736be46fd6
| 486
|
py
|
Python
|
hubblestack/extmods/modules/sysexit.py
|
arshiyaaggarwal/hubble
|
5bea686df72f2b101546e20467ab2c9b5f536730
|
[
"Apache-2.0"
] | null | null | null |
hubblestack/extmods/modules/sysexit.py
|
arshiyaaggarwal/hubble
|
5bea686df72f2b101546e20467ab2c9b5f536730
|
[
"Apache-2.0"
] | null | null | null |
hubblestack/extmods/modules/sysexit.py
|
arshiyaaggarwal/hubble
|
5bea686df72f2b101546e20467ab2c9b5f536730
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
""" intended for testing, this module's sole purpose is to cause the running
daemon to exit gracefully within a scheduled timeframe """
from __future__ import absolute_import
import logging
import sys
LOG = logging.getLogger(__name__)
def sysexit(code=0):
"""
This function closes the process when called.
code
        The exit status with which the process should exit.
"""
LOG.info('instructing daemon to exit')
sys.exit(code)
| 22.090909
| 76
| 0.705761
|